* 'master' of ssh://ftp-master.debian.org/srv/ftp.debian.org/git/dak: (25 commits)
remove accepted references
re-enable fingerprint importing
honour No-Action and say what's going on
move build queue cleaning to manage-build-queues
teach clean_suites about build_queue_files
honour No-Action in clean suites
re-enable clean-queues
log instead of print
update scripts for new world order
use manage-build-queues
remove the release file before generating the new one
argh
use %s
fix os.system call
small fixes
more typo fixes
and another one
typo
add mbq to dak.py
add --all flag
...
# used by cron.dinstall *and* cron.unchecked.
function make_buildd_dir () {
- cd $configdir
- apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate apt.conf.buildd
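+ # manage-build-queues now handles both cleaning the buildd queue and regenerating
+ # its Packages/Sources/Release metadata (see the new manage-build-queues command below)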
+ dak manage-build-queues -v buildd
- cd ${incoming}
- rm -f buildd/Release*
- apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="Debian" -o APT::FTPArchive::Release::Label="Debian" -o APT::FTPArchive::Release::Description="buildd incoming" -o APT::FTPArchive::Release::Architectures="${archs}" release buildd > Release
- gpg --secret-keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 55BE302B --detach-sign -o Release.gpg Release
- mv Release* buildd/.
-
- cd ${incoming}
- mkdir -p tree/${STAMP}
- cp -al ${incoming}/buildd/. tree/${STAMP}/
- ln -sfT tree/${STAMP} ${incoming}/builddweb
- find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
+ cd ${incoming}
+ mkdir -p tree/${STAMP}
+ cp -al ${incoming}/buildd/. tree/${STAMP}/
+ ln -sfT tree/${STAMP} ${incoming}/builddweb
+ find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
}
# Do the unchecked processing, in case we have files.
ARGS=""
ERR=""
)
-### TODO: clean-* fixup
-#stage $GO
+stage $GO
GO=(
- FUNC="buildd"
- TIME="buildd"
+ FUNC="buildd_dir"
+ TIME="buildd_dir"
ARGS=""
ERR=""
)
function do_buildd () {
if lockfile -r3 $NOTICE; then
LOCKDAILY="YES"
- psql projectb -A -t -q -c "SELECT build_queue.path || '/' || build_queue_files.filename FROM build_queue_files LEFT JOIN build_queue ON (build_queue.id = build_queue_files.build_queue_id) WHERE queue_name = 'buildd' AND filename ~ 'd(sc|eb)$';" > $dbdir/dists/unstable_accepted.list
cd $overridedir
dak make-overrides &>/dev/null
rm -f override.sid.all3 override.sid.all3.src
UrgencyLog "/srv/release.debian.org/britney/input/urgencies/";
Queue
{
- Accepted "/srv/ftp.debian.org/queue/accepted/";
Byhand "/srv/ftp.debian.org/queue/byhand/";
ProposedUpdates "/srv/ftp.debian.org/queue/p-u-new/";
OldProposedUpdates "/srv/ftp.debian.org/queue/o-p-u-new/";
function fingerprints() {
log "Not updating fingerprints - scripts needs checking"
-# log "Updating fingerprints"
-# dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg
-
-# OUTFILE=$(mktemp)
-# dak import-keyring --generate-users "%s" /srv/keyring.debian.org/keyrings/debian-maintainers.gpg >"${OUTFILE}"
-
-# if [ -s "${OUTFILE}" ]; then
-# /usr/sbin/sendmail -odq -oi -t -f envelope@ftp-master.debian.org <<EOF
-#From: Debian FTP Masters <ftpmaster@ftp-master.debian.org>
-#To: <debian-project@lists.debian.org>
-#Subject: Debian Maintainers Keyring changes
-#Content-Type: text/plain; charset=utf-8
-#MIME-Version: 1.0
-#
-#The following changes to the debian-maintainers keyring have just been activated:
-#
-#$(cat $OUTFILE)
-#
-#Debian distribution maintenance software,
-#on behalf of the Keyring maintainers
-#
-#EOF
-# fi
-# rm -f "$OUTFILE"
+ log "Updating fingerprints"
+ dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg
+
+ OUTFILE=$(mktemp)
+ dak import-keyring --generate-users "%s" /srv/keyring.debian.org/keyrings/debian-maintainers.gpg >"${OUTFILE}"
+
+ if [ -s "${OUTFILE}" ]; then
+ /usr/sbin/sendmail -odq -oi -t -f envelope@ftp-master.debian.org <<EOF
+From: Debian FTP Masters <ftpmaster@ftp-master.debian.org>
+To: <debian-project@lists.debian.org>
+Subject: Debian Maintainers Keyring changes
+Content-Type: text/plain; charset=utf-8
+MIME-Version: 1.0
+
+The following changes to the debian-maintainers keyring have just been activated:
+
+$(cat $OUTFILE)
+
+Debian distribution maintenance software,
+on behalf of the Keyring maintainers
+
+EOF
+ fi
+ rm -f "$OUTFILE"
}
function overrides() {
function dakcleanup() {
log "Cleanup old packages/files"
- dak clean-suites -m 10000
+ # TODO: Fix up clean-suites
+ #dak clean-suites -m 10000
dak clean-queues
}
-function buildd() {
- # Needs to be rebuilt, as files have moved. Due to unaccepts, we need to
- # update this before wanna-build is updated.
- log "Regenerating wanna-build/buildd information"
- psql projectb -A -t -q -c "SELECT build_queue.path || '/' || build_queue_files.filename FROM build_queue_files LEFT JOIN build_queue ON (build_queue.id =build_queue_files.build_queue_id) WHERE queue_name = 'buildd' AND filename ~ 'd(sc|eb)$'" > $dbdir/dists/unstable_accepted.list
- symlinks -d /srv/incoming.debian.org/buildd > /dev/null
- apt-ftparchive generate apt.conf.buildd
-}
-
function buildd_dir() {
# Rebuild the buildd dir to avoid long periods of 403s while it is regenerated
log "Regenerating the buildd incoming dir"
################################################################################
-def gen_blacklist(dir):
- for entry in os.listdir(dir):
- entry = entry.split('_')[0]
- blacklist[entry] = 1
-
def process(osuite, affected_suites, originosuite, component, otype, session):
global Logger, Options, sections, priorities
else:
Logger = daklog.Logger(cnf, "check-overrides", 1)
- gen_blacklist(cnf["Dir::Queue::Accepted"])
-
for osuite in cnf.SubTree("Check-Overrides::OverrideSuites").List():
if "1" != cnf["Check-Overrides::OverrideSuites::%s::Process" % osuite]:
continue
# deletion.
q = session.execute("""
-SELECT b.file, f.filename FROM binaries b, files f
- WHERE f.last_used IS NULL AND b.file = f.id
- AND NOT EXISTS (SELECT 1 FROM bin_associations ba WHERE ba.bin = b.id)""")
-
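+-- binary files not in any suite and not still referenced by a build queue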
+SELECT b.file, f.filename
+ FROM binaries b
+ LEFT JOIN files f
+ ON (b.file = f.id)
+ WHERE f.last_used IS NULL
+ AND b.id NOT IN
+ (SELECT ba.bin FROM bin_associations ba)
+ AND f.id NOT IN
+ (SELECT bqf.fileid FROM build_queue_files bqf)""")
for i in q.fetchall():
Logger.log(["set lastused", i[1]])
- session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid AND last_used IS NULL",
- {'lastused': now_date, 'fileid': i[0]})
- session.commit()
+ if not Options["No-Action"]:
+ session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid AND last_used IS NULL",
+ {'lastused': now_date, 'fileid': i[0]})
+
+ if not Options["No-Action"]:
+ session.commit()
# Check for any binaries which are marked for eventual deletion
# but are now used again.
q = session.execute("""
-SELECT b.file, f.filename FROM binaries b, files f
- WHERE f.last_used IS NOT NULL AND f.id = b.file
- AND EXISTS (SELECT 1 FROM bin_associations ba WHERE ba.bin = b.id)""")
+SELECT b.file, f.filename
+ FROM binaries b
+ LEFT JOIN files f
+ ON (b.file = f.id)
+ WHERE f.last_used IS NOT NULL
+ AND (b.id IN
+ (SELECT ba.bin FROM bin_associations ba)
+ OR f.id IN
+ (SELECT bqf.fileid FROM build_queue_files bqf))""")
for i in q.fetchall():
Logger.log(["unset lastused", i[1]])
- session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid", {'fileid': i[0]})
- session.commit()
+ if not Options["No-Action"]:
+ session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid", {'fileid': i[0]})
+
+ if not Options["No-Action"]:
+ session.commit()
########################################
# Get the list of source packages not in a suite and not used by
# any binaries.
q = session.execute("""
-SELECT s.id, s.file, f.filename FROM source s, files f
- WHERE f.last_used IS NULL AND s.file = f.id
- AND NOT EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id)
- AND NOT EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id)""")
+SELECT s.id, s.file, f.filename
+ FROM source s
+ LEFT JOIN files f
+ ON (s.file = f.id)
+ WHERE f.last_used IS NULL
+ AND s.id NOT IN
+ (SELECT sa.source FROM src_associations sa)
+ AND s.id NOT IN
+ (SELECT b.source FROM binaries b)
+ AND f.id NOT IN
+ (SELECT bqf.fileid FROM build_queue_files bqf)""")
#### XXX: this should ignore cases where the files for the binary b
#### have been marked for deletion (so the delay between bins go
# Mark the .dsc file for deletion
Logger.log(["set lastused", dsc_fname])
- session.execute("""UPDATE files SET last_used = :last_used
- WHERE id = :dscfileid AND last_used IS NULL""",
- {'last_used': now_date, 'dscfileid': dsc_file_id})
+ if not Options["No-Action"]:
+ session.execute("""UPDATE files SET last_used = :last_used
+ WHERE id = :dscfileid AND last_used IS NULL""",
+ {'last_used': now_date, 'dscfileid': dsc_file_id})
# Mark all other files referenced by the .dsc too, if they're not used by anyone else
x = session.execute("""SELECT f.id, f.filename FROM files f, dsc_files d
y = session.execute("SELECT id FROM dsc_files d WHERE d.file = :fileid", {'fileid': file_id})
if len(y.fetchall()) == 1:
Logger.log(["set lastused", file_name])
- session.execute("""UPDATE files SET last_used = :lastused
- WHERE id = :fileid AND last_used IS NULL""",
- {'lastused': now_date, 'fileid': file_id})
+ if not Options["No-Action"]:
+ session.execute("""UPDATE files SET last_used = :lastused
+ WHERE id = :fileid AND last_used IS NULL""",
+ {'lastused': now_date, 'fileid': file_id})
- session.commit()
+ if not Options["No-Action"]:
+ session.commit()
# Check for any sources which are marked for deletion but which
# are now used again.
-
q = session.execute("""
SELECT f.id, f.filename FROM source s, files f, dsc_files df
WHERE f.last_used IS NOT NULL AND s.id = df.source AND df.file = f.id
AND ((EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id))
- OR (EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id)))""")
+ OR (EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id))
+ OR (EXISTS (SELECT 1 FROM build_queue_files bqf WHERE bqf.fileid = s.file)))""")
#### XXX: this should also handle deleted binaries specially (ie, not
#### reinstate sources because of them
for i in q.fetchall():
Logger.log(["unset lastused", i[1]])
- session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid",
- {'fileid': i[0]})
+ if not Options["No-Action"]:
+ session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid",
+ {'fileid': i[0]})
- session.commit()
+ if not Options["No-Action"]:
+ session.commit()
########################################
WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id)
AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id)
AND NOT EXISTS (SELECT 1 FROM changes_pool_files cpf WHERE cpf.fileid = f.id)
- AND NOT EXISTS (SELECT 1 FROM queue_files qf WHERE qf.id = f.id)
+ AND NOT EXISTS (SELECT 1 FROM build_queue_files qf WHERE qf.fileid = f.id)
AND last_used IS NULL
ORDER BY filename""")
for x in ql:
utils.warn("orphaned file: %s" % x)
Logger.log(["set lastused", x[1], "ORPHANED FILE"])
- session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid",
- {'lastused': now_date, 'fileid': x[0]})
+ if not Options["No-Action"]:
+ session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid",
+ {'lastused': now_date, 'fileid': x[0]})
- session.commit()
+ if not Options["No-Action"]:
+ session.commit()
def clean_binaries(now_date, delete_date, max_delete, session):
# We do this here so that the binaries we remove will have their
cur_date = now_date.strftime("%Y-%m-%d")
dest = os.path.join(cnf["Dir::Morgue"], cnf["Clean-Suites::MorgueSubDir"], cur_date)
- if not os.path.exists(dest):
+ if not Options["No-Action"] and not os.path.exists(dest):
os.mkdir(dest)
# Delete from source
################################################################################
-def clean_queue_build(now_date, delete_date, max_delete, session):
-
- cnf = Config()
-
- if not cnf.ValueList("Dinstall::QueueBuildSuites") or Options["No-Action"]:
- return
-
- print "Cleaning out queue build symlinks..."
-
- our_delete_date = now_date - timedelta(seconds = int(cnf["Clean-Suites::QueueBuildStayOfExecution"]))
- count = 0
-
- for qf in session.query(BuildQueueFile).filter(BuildQueueFile.last_used <= our_delete_date):
- if not os.path.exists(qf.filename):
- utils.warn("%s (from queue_build) doesn't exist." % (qf.filename))
- continue
-
- if not cnf.FindB("Dinstall::SecurityQueueBuild") and not os.path.islink(qf.filename):
- utils.fubar("%s (from queue_build) should be a symlink but isn't." % (qf.filename))
-
- Logger.log(["delete queue build", qf.filename])
- if not Options["No-Action"]:
- os.unlink(qf.filename)
- session.delete(qf)
- count += 1
-
- if not Options["No-Action"]:
- session.commit()
-
- if count:
- Logger.log(["total", count])
- print "Cleaned %d queue_build files." % (count)
-
-################################################################################
-
def clean_empty_directories(session):
"""
Removes empty directories from pool directories.
"""
+ print "Cleaning out empty directories..."
+
count = 0
cursor = session.execute(
clean(now_date, delete_date, max_delete, session)
clean_maintainers(now_date, delete_date, max_delete, session)
clean_fingerprints(now_date, delete_date, max_delete, session)
- clean_queue_build(now_date, delete_date, max_delete, session)
clean_empty_directories(session)
Logger.close()
"Generate .diff/Index files"),
("clean-suites",
"Clean unused/superseded packages from the archive"),
+ ("manage-build-queues",
+ "Clean and update metadata for build queues"),
("clean-queues",
"Clean cruft from incoming"),
("clean-proposed-updates",
--- /dev/null
+#!/usr/bin/env python
+
+"""
+Add some meta info to queues
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import psycopg2
+
+from daklib.dak_exceptions import DBUpdateError
+
+def do_update(self):
+ print "Add meta info columns to queues."
+
+ try:
+ c = self.db.cursor()
+
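+ # Per-queue Release metadata: when generate_metadata is set, origin/label/releasedescription
+ # feed apt-ftparchive, signingkey (if set) selects the key used to sign Release, and
+ # stay_of_execution is how long (in seconds) files linger in the queue after last use.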
+ c.execute("ALTER TABLE policy_queue ADD COLUMN generate_metadata BOOL DEFAULT FALSE NOT NULL")
+ c.execute("ALTER TABLE policy_queue ADD COLUMN origin TEXT DEFAULT NULL")
+ c.execute("ALTER TABLE policy_queue ADD COLUMN label TEXT DEFAULT NULL")
+ c.execute("ALTER TABLE policy_queue ADD COLUMN releasedescription TEXT DEFAULT NULL")
+ c.execute("ALTER TABLE policy_queue ADD COLUMN signingkey TEXT DEFAULT NULL")
+ c.execute("ALTER TABLE policy_queue ADD COLUMN stay_of_execution INT4 NOT NULL DEFAULT 86400 CHECK (stay_of_execution >= 0)")
+ c.execute("""ALTER TABLE policy_queue
+ ADD CONSTRAINT policy_queue_meta_sanity_check
+ CHECK ( (generate_metadata IS FALSE)
+ OR (origin IS NOT NULL AND label IS NOT NULL AND releasedescription IS NOT NULL) )""")
+
+ c.execute("ALTER TABLE build_queue ADD COLUMN generate_metadata BOOL DEFAULT FALSE NOT NULL")
+ c.execute("ALTER TABLE build_queue ADD COLUMN origin TEXT DEFAULT NULL")
+ c.execute("ALTER TABLE build_queue ADD COLUMN label TEXT DEFAULT NULL")
+ c.execute("ALTER TABLE build_queue ADD COLUMN releasedescription TEXT DEFAULT NULL")
+ c.execute("ALTER TABLE build_queue ADD COLUMN signingkey TEXT DEFAULT NULL")
+ c.execute("ALTER TABLE build_queue ADD COLUMN stay_of_execution INT4 NOT NULL DEFAULT 86400 CHECK (stay_of_execution >= 0)")
+ c.execute("""ALTER TABLE build_queue
+ ADD CONSTRAINT build_queue_meta_sanity_check
+ CHECK ( (generate_metadata IS FALSE)
+ OR (origin IS NOT NULL AND label IS NOT NULL AND releasedescription IS NOT NULL) )""")
+
+ print "Committing"
+ c.execute("UPDATE config SET value = '24' WHERE name = 'db_revision'")
+ self.db.commit()
+
+ except psycopg2.InternalError, msg:
+ self.db.rollback()
+ raise DBUpdateError, "Database error, rollback issued. Error message : %s" % (str(msg))
+
SELECT path, filename
FROM srcfiles_suite_component
WHERE suite = :suite AND component = :component
+ ORDER BY filename
"""
args = { 'suite': suite.suite_id,
'component': component.component_id }
FROM binfiles_suite_component_arch
WHERE suite = :suite AND component = :component AND type = :type AND
(architecture = :architecture OR architecture = 2)
+ ORDER BY filename
"""
args = { 'suite': suite.suite_id,
'component': component.component_id,
def run(self):
cnf = Config()
count = 1
- for directory in [ "Accepted", "Byhand", "Done", "New", "ProposedUpdates", "OldProposedUpdates" ]:
+ for directory in [ "Byhand", "Done", "New", "ProposedUpdates", "OldProposedUpdates" ]:
checkdir = cnf["Dir::Queue::%s" % (directory) ]
if os.path.exists(checkdir):
print "Looking into %s" % (checkdir)
--- /dev/null
+#!/usr/bin/env python
+
+"""Manage build queues"""
+# Copyright (C) 2000, 2001, 2002, 2006 James Troup <james@nocrew.org>
+# Copyright (C) 2009 Mark Hymers <mhy@debian.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import os, os.path, stat, sys
+from datetime import datetime
+import apt_pkg
+
+from daklib import daklog
+from daklib.dbconn import *
+from daklib.config import Config
+
+################################################################################
+
+Options = None
+Logger = None
+
+################################################################################
+
+def usage (exit_code=0):
+ print """Usage: dak manage-build-queues [OPTIONS] buildqueue1 buildqueue2
+Manage the contents of one or more build queues
+
+ -a, --all run on all known build queues
+ -n, --no-action don't do anything
+ -v, --verbose explain what is being done
+ -h, --help show this help and exit"""
+
+ sys.exit(exit_code)
+
+################################################################################
+
+def main ():
+ global Options, Logger
+
+ cnf = Config()
+
+ for i in ["Help", "No-Action", "Verbose", "All"]:
+ if not cnf.has_key("Manage-Build-Queues::Options::%s" % (i)):
+ cnf["Manage-Build-Queues::Options::%s" % (i)] = ""
+
+ Arguments = [('h',"help","Manage-Build-Queues::Options::Help"),
+ ('n',"no-action","Manage-Build-Queues::Options::No-Action"),
+ ('a',"all","Manage-Build-Queues::Options::All"),
+ ('v',"verbose","Manage-Build-Queues::Options::Verbose")]
+
+ queue_names = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
+ Options = cnf.SubTree("Manage-Build-Queues::Options")
+
+ if Options["Help"]:
+ usage()
+
+ Logger = daklog.Logger(cnf, 'manage-build-queues', Options['No-Action'])
+
+ starttime = datetime.now()
+
+ session = DBConn().session()
+
+ if Options["All"]:
+ if len(queue_names) != 0:
+ print "E: Cannot use both -a and a queue_name"
+ sys.exit(1)
+ queues = session.query(BuildQueue).all()
+
+ else:
+ queues = []
+ for q in queue_names:
+ queue = get_build_queue(q.lower(), session)
+ if queue:
+ queues.append(queue)
+ else:
+ Logger.log(['cannot find queue %s' % q])
+
+ # For each given queue, look up object and call manage_queue
+ for q in queues:
+ Logger.log(['cleaning queue %s using datetime %s' % (q.queue_name, starttime)])
+ q.clean_and_update(starttime, Logger, dryrun=Options["No-Action"])
+
+ Logger.close()
+
+#######################################################################################
+
+if __name__ == '__main__':
+ main()
################################################################################
Cnf = None
-required_database_schema = 23
+required_database_schema = 24
################################################################################
import re
import psycopg2
import traceback
-from datetime import datetime
+from datetime import datetime, timedelta
+from errno import ENOENT
+from tempfile import mkstemp, mkdtemp
from inspect import getargspec
################################################################################
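+# Minimal apt.conf rendered by BuildQueue.write_metadata below; the override and
+# cache paths are currently hardcoded for ftp-master (see the TODO in write_metadata).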
+MINIMAL_APT_CONF="""
+Dir
+{
+ ArchiveDir "%(archivepath)s";
+ OverrideDir "/srv/ftp.debian.org/scripts/override/";
+ CacheDir "/srv/ftp.debian.org/database/";
+};
+
+Default
+{
+ Packages::Compress ". bzip2 gzip";
+ Sources::Compress ". bzip2 gzip";
+ DeLinkLimit 0;
+ FileMode 0664;
+}
+
+bindirectory "incoming"
+{
+ Packages "Packages";
+ Contents " ";
+
+ BinOverride "override.sid.all3";
+ BinCacheDB "packages-accepted.db";
+
+ FileList "%(filelist)s";
+
+ PathPrefix "";
+ Packages::Extensions ".deb .udeb";
+};
+
+bindirectory "incoming/"
+{
+ Sources "Sources";
+ BinOverride "override.sid.all3";
+ SrcOverride "override.sid.all3.src";
+ FileList "%(filelist)s";
+};
+"""
+
class BuildQueue(object):
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
return '<BuildQueue %s>' % self.queue_name
+ def write_metadata(self, starttime, force=False):
+ # Do we write out metafiles?
+ if not (force or self.generate_metadata):
+ return
+
+ session = DBConn().session().object_session(self)
+
+ fl_fd = fl_name = ac_fd = ac_name = None
+ tempdir = None
+ arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
+ startdir = os.getcwd()
+
+ try:
+ # Grab files we want to include
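+ # (anything whose lastused plus the queue's stay_of_execution is still in the future)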
+ newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+ # Write file list with newer files
+ (fl_fd, fl_name) = mkstemp()
+ for n in newer:
+ os.write(fl_fd, '%s\n' % n.fullpath)
+ os.close(fl_fd)
+
+ # Write minimal apt.conf
+ # TODO: Remove hardcoding from template
+ (ac_fd, ac_name) = mkstemp()
+ os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
+ 'filelist': fl_name})
+ os.close(ac_fd)
+
+ # Run apt-ftparchive generate
+ os.chdir(os.path.dirname(ac_name))
+ os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
+
+ # Run apt-ftparchive release
+ # TODO: Eww - fix this
+ bname = os.path.basename(self.path)
+ os.chdir(self.path)
+ os.chdir('..')
+
+ # We have to remove the Release file otherwise it'll be included in the
+ # new one
+ try:
+ os.unlink(os.path.join(bname, 'Release'))
+ except OSError:
+ pass
+
+ os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
+
+ # Sign if necessary
+ if self.signingkey:
+ cnf = Config()
+ keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
+ if cnf.has_key("Dinstall::SigningPubKeyring"):
+ keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
+
+ os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
+
+ # Move the files if we got this far
+ os.rename('Release', os.path.join(bname, 'Release'))
+ if self.signingkey:
+ os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
+
+ # Clean up any left behind files
+ finally:
+ os.chdir(startdir)
+ if fl_fd:
+ try:
+ os.close(fl_fd)
+ except OSError:
+ pass
+
+ if fl_name:
+ try:
+ os.unlink(fl_name)
+ except OSError:
+ pass
+
+ if ac_fd:
+ try:
+ os.close(ac_fd)
+ except OSError:
+ pass
+
+ if ac_name:
+ try:
+ os.unlink(ac_name)
+ except OSError:
+ pass
+
+ def clean_and_update(self, starttime, Logger, dryrun=False):
+ """WARNING: This routine commits for you"""
+ session = DBConn().session().object_session(self)
+
+ if self.generate_metadata and not dryrun:
+ self.write_metadata(starttime)
+
+ # Grab files older than our execution time
+ older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+
+ for o in older:
+ killdb = False
+ try:
+ if dryrun:
+ Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
+ else:
+ Logger.log(["I: Removing %s from the queue" % o.fullpath])
+ os.unlink(o.fullpath)
+ killdb = True
+ except OSError, e:
+ # If it wasn't there, don't worry
+ if e.errno == ENOENT:
+ killdb = True
+ else:
+ # TODO: Replace with proper logging call
+ Logger.log(["E: Could not remove %s" % o.fullpath])
+
+ if killdb:
+ session.delete(o)
+
+ session.commit()
+
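+ # Anything left on disk that is not tracked in build_queue_files (other than
+ # the Packages/Sources/Release indices we generate) is a stale link; drop it.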
+ for f in os.listdir(self.path):
+ if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release'):
+ continue
+
+ try:
+ r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+ except NoResultFound:
+ fp = os.path.join(self.path, f)
+ if dryrun:
+ Logger.log(["I: Would remove unused link %s" % fp])
+ else:
+ Logger.log(["I: Removing unused link %s" % fp])
+ try:
+ os.unlink(fp)
+ except OSError:
+ Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
+
def add_file_from_pool(self, poolfile):
"""Copies a file into the pool. Assumes that the PoolFile object is
attached to the same SQLAlchemy session as the Queue object is.
pass
def __repr__(self):
- return '<BuildQueueFile %s (%s)>' % (self.filename, self.queue_id)
+ return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
+
+ @property
+ def fullpath(self):
+ return os.path.join(self.buildqueue.path, self.filename)
+
__all__.append('BuildQueueFile')
entry["new"] = 1
else:
dsc_file_exists = False
- for myq in ["Accepted", "Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
+ for myq in ["Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
if cnf.has_key("Dir::Queue::%s" % (myq)):
if os.path.exists(os.path.join(cnf["Dir::Queue::" + myq], dsc_filename)):
dsc_file_exists = True
continue
# Look in some other queues for the file
- queues = ('Accepted', 'New', 'Byhand', 'ProposedUpdates',
+ queues = ('New', 'Byhand', 'ProposedUpdates',
'OldProposedUpdates', 'Embargoed', 'Unembargoed')
for queue in queues:
else:
# TODO: Record the queues and info in the DB so we don't hardcode all this crap
# Not there? Check the queue directories...
- for directory in [ "Accepted", "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+ for directory in [ "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
if not Cnf.has_key("Dir::Queue::%s" % (directory)):
continue
in_otherdir = os.path.join(Cnf["Dir::Queue::%s" % (directory)], dsc_name)
source_epochless_version = re_no_epoch.sub('', source_version)
dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
found = False
- for q in ["Accepted", "Embargoed", "Unembargoed", "Newstage"]:
+ for q in ["Embargoed", "Unembargoed", "Newstage"]:
if cnf.has_key("Dir::Queue::%s" % (q)):
if os.path.exists(cnf["Dir::Queue::%s" % (q)] + '/' + dsc_filename):
found = True