From: Joerg Jaspert
Date: Sat, 31 Oct 2009 22:17:54 +0000 (+0100)
Subject: Merge commit 'mhy/master' into merge
X-Git-Url: https://git.decadent.org.uk/gitweb/?a=commitdiff_plain;h=b2b983bf21df6bc13f5b073f26722269610ef2cb;hp=ddf87585d74ff7f427d7af23992c47a6002652ec;p=dak.git

Merge commit 'mhy/master' into merge

* commit 'mhy/master': (50 commits)
  hmm... fix name
  right variable name
  I'm never tidying up names again
  poolfile, you moron
  in_queue is the object, _id the id
  queue_id -> policy_queue_id
  fix variable name
  argh
  When changing Queue -> Build/PolicyQueue, it helps to do a full job
  argh, another typo
  typo
  well spotted, djpig
  Fix up hand off to policy and buildd queues
  sort out permission columns
  do not support multi-archive mode in process-upload
  check_files: Fix check for known changes files
  Small fixes
  Fix typo in foreign key constraint name
  initial policy queue implementation in process_upload
  ...

Signed-off-by: Joerg Jaspert
---

diff --git a/dak/clean_suites.py b/dak/clean_suites.py
index 72a1d5a8..99f0c8b4 100755
--- a/dak/clean_suites.py
+++ b/dak/clean_suites.py
@@ -164,6 +164,7 @@ SELECT id, filename
                  FROM files f
                  WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id)
                    AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id)
                    AND NOT EXISTS (SELECT 1 FROM changes_pool_files cpf WHERE cpf.fileid = f.id)
+                   AND NOT EXISTS (SELECT 1 FROM queue_files qf WHERE qf.id = f.id)
                    AND last_used IS NULL
                  ORDER BY filename""")
@@ -337,7 +338,7 @@ def clean_queue_build(now_date, delete_date, max_delete, session):
 
     our_delete_date = now_date - timedelta(seconds = int(cnf["Clean-Suites::QueueBuildStayOfExecution"]))
     count = 0
 
-    for qf in session.query(QueueBuild).filter(QueueBuild.last_used <= our_delete_date):
+    for qf in session.query(BuildQueueFile).filter(BuildQueueFile.last_used <= our_delete_date):
         if not os.path.exists(qf.filename):
             utils.warn("%s (from queue_build) doesn't exist." % (qf.filename))
             continue
diff --git a/dak/dak.py b/dak/dak.py
index e424836f..47bbedfa 100755
--- a/dak/dak.py
+++ b/dak/dak.py
@@ -66,10 +66,8 @@ def init():
          ("process-new",
           "Process NEW and BYHAND packages"),
-         ("process-unchecked",
+         ("process-upload",
           "Process packages in queue/unchecked"),
-         ("process-accepted",
-          "Install packages into the pool"),
          ("make-suite-file-list",
           "Generate lists of packages per suite for apt-ftparchive"),
diff --git a/dak/dakdb/update22.py b/dak/dakdb/update22.py
new file mode 100755
index 00000000..b6fbbb44
--- /dev/null
+++ b/dak/dakdb/update22.py
@@ -0,0 +1,240 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Clean up queue SQL
+
+@contact: Debian FTP Master
+@copyright: 2009 Mark Hymers
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+
+################################################################################
+
+import psycopg2
+import time
+import os
+import datetime
+import traceback
+
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+
+def do_update(self):
+    print "Splitting up queues and fixing general design mistakes"
+
+    try:
+        c = self.db.cursor()
+
+        cnf = Config()
+
+        print "Adding build_queue table"
+        c.execute("""CREATE TABLE build_queue (
+                       id          SERIAL PRIMARY KEY,
+                       queue_name  TEXT NOT NULL UNIQUE,
+                       path        TEXT NOT NULL,
+                       copy_files  BOOL DEFAULT FALSE NOT NULL)""")
+
+        print "Adding policy_queue table"
+        c.execute("""CREATE TABLE policy_queue (
+                       id            SERIAL PRIMARY KEY,
+                       queue_name    TEXT NOT NULL UNIQUE,
+                       path          TEXT NOT NULL,
+                       perms         CHAR(4) NOT NULL DEFAULT '0660' CHECK (perms SIMILAR TO '[0-7][0-7][0-7][0-7]'),
+                       change_perms  CHAR(4) NOT NULL DEFAULT '0660' CHECK (change_perms SIMILAR TO '[0-7][0-7][0-7][0-7]')
+                     )""")
+
+        print "Copying queues"
+        queues = {}
+        c.execute("""SELECT queue.id, queue.queue_name, queue.path, queue.copy_pool_files FROM queue""")
+
+        for q in c.fetchall():
+            queues[q[0]] = q[1]
+            if q[1] in ['accepted', 'buildd']:
+                # Move to the build_queue table
+                c.execute("""INSERT INTO build_queue (queue_name, path, copy_files)
+                             VALUES ('%s', '%s', '%s')""" % (q[1], q[2], q[3]))
+            else:
+                # Move to the policy_queue table
+                c.execute("""INSERT INTO policy_queue (queue_name, path)
+                             VALUES ('%s', '%s')""" % (q[1], q[2]))
+
+        print "Fixing up build_queue_files"
+        c.execute("""ALTER TABLE queue_files DROP CONSTRAINT queue_files_queueid_fkey""")
+        c.execute("""ALTER TABLE queue_files RENAME TO build_queue_files""")
+        c.execute("""ALTER TABLE build_queue_files RENAME COLUMN queueid TO build_queue_id""")
+
+        c.execute("""UPDATE build_queue_files
+                     SET build_queue_id = (SELECT build_queue.id FROM build_queue
+                                           WHERE build_queue.queue_name =
+                                             (SELECT queue.queue_name FROM queue
+                                              WHERE queue.id = build_queue_files.build_queue_id))""")
+
+        c.execute("""ALTER TABLE build_queue_files
+                     ADD CONSTRAINT build_queue_files_build_queue_id_fkey
+                     FOREIGN KEY (build_queue_id)
+                     REFERENCES build_queue(id)
+                     ON DELETE CASCADE""")
+
+        c.execute("""ALTER TABLE suite DROP CONSTRAINT suite_policy_queue_id_fkey""")
+
+        c.execute("""UPDATE suite
+                     SET policy_queue_id = (SELECT policy_queue.id FROM policy_queue
+                                            WHERE policy_queue.queue_name =
+                                              (SELECT queue.queue_name FROM queue
+                                               WHERE queue.id = suite.policy_queue_id))""")
+
+        c.execute("""ALTER TABLE suite
+                     ADD CONSTRAINT suite_policy_queue_fkey
+                     FOREIGN KEY (policy_queue_id)
+                     REFERENCES policy_queue (id)
+                     ON DELETE RESTRICT""")
+
+        c.execute("""ALTER TABLE known_changes DROP CONSTRAINT known_changes_approved_for_fkey""")
+        c.execute("""ALTER TABLE known_changes DROP CONSTRAINT known_changes_in_queue_fkey""")
+
+        c.execute("""UPDATE known_changes
+                     SET in_queue = (SELECT policy_queue.id FROM policy_queue
+                                     WHERE policy_queue.queue_name =
+                                       (SELECT queue.queue_name FROM queue
+                                        WHERE queue.id = known_changes.in_queue))""")
+
+        c.execute("""ALTER TABLE known_changes
+                     ADD CONSTRAINT known_changes_in_queue_fkey
+                     FOREIGN KEY (in_queue)
+                     REFERENCES policy_queue (id)
+                     ON DELETE RESTRICT""")
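+
+        # approved_for is remapped the same way as in_queue above: resolve
+        # the old queue id to its queue_name, then to the new policy_queue id.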
+        c.execute("""UPDATE known_changes
+                     SET approved_for = (SELECT policy_queue.id FROM policy_queue
+                                         WHERE policy_queue.queue_name =
+                                           (SELECT queue.queue_name FROM queue
+                                            WHERE queue.id = known_changes.approved_for))""")
+
+        c.execute("""ALTER TABLE known_changes
+                     ADD CONSTRAINT known_changes_approved_for_fkey
+                     FOREIGN KEY (approved_for)
+                     REFERENCES policy_queue (id)
+                     ON DELETE RESTRICT""")
+
+        c.execute("""ALTER TABLE suite_queue_copy RENAME TO suite_build_queue_copy""")
+
+        c.execute("""ALTER TABLE suite_build_queue_copy DROP CONSTRAINT suite_queue_copy_queue_fkey""")
+
+        c.execute("""ALTER TABLE suite_build_queue_copy RENAME COLUMN queue TO build_queue_id""")
+
+        c.execute("""UPDATE suite_build_queue_copy
+                     SET build_queue_id = (SELECT build_queue.id FROM build_queue
+                                           WHERE build_queue.queue_name =
+                                             (SELECT queue.queue_name FROM queue
+                                              WHERE queue.id = suite_build_queue_copy.build_queue_id))""")
+
+        c.execute("""ALTER TABLE suite_build_queue_copy
+                     ADD CONSTRAINT suite_build_queue_copy_build_queue_id_fkey
+                     FOREIGN KEY (build_queue_id)
+                     REFERENCES build_queue (id)
+                     ON DELETE RESTRICT""")
+
+        c.execute("""DROP TABLE changes_pending_files""")
+
+        c.execute("""CREATE TABLE changes_pending_files (
+                       id         SERIAL PRIMARY KEY,
+                       filename   TEXT NOT NULL UNIQUE,
+                       size       BIGINT NOT NULL,
+                       md5sum     TEXT NOT NULL,
+                       sha1sum    TEXT NOT NULL,
+                       sha256sum  TEXT NOT NULL )""")
+
+        c.execute("""CREATE TABLE changes_pending_files_map (
+                       file_id    INT4 NOT NULL REFERENCES changes_pending_files (id),
+                       change_id  INT4 NOT NULL REFERENCES known_changes (id),
+
+                       PRIMARY KEY (file_id, change_id))""")
+
+        c.execute("""CREATE TABLE changes_pending_source (
+                       id                 SERIAL PRIMARY KEY,
+                       change_id          INT4 NOT NULL REFERENCES known_changes (id),
+                       source             TEXT NOT NULL,
+                       version            DEBVERSION NOT NULL,
+                       maintainer_id      INT4 NOT NULL REFERENCES maintainer (id),
+                       changedby_id       INT4 NOT NULL REFERENCES maintainer (id),
+                       sig_fpr            INT4 NOT NULL REFERENCES fingerprint (id),
+                       dm_upload_allowed  BOOL NOT NULL DEFAULT FALSE )""")
+
+        c.execute("""CREATE TABLE changes_pending_source_files (
+                       pending_source_id  INT4 REFERENCES changes_pending_source (id) NOT NULL,
+                       pending_file_id    INT4 REFERENCES changes_pending_files (id) NOT NULL,
+
+                       PRIMARY KEY (pending_source_id, pending_file_id) )""")
+
+        c.execute("""CREATE TABLE changes_pending_binaries (
+                       id                 SERIAL PRIMARY KEY,
+                       change_id          INT4 NOT NULL REFERENCES known_changes (id),
+                       package            TEXT NOT NULL,
+                       version            DEBVERSION NOT NULL,
+                       architecture_id    INT4 REFERENCES architecture (id) NOT NULL,
+                       source_id          INT4 REFERENCES source (id),
+                       pending_source_id  INT4 REFERENCES changes_pending_source (id),
+                       pending_file_id    INT4 REFERENCES changes_pending_files (id),
+
+                       UNIQUE (package, version, architecture_id),
+                       CHECK (source_id IS NOT NULL OR pending_source_id IS NOT NULL) )""")
+
+        print "Getting rid of old queue table"
+        c.execute("""DROP TABLE queue""")
+
+        print "Sorting out permission columns"
+        c.execute("""UPDATE policy_queue SET perms = '0664' WHERE queue_name IN ('proposedupdates', 'oldproposedupdates')""")
+
+        print "Moving known_changes table"
+        c.execute("""ALTER TABLE known_changes RENAME TO changes""")
+
+        print "Sorting out permissions"
+
+        for t in ['build_queue', 'policy_queue', 'build_queue_files',
+                  'changes_pending_binaries', 'changes_pending_source_files',
+                  'changes_pending_source', 'changes_pending_files',
+                  'changes_pool_files', 'suite_build_queue_copy']:
+            c.execute("GRANT SELECT ON %s TO public" % t)
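+            # the ftpmaster role additionally needs full write access: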
c.execute("GRANT ALL ON %s TO ftpmaster" % t) + + for s in ['queue_files_id_seq', 'build_queue_id_seq', + 'changes_pending_source_id_seq', + 'changes_pending_binaries_id_seq', + 'changes_pending_files_id_seq', + 'changes_pending_source_id_seq', + 'known_changes_id_seq', + 'policy_queue_id_seq']: + c.execute("GRANT USAGE ON %s TO ftpmaster" % s) + + print "Committing" + c.execute("UPDATE config SET value = '22' WHERE name = 'db_revision'") + self.db.commit() + + except psycopg2.InternalError, msg: + self.db.rollback() + raise DBUpdateError, "Unable to apply queue_build 21, rollback issued. Error message : %s" % (str(msg)) diff --git a/dak/import_known_changes.py b/dak/import_known_changes.py index cdb1d3af..c8d5bf96 100755 --- a/dak/import_known_changes.py +++ b/dak/import_known_changes.py @@ -32,7 +32,7 @@ import sys import os import logging import threading -from daklib.dbconn import DBConn,get_knownchange +from daklib.dbconn import DBConn, get_dbchange from daklib.config import Config import apt_pkg from daklib.dak_exceptions import DBUpdateError, InvalidDscError, ChangesUnicodeError @@ -218,7 +218,7 @@ class ChangesGenerator(threading.Thread): continue count += 1 - if not get_knownchange(changesfile, self.session): + if not get_dbchange(changesfile, self.session): to_import = ChangesToImport(dirpath, changesfile, count) if self.die: return diff --git a/dak/process_accepted.py b/dak/process_accepted.py deleted file mode 100755 index b203f498..00000000 --- a/dak/process_accepted.py +++ /dev/null @@ -1,706 +0,0 @@ -#!/usr/bin/env python - -""" -Installs Debian packages from queue/accepted into the pool - -@contact: Debian FTP Master -@copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup -@copyright: 2009 Joerg Jaspert -@license: GNU General Public License version 2 or later - -""" -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. - -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -############################################################################### - -# Cartman: "I'm trying to make the best of a bad situation, I don't -# need to hear crap from a bunch of hippy freaks living in -# denial. Screw you guys, I'm going home." -# -# Kyle: "But Cartman, we're trying to..." -# -# Cartman: "uhh.. screw you guys... home." 
- -############################################################################### - -import errno -import fcntl -import os -import sys -from datetime import datetime -import apt_pkg - -from daklib import daklog -from daklib.queue import * -from daklib import utils -from daklib.dbconn import * -from daklib.dak_exceptions import * -from daklib.regexes import re_default_answer, re_issource, re_fdnic -from daklib.urgencylog import UrgencyLog -from daklib.summarystats import SummaryStats -from daklib.config import Config - -############################################################################### - -Options = None -Logger = None - -############################################################################### - -def init(): - global Options - - # Initialize config and connection to db - cnf = Config() - DBConn() - - Arguments = [('a',"automatic","Dinstall::Options::Automatic"), - ('h',"help","Dinstall::Options::Help"), - ('n',"no-action","Dinstall::Options::No-Action"), - ('p',"no-lock", "Dinstall::Options::No-Lock"), - ('s',"no-mail", "Dinstall::Options::No-Mail"), - ('d',"directory", "Dinstall::Options::Directory", "HasArg")] - - for i in ["automatic", "help", "no-action", "no-lock", "no-mail", - "version", "directory"]: - if not cnf.has_key("Dinstall::Options::%s" % (i)): - cnf["Dinstall::Options::%s" % (i)] = "" - - changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv) - Options = cnf.SubTree("Dinstall::Options") - - if Options["Help"]: - usage() - - # If we have a directory flag, use it to find our files - if cnf["Dinstall::Options::Directory"] != "": - # Note that we clobber the list of files we were given in this case - # so warn if the user has done both - if len(changes_files) > 0: - utils.warn("Directory provided so ignoring files given on command line") - - changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"]) - - return changes_files - -############################################################################### - -def usage (exit_code=0): - print """Usage: dak process-accepted [OPTION]... [CHANGES]... - -a, --automatic automatic run - -h, --help show this help and exit. - -n, --no-action don't do anything - -p, --no-lock don't check lockfile !! for cron.daily only !! - -s, --no-mail don't send any mail - -V, --version display the version number and exit""" - sys.exit(exit_code) - -############################################################################### - -def action (u, stable_queue=None, log_urgency=True, session=None): - (summary, short_summary) = u.build_summaries() - pi = u.package_info() - - (prompt, answer) = ("", "XXX") - if Options["No-Action"] or Options["Automatic"]: - answer = 'S' - - if len(u.rejects) > 0: - print "REJECT\n" + pi - prompt = "[R]eject, Skip, Quit ?" - if Options["Automatic"]: - answer = 'R' - else: - print "INSTALL to " + ", ".join(u.pkg.changes["distribution"].keys()) - print pi + summary, - prompt = "[I]nstall, Skip, Quit ?" 
- if Options["Automatic"]: - answer = 'I' - - while prompt.find(answer) == -1: - answer = utils.our_raw_input(prompt) - m = re_default_answer.match(prompt) - if answer == "": - answer = m.group(1) - answer = answer[:1].upper() - - if answer == 'R': - u.do_unaccept() - Logger.log(["unaccepted", u.pkg.changes_file]) - elif answer == 'I': - if stable_queue: - stable_install(u, summary, short_summary, stable_queue, log_urgency) - else: - install(u, session, log_urgency) - elif answer == 'Q': - sys.exit(0) - - -############################################################################### -def add_poolfile(filename, datadict, location_id, session): - poolfile = PoolFile() - poolfile.filename = filename - poolfile.filesize = datadict["size"] - poolfile.md5sum = datadict["md5sum"] - poolfile.sha1sum = datadict["sha1sum"] - poolfile.sha256sum = datadict["sha256sum"] - poolfile.location_id = location_id - - session.add(poolfile) - # Flush to get a file id (NB: This is not a commit) - session.flush() - - return poolfile - -def add_dsc_to_db(u, filename, session): - entry = u.pkg.files[filename] - source = DBSource() - - source.source = u.pkg.dsc["source"] - source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch - source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id - source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id - source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id - source.install_date = datetime.now().date() - - dsc_component = entry["component"] - dsc_location_id = entry["location id"] - - source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes") - - # Set up a new poolfile if necessary - if not entry.has_key("files id") or not entry["files id"]: - filename = entry["pool name"] + filename - poolfile = add_poolfile(filename, entry, dsc_location_id, session) - entry["files id"] = poolfile.file_id - - source.poolfile_id = entry["files id"] - session.add(source) - session.flush() - - for suite_name in u.pkg.changes["distribution"].keys(): - sa = SrcAssociation() - sa.source_id = source.source_id - sa.suite_id = get_suite(suite_name).suite_id - session.add(sa) - - session.flush() - - # Add the source files to the DB (files and dsc_files) - dscfile = DSCFile() - dscfile.source_id = source.source_id - dscfile.poolfile_id = entry["files id"] - session.add(dscfile) - - for dsc_file, dentry in u.pkg.dsc_files.items(): - df = DSCFile() - df.source_id = source.source_id - - # If the .orig tarball is already in the pool, it's - # files id is stored in dsc_files by check_dsc(). 
- files_id = dentry.get("files id", None) - - # Find the entry in the files hash - # TODO: Bail out here properly - dfentry = None - for f, e in u.pkg.files.items(): - if f == dsc_file: - dfentry = e - break - - if files_id is None: - filename = dfentry["pool name"] + dsc_file - - (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id) - # FIXME: needs to check for -1/-2 and or handle exception - if found and obj is not None: - files_id = obj.file_id - - # If still not found, add it - if files_id is None: - # HACK: Force sha1sum etc into dentry - dentry["sha1sum"] = dfentry["sha1sum"] - dentry["sha256sum"] = dfentry["sha256sum"] - poolfile = add_poolfile(filename, dentry, dsc_location_id, session) - files_id = poolfile.file_id - - df.poolfile_id = files_id - session.add(df) - - session.flush() - - # Add the src_uploaders to the DB - uploader_ids = [source.maintainer_id] - if u.pkg.dsc.has_key("uploaders"): - for up in u.pkg.dsc["uploaders"].split(","): - up = up.strip() - uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id) - - added_ids = {} - for up in uploader_ids: - if added_ids.has_key(up): - utils.warn("Already saw uploader %s for source %s" % (up, source.source)) - continue - - added_ids[u]=1 - - su = SrcUploader() - su.maintainer_id = up - su.source_id = source.source_id - session.add(su) - - session.flush() - - return dsc_component, dsc_location_id - -def add_deb_to_db(u, filename, session): - """ - Contrary to what you might expect, this routine deals with both - debs and udebs. That info is in 'dbtype', whilst 'type' is - 'deb' for both of them - """ - cnf = Config() - entry = u.pkg.files[filename] - - bin = DBBinary() - bin.package = entry["package"] - bin.version = entry["version"] - bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id - bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id - bin.arch_id = get_architecture(entry["architecture"], session).arch_id - bin.binarytype = entry["dbtype"] - - # Find poolfile id - filename = entry["pool name"] + filename - fullpath = os.path.join(cnf["Dir::Pool"], filename) - if not entry.get("location id", None): - entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], utils.where_am_i(), session).location_id - - if not entry.get("files id", None): - poolfile = add_poolfile(filename, entry, entry["location id"], session) - entry["files id"] = poolfile.file_id - - bin.poolfile_id = entry["files id"] - - # Find source id - bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session) - if len(bin_sources) != 1: - raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \ - (bin.package, bin.version, bin.architecture.arch_string, - filename, bin.binarytype, u.pkg.changes["fingerprint"]) - - bin.source_id = bin_sources[0].source_id - - # Add and flush object so it has an ID - session.add(bin) - session.flush() - - # Add BinAssociations - for suite_name in u.pkg.changes["distribution"].keys(): - ba = BinAssociation() - ba.binary_id = bin.binary_id - ba.suite_id = get_suite(suite_name).suite_id - session.add(ba) - - session.flush() - - # Deal with contents - disabled for now - #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session) - #if not contents: - # print "REJECT\nCould not determine contents of package %s" % 
bin.package - # session.rollback() - # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename) - - -def install(u, session, log_urgency=True): - cnf = Config() - summarystats = SummaryStats() - - print "Installing." - - Logger.log(["installing changes", u.pkg.changes_file]) - - # Ensure that we have all the hashes we need below. - u.ensure_hashes() - if len(u.rejects) > 0: - # There were errors. Print them and SKIP the changes. - for msg in u.rejects: - utils.warn(msg) - return - - # Add the .dsc file to the DB first - for newfile, entry in u.pkg.files.items(): - if entry["type"] == "dsc": - dsc_component, dsc_location_id = add_dsc_to_db(u, newfile, session) - - # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb) - for newfile, entry in u.pkg.files.items(): - if entry["type"] == "deb": - add_deb_to_db(u, newfile, session) - - # If this is a sourceful diff only upload that is moving - # cross-component we need to copy the .orig files into the new - # component too for the same reasons as above. - if u.pkg.changes["architecture"].has_key("source"): - for orig_file in u.pkg.orig_files.keys(): - if not u.pkg.orig_files[orig_file].has_key("id"): - continue # Skip if it's not in the pool - orig_file_id = u.pkg.orig_files[orig_file]["id"] - if u.pkg.orig_files[orig_file]["location"] == dsc_location_id: - continue # Skip if the location didn't change - - # Do the move - oldf = get_poolfile_by_id(orig_file_id, session) - old_filename = os.path.join(oldf.location.path, oldf.filename) - old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum, - 'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum} - - new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename)) - - # TODO: Care about size/md5sum collisions etc - (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session) - - if newf is None: - utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename)) - newf = add_poolfile(new_filename, old_dat, dsc_location_id, session) - - # TODO: Check that there's only 1 here - source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0] - dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0] - dscf.poolfile_id = newf.file_id - session.add(dscf) - session.flush() - - # Install the files into the pool - for newfile, entry in u.pkg.files.items(): - destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile) - utils.move(newfile, destination) - Logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]]) - summarystats.accept_bytes += float(entry["size"]) - - # Copy the .changes file across for suite which need it. - copy_changes = {} - copy_dot_dak = {} - for suite_name in u.pkg.changes["distribution"].keys(): - if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)): - copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = "" - # and the .dak file... 
- if cnf.has_key("Suite::%s::CopyDotDak" % (suite_name)): - copy_dot_dak[cnf["Suite::%s::CopyDotDak" % (suite_name)]] = "" - - for dest in copy_changes.keys(): - utils.copy(u.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest)) - - for dest in copy_dot_dak.keys(): - utils.copy(u.pkg.changes_file[:-8]+".dak", dest) - - # We're done - commit the database changes - session.commit() - - # Move the .changes into the 'done' directory - utils.move(u.pkg.changes_file, - os.path.join(cnf["Dir::Queue::Done"], os.path.basename(u.pkg.changes_file))) - - # Remove the .dak file - os.unlink(u.pkg.changes_file[:-8] + ".dak") - - if u.pkg.changes["architecture"].has_key("source") and log_urgency: - UrgencyLog().log(u.pkg.dsc["source"], u.pkg.dsc["version"], u.pkg.changes["urgency"]) - - # Our SQL session will automatically start a new transaction after - # the last commit - - # Undo the work done in queue.py(accept) to help auto-building - # from accepted. - now_date = datetime.now() - - for suite_name in u.pkg.changes["distribution"].keys(): - if suite_name not in cnf.ValueList("Dinstall::QueueBuildSuites"): - continue - - suite = get_suite(suite_name, session) - dest_dir = cnf["Dir::QueueBuild"] - - if cnf.FindB("Dinstall::SecurityQueueBuild"): - dest_dir = os.path.join(dest_dir, suite_name) - - for newfile, entry in u.pkg.files.items(): - dest = os.path.join(dest_dir, newfile) - - qb = get_queue_build(dest, suite.suite_id, session) - - # Remove it from the list of packages for later processing by apt-ftparchive - if qb: - qb.last_used = now_date - qb.in_queue = False - session.add(qb) - - if not cnf.FindB("Dinstall::SecurityQueueBuild"): - # Update the symlink to point to the new location in the pool - pool_location = utils.poolify(u.pkg.changes["source"], entry["component"]) - src = os.path.join(cnf["Dir::Pool"], pool_location, os.path.basename(newfile)) - if os.path.islink(dest): - os.unlink(dest) - os.symlink(src, dest) - - # Update last_used on any non-uploaded .orig symlink - for orig_file in u.pkg.orig_files.keys(): - # Determine the .orig.tar.gz file name - if not u.pkg.orig_files[orig_file].has_key("id"): - continue # Skip files not in the pool - # XXX: do we really want to update the orig_files dict here - # instead of using a temporary variable? - u.pkg.orig_files[orig_file]["path"] = os.path.join(dest_dir, orig_file) - - # Remove it from the list of packages for later processing by apt-ftparchive - qb = get_queue_build(u.pkg.orig_files[orig_file]["path"], suite.suite_id, session) - if qb: - qb.in_queue = False - qb.last_used = now_date - session.add(qb) - - session.commit() - - # Finally... - summarystats.accept_count += 1 - -################################################################################ - -def stable_install(u, session, summary, short_summary, fromsuite_name="proposed-updates"): - summarystats = SummaryStats() - - fromsuite_name = fromsuite_name.lower() - tosuite_name = "Stable" - if fromsuite_name == "oldstable-proposed-updates": - tosuite_name = "OldStable" - - print "Installing from %s to %s." 
% (fromsuite_name, tosuite_name) - - fromsuite = get_suite(fromsuite_name) - tosuite = get_suite(tosuite_name) - - # Add the source to stable (and remove it from proposed-updates) - for newfile, entry in u.pkg.files.items(): - if entry["type"] == "dsc": - package = u.pkg.dsc["source"] - # NB: not files[file]["version"], that has no epoch - version = u.pkg.dsc["version"] - - source = get_sources_from_name(package, version, session) - if len(source) < 1: - utils.fubar("[INTERNAL ERROR] couldn't find '%s' (%s) in source table." % (package, version)) - source = source[0] - - # Remove from old suite - old = session.query(SrcAssociation).filter_by(source_id = source.source_id) - old = old.filter_by(suite_id = fromsuite.suite_id) - old.delete() - - # Add to new suite - new = SrcAssociation() - new.source_id = source.source_id - new.suite_id = tosuite.suite_id - session.add(new) - - # Add the binaries to stable (and remove it/them from proposed-updates) - for newfile, entry in u.pkg.files.items(): - if entry["type"] == "deb": - package = entry["package"] - version = entry["version"] - architecture = entry["architecture"] - - binary = get_binaries_from_name(package, version, [architecture, 'all']) - - if len(binary) < 1: - utils.fubar("[INTERNAL ERROR] couldn't find '%s' (%s for %s architecture) in binaries table." % (package, version, architecture)) - binary = binary[0] - - # Remove from old suite - old = session.query(BinAssociation).filter_by(binary_id = binary.binary_id) - old = old.filter_by(suite_id = fromsuite.suite_id) - old.delete() - - # Add to new suite - new = BinAssociation() - new.binary_id = binary.binary_id - new.suite_id = tosuite.suite_id - session.add(new) - - session.commit() - - utils.move(u.pkg.changes_file, - os.path.join(cnf["Dir::Morgue"], 'process-accepted', os.path.basename(u.pkg.changes_file))) - - ## Update the Stable ChangeLog file - # TODO: URGH - Use a proper tmp file - new_changelog_filename = cnf["Dir::Root"] + cnf["Suite::%s::ChangeLogBase" % (tosuite.suite_name)] + ".ChangeLog" - changelog_filename = cnf["Dir::Root"] + cnf["Suite::%s::ChangeLogBase" % (tosuite.suite_name)] + "ChangeLog" - if os.path.exists(new_changelog_filename): - os.unlink(new_changelog_filename) - - new_changelog = utils.open_file(new_changelog_filename, 'w') - for newfile, entry in u.pkg.files.items(): - if entry["type"] == "deb": - new_changelog.write("%s/%s/binary-%s/%s\n" % (tosuite.suite_name, - entry["component"], - entry["architecture"], - newfile)) - elif re_issource.match(newfile): - new_changelog.write("%s/%s/source/%s\n" % (tosuite.suite_name, - entry["component"], - newfile)) - else: - new_changelog.write("%s\n" % (newfile)) - - chop_changes = re_fdnic.sub("\n", u.pkg.changes["changes"]) - new_changelog.write(chop_changes + '\n\n') - - if os.access(changelog_filename, os.R_OK) != 0: - changelog = utils.open_file(changelog_filename) - new_changelog.write(changelog.read()) - - new_changelog.close() - - if os.access(changelog_filename, os.R_OK) != 0: - os.unlink(changelog_filename) - utils.move(new_changelog_filename, changelog_filename) - - summarystats.accept_count += 1 - - if not Options["No-Mail"] and u.pkg.changes["architecture"].has_key("source"): - u.Subst["__SUITE__"] = " into %s" % (tosuite) - u.Subst["__SUMMARY__"] = summary - u.Subst["__BCC__"] = "X-DAK: dak process-accepted" - - if cnf.has_key("Dinstall::Bcc"): - u.Subst["__BCC__"] += "\nBcc: %s" % (cnf["Dinstall::Bcc"]) - - template = os.path.join(cnf["Dir::Templates"], 'process-accepted.install') - - mail_message = 
utils.TemplateSubst(u.Subst, template) - utils.send_mail(mail_message) - u.announce(short_summary, True) - - # Finally remove the .dak file - dot_dak_file = os.path.join(cnf["Suite::%s::CopyDotDak" % (fromsuite.suite_name)], - os.path.basename(u.pkg.changes_file[:-8]+".dak")) - os.unlink(dot_dak_file) - -################################################################################ - -def process_it(changes_file, stable_queue, log_urgency, session): - cnf = Config() - u = Upload() - - overwrite_checks = True - - # Absolutize the filename to avoid the requirement of being in the - # same directory as the .changes file. - cfile = os.path.abspath(changes_file) - - # And since handling of installs to stable munges with the CWD - # save and restore it. - u.prevdir = os.getcwd() - - if stable_queue: - old = cfile - cfile = os.path.basename(old) - os.chdir(cnf["Suite::%s::CopyDotDak" % (stable_queue)]) - # overwrite_checks should not be performed if installing to stable - overwrite_checks = False - - u.pkg.load_dot_dak(cfile) - u.update_subst() - - if stable_queue: - u.pkg.changes_file = old - - u.accepted_checks(overwrite_checks, session) - action(u, stable_queue, log_urgency, session) - - # Restore CWD - os.chdir(u.prevdir) - -############################################################################### - -def main(): - global Logger - - cnf = Config() - summarystats = SummaryStats() - changes_files = init() - log_urgency = False - stable_queue = None - - # -n/--dry-run invalidates some other options which would involve things happening - if Options["No-Action"]: - Options["Automatic"] = "" - - # Check that we aren't going to clash with the daily cron job - - if not Options["No-Action"] and os.path.exists("%s/Archive_Maintenance_In_Progress" % (cnf["Dir::Root"])) and not Options["No-Lock"]: - utils.fubar("Archive maintenance in progress. 
Try again later.") - - # If running from within proposed-updates; assume an install to stable - queue = "" - if os.getenv('PWD').find('oldstable-proposed-updates') != -1: - stable_queue = "Oldstable-Proposed-Updates" - elif os.getenv('PWD').find('proposed-updates') != -1: - stable_queue = "Proposed-Updates" - - # Obtain lock if not in no-action mode and initialize the log - if not Options["No-Action"]: - lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT) - try: - fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - except IOError, e: - if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN': - utils.fubar("Couldn't obtain lock; assuming another 'dak process-accepted' is already running.") - else: - raise - Logger = daklog.Logger(cnf, "process-accepted") - if not stable_queue and cnf.get("Dir::UrgencyLog"): - # Initialise UrgencyLog() - log_urgency = True - UrgencyLog() - - # Sort the .changes files so that we process sourceful ones first - changes_files.sort(utils.changes_compare) - - - # Process the changes files - for changes_file in changes_files: - print "\n" + changes_file - session = DBConn().session() - process_it(changes_file, stable_queue, log_urgency, session) - session.close() - - if summarystats.accept_count: - sets = "set" - if summarystats.accept_count > 1: - sets = "sets" - sys.stderr.write("Installed %d package %s, %s.\n" % (summarystats.accept_count, sets, - utils.size_type(int(summarystats.accept_bytes)))) - Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes]) - - if not Options["No-Action"]: - Logger.close() - if log_urgency: - UrgencyLog().close() - -############################################################################### - -if __name__ == '__main__': - main() diff --git a/dak/process_new.py b/dak/process_new.py index bec55df5..46c8546d 100755 --- a/dak/process_new.py +++ b/dak/process_new.py @@ -821,7 +821,7 @@ def _accept(upload): if Options["No-Action"]: return (summary, short_summary) = upload.build_summaries() - upload.accept(summary, short_summary, targetdir=Config()["Dir::Queue::Newstage"]) + upload.accept(summary, short_summary, targetqueue) os.unlink(upload.pkg.changes_file[:-8]+".dak") def do_accept(upload): @@ -832,7 +832,7 @@ def do_accept(upload): if cnf.FindB("Dinstall::SecurityQueueHandling"): upload.dump_vars(cnf["Dir::Queue::Embargoed"]) - upload.move_to_dir(cnf["Dir::Queue::Embargoed"]) + upload.move_to_queue(get_policy_queue('embargoed')) upload.queue_build("embargoed", cnf["Dir::Queue::Embargoed"]) # Check for override disparities upload.Subst["__SUMMARY__"] = summary diff --git a/dak/process_unchecked.py b/dak/process_unchecked.py deleted file mode 100755 index 8a3e49d1..00000000 --- a/dak/process_unchecked.py +++ /dev/null @@ -1,593 +0,0 @@ -#!/usr/bin/env python - -""" -Checks Debian packages from Incoming -@contact: Debian FTP Master -@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006 James Troup -@copyright: 2009 Joerg Jaspert -@copyright: 2009 Mark Hymers -@license: GNU General Public License version 2 or later -""" - -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. - -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -# Originally based on dinstall by Guy Maor - -################################################################################ - -# Computer games don't affect kids. I mean if Pacman affected our generation as -# kids, we'd all run around in a darkened room munching pills and listening to -# repetitive music. -# -- Unknown - -################################################################################ - -import errno -import fcntl -import os -import sys -import traceback -import apt_pkg - -from daklib.dbconn import * -from daklib import daklog -from daklib.queue import * -from daklib import utils -from daklib.textutils import fix_maintainer -from daklib.dak_exceptions import * -from daklib.regexes import re_default_answer -from daklib.summarystats import SummaryStats -from daklib.holding import Holding -from daklib.config import Config - -from types import * - -################################################################################ - - -################################################################################ - -# Globals -Options = None -Logger = None - -############################################################################### - -def init(): - global Options - - apt_pkg.init() - cnf = Config() - - Arguments = [('a',"automatic","Dinstall::Options::Automatic"), - ('h',"help","Dinstall::Options::Help"), - ('n',"no-action","Dinstall::Options::No-Action"), - ('p',"no-lock", "Dinstall::Options::No-Lock"), - ('s',"no-mail", "Dinstall::Options::No-Mail"), - ('d',"directory", "Dinstall::Options::Directory", "HasArg")] - - for i in ["automatic", "help", "no-action", "no-lock", "no-mail", - "override-distribution", "version", "directory"]: - cnf["Dinstall::Options::%s" % (i)] = "" - - changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv) - Options = cnf.SubTree("Dinstall::Options") - - if Options["Help"]: - usage() - - # If we have a directory flag, use it to find our files - if cnf["Dinstall::Options::Directory"] != "": - # Note that we clobber the list of files we were given in this case - # so warn if the user has done both - if len(changes_files) > 0: - utils.warn("Directory provided so ignoring files given on command line") - - changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"]) - - return changes_files - -################################################################################ - -def usage (exit_code=0): - print """Usage: dak process-unchecked [OPTION]... [CHANGES]... - -a, --automatic automatic run - -h, --help show this help and exit. - -n, --no-action don't do anything - -p, --no-lock don't check lockfile !! for cron.daily only !! - -s, --no-mail don't send any mail - -V, --version display the version number and exit""" - sys.exit(exit_code) - -################################################################################ - -def action(u): - cnf = Config() - - # changes["distribution"] may not exist in corner cases - # (e.g. 
unreadable changes files) - if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], DictType): - u.pkg.changes["distribution"] = {} - - (summary, short_summary) = u.build_summaries() - - # q-unapproved hax0ring - queue_info = { - "New": { "is": is_new, "process": acknowledge_new }, - "Autobyhand" : { "is" : is_autobyhand, "process": do_autobyhand }, - "Byhand" : { "is": is_byhand, "process": do_byhand }, - "OldStableUpdate" : { "is": is_oldstableupdate, - "process": do_oldstableupdate }, - "StableUpdate" : { "is": is_stableupdate, "process": do_stableupdate }, - "Unembargo" : { "is": is_unembargo, "process": queue_unembargo }, - "Embargo" : { "is": is_embargo, "process": queue_embargo }, - } - - queues = [ "New", "Autobyhand", "Byhand" ] - if cnf.FindB("Dinstall::SecurityQueueHandling"): - queues += [ "Unembargo", "Embargo" ] - else: - queues += [ "OldStableUpdate", "StableUpdate" ] - - (prompt, answer) = ("", "XXX") - if Options["No-Action"] or Options["Automatic"]: - answer = 'S' - - queuekey = '' - - pi = u.package_info() - - if len(u.rejects) > 0: - if u.upload_too_new(): - print "SKIP (too new)\n" + pi, - prompt = "[S]kip, Quit ?" - else: - print "REJECT\n" + pi - prompt = "[R]eject, Skip, Quit ?" - if Options["Automatic"]: - answer = 'R' - else: - qu = None - for q in queues: - if queue_info[q]["is"](u): - qu = q - break - if qu: - print "%s for %s\n%s%s" % ( qu.upper(), ", ".join(u.pkg.changes["distribution"].keys()), pi, summary) - queuekey = qu[0].upper() - if queuekey in "RQSA": - queuekey = "D" - prompt = "[D]ivert, Skip, Quit ?" - else: - prompt = "[%s]%s, Skip, Quit ?" % (queuekey, qu[1:].lower()) - if Options["Automatic"]: - answer = queuekey - else: - print "ACCEPT\n" + pi + summary, - prompt = "[A]ccept, Skip, Quit ?" 
- if Options["Automatic"]: - answer = 'A' - - while prompt.find(answer) == -1: - answer = utils.our_raw_input(prompt) - m = re_default_answer.match(prompt) - if answer == "": - answer = m.group(1) - answer = answer[:1].upper() - - if answer == 'R': - os.chdir(u.pkg.directory) - u.do_reject(0, pi) - elif answer == 'A': - u.pkg.add_known_changes( "Accepted" ) - u.accept(summary, short_summary) - u.check_override() - u.remove() - elif answer == queuekey: - u.pkg.add_known_changes( qu ) - queue_info[qu]["process"](u, summary, short_summary) - u.remove() - elif answer == 'Q': - sys.exit(0) - -################################################################################ - -def package_to_suite(u, suite): - if not u.pkg.changes["distribution"].has_key(suite): - return False - - ret = True - - if not u.pkg.changes["architecture"].has_key("source"): - s = DBConn().session() - q = s.query(SrcAssociation.sa_id) - q = q.join(Suite).filter_by(suite_name=suite) - q = q.join(DBSource).filter_by(source=u.pkg.changes['source']) - q = q.filter_by(version=u.pkg.changes['version']).limit(1) - - # NB: Careful, this logic isn't what you would think it is - # Source is already in {old-,}proposed-updates so no need to hold - # Instead, we don't move to the holding area, we just do an ACCEPT - if q.count() > 0: - ret = False - - s.close() - - return ret - -def package_to_queue(u, summary, short_summary, queue, perms=0660, build=True, announce=None): - cnf = Config() - dir = cnf["Dir::Queue::%s" % queue] - - print "Moving to %s holding area" % queue.upper() - Logger.log(["Moving to %s" % queue, u.pkg.changes_file]) - - u.pkg.write_dot_dak(dir) - u.move_to_dir(dir, perms=perms) - if build: - get_or_set_queue(queue.lower()).autobuild_upload(u.pkg, dir) - - # Check for override disparities - u.check_override() - - # Send accept mail, announce to lists and close bugs - if announce and not cnf["Dinstall::Options::No-Mail"]: - template = os.path.join(cnf["Dir::Templates"], announce) - u.update_subst() - u.Subst["__SUITE__"] = "" - mail_message = utils.TemplateSubst(u.Subst, template) - utils.send_mail(mail_message) - u.announce(short_summary, True) - -################################################################################ - -def is_unembargo(u): - session = DBConn().session() - cnf = Config() - - q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes) - if q.rowcount > 0: - session.close() - return True - - oldcwd = os.getcwd() - os.chdir(cnf["Dir::Queue::Disembargo"]) - disdir = os.getcwd() - os.chdir(oldcwd) - - ret = False - - if u.pkg.directory == disdir: - if u.pkg.changes["architecture"].has_key("source"): - if not Options["No-Action"]: - session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes) - session.commit() - - ret = True - - session.close() - - return ret - -def queue_unembargo(u, summary, short_summary): - return package_to_queue(u, summary, short_summary, "Unembargoed", - perms=0660, build=True, announce='process-unchecked.accepted') - -################################################################################ - -def is_embargo(u): - # if embargoed queues are enabled always embargo - return True - -def queue_embargo(u, summary, short_summary): - return package_to_queue(u, summary, short_summary, "Unembargoed", - perms=0660, build=True, announce='process-unchecked.accepted') - -################################################################################ - -def is_stableupdate(u): 
- return package_to_suite(u, 'proposed-updates') - -def do_stableupdate(u, summary, short_summary): - return package_to_queue(u, summary, short_summary, "ProposedUpdates", - perms=0664, build=False, announce=None) - -################################################################################ - -def is_oldstableupdate(u): - return package_to_suite(u, 'oldstable-proposed-updates') - -def do_oldstableupdate(u, summary, short_summary): - return package_to_queue(u, summary, short_summary, "OldProposedUpdates", - perms=0664, build=False, announce=None) - -################################################################################ - -def is_autobyhand(u): - cnf = Config() - - all_auto = 1 - any_auto = 0 - for f in u.pkg.files.keys(): - if u.pkg.files[f].has_key("byhand"): - any_auto = 1 - - # filename is of form "PKG_VER_ARCH.EXT" where PKG, VER and ARCH - # don't contain underscores, and ARCH doesn't contain dots. - # further VER matches the .changes Version:, and ARCH should be in - # the .changes Architecture: list. - if f.count("_") < 2: - all_auto = 0 - continue - - (pckg, ver, archext) = f.split("_", 2) - if archext.count(".") < 1 or u.pkg.changes["version"] != ver: - all_auto = 0 - continue - - ABH = cnf.SubTree("AutomaticByHandPackages") - if not ABH.has_key(pckg) or \ - ABH["%s::Source" % (pckg)] != u.pkg.changes["source"]: - print "not match %s %s" % (pckg, u.pkg.changes["source"]) - all_auto = 0 - continue - - (arch, ext) = archext.split(".", 1) - if arch not in u.pkg.changes["architecture"]: - all_auto = 0 - continue - - u.pkg.files[f]["byhand-arch"] = arch - u.pkg.files[f]["byhand-script"] = ABH["%s::Script" % (pckg)] - - return any_auto and all_auto - -def do_autobyhand(u, summary, short_summary): - print "Attempting AUTOBYHAND." - byhandleft = True - for f, entry in u.pkg.files.items(): - byhandfile = f - - if not entry.has_key("byhand"): - continue - - if not entry.has_key("byhand-script"): - byhandleft = True - continue - - os.system("ls -l %s" % byhandfile) - - result = os.system("%s %s %s %s %s" % ( - entry["byhand-script"], - byhandfile, - u.pkg.changes["version"], - entry["byhand-arch"], - os.path.abspath(u.pkg.changes_file))) - - if result == 0: - os.unlink(byhandfile) - del entry - else: - print "Error processing %s, left as byhand." % (f) - byhandleft = True - - if byhandleft: - do_byhand(u, summary, short_summary) - else: - u.accept(summary, short_summary) - u.check_override() - # XXX: We seem to be missing a u.remove() here - # This might explain why we get byhand leftovers in unchecked - mhy - -################################################################################ - -def is_byhand(u): - for f in u.pkg.files.keys(): - if u.pkg.files[f].has_key("byhand"): - return True - return False - -def do_byhand(u, summary, short_summary): - return package_to_queue(u, summary, short_summary, "Byhand", - perms=0660, build=False, announce=None) - -################################################################################ - -def is_new(u): - for f in u.pkg.files.keys(): - if u.pkg.files[f].has_key("new"): - return True - return False - -def acknowledge_new(u, summary, short_summary): - cnf = Config() - - print "Moving to NEW holding area." - Logger.log(["Moving to new", u.pkg.changes_file]) - - u.pkg.write_dot_dak(cnf["Dir::Queue::New"]) - u.move_to_dir(cnf["Dir::Queue::New"], perms=0640, changesperms=0644) - - if not Options["No-Mail"]: - print "Sending new ack." 
- template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new') - u.update_subst() - u.Subst["__SUMMARY__"] = summary - new_ack_message = utils.TemplateSubst(u.Subst, template) - utils.send_mail(new_ack_message) - -################################################################################ - -# reprocess is necessary for the case of foo_1.2-1 and foo_1.2-2 in -# Incoming. -1 will reference the .orig.tar.gz, but -2 will not. -# Upload.check_dsc_against_db() can find the .orig.tar.gz but it will -# not have processed it during it's checks of -2. If -1 has been -# deleted or otherwise not checked by 'dak process-unchecked', the -# .orig.tar.gz will not have been checked at all. To get round this, -# we force the .orig.tar.gz into the .changes structure and reprocess -# the .changes file. - -def process_it(changes_file): - global Logger - - cnf = Config() - - holding = Holding() - - u = Upload() - u.pkg.changes_file = changes_file - u.pkg.directory = os.getcwd() - u.logger = Logger - origchanges = os.path.join(u.pkg.directory, u.pkg.changes_file) - - # Some defaults in case we can't fully process the .changes file - u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"] - u.pkg.changes["changedby2047"] = cnf["Dinstall::MyEmailAddress"] - - # debian-{devel-,}-changes@lists.debian.org toggles writes access based on this header - bcc = "X-DAK: dak process-unchecked" - if cnf.has_key("Dinstall::Bcc"): - u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"]) - else: - u.Subst["__BCC__"] = bcc - - # Remember where we are so we can come back after cd-ing into the - # holding directory. TODO: Fix this stupid hack - u.prevdir = os.getcwd() - - # TODO: Figure out something better for this (or whether it's even - # necessary - it seems to have been for use when we were - # still doing the is_unchecked check; reprocess = 2) - u.reprocess = 1 - - try: - # If this is the Real Thing(tm), copy things into a private - # holding directory first to avoid replacable file races. - if not Options["No-Action"]: - os.chdir(cnf["Dir::Queue::Holding"]) - - # Absolutize the filename to avoid the requirement of being in the - # same directory as the .changes file. - holding.copy_to_holding(origchanges) - - # Relativize the filename so we use the copy in holding - # rather than the original... 
- changespath = os.path.basename(u.pkg.changes_file) - - (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changespath) - - if u.pkg.changes["fingerprint"]: - valid_changes_p = u.load_changes(changespath) - else: - valid_changes_p = False - u.rejects.extend(rejects) - - if valid_changes_p: - while u.reprocess: - u.check_distributions() - u.check_files(not Options["No-Action"]) - valid_dsc_p = u.check_dsc(not Options["No-Action"]) - if valid_dsc_p and not Options["No-Action"]: - u.check_source() - u.check_lintian() - u.check_hashes() - u.check_urgency() - u.check_timestamps() - u.check_signed_by_key() - - action(u) - - except (SystemExit, KeyboardInterrupt): - raise - - except: - print "ERROR" - traceback.print_exc(file=sys.stderr) - - # Restore previous WD - os.chdir(u.prevdir) - -############################################################################### - -def main(): - global Options, Logger - - cnf = Config() - changes_files = init() - - # -n/--dry-run invalidates some other options which would involve things happening - if Options["No-Action"]: - Options["Automatic"] = "" - - # Initialize our Holding singleton - holding = Holding() - - # Ensure all the arguments we were given are .changes files - for f in changes_files: - if not f.endswith(".changes"): - utils.warn("Ignoring '%s' because it's not a .changes file." % (f)) - changes_files.remove(f) - - if changes_files == []: - if cnf["Dinstall::Options::Directory"] == "": - utils.fubar("Need at least one .changes file as an argument.") - else: - sys.exit(0) - - # Check that we aren't going to clash with the daily cron job - if not Options["No-Action"] and os.path.exists("%s/daily.lock" % (cnf["Dir::Lock"])) and not Options["No-Lock"]: - utils.fubar("Archive maintenance in progress. Try again later.") - - # Obtain lock if not in no-action mode and initialize the log - if not Options["No-Action"]: - lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT) - try: - fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - except IOError, e: - if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN': - utils.fubar("Couldn't obtain lock; assuming another 'dak process-unchecked' is already running.") - else: - raise - Logger = daklog.Logger(cnf, "process-unchecked") - - # Sort the .changes files so that we process sourceful ones first - changes_files.sort(utils.changes_compare) - - # Process the changes files - for changes_file in changes_files: - print "\n" + changes_file - try: - process_it (changes_file) - finally: - if not Options["No-Action"]: - holding.clean() - - accept_count = SummaryStats().accept_count - accept_bytes = SummaryStats().accept_bytes - - if accept_count: - sets = "set" - if accept_count > 1: - sets = "sets" - print "Accepted %d package %s, %s." 
% (accept_count, sets, utils.size_type(int(accept_bytes))) - Logger.log(["total",accept_count,accept_bytes]) - - if not Options["No-Action"]: - Logger.close() - -################################################################################ - -if __name__ == '__main__': - main() diff --git a/dak/process_upload.py b/dak/process_upload.py new file mode 100755 index 00000000..ddf9b1c0 --- /dev/null +++ b/dak/process_upload.py @@ -0,0 +1,494 @@ +#!/usr/bin/env python + +""" +Checks Debian packages from Incoming +@contact: Debian FTP Master +@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006 James Troup +@copyright: 2009 Joerg Jaspert +@copyright: 2009 Mark Hymers +@copyright: 2009 Frank Lichtenheld +@license: GNU General Public License version 2 or later +""" + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +# based on process-unchecked and process-accepted + +## pu|pa: locking (daily.lock) +## pu|pa: parse arguments -> list of changes files +## pa: initialize urgency log +## pu|pa: sort changes list + +## foreach changes: +### pa: load dak file +## pu: copy CHG to tempdir +## pu: check CHG signature +## pu: parse changes file +## pu: checks: +## pu: check distribution (mappings, rejects) +## pu: copy FILES to tempdir +## pu: check whether CHG already exists in CopyChanges +## pu: check whether FILES already exist in one of the policy queues +## for deb in FILES: +## pu: extract control information +## pu: various checks on control information +## pu|pa: search for source (in CHG, projectb, policy queues) +## pu|pa: check whether "Version" fulfills target suite requirements/suite propagation +## pu|pa: check whether deb already exists in the pool +## for src in FILES: +## pu: various checks on filenames and CHG consistency +## pu: if isdsc: check signature +## for file in FILES: +## pu: various checks +## pu: NEW? 
+## //pu: check whether file already exists in the pool
+## pu: store what "Component" the package is currently in
+## pu: check whether we found everything we were looking for in CHG
+## pu: check the DSC:
+## pu: check whether we need and have ONE DSC
+## pu: parse the DSC
+## pu: various checks //maybe drop some of them in favor of lintian
+## pu|pa: check whether "Version" fulfills target suite requirements/suite propagation
+## pu: check whether DSC_FILES is consistent with "Format"
+## for src in DSC_FILES:
+## pu|pa: check whether file already exists in the pool (with special handling for .orig.tar.gz)
+## pu: create new tempdir
+## pu: create symlink mirror of source
+## pu: unpack source
+## pu: extract changelog information for BTS
+## //pu: create missing .orig symlink
+## pu: check with lintian
+## for file in FILES:
+## pu: check checksums and sizes
+## for file in DSC_FILES:
+## pu: check checksums and sizes
+## pu: CHG: check urgency
+## for deb in FILES:
+## pu: extract contents list and check for dubious timestamps
+## pu: check that the uploader is actually allowed to upload the package
+### pa: install:
+### if stable_install:
+### pa: remove from p-u
+### pa: add to stable
+### pa: move CHG to morgue
+### pa: append data to ChangeLog
+### pa: send mail
+### pa: remove .dak file
+### else:
+### pa: add dsc to db:
+### for file in DSC_FILES:
+### pa: add file to file
+### pa: add file to dsc_files
+### pa: create source entry
+### pa: update source associations
+### pa: update src_uploaders
+### for deb in FILES:
+### pa: add deb to db:
+### pa: add file to file
+### pa: find source entry
+### pa: create binaries entry
+### pa: update binary associations
+### pa: .orig component move
+### pa: move files to pool
+### pa: save CHG
+### pa: move CHG to done/
+### pa: change entry in queue_build
+## pu: use dispatch table to choose target queue:
+## if NEW:
+## pu: write .dak file
+## pu: move to NEW
+## pu: send mail
+## elsif AUTOBYHAND:
+## pu: run autobyhand script
+## pu: if stuff left, do byhand or accept
+## elsif targetqueue in (oldstable, stable, embargo, unembargo):
+## pu: write .dak file
+## pu: check overrides
+## pu: move to queue
+## pu: send mail
+## else:
+## pu: write .dak file
+## pu: move to ACCEPTED
+## pu: send mails
+## pu: create files for BTS
+## pu: create entry in queue_build
+## pu: check overrides
+
+# Integrity checks
+## GPG
+## Parsing changes (check for duplicates)
+## Parse dsc
+## file list checks
+
+# New check layout (TODO: Implement)
+## Permission checks
+### suite mappings
+### ACLs
+### version checks (suite)
+### override checks
+
+## Source checks
+### copy orig
+### unpack
+### BTS changelog
+### src contents
+### lintian
+### urgency log
+
+## Binary checks
+### timestamps
+### control checks
+### src relation check
+### contents
+
+## Database insertion (? copy from stuff)
+### BYHAND / NEW / Policy queues
+### Pool
+
+## Queue builds
+
+from errno import EACCES, EAGAIN
+import fcntl
+import os
+import sys
+import traceback
+import apt_pkg
+from sqlalchemy.orm.exc import NoResultFound
+
+from daklib import daklog
+from daklib.queue import *
+from daklib.queue_install import *
+from daklib import utils
+from daklib.dbconn import *
+from daklib.urgencylog import UrgencyLog
+from daklib.summarystats import SummaryStats
+from daklib.holding import Holding
+from daklib.config import Config
+
+###############################################################################
+
+Options = None
+Logger = None
+
+###############################################################################
+
+def usage (exit_code=0):
+    print """Usage: dak process-upload [OPTION]... [CHANGES]...
+  -a, --automatic           automatic run
+  -h, --help                show this help and exit.
+  -n, --no-action           don't do anything
+  -p, --no-lock             don't check lockfile !! for cron.daily only !!
+  -s, --no-mail             don't send any mail
+  -V, --version             display the version number and exit"""
+    sys.exit(exit_code)
+
+###############################################################################
+
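action() below drives dak's usual interactive prompt convention: the default answer is the single letter in square brackets, extracted with re_default_answer (imported via daklib.queue). A minimal sketch of that convention, assuming the pattern is the usual r"\[(.)\]" — the regex itself is not shown in this diff:

    import re

    re_default_answer = re.compile(r"\[(.)\]")   # assumed to match daklib.regexes

    prompt = "[A]ccept, Skip, Quit ?"
    answer = ""                                  # empty input selects the default
    if answer == "":
        answer = re_default_answer.match(prompt).group(1)
    answer = answer[:1].upper()                  # -> 'A'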
+ if Options["Automatic"]: + answer = 'A' + + while prompt.find(answer) == -1: + answer = utils.our_raw_input(prompt) + m = re_default_answer.match(prompt) + if answer == "": + answer = m.group(1) + answer = answer[:1].upper() + + if answer == 'R': + os.chdir(u.pkg.directory) + u.do_reject(0, pi) + elif answer == 'A': + if not chg: + chg = u.pkg.add_known_changes(holding.holding_dir, session) + u.accept(summary, short_summary, session) + u.check_override() + session.commit() + u.remove() + elif answer == 'P': + if not chg: + chg = u.pkg.add_known_changes(holding.holding_dir, session) + u.move_to_queue(policyqueue) + chg.in_queue_id = policyqueue.policy_queue_id + session.add(chg) + session.commit() + u.remove() + elif answer == queuekey: + if not chg: + chg = u.pkg.add_known_changes(holding.holding_dir, session) + QueueInfo[qu]["process"](u, summary, short_summary, chg, session) + session.commit() + u.remove() + elif answer == 'Q': + sys.exit(0) + + session.commit() + +############################################################################### + +def cleanup(): + h = Holding() + if not Options["No-Action"]: + h.clean() + +def process_it(changes_file, session): + global Logger + + Logger.log(["Processing changes file", changes_file]) + + cnf = Config() + + holding = Holding() + + # TODO: Actually implement using pending* tables so that we don't lose track + # of what is where + + u = Upload() + u.pkg.changes_file = changes_file + u.pkg.directory = os.getcwd() + u.logger = Logger + origchanges = os.path.abspath(u.pkg.changes_file) + + # Some defaults in case we can't fully process the .changes file + u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"] + u.pkg.changes["changedby2047"] = cnf["Dinstall::MyEmailAddress"] + + # debian-{devel-,}-changes@lists.debian.org toggles writes access based on this header + bcc = "X-DAK: dak process-upload" + if cnf.has_key("Dinstall::Bcc"): + u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"]) + else: + u.Subst["__BCC__"] = bcc + + # Remember where we are so we can come back after cd-ing into the + # holding directory. TODO: Fix this stupid hack + u.prevdir = os.getcwd() + + try: + # If this is the Real Thing(tm), copy things into a private + # holding directory first to avoid replacable file races. + if not Options["No-Action"]: + os.chdir(cnf["Dir::Queue::Holding"]) + + # Absolutize the filename to avoid the requirement of being in the + # same directory as the .changes file. + holding.copy_to_holding(origchanges) + + # Relativize the filename so we use the copy in holding + # rather than the original... 
+ changespath = os.path.basename(u.pkg.changes_file) + else: + changespath = origchanges + + (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changespath) + + if u.pkg.changes["fingerprint"]: + valid_changes_p = u.load_changes(changespath) + else: + valid_changes_p = False + u.rejects.extend(rejects) + + if valid_changes_p: + u.check_distributions() + u.check_files(not Options["No-Action"]) + valid_dsc_p = u.check_dsc(not Options["No-Action"]) + if valid_dsc_p and not Options["No-Action"]: + u.check_source() + u.check_lintian() + u.check_hashes() + u.check_urgency() + u.check_timestamps() + u.check_signed_by_key() + + action(u, session) + + except (SystemExit, KeyboardInterrupt): + cleanup() + raise + + except: + print "ERROR" + traceback.print_exc(file=sys.stderr) + + cleanup() + # Restore previous WD + os.chdir(u.prevdir) + +############################################################################### + +def main(): + global Options, Logger + + cnf = Config() + summarystats = SummaryStats() + log_urgency = False + + DBConn() + + Arguments = [('a',"automatic","Dinstall::Options::Automatic"), + ('h',"help","Dinstall::Options::Help"), + ('n',"no-action","Dinstall::Options::No-Action"), + ('p',"no-lock", "Dinstall::Options::No-Lock"), + ('s',"no-mail", "Dinstall::Options::No-Mail"), + ('d',"directory", "Dinstall::Options::Directory", "HasArg")] + + for i in ["automatic", "help", "no-action", "no-lock", "no-mail", + "version", "directory"]: + if not cnf.has_key("Dinstall::Options::%s" % (i)): + cnf["Dinstall::Options::%s" % (i)] = "" + + changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv) + Options = cnf.SubTree("Dinstall::Options") + + if Options["Help"]: + usage() + + # -n/--dry-run invalidates some other options which would involve things happening + if Options["No-Action"]: + Options["Automatic"] = "" + + # Check that we aren't going to clash with the daily cron job + if not Options["No-Action"] and os.path.exists("%s/daily.lock" % (cnf["Dir::Lock"])) and not Options["No-Lock"]: + utils.fubar("Archive maintenance in progress. 
Try again later.")
+
+    # Obtain lock if not in no-action mode and initialize the log
+    if not Options["No-Action"]:
+        lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
+        try:
+            fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+        except IOError, e:
+            if e.errno in (EACCES, EAGAIN):
+                utils.fubar("Couldn't obtain lock; assuming another 'dak process-upload' is already running.")
+            else:
+                raise
+        if cnf.get("Dir::UrgencyLog"):
+            # Initialise UrgencyLog()
+            log_urgency = True
+            UrgencyLog()
+
+    Logger = daklog.Logger(cnf, "process-upload", Options["No-Action"])
+
+    # If we have a directory flag, use it to find our files
+    if cnf["Dinstall::Options::Directory"] != "":
+        # Note that we clobber the list of files we were given in this case
+        # so warn if the user has done both
+        if len(changes_files) > 0:
+            utils.warn("Directory provided so ignoring files given on command line")
+
+        changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"])
+        Logger.log(["Using changes files from directory", cnf["Dinstall::Options::Directory"], len(changes_files)])
+    elif not len(changes_files) > 0:
+        utils.fubar("No changes files given and no directory specified")
+    else:
+        Logger.log(["Using changes files from command-line", len(changes_files)])
+
+    # Sort the .changes files so that we process sourceful ones first
+    changes_files.sort(utils.changes_compare)
+
+    # Process the changes files
+    for changes_file in changes_files:
+        print "\n" + changes_file
+        session = DBConn().session()
+        process_it(changes_file, session)
+        session.close()
+
+    if summarystats.accept_count:
+        sets = "set"
+        if summarystats.accept_count > 1:
+            sets = "sets"
+        sys.stderr.write("Installed %d package %s, %s.\n" % (summarystats.accept_count, sets,
+                                                             utils.size_type(int(summarystats.accept_bytes))))
+        Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
+
+    if not Options["No-Action"]:
+        if log_urgency:
+            UrgencyLog().close()
+        Logger.close()
+
+###############################################################################
+
+if __name__ == '__main__':
+    main()
diff --git a/dak/queue_report.py b/dak/queue_report.py
index 8e338e52..c9013a52 100755
--- a/dak/queue_report.py
+++ b/dak/queue_report.py
@@ -39,7 +39,7 @@ import glob, os, stat, sys, time
 import apt_pkg
 
 from daklib import utils
-from daklib.changes import Changes
+from daklib.queue import Upload
 from daklib.dbconn import DBConn, has_new_comment
 from daklib.textutils import fix_maintainer
 from daklib.dak_exceptions import *
@@ -301,9 +301,9 @@ def process_changes_files(changes_files, type, log):
     # Read in all the .changes files
     for filename in changes_files:
         try:
-            c = Changes()
-            c.load_dot_dak(filename)
-            cache[filename] = copy(c.changes)
+            u = Upload()
+            u.load_changes(filename)
+            cache[filename] = copy(u.pkg.changes)
             cache[filename]["filename"] = filename
         except Exception, e:
             print "WARNING: Exception %s" % e
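Both process-upload above and update-db below serialise themselves with a non-blocking POSIX lock on Dinstall::LockFile. The pattern in isolation (a sketch; the lock path is hypothetical):

    import fcntl
    import os
    from errno import EACCES, EAGAIN

    lock_fd = os.open("/srv/dak/lock/dinstall.lock", os.O_RDWR | os.O_CREAT)  # hypothetical path
    try:
        fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError, e:
        if e.errno in (EACCES, EAGAIN):
            raise SystemExit("lock held; another instance is already running")
        raise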
diff --git a/dak/update_db.py b/dak/update_db.py
index 4e7704e4..27b6ad8f 100755
--- a/dak/update_db.py
+++ b/dak/update_db.py
@@ -39,12 +39,13 @@ import time
 import errno
 
 from daklib import utils
+from daklib.config import Config
 from daklib.dak_exceptions import DBUpdateError
 
 ################################################################################
 
 Cnf = None
-required_database_schema = 21
+required_database_schema = 22
 
 ################################################################################
 
@@ -104,12 +105,13 @@ Updates dak's database schema to the latest version. You should disable crontab
     def update_db(self):
         # Ok, try and find the configuration table
         print "Determining dak database revision ..."
+        cnf = Config()
 
         try:
             # Build a connect string
-            connect_str = "dbname=%s"% (Cnf["DB::Name"])
-            if Cnf["DB::Host"] != '': connect_str += " host=%s" % (Cnf["DB::Host"])
-            if Cnf["DB::Port"] != '-1': connect_str += " port=%d" % (int(Cnf["DB::Port"]))
+            connect_str = "dbname=%s"% (cnf["DB::Name"])
+            if cnf["DB::Host"] != '': connect_str += " host=%s" % (cnf["DB::Host"])
+            if cnf["DB::Port"] != '-1': connect_str += " port=%d" % (int(cnf["DB::Port"]))
 
             self.db = psycopg2.connect(connect_str)
 
@@ -133,22 +135,22 @@ Updates dak's database schema to the latest version. You should disable crontab
             self.update_db_to_zero()
             database_revision = 0
 
-        print "dak database schema at " + str(database_revision)
-        print "dak version requires schema " + str(required_database_schema)
+        print "dak database schema at %d" % database_revision
+        print "dak version requires schema %d" % required_database_schema
 
         if database_revision == required_database_schema:
             print "no updates required"
             sys.exit(0)
 
         for i in range (database_revision, required_database_schema):
-            print "updating database schema from " + str(database_revision) + " to " + str(i+1)
+            print "updating database schema from %d to %d" % (database_revision, i+1)
             try:
                 dakdb = __import__("dakdb", globals(), locals(), ['update'+str(i+1)])
                 update_module = getattr(dakdb, "update"+str(i+1))
                 update_module.do_update(self)
             except DBUpdateError, e:
                 # Seems the update did not work.
-                print "Was unable to update database schema from %s to %s." % (str(database_revision), str(i+1))
+                print "Was unable to update database schema from %d to %d." % (database_revision, i+1)
                 print "The error message received was %s" % (e)
                 utils.fubar("DB Schema upgrade failed")
             database_revision += 1
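Each schema revision is a dakdb/updateNN.py module exposing do_update(self); the loop above imports and runs them in order. A sketch of the shape such a module takes (a hypothetical update23 with illustrative DDL, assuming the config-table db_revision convention used by earlier updates):

    # dakdb/update23.py -- hypothetical next revision, showing the protocol
    import psycopg2
    from daklib.dak_exceptions import DBUpdateError

    def do_update(self):
        try:
            c = self.db.cursor()
            c.execute("ALTER TABLE policy_queue ADD COLUMN comment TEXT")   # illustrative DDL only
            c.execute("UPDATE config SET value = '23' WHERE name = 'db_revision'")
            self.db.commit()
        except psycopg2.ProgrammingError, msg:
            self.db.rollback()
            raise DBUpdateError, "Unable to apply update 23, rollback issued. Error message: %s" % (str(msg))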
@@ -156,33 +158,30 @@ Updates dak's database schema to the latest version. You should disable crontab
 ################################################################################
 
     def init (self):
-        global Cnf
-
-        Cnf = utils.get_conf()
+        cnf = Config()
 
         arguments = [('h', "help", "Update-DB::Options::Help")]
         for i in [ "help" ]:
-            if not Cnf.has_key("Update-DB::Options::%s" % (i)):
-                Cnf["Update-DB::Options::%s" % (i)] = ""
+            if not cnf.has_key("Update-DB::Options::%s" % (i)):
+                cnf["Update-DB::Options::%s" % (i)] = ""
 
-        arguments = apt_pkg.ParseCommandLine(Cnf, arguments, sys.argv)
+        arguments = apt_pkg.ParseCommandLine(cnf.Cnf, arguments, sys.argv)
 
-        options = Cnf.SubTree("Update-DB::Options")
+        options = cnf.SubTree("Update-DB::Options")
 
         if options["Help"]:
             self.usage()
         elif arguments:
             utils.warn("dak update-db takes no arguments.")
             self.usage(exit_code=1)
 
-        self.update_db()
-
         try:
-            lock_fd = os.open(Cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
+            lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
             fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
         except IOError, e:
             if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN':
                 utils.fubar("Couldn't obtain lock; assuming another 'dak process-unchecked' is already running.")
 
+        self.update_db()
+
 ################################################################################
diff --git a/daklib/changes.py b/daklib/changes.py
index fd09cb7f..c1f8f5ba 100755
--- a/daklib/changes.py
+++ b/daklib/changes.py
@@ -177,17 +177,9 @@ class Changes(object):
 
         return summary
 
+    @session_wrapper
     def remove_known_changes(self, session=None):
-        if session is None:
-            session = DBConn().session()
-            privatetrans = True
-
-        session.delete(get_knownchange(self.changes_file, session))
-
-        if privatetrans:
-            session.commit()
-            session.close()
-
+        session.delete(get_dbchange(self.changes_file, session))
 
     def mark_missing_fields(self):
         """add "missing" in fields which we will require for the known_changes table"""
@@ -195,180 +187,46 @@ class Changes(object):
             if (not self.changes.has_key(key)) or (not self.changes[key]):
                 self.changes[key]='missing'
 
+    @session_wrapper
     def add_known_changes(self, dirpath, session=None):
         """add "missing" in fields which we will require for the known_changes table"""
         cnf = Config()
-        privatetrans = False
-        if session is None:
-            session = DBConn().session()
-            privatetrans = True
 
         changesfile = os.path.join(dirpath, self.changes_file)
         filetime = datetime.datetime.fromtimestamp(os.path.getctime(changesfile))
 
         self.mark_missing_fields()
 
+        multivalues = {}
+        for key in ("distribution", "architecture", "binary"):
+            if isinstance(self.changes[key], dict):
+                multivalues[key] = " ".join(self.changes[key].keys())
+            else:
+                multivalues[key] = self.changes[key]
+
+        # TODO: Use ORM
         session.execute(
-            """INSERT INTO known_changes
+            """INSERT INTO changes
              (changesname, seen, source, binaries, architecture, version,
              distribution, urgency, maintainer, fingerprint, changedby, date)
              VALUES (:changesfile,:filetime,:source,:binary, :architecture,
              :version,:distribution,:urgency,:maintainer,:fingerprint,:changedby,:date)""",
-             { 'changesfile':self.changes_file,
-               'filetime':filetime,
-               'source':self.changes["source"],
-               'binary':self.changes["binary"],
-               'architecture':self.changes["architecture"],
-               'version':self.changes["version"],
-               'distribution':self.changes["distribution"],
-               'urgency':self.changes["urgency"],
-               'maintainer':self.changes["maintainer"],
-               'fingerprint':self.changes["fingerprint"],
-               'changedby':self.changes["changed-by"],
-               'date':self.changes["date"]} )
-
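The private-transaction boilerplate being stripped out here is exactly what the new @session_wrapper decorator centralises. As a rough illustration only — dak's real implementation lives in daklib/dbconn.py, inspects the wrapped function's argument list via getargspec, and may differ in detail — the pattern looks like:

    def session_wrapper(fn):
        def wrapped(*args, **kwargs):
            private_transaction = False
            session = kwargs.get('session')
            if session is None:
                # No session supplied: create our own and take responsibility
                # for committing and closing it afterwards
                private_transaction = True
                session = kwargs['session'] = DBConn().session()
            try:
                return fn(*args, **kwargs)
            finally:
                if private_transaction:
                    session.commit()
                    session.close()
        return wrapped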
- if privatetrans: - session.commit() - session.close() - - def load_dot_dak(self, changesfile): - """ - Update ourself by reading a previously created cPickle .dak dumpfile. - """ - - self.changes_file = changesfile - dump_filename = self.changes_file[:-8]+".dak" - dump_file = open_file(dump_filename) - - p = Unpickler(dump_file) - - self.changes.update(p.load()) - self.dsc.update(p.load()) - self.files.update(p.load()) - self.dsc_files.update(p.load()) - - next_obj = p.load() - if isinstance(next_obj, dict): - self.orig_files.update(next_obj) - else: - # Auto-convert old dak files to new format supporting - # multiple tarballs - orig_tar_gz = None - for dsc_file in self.dsc_files.keys(): - if dsc_file.endswith(".orig.tar.gz"): - orig_tar_gz = dsc_file - self.orig_files[orig_tar_gz] = {} - if next_obj != None: - self.orig_files[orig_tar_gz]["id"] = next_obj - next_obj = p.load() - if next_obj != None and next_obj != "": - self.orig_files[orig_tar_gz]["location"] = next_obj - if len(self.orig_files[orig_tar_gz]) == 0: - del self.orig_files[orig_tar_gz] - - dump_file.close() - - def sanitised_files(self): - ret = {} - for name, entry in self.files.items(): - ret[name] = {} - for i in CHANGESFIELDS_FILES: - if entry.has_key(i): - ret[name][i] = entry[i] - - return ret - - def sanitised_changes(self): - ret = {} - # Mandatory changes fields - for i in CHANGESFIELDS_MANDATORY: - ret[i] = self.changes[i] - - # Optional changes fields - for i in CHANGESFIELDS_OPTIONAL: - if self.changes.has_key(i): - ret[i] = self.changes[i] - - return ret - - def sanitised_dsc(self): - ret = {} - for i in CHANGESFIELDS_DSC: - if self.dsc.has_key(i): - ret[i] = self.dsc[i] - - return ret - - def sanitised_dsc_files(self): - ret = {} - for name, entry in self.dsc_files.items(): - ret[name] = {} - # Mandatory dsc_files fields - for i in CHANGESFIELDS_DSCFILES_MANDATORY: - ret[name][i] = entry[i] - - # Optional dsc_files fields - for i in CHANGESFIELDS_DSCFILES_OPTIONAL: - if entry.has_key(i): - ret[name][i] = entry[i] - - return ret - - def sanitised_orig_files(self): - ret = {} - for name, entry in self.orig_files.items(): - ret[name] = {} - # Optional orig_files fields - for i in CHANGESFIELDS_ORIGFILES: - if entry.has_key(i): - ret[name][i] = entry[i] - - return ret - - def write_dot_dak(self, dest_dir): - """ - Dump ourself into a cPickle file. - - @type dest_dir: string - @param dest_dir: Path where the dumpfile should be stored - - @note: This could just dump the dictionaries as is, but I'd like to avoid this so - there's some idea of what process-accepted & process-new use from - process-unchecked. (JT) - - """ - - dump_filename = os.path.join(dest_dir, self.changes_file[:-8] + ".dak") - dump_file = open_file(dump_filename, 'w') - - try: - os.chmod(dump_filename, 0664) - except OSError, e: - # chmod may fail when the dumpfile is not owned by the user - # invoking dak (like e.g. when NEW is processed by a member - # of ftpteam) - if e.errno == EPERM: - perms = stat.S_IMODE(os.stat(dump_filename)[stat.ST_MODE]) - # security precaution, should never happen unless a weird - # umask is set anywhere - if perms & stat.S_IWOTH: - fubar("%s is world writable and chmod failed." 
% \
-                          (dump_filename,))
-                # ignore the failed chmod otherwise as the file should
-                # already have the right privileges and is just, at worst,
-                # unreadable for world
-            else:
-                raise
-
-        p = Pickler(dump_file, 1)
-
-        p.dump(self.sanitised_changes())
-        p.dump(self.sanitised_dsc())
-        p.dump(self.sanitised_files())
-        p.dump(self.sanitised_dsc_files())
-        p.dump(self.sanitised_orig_files())
-
-        dump_file.close()
+             { 'changesfile':  self.changes_file,
+               'filetime':     filetime,
+               'source':       self.changes["source"],
+               'binary':       multivalues["binary"],
+               'architecture': multivalues["architecture"],
+               'version':      self.changes["version"],
+               'distribution': multivalues["distribution"],
+               'urgency':      self.changes["urgency"],
+               'maintainer':   self.changes["maintainer"],
+               'fingerprint':  self.changes["fingerprint"],
+               'changedby':    self.changes["changed-by"],
+               'date':         self.changes["date"]} )
+
+        session.commit()
+
+        return session.query(DBChange).filter_by(changesname = self.changes_file).one()
 
     def unknown_files_fields(self, name):
         return sorted(list( set(self.files[name].keys()) -
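The multivalues normalisation added above flattens the dict-valued fields that .changes parsing produces before they hit the INSERT. The same logic in isolation (a sketch with made-up values):

    changes = {
        "distribution": {"unstable": 1},
        "architecture": {"source": 1, "amd64": 1},
        "binary":       {"dak": 1},
    }
    multivalues = {}
    for key in ("distribution", "architecture", "binary"):
        if isinstance(changes[key], dict):
            # dict keys collapse to a space-separated string
            multivalues[key] = " ".join(changes[key].keys())
        else:
            # already a plain value, pass it through
            multivalues[key] = changes[key]
    print multivalues["architecture"]    # e.g. "source amd64"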
diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index 9e5afec7..26191ae2 100755
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -37,7 +37,7 @@ import os
 import re
 import psycopg2
 import traceback
-import datetime
+from datetime import datetime
 
 from inspect import getargspec
 
@@ -50,8 +50,6 @@ from sqlalchemy import types as sqltypes
 from sqlalchemy.exc import *
 from sqlalchemy.orm.exc import NoResultFound
 
-# Only import Config until Queue stuff is changed to store its config
-# in the database
 from config import Config
 from singleton import Singleton
 from textutils import fix_maintainer
@@ -125,6 +123,8 @@ def session_wrapper(fn):
 
     return wrapped
 
+__all__.append('session_wrapper')
+
 ################################################################################
 
 class Architecture(object):
@@ -430,6 +430,132 @@ __all__.append('BinaryACLMap')
 
 ################################################################################
 
+class BuildQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueue %s>' % self.queue_name
+
+    def add_file_from_pool(self, poolfile):
+        """Copies a file from the pool into the queue.  Assumes that the
+        PoolFile object is attached to the same SQLAlchemy session as the
+        BuildQueue object is.
+
+        The caller is responsible for committing after calling this function."""
+        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
+
+        # Check if we have a file of this name or this ID already
+        for f in self.queuefiles:
+            if f.fileid is not None and f.fileid == poolfile.file_id or \
+               f.poolfile.filename == poolfile_basename:
+                   # In this case, update the BuildQueueFile entry so we
+                   # don't remove it too early
+                   f.lastused = datetime.now()
+                   DBConn().session().object_session(poolfile).add(f)
+                   return f
+
+        # Prepare BuildQueueFile object
+        qf = BuildQueueFile()
+        qf.build_queue_id = self.queue_id
+        qf.lastused = datetime.now()
+        qf.filename = poolfile_basename
+
+        targetpath = poolfile.fullpath
+        queuepath = os.path.join(self.path, poolfile_basename)
+
+        try:
+            if self.copy_files:
+                # We need to copy instead of symlink
+                import utils
+                utils.copy(targetpath, queuepath)
+                # NULL in the fileid field implies a copy
+                qf.fileid = None
+            else:
+                os.symlink(targetpath, queuepath)
+                qf.fileid = poolfile.file_id
+        except OSError:
+            return None
+
+        # Get the same session as the PoolFile is using and add the qf to it
+        DBConn().session().object_session(poolfile).add(qf)
+
+        return qf
+
+
+__all__.append('BuildQueue')
+
+@session_wrapper
+def get_build_queue(queuename, session=None):
+    """
+    Returns BuildQueue object for given C{queue name}.
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: BuildQueue
+    @return: BuildQueue object for the given queue (None if not present)
+    """
+
+    q = session.query(BuildQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_build_queue')
+
+################################################################################
+
+class BuildQueueFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueueFile %s (%s)>' % (self.filename, self.queue_id)
+
+__all__.append('BuildQueueFile')
+
+################################################################################
+
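Typical use of the new BuildQueue API from maintenance code might look like this (a sketch only; the queue name and pool filename are made up and error handling is elided):

    from daklib.dbconn import DBConn, get_build_queue, PoolFile

    session = DBConn().session()
    queue = get_build_queue('buildd', session)          # hypothetical queue name
    pf = session.query(PoolFile).filter_by(
        filename='pool/main/d/dak/dak_1.0.dsc').one()   # hypothetical pool file
    qf = queue.add_file_from_pool(pf)   # symlinks, or copies when queue.copy_files is set
    if qf is not None:
        session.commit()                # per the docstring, the caller commits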
+class ChangePendingBinary(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
+
+__all__.append('ChangePendingBinary')
+
+################################################################################
+
+class ChangePendingFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingFile %s>' % self.change_pending_file_id
+
+__all__.append('ChangePendingFile')
+
+################################################################################
+
+class ChangePendingSource(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingSource %s>' % self.change_pending_source_id
+
+__all__.append('ChangePendingSource')
+
+################################################################################
+
 class Component(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -850,6 +976,39 @@ def get_poolfile_like_name(filename, session=None):
 
 __all__.append('get_poolfile_like_name')
 
+@session_wrapper
+def add_poolfile(filename, datadict, location_id, session=None):
+    """
+    Add a new file to the pool
+
+    @type filename: string
+    @param filename: filename
+
+    @type datadict: dict
+    @param datadict: dict with needed data
+
+    @type location_id: int
+    @param location_id: database id of the location
+
+    @rtype: PoolFile
+    @return: the PoolFile object created
+    """
+    poolfile = PoolFile()
+    poolfile.filename = filename
+    poolfile.filesize = datadict["size"]
+    poolfile.md5sum = datadict["md5sum"]
+    poolfile.sha1sum = datadict["sha1sum"]
+    poolfile.sha256sum = datadict["sha256sum"]
+    poolfile.location_id = location_id
+
+    session.add(poolfile)
+    # Flush to get a file id (NB: This is not a commit)
+    session.flush()
+
+    return poolfile
+
+__all__.append('add_poolfile')
+
 ################################################################################
 
 class Fingerprint(object):
@@ -1089,19 +1248,19 @@ __all__.append('KeyringACLMap')
 
 ################################################################################
 
-class KnownChange(object):
+class DBChange(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<KnownChange %s>' % self.changesname
+        return '<DBChange %s>' % self.changesname
 
-__all__.append('KnownChange')
+__all__.append('DBChange')
 
 @session_wrapper
-def get_knownchange(filename, session=None):
+def get_dbchange(filename, session=None):
     """
-    returns knownchange object for given C{filename}.
+    returns DBChange object for given C{filename}.
 
     @type filename: string
     @param filename: the name of the .changes file
 
     @type session: Session
     @param session: Optional SQLA session object (a temporary one will be
     generated if not supplied)
 
     @rtype: DBChange
     @return: DBChange object for the given filename (None if not present)
     """
 
-    q = session.query(KnownChange).filter_by(changesname=filename)
+    q = session.query(DBChange).filter_by(changesname=filename)
 
     try:
         return q.one()
     except NoResultFound:
         return None
 
-__all__.append('get_knownchange')
-
-################################################################################
-
-class KnownChangePendingFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<KnownChangePendingFile %s>' % self.known_change_pending_file_id
-
-__all__.append('KnownChangePendingFile')
+__all__.append('get_dbchange')
 
 ################################################################################
 
@@ -1502,6 +1650,42 @@ __all__.append('insert_pending_content_paths')
 
 ################################################################################
 
+class PolicyQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<PolicyQueue %s>' % self.queue_name
+
+__all__.append('PolicyQueue')
+
+@session_wrapper
+def get_policy_queue(queuename, session=None):
+    """
+    Returns PolicyQueue object for given C{queue name}
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given queue
+    """
+
+    q = session.query(PolicyQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_policy_queue')
+
+################################################################################
+
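get_policy_queue is what process-upload's divert logic seen earlier builds on; for example (a sketch, where 'proposedupdates' is only an illustrative queue name):

    from daklib.dbconn import DBConn, get_policy_queue

    session = DBConn().session()
    q = get_policy_queue('proposedupdates', session)   # hypothetical queue name
    if q is not None:
        # target directory plus the octal permission strings
        # from the new policy_queue table
        print q.path, q.perms, q.change_perms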
 class Priority(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1572,99 +1756,6 @@ __all__.append('get_priorities')
 
 ################################################################################
 
-class Queue(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<Queue %s>' % self.queue_name
-
-    def add_file_from_pool(self, poolfile):
-        """Copies a file into the pool.  Assumes that the PoolFile object is
-        attached to the same SQLAlchemy session as the Queue object is.
-
-        The caller is responsible for committing after calling this function."""
-        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-
-        # Check if we have a file of this name or this ID already
-        for f in self.queuefiles:
-            if f.fileid is not None and f.fileid == poolfile.file_id or \
-               f.poolfile.filename == poolfile_basename:
-                   # In this case, update the QueueFile entry so we
-                   # don't remove it too early
-                   f.lastused = datetime.now()
-                   DBConn().session().object_session(pf).add(f)
-                   return f
-
-        # Prepare QueueFile object
-        qf = QueueFile()
-        qf.queue_id = self.queue_id
-        qf.lastused = datetime.now()
-        qf.filename = dest
-
-        targetpath = qf.fullpath
-        queuepath = os.path.join(self.path, poolfile_basename)
-
-        try:
-            if self.copy_pool_files:
-                # We need to copy instead of symlink
-                import utils
-                utils.copy(targetfile, queuepath)
-                # NULL in the fileid field implies a copy
-                qf.fileid = None
-            else:
-                os.symlink(targetfile, queuepath)
-                qf.fileid = poolfile.file_id
-        except OSError:
-            return None
-
-        # Get the same session as the PoolFile is using and add the qf to it
-        DBConn().session().object_session(poolfile).add(qf)
-
-        return qf
-
-
-__all__.append('Queue')
-
-@session_wrapper
-def get_queue(queuename, session=None):
-    """
-    Returns Queue object for given C{queue name}, creating it if it does not
-    exist.
-
-    @type queuename: string
-    @param queuename: The name of the queue
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: Queue
-    @return: Queue object for the given queue
-    """
-
-    q = session.query(Queue).filter_by(queue_name=queuename)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_queue')
-
-################################################################################
-
-class QueueFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<QueueFile %s (%s)>' % (self.filename, self.queue_id)
-
-__all__.append('QueueFile')
-
-################################################################################
-
 class Section(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1895,6 +1986,181 @@ __all__.append('get_source_in_suite')
 
 ################################################################################
 
+@session_wrapper
+def add_dsc_to_db(u, filename, session=None):
+    entry = u.pkg.files[filename]
+    source = DBSource()
+    pfs = []
+
+    source.source = u.pkg.dsc["source"]
+    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
+    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
+    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    source.install_date = datetime.now().date()
+
+    dsc_component = entry["component"]
+    dsc_location_id = entry["location id"]
+
+    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
+
+    # Set up a new poolfile if necessary
+    if not entry.has_key("files id") or not entry["files id"]:
+        filename = entry["pool name"] + filename
+        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
+        session.flush()
+        pfs.append(poolfile)
+        entry["files id"] = poolfile.file_id
+
+    source.poolfile_id = entry["files id"]
+    session.add(source)
+    session.flush()
+
+    for suite_name in u.pkg.changes["distribution"].keys():
+        sa = SrcAssociation()
+        sa.source_id = source.source_id
+        sa.suite_id = get_suite(suite_name).suite_id
+        session.add(sa)
+
+    session.flush()
+
+    # Add the source files to the DB (files and dsc_files)
+    dscfile = DSCFile()
+    dscfile.source_id = source.source_id
+    dscfile.poolfile_id = entry["files id"]
+    session.add(dscfile)
+
+    for dsc_file, dentry in u.pkg.dsc_files.items():
+        df = DSCFile()
+        df.source_id = source.source_id
+
+        # If the .orig tarball is already in the pool, its
+        # files id is stored in dsc_files by check_dsc().
+        files_id = dentry.get("files id", None)
+
+        # Find the entry in the files hash
+        # TODO: Bail out here properly
+        dfentry = None
+        for f, e in u.pkg.files.items():
+            if f == dsc_file:
+                dfentry = e
+                break
+
+        if files_id is None:
+            filename = dfentry["pool name"] + dsc_file
+
+            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
+            # FIXME: needs to check for -1/-2 and or handle exception
+            if found and obj is not None:
+                files_id = obj.file_id
+                pfs.append(obj)
+
+            # If still not found, add it
+            if files_id is None:
+                # HACK: Force sha1sum etc into dentry
+                dentry["sha1sum"] = dfentry["sha1sum"]
+                dentry["sha256sum"] = dfentry["sha256sum"]
+                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
+                pfs.append(poolfile)
+                files_id = poolfile.file_id
+
+        df.poolfile_id = files_id
+        session.add(df)
+
+    session.flush()
+
+    # Add the src_uploaders to the DB
+    uploader_ids = [source.maintainer_id]
+    if u.pkg.dsc.has_key("uploaders"):
+        for up in u.pkg.dsc["uploaders"].split(","):
+            up = up.strip()
+            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
+
+    added_ids = {}
+    for up in uploader_ids:
+        if added_ids.has_key(up):
+            utils.warn("Already saw uploader %s for source %s" % (up, source.source))
+            continue
+
+        added_ids[up]=1
+
+        su = SrcUploader()
+        su.maintainer_id = up
+        su.source_id = source.source_id
+        session.add(su)
+
+    session.flush()
+
+    return dsc_component, dsc_location_id, pfs
+
+__all__.append('add_dsc_to_db')
+
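add_dsc_to_db above and add_deb_to_db below lean on session.flush() to obtain primary keys mid-transaction without committing. The same pattern in isolation (a sketch with made-up values, given an open SQLAlchemy session):

    session = DBConn().session()

    source = DBSource()
    source.source = 'dak'            # hypothetical package name
    source.version = '1.0-1'
    session.add(source)
    session.flush()                  # INSERT is issued; source.source_id is now populated

    sa = SrcAssociation()
    sa.source_id = source.source_id  # safe to read: flush assigned the id
    sa.suite_id = get_suite('unstable').suite_id
    session.add(sa)
    # nothing is durable until session.commit() runs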
+@session_wrapper
+def add_deb_to_db(u, filename, session=None):
+    """
+    Contrary to what you might expect, this routine deals with both
+    debs and udebs.  That info is in 'dbtype', whilst 'type' is
+    'deb' for both of them.
+    """
+    cnf = Config()
+    entry = u.pkg.files[filename]
+
+    bin = DBBinary()
+    bin.package = entry["package"]
+    bin.version = entry["version"]
+    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
+    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
+    bin.binarytype = entry["dbtype"]
+
+    # Find poolfile id
+    filename = entry["pool name"] + filename
+    fullpath = os.path.join(cnf["Dir::Pool"], filename)
+    if not entry.get("location id", None):
+        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
+
+    if entry.get("files id", None):
+        poolfile = get_poolfile_by_id(entry["files id"])
+        bin.poolfile_id = entry["files id"]
+    else:
+        poolfile = add_poolfile(filename, entry, entry["location id"], session)
+        bin.poolfile_id = entry["files id"] = poolfile.file_id
+
+    # Find source id
+    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+    if len(bin_sources) != 1:
+        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+                                  (bin.package, bin.version, bin.architecture.arch_string,
+                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
+
+    bin.source_id = bin_sources[0].source_id
+
+    # Add and flush object so it has an ID
+    session.add(bin)
+    session.flush()
+
+    # Add BinAssociations
+    for suite_name in u.pkg.changes["distribution"].keys():
+        ba = BinAssociation()
+        ba.binary_id = bin.binary_id
+        ba.suite_id = get_suite(suite_name).suite_id
+        session.add(ba)
+
+    session.flush()
+
+    # Deal with contents - disabled for now
+    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
+    #if not contents:
+    #    print "REJECT\nCould not determine contents of package %s" % bin.package
+    #    session.rollback()
+    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
+
+    return poolfile
+
+__all__.append('add_deb_to_db')
+
+################################################################################
+
 class SourceACL(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -2256,18 +2522,24 @@ class DBConn(Singleton):
         self.tbl_binaries = Table('binaries', self.db_meta, autoload=True)
         self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True)
         self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True)
+        self.tbl_build_queue = Table('build_queue', self.db_meta, autoload=True)
+        self.tbl_build_queue_files = Table('build_queue_files', self.db_meta, autoload=True)
         self.tbl_component = Table('component', self.db_meta, autoload=True)
         self.tbl_config = Table('config', self.db_meta, autoload=True)
         self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True)
         self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True)
         self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True)
+        self.tbl_changes_pending_binary = Table('changes_pending_binaries', self.db_meta, autoload=True)
         self.tbl_changes_pending_files = Table('changes_pending_files', self.db_meta, autoload=True)
+        self.tbl_changes_pending_files_map = Table('changes_pending_files_map', self.db_meta, autoload=True) +
self.tbl_changes_pending_source = Table('changes_pending_source', self.db_meta, autoload=True) + self.tbl_changes_pending_source_files = Table('changes_pending_source_files', self.db_meta, autoload=True) self.tbl_changes_pool_files = Table('changes_pool_files', self.db_meta, autoload=True) self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True) self.tbl_files = Table('files', self.db_meta, autoload=True) self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True) self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True) - self.tbl_known_changes = Table('known_changes', self.db_meta, autoload=True) + self.tbl_changes = Table('changes', self.db_meta, autoload=True) self.tbl_keyring_acl_map = Table('keyring_acl_map', self.db_meta, autoload=True) self.tbl_location = Table('location', self.db_meta, autoload=True) self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True) @@ -2275,9 +2547,8 @@ class DBConn(Singleton): self.tbl_override = Table('override', self.db_meta, autoload=True) self.tbl_override_type = Table('override_type', self.db_meta, autoload=True) self.tbl_pending_content_associations = Table('pending_content_associations', self.db_meta, autoload=True) + self.tbl_policy_queue = Table('policy_queue', self.db_meta, autoload=True) self.tbl_priority = Table('priority', self.db_meta, autoload=True) - self.tbl_queue = Table('queue', self.db_meta, autoload=True) - self.tbl_queue_files = Table('queue_files', self.db_meta, autoload=True) self.tbl_section = Table('section', self.db_meta, autoload=True) self.tbl_source = Table('source', self.db_meta, autoload=True) self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True) @@ -2287,7 +2558,7 @@ class DBConn(Singleton): self.tbl_suite = Table('suite', self.db_meta, autoload=True) self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True) self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True) - self.tbl_suite_queue_copy = Table('suite_queue_copy', self.db_meta, autoload=True) + self.tbl_suite_build_queue_copy = Table('suite_build_queue_copy', self.db_meta, autoload=True) self.tbl_uid = Table('uid', self.db_meta, autoload=True) self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True) @@ -2306,6 +2577,12 @@ class DBConn(Singleton): binary_id = self.tbl_bin_associations.c.bin, binary = relation(DBBinary))) + mapper(BuildQueue, self.tbl_build_queue, + properties = dict(queue_id = self.tbl_build_queue.c.id)) + + mapper(BuildQueueFile, self.tbl_build_queue_files, + properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'), + poolfile = relation(PoolFile, backref='buildqueueinstances'))) mapper(DBBinary, self.tbl_binaries, properties = dict(binary_id = self.tbl_binaries.c.id, @@ -2367,16 +2644,36 @@ class DBConn(Singleton): properties = dict(keyring_name = self.tbl_keyrings.c.name, keyring_id = self.tbl_keyrings.c.id)) - mapper(KnownChange, self.tbl_known_changes, - properties = dict(known_change_id = self.tbl_known_changes.c.id, + mapper(DBChange, self.tbl_changes, + properties = dict(change_id = self.tbl_changes.c.id, poolfiles = relation(PoolFile, secondary=self.tbl_changes_pool_files, backref="changeslinks"), - files = relation(KnownChangePendingFile, backref="changesfile"))) - - mapper(KnownChangePendingFile, self.tbl_changes_pending_files, - properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.c.id)) - + files = relation(ChangePendingFile, + 
secondary=self.tbl_changes_pending_files_map, + backref="changesfile"), + in_queue_id = self.tbl_changes.c.in_queue, + in_queue = relation(PolicyQueue, + primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)), + approved_for_id = self.tbl_changes.c.approved_for)) + + mapper(ChangePendingBinary, self.tbl_changes_pending_binary, + properties = dict(change_pending_binary_id = self.tbl_changes_pending_binary.c.id)) + + mapper(ChangePendingFile, self.tbl_changes_pending_files, + properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id)) + + mapper(ChangePendingSource, self.tbl_changes_pending_source, + properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id, + change = relation(DBChange), + maintainer = relation(Maintainer, + primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)), + changedby = relation(Maintainer, + primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)), + fingerprint = relation(Fingerprint), + source_files = relation(ChangePendingFile, + secondary=self.tbl_changes_pending_source_files, + backref="pending_sources"))) mapper(KeyringACLMap, self.tbl_keyring_acl_map, properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id, keyring = relation(Keyring, backref="keyring_acl_map"), @@ -2412,16 +2709,12 @@ class DBConn(Singleton): properties = dict(overridetype = self.tbl_override_type.c.type, overridetype_id = self.tbl_override_type.c.id)) + mapper(PolicyQueue, self.tbl_policy_queue, + properties = dict(policy_queue_id = self.tbl_policy_queue.c.id)) + mapper(Priority, self.tbl_priority, properties = dict(priority_id = self.tbl_priority.c.id)) - mapper(Queue, self.tbl_queue, - properties = dict(queue_id = self.tbl_queue.c.id)) - - mapper(QueueFile, self.tbl_queue_files, - properties = dict(queue = relation(Queue, backref='queuefiles'), - poolfile = relation(PoolFile, backref='queueinstances'))) - mapper(Section, self.tbl_section, properties = dict(section_id = self.tbl_section.c.id)) @@ -2469,8 +2762,8 @@ class DBConn(Singleton): mapper(Suite, self.tbl_suite, properties = dict(suite_id = self.tbl_suite.c.id, - policy_queue = relation(Queue), - copy_queues = relation(Queue, secondary=self.tbl_suite_queue_copy))) + policy_queue = relation(PolicyQueue), + copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy))) mapper(SuiteArchitecture, self.tbl_suite_architectures, properties = dict(suite_id = self.tbl_suite_architectures.c.suite, diff --git a/daklib/queue.py b/daklib/queue.py index 1694deb4..95268527 100755 --- a/daklib/queue.py +++ b/daklib/queue.py @@ -38,6 +38,8 @@ import commands import shutil import textwrap from types import * +from sqlalchemy.sql.expression import desc +from sqlalchemy.orm.exc import NoResultFound import yaml @@ -46,6 +48,7 @@ from changes import * from regexes import * from config import Config from holding import Holding +from urgencylog import UrgencyLog from dbconn import * from summarystats import SummaryStats from utils import parse_changes, check_dsc_files @@ -285,6 +288,7 @@ class Upload(object): for title, messages in msgs: if messages: msg += '\n\n%s:\n%s' % (title, '\n'.join(messages)) + msg += '\n' return msg @@ -434,12 +438,6 @@ class Upload(object): self.pkg.changes["chopversion"] = re_no_epoch.sub('', self.pkg.changes["version"]) self.pkg.changes["chopversion2"] = re_no_revision.sub('', self.pkg.changes["chopversion"]) - # Check there isn't already a changes file of the same name in 
one
-        # of the queue directories.
-        base_filename = os.path.basename(filename)
-        if get_knownchange(base_filename):
-            self.rejects.append("%s: a file with this name already exists." % (base_filename))
-
         # Check the .changes is non-empty
         if not self.pkg.files:
             self.rejects.append("%s: nothing to do (Files field is empty)." % (base_filename))
@@ -722,7 +720,6 @@ class Upload(object):
     def per_suite_file_checks(self, f, suite, session):
         cnf = Config()
         entry = self.pkg.files[f]
-        archive = utils.where_am_i()
 
         # Skip byhand
         if entry.has_key("byhand"):
@@ -766,9 +763,9 @@ class Upload(object):
 
         # Determine the location
         location = cnf["Dir::Pool"]
-        l = get_location(location, entry["component"], archive, session)
+        l = get_location(location, entry["component"], session=session)
         if l is None:
-            self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (entry["component"], archive))
+            self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s)" % entry["component"])
             entry["location id"] = -1
         else:
             entry["location id"] = l.location_id
@@ -796,17 +793,11 @@ class Upload(object):
             entry["othercomponents"] = res.fetchone()[0]
 
     def check_files(self, action=True):
-        archive = utils.where_am_i()
         file_keys = self.pkg.files.keys()
         holding = Holding()
         cnf = Config()
 
-        # XXX: As far as I can tell, this can no longer happen - see
-        #      comments by AJ in old revisions - mhy
-        # if reprocess is 2 we've already done this and we're checking
-        # things again for the new .orig.tar.gz.
-        # [Yes, I'm fully aware of how disgusting this is]
-        if action and self.reprocess < 2:
+        if action:
             cwd = os.getcwd()
             os.chdir(self.pkg.directory)
             for f in file_keys:
@@ -817,36 +808,31 @@ class Upload(object):
 
             os.chdir(cwd)
 
-        # Check there isn't already a .changes or .dak file of the same name in
-        # the proposed-updates "CopyChanges" or "CopyDotDak" storage directories.
+ # check we already know the changes file # [NB: this check must be done post-suite mapping] base_filename = os.path.basename(self.pkg.changes_file) - dot_dak_filename = base_filename[:-8] + ".dak" - for suite in self.pkg.changes["distribution"].keys(): - copychanges = "Suite::%s::CopyChanges" % (suite) - if cnf.has_key(copychanges) and \ - os.path.exists(os.path.join(cnf[copychanges], base_filename)): - self.rejects.append("%s: a file with this name already exists in %s" \ - % (base_filename, cnf[copychanges])) - - copy_dot_dak = "Suite::%s::CopyDotDak" % (suite) - if cnf.has_key(copy_dot_dak) and \ - os.path.exists(os.path.join(cnf[copy_dot_dak], dot_dak_filename)): - self.rejects.append("%s: a file with this name already exists in %s" \ - % (dot_dak_filename, Cnf[copy_dot_dak])) - - self.reprocess = 0 + session = DBConn().session() + + try: + dbc = session.query(DBChange).filter_by(changesname=base_filename).one() + # if in the pool or in a queue other than unchecked, reject + if (dbc.in_queue is None) \ + or (dbc.in_queue is not None + and dbc.in_queue.queue_name != 'unchecked'): + self.rejects.append("%s file already known to dak" % base_filename) + except NoResultFound, e: + # not known, good + pass + has_binaries = False has_source = False - session = DBConn().session() - for f, entry in self.pkg.files.items(): # Ensure the file does not already exist in one of the accepted directories - for d in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]: + for d in [ "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]: if not cnf.has_key("Dir::Queue::%s" % (d)): continue - if os.path.exists(cnf["Dir::Queue::%s" % (d) ] + '/' + f): + if os.path.exists(os.path.join(cnf["Dir::Queue::%s" % (d) ], f)): self.rejects.append("%s file already exists in the %s directory." % (f, d)) if not re_taint_free.match(f): @@ -1084,15 +1070,10 @@ class Upload(object): self.rejects.append("%s: changelog format not recognised (empty version tree)." % (dsc_filename)) def check_source(self): - # XXX: I'm fairly sure reprocess == 2 can never happen - # AJT disabled the is_incoming check years ago - mhy - # We should probably scrap or rethink the whole reprocess thing # Bail out if: # a) there's no source - # or b) reprocess is 2 - we will do this check next time when orig - # tarball is in 'files' # or c) the orig files are MIA - if not self.pkg.changes["architecture"].has_key("source") or self.reprocess == 2 \ + if not self.pkg.changes["architecture"].has_key("source") \ or len(self.pkg.orig_files) == 0: return @@ -1493,7 +1474,7 @@ class Upload(object): # or binary, whereas keys with no access might be able to # upload some binaries) if fpr.source_acl.access_level == 'dm': - self.check_dm_source_upload(fpr, session) + self.check_dm_upload(fpr, session) else: # Check source-based permissions for other types if self.pkg.changes["architecture"].has_key("source"): @@ -1837,13 +1818,13 @@ distribution.""" return summary ########################################################################### - - def accept (self, summary, short_summary, targetdir=None): + @session_wrapper + def accept (self, summary, short_summary, session=None): """ Accept an upload. 
- This moves all files referenced from the .changes into the I{accepted} - queue, sends the accepted mail, announces to lists, closes bugs and + This moves all files referenced from the .changes into the pool, + sends the accepted mail, announces to lists, closes bugs and also checks for override disparities. If enabled it will write out the version history for the BTS Version Tracking and will finally call L{queue_build}. @@ -1853,31 +1834,90 @@ distribution.""" @type short_summary: string @param short_summary: Short summary - """ cnf = Config() stats = SummaryStats() - accepttemplate = os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted') + print "Installing." + self.logger.log(["installing changes", self.pkg.changes_file]) - if targetdir is None: - targetdir = cnf["Dir::Queue::Accepted"] + poolfiles = [] - print "Accepting." - if self.logger: - self.logger.log(["Accepting changes", self.pkg.changes_file]) - - self.pkg.write_dot_dak(targetdir) + # Add the .dsc file to the DB first + for newfile, entry in self.pkg.files.items(): + if entry["type"] == "dsc": + dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session) + for j in pfs: + poolfiles.append(j) - # Move all the files into the accepted directory - utils.move(self.pkg.changes_file, targetdir) + # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb) + for newfile, entry in self.pkg.files.items(): + if entry["type"] == "deb": + poolfiles.append(add_deb_to_db(self, newfile, session)) - for name, entry in sorted(self.pkg.files.items()): - utils.move(name, targetdir) + # If this is a sourceful diff only upload that is moving + # cross-component we need to copy the .orig files into the new + # component too for the same reasons as above. + if self.pkg.changes["architecture"].has_key("source"): + for orig_file in self.pkg.orig_files.keys(): + if not self.pkg.orig_files[orig_file].has_key("id"): + continue # Skip if it's not in the pool + orig_file_id = self.pkg.orig_files[orig_file]["id"] + if self.pkg.orig_files[orig_file]["location"] == dsc_location_id: + continue # Skip if the location didn't change + + # Do the move + oldf = get_poolfile_by_id(orig_file_id, session) + old_filename = os.path.join(oldf.location.path, oldf.filename) + old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum, + 'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum} + + new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename)) + + # TODO: Care about size/md5sum collisions etc + (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session) + + if newf is None: + utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename)) + newf = add_poolfile(new_filename, old_dat, dsc_location_id, session) + + # TODO: Check that there's only 1 here + source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0] + dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0] + dscf.poolfile_id = newf.file_id + session.add(dscf) + session.flush() + + poolfiles.append(newf) + + # Install the files into the pool + for newfile, entry in self.pkg.files.items(): + destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile) + utils.move(newfile, destination) + self.logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]]) stats.accept_bytes += float(entry["size"]) - stats.accept_count += 1 + # Copy the .changes file across for 
@@ -1885,7 +1925,8 @@ distribution."""
             self.update_subst()
             self.Subst["__SUITE__"] = ""
             self.Subst["__SUMMARY__"] = summary
-            mail_message = utils.TemplateSubst(self.Subst, accepttemplate)
+            mail_message = utils.TemplateSubst(self.Subst,
+                                               os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
             utils.send_mail(mail_message)
             self.announce(short_summary, 1)
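utils.TemplateSubst above fills __KEY__ placeholders (here __SUITE__ and __SUMMARY__) from the Subst map before the mail goes out. A minimal sketch of that idea, assuming nothing beyond plain string replacement; the real utils.TemplateSubst may do more:

    # Minimal sketch of __KEY__ substitution -- illustrative, not dak's code.
    def template_subst_sketch(subst_map, template_text):
        for key, value in subst_map.items():
            template_text = template_text.replace(key, str(value))
        return template_text

    # e.g. template_subst_sketch({"__SUMMARY__": "dak_1.0 ACCEPTED"},
    #                            open(template_path).read())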
""" if from_dir is None: - os.chdir(self.pkg.directory) - else: - os.chdir(from_dir) + from_dir = self.pkg.directory + h = Holding() for f in self.pkg.files.keys(): - os.unlink(f) - os.unlink(self.pkg.changes_file) + os.unlink(os.path.join(from_dir, f)) + if os.path.exists(os.path.join(h.holding_dir, f)): + os.unlink(os.path.join(h.holding_dir, f)) + + os.unlink(os.path.join(from_dir, self.pkg.changes_file)) + if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)): + os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file)) ########################################################################### - def move_to_dir (self, dest, perms=0660, changesperms=0664): + def move_to_queue (self, queue): """ - Move files to dest with certain perms/changesperms + Move files to a destination queue using the permissions in the table """ - utils.move(self.pkg.changes_file, dest, perms=changesperms) + h = Holding() + utils.move(os.path.join(h.holding_dir, self.pkg.changes_file), + queue.path, perms=int(queue.change_perms, 8)) for f in self.pkg.files.keys(): - utils.move(f, dest, perms=perms) + utils.move(os.path.join(h.holding_dir, f), queue.path, perms=int(queue.perms, 8)) ########################################################################### @@ -2377,6 +2432,7 @@ distribution.""" # This would fix the stupidity of changing something we often iterate over # whilst we're doing it del self.pkg.files[dsc_name] + dsc_entry["files id"] = i.file_id if not orig_files.has_key(dsc_name): orig_files[dsc_name] = {} orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename) diff --git a/daklib/queue_install.py b/daklib/queue_install.py new file mode 100644 index 00000000..3283e1ef --- /dev/null +++ b/daklib/queue_install.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python +# vim:set et sw=4: + +""" +Utility functions for process-upload + +@contact: Debian FTP Master +@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006 James Troup +@copyright: 2009 Joerg Jaspert +@copyright: 2009 Mark Hymers +@license: GNU General Public License version 2 or later +""" + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
@@ -2377,6 +2432,7 @@ distribution."""
                 # This would fix the stupidity of changing something we often iterate over
                 # whilst we're doing it
                 del self.pkg.files[dsc_name]
+                dsc_entry["files id"] = i.file_id
                 if not orig_files.has_key(dsc_name):
                     orig_files[dsc_name] = {}
                 orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
diff --git a/daklib/queue_install.py b/daklib/queue_install.py
new file mode 100644
index 00000000..3283e1ef
--- /dev/null
+++ b/daklib/queue_install.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python
+# vim:set et sw=4:
+
+"""
+Utility functions for process-upload
+
+@contact: Debian FTP Master
+@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006 James Troup
+@copyright: 2009 Joerg Jaspert
+@copyright: 2009 Mark Hymers
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+import os
+
+from daklib import utils
+from daklib.dbconn import *
+from daklib.config import Config
+
+################################################################################
+
+def package_to_suite(u, suite_name, session):
+    if not u.pkg.changes["distribution"].has_key(suite_name):
+        return False
+
+    ret = True
+
+    if not u.pkg.changes["architecture"].has_key("source"):
+        q = session.query(SrcAssociation.sa_id)
+        q = q.join(Suite).filter_by(suite_name=suite_name)
+        q = q.join(DBSource).filter_by(source=u.pkg.changes['source'])
+        q = q.filter_by(version=u.pkg.changes['version']).limit(1)
+
+        # NB: Careful, this logic isn't what you would think it is
+        # The source is already in the target suite, so there is no need to
+        # go to policy; instead of moving to the policy area we just ACCEPT
+        if q.count() > 0:
+            ret = False
+
+    return ret
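Since the NB above is easy to misread, here is the return contract in brief (a summary derived from the code, not authoritative documentation): package_to_suite() answers "does this upload still need policy handling for suite_name?", not "does it belong in the suite?".

    # Return contract of package_to_suite(), summarised from the code above:
    #
    #   .changes lists suite_name?   source already in target suite?   returns
    #   --------------------------   -------------------------------   -------
    #   no                           (not checked)                     False
    #   yes, sourceful upload        (not checked)                     True
    #   yes, binary-only upload      yes                               False (plain ACCEPT)
    #   yes, binary-only upload      no                                True  (go via policy)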
+
+def package_to_queue(u, summary, short_summary, queue, chg, session, announce=None):
+    cnf = Config()
+
+    print "Moving to %s policy queue" % queue.queue_name.upper()
+    u.logger.log(["Moving to %s" % queue.queue_name, u.pkg.changes_file])
+
+    u.move_to_queue(queue)
+    chg.in_queue_id = queue.policy_queue_id
+    session.add(chg)
+    session.commit()
+
+    # Check for override disparities
+    u.check_override()
+
+    # Send accept mail, announce to lists and close bugs
+    if announce and not cnf["Dinstall::Options::No-Mail"]:
+        template = os.path.join(cnf["Dir::Templates"], announce)
+        u.update_subst()
+        u.Subst["__SUITE__"] = ""
+        mail_message = utils.TemplateSubst(u.Subst, template)
+        utils.send_mail(mail_message)
+        u.announce(short_summary, True)
+
+################################################################################
+
+# TODO: This logic needs to be replaced with policy queues before we upgrade
+# security master
+
+#def is_unembargo(u):
+#    session = DBConn().session()
+#    cnf = Config()
+#
+#    q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes)
+#    if q.rowcount > 0:
+#        session.close()
+#        return True
+#
+#    oldcwd = os.getcwd()
+#    os.chdir(cnf["Dir::Queue::Disembargo"])
+#    disdir = os.getcwd()
+#    os.chdir(oldcwd)
+#
+#    ret = False
+#
+#    if u.pkg.directory == disdir:
+#        if u.pkg.changes["architecture"].has_key("source"):
+#            session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes)
+#            session.commit()
+#
+#        ret = True
+#
+#    session.close()
+#
+#    return ret
+#
+#def queue_unembargo(u, summary, short_summary, session=None):
+#    return package_to_queue(u, summary, short_summary, "Unembargoed",
+#                            perms=0660, build=True, announce='process-unchecked.accepted')
+#
+#################################################################################
+#
+#def is_embargo(u):
+#    # if embargoed queues are enabled always embargo
+#    return True
+#
+#def queue_embargo(u, summary, short_summary, session=None):
+#    return package_to_queue(u, summary, short_summary, "Embargoed",
+#                            perms=0660, build=True, announce='process-unchecked.accepted')
+
+################################################################################
+
+def is_autobyhand(u):
+    cnf = Config()
+
+    all_auto = 1
+    any_auto = 0
+    for f in u.pkg.files.keys():
+        if u.pkg.files[f].has_key("byhand"):
+            any_auto = 1
+
+            # filename is of form "PKG_VER_ARCH.EXT" where PKG, VER and ARCH
+            # don't contain underscores, and ARCH doesn't contain dots.
+            # Further, VER matches the .changes Version:, and ARCH should be
+            # in the .changes Architecture: list.
+            if f.count("_") < 2:
+                all_auto = 0
+                continue
+
+            (pckg, ver, archext) = f.split("_", 2)
+            if archext.count(".") < 1 or u.pkg.changes["version"] != ver:
+                all_auto = 0
+                continue
+
+            ABH = cnf.SubTree("AutomaticByHandPackages")
+            if not ABH.has_key(pckg) or \
+               ABH["%s::Source" % (pckg)] != u.pkg.changes["source"]:
+                print "no match %s %s" % (pckg, u.pkg.changes["source"])
+                all_auto = 0
+                continue
+
+            (arch, ext) = archext.split(".", 1)
+            if arch not in u.pkg.changes["architecture"]:
+                all_auto = 0
+                continue
+
+            u.pkg.files[f]["byhand-arch"] = arch
+            u.pkg.files[f]["byhand-script"] = ABH["%s::Script" % (pckg)]
+
+    return any_auto and all_auto
+
+def do_autobyhand(u, summary, short_summary, chg, session):
+    print "Attempting AUTOBYHAND."
+    byhandleft = False
+    for f, entry in u.pkg.files.items():
+        byhandfile = f
+
+        if not entry.has_key("byhand"):
+            continue
+
+        if not entry.has_key("byhand-script"):
+            byhandleft = True
+            continue
+
+        os.system("ls -l %s" % byhandfile)
+
+        result = os.system("%s %s %s %s %s" % (
+                entry["byhand-script"],
+                byhandfile,
+                u.pkg.changes["version"],
+                entry["byhand-arch"],
+                os.path.abspath(u.pkg.changes_file)))
+
+        if result == 0:
+            os.unlink(byhandfile)
+            # Drop the processed file from the upload
+            # (.items() iterates over a copy, so this is safe)
+            del u.pkg.files[f]
+        else:
+            print "Error processing %s, left as byhand." % (f)
+            byhandleft = True
+
+    if byhandleft:
+        do_byhand(u, summary, short_summary, chg, session)
+    else:
+        u.accept(summary, short_summary, session)
+        u.check_override()
+
+################################################################################
+
+def is_byhand(u):
+    for f in u.pkg.files.keys():
+        if u.pkg.files[f].has_key("byhand"):
+            return True
+    return False
+
+def do_byhand(u, summary, short_summary, chg, session):
+    return package_to_queue(u, summary, short_summary,
+                            get_policy_queue('byhand'), chg, session,
+                            announce=None)
+
+################################################################################
+
+def is_new(u):
+    for f in u.pkg.files.keys():
+        if u.pkg.files[f].has_key("new"):
+            return True
+    return False
+
+def acknowledge_new(u, summary, short_summary, chg, session):
+    cnf = Config()
+
+    print "Moving to NEW queue."
+    u.logger.log(["Moving to new", u.pkg.changes_file])
+
+    q = get_policy_queue('new', session)
+
+    u.move_to_queue(q)
+    chg.in_queue_id = q.policy_queue_id
+    session.add(chg)
+    session.commit()
+
+    if not cnf["Dinstall::Options::No-Mail"]:
+        print "Sending new ack."
+        template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
+        u.update_subst()
+        u.Subst["__SUMMARY__"] = summary
+        new_ack_message = utils.TemplateSubst(u.Subst, template)
+        utils.send_mail(new_ack_message)
+
+################################################################################
+
+# q-unapproved hax0ring
+QueueInfo = {
+    "new": { "is": is_new, "process": acknowledge_new },
+    "autobyhand" : { "is" : is_autobyhand, "process": do_autobyhand },
+    "byhand" : { "is": is_byhand, "process": do_byhand },
+}
+
+def determine_target(u):
+    cnf = Config()
+
+    # Statically handled queues
+    target = None
+
+    for q in ["new", "autobyhand", "byhand"]:
+        if QueueInfo[q]["is"](u):
+            target = q
+            break
+
+    return target
+
+###############################################################################
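QueueInfo and determine_target() form a small predicate/handler dispatch table. A hypothetical driver in process-upload might tie them together as below; everything except QueueInfo, determine_target() and Upload.accept() is an assumption made for illustration:

    # Hypothetical caller sketch -- not code from the tree.
    def route_upload(u, summary, short_summary, chg, session):
        target = determine_target(u)
        if target is not None:
            # 'new', 'autobyhand' or 'byhand' take their special path
            QueueInfo[target]["process"](u, summary, short_summary, chg, session)
        else:
            # nothing special left to do: straight into the pool
            u.accept(summary, short_summary, session)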