Merge commit 'ftpmaster/master'
author Mark Hymers <mhy@debian.org>
Sat, 31 Oct 2009 09:17:48 +0000 (09:17 +0000)
committer Mark Hymers <mhy@debian.org>
Sat, 31 Oct 2009 09:17:48 +0000 (09:17 +0000)
Conflicts:
daklib/dbconn.py

Signed-off-by: Mark Hymers <mhy@debian.org>
dak/clean_suites.py
dak/dak.py
dak/dakdb/update22.py [new file with mode: 0755]
dak/process_accepted.py [deleted file]
dak/process_unchecked.py [deleted file]
dak/process_upload.py [new file with mode: 0755]
dak/update_db.py
daklib/changes.py
daklib/dbconn.py
daklib/queue.py
daklib/queue_install.py [new file with mode: 0644]

diff --git a/dak/clean_suites.py b/dak/clean_suites.py
index 72a1d5a8a545bc4695065e37706b9aa2c7a56721..99f0c8b4629162018a54936baf381814a4edd3da 100755 (executable)
@@ -164,6 +164,7 @@ SELECT id, filename FROM files f
   WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id)
     AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id)
     AND NOT EXISTS (SELECT 1 FROM changes_pool_files cpf WHERE cpf.fileid = f.id)
+    AND NOT EXISTS (SELECT 1 FROM queue_files qf WHERE qf.fileid = f.id)
     AND last_used IS NULL
     ORDER BY filename""")
 
@@ -337,7 +338,7 @@ def clean_queue_build(now_date, delete_date, max_delete, session):
     our_delete_date = now_date - timedelta(seconds = int(cnf["Clean-Suites::QueueBuildStayOfExecution"]))
     count = 0
 
-    for qf in session.query(QueueBuild).filter(QueueBuild.last_used <= our_delete_date):
+    for qf in session.query(BuildQueueFile).filter(BuildQueueFile.last_used <= our_delete_date):
         if not os.path.exists(qf.filename):
             utils.warn("%s (from queue_build) doesn't exist." % (qf.filename))
             continue
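
The second hunk tracks an ORM rename in daklib/dbconn.py: QueueBuild becomes BuildQueueFile, matching the queue_files to build_queue_files table rename performed by update22 below. A rough declarative-style sketch of the mapping the new query assumes (dbconn.py itself uses classic mappers and the real class carries more columns; names follow the migration's schema):

    from sqlalchemy import Column, DateTime, Integer, Text
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class BuildQueueFile(Base):
        # Sketch only -- the actual mapping lives in daklib/dbconn.py.
        __tablename__ = 'build_queue_files'

        id             = Column(Integer, primary_key=True)
        build_queue_id = Column(Integer)   # FK to build_queue.id (ON DELETE CASCADE)
        filename       = Column(Text)      # checked against the filesystem above
        last_used      = Column(DateTime)  # drives the stay-of-execution cutoff
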
diff --git a/dak/dak.py b/dak/dak.py
index e424836f750b1a1f683d027ade063de8359ea5f4..47bbedfa5906511407a020e3947e113078536f59 100755 (executable)
@@ -66,10 +66,8 @@ def init():
 
         ("process-new",
          "Process NEW and BYHAND packages"),
-        ("process-unchecked",
+        ("process-upload",
          "Process packages in queue/unchecked"),
-        ("process-accepted",
-         "Install packages into the pool"),
 
         ("make-suite-file-list",
          "Generate lists of packages per suite for apt-ftparchive"),
diff --git a/dak/dakdb/update22.py b/dak/dakdb/update22.py
new file mode 100755 (executable)
index 0000000..758430c
--- /dev/null
@@ -0,0 +1,225 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Clean up queue SQL
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009  Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+
+################################################################################
+
+import psycopg2
+import time
+import os
+import datetime
+import traceback
+
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+
+def do_update(self):
+    print "Splitting up queues and fixing general design mistakes"
+
+    try:
+        c = self.db.cursor()
+
+        cnf = Config()
+
+        print "Adding build_queue table"
+        c.execute("""CREATE TABLE build_queue (
+                            id          SERIAL PRIMARY KEY,
+                            queue_name  TEXT NOT NULL UNIQUE,
+                            path        TEXT NOT NULL,
+                            copy_files  BOOL DEFAULT FALSE NOT NULL)""")
+
+        print "Adding policy_queue table"
+        c.execute("""CREATE TABLE policy_queue (
+                            id          SERIAL PRIMARY KEY,
+                            queue_name  TEXT NOT NULL UNIQUE,
+                            path        TEXT NOT NULL)""")
+
+        print "Copying queues"
+        queues = {}
+        c.execute("""SELECT queue.id, queue.queue_name, queue.path, queue.copy_pool_files FROM queue""")
+
+        for q in c.fetchall():
+            queues[q[0]] = q[1]
+            if q[1] in ['accepted', 'buildd']:
+                # Move to build_queue_table
+                c.execute("""INSERT INTO build_queue (queue_name, path, copy_files)
+                                   VALUES ('%s', '%s', '%s')""" % (q[1], q[2], q[3]))
+
+            else:
+                # Move to policy_queue_table
+                c.execute("""INSERT INTO policy_queue (queue_name, path)
+                                   VALUES ('%s', '%s')""" % (q[1], q[2]))
+
+
+        print "Fixing up build_queue_files"
+        c.execute("""ALTER TABLE queue_files DROP CONSTRAINT queue_files_queueid_fkey""")
+        c.execute("""ALTER TABLE queue_files RENAME TO build_queue_files""")
+        c.execute("""ALTER TABLE build_queue_files RENAME COLUMN queueid TO build_queue_id""")
+
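+        # At this point build_queue_id still holds ids from the old queue
+        # table; remap them to the new build_queue rows via queue_name.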
+        c.execute("""UPDATE build_queue_files
+                        SET build_queue_id = (SELECT build_queue.id FROM build_queue
+                                               WHERE build_queue.queue_name =
+                                                (SELECT queue.queue_name FROM queue
+                                                  WHERE queue.id = build_queue_files.build_queue_id))""")
+
+        c.execute("""ALTER TABLE build_queue_files
+                       ADD CONSTRAINT build_queue_files_build_queue_id_fkey
+                       FOREIGN KEY (build_queue_id)
+                       REFERENCES build_queue(id)
+                       ON DELETE CASCADE""")
+
+
+        c.execute("""ALTER TABLE suite DROP CONSTRAINT suite_policy_queue_fkey""")
+
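+        # Same id remap for suite.policy_queue_id: old queue ids become
+        # policy_queue ids, matched on queue_name.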
+        c.execute("""UPDATE suite
+    SET policy_queue_id = (SELECT policy_queue.id FROM policy_queue
+                             WHERE policy_queue.queue_name =
+                              (SELECT queue.queue_name FROM queue
+                               WHERE queue.id = suite.policy_queue_id))""")
+
+        c.execute("""ALTER TABLE suite
+                       ADD CONSTRAINT suite_policy_queue_fkey
+                       FOREIGN KEY (policy_queue_id)
+                       REFERENCES policy_queue (id)
+                       ON DELETE RESTRICT""")
+
+        c.execute("""ALTER TABLE known_changes DROP CONSTRAINT known_changes_approved_for_fkey""")
+        c.execute("""ALTER TABLE known_changes DROP CONSTRAINT known_changes_in_queue_fkey""")
+
+        c.execute("""UPDATE known_changes
+    SET in_queue = (SELECT policy_queue.id FROM policy_queue
+                             WHERE policy_queue.queue_name =
+                              (SELECT queue.queue_name FROM queue
+                               WHERE queue.id = known_changes.in_queue))""")
+
+        c.execute("""ALTER TABLE known_changes
+                       ADD CONSTRAINT known_changes_in_queue_fkey
+                       FOREIGN KEY (in_queue)
+                       REFERENCES policy_queue (id)
+                       ON DELETE RESTRICT""")
+
+
+
+        c.execute("""UPDATE known_changes
+    SET approved_for = (SELECT policy_queue.id FROM policy_queue
+                               WHERE policy_queue.queue_name =
+                                (SELECT queue.queue_name FROM queue
+                                  WHERE queue.id = known_changes.approved_for))""")
+
+        c.execute("""ALTER TABLE known_changes
+                       ADD CONSTRAINT known_changes_approved_for_fkey
+                       FOREIGN KEY (approved_for)
+                       REFERENCES policy_queue (id)
+                       ON DELETE RESTRICT""")
+
+        c.execute("""ALTER TABLE suite_queue_copy RENAME TO suite_build_queue_copy""")
+
+        c.execute("""ALTER TABLE suite_build_queue_copy DROP CONSTRAINT suite_queue_copy_queue_fkey""")
+
+        c.execute("""ALTER TABLE suite_build_queue_copy RENAME COLUMN queue TO build_queue_id""")
+
+        c.execute("""UPDATE suite_build_queue_copy
+    SET build_queue_id = (SELECT build_queue.id FROM build_queue
+                                 WHERE build_queue.queue_name =
+                                (SELECT queue.queue_name FROM queue
+                                  WHERE queue.id = suite_build_queue_copy.build_queue_id))""")
+
+        c.execute("""ALTER TABLE suite_build_queue_copy
+                       ADD CONSTRAINT suite_build_queue_copy_build_queue_id_fkey
+                       FOREIGN KEY (build_queue_id)
+                       REFERENCES build_queue (id)
+                       ON DELETE RESTRICT""")
+
+        c.execute("""DROP TABLE changes_pending_files""")
+
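+        # Rebuild changes_pending_files from scratch: the new definition
+        # records size plus md5/sha1/sha256 checksums per pending file.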
+        c.execute("""CREATE TABLE changes_pending_files (
+                            id             SERIAL PRIMARY KEY,
+                            filename       TEXT NOT NULL UNIQUE,
+                            size           BIGINT NOT NULL,
+                            md5sum         TEXT NOT NULL,
+                            sha1sum        TEXT NOT NULL,
+                            sha256sum      TEXT NOT NULL )""")
+
+        c.execute("""CREATE TABLE changes_pending_source (
+                            id             SERIAL PRIMARY KEY,
+                            change_id      INT4 NOT NULL REFERENCES known_changes (id),
+                            source         TEXT NOT NULL,
+                            version        DEBVERSION NOT NULL,
+                            maintainer_id  INT4 NOT NULL REFERENCES maintainer (id),
+                            changedby_id   INT4 NOT NULL REFERENCES maintainer (id),
+                            sig_fpr        INT4 NOT NULL REFERENCES fingerprint (id),
+                            dm_upload_allowed BOOL NOT NULL DEFAULT FALSE )""")
+
+        c.execute("""CREATE TABLE changes_pending_source_files (
+                            pending_source_id INT4 REFERENCES changes_pending_source (id) NOT NULL,
+                            pending_file_id   INT4 REFERENCES changes_pending_files (id) NOT NULL,
+
+                            PRIMARY KEY (pending_source_id, pending_file_id) )""")
+
+        c.execute("""CREATE TABLE changes_pending_binaries (
+                            id                 SERIAL PRIMARY KEY,
+                            change_id          INT4 NOT NULL REFERENCES known_changes (id),
+                            package            TEXT NOT NULL,
+                            version            DEBVERSION NOT NULL,
+                            architecture_id    INT4 REFERENCES architecture (id) NOT NULL,
+                            source_id          INT4 REFERENCES source (id),
+                            pending_source_id  INT4 REFERENCES changes_pending_source (id),
+                            pending_file_id    INT4 REFERENCES changes_pending_files (id),
+
+                            UNIQUE (package, version, architecture_id),
+                            CHECK (source_id IS NOT NULL or pending_source_id IS NOT NULL ) )""")
+
+        print "Getting rid of old queue table"
+        c.execute("""DROP TABLE queue""")
+
+        print "Sorting out permissions"
+
+        for t in ['build_queue', 'policy_queue', 'build_queue_files',
+                  'changes_pending_binaries', 'changes_pending_source_files',
+                  'changes_pending_source', 'changes_pending_files',
+                  'changes_pool_files', 'suite_build_queue_copy']:
+            c.execute("GRANT SELECT ON %s TO public" % t)
+            c.execute("GRANT ALL ON %s TO ftpmaster" % t)
+
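+        # PostgreSQL does not rename owned sequences when a table is renamed,
+        # so build_queue_files still draws its ids from queue_files_id_seq.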
+        for s in ['queue_files_id_seq', 'build_queue_id_seq',
+                  'changes_pending_source_id_seq',
+                  'changes_pending_binaries_id_seq',
+                  'changes_pending_files_id_seq',
+                  'known_changes_id_seq',
+                  'policy_queue_id_seq']:
+            c.execute("GRANT USAGE ON %s TO ftpmaster" % s)
+
+        print "Committing"
+        c.execute("UPDATE config SET value = '22' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.InternalError, msg:
+        self.db.rollback()
+        raise DBUpdateError, "Unable to apply update 22, rollback issued. Error message: %s" % (str(msg))
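
Like the other dakdb revisions, update22 exposes a do_update(self) hook: self.db must be an open psycopg2 connection (a readable dak configuration is also needed, since Config() is instantiated), and the revision commits on success and rolls back on psycopg2.InternalError. A minimal harness for exercising such a migration against a scratch database (import path and DSN are illustrative; dak's real driver lives in dak/update_db.py):

    import psycopg2

    from dak.dakdb import update22  # import path assumed for illustration

    class MigrationRunner(object):
        # Bare-bones stand-in for the update_db wrapper: the migration only
        # needs a .db attribute holding an open database connection.
        def __init__(self, dsn):
            self.db = psycopg2.connect(dsn)

    # do_update() drives its own commit/rollback against self.db.
    update22.do_update(MigrationRunner("dbname=projectb_test"))
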
diff --git a/dak/process_accepted.py b/dak/process_accepted.py
deleted file mode 100755 (executable)
index b203f49..0000000
+++ /dev/null
@@ -1,706 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Installs Debian packages from queue/accepted into the pool
-
-@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2000, 2001, 2002, 2003, 2004, 2006  James Troup <james@nocrew.org>
-@copyright: 2009  Joerg Jaspert <joerg@debian.org>
-@license: GNU General Public License version 2 or later
-
-"""
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-###############################################################################
-
-#    Cartman: "I'm trying to make the best of a bad situation, I don't
-#              need to hear crap from a bunch of hippy freaks living in
-#              denial.  Screw you guys, I'm going home."
-#
-#    Kyle: "But Cartman, we're trying to..."
-#
-#    Cartman: "uhh.. screw you guys... home."
-
-###############################################################################
-
-import errno
-import fcntl
-import os
-import sys
-from datetime import datetime
-import apt_pkg
-
-from daklib import daklog
-from daklib.queue import *
-from daklib import utils
-from daklib.dbconn import *
-from daklib.dak_exceptions import *
-from daklib.regexes import re_default_answer, re_issource, re_fdnic
-from daklib.urgencylog import UrgencyLog
-from daklib.summarystats import SummaryStats
-from daklib.config import Config
-
-###############################################################################
-
-Options = None
-Logger = None
-
-###############################################################################
-
-def init():
-    global Options
-
-    # Initialize config and connection to db
-    cnf = Config()
-    DBConn()
-
-    Arguments = [('a',"automatic","Dinstall::Options::Automatic"),
-                 ('h',"help","Dinstall::Options::Help"),
-                 ('n',"no-action","Dinstall::Options::No-Action"),
-                 ('p',"no-lock", "Dinstall::Options::No-Lock"),
-                 ('s',"no-mail", "Dinstall::Options::No-Mail"),
-                 ('d',"directory", "Dinstall::Options::Directory", "HasArg")]
-
-    for i in ["automatic", "help", "no-action", "no-lock", "no-mail",
-              "version", "directory"]:
-        if not cnf.has_key("Dinstall::Options::%s" % (i)):
-            cnf["Dinstall::Options::%s" % (i)] = ""
-
-    changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
-    Options = cnf.SubTree("Dinstall::Options")
-
-    if Options["Help"]:
-        usage()
-
-    # If we have a directory flag, use it to find our files
-    if cnf["Dinstall::Options::Directory"] != "":
-        # Note that we clobber the list of files we were given in this case
-        # so warn if the user has done both
-        if len(changes_files) > 0:
-            utils.warn("Directory provided so ignoring files given on command line")
-
-        changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"])
-
-    return changes_files
-
-###############################################################################
-
-def usage (exit_code=0):
-    print """Usage: dak process-accepted [OPTION]... [CHANGES]...
-  -a, --automatic           automatic run
-  -h, --help                show this help and exit.
-  -n, --no-action           don't do anything
-  -p, --no-lock             don't check lockfile !! for cron.daily only !!
-  -s, --no-mail             don't send any mail
-  -V, --version             display the version number and exit"""
-    sys.exit(exit_code)
-
-###############################################################################
-
-def action (u, stable_queue=None, log_urgency=True, session=None):
-    (summary, short_summary) = u.build_summaries()
-    pi = u.package_info()
-
-    (prompt, answer) = ("", "XXX")
-    if Options["No-Action"] or Options["Automatic"]:
-        answer = 'S'
-
-    if len(u.rejects) > 0:
-        print "REJECT\n" + pi
-        prompt = "[R]eject, Skip, Quit ?"
-        if Options["Automatic"]:
-            answer = 'R'
-    else:
-        print "INSTALL to " + ", ".join(u.pkg.changes["distribution"].keys())
-        print pi + summary,
-        prompt = "[I]nstall, Skip, Quit ?"
-        if Options["Automatic"]:
-            answer = 'I'
-
-    while prompt.find(answer) == -1:
-        answer = utils.our_raw_input(prompt)
-        m = re_default_answer.match(prompt)
-        if answer == "":
-            answer = m.group(1)
-        answer = answer[:1].upper()
-
-    if answer == 'R':
-        u.do_unaccept()
-        Logger.log(["unaccepted", u.pkg.changes_file])
-    elif answer == 'I':
-        if stable_queue:
-            stable_install(u, summary, short_summary, stable_queue, log_urgency)
-        else:
-            install(u, session, log_urgency)
-    elif answer == 'Q':
-        sys.exit(0)
-
-
-###############################################################################
-def add_poolfile(filename, datadict, location_id, session):
-    poolfile = PoolFile()
-    poolfile.filename = filename
-    poolfile.filesize = datadict["size"]
-    poolfile.md5sum = datadict["md5sum"]
-    poolfile.sha1sum = datadict["sha1sum"]
-    poolfile.sha256sum = datadict["sha256sum"]
-    poolfile.location_id = location_id
-
-    session.add(poolfile)
-    # Flush to get a file id (NB: This is not a commit)
-    session.flush()
-
-    return poolfile
-
-def add_dsc_to_db(u, filename, session):
-    entry = u.pkg.files[filename]
-    source = DBSource()
-
-    source.source = u.pkg.dsc["source"]
-    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
-    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
-    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
-    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    source.install_date = datetime.now().date()
-
-    dsc_component = entry["component"]
-    dsc_location_id = entry["location id"]
-
-    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
-
-    # Set up a new poolfile if necessary
-    if not entry.has_key("files id") or not entry["files id"]:
-        filename = entry["pool name"] + filename
-        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
-        entry["files id"] = poolfile.file_id
-
-    source.poolfile_id = entry["files id"]
-    session.add(source)
-    session.flush()
-
-    for suite_name in u.pkg.changes["distribution"].keys():
-        sa = SrcAssociation()
-        sa.source_id = source.source_id
-        sa.suite_id = get_suite(suite_name).suite_id
-        session.add(sa)
-
-    session.flush()
-
-    # Add the source files to the DB (files and dsc_files)
-    dscfile = DSCFile()
-    dscfile.source_id = source.source_id
-    dscfile.poolfile_id = entry["files id"]
-    session.add(dscfile)
-
-    for dsc_file, dentry in u.pkg.dsc_files.items():
-        df = DSCFile()
-        df.source_id = source.source_id
-
-        # If the .orig tarball is already in the pool, it's
-        # files id is stored in dsc_files by check_dsc().
-        files_id = dentry.get("files id", None)
-
-        # Find the entry in the files hash
-        # TODO: Bail out here properly
-        dfentry = None
-        for f, e in u.pkg.files.items():
-            if f == dsc_file:
-                dfentry = e
-                break
-
-        if files_id is None:
-            filename = dfentry["pool name"] + dsc_file
-
-            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
-            # FIXME: needs to check for -1/-2 and or handle exception
-            if found and obj is not None:
-                files_id = obj.file_id
-
-            # If still not found, add it
-            if files_id is None:
-                # HACK: Force sha1sum etc into dentry
-                dentry["sha1sum"] = dfentry["sha1sum"]
-                dentry["sha256sum"] = dfentry["sha256sum"]
-                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
-                files_id = poolfile.file_id
-
-        df.poolfile_id = files_id
-        session.add(df)
-
-    session.flush()
-
-    # Add the src_uploaders to the DB
-    uploader_ids = [source.maintainer_id]
-    if u.pkg.dsc.has_key("uploaders"):
-        for up in u.pkg.dsc["uploaders"].split(","):
-            up = up.strip()
-            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
-
-    added_ids = {}
-    for up in uploader_ids:
-        if added_ids.has_key(up):
-            utils.warn("Already saw uploader %s for source %s" % (up, source.source))
-            continue
-
-        added_ids[up]=1
-
-        su = SrcUploader()
-        su.maintainer_id = up
-        su.source_id = source.source_id
-        session.add(su)
-
-    session.flush()
-
-    return dsc_component, dsc_location_id
-
-def add_deb_to_db(u, filename, session):
-    """
-    Contrary to what you might expect, this routine deals with both
-    debs and udebs.  That info is in 'dbtype', whilst 'type' is
-    'deb' for both of them
-    """
-    cnf = Config()
-    entry = u.pkg.files[filename]
-
-    bin = DBBinary()
-    bin.package = entry["package"]
-    bin.version = entry["version"]
-    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
-    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
-    bin.binarytype = entry["dbtype"]
-
-    # Find poolfile id
-    filename = entry["pool name"] + filename
-    fullpath = os.path.join(cnf["Dir::Pool"], filename)
-    if not entry.get("location id", None):
-        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], utils.where_am_i(), session).location_id
-
-    if not entry.get("files id", None):
-        poolfile = add_poolfile(filename, entry, entry["location id"], session)
-        entry["files id"] = poolfile.file_id
-
-    bin.poolfile_id = entry["files id"]
-
-    # Find source id
-    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
-    if len(bin_sources) != 1:
-        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                  (bin.package, bin.version, bin.architecture.arch_string,
-                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
-
-    bin.source_id = bin_sources[0].source_id
-
-    # Add and flush object so it has an ID
-    session.add(bin)
-    session.flush()
-
-    # Add BinAssociations
-    for suite_name in u.pkg.changes["distribution"].keys():
-        ba = BinAssociation()
-        ba.binary_id = bin.binary_id
-        ba.suite_id = get_suite(suite_name).suite_id
-        session.add(ba)
-
-    session.flush()
-
-    # Deal with contents - disabled for now
-    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
-    #if not contents:
-    #    print "REJECT\nCould not determine contents of package %s" % bin.package
-    #    session.rollback()
-    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
-
-
-def install(u, session, log_urgency=True):
-    cnf = Config()
-    summarystats = SummaryStats()
-
-    print "Installing."
-
-    Logger.log(["installing changes", u.pkg.changes_file])
-
-    # Ensure that we have all the hashes we need below.
-    u.ensure_hashes()
-    if len(u.rejects) > 0:
-        # There were errors.  Print them and SKIP the changes.
-        for msg in u.rejects:
-            utils.warn(msg)
-        return
-
-    # Add the .dsc file to the DB first
-    for newfile, entry in u.pkg.files.items():
-        if entry["type"] == "dsc":
-            dsc_component, dsc_location_id = add_dsc_to_db(u, newfile, session)
-
-    # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
-    for newfile, entry in u.pkg.files.items():
-        if entry["type"] == "deb":
-            add_deb_to_db(u, newfile, session)
-
-    # If this is a sourceful diff only upload that is moving
-    # cross-component we need to copy the .orig files into the new
-    # component too for the same reasons as above.
-    if u.pkg.changes["architecture"].has_key("source"):
-        for orig_file in u.pkg.orig_files.keys():
-            if not u.pkg.orig_files[orig_file].has_key("id"):
-                continue # Skip if it's not in the pool
-            orig_file_id = u.pkg.orig_files[orig_file]["id"]
-            if u.pkg.orig_files[orig_file]["location"] == dsc_location_id:
-                continue # Skip if the location didn't change
-
-            # Do the move
-            oldf = get_poolfile_by_id(orig_file_id, session)
-            old_filename = os.path.join(oldf.location.path, oldf.filename)
-            old_dat = {'size': oldf.filesize,   'md5sum': oldf.md5sum,
-                       'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
-
-            new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
-
-            # TODO: Care about size/md5sum collisions etc
-            (found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
-
-            if newf is None:
-                utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
-                newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
-
-                # TODO: Check that there's only 1 here
-                source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0]
-                dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
-                dscf.poolfile_id = newf.file_id
-                session.add(dscf)
-                session.flush()
-
-    # Install the files into the pool
-    for newfile, entry in u.pkg.files.items():
-        destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
-        utils.move(newfile, destination)
-        Logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
-        summarystats.accept_bytes += float(entry["size"])
-
-    # Copy the .changes file across for suite which need it.
-    copy_changes = {}
-    copy_dot_dak = {}
-    for suite_name in u.pkg.changes["distribution"].keys():
-        if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)):
-            copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = ""
-        # and the .dak file...
-        if cnf.has_key("Suite::%s::CopyDotDak" % (suite_name)):
-            copy_dot_dak[cnf["Suite::%s::CopyDotDak" % (suite_name)]] = ""
-
-    for dest in copy_changes.keys():
-        utils.copy(u.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
-
-    for dest in copy_dot_dak.keys():
-        utils.copy(u.pkg.changes_file[:-8]+".dak", dest)
-
-    # We're done - commit the database changes
-    session.commit()
-
-    # Move the .changes into the 'done' directory
-    utils.move(u.pkg.changes_file,
-               os.path.join(cnf["Dir::Queue::Done"], os.path.basename(u.pkg.changes_file)))
-
-    # Remove the .dak file
-    os.unlink(u.pkg.changes_file[:-8] + ".dak")
-
-    if u.pkg.changes["architecture"].has_key("source") and log_urgency:
-        UrgencyLog().log(u.pkg.dsc["source"], u.pkg.dsc["version"], u.pkg.changes["urgency"])
-
-    # Our SQL session will automatically start a new transaction after
-    # the last commit
-
-    # Undo the work done in queue.py(accept) to help auto-building
-    # from accepted.
-    now_date = datetime.now()
-
-    for suite_name in u.pkg.changes["distribution"].keys():
-        if suite_name not in cnf.ValueList("Dinstall::QueueBuildSuites"):
-            continue
-
-        suite = get_suite(suite_name, session)
-        dest_dir = cnf["Dir::QueueBuild"]
-
-        if cnf.FindB("Dinstall::SecurityQueueBuild"):
-            dest_dir = os.path.join(dest_dir, suite_name)
-
-        for newfile, entry in u.pkg.files.items():
-            dest = os.path.join(dest_dir, newfile)
-
-            qb = get_queue_build(dest, suite.suite_id, session)
-
-            # Remove it from the list of packages for later processing by apt-ftparchive
-            if qb:
-                qb.last_used = now_date
-                qb.in_queue = False
-                session.add(qb)
-
-            if not cnf.FindB("Dinstall::SecurityQueueBuild"):
-                # Update the symlink to point to the new location in the pool
-                pool_location = utils.poolify(u.pkg.changes["source"], entry["component"])
-                src = os.path.join(cnf["Dir::Pool"], pool_location, os.path.basename(newfile))
-                if os.path.islink(dest):
-                    os.unlink(dest)
-                os.symlink(src, dest)
-
-        # Update last_used on any non-uploaded .orig symlink
-        for orig_file in u.pkg.orig_files.keys():
-            # Determine the .orig.tar.gz file name
-            if not u.pkg.orig_files[orig_file].has_key("id"):
-                continue # Skip files not in the pool
-            # XXX: do we really want to update the orig_files dict here
-            # instead of using a temporary variable?
-            u.pkg.orig_files[orig_file]["path"] = os.path.join(dest_dir, orig_file)
-
-            # Remove it from the list of packages for later processing by apt-ftparchive
-            qb = get_queue_build(u.pkg.orig_files[orig_file]["path"], suite.suite_id, session)
-            if qb:
-                qb.in_queue = False
-                qb.last_used = now_date
-                session.add(qb)
-
-    session.commit()
-
-    # Finally...
-    summarystats.accept_count += 1
-
-################################################################################
-
-def stable_install(u, summary, short_summary, fromsuite_name="proposed-updates", log_urgency=True):
-    cnf = Config()
-    session = DBConn().session()
-    summarystats = SummaryStats()
-
-    fromsuite_name = fromsuite_name.lower()
-    tosuite_name = "Stable"
-    if fromsuite_name == "oldstable-proposed-updates":
-        tosuite_name = "OldStable"
-
-    print "Installing from %s to %s." % (fromsuite_name, tosuite_name)
-
-    fromsuite = get_suite(fromsuite_name)
-    tosuite = get_suite(tosuite_name)
-
-    # Add the source to stable (and remove it from proposed-updates)
-    for newfile, entry in u.pkg.files.items():
-        if entry["type"] == "dsc":
-            package = u.pkg.dsc["source"]
-            # NB: not files[file]["version"], that has no epoch
-            version = u.pkg.dsc["version"]
-
-            source = get_sources_from_name(package, version, session)
-            if len(source) < 1:
-                utils.fubar("[INTERNAL ERROR] couldn't find '%s' (%s) in source table." % (package, version))
-            source = source[0]
-
-            # Remove from old suite
-            old = session.query(SrcAssociation).filter_by(source_id = source.source_id)
-            old = old.filter_by(suite_id = fromsuite.suite_id)
-            old.delete()
-
-            # Add to new suite
-            new = SrcAssociation()
-            new.source_id = source.source_id
-            new.suite_id = tosuite.suite_id
-            session.add(new)
-
-    # Add the binaries to stable (and remove it/them from proposed-updates)
-    for newfile, entry in u.pkg.files.items():
-        if entry["type"] == "deb":
-            package = entry["package"]
-            version = entry["version"]
-            architecture = entry["architecture"]
-
-            binary = get_binaries_from_name(package, version, [architecture, 'all'])
-
-            if len(binary) < 1:
-                utils.fubar("[INTERNAL ERROR] couldn't find '%s' (%s for %s architecture) in binaries table." % (package, version, architecture))
-            binary = binary[0]
-
-            # Remove from old suite
-            old = session.query(BinAssociation).filter_by(binary_id = binary.binary_id)
-            old = old.filter_by(suite_id = fromsuite.suite_id)
-            old.delete()
-
-            # Add to new suite
-            new = BinAssociation()
-            new.binary_id = binary.binary_id
-            new.suite_id = tosuite.suite_id
-            session.add(new)
-
-    session.commit()
-
-    utils.move(u.pkg.changes_file,
-               os.path.join(cnf["Dir::Morgue"], 'process-accepted', os.path.basename(u.pkg.changes_file)))
-
-    ## Update the Stable ChangeLog file
-    # TODO: URGH - Use a proper tmp file
-    new_changelog_filename = cnf["Dir::Root"] + cnf["Suite::%s::ChangeLogBase" % (tosuite.suite_name)] + ".ChangeLog"
-    changelog_filename = cnf["Dir::Root"] + cnf["Suite::%s::ChangeLogBase" % (tosuite.suite_name)] + "ChangeLog"
-    if os.path.exists(new_changelog_filename):
-        os.unlink(new_changelog_filename)
-
-    new_changelog = utils.open_file(new_changelog_filename, 'w')
-    for newfile, entry in u.pkg.files.items():
-        if entry["type"] == "deb":
-            new_changelog.write("%s/%s/binary-%s/%s\n" % (tosuite.suite_name,
-                                                          entry["component"],
-                                                          entry["architecture"],
-                                                          newfile))
-        elif re_issource.match(newfile):
-            new_changelog.write("%s/%s/source/%s\n" % (tosuite.suite_name,
-                                                       entry["component"],
-                                                       newfile))
-        else:
-            new_changelog.write("%s\n" % (newfile))
-
-    chop_changes = re_fdnic.sub("\n", u.pkg.changes["changes"])
-    new_changelog.write(chop_changes + '\n\n')
-
-    if os.access(changelog_filename, os.R_OK) != 0:
-        changelog = utils.open_file(changelog_filename)
-        new_changelog.write(changelog.read())
-
-    new_changelog.close()
-
-    if os.access(changelog_filename, os.R_OK) != 0:
-        os.unlink(changelog_filename)
-    utils.move(new_changelog_filename, changelog_filename)
-
-    summarystats.accept_count += 1
-
-    if not Options["No-Mail"] and u.pkg.changes["architecture"].has_key("source"):
-        u.Subst["__SUITE__"] = " into %s" % (tosuite)
-        u.Subst["__SUMMARY__"] = summary
-        u.Subst["__BCC__"] = "X-DAK: dak process-accepted"
-
-        if cnf.has_key("Dinstall::Bcc"):
-            u.Subst["__BCC__"] += "\nBcc: %s" % (cnf["Dinstall::Bcc"])
-
-        template = os.path.join(cnf["Dir::Templates"], 'process-accepted.install')
-
-        mail_message = utils.TemplateSubst(u.Subst, template)
-        utils.send_mail(mail_message)
-        u.announce(short_summary, True)
-
-    # Finally remove the .dak file
-    dot_dak_file = os.path.join(cnf["Suite::%s::CopyDotDak" % (fromsuite.suite_name)],
-                                os.path.basename(u.pkg.changes_file[:-8]+".dak"))
-    os.unlink(dot_dak_file)
-
-################################################################################
-
-def process_it(changes_file, stable_queue, log_urgency, session):
-    cnf = Config()
-    u = Upload()
-
-    overwrite_checks = True
-
-    # Absolutize the filename to avoid the requirement of being in the
-    # same directory as the .changes file.
-    cfile = os.path.abspath(changes_file)
-
-    # And since handling of installs to stable munges with the CWD
-    # save and restore it.
-    u.prevdir = os.getcwd()
-
-    if stable_queue:
-        old = cfile
-        cfile = os.path.basename(old)
-        os.chdir(cnf["Suite::%s::CopyDotDak" % (stable_queue)])
-        # overwrite_checks should not be performed if installing to stable
-        overwrite_checks = False
-
-    u.pkg.load_dot_dak(cfile)
-    u.update_subst()
-
-    if stable_queue:
-        u.pkg.changes_file = old
-
-    u.accepted_checks(overwrite_checks, session)
-    action(u, stable_queue, log_urgency, session)
-
-    # Restore CWD
-    os.chdir(u.prevdir)
-
-###############################################################################
-
-def main():
-    global Logger
-
-    cnf = Config()
-    summarystats = SummaryStats()
-    changes_files = init()
-    log_urgency = False
-    stable_queue = None
-
-    # -n/--dry-run invalidates some other options which would involve things happening
-    if Options["No-Action"]:
-        Options["Automatic"] = ""
-
-    # Check that we aren't going to clash with the daily cron job
-
-    if not Options["No-Action"] and os.path.exists("%s/Archive_Maintenance_In_Progress" % (cnf["Dir::Root"])) and not Options["No-Lock"]:
-        utils.fubar("Archive maintenance in progress.  Try again later.")
-
-    # If running from within proposed-updates; assume an install to stable
-    queue = ""
-    if os.getenv('PWD').find('oldstable-proposed-updates') != -1:
-        stable_queue = "Oldstable-Proposed-Updates"
-    elif os.getenv('PWD').find('proposed-updates') != -1:
-        stable_queue = "Proposed-Updates"
-
-    # Obtain lock if not in no-action mode and initialize the log
-    if not Options["No-Action"]:
-        lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
-        try:
-            fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
-        except IOError, e:
-            if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN':
-                utils.fubar("Couldn't obtain lock; assuming another 'dak process-accepted' is already running.")
-            else:
-                raise
-        Logger = daklog.Logger(cnf, "process-accepted")
-        if not stable_queue and cnf.get("Dir::UrgencyLog"):
-            # Initialise UrgencyLog()
-            log_urgency = True
-            UrgencyLog()
-
-    # Sort the .changes files so that we process sourceful ones first
-    changes_files.sort(utils.changes_compare)
-
-
-    # Process the changes files
-    for changes_file in changes_files:
-        print "\n" + changes_file
-        session = DBConn().session()
-        process_it(changes_file, stable_queue, log_urgency, session)
-        session.close()
-
-    if summarystats.accept_count:
-        sets = "set"
-        if summarystats.accept_count > 1:
-            sets = "sets"
-        sys.stderr.write("Installed %d package %s, %s.\n" % (summarystats.accept_count, sets,
-                                                             utils.size_type(int(summarystats.accept_bytes))))
-        Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
-
-    if not Options["No-Action"]:
-        Logger.close()
-        if log_urgency:
-            UrgencyLog().close()
-
-###############################################################################
-
-if __name__ == '__main__':
-    main()
diff --git a/dak/process_unchecked.py b/dak/process_unchecked.py
deleted file mode 100755 (executable)
index 8a3e49d..0000000
+++ /dev/null
@@ -1,593 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Checks Debian packages from Incoming
-@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006  James Troup <james@nocrew.org>
-@copyright: 2009  Joerg Jaspert <joerg@debian.org>
-@copyright: 2009  Mark Hymers <mhy@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-# Originally based on dinstall by Guy Maor <maor@debian.org>
-
-################################################################################
-
-# Computer games don't affect kids. I mean if Pacman affected our generation as
-# kids, we'd all run around in a darkened room munching pills and listening to
-# repetitive music.
-#         -- Unknown
-
-################################################################################
-
-import errno
-import fcntl
-import os
-import sys
-import traceback
-import apt_pkg
-
-from daklib.dbconn import *
-from daklib import daklog
-from daklib.queue import *
-from daklib import utils
-from daklib.textutils import fix_maintainer
-from daklib.dak_exceptions import *
-from daklib.regexes import re_default_answer
-from daklib.summarystats import SummaryStats
-from daklib.holding import Holding
-from daklib.config import Config
-
-from types import *
-
-################################################################################
-
-
-################################################################################
-
-# Globals
-Options = None
-Logger = None
-
-###############################################################################
-
-def init():
-    global Options
-
-    apt_pkg.init()
-    cnf = Config()
-
-    Arguments = [('a',"automatic","Dinstall::Options::Automatic"),
-                 ('h',"help","Dinstall::Options::Help"),
-                 ('n',"no-action","Dinstall::Options::No-Action"),
-                 ('p',"no-lock", "Dinstall::Options::No-Lock"),
-                 ('s',"no-mail", "Dinstall::Options::No-Mail"),
-                 ('d',"directory", "Dinstall::Options::Directory", "HasArg")]
-
-    for i in ["automatic", "help", "no-action", "no-lock", "no-mail",
-              "override-distribution", "version", "directory"]:
-        cnf["Dinstall::Options::%s" % (i)] = ""
-
-    changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
-    Options = cnf.SubTree("Dinstall::Options")
-
-    if Options["Help"]:
-        usage()
-
-    # If we have a directory flag, use it to find our files
-    if cnf["Dinstall::Options::Directory"] != "":
-        # Note that we clobber the list of files we were given in this case
-        # so warn if the user has done both
-        if len(changes_files) > 0:
-            utils.warn("Directory provided so ignoring files given on command line")
-
-        changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"])
-
-    return changes_files
-
-################################################################################
-
-def usage (exit_code=0):
-    print """Usage: dak process-unchecked [OPTION]... [CHANGES]...
-  -a, --automatic           automatic run
-  -h, --help                show this help and exit.
-  -n, --no-action           don't do anything
-  -p, --no-lock             don't check lockfile !! for cron.daily only !!
-  -s, --no-mail             don't send any mail
-  -V, --version             display the version number and exit"""
-    sys.exit(exit_code)
-
-################################################################################
-
-def action(u):
-    cnf = Config()
-
-    # changes["distribution"] may not exist in corner cases
-    # (e.g. unreadable changes files)
-    if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], DictType):
-        u.pkg.changes["distribution"] = {}
-
-    (summary, short_summary) = u.build_summaries()
-
-    # q-unapproved hax0ring
-    queue_info = {
-         "New": { "is": is_new, "process": acknowledge_new },
-         "Autobyhand" : { "is" : is_autobyhand, "process": do_autobyhand },
-         "Byhand" : { "is": is_byhand, "process": do_byhand },
-         "OldStableUpdate" : { "is": is_oldstableupdate,
-                               "process": do_oldstableupdate },
-         "StableUpdate" : { "is": is_stableupdate, "process": do_stableupdate },
-         "Unembargo" : { "is": is_unembargo, "process": queue_unembargo },
-         "Embargo" : { "is": is_embargo, "process": queue_embargo },
-    }
-
-    queues = [ "New", "Autobyhand", "Byhand" ]
-    if cnf.FindB("Dinstall::SecurityQueueHandling"):
-        queues += [ "Unembargo", "Embargo" ]
-    else:
-        queues += [ "OldStableUpdate", "StableUpdate" ]
-
-    (prompt, answer) = ("", "XXX")
-    if Options["No-Action"] or Options["Automatic"]:
-        answer = 'S'
-
-    queuekey = ''
-
-    pi = u.package_info()
-
-    if len(u.rejects) > 0:
-        if u.upload_too_new():
-            print "SKIP (too new)\n" + pi,
-            prompt = "[S]kip, Quit ?"
-        else:
-            print "REJECT\n" + pi
-            prompt = "[R]eject, Skip, Quit ?"
-            if Options["Automatic"]:
-                answer = 'R'
-    else:
-        qu = None
-        for q in queues:
-            if queue_info[q]["is"](u):
-                qu = q
-                break
-        if qu:
-            print "%s for %s\n%s%s" % ( qu.upper(), ", ".join(u.pkg.changes["distribution"].keys()), pi, summary)
-            queuekey = qu[0].upper()
-            if queuekey in "RQSA":
-                queuekey = "D"
-                prompt = "[D]ivert, Skip, Quit ?"
-            else:
-                prompt = "[%s]%s, Skip, Quit ?" % (queuekey, qu[1:].lower())
-            if Options["Automatic"]:
-                answer = queuekey
-        else:
-            print "ACCEPT\n" + pi + summary,
-            prompt = "[A]ccept, Skip, Quit ?"
-            if Options["Automatic"]:
-                answer = 'A'
-
-    while prompt.find(answer) == -1:
-        answer = utils.our_raw_input(prompt)
-        m = re_default_answer.match(prompt)
-        if answer == "":
-            answer = m.group(1)
-        answer = answer[:1].upper()
-
-    if answer == 'R':
-        os.chdir(u.pkg.directory)
-        u.do_reject(0, pi)
-    elif answer == 'A':
-        u.pkg.add_known_changes( "Accepted" )
-        u.accept(summary, short_summary)
-        u.check_override()
-        u.remove()
-    elif answer == queuekey:
-        u.pkg.add_known_changes( qu )
-        queue_info[qu]["process"](u, summary, short_summary)
-        u.remove()
-    elif answer == 'Q':
-        sys.exit(0)
-
-################################################################################
-
-def package_to_suite(u, suite):
-    if not u.pkg.changes["distribution"].has_key(suite):
-        return False
-
-    ret = True
-
-    if not u.pkg.changes["architecture"].has_key("source"):
-        s = DBConn().session()
-        q = s.query(SrcAssociation.sa_id)
-        q = q.join(Suite).filter_by(suite_name=suite)
-        q = q.join(DBSource).filter_by(source=u.pkg.changes['source'])
-        q = q.filter_by(version=u.pkg.changes['version']).limit(1)
-
-        # NB: Careful, this logic isn't what you would think it is
-        # Source is already in {old-,}proposed-updates so no need to hold
-        # Instead, we don't move to the holding area, we just do an ACCEPT
-        if q.count() > 0:
-            ret = False
-
-        s.close()
-
-    return ret
-
-def package_to_queue(u, summary, short_summary, queue, perms=0660, build=True, announce=None):
-    cnf = Config()
-    dir = cnf["Dir::Queue::%s" % queue]
-
-    print "Moving to %s holding area" % queue.upper()
-    Logger.log(["Moving to %s" % queue, u.pkg.changes_file])
-
-    u.pkg.write_dot_dak(dir)
-    u.move_to_dir(dir, perms=perms)
-    if build:
-        get_or_set_queue(queue.lower()).autobuild_upload(u.pkg, dir)
-
-    # Check for override disparities
-    u.check_override()
-
-    # Send accept mail, announce to lists and close bugs
-    if announce and not cnf["Dinstall::Options::No-Mail"]:
-        template = os.path.join(cnf["Dir::Templates"], announce)
-        u.update_subst()
-        u.Subst["__SUITE__"] = ""
-        mail_message = utils.TemplateSubst(u.Subst, template)
-        utils.send_mail(mail_message)
-        u.announce(short_summary, True)
-
-################################################################################
-
-def is_unembargo(u):
-    session = DBConn().session()
-    cnf = Config()
-
-    q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes)
-    if q.rowcount > 0:
-        session.close()
-        return True
-
-    oldcwd = os.getcwd()
-    os.chdir(cnf["Dir::Queue::Disembargo"])
-    disdir = os.getcwd()
-    os.chdir(oldcwd)
-
-    ret = False
-
-    if u.pkg.directory == disdir:
-        if u.pkg.changes["architecture"].has_key("source"):
-            if not Options["No-Action"]:
-                session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes)
-                session.commit()
-
-            ret = True
-
-    session.close()
-
-    return ret
-
-def queue_unembargo(u, summary, short_summary):
-    return package_to_queue(u, summary, short_summary, "Unembargoed",
-                            perms=0660, build=True, announce='process-unchecked.accepted')
-
-################################################################################
-
-def is_embargo(u):
-    # if embargoed queues are enabled always embargo
-    return True
-
-def queue_embargo(u, summary, short_summary):
-    return package_to_queue(u, summary, short_summary, "Unembargoed",
-                            perms=0660, build=True, announce='process-unchecked.accepted')
-
-################################################################################
-
-def is_stableupdate(u):
-    return package_to_suite(u, 'proposed-updates')
-
-def do_stableupdate(u, summary, short_summary):
-    return package_to_queue(u, summary, short_summary, "ProposedUpdates",
-                            perms=0664, build=False, announce=None)
-
-################################################################################
-
-def is_oldstableupdate(u):
-    return package_to_suite(u, 'oldstable-proposed-updates')
-
-def do_oldstableupdate(u, summary, short_summary):
-    return package_to_queue(u, summary, short_summary, "OldProposedUpdates",
-                            perms=0664, build=False, announce=None)
-
-################################################################################
-
-def is_autobyhand(u):
-    cnf = Config()
-
-    all_auto = 1
-    any_auto = 0
-    for f in u.pkg.files.keys():
-        if u.pkg.files[f].has_key("byhand"):
-            any_auto = 1
-
-            # filename is of form "PKG_VER_ARCH.EXT" where PKG, VER and ARCH
-            # don't contain underscores, and ARCH doesn't contain dots.
-            # further VER matches the .changes Version:, and ARCH should be in
-            # the .changes Architecture: list.
-            if f.count("_") < 2:
-                all_auto = 0
-                continue
-
-            (pckg, ver, archext) = f.split("_", 2)
-            if archext.count(".") < 1 or u.pkg.changes["version"] != ver:
-                all_auto = 0
-                continue
-
-            ABH = cnf.SubTree("AutomaticByHandPackages")
-            if not ABH.has_key(pckg) or \
-              ABH["%s::Source" % (pckg)] != u.pkg.changes["source"]:
-                print "not match %s %s" % (pckg, u.pkg.changes["source"])
-                all_auto = 0
-                continue
-
-            (arch, ext) = archext.split(".", 1)
-            if arch not in u.pkg.changes["architecture"]:
-                all_auto = 0
-                continue
-
-            u.pkg.files[f]["byhand-arch"] = arch
-            u.pkg.files[f]["byhand-script"] = ABH["%s::Script" % (pckg)]
-
-    return any_auto and all_auto
-
-def do_autobyhand(u, summary, short_summary):
-    print "Attempting AUTOBYHAND."
-    byhandleft = True
-    for f, entry in u.pkg.files.items():
-        byhandfile = f
-
-        if not entry.has_key("byhand"):
-            continue
-
-        if not entry.has_key("byhand-script"):
-            byhandleft = True
-            continue
-
-        os.system("ls -l %s" % byhandfile)
-
-        result = os.system("%s %s %s %s %s" % (
-                entry["byhand-script"],
-                byhandfile,
-                u.pkg.changes["version"],
-                entry["byhand-arch"],
-                os.path.abspath(u.pkg.changes_file)))
-
-        if result == 0:
-            os.unlink(byhandfile)
-            del entry
-        else:
-            print "Error processing %s, left as byhand." % (f)
-            byhandleft = True
-
-    if byhandleft:
-        do_byhand(u, summary, short_summary)
-    else:
-        u.accept(summary, short_summary)
-        u.check_override()
-        # XXX: We seem to be missing a u.remove() here
-        #      This might explain why we get byhand leftovers in unchecked - mhy
-
-################################################################################
-
-def is_byhand(u):
-    for f in u.pkg.files.keys():
-        if u.pkg.files[f].has_key("byhand"):
-            return True
-    return False
-
-def do_byhand(u, summary, short_summary):
-    return package_to_queue(u, summary, short_summary, "Byhand",
-                            perms=0660, build=False, announce=None)
-
-################################################################################
-
-def is_new(u):
-    for f in u.pkg.files.keys():
-        if u.pkg.files[f].has_key("new"):
-            return True
-    return False
-
-def acknowledge_new(u, summary, short_summary):
-    cnf = Config()
-
-    print "Moving to NEW holding area."
-    Logger.log(["Moving to new", u.pkg.changes_file])
-
-    u.pkg.write_dot_dak(cnf["Dir::Queue::New"])
-    u.move_to_dir(cnf["Dir::Queue::New"], perms=0640, changesperms=0644)
-
-    if not Options["No-Mail"]:
-        print "Sending new ack."
-        template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
-        u.update_subst()
-        u.Subst["__SUMMARY__"] = summary
-        new_ack_message = utils.TemplateSubst(u.Subst, template)
-        utils.send_mail(new_ack_message)
-
-################################################################################
-
-# reprocess is necessary for the case of foo_1.2-1 and foo_1.2-2 in
-# Incoming. -1 will reference the .orig.tar.gz, but -2 will not.
-# Upload.check_dsc_against_db() can find the .orig.tar.gz but it will
-# not have processed it during it's checks of -2.  If -1 has been
-# deleted or otherwise not checked by 'dak process-unchecked', the
-# .orig.tar.gz will not have been checked at all.  To get round this,
-# we force the .orig.tar.gz into the .changes structure and reprocess
-# the .changes file.
-
-def process_it(changes_file):
-    global Logger
-
-    cnf = Config()
-
-    holding = Holding()
-
-    u = Upload()
-    u.pkg.changes_file = changes_file
-    u.pkg.directory = os.getcwd()
-    u.logger = Logger
-    origchanges = os.path.join(u.pkg.directory, u.pkg.changes_file)
-
-    # Some defaults in case we can't fully process the .changes file
-    u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"]
-    u.pkg.changes["changedby2047"] = cnf["Dinstall::MyEmailAddress"]
-
-    # debian-{devel-,}-changes@lists.debian.org toggles write access based on this header
-    bcc = "X-DAK: dak process-unchecked"
-    if cnf.has_key("Dinstall::Bcc"):
-        u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
-    else:
-        u.Subst["__BCC__"] = bcc
-
-    # Remember where we are so we can come back after cd-ing into the
-    # holding directory.  TODO: Fix this stupid hack
-    u.prevdir = os.getcwd()
-
-    # TODO: Figure out something better for this (or whether it's even
-    #       necessary - it seems to have been for use when we were
-    #       still doing the is_unchecked check; reprocess = 2)
-    u.reprocess = 1
-
-    try:
-        # If this is the Real Thing(tm), copy things into a private
-        # holding directory first to avoid replaceable file races.
-        if not Options["No-Action"]:
-            os.chdir(cnf["Dir::Queue::Holding"])
-
-            # Absolutize the filename to avoid the requirement of being in the
-            # same directory as the .changes file.
-            holding.copy_to_holding(origchanges)
-
-            # Relativize the filename so we use the copy in holding
-            # rather than the original...
-            changespath = os.path.basename(u.pkg.changes_file)
-
-        (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changespath)
-
-        if u.pkg.changes["fingerprint"]:
-            valid_changes_p = u.load_changes(changespath)
-        else:
-            valid_changes_p = False
-            u.rejects.extend(rejects)
-
-        if valid_changes_p:
-            while u.reprocess:
-                u.check_distributions()
-                u.check_files(not Options["No-Action"])
-                valid_dsc_p = u.check_dsc(not Options["No-Action"])
-                if valid_dsc_p and not Options["No-Action"]:
-                    u.check_source()
-                    u.check_lintian()
-                u.check_hashes()
-                u.check_urgency()
-                u.check_timestamps()
-                u.check_signed_by_key()
-
-        action(u)
-
-    except (SystemExit, KeyboardInterrupt):
-        raise
-
-    except:
-        print "ERROR"
-        traceback.print_exc(file=sys.stderr)
-
-    # Restore previous WD
-    os.chdir(u.prevdir)
-
-###############################################################################
-
-def main():
-    global Options, Logger
-
-    cnf = Config()
-    changes_files = init()
-
-    # -n/--dry-run invalidates some other options which would involve things happening
-    if Options["No-Action"]:
-        Options["Automatic"] = ""
-
-    # Initialize our Holding singleton
-    holding = Holding()
-
-    # Ensure all the arguments we were given are .changes files
-    for f in changes_files:
-        if not f.endswith(".changes"):
-            utils.warn("Ignoring '%s' because it's not a .changes file." % (f))
-            changes_files.remove(f)
-
-    if changes_files == []:
-        if cnf["Dinstall::Options::Directory"] == "":
-            utils.fubar("Need at least one .changes file as an argument.")
-        else:
-            sys.exit(0)
-
-    # Check that we aren't going to clash with the daily cron job
-    if not Options["No-Action"] and os.path.exists("%s/daily.lock" % (cnf["Dir::Lock"])) and not Options["No-Lock"]:
-        utils.fubar("Archive maintenance in progress.  Try again later.")
-
-    # Obtain lock if not in no-action mode and initialize the log
-    if not Options["No-Action"]:
-        lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
-        try:
-            fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
-        except IOError, e:
-            if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN':
-                utils.fubar("Couldn't obtain lock; assuming another 'dak process-unchecked' is already running.")
-            else:
-                raise
-        Logger = daklog.Logger(cnf, "process-unchecked")
-
-    # Sort the .changes files so that we process sourceful ones first
-    changes_files.sort(utils.changes_compare)
-
-    # Process the changes files
-    for changes_file in changes_files:
-        print "\n" + changes_file
-        try:
-            process_it (changes_file)
-        finally:
-            if not Options["No-Action"]:
-                holding.clean()
-
-    accept_count = SummaryStats().accept_count
-    accept_bytes = SummaryStats().accept_bytes
-
-    if accept_count:
-        sets = "set"
-        if accept_count > 1:
-            sets = "sets"
-        print "Accepted %d package %s, %s." % (accept_count, sets, utils.size_type(int(accept_bytes)))
-        Logger.log(["total",accept_count,accept_bytes])
-
-    if not Options["No-Action"]:
-        Logger.close()
-
-################################################################################
-
-if __name__ == '__main__':
-    main()
diff --git a/dak/process_upload.py b/dak/process_upload.py
new file mode 100755 (executable)
index 0000000..cf1594a
--- /dev/null
@@ -0,0 +1,420 @@
+#!/usr/bin/env python
+
+"""
+Checks Debian packages from Incoming
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006  James Troup <james@nocrew.org>
+@copyright: 2009  Joerg Jaspert <joerg@debian.org>
+@copyright: 2009  Mark Hymers <mhy@debian.org>
+@copyright: 2009  Frank Lichtenheld <djpig@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+# based on process-unchecked and process-accepted
+
+## pu|pa: locking (daily.lock)
+## pu|pa: parse arguments -> list of changes files
+## pa: initialize urgency log
+## pu|pa: sort changes list
+
+## foreach changes:
+###  pa: load dak file
+##   pu: copy CHG to tempdir
+##   pu: check CHG signature
+##   pu: parse changes file
+##   pu: checks:
+##     pu: check distribution (mappings, rejects)
+##     pu: copy FILES to tempdir
+##     pu: check whether CHG already exists in CopyChanges
+##     pu: check whether FILES already exist in one of the policy queues
+##     for deb in FILES:
+##       pu: extract control information
+##       pu: various checks on control information
+##       pu|pa: search for source (in CHG, projectb, policy queues)
+##       pu|pa: check whether "Version" fulfills target suite requirements/suite propagation
+##       pu|pa: check whether deb already exists in the pool
+##     for src in FILES:
+##       pu: various checks on filenames and CHG consistency
+##       pu: if isdsc: check signature
+##     for file in FILES:
+##       pu: various checks
+##       pu: NEW?
+##       //pu: check whether file already exists in the pool
+##       pu: store what "Component" the package is currently in
+##     pu: check whether we found everything we were looking for in CHG
+##     pu: check the DSC:
+##       pu: check whether we need and have ONE DSC
+##       pu: parse the DSC
+##       pu: various checks //maybe drop some of them in favor of lintian
+##       pu|pa: check whether "Version" fulfills target suite requirements/suite propagation
+##       pu: check whether DSC_FILES is consistent with "Format"
+##       for src in DSC_FILES:
+##         pu|pa: check whether file already exists in the pool (with special handling for .orig.tar.gz)
+##     pu: create new tempdir
+##     pu: create symlink mirror of source
+##     pu: unpack source
+##     pu: extract changelog information for BTS
+##     //pu: create missing .orig symlink
+##     pu: check with lintian
+##     for file in FILES:
+##       pu: check checksums and sizes
+##     for file in DSC_FILES:
+##       pu: check checksums and sizes
+##     pu: CHG: check urgency
+##     for deb in FILES:
+##       pu: extract contents list and check for dubious timestamps
+##     pu: check that the uploader is actually allowed to upload the package
+###  pa: install:
+###    if stable_install:
+###      pa: remove from p-u
+###      pa: add to stable
+###      pa: move CHG to morgue
+###      pa: append data to ChangeLog
+###      pa: send mail
+###      pa: remove .dak file
+###    else:
+###      pa: add dsc to db:
+###        for file in DSC_FILES:
+###          pa: add file to file
+###          pa: add file to dsc_files
+###        pa: create source entry
+###        pa: update source associations
+###        pa: update src_uploaders
+###      for deb in FILES:
+###        pa: add deb to db:
+###          pa: add file to file
+###          pa: find source entry
+###          pa: create binaries entry
+###          pa: update binary associations
+###      pa: .orig component move
+###      pa: move files to pool
+###      pa: save CHG
+###      pa: move CHG to done/
+###      pa: change entry in queue_build
+##   pu: use dispatch table to choose target queue:
+##     if NEW:
+##       pu: write .dak file
+##       pu: move to NEW
+##       pu: send mail
+##     elsif AUTOBYHAND:
+##       pu: run autobyhand script
+##       pu: if stuff left, do byhand or accept
+##     elsif targetqueue in (oldstable, stable, embargo, unembargo):
+##       pu: write .dak file
+##       pu: check overrides
+##       pu: move to queue
+##       pu: send mail
+##     else:
+##       pu: write .dak file
+##       pu: move to ACCEPTED
+##       pu: send mails
+##       pu: create files for BTS
+##       pu: create entry in queue_build
+##       pu: check overrides
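+##
+## (The "pu" steps above are implemented by process_it()/action() below;
+## the "pa" install steps now live in Upload.accept() in daklib/queue.py
+## and the add_dsc_to_db()/add_deb_to_db() helpers in daklib/dbconn.py,
+## both changed later in this commit.)
+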
+import errno
+import fcntl
+import os
+import sys
+from types import DictType    # used by action() below
+#from datetime import datetime
+import traceback
+import apt_pkg
+
+from daklib import daklog
+from daklib.queue import *
+from daklib.queue_install import *
+from daklib import utils
+from daklib.dbconn import *
+#from daklib.dak_exceptions import *
+from daklib.regexes import re_default_answer
+from daklib.urgencylog import UrgencyLog
+from daklib.summarystats import SummaryStats
+from daklib.holding import Holding
+from daklib.config import Config
+
+###############################################################################
+
+Options = None
+Logger = None
+
+###############################################################################
+
+def usage (exit_code=0):
+    print """Usage: dak process-upload [OPTION]... [CHANGES]...
+  -a, --automatic           automatic run
+  -h, --help                show this help and exit.
+  -n, --no-action           don't do anything
+  -p, --no-lock             don't check lockfile !! for cron.daily only !!
+  -s, --no-mail             don't send any mail
+  -V, --version             display the version number and exit"""
+    sys.exit(exit_code)
+
+###############################################################################
+
+def action(u):
+    cnf = Config()
+    holding = Holding()
+
+    # changes["distribution"] may not exist in corner cases
+    # (e.g. unreadable changes files)
+    if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], DictType):
+        u.pkg.changes["distribution"] = {}
+
+    (summary, short_summary) = u.build_summaries()
+
+    (prompt, answer) = ("", "XXX")
+    if Options["No-Action"] or Options["Automatic"]:
+        answer = 'S'
+
+    queuekey = ''
+
+    pi = u.package_info()
+
+    if len(u.rejects) > 0:
+        if u.upload_too_new():
+            print "SKIP (too new)\n" + pi,
+            prompt = "[S]kip, Quit ?"
+        else:
+            print "REJECT\n" + pi
+            prompt = "[R]eject, Skip, Quit ?"
+            if Options["Automatic"]:
+                answer = 'R'
+    else:
+        qu = determine_target(u)
+        if qu:
+            print "%s for %s\n%s%s" % ( qu.upper(), ", ".join(u.pkg.changes["distribution"].keys()), pi, summary)
+            queuekey = qu[0].upper()
+            if queuekey in "RQSA":
+                queuekey = "D"
+                prompt = "[D]ivert, Skip, Quit ?"
+            else:
+                prompt = "[%s]%s, Skip, Quit ?" % (queuekey, qu[1:].lower())
+            if Options["Automatic"]:
+                answer = queuekey
+        else:
+            print "ACCEPT\n" + pi + summary,
+            prompt = "[A]ccept, Skip, Quit ?"
+            if Options["Automatic"]:
+                answer = 'A'
+
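+    # re_default_answer is assumed to capture the bracketed default answer
+    # from the prompt (e.g. 'A' from "[A]ccept, Skip, Quit ?"), applied
+    # when the operator just presses return.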
+    while prompt.find(answer) == -1:
+        answer = utils.our_raw_input(prompt)
+        m = re_default_answer.match(prompt)
+        if answer == "":
+            answer = m.group(1)
+        answer = answer[:1].upper()
+
+    session = DBConn().session()
+
+    if answer == 'R':
+        os.chdir(u.pkg.directory)
+        u.do_reject(0, pi)
+    elif answer == 'A':
+        u.pkg.add_known_changes(holding.holding_dir, session)
+        u.accept(summary, short_summary, session)
+        u.check_override()
+        u.remove()
+    elif answer == queuekey:
+        u.pkg.add_known_changes(holding.holding_dir, session)
+        QueueInfo[qu]["process"](u, summary, short_summary, session)
+        u.remove()
+    elif answer == 'Q':
+        sys.exit(0)
+
+    session.commit()
+
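+# A sketch of the QueueInfo dispatch table from daklib.queue_install, as
+# assumed by the determine_target()/action() pairing above (queue_install
+# itself is a new file not shown in this hunk):
+#
+#   QueueInfo = {
+#       "new":    { "is": is_new,    "process": acknowledge_new },
+#       "byhand": { "is": is_byhand, "process": do_byhand },
+#       # ... one entry per holding/policy queue ...
+#   }
+#
+# determine_target(u) is assumed to return the name of the first queue
+# whose "is" predicate matches; action() then invokes that queue's
+# "process" handler with (u, summary, short_summary, session).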
+###############################################################################
+
+def cleanup():
+    h = Holding()
+    if not Options["No-Action"]:
+        h.clean()
+
+def process_it(changes_file):
+    global Logger
+
+    Logger.log(["Processing changes file", changes_file])
+
+    cnf = Config()
+
+    holding = Holding()
+
+    u = Upload()
+    u.pkg.changes_file = changes_file
+    u.pkg.directory = os.getcwd()
+    u.logger = Logger
+    origchanges = os.path.abspath(u.pkg.changes_file)
+
+    # Some defaults in case we can't fully process the .changes file
+    u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"]
+    u.pkg.changes["changedby2047"] = cnf["Dinstall::MyEmailAddress"]
+
+    # debian-{devel-,}-changes@lists.debian.org toggles write access based on this header
+    bcc = "X-DAK: dak process-upload"
+    if cnf.has_key("Dinstall::Bcc"):
+        u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
+    else:
+        u.Subst["__BCC__"] = bcc
+
+    # Remember where we are so we can come back after cd-ing into the
+    # holding directory.  TODO: Fix this stupid hack
+    u.prevdir = os.getcwd()
+
+    try:
+        # If this is the Real Thing(tm), copy things into a private
+        # holding directory first to avoid replaceable file races.
+        if not Options["No-Action"]:
+            os.chdir(cnf["Dir::Queue::Holding"])
+
+            # Absolutize the filename to avoid the requirement of being in the
+            # same directory as the .changes file.
+            holding.copy_to_holding(origchanges)
+
+            # Relativize the filename so we use the copy in holding
+            # rather than the original...
+            changespath = os.path.basename(u.pkg.changes_file)
+        else:
+            changespath = origchanges
+
+        (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changespath)
+
+        if u.pkg.changes["fingerprint"]:
+            valid_changes_p = u.load_changes(changespath)
+        else:
+            valid_changes_p = False
+            u.rejects.extend(rejects)
+
+        if valid_changes_p:
+            u.check_distributions()
+            u.check_files(not Options["No-Action"])
+            valid_dsc_p = u.check_dsc(not Options["No-Action"])
+            if valid_dsc_p and not Options["No-Action"]:
+                u.check_source()
+                u.check_lintian()
+            u.check_hashes()
+            u.check_urgency()
+            u.check_timestamps()
+            u.check_signed_by_key()
+
+        action(u)
+
+    except (SystemExit, KeyboardInterrupt):
+        cleanup()
+        raise
+
+    except:
+        print "ERROR"
+        traceback.print_exc(file=sys.stderr)
+
+    cleanup()
+    # Restore previous WD
+    os.chdir(u.prevdir)
+
+###############################################################################
+
+def main():
+    global Options, Logger
+
+    cnf = Config()
+    summarystats = SummaryStats()
+    log_urgency = False
+
+    DBConn()
+
+    Arguments = [('a',"automatic","Dinstall::Options::Automatic"),
+                 ('h',"help","Dinstall::Options::Help"),
+                 ('n',"no-action","Dinstall::Options::No-Action"),
+                 ('p',"no-lock", "Dinstall::Options::No-Lock"),
+                 ('s',"no-mail", "Dinstall::Options::No-Mail"),
+                 ('d',"directory", "Dinstall::Options::Directory", "HasArg")]
+
+    for i in ["automatic", "help", "no-action", "no-lock", "no-mail",
+              "version", "directory"]:
+        if not cnf.has_key("Dinstall::Options::%s" % (i)):
+            cnf["Dinstall::Options::%s" % (i)] = ""
+
+    changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
+    Options = cnf.SubTree("Dinstall::Options")
+
+    if Options["Help"]:
+        usage()
+
+    # -n/--dry-run invalidates some other options which would involve things happening
+    if Options["No-Action"]:
+        Options["Automatic"] = ""
+
+    # Check that we aren't going to clash with the daily cron job
+    if not Options["No-Action"] and os.path.exists("%s/daily.lock" % (cnf["Dir::Lock"])) and not Options["No-Lock"]:
+        utils.fubar("Archive maintenance in progress.  Try again later.")
+
+    # Obtain lock if not in no-action mode and initialize the log
+    if not Options["No-Action"]:
+        lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
+        try:
+            fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+        except IOError, e:
+            if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN':
+                utils.fubar("Couldn't obtain lock; assuming another 'dak process-upload' is already running.")
+            else:
+                raise
+        if cnf.get("Dir::UrgencyLog"):
+            # Initialise UrgencyLog()
+            log_urgency = True
+            UrgencyLog()
+
+    Logger = daklog.Logger(cnf, "process-upload", Options["No-Action"])
+
+    # If we have a directory flag, use it to find our files
+    if cnf["Dinstall::Options::Directory"] != "":
+        # Note that we clobber the list of files we were given in this case
+        # so warn if the user has done both
+        if len(changes_files) > 0:
+            utils.warn("Directory provided so ignoring files given on command line")
+
+        changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"])
+        Logger.log(["Using changes files from directory", cnf["Dinstall::Options::Directory"], len(changes_files)])
+    elif len(changes_files) == 0:
+        utils.fubar("No changes files given and no directory specified")
+    else:
+        Logger.log(["Using changes files from command-line", len(changes_files)])
+
+    # Sort the .changes files so that we process sourceful ones first
+    changes_files.sort(utils.changes_compare)
+
+    # Process the changes files
+    for changes_file in changes_files:
+        print "\n" + changes_file
+        session = DBConn().session()
+        process_it(changes_file)
+        session.close()
+
+    if summarystats.accept_count:
+        sets = "set"
+        if summarystats.accept_count > 1:
+            sets = "sets"
+        sys.stderr.write("Installed %d package %s, %s.\n" % (summarystats.accept_count, sets,
+                                                             utils.size_type(int(summarystats.accept_bytes))))
+        Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
+
+    if not Options["No-Action"]:
+        if log_urgency:
+            UrgencyLog().close()
+    Logger.close()
+
+###############################################################################
+
+if __name__ == '__main__':
+    main()
index 4e7704e42a57883e2f2643d83a4630ead71e8309..a296bdd2805d7d08acff218fd4d6d7336211fc3b 100755 (executable)
@@ -44,7 +44,7 @@ from daklib.dak_exceptions import DBUpdateError
 ################################################################################
 
 Cnf = None
-required_database_schema = 21
+required_database_schema = 22
 
 ################################################################################
 
index fd09cb7f2086b4c96ebaeffd21da5a047ef6a31a..596b74618b85bfd9adef2579f8724b528bfd6ca7 100755 (executable)
@@ -177,198 +177,51 @@ class Changes(object):
 
         return summary
 
+    @session_wrapper
     def remove_known_changes(self, session=None):
-        if session is None:
-            session = DBConn().session()
-            privatetrans = True
-
         session.delete(get_knownchange(self.changes_file, session))
 
-        if privatetrans:
-            session.commit()
-            session.close()
-
-
     def mark_missing_fields(self):
         """add "missing" in fields which we will require for the known_changes table"""
         for key in ['urgency', 'maintainer', 'fingerprint', 'changed-by' ]:
             if (not self.changes.has_key(key)) or (not self.changes[key]):
                 self.changes[key]='missing'
 
+    @session_wrapper
     def add_known_changes(self, dirpath, session=None):
         """Add this changes file to the known_changes table"""
         cnf = Config()
-        privatetrans = False
-        if session is None:
-            session = DBConn().session()
-            privatetrans = True
 
         changesfile = os.path.join(dirpath, self.changes_file)
         filetime = datetime.datetime.fromtimestamp(os.path.getctime(changesfile))
 
         self.mark_missing_fields()
 
+        multivalues = {}
+        for key in ("distribution", "architecture", "binary"):
+            if isinstance(self.changes[key], dict):
+                multivalues[key] = ", ".join(self.changes[key].keys())
+            else:
+                multivalues[key] = self.changes[key]
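+        # (e.g. a "distribution" dict like {'unstable': '', 'experimental': ''}
+        # flattens to the string "unstable, experimental" for the DB row)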
+
         session.execute(
             """INSERT INTO known_changes
               (changesname, seen, source, binaries, architecture, version,
               distribution, urgency, maintainer, fingerprint, changedby, date)
               VALUES (:changesfile,:filetime,:source,:binary, :architecture,
               :version,:distribution,:urgency,:maintainer,:fingerprint,:changedby,:date)""",
-              { 'changesfile':self.changes_file,
-                'filetime':filetime,
-                'source':self.changes["source"],
-                'binary':self.changes["binary"],
-                'architecture':self.changes["architecture"],
-                'version':self.changes["version"],
-                'distribution':self.changes["distribution"],
-                'urgency':self.changes["urgency"],
-                'maintainer':self.changes["maintainer"],
-                'fingerprint':self.changes["fingerprint"],
-                'changedby':self.changes["changed-by"],
-                'date':self.changes["date"]} )
-
-        if privatetrans:
-            session.commit()
-            session.close()
-
-    def load_dot_dak(self, changesfile):
-        """
-        Update ourself by reading a previously created cPickle .dak dumpfile.
-        """
-
-        self.changes_file = changesfile
-        dump_filename = self.changes_file[:-8]+".dak"
-        dump_file = open_file(dump_filename)
-
-        p = Unpickler(dump_file)
-
-        self.changes.update(p.load())
-        self.dsc.update(p.load())
-        self.files.update(p.load())
-        self.dsc_files.update(p.load())
-
-        next_obj = p.load()
-        if isinstance(next_obj, dict):
-            self.orig_files.update(next_obj)
-        else:
-            # Auto-convert old dak files to new format supporting
-            # multiple tarballs
-            orig_tar_gz = None
-            for dsc_file in self.dsc_files.keys():
-                if dsc_file.endswith(".orig.tar.gz"):
-                    orig_tar_gz = dsc_file
-            self.orig_files[orig_tar_gz] = {}
-            if next_obj != None:
-                self.orig_files[orig_tar_gz]["id"] = next_obj
-            next_obj = p.load()
-            if next_obj != None and next_obj != "":
-                self.orig_files[orig_tar_gz]["location"] = next_obj
-            if len(self.orig_files[orig_tar_gz]) == 0:
-                del self.orig_files[orig_tar_gz]
-
-        dump_file.close()
-
-    def sanitised_files(self):
-        ret = {}
-        for name, entry in self.files.items():
-            ret[name] = {}
-            for i in CHANGESFIELDS_FILES:
-                if entry.has_key(i):
-                    ret[name][i] = entry[i]
-
-        return ret
-
-    def sanitised_changes(self):
-        ret = {}
-        # Mandatory changes fields
-        for i in CHANGESFIELDS_MANDATORY:
-            ret[i] = self.changes[i]
-
-        # Optional changes fields
-        for i in CHANGESFIELDS_OPTIONAL:
-            if self.changes.has_key(i):
-                ret[i] = self.changes[i]
-
-        return ret
-
-    def sanitised_dsc(self):
-        ret = {}
-        for i in CHANGESFIELDS_DSC:
-            if self.dsc.has_key(i):
-                ret[i] = self.dsc[i]
-
-        return ret
-
-    def sanitised_dsc_files(self):
-        ret = {}
-        for name, entry in self.dsc_files.items():
-            ret[name] = {}
-            # Mandatory dsc_files fields
-            for i in CHANGESFIELDS_DSCFILES_MANDATORY:
-                ret[name][i] = entry[i]
-
-            # Optional dsc_files fields
-            for i in CHANGESFIELDS_DSCFILES_OPTIONAL:
-                if entry.has_key(i):
-                    ret[name][i] = entry[i]
-
-        return ret
-
-    def sanitised_orig_files(self):
-        ret = {}
-        for name, entry in self.orig_files.items():
-            ret[name] = {}
-            # Optional orig_files fields
-            for i in CHANGESFIELDS_ORIGFILES:
-                if entry.has_key(i):
-                    ret[name][i] = entry[i]
-
-        return ret
-
-    def write_dot_dak(self, dest_dir):
-        """
-        Dump ourself into a cPickle file.
-
-        @type dest_dir: string
-        @param dest_dir: Path where the dumpfile should be stored
-
-        @note: This could just dump the dictionaries as is, but I'd like to avoid this so
-               there's some idea of what process-accepted & process-new use from
-               process-unchecked. (JT)
-
-        """
-
-        dump_filename = os.path.join(dest_dir, self.changes_file[:-8] + ".dak")
-        dump_file = open_file(dump_filename, 'w')
-
-        try:
-            os.chmod(dump_filename, 0664)
-        except OSError, e:
-            # chmod may fail when the dumpfile is not owned by the user
-            # invoking dak (like e.g. when NEW is processed by a member
-            # of ftpteam)
-            if e.errno == EPERM:
-                perms = stat.S_IMODE(os.stat(dump_filename)[stat.ST_MODE])
-                # security precaution, should never happen unless a weird
-                # umask is set anywhere
-                if perms & stat.S_IWOTH:
-                    fubar("%s is world writable and chmod failed." % \
-                        (dump_filename,))
-                # ignore the failed chmod otherwise as the file should
-                # already have the right privileges and is just, at worst,
-                # unreadable for world
-            else:
-                raise
-
-        p = Pickler(dump_file, 1)
-
-        p.dump(self.sanitised_changes())
-        p.dump(self.sanitised_dsc())
-        p.dump(self.sanitised_files())
-        p.dump(self.sanitised_dsc_files())
-        p.dump(self.sanitised_orig_files())
-
-        dump_file.close()
+              { 'changesfile':  self.changes_file,
+                'filetime':     filetime,
+                'source':       self.changes["source"],
+                'binary':       multivalues["binary"],
+                'architecture': multivalues["architecture"],
+                'version':      self.changes["version"],
+                'distribution': multivalues["distribution"],
+                'urgency':      self.changes["urgency"],
+                'maintainer':   self.changes["maintainer"],
+                'fingerprint':  self.changes["fingerprint"],
+                'changedby':    self.changes["changed-by"],
+                'date':         self.changes["date"]} )
 
     def unknown_files_fields(self, name):
         return sorted(list( set(self.files[name].keys()) -
index 9e5afec7444cb36dfbbfc6632e38a4c91ecaf84f..5b30fce99612469c74991d42fe3fbc5871ff532e 100755 (executable)
@@ -37,7 +37,7 @@ import os
 import re
 import psycopg2
 import traceback
-import datetime
+from datetime import datetime
 
 from inspect import getargspec
 
@@ -50,8 +50,6 @@ from sqlalchemy import types as sqltypes
 from sqlalchemy.exc import *
 from sqlalchemy.orm.exc import NoResultFound
 
-# Only import Config until Queue stuff is changed to store its config
-# in the database
 from config import Config
 from singleton import Singleton
 from textutils import fix_maintainer
@@ -125,6 +123,8 @@ def session_wrapper(fn):
 
     return wrapped
 
+__all__.append('session_wrapper')
+
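+# session_wrapper's body is unchanged and not shown in this hunk; the
+# contract it is assumed to provide: if the caller passes no session, a
+# private one is opened, committed and closed around the call, which is
+# what lets the privatetrans boilerplate in daklib/changes.py (also
+# touched by this commit) be deleted.  Hypothetical usage sketch
+# ('Widget' and 'mysess' are invented names):
+#
+#   @session_wrapper
+#   def get_widget(name, session=None):
+#       return session.query(Widget).filter_by(name=name).first()
+#
+#   get_widget('x')                   # wrapper manages its own session
+#   get_widget('x', session=mysess)   # caller keeps transaction control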
 ################################################################################
 
 class Architecture(object):
@@ -430,6 +430,132 @@ __all__.append('BinaryACLMap')
 
 ################################################################################
 
+class BuildQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueue %s>' % self.queue_name
+
+    def add_file_from_pool(self, poolfile):
+        """Copies a file from the pool into this build queue.  Assumes that
+        the PoolFile object is attached to the same SQLAlchemy session as
+        the BuildQueue object is.
+
+        The caller is responsible for committing after calling this function."""
+        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
+
+        # Check if we have a file of this name or this ID already
+        for f in self.queuefiles:
+            if f.fileid is not None and f.fileid == poolfile.file_id or \
+               f.poolfile.filename == poolfile_basename:
+                   # In this case, update the QueueFile entry so we
+                   # don't remove it too early
+                   f.lastused = datetime.now()
+                   DBConn().session().object_session(poolfile).add(f)
+                   return f
+
+        # Prepare BuildQueueFile object
+        qf = BuildQueueFile()
+        qf.queue_id = self.queue_id
+        qf.lastused = datetime.now()
+        qf.filename = poolfile_basename
+
+        targetpath = poolfile.fullpath
+        queuepath = os.path.join(self.path, poolfile_basename)
+
+        try:
+            if self.copy_pool_files:
+                # We need to copy instead of symlink
+                import utils
+                utils.copy(targetpath, queuepath)
+                # NULL in the fileid field implies a copy
+                qf.fileid = None
+            else:
+                os.symlink(targetpath, queuepath)
+                qf.fileid = poolfile.file_id
+        except OSError:
+            return None
+
+        # Get the same session as the PoolFile is using and add the qf to it
+        DBConn().session().object_session(poolfile).add(qf)
+
+        return qf
+
+
+__all__.append('BuildQueue')
+
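+# Hypothetical usage sketch (queue and file names invented): link a pool
+# file into a build queue; committing is deliberately left to the caller.
+#
+#   session = DBConn().session()
+#   bq = get_queue('buildd-unstable', session)
+#   for pf in get_poolfile_like_name('dak_1.0.dsc', session):
+#       if bq is not None and bq.add_file_from_pool(pf) is not None:
+#           session.commit()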
+@session_wrapper
+def get_queue(queuename, session=None):
+    """
+    Returns BuildQueue object for given C{queue name}, or None if it does
+    not exist.
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: BuildQueue
+    @return: BuildQueue object for the given queue
+    """
+
+    q = session.query(BuildQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_queue')
+
+################################################################################
+
+class BuildQueueFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueueFile %s (%s)>' % (self.filename, self.queue_id)
+
+__all__.append('BuildQueueFile')
+
+################################################################################
+
+class ChangePendingBinary(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
+
+__all__.append('ChangePendingBinary')
+
+################################################################################
+
+class ChangePendingFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingFile %s>' % self.change_pending_file_id
+
+__all__.append('ChangePendingFile')
+
+################################################################################
+
+class ChangePendingSource(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingSource %s>' % self.change_pending_source_id
+
+__all__.append('ChangePendingSource')
+
+################################################################################
+
 class Component(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -850,6 +976,39 @@ def get_poolfile_like_name(filename, session=None):
 
 __all__.append('get_poolfile_like_name')
 
+@session_wrapper
+def add_poolfile(filename, datadict, location_id, session=None):
+    """
+    Add a new file to the pool
+
+    @type filename: string
+    @param filename: filename
+
+    @type datadict: dict
+    @param datadict: dict with needed data
+
+    @type location_id: int
+    @param location_id: database id of the location
+
+    @rtype: PoolFile
+    @return: the PoolFile object created
+    """
+    poolfile = PoolFile()
+    poolfile.filename = filename
+    poolfile.filesize = datadict["size"]
+    poolfile.md5sum = datadict["md5sum"]
+    poolfile.sha1sum = datadict["sha1sum"]
+    poolfile.sha256sum = datadict["sha256sum"]
+    poolfile.location_id = location_id
+
+    session.add(poolfile)
+    # Flush to get a file id (NB: This is not a commit)
+    session.flush()
+
+    return poolfile
+
+__all__.append('add_poolfile')
+
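+# Usage sketch mirroring the callers below (add_dsc_to_db/add_deb_to_db);
+# the datadict is assumed to carry the checksum fields the function reads:
+#
+#   entry = {'size': 1234, 'md5sum': 'ab...', 'sha1sum': 'cd...',
+#            'sha256sum': 'ef...'}
+#   pf = add_poolfile('pool/main/d/dak/dak_1.0.dsc', entry,
+#                     dsc_location_id, session)
+#   # pf.file_id is valid straight away thanks to the flush, but nothing
+#   # is committed until the caller commits.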
 ################################################################################
 
 class Fingerprint(object):
@@ -1125,17 +1284,6 @@ __all__.append('get_knownchange')
 
 ################################################################################
 
-class KnownChangePendingFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<KnownChangePendingFile %s>' % self.known_change_pending_file_id
-
-__all__.append('KnownChangePendingFile')
-
-################################################################################
-
 class Location(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1502,6 +1650,17 @@ __all__.append('insert_pending_content_paths')
 
 ################################################################################
 
+class PolicyQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<PolicyQueue %s>' % self.queue_name
+
+__all__.append('PolicyQueue')
+
+################################################################################
+
 class Priority(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1572,99 +1731,6 @@ __all__.append('get_priorities')
 
 ################################################################################
 
-class Queue(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<Queue %s>' % self.queue_name
-
-    def add_file_from_pool(self, poolfile):
-        """Copies a file into the pool.  Assumes that the PoolFile object is
-        attached to the same SQLAlchemy session as the Queue object is.
-
-        The caller is responsible for committing after calling this function."""
-        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-
-        # Check if we have a file of this name or this ID already
-        for f in self.queuefiles:
-            if f.fileid is not None and f.fileid == poolfile.file_id or \
-               f.poolfile.filename == poolfile_basename:
-                   # In this case, update the QueueFile entry so we
-                   # don't remove it too early
-                   f.lastused = datetime.now()
-                   DBConn().session().object_session(pf).add(f)
-                   return f
-
-        # Prepare QueueFile object
-        qf = QueueFile()
-        qf.queue_id = self.queue_id
-        qf.lastused = datetime.now()
-        qf.filename = dest
-
-        targetpath = qf.fullpath
-        queuepath = os.path.join(self.path, poolfile_basename)
-
-        try:
-            if self.copy_pool_files:
-                # We need to copy instead of symlink
-                import utils
-                utils.copy(targetfile, queuepath)
-                # NULL in the fileid field implies a copy
-                qf.fileid = None
-            else:
-                os.symlink(targetfile, queuepath)
-                qf.fileid = poolfile.file_id
-        except OSError:
-            return None
-
-        # Get the same session as the PoolFile is using and add the qf to it
-        DBConn().session().object_session(poolfile).add(qf)
-
-        return qf
-
-
-__all__.append('Queue')
-
-@session_wrapper
-def get_queue(queuename, session=None):
-    """
-    Returns Queue object for given C{queue name}, creating it if it does not
-    exist.
-
-    @type queuename: string
-    @param queuename: The name of the queue
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: Queue
-    @return: Queue object for the given queue
-    """
-
-    q = session.query(Queue).filter_by(queue_name=queuename)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_queue')
-
-################################################################################
-
-class QueueFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<QueueFile %s (%s)>' % (self.filename, self.queue_id)
-
-__all__.append('QueueFile')
-
-################################################################################
-
 class Section(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1895,6 +1961,174 @@ __all__.append('get_source_in_suite')
 
 ################################################################################
 
+@session_wrapper
+def add_dsc_to_db(u, filename, session=None):
+    entry = u.pkg.files[filename]
+    source = DBSource()
+
+    source.source = u.pkg.dsc["source"]
+    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
+    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
+    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    source.install_date = datetime.now().date()
+
+    dsc_component = entry["component"]
+    dsc_location_id = entry["location id"]
+
+    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
+
+    # Set up a new poolfile if necessary
+    if not entry.has_key("files id") or not entry["files id"]:
+        filename = entry["pool name"] + filename
+        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
+        session.flush()
+        entry["files id"] = poolfile.file_id
+
+    source.poolfile_id = entry["files id"]
+    session.add(source)
+    session.flush()
+
+    for suite_name in u.pkg.changes["distribution"].keys():
+        sa = SrcAssociation()
+        sa.source_id = source.source_id
+        sa.suite_id = get_suite(suite_name).suite_id
+        session.add(sa)
+
+    session.flush()
+
+    # Add the source files to the DB (files and dsc_files)
+    dscfile = DSCFile()
+    dscfile.source_id = source.source_id
+    dscfile.poolfile_id = entry["files id"]
+    session.add(dscfile)
+
+    for dsc_file, dentry in u.pkg.dsc_files.items():
+        df = DSCFile()
+        df.source_id = source.source_id
+
+        # If the .orig tarball is already in the pool, it's
+        # files id is stored in dsc_files by check_dsc().
+        files_id = dentry.get("files id", None)
+
+        # Find the entry in the files hash
+        # TODO: Bail out here properly
+        dfentry = None
+        for f, e in u.pkg.files.items():
+            if f == dsc_file:
+                dfentry = e
+                break
+
+        if files_id is None:
+            filename = dfentry["pool name"] + dsc_file
+
+            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
+            # FIXME: needs to check for -1/-2 and or handle exception
+            if found and obj is not None:
+                files_id = obj.file_id
+
+            # If still not found, add it
+            if files_id is None:
+                # HACK: Force sha1sum etc into dentry
+                dentry["sha1sum"] = dfentry["sha1sum"]
+                dentry["sha256sum"] = dfentry["sha256sum"]
+                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
+                files_id = poolfile.file_id
+
+        df.poolfile_id = files_id
+        session.add(df)
+
+    session.flush()
+
+    # Add the src_uploaders to the DB
+    uploader_ids = [source.maintainer_id]
+    if u.pkg.dsc.has_key("uploaders"):
+        for up in u.pkg.dsc["uploaders"].split(","):
+            up = up.strip()
+            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
+
+    added_ids = {}
+    for up in uploader_ids:
+        if added_ids.has_key(up):
+            utils.warn("Already saw uploader %s for source %s" % (up, source.source))
+            continue
+
+        added_ids[up] = 1
+
+        su = SrcUploader()
+        su.maintainer_id = up
+        su.source_id = source.source_id
+        session.add(su)
+
+    session.flush()
+
+    return dsc_component, dsc_location_id
+
+__all__.append('add_dsc_to_db')
+
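+# Note: Upload.accept() in daklib/queue.py (changed below) drives this
+# helper, adding the .dsc first so that add_deb_to_db() can then resolve
+# a unique source id via get_sources_from_name().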
+@session_wrapper
+def add_deb_to_db(u, filename, session=None):
+    """
+    Contrary to what you might expect, this routine deals with both
+    debs and udebs.  That info is in 'dbtype', whilst 'type' is
+    'deb' for both of them
+    """
+    cnf = Config()
+    entry = u.pkg.files[filename]
+
+    bin = DBBinary()
+    bin.package = entry["package"]
+    bin.version = entry["version"]
+    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
+    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
+    bin.binarytype = entry["dbtype"]
+
+    # Find poolfile id
+    filename = entry["pool name"] + filename
+    fullpath = os.path.join(cnf["Dir::Pool"], filename)
+    if not entry.get("location id", None):
+        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], utils.where_am_i(), session).location_id
+
+    if not entry.get("files id", None):
+        poolfile = add_poolfile(filename, entry, entry["location id"], session)
+        entry["files id"] = poolfile.file_id
+
+    bin.poolfile_id = entry["files id"]
+
+    # Find source id
+    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+    if len(bin_sources) != 1:
+        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+                                  (bin.package, bin.version, bin.architecture.arch_string,
+                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
+
+    bin.source_id = bin_sources[0].source_id
+
+    # Add and flush object so it has an ID
+    session.add(bin)
+    session.flush()
+
+    # Add BinAssociations
+    for suite_name in u.pkg.changes["distribution"].keys():
+        ba = BinAssociation()
+        ba.binary_id = bin.binary_id
+        ba.suite_id = get_suite(suite_name).suite_id
+        session.add(ba)
+
+    session.flush()
+
+    # Deal with contents - disabled for now
+    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
+    #if not contents:
+    #    print "REJECT\nCould not determine contents of package %s" % bin.package
+    #    session.rollback()
+    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
+
+__all__.append('add_deb_to_db')
+
+################################################################################
+
 class SourceACL(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -2256,12 +2490,17 @@ class DBConn(Singleton):
         self.tbl_binaries = Table('binaries', self.db_meta, autoload=True)
         self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True)
         self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True)
+        self.tbl_build_queue = Table('build_queue', self.db_meta, autoload=True)
+        self.tbl_build_queue_files = Table('build_queue_files', self.db_meta, autoload=True)
         self.tbl_component = Table('component', self.db_meta, autoload=True)
         self.tbl_config = Table('config', self.db_meta, autoload=True)
         self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True)
         self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True)
         self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True)
+        self.tbl_changes_pending_binary = Table('changes_pending_binaries', self.db_meta, autoload=True)
         self.tbl_changes_pending_files = Table('changes_pending_files', self.db_meta, autoload=True)
+        self.tbl_changes_pending_source = Table('changes_pending_source', self.db_meta, autoload=True)
+        self.tbl_changes_pending_source_files = Table('changes_pending_source_files', self.db_meta, autoload=True)
         self.tbl_changes_pool_files = Table('changes_pool_files', self.db_meta, autoload=True)
         self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True)
         self.tbl_files = Table('files', self.db_meta, autoload=True)
@@ -2275,9 +2514,8 @@ class DBConn(Singleton):
         self.tbl_override = Table('override', self.db_meta, autoload=True)
         self.tbl_override_type = Table('override_type', self.db_meta, autoload=True)
         self.tbl_pending_content_associations = Table('pending_content_associations', self.db_meta, autoload=True)
+        self.tbl_policy_queue = Table('policy_queue', self.db_meta, autoload=True)
         self.tbl_priority = Table('priority', self.db_meta, autoload=True)
-        self.tbl_queue = Table('queue', self.db_meta, autoload=True)
-        self.tbl_queue_files = Table('queue_files', self.db_meta, autoload=True)
         self.tbl_section = Table('section', self.db_meta, autoload=True)
         self.tbl_source = Table('source', self.db_meta, autoload=True)
         self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True)
@@ -2287,7 +2525,7 @@ class DBConn(Singleton):
         self.tbl_suite = Table('suite', self.db_meta, autoload=True)
         self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True)
         self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True)
-        self.tbl_suite_queue_copy = Table('suite_queue_copy', self.db_meta, autoload=True)
+        self.tbl_suite_build_queue_copy = Table('suite_build_queue_copy', self.db_meta, autoload=True)
         self.tbl_uid = Table('uid', self.db_meta, autoload=True)
         self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True)
 
@@ -2306,6 +2544,12 @@ class DBConn(Singleton):
                                  binary_id = self.tbl_bin_associations.c.bin,
                                  binary = relation(DBBinary)))
 
+        mapper(BuildQueue, self.tbl_build_queue,
+               properties = dict(queue_id = self.tbl_build_queue.c.id))
+
+        mapper(BuildQueueFile, self.tbl_build_queue_files,
+               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
+                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))
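+        # 'queuefiles' is the backref that BuildQueue.add_file_from_pool()
+        # iterates when checking for an existing entry before linking.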
 
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
@@ -2372,11 +2616,25 @@ class DBConn(Singleton):
                                  poolfiles = relation(PoolFile,
                                                       secondary=self.tbl_changes_pool_files,
                                                       backref="changeslinks"),
-                                 files = relation(KnownChangePendingFile, backref="changesfile")))
+                                 files = relation(ChangePendingFile, backref="changesfile")))
 
-        mapper(KnownChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.c.id))
+        mapper(ChangePendingBinary, self.tbl_changes_pending_binary,
+               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binary.c.id))
 
+        mapper(ChangePendingFile, self.tbl_changes_pending_files,
+               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+
+        mapper(ChangePendingSource, self.tbl_changes_pending_source,
+               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
+                                 change = relation(KnownChange),
+                                 maintainer = relation(Maintainer,
+                                                       primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
+                                 changedby = relation(Maintainer,
+                                                      primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
+                                 fingerprint = relation(Fingerprint),
+                                 source_files = relation(ChangePendingFile,
+                                                         secondary=self.tbl_changes_pending_source_files,
+                                                         backref="pending_sources")))
         mapper(KeyringACLMap, self.tbl_keyring_acl_map,
                properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
                                  keyring = relation(Keyring, backref="keyring_acl_map"),
@@ -2412,16 +2670,12 @@ class DBConn(Singleton):
                properties = dict(overridetype = self.tbl_override_type.c.type,
                                  overridetype_id = self.tbl_override_type.c.id))
 
+        mapper(PolicyQueue, self.tbl_policy_queue,
+               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+
         mapper(Priority, self.tbl_priority,
                properties = dict(priority_id = self.tbl_priority.c.id))
 
-        mapper(Queue, self.tbl_queue,
-               properties = dict(queue_id = self.tbl_queue.c.id))
-
-        mapper(QueueFile, self.tbl_queue_files,
-               properties = dict(queue = relation(Queue, backref='queuefiles'),
-                                 poolfile = relation(PoolFile, backref='queueinstances')))
-
         mapper(Section, self.tbl_section,
                properties = dict(section_id = self.tbl_section.c.id))
 
@@ -2469,8 +2723,8 @@ class DBConn(Singleton):
 
         mapper(Suite, self.tbl_suite,
                properties = dict(suite_id = self.tbl_suite.c.id,
-                                 policy_queue = relation(Queue),
-                                 copy_queues = relation(Queue, secondary=self.tbl_suite_queue_copy)))
+                                 policy_queue = relation(PolicyQueue),
+                                 copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy)))
 
         mapper(SuiteArchitecture, self.tbl_suite_architectures,
                properties = dict(suite_id = self.tbl_suite_architectures.c.suite,
index 1694deb490df2b1bb0d6d23d4be80c91c7369db8..effbb4e04be847177c0fddfb2df842c30dc9cb59 100755 (executable)
@@ -38,6 +38,8 @@ import commands
 import shutil
 import textwrap
 from types import *
+from sqlalchemy.sql.expression import desc
+from sqlalchemy.orm.exc import NoResultFound
 
 import yaml
 
@@ -46,6 +48,7 @@ from changes import *
 from regexes import *
 from config import Config
 from holding import Holding
+from urgencylog import UrgencyLog
 from dbconn import *
 from summarystats import SummaryStats
 from utils import parse_changes, check_dsc_files
@@ -285,6 +288,7 @@ class Upload(object):
         for title, messages in msgs:
             if messages:
                 msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
+        msg += '\n'
 
         return msg
 
@@ -796,17 +800,11 @@ class Upload(object):
             entry["othercomponents"] = res.fetchone()[0]
 
     def check_files(self, action=True):
-        archive = utils.where_am_i()
         file_keys = self.pkg.files.keys()
         holding = Holding()
         cnf = Config()
 
-        # XXX: As far as I can tell, this can no longer happen - see
-        #      comments by AJ in old revisions - mhy
-        # if reprocess is 2 we've already done this and we're checking
-        # things again for the new .orig.tar.gz.
-        # [Yes, I'm fully aware of how disgusting this is]
-        if action and self.reprocess < 2:
+        if action:
             cwd = os.getcwd()
             os.chdir(self.pkg.directory)
             for f in file_keys:
@@ -817,36 +815,28 @@ class Upload(object):
 
             os.chdir(cwd)
 
-        # Check there isn't already a .changes or .dak file of the same name in
-        # the proposed-updates "CopyChanges" or "CopyDotDak" storage directories.
+        # Check whether we already know this changes file
         # [NB: this check must be done post-suite mapping]
         base_filename = os.path.basename(self.pkg.changes_file)
-        dot_dak_filename = base_filename[:-8] + ".dak"
 
-        for suite in self.pkg.changes["distribution"].keys():
-            copychanges = "Suite::%s::CopyChanges" % (suite)
-            if cnf.has_key(copychanges) and \
-                   os.path.exists(os.path.join(cnf[copychanges], base_filename)):
-                self.rejects.append("%s: a file with this name already exists in %s" \
-                           % (base_filename, cnf[copychanges]))
-
-            copy_dot_dak = "Suite::%s::CopyDotDak" % (suite)
-            if cnf.has_key(copy_dot_dak) and \
-                   os.path.exists(os.path.join(cnf[copy_dot_dak], dot_dak_filename)):
-                self.rejects.append("%s: a file with this name already exists in %s" \
-                           % (dot_dak_filename, Cnf[copy_dot_dak]))
-
-        self.reprocess = 0
+        session = DBConn().session()
+
+        try:
+            changes = session.query(KnownChange).filter_by(changesname=base_filename).one()
+            if not changes.approved_for:
+                self.rejects.append("%s file already known to dak" % base_filename)
+        except NoResultFound:
+            # not known, good
+            pass
+
         has_binaries = False
         has_source = False
 
-        session = DBConn().session()
-
         for f, entry in self.pkg.files.items():
             # Ensure the file does not already exist in one of the accepted directories
-            for d in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+            for d in [ "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
                 if not cnf.has_key("Dir::Queue::%s" % (d)): continue
-                if os.path.exists(cnf["Dir::Queue::%s" % (d) ] + '/' + f):
+                if os.path.exists(os.path.join(cnf["Dir::Queue::%s" % (d) ], f)):
                     self.rejects.append("%s file already exists in the %s directory." % (f, d))
 
             if not re_taint_free.match(f):
@@ -1084,15 +1074,10 @@ class Upload(object):
             self.rejects.append("%s: changelog format not recognised (empty version tree)." % (dsc_filename))
 
     def check_source(self):
-        # XXX: I'm fairly sure reprocess == 2 can never happen
-        #      AJT disabled the is_incoming check years ago - mhy
-        #      We should probably scrap or rethink the whole reprocess thing
         # Bail out if:
         #    a) there's no source
-        # or b) reprocess is 2 - we will do this check next time when orig
-        #       tarball is in 'files'
-        # or c) the orig files are MIA
+        # or b) the orig files are MIA
-        if not self.pkg.changes["architecture"].has_key("source") or self.reprocess == 2 \
+        if not self.pkg.changes["architecture"].has_key("source") \
            or len(self.pkg.orig_files) == 0:
             return
 
@@ -1493,7 +1478,7 @@ class Upload(object):
         #  or binary, whereas keys with no access might be able to
         #  upload some binaries)
         if fpr.source_acl.access_level == 'dm':
-            self.check_dm_source_upload(fpr, session)
+            self.check_dm_upload(fpr, session)
         else:
             # Check source-based permissions for other types
             if self.pkg.changes["architecture"].has_key("source"):
@@ -1837,13 +1822,13 @@ distribution."""
         return summary
 
     ###########################################################################
-
-    def accept (self, summary, short_summary, targetdir=None):
+    @session_wrapper
+    def accept (self, summary, short_summary, session=None):
         """
         Accept an upload.
 
-        This moves all files referenced from the .changes into the I{accepted}
-        queue, sends the accepted mail, announces to lists, closes bugs and
+        This moves all files referenced from the .changes into the pool,
+        sends the accepted mail, announces to lists, closes bugs and
         also checks for override disparities. If enabled it will write out
         the version history for the BTS Version Tracking and will finally call
         L{queue_build}.
@@ -1853,31 +1838,84 @@ distribution."""
 
         @type short_summary: string
         @param short_summary: Short summary
-
         """
 
         cnf = Config()
         stats = SummaryStats()
 
-        accepttemplate = os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted')
+        print "Installing."
+        self.logger.log(["installing changes", self.pkg.changes_file])
 
-        if targetdir is None:
-            targetdir = cnf["Dir::Queue::Accepted"]
+        # Add the .dsc file to the DB first
+        for newfile, entry in self.pkg.files.items():
+            if entry["type"] == "dsc":
+                dsc_component, dsc_location_id = add_dsc_to_db(self, newfile, session)
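+        # (The .dsc goes into the DB first so that dsc_component and
+        # dsc_location_id are known for the cross-component .orig handling
+        # below.)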
 
-        print "Accepting."
-        if self.logger:
-            self.logger.log(["Accepting changes", self.pkg.changes_file])
+        # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
+        for newfile, entry in self.pkg.files.items():
+            if entry["type"] == "deb":
+                add_deb_to_db(self, newfile, session)
 
-        self.pkg.write_dot_dak(targetdir)
+        # If this is a sourceful diff-only upload that is moving
+        # cross-component, we need to copy the .orig files into the
+        # new component too.
+        if self.pkg.changes["architecture"].has_key("source"):
+            for orig_file in self.pkg.orig_files.keys():
+                if not self.pkg.orig_files[orig_file].has_key("id"):
+                    continue # Skip if it's not in the pool
+                orig_file_id = self.pkg.orig_files[orig_file]["id"]
+                if self.pkg.orig_files[orig_file]["location"] == dsc_location_id:
+                    continue # Skip if the location didn't change
+
+                # Do the move
+                oldf = get_poolfile_by_id(orig_file_id, session)
+                old_filename = os.path.join(oldf.location.path, oldf.filename)
+                old_dat = {'size': oldf.filesize,   'md5sum': oldf.md5sum,
+                           'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
+
+                new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
+
+                # TODO: Care about size/md5sum collisions etc
+                (found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
+
+                if newf is None:
+                    utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
+                    newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
+
+                    # TODO: Check that there's only 1 here
+                    source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0]
+                    dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
+                    dscf.poolfile_id = newf.file_id
+                    session.add(dscf)
+                    session.flush()
+
+        # Install the files into the pool
+        for newfile, entry in self.pkg.files.items():
+            destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
+            utils.move(newfile, destination)
+            self.logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
+            stats.accept_bytes += float(entry["size"])
 
-        # Move all the files into the accepted directory
-        utils.move(self.pkg.changes_file, targetdir)
+        # Copy the .changes file across for suites which need it.
+        copy_changes = {}
+        for suite_name in self.pkg.changes["distribution"].keys():
+            if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)):
+                copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = ""
 
-        for name, entry in sorted(self.pkg.files.items()):
-            utils.move(name, targetdir)
-            stats.accept_bytes += float(entry["size"])
+        for dest in copy_changes.keys():
+            utils.copy(self.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
 
-        stats.accept_count += 1
+        # We're done - commit the database changes
+        session.commit()
+        # Our SQL session will automatically start a new transaction after
+        # the last commit
+
+        # Move the .changes into the 'done' directory
+        utils.move(self.pkg.changes_file,
+                   os.path.join(cnf["Dir::Queue::Done"], os.path.basename(self.pkg.changes_file)))
+
+        if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
+            UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
 
         # Send accept mail, announce to lists, close bugs and check for
         # override disparities
@@ -1885,7 +1923,8 @@ distribution."""
             self.update_subst()
             self.Subst["__SUITE__"] = ""
             self.Subst["__SUMMARY__"] = summary
-            mail_message = utils.TemplateSubst(self.Subst, accepttemplate)
+            mail_message = utils.TemplateSubst(self.Subst,
+                                               os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
             utils.send_mail(mail_message)
             self.announce(short_summary, 1)
 
@@ -1930,6 +1969,10 @@ distribution."""
         #if res:
         #    utils.fubar(res)
 
+        session.commit()
+
+        # Finally...
+        stats.accept_count += 1
 
     def check_override(self):
         """
@@ -1968,15 +2011,21 @@ distribution."""
     def remove(self, from_dir=None):
         """
         Used (for instance) in p-u to remove the package from unchecked
+
+        Also removes the package from the holding area.
         """
         if from_dir is None:
-            os.chdir(self.pkg.directory)
-        else:
-            os.chdir(from_dir)
+            from_dir = self.pkg.directory
+        h = Holding()
 
         for f in self.pkg.files.keys():
-            os.unlink(f)
-        os.unlink(self.pkg.changes_file)
+            os.unlink(os.path.join(from_dir, f))
+            if os.path.exists(os.path.join(h.holding_dir, f)):
+                os.unlink(os.path.join(h.holding_dir, f))
+
+        os.unlink(os.path.join(from_dir, self.pkg.changes_file))
+        if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
+            os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
 
     ###########################################################################
 
@@ -1984,9 +2033,11 @@ distribution."""
         """
         Move files to dest with certain perms/changesperms
         """
-        utils.move(self.pkg.changes_file, dest, perms=changesperms)
+        h = Holding()
+        utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
+                   dest, perms=changesperms)
         for f in self.pkg.files.keys():
-            utils.move(f, dest, perms=perms)
+            utils.move(os.path.join(h.holding_dir, f), dest, perms=perms)
 
     ###########################################################################
 
@@ -2377,6 +2428,7 @@ distribution."""
                                 # This would fix the stupidity of changing something we often iterate over
                                 # whilst we're doing it
                                 del self.pkg.files[dsc_name]
+                                dsc_entry["files id"] = i.file_id
                                 if not orig_files.has_key(dsc_name):
                                     orig_files[dsc_name] = {}
                                 orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
diff --git a/daklib/queue_install.py b/daklib/queue_install.py
new file mode 100644 (file)
index 0000000..c8fa39e
--- /dev/null
@@ -0,0 +1,286 @@
+#!/usr/bin/env python
+# vim:set et sw=4:
+
+"""
+Utility functions for process-upload
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006  James Troup <james@nocrew.org>
+@copyright: 2009  Joerg Jaspert <joerg@debian.org>
+@copyright: 2009  Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+import os
+
+from daklib import utils
+from daklib.dbconn import *
+from daklib.config import Config
+
+###############################################################################
+
+def determine_target(u):
+    cnf = Config()
+
+    queues = [ "New", "Autobyhand", "Byhand" ]
+    if cnf.FindB("Dinstall::SecurityQueueHandling"):
+        queues += [ "Unembargo", "Embargo" ]
+    else:
+        queues += [ "OldStableUpdate", "StableUpdate" ]
+
+    target = None
+    for q in queues:
+        if QueueInfo[q]["is"](u):
+            target = q
+            break
+
+    return target
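+
+# NB: the order of `queues` above matters - the first predicate that
+# matches wins, so NEW takes precedence over BYHAND and the
+# (old)stable-update checks.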
+
+################################################################################
+
+def package_to_suite(u, suite):
+    if not u.pkg.changes["distribution"].has_key(suite):
+        return False
+
+    ret = True
+
+    if not u.pkg.changes["architecture"].has_key("source"):
+        s = DBConn().session()
+        q = s.query(SrcAssociation.sa_id)
+        q = q.join(Suite).filter_by(suite_name=suite)
+        q = q.join(DBSource).filter_by(source=u.pkg.changes['source'])
+        q = q.filter_by(version=u.pkg.changes['version']).limit(1)
+
+        # NB: Careful, this logic isn't what you would think it is
+        # Source is already in {old-,}proposed-updates so no need to hold
+        # Instead, we don't move to the holding area, we just do an ACCEPT
+        if q.count() > 0:
+            ret = False
+
+        s.close()
+
+    return ret
+
+def package_to_queue(u, summary, short_summary, queue, perms=0660, build=True, announce=None):
+    cnf = Config()
+    dir = cnf["Dir::Queue::%s" % queue]
+
+    print "Moving to %s holding area" % queue.upper()
+    u.logger.log(["Moving to %s" % queue, u.pkg.changes_file])
+
+    u.move_to_dir(dir, perms=perms)
+    if build:
+        get_or_set_queue(queue.lower()).autobuild_upload(u.pkg, dir)
+
+    # Check for override disparities
+    u.check_override()
+
+    # Send accept mail, announce to lists and close bugs
+    if announce and not cnf["Dinstall::Options::No-Mail"]:
+        template = os.path.join(cnf["Dir::Templates"], announce)
+        u.update_subst()
+        u.Subst["__SUITE__"] = ""
+        mail_message = utils.TemplateSubst(u.Subst, template)
+        utils.send_mail(mail_message)
+        u.announce(short_summary, True)
+
+################################################################################
+
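+# Most of the do_*/queue_* handlers below are thin wrappers around
+# package_to_queue(), differing only in queue name, permissions,
+# build-queue handling and announce template.
+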
+def is_unembargo(u):
+    session = DBConn().session()
+    cnf = Config()
+
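+    # Un-embargoed if a disembargo for this source/version is already
+    # recorded, or if the upload arrived via the disembargo queue directory
+    # (in which case we record the disembargo now).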
+    q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes)
+    if q.rowcount > 0:
+        session.close()
+        return True
+
+    oldcwd = os.getcwd()
+    os.chdir(cnf["Dir::Queue::Disembargo"])
+    disdir = os.getcwd()
+    os.chdir(oldcwd)
+
+    ret = False
+
+    if u.pkg.directory == disdir:
+        if u.pkg.changes["architecture"].has_key("source"):
+            session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes)
+            session.commit()
+
+            ret = True
+
+    session.close()
+
+    return ret
+
+def queue_unembargo(u, summary, short_summary, session=None):
+    return package_to_queue(u, summary, short_summary, "Unembargoed",
+                            perms=0660, build=True, announce='process-unchecked.accepted')
+
+################################################################################
+
+def is_embargo(u):
+    # if embargoed queues are enabled always embargo
+    return True
+
+def queue_embargo(u, summary, short_summary, session=None):
+    return package_to_queue(u, summary, short_summary, "Embargoed",
+                            perms=0660, build=True, announce='process-unchecked.accepted')
+
+################################################################################
+
+def is_stableupdate(u):
+    return package_to_suite(u, 'proposed-updates')
+
+def do_stableupdate(u, summary, short_summary, session=None):
+    return package_to_queue(u, summary, short_summary, "ProposedUpdates",
+                            perms=0664, build=False, announce=None)
+
+################################################################################
+
+def is_oldstableupdate(u):
+    return package_to_suite(u, 'oldstable-proposed-updates')
+
+def do_oldstableupdate(u, summary, short_summary, session=None):
+    return package_to_queue(u, summary, short_summary, "OldProposedUpdates",
+                            perms=0664, build=False, announce=None)
+
+################################################################################
+
+def is_autobyhand(u):
+    cnf = Config()
+
+    all_auto = 1
+    any_auto = 0
+    for f in u.pkg.files.keys():
+        if u.pkg.files[f].has_key("byhand"):
+            any_auto = 1
+
+            # filename is of form "PKG_VER_ARCH.EXT" where PKG, VER and ARCH
+            # don't contain underscores, and ARCH doesn't contain dots.
+            # further VER matches the .changes Version:, and ARCH should be in
+            # the .changes Architecture: list.
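+            # (Hypothetical example: "foo_1.2-1_amd64.tar.gz" gives
+            # PKG="foo", VER="1.2-1", ARCH="amd64", EXT="tar.gz".)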
+            if f.count("_") < 2:
+                all_auto = 0
+                continue
+
+            (pckg, ver, archext) = f.split("_", 2)
+            if archext.count(".") < 1 or u.pkg.changes["version"] != ver:
+                all_auto = 0
+                continue
+
+            ABH = cnf.SubTree("AutomaticByHandPackages")
+            if not ABH.has_key(pckg) or \
+              ABH["%s::Source" % (pckg)] != u.pkg.changes["source"]:
+                print "not match %s %s" % (pckg, u.pkg.changes["source"])
+                all_auto = 0
+                continue
+
+            (arch, ext) = archext.split(".", 1)
+            if arch not in u.pkg.changes["architecture"]:
+                all_auto = 0
+                continue
+
+            u.pkg.files[f]["byhand-arch"] = arch
+            u.pkg.files[f]["byhand-script"] = ABH["%s::Script" % (pckg)]
+
+    return any_auto and all_auto
+
+def do_autobyhand(u, summary, short_summary, session=None):
+    print "Attempting AUTOBYHAND."
+    byhandleft = False
+    for f, entry in u.pkg.files.items():
+        byhandfile = f
+
+        if not entry.has_key("byhand"):
+            continue
+
+        if not entry.has_key("byhand-script"):
+            byhandleft = True
+            continue
+
+        os.system("ls -l %s" % byhandfile)
+
+        result = os.system("%s %s %s %s %s" % (
+                entry["byhand-script"],
+                byhandfile,
+                u.pkg.changes["version"],
+                entry["byhand-arch"],
+                os.path.abspath(u.pkg.changes_file)))
+
+        if result == 0:
+            os.unlink(byhandfile)
+            del u.pkg.files[f]
+        else:
+            print "Error processing %s, left as byhand." % (f)
+            byhandleft = True
+
+    if byhandleft:
+        do_byhand(u, summary, short_summary, session)
+    else:
+        u.accept(summary, short_summary, session)
+        u.check_override()
+
+################################################################################
+
+def is_byhand(u):
+    for f in u.pkg.files.keys():
+        if u.pkg.files[f].has_key("byhand"):
+            return True
+    return False
+
+def do_byhand(u, summary, short_summary, session=None):
+    return package_to_queue(u, summary, short_summary, "Byhand",
+                            perms=0660, build=False, announce=None)
+
+################################################################################
+
+def is_new(u):
+    for f in u.pkg.files.keys():
+        if u.pkg.files[f].has_key("new"):
+            return True
+    return False
+
+def acknowledge_new(u, summary, short_summary, session=None):
+    cnf = Config()
+
+    print "Moving to NEW queue."
+    u.logger.log(["Moving to new", u.pkg.changes_file])
+
+    u.move_to_dir(cnf["Dir::Queue::New"], perms=0640, changesperms=0644)
+
+    if not cnf["Dinstall::Options::No-Mail"]:
+        print "Sending new ack."
+        template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
+        u.update_subst()
+        u.Subst["__SUMMARY__"] = summary
+        new_ack_message = utils.TemplateSubst(u.Subst, template)
+        utils.send_mail(new_ack_message)
+
+################################################################################
+
+# q-unapproved hax0ring
+QueueInfo = {
+    "New": { "is": is_new, "process": acknowledge_new },
+    "Autobyhand": { "is": is_autobyhand, "process": do_autobyhand },
+    "Byhand": { "is": is_byhand, "process": do_byhand },
+    "OldStableUpdate": { "is": is_oldstableupdate,
+                         "process": do_oldstableupdate },
+    "StableUpdate": { "is": is_stableupdate, "process": do_stableupdate },
+    "Unembargo": { "is": is_unembargo, "process": queue_unembargo },
+    "Embargo": { "is": is_embargo, "process": queue_embargo },
+}
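+
+# Dispatch sketch (illustrative only; process-upload is the real caller):
+#
+#     target = determine_target(u)
+#     if target:
+#         QueueInfo[target]["process"](u, summary, short_summary)
+#     else:
+#         u.accept(summary, short_summary)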