rename known_changes

diff --git a/daklib/queue.py b/daklib/queue.py
index 4729b44fd51f086cfabbb95b5bd7e9e8f988d892..39dab347da5133fcbeb3da741b41dfc2b59da75c 100755
--- a/daklib/queue.py
+++ b/daklib/queue.py
@@ -26,31 +26,71 @@ Queue utility functions for dak
 
 ###############################################################################
 
-import cPickle
 import errno
 import os
-import pg
 import stat
 import sys
 import time
 import apt_inst
 import apt_pkg
 import utils
-import database
+import commands
+import shutil
+import textwrap
+from types import *
+from sqlalchemy.sql.expression import desc
+from sqlalchemy.orm.exc import NoResultFound
+
+import yaml
 
 from dak_exceptions import *
 from changes import *
-from regexes import re_default_answer, re_fdnic, re_bin_only_nmu
+from regexes import *
 from config import Config
+from holding import Holding
+from urgencylog import UrgencyLog
+from dbconn import *
 from summarystats import SummaryStats
-
-from types import *
+from utils import parse_changes, check_dsc_files
+from textutils import fix_maintainer
+from binary import Binary
 
 ###############################################################################
 
+def get_type(f, session):
+    """
+    Get the file type of C{f}
+
+    @type f: dict
+    @param f: file entry from Changes object
+
+    @type session: SQLA Session
+    @param session: SQL Alchemy session object
+
+    @rtype: string
+    @return: filetype
+
+    """
+    # Determine the type
+    if f.has_key("dbtype"):
+        file_type = f["dbtype"]
+    elif re_source_ext.match(f["type"]):
+        file_type = "dsc"
+    else:
+        utils.fubar("invalid type (%s) for new.  Dazed, confused and sure as heck not continuing." % (f["type"]))
+
+    # Validate the override type
+    type_id = get_override_type(file_type, session)
+    if type_id is None:
+        utils.fubar("invalid type (%s) for new.  Say wha?" % (file_type))
+
+    return file_type
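+
+# A minimal illustration (editor's sketch, not part of dak): given a file
+# entry from a Changes object, get_type() maps source extensions onto the
+# override type "dsc" and validates the result against the override types
+# known to the database.  The entry dict here is hypothetical:
+#
+#     session = DBConn().session()
+#     assert get_type({"type": "dsc"}, session) == "dsc"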
+
+################################################################################
+
 # Determine what parts in a .changes are NEW
 
-def determine_new(changes, files, projectB, warn=1):
+def determine_new(changes, files, warn=1):
     """
     Determine what parts in a C{changes} file are NEW.
 
@@ -60,9 +100,6 @@ def determine_new(changes, files, projectB, warn=1):
     @type files: Upload.Pkg.files dict
     @param files: Files dictionary
 
-    @type projectB: pgobject
-    @param projectB: DB handle
-
     @type warn: bool
     @param warn: Warn if overrides are added for (old)stable
 
@@ -72,20 +109,22 @@ def determine_new(changes, files, projectB, warn=1):
     """
     new = {}
 
+    session = DBConn().session()
+
     # Build up a list of potentially new things
-    for file_entry in files.keys():
-        f = files[file_entry]
+    for name, f in files.items():
         # Skip byhand elements
         if f["type"] == "byhand":
             continue
         pkg = f["package"]
         priority = f["priority"]
         section = f["section"]
-        file_type = get_type(f)
+        file_type = get_type(f, session)
         component = f["component"]
 
         if file_type == "dsc":
             priority = "source"
+
         if not new.has_key(pkg):
             new[pkg] = {}
             new[pkg]["priority"] = priority
@@ -102,66 +141,35 @@ def determine_new(changes, files, projectB, warn=1):
                     new[pkg]["section"] = section
                     new[pkg]["type"] = file_type
                     new[pkg]["component"] = component
-        new[pkg]["files"].append(file_entry)
+
+        new[pkg]["files"].append(name)
+
         if f.has_key("othercomponents"):
             new[pkg]["othercomponents"] = f["othercomponents"]
 
     for suite in changes["suite"].keys():
-        suite_id = database.get_suite_id(suite)
         for pkg in new.keys():
-            component_id = database.get_component_id(new[pkg]["component"])
-            type_id = database.get_override_type_id(new[pkg]["type"])
-            q = projectB.query("SELECT package FROM override WHERE package = '%s' AND suite = %s AND component = %s AND type = %s" % (pkg, suite_id, component_id, type_id))
-            ql = q.getresult()
-            if ql:
+            ql = get_override(pkg, suite, new[pkg]["component"], new[pkg]["type"], session)
+            if len(ql) > 0:
                 for file_entry in new[pkg]["files"]:
                     if files[file_entry].has_key("new"):
                         del files[file_entry]["new"]
                 del new[pkg]
 
     if warn:
-        if changes["suite"].has_key("stable"):
-            print "WARNING: overrides will be added for stable!"
-            if changes["suite"].has_key("oldstable"):
-                print "WARNING: overrides will be added for OLDstable!"
+        for s in ['stable', 'oldstable']:
+            if changes["suite"].has_key(s):
+                print "WARNING: overrides will be added for %s!" % s
         for pkg in new.keys():
             if new[pkg].has_key("othercomponents"):
                 print "WARNING: %s already present in %s distribution." % (pkg, new[pkg]["othercomponents"])
 
-    return new
-
-################################################################################
-
-def get_type(file):
-    """
-    Get the file type of C{file}
-
-    @type file: dict
-    @param file: file entry
-
-    @rtype: string
-    @return: filetype
-
-    """
-    # Determine the type
-    if file.has_key("dbtype"):
-        file_type = file["dbtype"]
-    elif file["type"] in [ "orig.tar.gz", "orig.tar.bz2", "tar.gz", "tar.bz2", "diff.gz", "diff.bz2", "dsc" ]:
-        file_type = "dsc"
-    else:
-        utils.fubar("invalid type (%s) for new.  Dazed, confused and sure as heck not continuing." % (file_type))
-
-    # Validate the override type
-    type_id = database.get_override_type_id(file_type)
-    if type_id == -1:
-        utils.fubar("invalid type (%s) for new.  Say wha?" % (file_type))
+    session.close()
 
-    return file_type
+    return new
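+
+# Editor's sketch (hypothetical upload `u`, not part of dak): determine_new()
+# returns a dict keyed on package name for everything lacking an override:
+#
+#     new = determine_new(u.pkg.changes, u.pkg.files, warn=0)
+#     for pkg in new.keys():
+#         print "%s: %s/%s" % (pkg, new[pkg]["component"], new[pkg]["section"])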
 
 ################################################################################
 
-
-
 def check_valid(new):
     """
     Check if section and priority for NEW packages exist in database.
@@ -175,19 +183,64 @@ def check_valid(new):
 
     """
     for pkg in new.keys():
-        section = new[pkg]["section"]
-        priority = new[pkg]["priority"]
+        section_name = new[pkg]["section"]
+        priority_name = new[pkg]["priority"]
         file_type = new[pkg]["type"]
-        new[pkg]["section id"] = database.get_section_id(section)
-        new[pkg]["priority id"] = database.get_priority_id(new[pkg]["priority"])
+
+        section = get_section(section_name)
+        if section is None:
+            new[pkg]["section id"] = -1
+        else:
+            new[pkg]["section id"] = section.section_id
+
+        priority = get_priority(priority_name)
+        if priority is None:
+            new[pkg]["priority id"] = -1
+        else:
+            new[pkg]["priority id"] = priority.priority_id
+
         # Sanity checks
-        di = section.find("debian-installer") != -1
-        if (di and file_type not in ("udeb", "dsc")) or (not di and file_type == "udeb"):
+        di = section_name.find("debian-installer") != -1
+
+        # If d-i, we must be udeb and vice-versa
+        if     (di and file_type not in ("udeb", "dsc")) or \
+           (not di and file_type == "udeb"):
             new[pkg]["section id"] = -1
+
+        # If dsc we need to be source and vice-versa
         if (priority == "source" and file_type != "dsc") or \
            (priority != "source" and file_type == "dsc"):
             new[pkg]["priority id"] = -1
 
+###############################################################################
+
+def check_status(files):
+    new = byhand = 0
+    for f in files.keys():
+        if files[f]["type"] == "byhand":
+            byhand = 1
+        elif files[f].has_key("new"):
+            new = 1
+    return (new, byhand)
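+
+# Editor's sketch: check_status() merely flags whether an upload still has
+# NEW or byhand elements, e.g.:
+#
+#     (new, byhand) = check_status(u.pkg.files)
+#     if new or byhand:
+#         pass  # hold the upload for manual processing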
+
+###############################################################################
+
+# Used by Upload.check_timestamps
+class TarTime(object):
+    def __init__(self, future_cutoff, past_cutoff):
+        self.reset()
+        self.future_cutoff = future_cutoff
+        self.past_cutoff = past_cutoff
+
+    def reset(self):
+        self.future_files = {}
+        self.ancient_files = {}
+
+    def callback(self, Kind, Name, Link, Mode, UID, GID, Size, MTime, Major, Minor):
+        if MTime > self.future_cutoff:
+            self.future_files[Name] = MTime
+        if MTime < self.past_cutoff:
+            self.ancient_files[Name] = MTime
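+
+# Editor's sketch (assumes a readable .deb on disk): TarTime.callback is fed
+# to apt_inst.debExtract as the per-member hook, collecting files whose
+# mtimes fall outside [past_cutoff, future_cutoff]:
+#
+#     tar = TarTime(time.time() + 86400, time.mktime(time.strptime("1975", "%Y")))
+#     apt_inst.debExtract(utils.open_file("foo.deb"), tar.callback, "data.tar.gz")
+#     print tar.future_files, tar.ancient_files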
 
 ###############################################################################
 
@@ -197,12 +250,7 @@ class Upload(object):
 
     """
     def __init__(self):
-        """
-        Initialize various variables and the global substitution template mappings.
-        Also connect to the DB and initialize the Database module.
-
-        """
-
+        self.logger = None
         self.pkg = Changes()
         self.reset()
 
@@ -211,7 +259,7 @@ class Upload(object):
     def reset (self):
         """ Reset a number of internal variables."""
 
-       # Initialize the substitution template map
+        # Initialize the substitution template map
         cnf = Config()
         self.Subst = {}
         self.Subst["__ADMIN_ADDRESS__"] = cnf["Dinstall::MyAdminAddress"]
@@ -219,18 +267,40 @@ class Upload(object):
         self.Subst["__DISTRO__"] = cnf["Dinstall::MyDistribution"]
         self.Subst["__DAK_ADDRESS__"] = cnf["Dinstall::MyEmailAddress"]
 
-        self.reject_message = ""
-        self.changes.reset()
+        self.rejects = []
+        self.warnings = []
+        self.notes = []
+
+        self.pkg.reset()
+
+    def package_info(self):
+        """
+        Format various messages from this Upload to send to the maintainer.
+        """
+
+        msgs = (
+            ('Reject Reasons', self.rejects),
+            ('Warnings', self.warnings),
+            ('Notes', self.notes),
+        )
+
+        msg = ''
+        for title, messages in msgs:
+            if messages:
+                msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
+        msg += '\n'
+
+        return msg
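+
+    # Editor's sketch of the resulting shape (hypothetical content):
+    #
+    #     Reject Reasons:
+    #     foo_1.0.dsc: invalid version number '1.0!'.
+    #
+    #     Warnings:
+    #     foo already present in contrib distribution.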
 
     ###########################################################################
-    def update_subst(self, reject_message = ""):
+    def update_subst(self):
         """ Set up the per-package template substitution mappings """
 
         cnf = Config()
 
         # If 'dak process-unchecked' crashed out in the right place, architecture may still be a string.
         if not self.pkg.changes.has_key("architecture") or not \
-           isinstance(changes["architecture"], DictType):
+           isinstance(self.pkg.changes["architecture"], dict):
             self.pkg.changes["architecture"] = { "Unknown" : "" }
 
         # and maintainer2047 may not exist.
@@ -247,7 +317,7 @@ class Upload(object):
            (self.pkg.changes["changedby822"] != self.pkg.changes["maintainer822"]):
 
             self.Subst["__MAINTAINER_FROM__"] = self.pkg.changes["changedby2047"]
-            self.Subst["__MAINTAINER_TO__"] = "%s, %s" % (self.pkg.changes["changedby2047"], changes["maintainer2047"])
+            self.Subst["__MAINTAINER_TO__"] = "%s, %s" % (self.pkg.changes["changedby2047"], self.pkg.changes["maintainer2047"])
             self.Subst["__MAINTAINER__"] = self.pkg.changes.get("changed-by", "Unknown")
         else:
             self.Subst["__MAINTAINER_FROM__"] = self.pkg.changes["maintainer2047"]
@@ -265,11 +335,1359 @@ class Upload(object):
             self.Subst["__MAINTAINER_TO__"] = cnf["Dinstall::OverrideMaintainer"]
             self.Subst["__MAINTAINER_FROM__"] = cnf["Dinstall::OverrideMaintainer"]
 
-        self.Subst["__REJECT_MESSAGE__"] = self.reject_message
+        self.Subst["__REJECT_MESSAGE__"] = self.package_info()
         self.Subst["__SOURCE__"] = self.pkg.changes.get("source", "Unknown")
         self.Subst["__VERSION__"] = self.pkg.changes.get("version", "Unknown")
 
     ###########################################################################
+    def load_changes(self, filename):
+        """
+        @rtype: boolean
+        @return: whether the changes file was valid or not.  We may want to
+                 reject even if this is True (see what gets put in self.rejects).
+                 This is simply to prevent us even trying things later which will
+                 fail because we couldn't properly parse the file.
+        """
+        Cnf = Config()
+        self.pkg.changes_file = filename
+
+        # Parse the .changes field into a dictionary
+        try:
+            self.pkg.changes.update(parse_changes(filename))
+        except CantOpenError:
+            self.rejects.append("%s: can't read file." % (filename))
+            return False
+        except ParseChangesError, line:
+            self.rejects.append("%s: parse error, can't grok: %s." % (filename, line))
+            return False
+        except ChangesUnicodeError:
+            self.rejects.append("%s: changes file not proper utf-8" % (filename))
+            return False
+
+        # Parse the Files field from the .changes into another dictionary
+        try:
+            self.pkg.files.update(utils.build_file_list(self.pkg.changes))
+        except ParseChangesError, line:
+            self.rejects.append("%s: parse error, can't grok: %s." % (filename, line))
+            return False
+        except UnknownFormatError, format:
+            self.rejects.append("%s: unknown format '%s'." % (filename, format))
+            return False
+
+        # Check for mandatory fields
+        for i in ("distribution", "source", "binary", "architecture",
+                  "version", "maintainer", "files", "changes", "description"):
+            if not self.pkg.changes.has_key(i):
+                # Avoid undefined errors later
+                self.rejects.append("%s: Missing mandatory field `%s'." % (filename, i))
+                return False
+
+        # Strip a source version in brackets from the source field
+        if re_strip_srcver.search(self.pkg.changes["source"]):
+            self.pkg.changes["source"] = re_strip_srcver.sub('', self.pkg.changes["source"])
+
+        # Ensure the source field is a valid package name.
+        if not re_valid_pkg_name.match(self.pkg.changes["source"]):
+            self.rejects.append("%s: invalid source name '%s'." % (filename, self.pkg.changes["source"]))
+
+        # Split multi-value fields into a lower-level dictionary
+        for i in ("architecture", "distribution", "binary", "closes"):
+            o = self.pkg.changes.get(i, "")
+            if o != "":
+                del self.pkg.changes[i]
+
+            self.pkg.changes[i] = {}
+
+            for j in o.split():
+                self.pkg.changes[i][j] = 1
+
+        # Fix the Maintainer: field to be RFC822/2047 compatible
+        try:
+            (self.pkg.changes["maintainer822"],
+             self.pkg.changes["maintainer2047"],
+             self.pkg.changes["maintainername"],
+             self.pkg.changes["maintaineremail"]) = \
+                   fix_maintainer (self.pkg.changes["maintainer"])
+        except ParseMaintError, msg:
+            self.rejects.append("%s: Maintainer field ('%s') failed to parse: %s" \
+                   % (filename, self.pkg.changes["maintainer"], msg))
+
+        # ...likewise for the Changed-By: field if it exists.
+        try:
+            (self.pkg.changes["changedby822"],
+             self.pkg.changes["changedby2047"],
+             self.pkg.changes["changedbyname"],
+             self.pkg.changes["changedbyemail"]) = \
+                   fix_maintainer (self.pkg.changes.get("changed-by", ""))
+        except ParseMaintError, msg:
+            self.pkg.changes["changedby822"] = ""
+            self.pkg.changes["changedby2047"] = ""
+            self.pkg.changes["changedbyname"] = ""
+            self.pkg.changes["changedbyemail"] = ""
+
+            self.rejects.append("%s: Changed-By field ('%s') failed to parse: %s" \
+                   % (filename, self.pkg.changes.get("changed-by", ""), msg))
+
+        # Ensure all the values in Closes: are numbers
+        if self.pkg.changes.has_key("closes"):
+            for i in self.pkg.changes["closes"].keys():
+                if re_isanum.match (i) == None:
+                    self.rejects.append(("%s: `%s' from Closes field isn't a number." % (filename, i)))
+
+        # chopversion = no epoch; chopversion2 = no epoch and no revision (e.g. for .orig.tar.gz comparison)
+        self.pkg.changes["chopversion"] = re_no_epoch.sub('', self.pkg.changes["version"])
+        self.pkg.changes["chopversion2"] = re_no_revision.sub('', self.pkg.changes["chopversion"])
+
+        # Check there isn't already a changes file of the same name in one
+        # of the queue directories.
+        base_filename = os.path.basename(filename)
+        if get_dbchange(base_filename):
+            self.rejects.append("%s: a file with this name already exists." % (base_filename))
+
+        # Check the .changes is non-empty
+        if not self.pkg.files:
+            self.rejects.append("%s: nothing to do (Files field is empty)." % (base_filename))
+            return False
+
+        # Changes was syntactically valid even if we'll reject
+        return True
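+
+    # Editor's sketch (hypothetical path): callers should check the return
+    # value *and* self.rejects afterwards, since a parseable .changes may
+    # still carry reject reasons:
+    #
+    #     u = Upload()
+    #     if u.load_changes("/srv/queue/foo_1.0_i386.changes"):
+    #         u.check_files(action=False)
+    #     print u.package_info()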
+
+    ###########################################################################
+
+    def check_distributions(self):
+        "Check and map the Distribution field"
+
+        Cnf = Config()
+
+        # Handle suite mappings
+        for m in Cnf.ValueList("SuiteMappings"):
+            args = m.split()
+            mtype = args[0]
+            if mtype == "map" or mtype == "silent-map":
+                (source, dest) = args[1:3]
+                if self.pkg.changes["distribution"].has_key(source):
+                    del self.pkg.changes["distribution"][source]
+                    self.pkg.changes["distribution"][dest] = 1
+                    if mtype != "silent-map":
+                        self.notes.append("Mapping %s to %s." % (source, dest))
+                if self.pkg.changes.has_key("distribution-version"):
+                    if self.pkg.changes["distribution-version"].has_key(source):
+                        self.pkg.changes["distribution-version"][source] = dest
+            elif mtype == "map-unreleased":
+                (source, dest) = args[1:3]
+                if self.pkg.changes["distribution"].has_key(source):
+                    for arch in self.pkg.changes["architecture"].keys():
+                        if arch not in [ a.arch_string for a in get_suite_architectures(source) ]:
+                            self.notes.append("Mapping %s to %s for unreleased architecture %s." % (source, dest, arch))
+                            del self.pkg.changes["distribution"][source]
+                            self.pkg.changes["distribution"][dest] = 1
+                            break
+            elif mtype == "ignore":
+                suite = args[1]
+                if self.pkg.changes["distribution"].has_key(suite):
+                    del self.pkg.changes["distribution"][suite]
+                    self.warnings.append("Ignoring %s as a target suite." % (suite))
+            elif mtype == "reject":
+                suite = args[1]
+                if self.pkg.changes["distribution"].has_key(suite):
+                    self.rejects.append("Uploads to %s are not accepted." % (suite))
+            elif mtype == "propup-version":
+                # give these as "uploaded-to(non-mapped) suites-to-add-when-upload-obsoletes"
+                #
+                # changes["distribution-version"] looks like: {'testing': 'testing-proposed-updates'}
+                if self.pkg.changes["distribution"].has_key(args[1]):
+                    self.pkg.changes.setdefault("distribution-version", {})
+                    for suite in args[2:]:
+                        self.pkg.changes["distribution-version"][suite] = suite
+
+        # Ensure there is (still) a target distribution
+        if len(self.pkg.changes["distribution"].keys()) < 1:
+            self.rejects.append("No valid distribution remaining.")
+
+        # Ensure target distributions exist
+        for suite in self.pkg.changes["distribution"].keys():
+            if not Cnf.has_key("Suite::%s" % (suite)):
+                self.rejects.append("Unknown distribution `%s'." % (suite))
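+
+    # Editor's sketch (hypothetical configuration): SuiteMappings entries are
+    # whitespace-separated strings whose first word picks the mapping type,
+    # e.g.:
+    #
+    #     SuiteMappings {
+    #       "map stable proposed-updates";
+    #       "silent-map testing-security testing";
+    #       "propup-version testing-security unstable";
+    #     };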
+
+    ###########################################################################
+
+    def binary_file_checks(self, f, session):
+        cnf = Config()
+        entry = self.pkg.files[f]
+
+        # Extract package control information
+        deb_file = utils.open_file(f)
+        try:
+            control = apt_pkg.ParseSection(apt_inst.debExtractControl(deb_file))
+        except:
+            self.rejects.append("%s: debExtractControl() raised %s." % (f, sys.exc_type))
+            deb_file.close()
+            # Can't continue, none of the checks on control would work.
+            return
+
+        # Check for mandatory "Description:" (indexing raises if it is missing)
+        deb_file.seek(0)
+        try:
+            apt_pkg.ParseSection(apt_inst.debExtractControl(deb_file))["Description"] + '\n'
+        except:
+            self.rejects.append("%s: Missing Description in binary package" % (f))
+            return
+
+        deb_file.close()
+
+        # Check for mandatory fields
+        for field in [ "Package", "Architecture", "Version" ]:
+            if control.Find(field) == None:
+                # Can't continue
+                self.rejects.append("%s: No %s field in control." % (f, field))
+                return
+
+        # Ensure the package name matches the one given in the .changes
+        if not self.pkg.changes["binary"].has_key(control.Find("Package", "")):
+            self.rejects.append("%s: control file lists name as `%s', which isn't in changes file." % (f, control.Find("Package", "")))
+
+        # Validate the package field
+        package = control.Find("Package")
+        if not re_valid_pkg_name.match(package):
+            self.rejects.append("%s: invalid package name '%s'." % (f, package))
+
+        # Validate the version field
+        version = control.Find("Version")
+        if not re_valid_version.match(version):
+            self.rejects.append("%s: invalid version number '%s'." % (f, version))
+
+        # Ensure the architecture of the .deb is one we know about.
+        default_suite = cnf.get("Dinstall::DefaultSuite", "Unstable")
+        architecture = control.Find("Architecture")
+        upload_suite = self.pkg.changes["distribution"].keys()[0]
+
+        if      architecture not in [a.arch_string for a in get_suite_architectures(default_suite, session)] \
+            and architecture not in [a.arch_string for a in get_suite_architectures(upload_suite, session)]:
+            self.rejects.append("Unknown architecture '%s'." % (architecture))
+
+        # Ensure the architecture of the .deb is one of the ones
+        # listed in the .changes.
+        if not self.pkg.changes["architecture"].has_key(architecture):
+            self.rejects.append("%s: control file lists arch as `%s', which isn't in changes file." % (f, architecture))
+
+        # Sanity-check the Depends field
+        depends = control.Find("Depends")
+        if depends == '':
+            self.rejects.append("%s: Depends field is empty." % (f))
+
+        # Sanity-check the Provides field
+        provides = control.Find("Provides")
+        if provides:
+            provide = re_spacestrip.sub('', provides)
+            if provide == '':
+                self.rejects.append("%s: Provides field is empty." % (f))
+            prov_list = provide.split(",")
+            for prov in prov_list:
+                if not re_valid_pkg_name.match(prov):
+                    self.rejects.append("%s: Invalid Provides field content %s." % (f, prov))
+
+        # Check the section & priority match those given in the .changes (non-fatal)
+        if     control.Find("Section") and entry["section"] != "" \
+           and entry["section"] != control.Find("Section"):
+            self.warnings.append("%s control file lists section as `%s', but changes file has `%s'." % \
+                                (f, control.Find("Section", ""), entry["section"]))
+        if control.Find("Priority") and entry["priority"] != "" \
+           and entry["priority"] != control.Find("Priority"):
+            self.warnings.append("%s control file lists priority as `%s', but changes file has `%s'." % \
+                                (f, control.Find("Priority", ""), entry["priority"]))
+
+        entry["package"] = package
+        entry["architecture"] = architecture
+        entry["version"] = version
+        entry["maintainer"] = control.Find("Maintainer", "")
+
+        if f.endswith(".udeb"):
+            self.pkg.files[f]["dbtype"] = "udeb"
+        elif f.endswith(".deb"):
+            self.pkg.files[f]["dbtype"] = "deb"
+        else:
+            self.rejects.append("%s is neither a .deb nor a .udeb." % (f))
+
+        entry["source"] = control.Find("Source", entry["package"])
+
+        # Get the source version
+        source = entry["source"]
+        source_version = ""
+
+        if source.find("(") != -1:
+            m = re_extract_src_version.match(source)
+            source = m.group(1)
+            source_version = m.group(2)
+
+        if not source_version:
+            source_version = self.pkg.files[f]["version"]
+
+        entry["source package"] = source
+        entry["source version"] = source_version
+
+        # Ensure the filename matches the contents of the .deb
+        m = re_isadeb.match(f)
+
+        #  package name
+        file_package = m.group(1)
+        if entry["package"] != file_package:
+            self.rejects.append("%s: package part of filename (%s) does not match package name in the %s (%s)." % \
+                                (f, file_package, entry["dbtype"], entry["package"]))
+        epochless_version = re_no_epoch.sub('', control.Find("Version"))
+
+        #  version
+        file_version = m.group(2)
+        if epochless_version != file_version:
+            self.rejects.append("%s: version part of filename (%s) does not match package version in the %s (%s)." % \
+                                (f, file_version, entry["dbtype"], epochless_version))
+
+        #  architecture
+        file_architecture = m.group(3)
+        if entry["architecture"] != file_architecture:
+            self.rejects.append("%s: architecture part of filename (%s) does not match package architecture in the %s (%s)." % \
+                                (f, file_architecture, entry["dbtype"], entry["architecture"]))
+
+        # Check for existent source
+        source_version = entry["source version"]
+        source_package = entry["source package"]
+        if self.pkg.changes["architecture"].has_key("source"):
+            if source_version != self.pkg.changes["version"]:
+                self.rejects.append("source version (%s) for %s doesn't match changes version %s." % \
+                                    (source_version, f, self.pkg.changes["version"]))
+        else:
+            # Check in the SQL database
+            if not source_exists(source_package, source_version, self.pkg.changes["distribution"].keys(), session):
+                # Check in one of the other directories
+                source_epochless_version = re_no_epoch.sub('', source_version)
+                dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
+                if os.path.exists(os.path.join(cnf["Dir::Queue::Byhand"], dsc_filename)):
+                    entry["byhand"] = 1
+                elif os.path.exists(os.path.join(cnf["Dir::Queue::New"], dsc_filename)):
+                    entry["new"] = 1
+                else:
+                    dsc_file_exists = False
+                    for myq in ["Accepted", "Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
+                        if cnf.has_key("Dir::Queue::%s" % (myq)):
+                            if os.path.exists(os.path.join(cnf["Dir::Queue::" + myq], dsc_filename)):
+                                dsc_file_exists = True
+                                break
+
+                    if not dsc_file_exists:
+                        self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, f))
+
+        # Check the version and for file overwrites
+        self.check_binary_against_db(f, session)
+
+        # Temporarily disable contents generation until we change the table storage layout
+        #b = Binary(f)
+        #b.scan_package()
+        #if len(b.rejects) > 0:
+        #    for j in b.rejects:
+        #        self.rejects.append(j)
+
+    def source_file_checks(self, f, session):
+        entry = self.pkg.files[f]
+
+        m = re_issource.match(f)
+        if not m:
+            return
+
+        entry["package"] = m.group(1)
+        entry["version"] = m.group(2)
+        entry["type"] = m.group(3)
+
+        # Ensure the source package name matches the Source field in the .changes
+        if self.pkg.changes["source"] != entry["package"]:
+            self.rejects.append("%s: changes file doesn't say %s for Source" % (f, entry["package"]))
+
+        # Ensure the source version matches the version in the .changes file
+        if re_is_orig_source.match(f):
+            changes_version = self.pkg.changes["chopversion2"]
+        else:
+            changes_version = self.pkg.changes["chopversion"]
+
+        if changes_version != entry["version"]:
+            self.rejects.append("%s: should be %s according to changes file." % (f, changes_version))
+
+        # Ensure the .changes lists source in the Architecture field
+        if not self.pkg.changes["architecture"].has_key("source"):
+            self.rejects.append("%s: changes file doesn't list `source' in Architecture field." % (f))
+
+        # Check the signature of a .dsc file
+        if entry["type"] == "dsc":
+            # check_signature returns either:
+            #  (None, [list, of, rejects]) or (signature, [])
+            (self.pkg.dsc["fingerprint"], rejects) = utils.check_signature(f)
+            for j in rejects:
+                self.rejects.append(j)
+
+        entry["architecture"] = "source"
+
+    def per_suite_file_checks(self, f, suite, session):
+        cnf = Config()
+        entry = self.pkg.files[f]
+        archive = utils.where_am_i()
+
+        # Skip byhand
+        if entry.has_key("byhand"):
+            return
+
+        # Check we have fields we need to do these checks
+        oktogo = True
+        for m in ['component', 'package', 'priority', 'size', 'md5sum']:
+            if not entry.has_key(m):
+                self.rejects.append("file '%s' does not have field %s set" % (f, m))
+                oktogo = False
+
+        if not oktogo:
+            return
+
+        # Handle component mappings
+        for m in cnf.ValueList("ComponentMappings"):
+            (source, dest) = m.split()
+            if entry["component"] == source:
+                entry["original component"] = source
+                entry["component"] = dest
+
+        # Ensure the component is valid for the target suite
+        if cnf.has_key("Suite::%s::Components" % (suite)) and \
+           entry["component"] not in cnf.ValueList("Suite::%s::Components" % (suite)):
+            self.rejects.append("unknown component `%s' for suite `%s'." % (entry["component"], suite))
+            return
+
+        # Validate the component
+        if not get_component(entry["component"], session):
+            self.rejects.append("file '%s' has unknown component '%s'." % (f, entry["component"]))
+            return
+
+        # See if the package is NEW
+        if not self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), f, session):
+            entry["new"] = 1
+
+        # Validate the priority
+        if entry["priority"].find('/') != -1:
+            self.rejects.append("file '%s' has invalid priority '%s' [contains '/']." % (f, entry["priority"]))
+
+        # Determine the location
+        location = cnf["Dir::Pool"]
+        l = get_location(location, entry["component"], archive, session)
+        if l is None:
+            self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (entry["component"], archive))
+            entry["location id"] = -1
+        else:
+            entry["location id"] = l.location_id
+
+        # Check the md5sum & size against existing files (if any)
+        entry["pool name"] = utils.poolify(self.pkg.changes["source"], entry["component"])
+
+        found, poolfile = check_poolfile(os.path.join(entry["pool name"], f),
+                                         entry["size"], entry["md5sum"], entry["location id"])
+
+        if found is None:
+            self.rejects.append("INTERNAL ERROR, get_files_id() returned multiple matches for %s." % (f))
+        elif found is False and poolfile is not None:
+            self.rejects.append("md5sum and/or size mismatch on existing copy of %s." % (f))
+        else:
+            if poolfile is None:
+                entry["files id"] = None
+            else:
+                entry["files id"] = poolfile.file_id
+
+        # Check for packages that have moved from one component to another
+        entry['suite'] = suite
+        res = get_binary_components(self.pkg.files[f]['package'], suite, entry["architecture"], session)
+        if res.rowcount > 0:
+            entry["othercomponents"] = res.fetchone()[0]
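+
+    # Editor's sketch (hypothetical configuration): ComponentMappings entries
+    # are "source dest" pairs, e.g.:
+    #
+    #     ComponentMappings {
+    #       "non-US/main main";
+    #     };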
+
+    def check_files(self, action=True):
+        file_keys = self.pkg.files.keys()
+        holding = Holding()
+        cnf = Config()
+
+        if action:
+            cwd = os.getcwd()
+            os.chdir(self.pkg.directory)
+            for f in file_keys:
+                ret = holding.copy_to_holding(f)
+                if ret is not None:
+                    # XXX: Should we bail out here or try and continue?
+                    self.rejects.append(ret)
+
+            os.chdir(cwd)
+
+        # Check whether we already know the changes file
+        # [NB: this check must be done post-suite mapping]
+        base_filename = os.path.basename(self.pkg.changes_file)
+
+        session = DBConn().session()
+
+        try:
+            changes = session.query(DBChange).filter_by(changesname=base_filename).one()
+            if not changes.approved_for:
+                self.rejects.append("%s file already known to dak" % base_filename)
+        except NoResultFound, e:
+            # not known, good
+            pass
+
+        has_binaries = False
+        has_source = False
+
+        for f, entry in self.pkg.files.items():
+            # Ensure the file does not already exist in one of the accepted directories
+            for d in [ "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+                if not cnf.has_key("Dir::Queue::%s" % (d)): continue
+                if os.path.exists(os.path.join(cnf["Dir::Queue::%s" % (d) ], f)):
+                    self.rejects.append("%s file already exists in the %s directory." % (f, d))
+
+            if not re_taint_free.match(f):
+                self.rejects.append("!!WARNING!! tainted filename: '%s'." % (f))
+
+            # Check the file is readable
+            if os.access(f, os.R_OK) == 0:
+                # When running in -n, copy_to_holding() won't have
+                # appended to self.rejects, so we need to do it here.
+                if action:
+                    if os.path.exists(f):
+                        self.rejects.append("Can't read `%s'. [permission denied]" % (f))
+                    else:
+                        self.rejects.append("Can't read `%s'. [file not found]" % (f))
+                entry["type"] = "unreadable"
+                continue
+
+            # If it's byhand skip remaining checks
+            if entry["section"] == "byhand" or entry["section"][:4] == "raw-":
+                entry["byhand"] = 1
+                entry["type"] = "byhand"
+
+            # Checks for a binary package...
+            elif re_isadeb.match(f):
+                has_binaries = True
+                entry["type"] = "deb"
+
+                # This routine appends to self.rejects/warnings as appropriate
+                self.binary_file_checks(f, session)
+
+            # Checks for a source package...
+            elif re_issource.match(f):
+                has_source = True
+
+                # This routine appends to self.rejects/warnings as appropriate
+                self.source_file_checks(f, session)
+
+            # Not a binary or source package?  Assume byhand...
+            else:
+                entry["byhand"] = 1
+                entry["type"] = "byhand"
+
+            # Per-suite file checks
+            entry["oldfiles"] = {}
+            for suite in self.pkg.changes["distribution"].keys():
+                self.per_suite_file_checks(f, suite, session)
+
+        session.close()
+
+        # If the .changes file says it has source, it must have source.
+        if self.pkg.changes["architecture"].has_key("source"):
+            if not has_source:
+                self.rejects.append("no source found although the Architecture line in the changes file mentions source.")
+
+            if not has_binaries and cnf.FindB("Dinstall::Reject::NoSourceOnly"):
+                self.rejects.append("source only uploads are not supported.")
+
+    ###########################################################################
+    def check_dsc(self, action=True, session=None):
+        """Returns bool indicating whether or not the source changes are valid"""
+        # Ensure there is source to check
+        if not self.pkg.changes["architecture"].has_key("source"):
+            return True
+
+        # Find the .dsc
+        dsc_filename = None
+        for f, entry in self.pkg.files.items():
+            if entry["type"] == "dsc":
+                if dsc_filename:
+                    self.rejects.append("can not process a .changes file with multiple .dsc's.")
+                    return False
+                else:
+                    dsc_filename = f
+
+        # If there isn't one, reject: source uploads must contain a .dsc.
+        if not dsc_filename:
+            self.rejects.append("source uploads must contain a dsc file")
+            return False
+
+        # Parse the .dsc file
+        try:
+            self.pkg.dsc.update(utils.parse_changes(dsc_filename, signing_rules=1))
+        except CantOpenError:
+            # if not -n copy_to_holding() will have done this for us...
+            if not action:
+                self.rejects.append("%s: can't read file." % (dsc_filename))
+        except ParseChangesError, line:
+            self.rejects.append("%s: parse error, can't grok: %s." % (dsc_filename, line))
+        except InvalidDscError, line:
+            self.rejects.append("%s: syntax error on line %s." % (dsc_filename, line))
+        except ChangesUnicodeError:
+            self.rejects.append("%s: dsc file not proper utf-8." % (dsc_filename))
+
+        # Build up the file list of files mentioned by the .dsc
+        try:
+            self.pkg.dsc_files.update(utils.build_file_list(self.pkg.dsc, is_a_dsc=1))
+        except NoFilesFieldError:
+            self.rejects.append("%s: no Files: field." % (dsc_filename))
+            return False
+        except UnknownFormatError, format:
+            self.rejects.append("%s: unknown format '%s'." % (dsc_filename, format))
+            return False
+        except ParseChangesError, line:
+            self.rejects.append("%s: parse error, can't grok: %s." % (dsc_filename, line))
+            return False
+
+        # Enforce mandatory fields
+        for i in ("format", "source", "version", "binary", "maintainer", "architecture", "files"):
+            if not self.pkg.dsc.has_key(i):
+                self.rejects.append("%s: missing mandatory field `%s'." % (dsc_filename, i))
+                return False
+
+        # Validate the source and version fields
+        if not re_valid_pkg_name.match(self.pkg.dsc["source"]):
+            self.rejects.append("%s: invalid source name '%s'." % (dsc_filename, self.pkg.dsc["source"]))
+        if not re_valid_version.match(self.pkg.dsc["version"]):
+            self.rejects.append("%s: invalid version number '%s'." % (dsc_filename, self.pkg.dsc["version"]))
+
+        # Only a limited list of source formats are allowed in each suite
+        for dist in self.pkg.changes["distribution"].keys():
+            allowed = [ x.format_name for x in get_suite_src_formats(dist, session) ]
+            if self.pkg.dsc["format"] not in allowed:
+                self.rejects.append("%s: source format '%s' not allowed in %s (accepted: %s) " % (dsc_filename, self.pkg.dsc["format"], dist, ", ".join(allowed)))
+
+        # Validate the Maintainer field
+        try:
+            # We ignore the return value
+            fix_maintainer(self.pkg.dsc["maintainer"])
+        except ParseMaintError, msg:
+            self.rejects.append("%s: Maintainer field ('%s') failed to parse: %s" \
+                                 % (dsc_filename, self.pkg.dsc["maintainer"], msg))
+
+        # Validate the build-depends field(s)
+        for field_name in [ "build-depends", "build-depends-indep" ]:
+            field = self.pkg.dsc.get(field_name)
+            if field:
+                # Have apt try to parse them...
+                try:
+                    apt_pkg.ParseSrcDepends(field)
+                except:
+                    self.rejects.append("%s: invalid %s field (can not be parsed by apt)." % (dsc_filename, field_name.title()))
+
+        # Ensure the version number in the .dsc matches the version number in the .changes
+        epochless_dsc_version = re_no_epoch.sub('', self.pkg.dsc["version"])
+        changes_version = self.pkg.files[dsc_filename]["version"]
+
+        if epochless_dsc_version != changes_version:
+            self.rejects.append("version ('%s') in .dsc does not match version ('%s') in .changes." % (epochless_dsc_version, changes_version))
+
+        # Ensure the Files field contain only what's expected
+        self.rejects.extend(check_dsc_files(dsc_filename, self.pkg.dsc, self.pkg.dsc_files))
+
+        # Ensure source is newer than existing source in target suites
+        session = DBConn().session()
+        self.check_source_against_db(dsc_filename, session)
+        self.check_dsc_against_db(dsc_filename, session)
+        session.close()
+
+        return True
+
+    ###########################################################################
+
+    def get_changelog_versions(self, source_dir):
+        """Extracts the source package and (optionally) grabs the
+        version history out of debian/changelog for the BTS."""
+
+        cnf = Config()
+
+        # Find the .dsc (again)
+        dsc_filename = None
+        for f in self.pkg.files.keys():
+            if self.pkg.files[f]["type"] == "dsc":
+                dsc_filename = f
+
+        # If there isn't one, we have nothing to do. (We have reject()ed the upload already)
+        if not dsc_filename:
+            return
+
+        # Create a symlink mirror of the source files in our temporary directory
+        for f in self.pkg.files.keys():
+            m = re_issource.match(f)
+            if m:
+                src = os.path.join(source_dir, f)
+                # If a file is missing for whatever reason, give up.
+                if not os.path.exists(src):
+                    return
+                ftype = m.group(3)
+                if re_is_orig_source.match(f) and self.pkg.orig_files.has_key(f) and \
+                   self.pkg.orig_files[f].has_key("path"):
+                    continue
+                dest = os.path.join(os.getcwd(), f)
+                os.symlink(src, dest)
+
+        # If the orig files are not a part of the upload, create symlinks to the
+        # existing copies.
+        for orig_file in self.pkg.orig_files.keys():
+            if not self.pkg.orig_files[orig_file].has_key("path"):
+                continue
+            dest = os.path.join(os.getcwd(), os.path.basename(orig_file))
+            os.symlink(self.pkg.orig_files[orig_file]["path"], dest)
+
+        # Extract the source
+        cmd = "dpkg-source -sn -x %s" % (dsc_filename)
+        (result, output) = commands.getstatusoutput(cmd)
+        if (result != 0):
+            self.rejects.append("'dpkg-source -x' failed for %s [return code: %s]." % (dsc_filename, result))
+            self.rejects.append(utils.prefix_multi_line_string(output, " [dpkg-source output:] "))
+            return
+
+        if not cnf.Find("Dir::Queue::BTSVersionTrack"):
+            return
+
+        # Get the upstream version
+        upstr_version = re_no_epoch.sub('', self.pkg.dsc["version"])
+        if re_strip_revision.search(upstr_version):
+            upstr_version = re_strip_revision.sub('', upstr_version)
+
+        # Ensure the changelog file exists
+        changelog_filename = "%s-%s/debian/changelog" % (self.pkg.dsc["source"], upstr_version)
+        if not os.path.exists(changelog_filename):
+            self.rejects.append("%s: debian/changelog not found in extracted source." % (dsc_filename))
+            return
+
+        # Parse the changelog
+        self.pkg.dsc["bts changelog"] = ""
+        changelog_file = utils.open_file(changelog_filename)
+        for line in changelog_file.readlines():
+            m = re_changelog_versions.match(line)
+            if m:
+                self.pkg.dsc["bts changelog"] += line
+        changelog_file.close()
+
+        # Check we found at least one revision in the changelog
+        if not self.pkg.dsc["bts changelog"]:
+            self.rejects.append("%s: changelog format not recognised (empty version tree)." % (dsc_filename))
+
+    def check_source(self):
+        # Bail out if:
+        #    a) there's no source
+        # or b) the orig files are MIA
+        if not self.pkg.changes["architecture"].has_key("source") \
+           or len(self.pkg.orig_files) == 0:
+            return
+
+        tmpdir = utils.temp_dirname()
+
+        # Move into the temporary directory
+        cwd = os.getcwd()
+        os.chdir(tmpdir)
+
+        # Get the changelog version history
+        self.get_changelog_versions(cwd)
+
+        # Move back and cleanup the temporary tree
+        os.chdir(cwd)
+
+        try:
+            shutil.rmtree(tmpdir)
+        except OSError, e:
+            if e.errno != errno.EACCES:
+                utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
+
+            self.rejects.append("%s: source tree could not be cleanly removed." % (self.pkg.dsc["source"]))
+            # We probably have u-r or u-w directories so chmod everything
+            # and try again.
+            cmd = "chmod -R u+rwx %s" % (tmpdir)
+            result = os.system(cmd)
+            if result != 0:
+                utils.fubar("'%s' failed with result %s." % (cmd, result))
+            shutil.rmtree(tmpdir)
+        except Exception, e:
+            utils.fubar("%s: couldn't remove tmp dir for source tree: %s" % (self.pkg.dsc["source"], e))
+
+    ###########################################################################
+    def ensure_hashes(self):
+        # Make sure we recognise the format of the Files: field in the .changes
+        format = self.pkg.changes.get("format", "0.0").split(".", 1)
+        if len(format) == 2:
+            format = int(format[0]), int(format[1])
+        else:
+            format = int(float(format[0])), 0
+
+        # We need to deal with the original changes blob, as the fields we need
+        # might not be in the changes dict serialised into the .dak anymore.
+        orig_changes = utils.parse_deb822(self.pkg.changes['filecontents'])
+
+        # Copy the checksums over to the current changes dict.  This will keep
+        # the existing modifications to it intact.
+        for field in orig_changes:
+            if field.startswith('checksums-'):
+                self.pkg.changes[field] = orig_changes[field]
+
+        # Check for unsupported hashes
+        for j in utils.check_hash_fields(".changes", self.pkg.changes):
+            self.rejects.append(j)
+
+        for j in utils.check_hash_fields(".dsc", self.pkg.dsc):
+            self.rejects.append(j)
+
+        # We have to calculate the hash if we have an earlier changes version than
+        # the hash appears in rather than require it exist in the changes file
+        for hashname, hashfunc, version in utils.known_hashes:
+            # TODO: Move _ensure_changes_hash into this class
+            for j in utils._ensure_changes_hash(self.pkg.changes, format, version, self.pkg.files, hashname, hashfunc):
+                self.rejects.append(j)
+            if "source" in self.pkg.changes["architecture"]:
+                # TODO: Move _ensure_dsc_hash into this class
+                for j in utils._ensure_dsc_hash(self.pkg.dsc, self.pkg.dsc_files, hashname, hashfunc):
+                    self.rejects.append(j)
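+
+    # Editor's sketch: the Format field is split into a (major, minor) tuple
+    # so it can be compared against the changes version each hash first
+    # appeared in, e.g. "1.8" -> (1, 8) and a bare "1" -> (1, 0).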
+
+    def check_hashes(self):
+        for m in utils.check_hash(".changes", self.pkg.files, "md5", apt_pkg.md5sum):
+            self.rejects.append(m)
+
+        for m in utils.check_size(".changes", self.pkg.files):
+            self.rejects.append(m)
+
+        for m in utils.check_hash(".dsc", self.pkg.dsc_files, "md5", apt_pkg.md5sum):
+            self.rejects.append(m)
+
+        for m in utils.check_size(".dsc", self.pkg.dsc_files):
+            self.rejects.append(m)
+
+        self.ensure_hashes()
+
+    ###########################################################################
+
+    def ensure_orig(self, target_dir='.', session=None):
+        """
+        Ensures that all orig files mentioned in the changes file are present
+        in target_dir. If they do not exist, they are symlinked into place.
+
+        A list containing the symlinks that were created is returned (so they
+        can be removed).
+        """
+
+        symlinked = []
+        cnf = Config()
+
+        for filename, entry in self.pkg.dsc_files.iteritems():
+            if not re_is_orig_source.match(filename):
+                # File is not an orig; ignore
+                continue
+
+            if os.path.exists(filename):
+                # File exists, no need to continue
+                continue
+
+            def symlink_if_valid(path):
+                f = utils.open_file(path)
+                md5sum = apt_pkg.md5sum(f)
+                f.close()
+
+                fingerprint = (os.stat(path)[stat.ST_SIZE], md5sum)
+                expected = (int(entry['size']), entry['md5sum'])
+
+                if fingerprint != expected:
+                    return False
+
+                dest = os.path.join(target_dir, filename)
+
+                os.symlink(path, dest)
+                symlinked.append(dest)
+
+                return True
+
+            session_ = session
+            if session is None:
+                session_ = DBConn().session()
+
+            found = False
+
+            # Look in the pool
+            for poolfile in get_poolfile_like_name('/%s' % filename, session_):
+                poolfile_path = os.path.join(
+                    poolfile.location.path, poolfile.filename
+                )
+
+                if symlink_if_valid(poolfile_path):
+                    found = True
+                    break
+
+            if session is None:
+                session_.close()
+
+            if found:
+                continue
+
+            # Look in some other queues for the file
+            queues = ('Accepted', 'New', 'Byhand', 'ProposedUpdates',
+                'OldProposedUpdates', 'Embargoed', 'Unembargoed')
+
+            for queue in queues:
+                if not cnf.get('Dir::Queue::%s' % queue):
+                    continue
+
+                queuefile_path = os.path.join(
+                    cnf['Dir::Queue::%s' % queue], filename
+                )
+
+                if not os.path.exists(queuefile_path):
+                    # Does not exist in this queue
+                    continue
+
+                if symlink_if_valid(queuefile_path):
+                    break
+
+        return symlinked
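+
+    # Editor's sketch: callers are expected to remove the returned symlinks
+    # once done with them (as check_lintian below does):
+    #
+    #     symlinked = u.ensure_orig(target_dir='.')
+    #     # ... run tools that need the orig tarballs ...
+    #     for link in symlinked:
+    #         os.unlink(link)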
+
+    ###########################################################################
+
+    def check_lintian(self):
+        cnf = Config()
+
+        # Don't reject binary uploads
+        if not self.pkg.changes['architecture'].has_key('source'):
+            return
+
+        # Only check some distributions
+        valid_dist = False
+        for dist in ('unstable', 'experimental'):
+            if dist in self.pkg.changes['distribution']:
+                valid_dist = True
+                break
+
+        if not valid_dist:
+            return
+
+        tagfile = cnf.get("Dinstall::LintianTags")
+        if tagfile is None:
+            # We don't have a tagfile, so just don't do anything.
+            return
+
+        # Parse the yaml file
+        sourcefile = file(tagfile, 'r')
+        sourcecontent = sourcefile.read()
+        sourcefile.close()
+        try:
+            lintiantags = yaml.load(sourcecontent)['lintian']
+        except yaml.YAMLError, msg:
+            utils.fubar("Can not read the lintian tags file %s, YAML error: %s." % (tagfile, msg))
+            return
+
+        # Try and find all orig mentioned in the .dsc
+        symlinked = self.ensure_orig()
+
+        # Now set up the input file for lintian, which wants exactly one tag
+        # per line.  We put all types of tags in one file and sort through
+        # lintian's output later to see whether a detected tag is fatal or
+        # not, so we only run lintian once even though we might reject on
+        # some tags but not on others.
+        # Additionally, build up a set of the tags themselves.
+        tags = set()
+        (fd, temp_filename) = utils.temp_filename()
+        temptagfile = os.fdopen(fd, 'w')
+        for tagtype in lintiantags:
+            for tag in lintiantags[tagtype]:
+                temptagfile.write("%s\n" % tag)
+                tags.add(tag)
+        temptagfile.close()
+
+        # So now we should look at running lintian at the .changes file, capturing output
+        # to then parse it.
+        command = "lintian --show-overrides --tags-from-file %s %s" % (temp_filename, self.pkg.changes_file)
+        (result, output) = commands.getstatusoutput(command)
+
+        # We are done with lintian, remove our tempfile and any symlinks we created
+        os.unlink(temp_filename)
+        for symlink in symlinked:
+            os.unlink(symlink)
+
+        if (result == 2):
+            utils.warn("lintian failed for %s [return code: %s]." % (self.pkg.changes_file, result))
+            utils.warn(utils.prefix_multi_line_string(output, " [possible output:] "))
+
+        if len(output) == 0:
+            return
+
+        def log(*txt):
+            if self.logger:
+                self.logger.log([self.pkg.changes_file, "check_lintian"] + list(txt))
+
+        # We have lintian output, so this package isn't clean.  Let's parse
+        # it and see whether anything in it warrants a reject.  Lines look like:
+        # W: tzdata: binary-without-manpage usr/sbin/tzconfig
+        for line in output.split('\n'):
+            m = re_parse_lintian.match(line)
+            if m is None:
+                continue
+
+            etype = m.group(1)
+            epackage = m.group(2)
+            etag = m.group(3)
+            etext = m.group(4)
+
+            # So let's check whether we know the tag at all.
+            if etag not in tags:
+                continue
+
+            if etype == 'O':
+                # We know it and it is overriden. Check that override is allowed.
+                if etag in lintiantags['warning']:
+                    # The tag is overriden, and it is allowed to be overriden.
+                    # Don't add a reject message.
+                    pass
+                elif etag in lintiantags['error']:
+                    # The tag is overriden - but is not allowed to be
+                    self.rejects.append("%s: Overriden tag %s found, but this tag may not be overwritten." % (epackage, etag))
+                    log("ftpmaster does not allow tag to be overridable", etag)
+            else:
+                # The tag is known and not overridden: direct reject.
+                self.rejects.append("%s: Found lintian output: '%s %s', automatically rejected package." % (epackage, etag, etext))
+                # Now tell them whether they *might* override it.
+                if etag in lintiantags['warning']:
+                    log("auto rejecting", "overridable", etag)
+                    self.rejects.append("%s: If you have a good reason, you may override this lintian tag." % (epackage))
+                else:
+                    log("auto rejecting", "not overridable", etag)
+
+    ###########################################################################
+    def check_urgency(self):
+        cnf = Config()
+        if self.pkg.changes["architecture"].has_key("source"):
+            if not self.pkg.changes.has_key("urgency"):
+                self.pkg.changes["urgency"] = cnf["Urgency::Default"]
+            self.pkg.changes["urgency"] = self.pkg.changes["urgency"].lower()
+            if self.pkg.changes["urgency"] not in cnf.ValueList("Urgency::Valid"):
+                self.warnings.append("%s is not a valid urgency; it will be treated as %s by testing." % \
+                                     (self.pkg.changes["urgency"], cnf["Urgency::Default"]))
+                self.pkg.changes["urgency"] = cnf["Urgency::Default"]
+
+    ###########################################################################
+
+    # Sanity check the time stamps of files inside debs.
+    # [Files in the near future cause ugly warnings and extreme time
+    #  travel can cause errors on extraction]
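+    # [Both cutoffs are configurable: Dinstall::FutureTimeTravelGrace is a
+    #  number of seconds of allowed clock skew, and Dinstall::PastCutoffYear
+    #  is a year such as "1984" (illustrative value, not necessarily the
+    #  live configuration).]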
+
+    def check_timestamps(self):
+        Cnf = Config()
+
+        future_cutoff = time.time() + int(Cnf["Dinstall::FutureTimeTravelGrace"])
+        past_cutoff = time.mktime(time.strptime(Cnf["Dinstall::PastCutoffYear"],"%Y"))
+        tar = TarTime(future_cutoff, past_cutoff)
+
+        for filename, entry in self.pkg.files.items():
+            if entry["type"] == "deb":
+                tar.reset()
+                try:
+                    deb_file = utils.open_file(filename)
+                    apt_inst.debExtract(deb_file, tar.callback, "control.tar.gz")
+                    deb_file.seek(0)
+                    try:
+                        apt_inst.debExtract(deb_file, tar.callback, "data.tar.gz")
+                    except SystemError, e:
+                        # If we can't find a data.tar.gz, look for data.tar.bz2 instead.
+                        if not re.search(r"Cannot f[ui]nd chunk data.tar.gz$", str(e)):
+                            raise
+                        deb_file.seek(0)
+                        apt_inst.debExtract(deb_file, tar.callback, "data.tar.bz2")
+
+                    deb_file.close()
+
+                    future_files = tar.future_files.keys()
+                    if future_files:
+                        num_future_files = len(future_files)
+                        future_file = future_files[0]
+                        future_date = tar.future_files[future_file]
+                        self.rejects.append("%s: has %s file(s) with a time stamp too far into the future (e.g. %s [%s])."
+                               % (filename, num_future_files, future_file, time.ctime(future_date)))
+
+                    ancient_files = tar.ancient_files.keys()
+                    if ancient_files:
+                        num_ancient_files = len(ancient_files)
+                        ancient_file = ancient_files[0]
+                        ancient_date = tar.ancient_files[ancient_file]
+                        self.rejects.append("%s: has %s file(s) with a time stamp too ancient (e.g. %s [%s])."
+                               % (filename, num_ancient_files, ancient_file, time.ctime(ancient_date)))
+                except:
+                    # sys.exc_type / sys.exc_value are deprecated; use sys.exc_info().
+                    (exc_type, exc_value) = sys.exc_info()[:2]
+                    self.rejects.append("%s: deb contents timestamp check failed [%s: %s]" % (filename, exc_type, exc_value))
+
+    def check_if_upload_is_sponsored(self, uid_email, uid_name):
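+        # Summary of the logic below: the upload is NOT sponsored when the
+        # signing uid's email or name matches Maintainer: or Changed-By:
+        # (except that an empty uid name counts as sponsored). Everything
+        # else is sponsored; for sourceful uploads signed with an email
+        # alias we also record a sponsor address unless the key already
+        # covers the maintainer or changed-by address.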
+        if uid_email in [self.pkg.changes["maintaineremail"], self.pkg.changes["changedbyemail"]]:
+            sponsored = False
+        elif uid_name in [self.pkg.changes["maintainername"], self.pkg.changes["changedbyname"]]:
+            sponsored = False
+            if uid_name == "":
+                sponsored = True
+        else:
+            sponsored = True
+            if ("source" in self.pkg.changes["architecture"] and uid_email and utils.is_email_alias(uid_email)):
+                sponsor_addresses = utils.gpg_get_key_addresses(self.pkg.changes["fingerprint"])
+                if (self.pkg.changes["maintaineremail"] not in sponsor_addresses and
+                    self.pkg.changes["changedbyemail"] not in sponsor_addresses):
+                        self.pkg.changes["sponsoremail"] = uid_email
+
+        return sponsored
+
+
+    ###########################################################################
+    # check_signed_by_key checks
+    ###########################################################################
+
+    def check_signed_by_key(self):
+        """Ensure the .changes is signed by an authorized uploader."""
+        session = DBConn().session()
+
+        # First of all we check that the person has proper upload permissions
+        # and that this upload isn't blocked
+        fpr = get_fingerprint(self.pkg.changes['fingerprint'], session=session)
+
+        if fpr is None:
+            self.rejects.append("Cannot find fingerprint %s" % self.pkg.changes["fingerprint"])
+            return
+
+        # TODO: Check that import-keyring adds UIDs properly
+        if not fpr.uid:
+            self.rejects.append("Cannot find uid for fingerprint %s.  Please contact ftpmaster@debian.org" % fpr.fingerprint)
+            return
+
+        # Check that the fingerprint which uploaded has permission to do so
+        self.check_upload_permissions(fpr, session)
+
+        # Check that this package is not in a transition
+        self.check_transition(session)
+
+        session.close()
+
+
+    def check_upload_permissions(self, fpr, session):
+        # Check any one-off upload blocks
+        self.check_upload_blocks(fpr, session)
+
+        # DM is unfortunately a special case, so we check it first.
+        # (Keys with no source access get more access than DMs in one
+        #  way: DMs can only upload for their packages whether source
+        #  or binary, whereas keys with no source access might be able
+        #  to upload some binaries.)
+        if fpr.source_acl.access_level == 'dm':
+            self.check_dm_upload(fpr, session)
+        else:
+            # Check source-based permissions for other types
+            if self.pkg.changes["architecture"].has_key("source"):
+                if fpr.source_acl.access_level is None:
+                    rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
+                    rej += '\nPlease contact ftpmaster if you think this is incorrect'
+                    self.rejects.append(rej)
+                    return
+            else:
+                # If not a DM, we allow full upload rights
+                uid_email = "%s@debian.org" % (fpr.uid.uid)
+                self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
+
+
+        # Check binary upload permissions
+        # By this point we know that DMs can't have got here unless they
+        # are allowed to deal with the package concerned, so just apply
+        # the normal checks.
+        if fpr.binary_acl.access_level == 'full':
+            return
+
+        # Otherwise we're in the map case
+        tmparches = self.pkg.changes["architecture"].copy()
+        tmparches.pop('source', None)
+
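+        # Each binary_acl_map entry whitelists exactly one architecture for
+        # this fingerprint; anything left in tmparches afterwards was not
+        # authorised.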
+        for bam in fpr.binary_acl_map:
+            tmparches.pop(bam.architecture.arch_string, None)
+
+        if len(tmparches.keys()) > 0:
+            if fpr.binary_reject:
+                rej = ".changes file contains files of architectures not permitted for fingerprint %s" % fpr.fingerprint
+                rej += "\narchitectures involved are: ", ",".join(tmparches.keys())
+                self.rejects.append(rej)
+            else:
+                # TODO: This is where we'll implement reject vs throw away binaries later
+                rej = "Uhm.  I'm meant to throw away the binaries now but that's not implemented yet"
+                rej += "\nPlease complain to ftpmaster@debian.org as this shouldn't have been turned on"
+                rej += "\nFingerprint: %s", (fpr.fingerprint)
+                self.rejects.append(rej)
+
+
+    def check_upload_blocks(self, fpr, session):
+        """Check whether any upload blocks apply to this source, source
+           version, uid / fpr combination"""
+
+        def block_rej_template(fb):
+            rej = 'Manual upload block in place for package %s' % fb.source
+            if fb.version is not None:
+                rej += ', version %s' % fb.version
+            return rej
+
+        for fb in session.query(UploadBlock).filter_by(source = self.pkg.changes['source']).all():
+            # version is None if the block applies to all versions
+            if fb.version is None or fb.version == self.pkg.changes['version']:
+                # Check both fpr and uid - either is enough to cause a reject
+                if fb.fpr is not None:
+                    if fb.fpr.fingerprint == fpr.fingerprint:
+                        self.rejects.append(block_rej_template(fb) + ' for fingerprint %s\nReason: %s' % (fpr.fingerprint, fb.reason))
+                if fb.uid is not None:
+                    if fb.uid == fpr.uid:
+                        self.rejects.append(block_rej_template(fb) + ' for uid %s\nReason: %s' % (fb.uid.uid, fb.reason))
+
+
+    def check_dm_upload(self, fpr, session):
+        # Quoth the GR (http://www.debian.org/vote/2007/vote_003):
+        ## none of the uploaded packages are NEW
+        rej = False
+        for f in self.pkg.files.keys():
+            if self.pkg.files[f].has_key("byhand"):
+                self.rejects.append("%s may not upload BYHAND file %s" % (fpr.uid.uid, f))
+                rej = True
+            if self.pkg.files[f].has_key("new"):
+                self.rejects.append("%s may not upload NEW file %s" % (fpr.uid.uid, f))
+                rej = True
+
+        if rej:
+            return
+
+        ## the most recent version of the package uploaded to unstable or
+        ## experimental includes the field "DM-Upload-Allowed: yes" in the source
+        ## section of its control file
+        q = session.query(DBSource).filter_by(source=self.pkg.changes["source"])
+        q = q.join(SrcAssociation)
+        q = q.join(Suite).filter(Suite.suite_name.in_(['unstable', 'experimental']))
+        q = q.order_by(desc('source.version')).limit(1)
+
+        r = q.all()
+
+        if len(r) != 1:
+            rej = "Could not find existing source package %s in unstable or experimental and this is a DM upload" % self.pkg.changes["source"]
+            self.rejects.append(rej)
+            return
+
+        r = r[0]
+        if not r.dm_upload_allowed:
+            rej = "Source package %s does not have 'DM-Upload-Allowed: yes' in its most recent version (%s)" % (self.pkg.changes["source"], r.version)
+            self.rejects.append(rej)
+            return
+
+        ## the Maintainer: field of the uploaded .changes file corresponds with
+        ## the owner of the key used (ie, non-developer maintainers may not sponsor
+        ## uploads)
+        if self.check_if_upload_is_sponsored(fpr.uid.uid, fpr.uid.name):
+            self.rejects.append("%s (%s) is not authorised to sponsor uploads" % (fpr.uid.uid, fpr.fingerprint))
+
+        ## the most recent version of the package uploaded to unstable or
+        ## experimental lists the uploader in the Maintainer: or Uploaders: fields (ie,
+        ## non-developer maintainers cannot NMU or hijack packages)
+
+        # srcuploaders includes the maintainer
+        accept = False
+        for sup in r.srcuploaders:
+            (rfc822, rfc2047, name, email) = sup.maintainer.get_split_maintainer()
+            # Eww - I hope we never have two people with the same name in Debian
+            if email == fpr.uid.uid or name == fpr.uid.name:
+                accept = True
+                break
+
+        if not accept:
+            self.rejects.append("%s is not in Maintainer or Uploaders of source package %s" % (fpr.uid.uid, self.pkg.changes["source"]))
+            return
+
+        ## none of the packages are being taken over from other source packages
+        for b in self.pkg.changes["binary"].keys():
+            for suite in self.pkg.changes["distribution"].keys():
+                q = session.query(DBSource)
+                q = q.join(DBBinary).filter_by(package=b)
+                q = q.join(BinAssociation).join(Suite).filter_by(suite_name=suite)
+
+                for s in q.all():
+                    if s.source != self.pkg.changes["source"]:
+                        self.rejects.append("%s may not hijack %s from source package %s in suite %s" % (fpr.uid.uid, b, s, suite))
+
+
+
+    def check_transition(self, session):
+        cnf = Config()
+
+        sourcepkg = self.pkg.changes["source"]
+
+        # No sourceful upload -> no need to do anything else, direct return.
+        # We also only deal with unstable uploads, not experimental or those
+        # going to some proposed-updates queue.
+        if "source" not in self.pkg.changes["architecture"] or \
+           "unstable" not in self.pkg.changes["distribution"]:
+            return
+
+        # Also, only check if a transitions file is defined (and exists).
+        transpath = cnf.get("Dinstall::Reject::ReleaseTransitions", "")
+        if transpath == "" or not os.path.exists(transpath):
+            return
+
+        # Parse the yaml file
+        sourcefile = file(transpath, 'r')
+        sourcecontent = sourcefile.read()
+        sourcefile.close()
+        try:
+            transitions = yaml.load(sourcecontent)
+        except yaml.YAMLError, msg:
+            # This shouldn't happen, there is a wrapper to edit the file which
+            # checks it, but we would rather be safe than end up rejecting
+            # everything.
+            utils.warn("Not checking transitions, the transitions file is broken: %s." % (msg))
+            return
+
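+        # The transitions file is a YAML mapping of roughly this shape (an
+        # illustrative sketch based on the keys used below, not a verbatim
+        # copy of any real transitions file):
+        #
+        #   libfoo-transition:
+        #     source: libfoo
+        #     new: 1.2-1
+        #     rm: Some Release Team Member
+        #     reason: libfoo changed its SONAME
+        #     packages:
+        #       - bar
+        #       - baz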
+        # Now look through all defined transitions
+        for trans in transitions:
+            t = transitions[trans]
+            source = t["source"]
+            expected = t["new"]
+
+            # Will be None if nothing is in testing.
+            current = get_source_in_suite(source, "testing", session)
+            if current is not None:
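+                # apt_pkg.VersionCompare(a, b) returns a negative value if a
+                # is older than b, zero if equal and a positive value if newer.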
+                compare = apt_pkg.VersionCompare(current.version, expected)
+
+            if current is None or compare < 0:
+                # This is still valid: the current version in testing is older
+                # than the version we are waiting for, or there is none in
+                # testing yet.
+
+                # Check whether the source we are looking at is affected by this.
+                if sourcepkg in t['packages']:
+                    # The source is affected, let's reject it.
+
+                    rejectmsg = "%s: part of the %s transition.\n\n" % (
+                        sourcepkg, trans)
+
+                    if current is not None:
+                        currentlymsg = "at version %s" % (current.version)
+                    else:
+                        currentlymsg = "not present in testing"
+
+                    rejectmsg += "Transition description: %s\n\n" % (t["reason"])
+
+                    rejectmsg += "\n".join(textwrap.wrap("""Your package
+is part of a testing transition designed to get %s migrated (it is
+currently %s, we need version %s).  This transition is managed by the
+Release Team, and %s is the Release-Team member responsible for it.
+Please mail debian-release@lists.debian.org or contact %s directly if you
+need further assistance.  You might want to upload to experimental until this
+transition is done."""
+                            % (source, currentlymsg, expected, t["rm"], t["rm"])))
+
+                    self.rejects.append(rejectmsg)
+                    return
+
+    ###########################################################################
+    # End check_signed_by_key checks
+    ###########################################################################
 
     def build_summaries(self):
         """ Build a summary of changes the upload introduces. """
@@ -320,6 +1738,7 @@ class Upload(object):
         for bug in bugs:
             summary += "%s " % (bug)
             if action:
+                self.update_subst()
                 self.Subst["__BUG_NUMBER__"] = bug
                 if self.pkg.changes["distribution"].has_key("stable"):
                     self.Subst["__STABLE_WARNING__"] = """
@@ -330,15 +1749,15 @@ The update will eventually make its way into the next released Debian
 distribution."""
                 else:
                     self.Subst["__STABLE_WARNING__"] = ""
-                    mail_message = utils.TemplateSubst(self.Subst, template)
-                    utils.send_mail(mail_message)
+                mail_message = utils.TemplateSubst(self.Subst, template)
+                utils.send_mail(mail_message)
 
                 # Clear up after ourselves
                 del self.Subst["__BUG_NUMBER__"]
                 del self.Subst["__STABLE_WARNING__"]
 
-        if action:
-            self.Logger.log(["closing bugs"] + bugs)
+        if action and self.logger:
+            self.logger.log(["closing bugs"] + bugs)
 
         summary += "\n"
 
@@ -375,7 +1794,7 @@ distribution."""
         self.Subst["__SHORT_SUMMARY__"] = short_summary
 
         for dist in self.pkg.changes["distribution"].keys():
-            announce_list = Cnf.Find("Suite::%s::Announce" % (dist))
+            announce_list = cnf.Find("Suite::%s::Announce" % (dist))
             if announce_list == "" or lists_done.has_key(announce_list):
                 continue
 
@@ -383,6 +1802,7 @@ distribution."""
             summary += "Announcing to %s\n" % (announce_list)
 
             if action:
+                self.update_subst()
                 self.Subst["__ANNOUNCE_LIST_ADDRESS__"] = announce_list
                 if cnf.get("Dinstall::TrackingServer") and \
                    self.pkg.changes["architecture"].has_key("source"):
@@ -402,13 +1822,13 @@ distribution."""
         return summary
 
     ###########################################################################
-
-    def accept (self, summary, short_summary, targetdir=None):
+    @session_wrapper
+    def accept(self, summary, short_summary, session=None):
         """
         Accept an upload.
 
-        This moves all files referenced from the .changes into the I{accepted}
-        queue, sends the accepted mail, announces to lists, closes bugs and
+        This moves all files referenced from the .changes into the pool,
+        sends the accepted mail, announces to lists, closes bugs and
         also checks for override disparities. If enabled it will write out
         the version history for the BTS Version Tracking and will finally call
         L{queue_build}.
@@ -418,37 +1838,93 @@ distribution."""
 
         @type short_summary: string
         @param short_summary: Short summary
-
         """
 
         cnf = Config()
         stats = SummaryStats()
 
-        accepttemplate = os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted')
-
-        if targetdir is None:
-            targetdir = cnf["Dir::Queue::Accepted"]
+        print "Installing."
+        self.logger.log(["installing changes", self.pkg.changes_file])
+
+        # Add the .dsc file to the DB first
+        for newfile, entry in self.pkg.files.items():
+            if entry["type"] == "dsc":
+                dsc_component, dsc_location_id = add_dsc_to_db(self, newfile, session)
+
+        # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
+        for newfile, entry in self.pkg.files.items():
+            if entry["type"] == "deb":
+                add_deb_to_db(self, newfile, session)
+
+        # If this is a sourceful, diff-only upload that is moving
+        # cross-component, we need to copy the .orig files into the new
+        # component too.
+        if self.pkg.changes["architecture"].has_key("source"):
+            for orig_file in self.pkg.orig_files.keys():
+                if not self.pkg.orig_files[orig_file].has_key("id"):
+                    continue # Skip if it's not in the pool
+                orig_file_id = self.pkg.orig_files[orig_file]["id"]
+                if self.pkg.orig_files[orig_file]["location"] == dsc_location_id:
+                    continue # Skip if the location didn't change
+
+                # Do the move
+                oldf = get_poolfile_by_id(orig_file_id, session)
+                old_filename = os.path.join(oldf.location.path, oldf.filename)
+                old_dat = {'size': oldf.filesize,   'md5sum': oldf.md5sum,
+                           'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
+
+                new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
+
+                # TODO: Care about size/md5sum collisions etc
+                (found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
+
+                if newf is None:
+                    utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
+                    newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
+
+                    # TODO: Check that there's only 1 here
+                    source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0]
+                    dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
+                    dscf.poolfile_id = newf.file_id
+                    session.add(dscf)
+                    session.flush()
+
+        # Install the files into the pool
+        for newfile, entry in self.pkg.files.items():
+            destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
+            utils.move(newfile, destination)
+            self.logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
+            stats.accept_bytes += float(entry["size"])
 
-        print "Accepting."
-        self.Logger.log(["Accepting changes", self.pkg.changes_file])
+        # Copy the .changes file across for suites which need it.
+        copy_changes = {}
+        for suite_name in self.pkg.changes["distribution"].keys():
+            if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)):
+                copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = ""
 
-        self.write_dot_dak(targetdir)
+        for dest in copy_changes.keys():
+            utils.copy(self.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
 
-        # Move all the files into the accepted directory
-        utils.move(self.pkg.changes_file, targetdir)
+        # We're done - commit the database changes
+        session.commit()
+        # Our SQL session will automatically start a new transaction after
+        # the last commit
 
-        for name, entry in sorted(self.pkg.files.items()):
-            utils.move(name, targetdir)
-            stats.accept_bytes += float(entry["size"])
+        # Move the .changes into the 'done' directory
+        utils.move(self.pkg.changes_file,
+                   os.path.join(cnf["Dir::Queue::Done"], os.path.basename(self.pkg.changes_file)))
 
-        stats.accept_count += 1
+        if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
+            UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
 
         # Send accept mail, announce to lists, close bugs and check for
         # override disparities
         if not cnf["Dinstall::Options::No-Mail"]:
+            self.update_subst()
             self.Subst["__SUITE__"] = ""
             self.Subst["__SUMMARY__"] = summary
-            mail_message = utils.TemplateSubst(self.Subst, accepttemplate)
+            mail_message = utils.TemplateSubst(self.Subst,
+                                               os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
             utils.send_mail(mail_message)
             self.announce(short_summary, 1)
 
@@ -486,26 +1962,19 @@ distribution."""
             os.rename(temp_filename, filename)
             os.chmod(filename, 0644)
 
-        # Its is Cnf["Dir::Queue::Accepted"] here, not targetdir!
-        # <Ganneff> we do call queue_build too
-        # <mhy> well yes, we'd have had to if we were inserting into accepted
-        # <Ganneff> now. thats database only.
-        # <mhy> urgh, that's going to get messy
-        # <Ganneff> so i make the p-n call to it *also* using accepted/
-        # <mhy> but then the packages will be in the queue_build table without the files being there
-        # <Ganneff> as the buildd queue is only regenerated whenever unchecked runs
-        # <mhy> ah, good point
-        # <Ganneff> so it will work out, as unchecked move it over
-        # <mhy> that's all completely sick
-        # <Ganneff> yes
-
         # This routine returns None on success or an error on failure
-        res = get_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"])
-        if res:
-            utils.fubar(res)
+        # TODO: Replace queue copying using the new queue.add_file_from_pool routine
+        #       and by looking up which queues in suite.copy_queues
+        #res = get_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"])
+        #if res:
+        #    utils.fubar(res)
+
+        session.commit()
 
+        # Finally...
+        stats.accept_count += 1
 
-    def check_override (self):
+    def check_override(self):
         """
         Checks override entries for validity. Mails "Override disparity" warnings,
         if that feature is enabled.
@@ -531,12 +2000,47 @@ distribution."""
 
         overridetemplate = os.path.join(cnf["Dir::Templates"], 'process-unchecked.override-disparity')
 
+        self.update_subst()
         self.Subst["__SUMMARY__"] = summary
         mail_message = utils.TemplateSubst(self.Subst, overridetemplate)
         utils.send_mail(mail_message)
         del self.Subst["__SUMMARY__"]
 
     ###########################################################################
+
+    def remove(self, from_dir=None):
+        """
+        Used (for instance) in p-u to remove the package from unchecked
+
+        Also removes the package from the holding area.
+        """
+        if from_dir is None:
+            from_dir = self.pkg.directory
+        h = Holding()
+
+        for f in self.pkg.files.keys():
+            os.unlink(os.path.join(from_dir, f))
+            if os.path.exists(os.path.join(h.holding_dir, f)):
+                os.unlink(os.path.join(h.holding_dir, f))
+
+        os.unlink(os.path.join(from_dir, self.pkg.changes_file))
+        if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
+            os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
+
+    ###########################################################################
+
+    def move_to_dir(self, dest, perms=0660, changesperms=0664):
+        """
+        Move files to dest with certain perms/changesperms
+        """
+        h = Holding()
+        utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
+                   dest, perms=changesperms)
+        for f in self.pkg.files.keys():
+            utils.move(os.path.join(h.holding_dir, f), dest, perms=perms)
+
+    ###########################################################################
+
     def force_reject(self, reject_files):
         """
         Forcefully move files from the current directory to the
@@ -651,10 +2155,11 @@ distribution."""
 
         rej_template = os.path.join(cnf["Dir::Templates"], "queue.rejected")
 
+        self.update_subst()
         if not manual:
             self.Subst["__REJECTOR_ADDRESS__"] = cnf["Dinstall::MyEmailAddress"]
             self.Subst["__MANUAL_REJECT_MESSAGE__"] = ""
-            self.Subst["__CC__"] = "X-DAK-Rejection: automatic (moo)\nX-Katie-Rejection: automatic (moo)"
+            self.Subst["__CC__"] = "X-DAK-Rejection: automatic (moo)"
             os.write(reason_fd, reject_message)
             reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
         else:
@@ -662,7 +2167,7 @@ distribution."""
             user_email_address = utils.whoami() + " <%s>" % (cnf["Dinstall::MyAdminAddress"])
             self.Subst["__REJECTOR_ADDRESS__"] = user_email_address
             self.Subst["__MANUAL_REJECT_MESSAGE__"] = reject_message
-            self.Subst["__CC__"] = "Cc: " + Cnf["Dinstall::MyEmailAddress"]
+            self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]
             reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
             # Write the rejection email out as the <foo>.reason file
             os.write(reason_fd, reject_mail_message)
@@ -677,12 +2182,13 @@ distribution."""
         if not cnf["Dinstall::Options::No-Mail"]:
             utils.send_mail(reject_mail_message)
 
-        self.Logger.log(["rejected", pkg.changes_file])
+        if self.logger:
+            self.logger.log(["rejected", self.pkg.changes_file])
 
         return 0
 
     ################################################################################
-    def in_override_p(self, package, component, suite, binary_type, file, session=None):
+    def in_override_p(self, package, component, suite, binary_type, filename, session):
         """
         Check if a package already has override entries in the DB
 
@@ -690,16 +2196,16 @@ distribution."""
         @param package: package name
 
         @type component: string
-        @param component: database id of the component, as returned by L{database.get_component_id}
+        @param component: database id of the component
 
         @type suite: int
-        @param suite: database id of the suite, as returned by L{database.get_suite_id}
+        @param suite: database id of the suite
 
         @type binary_type: string
         @param binary_type: type of the package
 
-        @type file: string
-        @param file: filename we check
+        @type filename: string
+        @param filename: filename we check
 
         @return: the database result. But noone cares anyway.
 
@@ -707,9 +2213,6 @@ distribution."""
 
         cnf = Config()
 
-        if session is None:
-            session = DBConn().session()
-
         if binary_type == "": # must be source
             file_type = "dsc"
         else:
@@ -728,31 +2231,12 @@ distribution."""
         # Remember the section and priority so we can check them later if appropriate
         if len(result) > 0:
             result = result[0]
-            self.pkg.files[file]["override section"] = result.section.section
-            self.pkg.files[file]["override priority"] = result.priority.priority
+            self.pkg.files[filename]["override section"] = result.section.section
+            self.pkg.files[filename]["override priority"] = result.priority.priority
             return result
 
         return None
 
-    ################################################################################
-    def reject (self, str, prefix="Rejected: "):
-        """
-        Add C{str} to reject_message. Adds C{prefix}, by default "Rejected: "
-
-        @type str: string
-        @param str: Reject text
-
-        @type prefix: string
-        @param prefix: Prefix text, default Rejected:
-
-        """
-        if str:
-            # Unlike other rejects we add new lines first to avoid trailing
-            # new lines when this message is passed back up to a caller.
-            if self.reject_message:
-                self.reject_message += "\n"
-            self.reject_message += prefix + str
-
     ################################################################################
     def get_anyversion(self, sv_list, suite):
         """
@@ -764,8 +2248,9 @@ distribution."""
 
         Description: TODO
         """
+        Cnf = Config()
         anyversion = None
-        anysuite = [suite] + self.Cnf.ValueList("Suite::%s::VersionChecks::Enhances" % (suite))
+        anysuite = [suite] + Cnf.ValueList("Suite::%s::VersionChecks::Enhances" % (suite))
         for (s, v) in sv_list:
             if s in [ x.lower() for x in anysuite ]:
                 if not anyversion or apt_pkg.VersionCompare(anyversion, v) <= 0:
@@ -775,13 +2260,13 @@ distribution."""
 
     ################################################################################
 
-    def cross_suite_version_check(self, sv_list, file, new_version, sourceful=False):
+    def cross_suite_version_check(self, sv_list, filename, new_version, sourceful=False):
         """
         @type sv_list: list
         @param sv_list: list of (suite, version) tuples to check
 
-        @type file: string
-        @param file: XXX
+        @type filename: string
+        @param filename: XXX
 
         @type new_version: string
         @param new_version: XXX
@@ -806,7 +2291,7 @@ distribution."""
                 vercmp = apt_pkg.VersionCompare(new_version, existent_version)
 
                 if suite in must_be_newer_than and sourceful and vercmp < 1:
-                    self.reject("%s: old version (%s) in %s >= new version (%s) targeted at %s." % (file, existent_version, suite, new_version, target_suite))
+                    self.rejects.append("%s: old version (%s) in %s >= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
 
                 if suite in must_be_older_than and vercmp > -1:
                     cansave = 0
@@ -828,7 +2313,7 @@ distribution."""
                             # than complaining. either way, this isn't a REJECT issue
                             #
                             # And - we really should complain to the dorks who configured dak
-                            self.reject("%s is mapped to, but not enhanced by %s - adding anyways" % (suite, addsuite), "Warning: ")
+                            self.warnings.append("%s is mapped to, but not enhanced by %s - adding anyways" % (suite, addsuite))
                             self.pkg.changes.setdefault("propdistribution", {})
                             self.pkg.changes["propdistribution"][addsuite] = 1
                             cansave = 1
@@ -836,61 +2321,45 @@ distribution."""
                             # not targets_version is true when the package is NEW
                             # we could just stick with the "...old version..." REJECT
                             # for this, I think.
-                            self.reject("Won't propogate NEW packages.")
+                            self.rejects.append("Won't propogate NEW packages.")
                         elif apt_pkg.VersionCompare(new_version, add_version) < 0:
                             # propogation would be redundant. no need to reject though.
-                            self.reject("ignoring versionconflict: %s: old version (%s) in %s <= new version (%s) targeted at %s." % (file, existent_version, suite, new_version, target_suite), "Warning: ")
+                            self.warnings.append("ignoring versionconflict: %s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
                             cansave = 1
                         elif apt_pkg.VersionCompare(new_version, add_version) > 0 and \
                              apt_pkg.VersionCompare(add_version, target_version) >= 0:
                             # propogate!!
-                            self.reject("Propogating upload to %s" % (addsuite), "Warning: ")
+                            self.warnings.append("Propogating upload to %s" % (addsuite))
                             self.pkg.changes.setdefault("propdistribution", {})
                             self.pkg.changes["propdistribution"][addsuite] = 1
                             cansave = 1
 
                     if not cansave:
-                        self.reject("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (file, existent_version, suite, new_version, target_suite))
+                        self.rejects.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
 
     ################################################################################
-
-    def check_binary_against_db(self, file, session=None):
-        """
-
-        """
-
-        if session is None:
-            session = DBConn().session()
-
-        self.reject_message = ""
-
+    def check_binary_against_db(self, filename, session):
         # Ensure version is sane
         q = session.query(BinAssociation)
-        q = q.join(DBBinary).filter(DBBinary.package==self.pkg.files[file]["package"])
-        q = q.join(Architecture).filter(Architecture.arch_string.in_([self.pkg.files[file]["architecture"], 'all']))
+        q = q.join(DBBinary).filter(DBBinary.package==self.pkg.files[filename]["package"])
+        q = q.join(Architecture).filter(Architecture.arch_string.in_([self.pkg.files[filename]["architecture"], 'all']))
 
         self.cross_suite_version_check([ (x.suite.suite_name, x.binary.version) for x in q.all() ],
-                                       file, files[file]["version"], sourceful=False)
+                                       filename, self.pkg.files[filename]["version"], sourceful=False)
 
         # Check for any existing copies of the file
-        q = session.query(DBBinary).filter_by(files[file]["package"])
-        q = q.filter_by(version=files[file]["version"])
-        q = q.join(Architecture).filter_by(arch_string=files[file]["architecture"])
+        q = session.query(DBBinary).filter_by(package=self.pkg.files[filename]["package"])
+        q = q.filter_by(version=self.pkg.files[filename]["version"])
+        q = q.join(Architecture).filter_by(arch_string=self.pkg.files[filename]["architecture"])
 
         if q.count() > 0:
-            self.reject("%s: can not overwrite existing copy already in the archive." % (file))
-
-        return self.reject_message
+            self.rejects.append("%s: can not overwrite existing copy already in the archive." % filename)
 
     ################################################################################
 
-    def check_source_against_db(self, file, session=None):
+    def check_source_against_db(self, filename, session):
         """
         """
-        if session is None:
-            session = DBConn().session()
-
-        self.reject_message = ""
         source = self.pkg.dsc.get("source")
         version = self.pkg.dsc.get("version")
 
@@ -899,23 +2368,23 @@ distribution."""
         q = q.join(DBSource).filter(DBSource.source==source)
 
         self.cross_suite_version_check([ (x.suite.suite_name, x.source.version) for x in q.all() ],
-                                       file, version, sourceful=True)
-
-        return self.reject_message
+                                       filename, version, sourceful=True)
 
     ################################################################################
-    def check_dsc_against_db(self, file):
+    def check_dsc_against_db(self, filename, session):
         """
 
         @warning: NB: this function can remove entries from the 'files' index [if
-         the .orig.tar.gz is a duplicate of the one in the archive]; if
+         the orig tarball is a duplicate of the one in the archive]; if
          you're iterating over 'files' and call this function as part of
          the loop, be sure to add a check to the top of the loop to
          ensure you haven't just tried to dereference the deleted entry.
 
         """
-        self.reject_message = ""
-        self.pkg.orig_tar_gz = None
+
+        Cnf = Config()
+        self.pkg.orig_files = {} # XXX: do we need to clear it?
+        orig_files = self.pkg.orig_files
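+        # Each orig_files entry maps an orig tarball name to a dict which may
+        # gain "path" (location on disk), "id" (pool file id) and "location"
+        # (location id) as the checks below find the file.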
 
         # Try and find all files mentioned in the .dsc.  This has
         # to work harder to cope with the multiple possible
@@ -930,7 +2399,7 @@ distribution."""
                 found = "%s in incoming" % (dsc_name)
 
                 # Check the file does not already exist in the archive
-                ql = get_poolfile_like_name(dsc_name)
+                ql = get_poolfile_like_name(dsc_name, session)
 
                 # Strip out anything that isn't '%s' or '/%s$'
                 for i in ql:
@@ -949,25 +2418,28 @@ distribution."""
                 if len(ql) > 0:
                     # Ignore exact matches for .orig.tar.gz
                     match = 0
-                    if dsc_name.endswith(".orig.tar.gz"):
+                    if re_is_orig_source.match(dsc_name):
                         for i in ql:
                             if self.pkg.files.has_key(dsc_name) and \
                                int(self.pkg.files[dsc_name]["size"]) == int(i.filesize) and \
                                self.pkg.files[dsc_name]["md5sum"] == i.md5sum:
-                                self.reject("ignoring %s, since it's already in the archive." % (dsc_name), "Warning: ")
+                                self.warnings.append("ignoring %s, since it's already in the archive." % (dsc_name))
                                 # TODO: Don't delete the entry, just mark it as not needed
                                 # This would fix the stupidity of changing something we often iterate over
                                 # whilst we're doing it
-                                del files[dsc_name]
-                                self.pkg.orig_tar_gz = os.path.join(i.location.path, i.filename)
+                                del self.pkg.files[dsc_name]
+                                dsc_entry["files id"] = i.file_id
+                                if not orig_files.has_key(dsc_name):
+                                    orig_files[dsc_name] = {}
+                                orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
                                 match = 1
 
                     if not match:
-                        self.reject("can not overwrite existing copy of '%s' already in the archive." % (dsc_name))
+                        self.rejects.append("can not overwrite existing copy of '%s' already in the archive." % (dsc_name))
 
-            elif dsc_name.endswith(".orig.tar.gz"):
+            elif re_is_orig_source.match(dsc_name):
                 # Check in the pool
-                ql = get_poolfile_like_name(dsc_name)
+                ql = get_poolfile_like_name(dsc_name, session)
 
                 # Strip out anything that isn't '%s' or '/%s$'
                 # TODO: Shouldn't we just search for things which end with our string explicitly in the SQL?
@@ -999,36 +2471,199 @@ distribution."""
                     old_file_fh.close()
                     actual_size = os.stat(old_file)[stat.ST_SIZE]
                     found = old_file
-                    suite_type = f.location.archive_type
+                    suite_type = x.location.archive_type
                     # need this for updating dsc_files in install()
-                    dsc_entry["files id"] = f.file_id
+                    dsc_entry["files id"] = x.file_id
                     # See install() in process-accepted...
-                    self.pkg.orig_tar_id = f.file_id
-                    self.pkg.orig_tar_gz = old_file
-                    self.pkg.orig_tar_location = f.location.location_id
+                    if not orig_files.has_key(dsc_name):
+                        orig_files[dsc_name] = {}
+                    orig_files[dsc_name]["id"] = x.file_id
+                    orig_files[dsc_name]["path"] = old_file
+                    orig_files[dsc_name]["location"] = x.location.location_id
                 else:
                     # TODO: Record the queues and info in the DB so we don't hardcode all this crap
                     # Not there? Check the queue directories...
                     for directory in [ "Accepted", "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
-                        in_otherdir = os.path.join(self.Cnf["Dir::Queue::%s" % (directory)], dsc_name)
+                        if not Cnf.has_key("Dir::Queue::%s" % (directory)):
+                            continue
+                        in_otherdir = os.path.join(Cnf["Dir::Queue::%s" % (directory)], dsc_name)
                         if os.path.exists(in_otherdir):
                             in_otherdir_fh = utils.open_file(in_otherdir)
                             actual_md5 = apt_pkg.md5sum(in_otherdir_fh)
                             in_otherdir_fh.close()
                             actual_size = os.stat(in_otherdir)[stat.ST_SIZE]
                             found = in_otherdir
-                            self.pkg.orig_tar_gz = in_otherdir
+                            if not orig_files.has_key(dsc_name):
+                                orig_files[dsc_name] = {}
+                            orig_files[dsc_name]["path"] = in_otherdir
 
                     if not found:
-                        self.reject("%s refers to %s, but I can't find it in the queue or in the pool." % (file, dsc_name))
-                        self.pkg.orig_tar_gz = -1
+                        self.rejects.append("%s refers to %s, but I can't find it in the queue or in the pool." % (filename, dsc_name))
                         continue
             else:
-                self.reject("%s refers to %s, but I can't find it in the queue." % (file, dsc_name))
+                self.rejects.append("%s refers to %s, but I can't find it in the queue." % (filename, dsc_name))
                 continue
             if actual_md5 != dsc_entry["md5sum"]:
-                self.reject("md5sum for %s doesn't match %s." % (found, file))
+                self.rejects.append("md5sum for %s doesn't match %s." % (found, filename))
             if actual_size != int(dsc_entry["size"]):
-                self.reject("size for %s doesn't match %s." % (found, file))
+                self.rejects.append("size for %s doesn't match %s." % (found, filename))
+
+    ################################################################################
+    # This is used by process-new and process-holding to recheck a changes file
+    # at the time we're running.  It mainly wraps various other internal functions
+    # and is similar to accepted_checks - these should probably be tidied up
+    # and combined
+    def recheck(self, session):
+        cnf = Config()
+        for f in self.pkg.files.keys():
+            # The .orig.tar.gz can disappear out from under us if it's a
+            # duplicate of one in the archive.
+            if not self.pkg.files.has_key(f):
+                continue
+
+            entry = self.pkg.files[f]
+
+            # Check that the source still exists
+            if entry["type"] == "deb":
+                source_version = entry["source version"]
+                source_package = entry["source package"]
+                if not self.pkg.changes["architecture"].has_key("source") \
+                   and not source_exists(source_package, source_version, self.pkg.changes["distribution"].keys(), session):
+                    source_epochless_version = re_no_epoch.sub('', source_version)
+                    dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
+                    found = False
+                    for q in ["Accepted", "Embargoed", "Unembargoed", "Newstage"]:
+                        if cnf.has_key("Dir::Queue::%s" % (q)):
+                            if os.path.exists(cnf["Dir::Queue::%s" % (q)] + '/' + dsc_filename):
+                                found = True
+                    if not found:
+                        self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, f))
 
-        return (self.reject_message, None)
+            # Version and file overwrite checks
+            if entry["type"] == "deb":
+                self.check_binary_against_db(f, session)
+            elif entry["type"] == "dsc":
+                self.check_source_against_db(f, session)
+                self.check_dsc_against_db(f, session)
+
+    ################################################################################
+    def accepted_checks(self, overwrite_checks, session):
+        # Recheck anything that relies on the database; since that's not
+        # frozen between accept and our run time when called from p-a.
+
+        # overwrite_checks is set to False when installing to stable/oldstable
+
+        propagate = {}
+        nopropagate = {}
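+        # For every file and every suite in propdistribution we record below
+        # whether the override lookup succeeded (propagate) or failed
+        # (nopropagate); a suite is added to the distribution list only if no
+        # lookup failed for it.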
+
+        # Find the .dsc (again)
+        dsc_filename = None
+        for f in self.pkg.files.keys():
+            if self.pkg.files[f]["type"] == "dsc":
+                dsc_filename = f
+
+        for checkfile in self.pkg.files.keys():
+            # The .orig.tar.gz can disappear out from under us if it's a
+            # duplicate of one in the archive.
+            if not self.pkg.files.has_key(checkfile):
+                continue
+
+            entry = self.pkg.files[checkfile]
+
+            # Check that the source still exists
+            if entry["type"] == "deb":
+                source_version = entry["source version"]
+                source_package = entry["source package"]
+                if not self.pkg.changes["architecture"].has_key("source") \
+                   and not source_exists(source_package, source_version, self.pkg.changes["distribution"].keys(), session):
+                    self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, checkfile))
+
+            # Version and file overwrite checks
+            if overwrite_checks:
+                if entry["type"] == "deb":
+                    self.check_binary_against_db(checkfile, session)
+                elif entry["type"] == "dsc":
+                    self.check_source_against_db(checkfile, session)
+                    self.check_dsc_against_db(dsc_filename, session)
+
+            # propagate if it is in the override tables:
+            for suite in self.pkg.changes.get("propdistribution", {}).keys():
+                if self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile, session):
+                    propagate[suite] = 1
+                else:
+                    nopropagate[suite] = 1
+
+        for suite in propagate.keys():
+            if suite in nopropagate:
+                continue
+            self.pkg.changes["distribution"][suite] = 1
+
+        for checkfile in self.pkg.files.keys():
+            # Without this lookup, entry would be left over from the loop
+            # above and refer to the wrong file.
+            entry = self.pkg.files[checkfile]
+            # Check the package is still in the override tables
+            for suite in self.pkg.changes["distribution"].keys():
+                if not self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile, session):
+                    self.rejects.append("%s is NEW for %s." % (checkfile, suite))
+
+    ################################################################################
+    # This is not really a reject, but an unaccept, but since a) the code for
+    # that is non-trivial (reopen bugs, unannounce etc.), b) this should be
+    # extremely rare, for now we'll go with whining at our admin folks...
+
+    def do_unaccept(self):
+        cnf = Config()
+
+        self.update_subst()
+        self.Subst["__REJECTOR_ADDRESS__"] = cnf["Dinstall::MyEmailAddress"]
+        self.Subst["__REJECT_MESSAGE__"] = self.package_info()
+        self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]
+        self.Subst["__BCC__"] = "X-DAK: dak process-accepted"
+        if cnf.has_key("Dinstall::Bcc"):
+            self.Subst["__BCC__"] += "\nBcc: %s" % (cnf["Dinstall::Bcc"])
+
+        template = os.path.join(cnf["Dir::Templates"], "process-accepted.unaccept")
+
+        reject_mail_message = utils.TemplateSubst(self.Subst, template)
+
+        # Write the rejection email out as the <foo>.reason file
+        reason_filename = os.path.basename(self.pkg.changes_file[:-8]) + ".reason"
+        reject_filename = os.path.join(cnf["Dir::Queue::Reject"], reason_filename)
+
+        # If we fail here someone is probably trying to exploit the race
+        # so let's just raise an exception ...
+        if os.path.exists(reject_filename):
+            os.unlink(reject_filename)
+
+        fd = os.open(reject_filename, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0644)
+        os.write(fd, reject_mail_message)
+        os.close(fd)
+
+        utils.send_mail(reject_mail_message)
+
+        del self.Subst["__REJECTOR_ADDRESS__"]
+        del self.Subst["__REJECT_MESSAGE__"]
+        del self.Subst["__CC__"]
+
+    ################################################################################
+    # If any file of an upload has a recent mtime then chances are good
+    # the file is still being uploaded.
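+    # Dinstall::SkipTime is a threshold in seconds; anything modified more
+    # recently than that is assumed to still be in transit.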
+
+    def upload_too_new(self):
+        cnf = Config()
+        too_new = False
+        # Move back to the original directory to get accurate time stamps
+        cwd = os.getcwd()
+        os.chdir(self.pkg.directory)
+        file_list = self.pkg.files.keys()
+        file_list.extend(self.pkg.dsc_files.keys())
+        file_list.append(self.pkg.changes_file)
+        for f in file_list:
+            try:
+                last_modified = time.time()-os.path.getmtime(f)
+                if last_modified < int(cnf["Dinstall::SkipTime"]):
+                    too_new = True
+                    break
+            except OSError:
+                # The file may have vanished in the meantime; ignore it.
+                pass
+
+        os.chdir(cwd)
+        return too_new