Merge remote-tracking branch 'ansgar/pu/multiarchive-2'
diff --git a/daklib/dbconn.py b/daklib/dbconn.py
old mode 100755
new mode 100644
index d39f8c5..293f4dc
 
 ################################################################################
 
+import apt_pkg
 import os
 from os.path import normpath
 import re
 import psycopg2
 import traceback
 import commands
+import signal
 
 try:
     # python >= 2.6
@@ -72,13 +74,16 @@ from sqlalchemy.orm.exc import NoResultFound
 # in the database
 from config import Config
 from textutils import fix_maintainer
-from dak_exceptions import DBUpdateError, NoSourceFieldError
+from dak_exceptions import DBUpdateError, NoSourceFieldError, FileExistsError
 
 # suppress some deprecation warnings in squeeze related to sqlalchemy
 import warnings
 warnings.filterwarnings('ignore', \
     "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
     SADeprecationWarning)
+warnings.filterwarnings('ignore', \
+    "Predicate of partial index .* ignored during reflection", \
+    SAWarning)
 
 
 ################################################################################
@@ -105,11 +110,11 @@ class DebVersion(UserDefinedType):
         return None
 
 sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version in ["0.5", "0.6"]:
+if sa_major_version in ["0.5", "0.6", "0.7"]:
     from sqlalchemy.databases import postgres
     postgres.ischema_names['debversion'] = DebVersion
 else:
-    raise Exception("dak only ported to SQLA versions 0.5 and 0.6.  See daklib/dbconn.py")
+    raise Exception("dak only ported to SQLA versions 0.5 to 0.7.  See daklib/dbconn.py")
 
 ################################################################################
 
@@ -475,6 +480,19 @@ __all__.append('get_archive')
 
 ################################################################################
 
+class ArchiveFile(object):
+    def __init__(self, archive=None, component=None, file=None):
+        self.archive = archive
+        self.component = component
+        self.file = file
+    @property
+    def path(self):
+        return os.path.join(self.archive.path, 'pool', self.component.component_name, self.file.filename)
+
+__all__.append('ArchiveFile')
+
+################################################################################
+
 class BinContents(ORMObject):
     def __init__(self, file = None, binary = None):
         self.file = file
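The ArchiveFile class added above replaces the old Location-based lookup: a pool file's on-disk path is now composed from archive, component, and pool filename (PoolFile.fullpath, further down, resolves through the same files_archive_map table). A minimal sketch of that composition, using hypothetical stand-in rows:

```python
import os

# Hypothetical stand-ins for the mapped Archive/Component/PoolFile rows.
class StubArchive(object):
    def __init__(self, path): self.path = path

class StubComponent(object):
    def __init__(self, name): self.component_name = name

class StubPoolFile(object):
    def __init__(self, filename): self.filename = filename

af = ArchiveFile(archive=StubArchive('/srv/ftp-master.debian.org/ftp'),
                 component=StubComponent('main'),
                 file=StubPoolFile('d/dak/dak_1.0.dsc'))
# <archive.path>/pool/<component_name>/<pool filename>:
print af.path  # /srv/ftp-master.debian.org/ftp/pool/main/d/dak/dak_1.0.dsc
```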
@@ -487,10 +505,15 @@ __all__.append('BinContents')
 
 ################################################################################
 
+def subprocess_setup():
+    # Python installs a SIGPIPE handler by default. This is usually not what
+    # non-Python subprocesses expect.
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
 class DBBinary(ORMObject):
     def __init__(self, package = None, source = None, version = None, \
         maintainer = None, architecture = None, poolfile = None, \
-        binarytype = 'deb'):
+        binarytype = 'deb', fingerprint=None):
         self.package = package
         self.source = source
         self.version = version
@@ -498,6 +521,7 @@ class DBBinary(ORMObject):
         self.architecture = architecture
         self.poolfile = poolfile
         self.binarytype = binarytype
+        self.fingerprint = fingerprint
 
     @property
     def pkid(self):
@@ -514,9 +538,6 @@ class DBBinary(ORMObject):
 
     metadata = association_proxy('key', 'value')
 
-    def get_component_name(self):
-        return self.poolfile.location.component.component_name
-
     def scan_contents(self):
         '''
         Yields the contents of the package. Only regular files are yielded and
@@ -525,7 +546,8 @@ class DBBinary(ORMObject):
         package does not contain any regular file.
         '''
         fullpath = self.poolfile.fullpath
-        dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE)
+        dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE,
+            preexec_fn = subprocess_setup)
         tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
         for member in tar.getmembers():
             if not member.isdir():
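scan_contents pipes `dpkg-deb --fsys-tarfile` straight into a streaming TarFile, which is why subprocess_setup matters: Python ignores SIGPIPE by default, and an ignored disposition is inherited across exec, so without the reset dpkg-deb would see EPIPE write errors instead of terminating quietly when the reader stops early. A condensed sketch of the pattern (the package path is illustrative):

```python
import signal
from subprocess import Popen, PIPE
from tarfile import TarFile

def subprocess_setup():
    # Restore the default SIGPIPE disposition in the child process.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

dpkg = Popen(['dpkg-deb', '--fsys-tarfile', '/tmp/example.deb'],
             stdout=PIPE, preexec_fn=subprocess_setup)
tar = TarFile.open(fileobj=dpkg.stdout, mode='r|')  # 'r|' = forward-only stream
for member in tar.getmembers():
    if not member.isdir():
        print member.name
dpkg.stdout.close()
dpkg.wait()
```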
@@ -547,10 +569,10 @@ class DBBinary(ORMObject):
         @rtype: text
         @return: stanza text of the control section.
         '''
-        import apt_inst
+        import utils
         fullpath = self.poolfile.fullpath
         deb_file = open(fullpath, 'r')
-        stanza = apt_inst.debExtractControl(deb_file)
+        stanza = utils.deb_extract_control(deb_file)
         deb_file.close()
 
         return stanza
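read_control now goes through utils.deb_extract_control instead of calling apt_inst directly. A plausible shape for such a helper, assuming python-apt's apt_inst.DebFile API; dak's actual utils implementation may differ:

```python
import apt_inst

def deb_extract_control(fh):
    # Return the control stanza of the .deb opened at fh as a string.
    return apt_inst.DebFile(fh).control.extractdata("control")
```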
@@ -614,7 +636,7 @@ def get_component_by_package_suite(package, suite_list, arch_list=[], session=No
     if binary is None:
         return None
     else:
-        return binary.get_component_name()
+        return binary.poolfile.component.component_name
 
 __all__.append('get_component_by_package_suite')
 
@@ -642,45 +664,6 @@ __all__.append('BinaryACLMap')
 
 ################################################################################
 
-MINIMAL_APT_CONF="""
-Dir
-{
-   ArchiveDir "%(archivepath)s";
-   OverrideDir "%(overridedir)s";
-   CacheDir "%(cachedir)s";
-};
-
-Default
-{
-   Packages::Compress ". bzip2 gzip";
-   Sources::Compress ". bzip2 gzip";
-   DeLinkLimit 0;
-   FileMode 0664;
-}
-
-bindirectory "incoming"
-{
-   Packages "Packages";
-   Contents " ";
-
-   BinOverride "override.sid.all3";
-   BinCacheDB "packages-accepted.db";
-
-   FileList "%(filelist)s";
-
-   PathPrefix "";
-   Packages::Extensions ".deb .udeb";
-};
-
-bindirectory "incoming/"
-{
-   Sources "Sources";
-   BinOverride "override.sid.all3";
-   SrcOverride "override.sid.all3.src";
-   FileList "%(filelist)s";
-};
-"""
-
 class BuildQueue(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -688,367 +671,8 @@ class BuildQueue(object):
     def __repr__(self):
         return '<BuildQueue %s>' % self.queue_name
 
-    def write_metadata(self, starttime, force=False):
-        # Do we write out metafiles?
-        if not (force or self.generate_metadata):
-            return
-
-        session = DBConn().session().object_session(self)
-
-        fl_fd = fl_name = ac_fd = ac_name = None
-        tempdir = None
-        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
-        startdir = os.getcwd()
-
-        try:
-            # Grab files we want to include
-            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
-            newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
-            # Write file list with newer files
-            (fl_fd, fl_name) = mkstemp()
-            for n in newer:
-                os.write(fl_fd, '%s\n' % n.fullpath)
-            os.close(fl_fd)
-
-            cnf = Config()
-
-            # Write minimal apt.conf
-            # TODO: Remove hardcoding from template
-            (ac_fd, ac_name) = mkstemp()
-            os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
-                                                'filelist': fl_name,
-                                                'cachedir': cnf["Dir::Cache"],
-                                                'overridedir': cnf["Dir::Override"],
-                                                })
-            os.close(ac_fd)
-
-            # Run apt-ftparchive generate
-            os.chdir(os.path.dirname(ac_name))
-            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
-
-            # Run apt-ftparchive release
-            # TODO: Eww - fix this
-            bname = os.path.basename(self.path)
-            os.chdir(self.path)
-            os.chdir('..')
-
-            # We have to remove the Release file otherwise it'll be included in the
-            # new one
-            try:
-                os.unlink(os.path.join(bname, 'Release'))
-            except OSError:
-                pass
-
-            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
-
-            # Crude hack with open and append, but this whole section is and should be redone.
-            if self.notautomatic:
-                release=open("Release", "a")
-                release.write("NotAutomatic: yes")
-                release.close()
-
-            # Sign if necessary
-            if self.signingkey:
-                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
-                if cnf.has_key("Dinstall::SigningPubKeyring"):
-                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
-
-                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
-
-            # Move the files if we got this far
-            os.rename('Release', os.path.join(bname, 'Release'))
-            if self.signingkey:
-                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
-
-        # Clean up any left behind files
-        finally:
-            os.chdir(startdir)
-            if fl_fd:
-                try:
-                    os.close(fl_fd)
-                except OSError:
-                    pass
-
-            if fl_name:
-                try:
-                    os.unlink(fl_name)
-                except OSError:
-                    pass
-
-            if ac_fd:
-                try:
-                    os.close(ac_fd)
-                except OSError:
-                    pass
-
-            if ac_name:
-                try:
-                    os.unlink(ac_name)
-                except OSError:
-                    pass
-
-    def clean_and_update(self, starttime, Logger, dryrun=False):
-        """WARNING: This routine commits for you"""
-        session = DBConn().session().object_session(self)
-
-        if self.generate_metadata and not dryrun:
-            self.write_metadata(starttime)
-
-        # Grab files older than our execution time
-        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
-        older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
-
-        for o in older:
-            killdb = False
-            try:
-                if dryrun:
-                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
-                else:
-                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
-                    os.unlink(o.fullpath)
-                    killdb = True
-            except OSError, e:
-                # If it wasn't there, don't worry
-                if e.errno == ENOENT:
-                    killdb = True
-                else:
-                    # TODO: Replace with proper logging call
-                    Logger.log(["E: Could not remove %s" % o.fullpath])
-
-            if killdb:
-                session.delete(o)
-
-        session.commit()
-
-        for f in os.listdir(self.path):
-            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
-                continue
-
-            if not self.contains_filename(f):
-                fp = os.path.join(self.path, f)
-                if dryrun:
-                    Logger.log(["I: Would remove unused link %s" % fp])
-                else:
-                    Logger.log(["I: Removing unused link %s" % fp])
-                    try:
-                        os.unlink(fp)
-                    except OSError:
-                        Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
-
-    def contains_filename(self, filename):
-        """
-        @rtype Boolean
-        @returns True if filename is supposed to be in the queue; False otherwise
-        """
-        session = DBConn().session().object_session(self)
-        if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
-            return True
-        elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
-            return True
-        return False
-
-    def add_file_from_pool(self, poolfile):
-        """Copies a file into the pool.  Assumes that the PoolFile object is
-        attached to the same SQLAlchemy session as the Queue object is.
-
-        The caller is responsible for committing after calling this function."""
-        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-
-        # Check if we have a file of this name or this ID already
-        for f in self.queuefiles:
-            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
-               (f.poolfile is not None and f.poolfile.filename == poolfile_basename):
-                   # In this case, update the BuildQueueFile entry so we
-                   # don't remove it too early
-                   f.lastused = datetime.now()
-                   DBConn().session().object_session(poolfile).add(f)
-                   return f
-
-        # Prepare BuildQueueFile object
-        qf = BuildQueueFile()
-        qf.build_queue_id = self.queue_id
-        qf.lastused = datetime.now()
-        qf.filename = poolfile_basename
-
-        targetpath = poolfile.fullpath
-        queuepath = os.path.join(self.path, poolfile_basename)
-
-        try:
-            if self.copy_files:
-                # We need to copy instead of symlink
-                import utils
-                utils.copy(targetpath, queuepath)
-                # NULL in the fileid field implies a copy
-                qf.fileid = None
-            else:
-                os.symlink(targetpath, queuepath)
-                qf.fileid = poolfile.file_id
-        except OSError:
-            return None
-
-        # Get the same session as the PoolFile is using and add the qf to it
-        DBConn().session().object_session(poolfile).add(qf)
-
-        return qf
-
-    def add_changes_from_policy_queue(self, policyqueue, changes):
-        """
-        Copies a changes from a policy queue together with its poolfiles.
-
-        @type policyqueue: PolicyQueue
-        @param policyqueue: policy queue to copy the changes from
-
-        @type changes: DBChange
-        @param changes: changes to copy to this build queue
-        """
-        for policyqueuefile in changes.files:
-            self.add_file_from_policy_queue(policyqueue, policyqueuefile)
-        for poolfile in changes.poolfiles:
-            self.add_file_from_pool(poolfile)
-
-    def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
-        """
-        Copies a file from a policy queue.
-        Assumes that the policyqueuefile is attached to the same SQLAlchemy
-        session as the Queue object is.  The caller is responsible for
-        committing after calling this function.
-
-        @type policyqueue: PolicyQueue
-        @param policyqueue: policy queue to copy the file from
-
-        @type policyqueuefile: ChangePendingFile
-        @param policyqueuefile: file to be added to the build queue
-        """
-        session = DBConn().session().object_session(policyqueuefile)
-
-        # Is the file already there?
-        try:
-            f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
-            f.lastused = datetime.now()
-            return f
-        except NoResultFound:
-            pass # continue below
-
-        # We have to add the file.
-        f = BuildQueuePolicyFile()
-        f.build_queue = self
-        f.file = policyqueuefile
-        f.filename = policyqueuefile.filename
-
-        source = os.path.join(policyqueue.path, policyqueuefile.filename)
-        target = f.fullpath
-        try:
-            # Always copy files from policy queues as they might move around.
-            import utils
-            utils.copy(source, target)
-        except OSError:
-            return None
-
-        session.add(f)
-        return f
-
 __all__.append('BuildQueue')
 
-@session_wrapper
-def get_build_queue(queuename, session=None):
-    """
-    Returns BuildQueue object for given C{queue name}, creating it if it does not
-    exist.
-
-    @type queuename: string
-    @param queuename: The name of the queue
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: BuildQueue
-    @return: BuildQueue object for the given queue
-    """
-
-    q = session.query(BuildQueue).filter_by(queue_name=queuename)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_build_queue')
-
-################################################################################
-
-class BuildQueueFile(object):
-    """
-    BuildQueueFile represents a file in a build queue coming from a pool.
-    """
-
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
-
-    @property
-    def fullpath(self):
-        return os.path.join(self.buildqueue.path, self.filename)
-
-
-__all__.append('BuildQueueFile')
-
-################################################################################
-
-class BuildQueuePolicyFile(object):
-    """
-    BuildQueuePolicyFile represents a file in a build queue that comes from a
-    policy queue (and not a pool).
-    """
-
-    def __init__(self, *args, **kwargs):
-        pass
-
-    #@property
-    #def filename(self):
-    #    return self.file.filename
-
-    @property
-    def fullpath(self):
-        return os.path.join(self.build_queue.path, self.filename)
-
-__all__.append('BuildQueuePolicyFile')
-
-################################################################################
-
-class ChangePendingBinary(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
-
-__all__.append('ChangePendingBinary')
-
-################################################################################
-
-class ChangePendingFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingFile %s>' % self.change_pending_file_id
-
-__all__.append('ChangePendingFile')
-
-################################################################################
-
-class ChangePendingSource(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingSource %s>' % self.change_pending_source_id
-
-__all__.append('ChangePendingSource')
-
 ################################################################################
 
 class Component(ORMObject):
@@ -1069,7 +693,7 @@ class Component(ORMObject):
 
     def properties(self):
         return ['component_name', 'component_id', 'description', \
-            'location_count', 'meets_dfsg', 'overrides_count']
+            'meets_dfsg', 'overrides_count']
 
     def not_null_constraints(self):
         return ['component_name']
@@ -1100,6 +724,47 @@ def get_component(component, session=None):
 
 __all__.append('get_component')
 
+@session_wrapper
+def get_mapped_component(component_name, session=None):
+    """get component after mappings
+
+    Evaluate component mappings from ComponentMappings in dak.conf for the
+    given component name.
+
+    @todo: ansgar wants to get rid of this. It's currently only used for
+           the security archive
+
+    @type  component_name: str
+    @param component_name: component name
+
+    @param session: database session
+
+    @rtype:  L{daklib.dbconn.Component} or C{None}
+    @return: component after applying maps or C{None}
+    """
+    cnf = Config()
+    for m in cnf.value_list("ComponentMappings"):
+        (src, dst) = m.split()
+        if component_name == src:
+            component_name = dst
+    component = session.query(Component).filter_by(component_name=component_name).first()
+    return component
+
+__all__.append('get_mapped_component')
+
+@session_wrapper
+def get_component_names(session=None):
+    """
+    Returns list of strings of component names.
+
+    @rtype: list
+    @return: list of strings of component names
+    """
+
+    return [ x.component_name for x in session.query(Component).all() ]
+
+__all__.append('get_component_names')
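ComponentMappings entries, as consumed by get_mapped_component above, are whitespace-separated "source destination" pairs; each matching pair rewrites the name before the final database lookup. A toy run of the loop, with a hypothetical mapping value:

```python
# dak.conf might carry e.g.:  ComponentMappings { "contrib/non-free non-free"; };
mappings = ["contrib/non-free non-free"]

component_name = "contrib/non-free"
for m in mappings:
    (src, dst) = m.split()
    if component_name == src:
        component_name = dst
print component_name  # non-free
```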
+
 ################################################################################
 
 class DBConfig(object):
@@ -1364,82 +1029,58 @@ __all__.append('ExternalOverride')
 ################################################################################
 
 class PoolFile(ORMObject):
-    def __init__(self, filename = None, location = None, filesize = -1, \
+    def __init__(self, filename = None, filesize = -1, \
         md5sum = None):
         self.filename = filename
-        self.location = location
         self.filesize = filesize
         self.md5sum = md5sum
 
     @property
     def fullpath(self):
-        return os.path.join(self.location.path, self.filename)
+        session = DBConn().session().object_session(self)
+        af = session.query(ArchiveFile).join(Archive).filter(ArchiveFile.file == self).first()
+        return af.path
+
+    @property
+    def component(self):
+        session = DBConn().session().object_session(self)
+        component_id = session.query(ArchiveFile.component_id).filter(ArchiveFile.file == self) \
+                              .group_by(ArchiveFile.component_id).one()
+        return session.query(Component).get(component_id)
+
+    @property
+    def basename(self):
+        return os.path.basename(self.filename)
 
     def is_valid(self, filesize = -1, md5sum = None):
         return self.filesize == long(filesize) and self.md5sum == md5sum
 
     def properties(self):
         return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
-            'sha256sum', 'location', 'source', 'binary', 'last_used']
+            'sha256sum', 'source', 'binary', 'last_used']
 
     def not_null_constraints(self):
-        return ['filename', 'md5sum', 'location']
-
-__all__.append('PoolFile')
-
-@session_wrapper
-def check_poolfile(filename, filesize, md5sum, location_id, session=None):
-    """
-    Returns a tuple:
-    (ValidFileFound [boolean], PoolFile object or None)
-
-    @type filename: string
-    @param filename: the filename of the file to check against the DB
-
-    @type filesize: int
-    @param filesize: the size of the file to check against the DB
-
-    @type md5sum: string
-    @param md5sum: the md5sum of the file to check against the DB
-
-    @type location_id: int
-    @param location_id: the id of the location to look in
-
-    @rtype: tuple
-    @return: Tuple of length 2.
-                 - If valid pool file found: (C{True}, C{PoolFile object})
-                 - If valid pool file not found:
-                     - (C{False}, C{None}) if no file found
-                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
-    """
-
-    poolfile = session.query(Location).get(location_id). \
-        files.filter_by(filename=filename).first()
-    valid = False
-    if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
-        valid = True
-
-    return (valid, poolfile)
+        return ['filename', 'md5sum']
 
-__all__.append('check_poolfile')
-
-# TODO: the implementation can trivially be inlined at the place where the
-# function is called
-@session_wrapper
-def get_poolfile_by_id(file_id, session=None):
-    """
-    Returns a PoolFile objects or None for the given id
+    def identical_to(self, filename):
+        """
+        Compare size and hash with the given file.
 
-    @type file_id: int
-    @param file_id: the id of the file to look for
+        @rtype: bool
+        @return: true if the given file has the same size and hash as this object; false otherwise
+        """
+        st = os.stat(filename)
+        if self.filesize != st.st_size:
+            return False
 
-    @rtype: PoolFile or None
-    @return: either the PoolFile object or None
-    """
+        f = open(filename, "r")
+        sha256sum = apt_pkg.sha256sum(f)
+        if sha256sum != self.sha256sum:
+            return False
 
-    return session.query(PoolFile).get(file_id)
+        return True
 
-__all__.append('get_poolfile_by_id')
+__all__.append('PoolFile')
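identical_to compares sizes via os.stat first (cheap) and only hashes the candidate when the sizes match; apt_pkg.sha256sum accepts an open file object. The same check as a standalone sketch, with the expected values passed in rather than read from the database row:

```python
import os
import apt_pkg

def file_matches(filename, expected_size, expected_sha256):
    if os.stat(filename).st_size != expected_size:
        return False  # sizes differ; skip the expensive hash
    f = open(filename, "r")
    try:
        return apt_pkg.sha256sum(f) == expected_sha256
    finally:
        f.close()
```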
 
 @session_wrapper
 def get_poolfile_like_name(filename, session=None):
@@ -1460,39 +1101,6 @@ def get_poolfile_like_name(filename, session=None):
 
 __all__.append('get_poolfile_like_name')
 
-@session_wrapper
-def add_poolfile(filename, datadict, location_id, session=None):
-    """
-    Add a new file to the pool
-
-    @type filename: string
-    @param filename: filename
-
-    @type datadict: dict
-    @param datadict: dict with needed data
-
-    @type location_id: int
-    @param location_id: database id of the location
-
-    @rtype: PoolFile
-    @return: the PoolFile object created
-    """
-    poolfile = PoolFile()
-    poolfile.filename = filename
-    poolfile.filesize = datadict["size"]
-    poolfile.md5sum = datadict["md5sum"]
-    poolfile.sha1sum = datadict["sha1sum"]
-    poolfile.sha256sum = datadict["sha256sum"]
-    poolfile.location_id = location_id
-
-    session.add(poolfile)
-    # Flush to get a file id (NB: This is not a commit)
-    session.flush()
-
-    return poolfile
-
-__all__.append('add_poolfile')
-
 ################################################################################
 
 class Fingerprint(ORMObject):
@@ -1620,7 +1228,7 @@ class Keyring(object):
         key = None
         signingkey = False
 
-        for line in k.xreadlines():
+        for line in k:
             field = line.split(":")
             if field[0] == "pub":
                 key = field[4]
@@ -1727,6 +1335,34 @@ def get_keyring(keyring, session=None):
 
 __all__.append('get_keyring')
 
+@session_wrapper
+def get_active_keyring_paths(session=None):
+    """
+    @rtype: list
+    @return: list of active keyring paths
+    """
+    return [ x.keyring_name for x in session.query(Keyring).filter(Keyring.active == True).order_by(desc(Keyring.priority)).all() ]
+
+__all__.append('get_active_keyring_paths')
+
+@session_wrapper
+def get_primary_keyring_path(session=None):
+    """
+    Get the full path to the highest priority active keyring
+
+    @rtype: str or None
+    @return: path to the active keyring with the highest priority or None if no
+             keyring is configured
+    """
+    keyrings = get_active_keyring_paths()
+
+    if len(keyrings) > 0:
+        return keyrings[0]
+    else:
+        return None
+
+__all__.append('get_primary_keyring_path')
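A hypothetical caller, handing the highest-priority active keyring to gpgv (the changes filename is illustrative):

```python
keyring = get_primary_keyring_path()
if keyring is None:
    raise Exception("no active keyring configured")
status = os.system("gpgv --keyring %s foo.changes" % keyring)
```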
+
 ################################################################################
 
 class KeyringACLMap(object):
@@ -1747,19 +1383,6 @@ class DBChange(object):
     def __repr__(self):
         return '<DBChange %s>' % self.changesname
 
-    def clean_from_queue(self):
-        session = DBConn().session().object_session(self)
-
-        # Remove changes_pool_files entries
-        self.poolfiles = []
-
-        # Remove changes_pending_files references
-        self.files = []
-
-        # Clear out of queue
-        self.in_queue = None
-        self.approved_for_id = None
-
 __all__.append('DBChange')
 
 @session_wrapper
@@ -1789,58 +1412,6 @@ __all__.append('get_dbchange')
 
 ################################################################################
 
-class Location(ORMObject):
-    def __init__(self, path = None, component = None):
-        self.path = path
-        self.component = component
-        # the column 'type' should go away, see comment at mapper
-        self.archive_type = 'pool'
-
-    def properties(self):
-        return ['path', 'location_id', 'archive_type', 'component', \
-            'files_count']
-
-    def not_null_constraints(self):
-        return ['path', 'archive_type']
-
-__all__.append('Location')
-
-@session_wrapper
-def get_location(location, component=None, archive=None, session=None):
-    """
-    Returns Location object for the given combination of location, component
-    and archive
-
-    @type location: string
-    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
-
-    @type component: string
-    @param component: the component name (if None, no restriction applied)
-
-    @type archive: string
-    @param archive: the archive name (if None, no restriction applied)
-
-    @rtype: Location / None
-    @return: Either a Location object or None if one can't be found
-    """
-
-    q = session.query(Location).filter_by(path=location)
-
-    if archive is not None:
-        q = q.join(Archive).filter_by(archive_name=archive)
-
-    if component is not None:
-        q = q.join(Component).filter_by(component_name=component)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_location')
-
-################################################################################
-
 class Maintainer(ORMObject):
     def __init__(self, name = None):
         self.name = name
@@ -2124,30 +1695,30 @@ def get_policy_queue(queuename, session=None):
 
 __all__.append('get_policy_queue')
 
-@session_wrapper
-def get_policy_queue_from_path(pathname, session=None):
-    """
-    Returns PolicyQueue object for given C{path name}
-
-    @type queuename: string
-    @param queuename: The path
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
+################################################################################
 
-    @rtype: PolicyQueue
-    @return: PolicyQueue object for the given queue
-    """
+class PolicyQueueUpload(object):
+    def __cmp__(self, other):
+        ret = cmp(self.changes.source, other.changes.source)
+        if ret == 0:
+            ret = apt_pkg.version_compare(self.changes.version, other.changes.version)
+        if ret == 0:
+            if self.source is not None and other.source is None:
+                ret = -1
+            elif self.source is None and other.source is not None:
+                ret = 1
+        if ret == 0:
+            ret = cmp(self.changes.changesname, other.changes.changesname)
+        return ret
+
+__all__.append('PolicyQueueUpload')
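The __cmp__ above gives policy-queue listings a stable order: source name first, then Debian version via apt_pkg.version_compare, then source uploads ahead of binary-only ones, with the changes file name as the final tie-breaker. For example:

```python
import apt_pkg
apt_pkg.init_system()  # required once before version_compare

print apt_pkg.version_compare("1.0-1", "1.0-2")  # negative: 1.0-1 sorts first
# With __cmp__ defined, uploads can simply be sorted:
#   for upload in sorted(queue.uploads): ...
```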
 
-    q = session.query(PolicyQueue).filter_by(path=pathname)
+################################################################################
 
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
+class PolicyQueueByhandFile(object):
+    pass
 
-__all__.append('get_policy_queue_from_path')
+__all__.append('PolicyQueueByhandFile')
 
 ################################################################################
 
@@ -2366,13 +1937,14 @@ class Dak822(Deb822):
 
 class DBSource(ORMObject):
     def __init__(self, source = None, version = None, maintainer = None, \
-        changedby = None, poolfile = None, install_date = None):
+        changedby = None, poolfile = None, install_date = None, fingerprint = None):
         self.source = source
         self.version = version
         self.maintainer = maintainer
         self.changedby = changedby
         self.poolfile = poolfile
         self.install_date = install_date
+        self.fingerprint = fingerprint
 
     @property
     def pkid(self):
@@ -2385,7 +1957,7 @@ class DBSource(ORMObject):
 
     def not_null_constraints(self):
         return ['source', 'version', 'install_date', 'maintainer', \
-            'changedby', 'poolfile', 'install_date']
+            'changedby', 'poolfile']
 
     def read_control_fields(self):
         '''
@@ -2457,20 +2029,14 @@ def source_exists(source, source_version, suites = ["any"], session=None):
         q = session.query(DBSource).filter_by(source=source). \
             filter(DBSource.version.in_([source_version, orig_source_version]))
         if suite != "any":
-            # source must exist in suite X, or in some other suite that's
-            # mapped to X, recursively... silent-maps are counted too,
-            # unreleased-maps aren't.
-            maps = cnf.ValueList("SuiteMappings")[:]
-            maps.reverse()
-            maps = [ m.split() for m in maps ]
-            maps = [ (x[1], x[2]) for x in maps
-                            if x[0] == "map" or x[0] == "silent-map" ]
-            s = [suite]
-            for (from_, to) in maps:
-                if from_ in s and to not in s:
-                    s.append(to)
-
-            q = q.filter(DBSource.suites.any(Suite.suite_name.in_(s)))
+            # source must exist in 'suite' or a suite that is enhanced by 'suite'
+            s = get_suite(suite, session)
+            if s:
+                enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
+                considered_suites = [ vc.reference for vc in enhances_vcs ]
+                considered_suites.append(s)
+
+                q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))
 
         if q.count() > 0:
             continue
@@ -2589,173 +2155,6 @@ def import_metadata_into_db(obj, session=None):
 
 __all__.append('import_metadata_into_db')
 
-
-################################################################################
-
-@session_wrapper
-def add_dsc_to_db(u, filename, session=None):
-    entry = u.pkg.files[filename]
-    source = DBSource()
-    pfs = []
-
-    source.source = u.pkg.dsc["source"]
-    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
-    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
-    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
-    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    source.install_date = datetime.now().date()
-
-    dsc_component = entry["component"]
-    dsc_location_id = entry["location id"]
-
-    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
-
-    # Set up a new poolfile if necessary
-    if not entry.has_key("files id") or not entry["files id"]:
-        filename = entry["pool name"] + filename
-        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
-        session.flush()
-        pfs.append(poolfile)
-        entry["files id"] = poolfile.file_id
-
-    source.poolfile_id = entry["files id"]
-    session.add(source)
-
-    suite_names = u.pkg.changes["distribution"].keys()
-    source.suites = session.query(Suite). \
-        filter(Suite.suite_name.in_(suite_names)).all()
-
-    # Add the source files to the DB (files and dsc_files)
-    dscfile = DSCFile()
-    dscfile.source_id = source.source_id
-    dscfile.poolfile_id = entry["files id"]
-    session.add(dscfile)
-
-    for dsc_file, dentry in u.pkg.dsc_files.items():
-        df = DSCFile()
-        df.source_id = source.source_id
-
-        # If the .orig tarball is already in the pool, it's
-        # files id is stored in dsc_files by check_dsc().
-        files_id = dentry.get("files id", None)
-
-        # Find the entry in the files hash
-        # TODO: Bail out here properly
-        dfentry = None
-        for f, e in u.pkg.files.items():
-            if f == dsc_file:
-                dfentry = e
-                break
-
-        if files_id is None:
-            filename = dfentry["pool name"] + dsc_file
-
-            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
-            # FIXME: needs to check for -1/-2 and or handle exception
-            if found and obj is not None:
-                files_id = obj.file_id
-                pfs.append(obj)
-
-            # If still not found, add it
-            if files_id is None:
-                # HACK: Force sha1sum etc into dentry
-                dentry["sha1sum"] = dfentry["sha1sum"]
-                dentry["sha256sum"] = dfentry["sha256sum"]
-                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
-                pfs.append(poolfile)
-                files_id = poolfile.file_id
-        else:
-            poolfile = get_poolfile_by_id(files_id, session)
-            if poolfile is None:
-                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
-            pfs.append(poolfile)
-
-        df.poolfile_id = files_id
-        session.add(df)
-
-    # Add the src_uploaders to the DB
-    source.uploaders = [source.maintainer]
-    if u.pkg.dsc.has_key("uploaders"):
-        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
-            up = up.strip()
-            source.uploaders.append(get_or_set_maintainer(up, session))
-
-    session.flush()
-
-    return source, dsc_component, dsc_location_id, pfs
-
-__all__.append('add_dsc_to_db')
-
-@session_wrapper
-def add_deb_to_db(u, filename, session=None):
-    """
-    Contrary to what you might expect, this routine deals with both
-    debs and udebs.  That info is in 'dbtype', whilst 'type' is
-    'deb' for both of them
-    """
-    cnf = Config()
-    entry = u.pkg.files[filename]
-
-    bin = DBBinary()
-    bin.package = entry["package"]
-    bin.version = entry["version"]
-    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
-    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
-    bin.binarytype = entry["dbtype"]
-
-    # Find poolfile id
-    filename = entry["pool name"] + filename
-    fullpath = os.path.join(cnf["Dir::Pool"], filename)
-    if not entry.get("location id", None):
-        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
-
-    if entry.get("files id", None):
-        poolfile = get_poolfile_by_id(bin.poolfile_id)
-        bin.poolfile_id = entry["files id"]
-    else:
-        poolfile = add_poolfile(filename, entry, entry["location id"], session)
-        bin.poolfile_id = entry["files id"] = poolfile.file_id
-
-    # Find source id
-    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
-    if len(bin_sources) != 1:
-        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                  (bin.package, bin.version, entry["architecture"],
-                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
-
-    bin.source_id = bin_sources[0].source_id
-
-    if entry.has_key("built-using"):
-        for srcname, version in entry["built-using"]:
-            exsources = get_sources_from_name(srcname, version, session=session)
-            if len(exsources) != 1:
-                raise NoSourceFieldError, "Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                          (srcname, version, bin.package, bin.version, entry["architecture"],
-                                           filename, bin.binarytype, u.pkg.changes["fingerprint"])
-
-            bin.extra_sources.append(exsources[0])
-
-    # Add and flush object so it has an ID
-    session.add(bin)
-
-    suite_names = u.pkg.changes["distribution"].keys()
-    bin.suites = session.query(Suite). \
-        filter(Suite.suite_name.in_(suite_names)).all()
-
-    session.flush()
-
-    # Deal with contents - disabled for now
-    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
-    #if not contents:
-    #    print "REJECT\nCould not determine contents of package %s" % bin.package
-    #    session.rollback()
-    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
-
-    return bin, poolfile
-
-__all__.append('add_deb_to_db')
-
 ################################################################################
 
 class SourceACL(object):
@@ -2878,6 +2277,10 @@ class Suite(ORMObject):
         else:
             return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()
 
+    @property
+    def path(self):
+        return os.path.join(self.archive.path, 'dists', self.suite_name)
+
 __all__.append('Suite')
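Mirroring ArchiveFile.path for pool files, Suite.path derives the dists directory from the suite's archive. Sketch with hypothetical values:

```python
# suite.archive.path = '/srv/ftp-master.debian.org/ftp'
# suite.suite_name   = 'unstable'
print suite.path  # /srv/ftp-master.debian.org/ftp/dists/unstable
```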
 
 @session_wrapper
@@ -2907,11 +2310,11 @@ __all__.append('get_suite')
 
 ################################################################################
 
-# TODO: should be removed because the implementation is too trivial
 @session_wrapper
 def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     """
-    Returns list of Architecture objects for given C{suite} name
+    Returns list of Architecture objects for given C{suite} name. The list is
+    empty if the suite does not exist.
 
     @type suite: str
     @param suite: Suite name to search for
@@ -2932,48 +2335,15 @@ def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     @return: list of Architecture objects for the given name (may be empty)
     """
 
-    return get_suite(suite, session).get_architectures(skipsrc, skipall)
+    try:
+        return get_suite(suite, session).get_architectures(skipsrc, skipall)
+    except AttributeError:
+        return []
 
 __all__.append('get_suite_architectures')
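With the try/except above, the helper degrades gracefully instead of raising AttributeError when get_suite returns None:

```python
get_suite_architectures('unstable')       # list of Architecture objects
get_suite_architectures('no-such-suite')  # [] (suite unknown)
```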
 
 ################################################################################
 
-class SuiteSrcFormat(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<SuiteSrcFormat (%s, %s)>' % (self.suite_id, self.src_format_id)
-
-__all__.append('SuiteSrcFormat')
-
-@session_wrapper
-def get_suite_src_formats(suite, session=None):
-    """
-    Returns list of allowed SrcFormat for C{suite}.
-
-    @type suite: str
-    @param suite: Suite name to search for
-
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: list
-    @return: the list of allowed source formats for I{suite}
-    """
-
-    q = session.query(SrcFormat)
-    q = q.join(SuiteSrcFormat)
-    q = q.join(Suite).filter_by(suite_name=suite)
-    q = q.order_by('format_name')
-
-    return q.all()
-
-__all__.append('get_suite_src_formats')
-
-################################################################################
-
 class Uid(ORMObject):
     def __init__(self, uid = None, name = None):
         self.uid = uid
@@ -3190,26 +2560,18 @@ class DBConn(object):
             'binary_acl',
             'binary_acl_map',
             'build_queue',
-            'build_queue_files',
-            'build_queue_policy_files',
             'changelogs_text',
             'changes',
             'component',
             'config',
-            'changes_pending_binaries',
-            'changes_pending_files',
-            'changes_pending_source',
-            'changes_pending_files_map',
-            'changes_pending_source_files',
-            'changes_pool_files',
             'dsc_files',
             'external_overrides',
             'extra_src_references',
             'files',
+            'files_archive_map',
             'fingerprint',
             'keyrings',
             'keyring_acl_map',
-            'location',
             'maintainer',
             'metadata_keys',
             'new_comments',
@@ -3217,6 +2579,9 @@ class DBConn(object):
             'override',
             'override_type',
             'policy_queue',
+            'policy_queue_upload',
+            'policy_queue_upload_binaries_map',
+            'policy_queue_byhand_file',
             'priority',
             'section',
             'source',
@@ -3241,7 +2606,6 @@ class DBConn(object):
             'any_associations_source',
             'bin_associations_binaries',
             'binaries_suite_arch',
-            'binfiles_suite_component_arch',
             'changelogs',
             'file_arch_suite',
             'newest_all_associations',
@@ -3271,25 +2635,22 @@ class DBConn(object):
         mapper(Architecture, self.tbl_architecture,
             properties = dict(arch_id = self.tbl_architecture.c.id,
                suites = relation(Suite, secondary=self.tbl_suite_architectures,
-                   order_by='suite_name',
-                   backref=backref('architectures', order_by='arch_string'))),
+                   order_by=self.tbl_suite.c.suite_name,
+                   backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
             extension = validator)
 
         mapper(Archive, self.tbl_archive,
                properties = dict(archive_id = self.tbl_archive.c.id,
                                  archive_name = self.tbl_archive.c.name))
 
-        mapper(BuildQueue, self.tbl_build_queue,
-               properties = dict(queue_id = self.tbl_build_queue.c.id))
+        mapper(ArchiveFile, self.tbl_files_archive_map,
+               properties = dict(archive = relation(Archive, backref='files'),
+                                 component = relation(Component),
+                                 file = relation(PoolFile, backref='archives')))
 
-        mapper(BuildQueueFile, self.tbl_build_queue_files,
-               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
-                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))
-
-        mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
-               properties = dict(
-                build_queue = relation(BuildQueue, backref='policy_queue_files'),
-                file = relation(ChangePendingFile, lazy='joined')))
+        mapper(BuildQueue, self.tbl_build_queue,
+               properties = dict(queue_id = self.tbl_build_queue.c.id,
+                                 suite = relation(Suite, primaryjoin=(self.tbl_build_queue.c.suite_id==self.tbl_suite.c.id))))
 
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
@@ -3302,7 +2663,7 @@ class DBConn(object):
                                  arch_id = self.tbl_binaries.c.architecture,
                                  architecture = relation(Architecture),
                                  poolfile_id = self.tbl_binaries.c.file,
-                                 poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
+                                 poolfile = relation(PoolFile),
                                  binarytype = self.tbl_binaries.c.type,
                                  fingerprint_id = self.tbl_binaries.c.sig_fpr,
                                  fingerprint = relation(Fingerprint),
@@ -3338,17 +2699,16 @@ class DBConn(object):
                                  poolfile_id = self.tbl_dsc_files.c.file,
                                  poolfile = relation(PoolFile)))
 
-        mapper(ExternalOverride, self.tbl_external_overrides)
+        mapper(ExternalOverride, self.tbl_external_overrides,
+                properties = dict(
+                    suite_id = self.tbl_external_overrides.c.suite,
+                    suite = relation(Suite),
+                    component_id = self.tbl_external_overrides.c.component,
+                    component = relation(Component)))
 
         mapper(PoolFile, self.tbl_files,
                properties = dict(file_id = self.tbl_files.c.id,
-                                 filesize = self.tbl_files.c.size,
-                                 location_id = self.tbl_files.c.location,
-                                 location = relation(Location,
-                                     # using lazy='dynamic' in the back
-                                     # reference because we have A LOT of
-                                     # files in one location
-                                     backref=backref('files', lazy='dynamic'))),
+                                 filesize = self.tbl_files.c.size),
                 extension = validator)
 
         mapper(Fingerprint, self.tbl_fingerprint,
@@ -3367,9 +2727,6 @@ class DBConn(object):
 
         mapper(DBChange, self.tbl_changes,
                properties = dict(change_id = self.tbl_changes.c.id,
-                                 poolfiles = relation(PoolFile,
-                                                      secondary=self.tbl_changes_pool_files,
-                                                      backref="changeslinks"),
                                  seen = self.tbl_changes.c.seen,
                                  source = self.tbl_changes.c.source,
                                  binaries = self.tbl_changes.c.binaries,
@@ -3379,55 +2736,13 @@ class DBConn(object):
                                  maintainer = self.tbl_changes.c.maintainer,
                                  changedby = self.tbl_changes.c.changedby,
                                  date = self.tbl_changes.c.date,
-                                 version = self.tbl_changes.c.version,
-                                 files = relation(ChangePendingFile,
-                                                  secondary=self.tbl_changes_pending_files_map,
-                                                  backref="changesfile"),
-                                 in_queue_id = self.tbl_changes.c.in_queue,
-                                 in_queue = relation(PolicyQueue,
-                                                     primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
-                                 approved_for_id = self.tbl_changes.c.approved_for))
-
-        mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
-               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
-
-        mapper(ChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
-                                 filename = self.tbl_changes_pending_files.c.filename,
-                                 size = self.tbl_changes_pending_files.c.size,
-                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
-                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
-                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
-
-        mapper(ChangePendingSource, self.tbl_changes_pending_source,
-               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
-                                 change = relation(DBChange),
-                                 maintainer = relation(Maintainer,
-                                                       primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
-                                 changedby = relation(Maintainer,
-                                                      primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
-                                 fingerprint = relation(Fingerprint),
-                                 source_files = relation(ChangePendingFile,
-                                                         secondary=self.tbl_changes_pending_source_files,
-                                                         backref="pending_sources")))
-
+                                 version = self.tbl_changes.c.version))
 
         mapper(KeyringACLMap, self.tbl_keyring_acl_map,
                properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
                                  keyring = relation(Keyring, backref="keyring_acl_map"),
                                  architecture = relation(Architecture)))
 
-        mapper(Location, self.tbl_location,
-               properties = dict(location_id = self.tbl_location.c.id,
-                                 component_id = self.tbl_location.c.component,
-                                 component = relation(Component, backref='location'),
-                                 archive_id = self.tbl_location.c.archive,
-                                 archive = relation(Archive),
-                                 # FIXME: the 'type' column is old cruft and
-                                 # should be removed in the future.
-                                 archive_type = self.tbl_location.c.type),
-               extension = validator)
-
         mapper(Maintainer, self.tbl_maintainer,
                properties = dict(maintainer_id = self.tbl_maintainer.c.id,
                    maintains_sources = relation(DBSource, backref='maintainer',
@@ -3462,7 +2777,23 @@ class DBConn(object):
                                  overridetype_id = self.tbl_override_type.c.id))
 
         mapper(PolicyQueue, self.tbl_policy_queue,
-               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id,
+                                 suite = relation(Suite, primaryjoin=(self.tbl_policy_queue.c.suite_id == self.tbl_suite.c.id))))
+
+        mapper(PolicyQueueUpload, self.tbl_policy_queue_upload,
+               properties = dict(
+                   changes = relation(DBChange),
+                   policy_queue = relation(PolicyQueue, backref='uploads'),
+                   target_suite = relation(Suite),
+                   source = relation(DBSource),
+                   binaries = relation(DBBinary, secondary=self.tbl_policy_queue_upload_binaries_map),
+                ))
+
+        mapper(PolicyQueueByhandFile, self.tbl_policy_queue_byhand_file,
+               properties = dict(
+                   upload = relation(PolicyQueueUpload, backref='byhand'),
+                   )
+               )
 
         mapper(Priority, self.tbl_priority,
                properties = dict(priority_id = self.tbl_priority.c.id))
@@ -3476,7 +2807,7 @@ class DBConn(object):
                                  version = self.tbl_source.c.version,
                                  maintainer_id = self.tbl_source.c.maintainer,
                                  poolfile_id = self.tbl_source.c.file,
-                                 poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
+                                 poolfile = relation(PoolFile),
                                  fingerprint_id = self.tbl_source.c.sig_fpr,
                                  fingerprint = relation(Fingerprint),
                                  changedby_id = self.tbl_source.c.changedby,
@@ -3499,17 +2830,14 @@ class DBConn(object):
 
         mapper(Suite, self.tbl_suite,
                properties = dict(suite_id = self.tbl_suite.c.id,
-                                 policy_queue = relation(PolicyQueue),
+                                 policy_queue = relation(PolicyQueue, primaryjoin=(self.tbl_suite.c.policy_queue_id == self.tbl_policy_queue.c.id)),
                                  copy_queues = relation(BuildQueue,
-                                     secondary=self.tbl_suite_build_queue_copy)),
+                                     secondary=self.tbl_suite_build_queue_copy),
+                                 srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
+                                     backref=backref('suites', lazy='dynamic')),
+                                 archive = relation(Archive, backref='suites')),
                 extension = validator)
 
-        mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
-               properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
-                                 suite = relation(Suite, backref='suitesrcformats'),
-                                 src_format_id = self.tbl_suite_src_formats.c.src_format,
-                                 src_format = relation(SrcFormat)))
-
         mapper(Uid, self.tbl_uid,
                properties = dict(uid_id = self.tbl_uid.c.id,
                                  fingerprint = relation(Fingerprint)),
@@ -3601,15 +2929,21 @@ class DBConn(object):
 
         sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak
 
-        self.db_pg   = create_engine(connstr, **engine_args)
-        self.db_meta = MetaData()
-        self.db_meta.bind = self.db_pg
-        self.db_smaker = sessionmaker(bind=self.db_pg,
-                                      autoflush=True,
-                                      autocommit=False)
+        try:
+            self.db_pg   = create_engine(connstr, **engine_args)
+            self.db_meta = MetaData()
+            self.db_meta.bind = self.db_pg
+            self.db_smaker = sessionmaker(bind=self.db_pg,
+                                          autoflush=True,
+                                          autocommit=False)
+
+            self.__setuptables()
+            self.__setupmappers()
+
+        except OperationalError as e:
+            import utils
+            utils.fubar("Cannot connect to database (%s)" % str(e))
 
-        self.__setuptables()
-        self.__setupmappers()
         self.pid = os.getpid()
 
     def session(self, work_mem = 0):