Add by-hash support

diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index 554eba4f2ef3646310f867c52e98d020e228938c..58ad0412232a78f18c01012c1ab5b9b5ba305cb5 100755
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
 
 ################################################################################
 
+import apt_pkg
+import daklib.daksubprocess
 import os
+from os.path import normpath
 import re
 import psycopg2
+import subprocess
 import traceback
-import commands
 
 try:
     # python >= 2.6
@@ -49,14 +52,18 @@ except:
 from datetime import datetime, timedelta
 from errno import ENOENT
 from tempfile import mkstemp, mkdtemp
+from tarfile import TarFile
 
 from inspect import getargspec
 
 import sqlalchemy
-from sqlalchemy import create_engine, Table, MetaData, Column, Integer
+from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
+    Text, ForeignKey
 from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
-    backref, MapperExtension, EXT_CONTINUE
+    backref, MapperExtension, EXT_CONTINUE, object_mapper, clear_mappers
 from sqlalchemy import types as sqltypes
+from sqlalchemy.orm.collections import attribute_mapped_collection
+from sqlalchemy.ext.associationproxy import association_proxy
 
 # Don't remove this, we re-export the exceptions to scripts which import us
 from sqlalchemy.exc import *
@@ -66,16 +73,16 @@ from sqlalchemy.orm.exc import NoResultFound
 # in the database
 from config import Config
 from textutils import fix_maintainer
-from dak_exceptions import DBUpdateError, NoSourceFieldError
+from dak_exceptions import DBUpdateError, NoSourceFieldError, FileExistsError
 
 # suppress some deprecation warnings in squeeze related to sqlalchemy
 import warnings
 warnings.filterwarnings('ignore', \
     "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
     SADeprecationWarning)
-# TODO: sqlalchemy needs some extra configuration to correctly reflect
-# the ind_deb_contents_* indexes - we ignore the warnings at the moment
-warnings.filterwarnings("ignore", 'Predicate of partial index', SAWarning)
+warnings.filterwarnings('ignore', \
+    "Predicate of partial index .* ignored during reflection", \
+    SAWarning)
 
 
 ################################################################################
@@ -102,11 +109,11 @@ class DebVersion(UserDefinedType):
         return None
 
 sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version in ["0.5", "0.6"]:
+if sa_major_version in ["0.5", "0.6", "0.7", "0.8", "0.9", "1.0"]:
     from sqlalchemy.databases import postgres
     postgres.ischema_names['debversion'] = DebVersion
 else:
-    raise Exception("dak only ported to SQLA versions 0.5 and 0.6.  See daklib/dbconn.py")
+    raise Exception("dak only ported to SQLA versions 0.5 to 1.0 (%s installed).  See daklib/dbconn.py" % sa_major_version)
 
 ################################################################################
 
@@ -201,7 +208,9 @@ class ORMObject(object):
                     # list
                     value = len(value)
                 elif hasattr(value, 'count'):
-                    # query
+                    # query (but not during validation)
+                    if self.in_validation:
+                        continue
                     value = value.count()
                 else:
                     raise KeyError('Do not understand property %s.' % property)
@@ -255,6 +264,8 @@ class ORMObject(object):
     validation_message = \
         "Validation failed because property '%s' must not be empty in object\n%s"
 
+    in_validation = False
+
     def validate(self):
         '''
         This function validates the not NULL constraints as returned by
@@ -269,8 +280,11 @@ class ORMObject(object):
                 getattr(self, property + '_id') is not None:
                 continue
             if not hasattr(self, property) or getattr(self, property) is None:
-                raise DBUpdateError(self.validation_message % \
-                    (property, str(self)))
+                # str() might lead to races due to a 2nd flush
+                self.in_validation = True
+                message = self.validation_message % (property, str(self))
+                self.in_validation = False
+                raise DBUpdateError(message)
 
     @classmethod
     @session_wrapper
@@ -287,6 +301,50 @@ class ORMObject(object):
         '''
         return session.query(cls).get(primary_key)
 
+    def session(self, replace = False):
+        '''
+        Returns the current session that is associated with the object. May
+        return None if the object is in detached state.
+        '''
+
+        return object_session(self)
+
+    def clone(self, session = None):
+        """
+        Clones the current object in a new session and returns the new clone. A
+        fresh session is created if the optional session parameter is not
+        provided. The function will fail if a session is provided and has
+        unflushed changes.
+
+        RATIONALE: SQLAlchemy's session is not thread safe. This method clones
+        an existing object to allow several threads to work with their own
+        instances of an ORMObject.
+
+        WARNING: Only persistent (committed) objects can be cloned. Changes
+        made to the original object that are not committed yet will be lost.
+        The session of the new object will always be rolled back to avoid
+        resource leaks.
+        """
+
+        if self.session() is None:
+            raise RuntimeError( \
+                'Method clone() failed for detached object:\n%s' % self)
+        self.session().flush()
+        mapper = object_mapper(self)
+        primary_key = mapper.primary_key_from_instance(self)
+        object_class = self.__class__
+        if session is None:
+            session = DBConn().session()
+        elif len(session.new) + len(session.dirty) + len(session.deleted) > 0:
+            raise RuntimeError( \
+                'Method clone() failed due to unflushed changes in session.')
+        new_object = session.query(object_class).get(primary_key)
+        session.rollback()
+        if new_object is None:
+            raise RuntimeError( \
+                'Method clone() failed for non-persistent object:\n%s' % self)
+        return new_object
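+
+    # Hedged usage sketch (not part of this patch; DBSource and source_id are
+    # illustrative): clone a committed object so a worker thread can use its
+    # own session-bound copy.
+    #
+    #   source = session.query(DBSource).get(source_id)
+    #   session.commit()
+    #   private_copy = source.clone()   # bound to a fresh DBConn session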
+
 __all__.append('ORMObject')
 
 ################################################################################
@@ -310,6 +368,20 @@ validator = Validator()
 
 ################################################################################
 
+class ACL(ORMObject):
+    def __repr__(self):
+        return "<ACL {0}>".format(self.name)
+
+__all__.append('ACL')
+
+class ACLPerSource(ORMObject):
+    def __repr__(self):
+        return "<ACLPerSource acl={0} fingerprint={1} source={2} reason={3}>".format(self.acl.name, self.fingerprint.fingerprint, self.source, self.reason)
+
+__all__.append('ACLPerSource')
+
+################################################################################
+
 class Architecture(ORMObject):
     def __init__(self, arch_string = None, description = None):
         self.arch_string = arch_string
@@ -360,27 +432,6 @@ def get_architecture(architecture, session=None):
 
 __all__.append('get_architecture')
 
-# TODO: should be removed because the implementation is too trivial
-@session_wrapper
-def get_architecture_suites(architecture, session=None):
-    """
-    Returns list of Suite objects for given C{architecture} name
-
-    @type architecture: str
-    @param architecture: Architecture name to search for
-
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: list
-    @return: list of Suite objects for the given name (may be empty)
-    """
-
-    return get_architecture(architecture, session).suites
-
-__all__.append('get_architecture_suites')
-
 ################################################################################
 
 class Archive(object):
@@ -421,23 +472,26 @@ __all__.append('get_archive')
 
 ################################################################################
 
-class BinAssociation(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<BinAssociation %s (%s, %s)>' % (self.ba_id, self.binary, self.suite)
+class ArchiveFile(object):
+    def __init__(self, archive=None, component=None, file=None):
+        self.archive = archive
+        self.component = component
+        self.file = file
+
+    @property
+    def path(self):
+        return os.path.join(self.archive.path, 'pool', self.component.component_name, self.file.filename)
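+
+    # Worked example (values are illustrative): with archive.path set to
+    # '/srv/ftp-master.debian.org/ftp', component 'main' and a pool file named
+    # 'h/hello/hello_1.0-1_amd64.deb', path evaluates to
+    # '/srv/ftp-master.debian.org/ftp/pool/main/h/hello/hello_1.0-1_amd64.deb'.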
 
-__all__.append('BinAssociation')
+__all__.append('ArchiveFile')
 
 ################################################################################
 
-class BinContents(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class BinContents(ORMObject):
+    def __init__(self, file = None, binary = None):
+        self.file = file
+        self.binary = binary
 
-    def __repr__(self):
-        return '<BinContents (%s, %s)>' % (self.binary, self.filename)
+    def properties(self):
+        return ['file', 'binary']
 
 __all__.append('BinContents')
 
@@ -446,7 +500,7 @@ __all__.append('BinContents')
 class DBBinary(ORMObject):
     def __init__(self, package = None, source = None, version = None, \
         maintainer = None, architecture = None, poolfile = None, \
-        binarytype = 'deb'):
+        binarytype = 'deb', fingerprint=None):
         self.package = package
         self.source = source
         self.version = version
@@ -454,15 +508,75 @@ class DBBinary(ORMObject):
         self.architecture = architecture
         self.poolfile = poolfile
         self.binarytype = binarytype
+        self.fingerprint = fingerprint
+
+    @property
+    def pkid(self):
+        return self.binary_id
 
     def properties(self):
         return ['package', 'version', 'maintainer', 'source', 'architecture', \
             'poolfile', 'binarytype', 'fingerprint', 'install_date', \
-            'suites_count']
+            'suites_count', 'binary_id', 'contents_count', 'extra_sources']
 
     def not_null_constraints(self):
-        return ['package', 'version', 'maintainer', 'source', 'architecture', \
-            'poolfile', 'binarytype']
+        return ['package', 'version', 'maintainer', 'source', 'poolfile', \
+            'binarytype']
+
+    metadata = association_proxy('key', 'value')
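+    # Sketch of what the proxy exposes, assuming the 'key' relation is set up
+    # elsewhere in this module as a collection of BinaryMetadata rows keyed by
+    # metadata key name:
+    #
+    #   binary.metadata['Description']   # -> the stored metadata value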
+
+    def scan_contents(self):
+        '''
+        Yields the contents of the package. Only regular files are yielded and
+        the path names are normalized after converting them from either utf-8
+        or iso8859-1 encoding. It yields the string ' <EMPTY PACKAGE>' if the
+        package does not contain any regular file.
+        '''
+        fullpath = self.poolfile.fullpath
+        dpkg_cmd = ('dpkg-deb', '--fsys-tarfile', fullpath)
+        dpkg = daklib.daksubprocess.Popen(dpkg_cmd, stdout=subprocess.PIPE)
+        tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
+        for member in tar.getmembers():
+            if not member.isdir():
+                name = normpath(member.name)
+                # enforce proper utf-8 encoding
+                try:
+                    name.decode('utf-8')
+                except UnicodeDecodeError:
+                    name = name.decode('iso8859-1').encode('utf-8')
+                yield name
+        tar.close()
+        dpkg.stdout.close()
+        dpkg.wait()
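+
+    # Illustrative use (assumes the pool file exists on disk and dpkg-deb is
+    # installed); streams the normalized file list of the .deb:
+    #
+    #   for name in binary.scan_contents():
+    #       print name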
+
+    def read_control(self):
+        '''
+        Reads the control information from a binary.
+
+        @rtype: text
+        @return: stanza text of the control section.
+        '''
+        import utils
+        fullpath = self.poolfile.fullpath
+        with open(fullpath, 'r') as deb_file:
+            return utils.deb_extract_control(deb_file)
+
+    def read_control_fields(self):
+        '''
+        Reads the control information from a binary and returns it
+        as a dictionary.
+
+        @rtype: dict
+        @return: fields of the control section as a dictionary.
+        '''
+        stanza = self.read_control()
+        return apt_pkg.TagSection(stanza)
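+
+    # Hedged example: apt_pkg.TagSection behaves like a read-only mapping, so
+    # assuming a typical control stanza one could do:
+    #
+    #   fields = binary.read_control_fields()
+    #   depends = fields.get('Depends', '')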
+
+    @property
+    def proxy(self):
+        session = object_session(self)
+        query = session.query(BinaryMetadata).filter_by(binary=self)
+        return MetadataProxy(session, query)
 
 __all__.append('DBBinary')
 
@@ -483,188 +597,40 @@ def get_suites_binary_in(package, session=None):
 __all__.append('get_suites_binary_in')
 
 @session_wrapper
-def get_binary_from_id(binary_id, session=None):
-    """
-    Returns DBBinary object for given C{id}
-
-    @type binary_id: int
-    @param binary_id: Id of the required binary
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: DBBinary
-    @return: DBBinary object for the given binary (None if not present)
-    """
-
-    q = session.query(DBBinary).filter_by(binary_id=binary_id)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_binary_from_id')
-
-@session_wrapper
-def get_binaries_from_name(package, version=None, architecture=None, session=None):
-    """
-    Returns list of DBBinary objects for given C{package} name
+def get_component_by_package_suite(package, suite_list, arch_list=[], session=None):
+    '''
+    Returns the component name of the newest binary package in suite_list or
+    None if no package is found. The result can be optionally filtered by a list
+    of architecture names.
 
     @type package: str
     @param package: DBBinary package name to search for
 
-    @type version: str or None
-    @param version: Version to search for (or None)
-
-    @type architecture: str, list or None
-    @param architecture: Architectures to limit to (or None if no limit)
-
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: list
-    @return: list of DBBinary objects for the given name (may be empty)
-    """
+    @type suite_list: list of str
+    @param suite_list: list of suite_name items
 
-    q = session.query(DBBinary).filter_by(package=package)
-
-    if version is not None:
-        q = q.filter_by(version=version)
-
-    if architecture is not None:
-        if not isinstance(architecture, list):
-            architecture = [architecture]
-        q = q.join(Architecture).filter(Architecture.arch_string.in_(architecture))
-
-    ret = q.all()
-
-    return ret
+    @type arch_list: list of str
+    @param arch_list: optional list of arch_string items that defaults to []
 
-__all__.append('get_binaries_from_name')
-
-@session_wrapper
-def get_binaries_from_source_id(source_id, session=None):
-    """
-    Returns list of DBBinary objects for given C{source_id}
-
-    @type source_id: int
-    @param source_id: source_id to search for
-
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: list
-    @return: list of DBBinary objects for the given name (may be empty)
-    """
-
-    return session.query(DBBinary).filter_by(source_id=source_id).all()
-
-__all__.append('get_binaries_from_source_id')
-
-@session_wrapper
-def get_binary_from_name_suite(package, suitename, session=None):
-    ### For dak examine-package
-    ### XXX: Doesn't use object API yet
-
-    sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name
-             FROM binaries b, files fi, location l, component c, bin_associations ba, suite su
-             WHERE b.package='%(package)s'
-               AND b.file = fi.id
-               AND fi.location = l.id
-               AND l.component = c.id
-               AND ba.bin=b.id
-               AND ba.suite = su.id
-               AND su.suite_name %(suitename)s
-          ORDER BY b.version DESC"""
-
-    return session.execute(sql % {'package': package, 'suitename': suitename})
-
-__all__.append('get_binary_from_name_suite')
-
-@session_wrapper
-def get_binary_components(package, suitename, arch, session=None):
-    # Check for packages that have moved from one component to another
-    query = """SELECT c.name FROM binaries b, bin_associations ba, suite s, location l, component c, architecture a, files f
-    WHERE b.package=:package AND s.suite_name=:suitename
-      AND (a.arch_string = :arch OR a.arch_string = 'all')
-      AND ba.bin = b.id AND ba.suite = s.id AND b.architecture = a.id
-      AND f.location = l.id
-      AND l.component = c.id
-      AND b.file = f.id"""
-
-    vals = {'package': package, 'suitename': suitename, 'arch': arch}
-
-    return session.execute(query, vals)
-
-__all__.append('get_binary_components')
-
-################################################################################
-
-class BinaryACL(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<BinaryACL %s>' % self.binary_acl_id
-
-__all__.append('BinaryACL')
-
-################################################################################
-
-class BinaryACLMap(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    @rtype: str or NoneType
+    @return: name of component or None
+    '''
 
-    def __repr__(self):
-        return '<BinaryACLMap %s>' % self.binary_acl_map_id
+    q = session.query(DBBinary).filter_by(package = package). \
+        join(DBBinary.suites).filter(Suite.suite_name.in_(suite_list))
+    if len(arch_list) > 0:
+        q = q.join(DBBinary.architecture). \
+            filter(Architecture.arch_string.in_(arch_list))
+    binary = q.order_by(desc(DBBinary.version)).first()
+    if binary is None:
+        return None
+    else:
+        return binary.poolfile.component.component_name
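+
+# Hedged example (suite and package names are illustrative): find where the
+# newest 'hello' binary lives in unstable.
+#
+#   name = get_component_by_package_suite('hello', ['unstable'])
+#   # -> e.g. 'main', or None if no such binary exists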
 
-__all__.append('BinaryACLMap')
+__all__.append('get_component_by_package_suite')
 
 ################################################################################
 
-MINIMAL_APT_CONF="""
-Dir
-{
-   ArchiveDir "%(archivepath)s";
-   OverrideDir "%(overridedir)s";
-   CacheDir "%(cachedir)s";
-};
-
-Default
-{
-   Packages::Compress ". bzip2 gzip";
-   Sources::Compress ". bzip2 gzip";
-   DeLinkLimit 0;
-   FileMode 0664;
-}
-
-bindirectory "incoming"
-{
-   Packages "Packages";
-   Contents " ";
-
-   BinOverride "override.sid.all3";
-   BinCacheDB "packages-accepted.db";
-
-   FileList "%(filelist)s";
-
-   PathPrefix "";
-   Packages::Extensions ".deb .udeb";
-};
-
-bindirectory "incoming/"
-{
-   Sources "Sources";
-   BinOverride "override.sid.all3";
-   SrcOverride "override.sid.all3.src";
-   FileList "%(filelist)s";
-};
-"""
-
 class BuildQueue(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -672,280 +638,13 @@ class BuildQueue(object):
     def __repr__(self):
         return '<BuildQueue %s>' % self.queue_name
 
-    def write_metadata(self, starttime, force=False):
-        # Do we write out metafiles?
-        if not (force or self.generate_metadata):
-            return
-
-        session = DBConn().session().object_session(self)
-
-        fl_fd = fl_name = ac_fd = ac_name = None
-        tempdir = None
-        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
-        startdir = os.getcwd()
-
-        try:
-            # Grab files we want to include
-            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
-            # Write file list with newer files
-            (fl_fd, fl_name) = mkstemp()
-            for n in newer:
-                os.write(fl_fd, '%s\n' % n.fullpath)
-            os.close(fl_fd)
-
-            cnf = Config()
-
-            # Write minimal apt.conf
-            # TODO: Remove hardcoding from template
-            (ac_fd, ac_name) = mkstemp()
-            os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
-                                                'filelist': fl_name,
-                                                'cachedir': cnf["Dir::Cache"],
-                                                'overridedir': cnf["Dir::Override"],
-                                                })
-            os.close(ac_fd)
-
-            # Run apt-ftparchive generate
-            os.chdir(os.path.dirname(ac_name))
-            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
-
-            # Run apt-ftparchive release
-            # TODO: Eww - fix this
-            bname = os.path.basename(self.path)
-            os.chdir(self.path)
-            os.chdir('..')
-
-            # We have to remove the Release file otherwise it'll be included in the
-            # new one
-            try:
-                os.unlink(os.path.join(bname, 'Release'))
-            except OSError:
-                pass
-
-            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
-
-            # Crude hack with open and append, but this whole section is and should be redone.
-            if self.notautomatic:
-                release=open("Release", "a")
-                release.write("NotAutomatic: yes")
-                release.close()
-
-            # Sign if necessary
-            if self.signingkey:
-                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
-                if cnf.has_key("Dinstall::SigningPubKeyring"):
-                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
-
-                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
-
-            # Move the files if we got this far
-            os.rename('Release', os.path.join(bname, 'Release'))
-            if self.signingkey:
-                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
-
-        # Clean up any left behind files
-        finally:
-            os.chdir(startdir)
-            if fl_fd:
-                try:
-                    os.close(fl_fd)
-                except OSError:
-                    pass
-
-            if fl_name:
-                try:
-                    os.unlink(fl_name)
-                except OSError:
-                    pass
-
-            if ac_fd:
-                try:
-                    os.close(ac_fd)
-                except OSError:
-                    pass
-
-            if ac_name:
-                try:
-                    os.unlink(ac_name)
-                except OSError:
-                    pass
-
-    def clean_and_update(self, starttime, Logger, dryrun=False):
-        """WARNING: This routine commits for you"""
-        session = DBConn().session().object_session(self)
-
-        if self.generate_metadata and not dryrun:
-            self.write_metadata(starttime)
-
-        # Grab files older than our execution time
-        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
-
-        for o in older:
-            killdb = False
-            try:
-                if dryrun:
-                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
-                else:
-                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
-                    os.unlink(o.fullpath)
-                    killdb = True
-            except OSError, e:
-                # If it wasn't there, don't worry
-                if e.errno == ENOENT:
-                    killdb = True
-                else:
-                    # TODO: Replace with proper logging call
-                    Logger.log(["E: Could not remove %s" % o.fullpath])
-
-            if killdb:
-                session.delete(o)
-
-        session.commit()
-
-        for f in os.listdir(self.path):
-            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
-                continue
-
-            try:
-                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
-            except NoResultFound:
-                fp = os.path.join(self.path, f)
-                if dryrun:
-                    Logger.log(["I: Would remove unused link %s" % fp])
-                else:
-                    Logger.log(["I: Removing unused link %s" % fp])
-                    try:
-                        os.unlink(fp)
-                    except OSError:
-                        Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
-
-    def add_file_from_pool(self, poolfile):
-        """Copies a file into the pool.  Assumes that the PoolFile object is
-        attached to the same SQLAlchemy session as the Queue object is.
-
-        The caller is responsible for committing after calling this function."""
-        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-
-        # Check if we have a file of this name or this ID already
-        for f in self.queuefiles:
-            if f.fileid is not None and f.fileid == poolfile.file_id or \
-               f.poolfile.filename == poolfile_basename:
-                   # In this case, update the BuildQueueFile entry so we
-                   # don't remove it too early
-                   f.lastused = datetime.now()
-                   DBConn().session().object_session(poolfile).add(f)
-                   return f
-
-        # Prepare BuildQueueFile object
-        qf = BuildQueueFile()
-        qf.build_queue_id = self.queue_id
-        qf.lastused = datetime.now()
-        qf.filename = poolfile_basename
-
-        targetpath = poolfile.fullpath
-        queuepath = os.path.join(self.path, poolfile_basename)
-
-        try:
-            if self.copy_files:
-                # We need to copy instead of symlink
-                import utils
-                utils.copy(targetpath, queuepath)
-                # NULL in the fileid field implies a copy
-                qf.fileid = None
-            else:
-                os.symlink(targetpath, queuepath)
-                qf.fileid = poolfile.file_id
-        except OSError:
-            return None
-
-        # Get the same session as the PoolFile is using and add the qf to it
-        DBConn().session().object_session(poolfile).add(qf)
-
-        return qf
-
-
 __all__.append('BuildQueue')
 
-@session_wrapper
-def get_build_queue(queuename, session=None):
-    """
-    Returns BuildQueue object for given C{queue name}, creating it if it does not
-    exist.
-
-    @type queuename: string
-    @param queuename: The name of the queue
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: BuildQueue
-    @return: BuildQueue object for the given queue
-    """
-
-    q = session.query(BuildQueue).filter_by(queue_name=queuename)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_build_queue')
-
 ################################################################################
 
-class BuildQueueFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
-
-    @property
-    def fullpath(self):
-        return os.path.join(self.buildqueue.path, self.filename)
-
-
-__all__.append('BuildQueueFile')
-
-################################################################################
-
-class ChangePendingBinary(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
-
-__all__.append('ChangePendingBinary')
-
-################################################################################
-
-class ChangePendingFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingFile %s>' % self.change_pending_file_id
-
-__all__.append('ChangePendingFile')
-
-################################################################################
-
-class ChangePendingSource(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingSource %s>' % self.change_pending_source_id
-
-__all__.append('ChangePendingSource')
-
-################################################################################
-
-class Component(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class Component(ORMObject):
+    def __init__(self, component_name = None):
+        self.component_name = component_name
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -959,8 +658,12 @@ class Component(object):
         # This signals to use the normal comparison operator
         return NotImplemented
 
-    def __repr__(self):
-        return '<Component %s>' % self.component_name
+    def properties(self):
+        return ['component_name', 'component_id', 'description', \
+            'meets_dfsg', 'overrides_count']
+
+    def not_null_constraints(self):
+        return ['component_name']
 
 
 __all__.append('Component')
@@ -988,211 +691,63 @@ def get_component(component, session=None):
 
 __all__.append('get_component')
 
-################################################################################
-
-class DBConfig(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<DBConfig %s>' % self.name
-
-__all__.append('DBConfig')
-
-################################################################################
-
-@session_wrapper
-def get_or_set_contents_file_id(filename, session=None):
-    """
-    Returns database id for given filename.
-
-    If no matching file is found, a row is inserted.
-
-    @type filename: string
-    @param filename: The filename
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied).  If not passed, a commit will be performed at
-    the end of the function, otherwise the caller is responsible for commiting.
-
-    @rtype: int
-    @return: the database id for the given component
-    """
-
-    q = session.query(ContentFilename).filter_by(filename=filename)
-
-    try:
-        ret = q.one().cafilename_id
-    except NoResultFound:
-        cf = ContentFilename()
-        cf.filename = filename
-        session.add(cf)
-        session.commit_or_flush()
-        ret = cf.cafilename_id
-
-    return ret
-
-__all__.append('get_or_set_contents_file_id')
-
-@session_wrapper
-def get_contents(suite, overridetype, section=None, session=None):
-    """
-    Returns contents for a suite / overridetype combination, limiting
-    to a section if not None.
-
-    @type suite: Suite
-    @param suite: Suite object
-
-    @type overridetype: OverrideType
-    @param overridetype: OverrideType object
-
-    @type section: Section
-    @param section: Optional section object to limit results to
-
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: ResultsProxy
-    @return: ResultsProxy object set up to return tuples of (filename, section,
-    package, arch_id)
-    """
-
-    # find me all of the contents for a given suite
-    contents_q = """SELECT (p.path||'/'||n.file) AS fn,
-                            s.section,
-                            b.package,
-                            b.architecture
-                   FROM content_associations c join content_file_paths p ON (c.filepath=p.id)
-                   JOIN content_file_names n ON (c.filename=n.id)
-                   JOIN binaries b ON (b.id=c.binary_pkg)
-                   JOIN override o ON (o.package=b.package)
-                   JOIN section s ON (s.id=o.section)
-                   WHERE o.suite = :suiteid AND o.type = :overridetypeid
-                   AND b.type=:overridetypename"""
-
-    vals = {'suiteid': suite.suite_id,
-            'overridetypeid': overridetype.overridetype_id,
-            'overridetypename': overridetype.overridetype}
-
-    if section is not None:
-        contents_q += " AND s.id = :sectionid"
-        vals['sectionid'] = section.section_id
-
-    contents_q += " ORDER BY fn"
-
-    return session.execute(contents_q, vals)
-
-__all__.append('get_contents')
-
-################################################################################
-
-class ContentFilepath(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ContentFilepath %s>' % self.filepath
-
-__all__.append('ContentFilepath')
-
-@session_wrapper
-def get_or_set_contents_path_id(filepath, session=None):
-    """
-    Returns database id for given path.
-
-    If no matching file is found, a row is inserted.
-
-    @type filepath: string
-    @param filepath: The filepath
-
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied).  If not passed, a commit will be performed at
-    the end of the function, otherwise the caller is responsible for commiting.
-
-    @rtype: int
-    @return: the database id for the given path
-    """
-
-    q = session.query(ContentFilepath).filter_by(filepath=filepath)
-
-    try:
-        ret = q.one().cafilepath_id
-    except NoResultFound:
-        cf = ContentFilepath()
-        cf.filepath = filepath
-        session.add(cf)
-        session.commit_or_flush()
-        ret = cf.cafilepath_id
-
-    return ret
-
-__all__.append('get_or_set_contents_path_id')
+def get_mapped_component_name(component_name):
+    cnf = Config()
+    for m in cnf.value_list("ComponentMappings"):
+        (src, dst) = m.split()
+        if component_name == src:
+            component_name = dst
+    return component_name
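+
+# Hedged example: assuming a dak.conf entry of the (hypothetical) form
+#   ComponentMappings { "contrib/non-free non-free"; };
+# each entry splits into (src, dst), so:
+#
+#   get_mapped_component_name('contrib/non-free')  # -> 'non-free'
+#   get_mapped_component_name('main')              # unchanged -> 'main'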
 
-################################################################################
+__all__.append('get_mapped_component_name')
 
-class ContentAssociation(object):
-    def __init__(self, *args, **kwargs):
-        pass
+@session_wrapper
+def get_mapped_component(component_name, session=None):
+    """get component after mappings
 
-    def __repr__(self):
-        return '<ContentAssociation %s>' % self.ca_id
+    Evaluate component mappings from ComponentMappings in dak.conf for the
+    given component name.
 
-__all__.append('ContentAssociation')
+    @todo: ansgar wants to get rid of this. It's currently only used for
+           the security archive
 
-def insert_content_paths(binary_id, fullpaths, session=None):
-    """
-    Make sure given path is associated with given binary id
+    @type  component_name: str
+    @param component_name: component name
 
-    @type binary_id: int
-    @param binary_id: the id of the binary
-    @type fullpaths: list
-    @param fullpaths: the list of paths of the file being associated with the binary
-    @type session: SQLAlchemy session
-    @param session: Optional SQLAlchemy session.  If this is passed, the caller
-    is responsible for ensuring a transaction has begun and committing the
-    results or rolling back based on the result code.  If not passed, a commit
-    will be performed at the end of the function, otherwise the caller is
-    responsible for commiting.
+    @param session: database session
 
-    @return: True upon success
+    @rtype:  L{daklib.dbconn.Component} or C{None}
+    @return: component after applying maps or C{None}
     """
+    component_name = get_mapped_component_name(component_name)
+    component = session.query(Component).filter_by(component_name=component_name).first()
+    return component
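+
+# Continuing the sketch above, the mapped name is then resolved to a Component
+# row, or None if no such component exists:
+#
+#   c = get_mapped_component('contrib/non-free')
+#   c.component_name if c else None   # -> 'non-free' under that mapping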
 
-    privatetrans = False
-    if session is None:
-        session = DBConn().session()
-        privatetrans = True
+__all__.append('get_mapped_component')
 
-    try:
-        # Insert paths
-        def generate_path_dicts():
-            for fullpath in fullpaths:
-                if fullpath.startswith( './' ):
-                    fullpath = fullpath[2:]
+@session_wrapper
+def get_component_names(session=None):
+    """
+    Returns list of strings of component names.
 
-                yield {'filename':fullpath, 'id': binary_id }
+    @rtype: list
+    @return: list of strings of component names
+    """
 
-        for d in generate_path_dicts():
-            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
-                         d )
+    return [ x.component_name for x in session.query(Component).all() ]
 
-        session.commit()
-        if privatetrans:
-            session.close()
-        return True
+__all__.append('get_component_names')
 
-    except:
-        traceback.print_exc()
+################################################################################
 
-        # Only rollback if we set up the session ourself
-        if privatetrans:
-            session.rollback()
-            session.close()
+class DBConfig(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-        return False
+    def __repr__(self):
+        return '<DBConfig %s>' % self.name
 
-__all__.append('insert_content_paths')
+__all__.append('DBConfig')
 
 ################################################################################
 
@@ -1240,135 +795,72 @@ __all__.append('get_dscfiles')
 
 ################################################################################
 
+class ExternalOverride(ORMObject):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ExternalOverride %s = %s: %s>' % (self.package, self.key, self.value)
+
+__all__.append('ExternalOverride')
+
+################################################################################
+
 class PoolFile(ORMObject):
-    def __init__(self, filename = None, location = None, filesize = -1, \
+    def __init__(self, filename = None, filesize = -1, \
         md5sum = None):
         self.filename = filename
-        self.location = location
         self.filesize = filesize
         self.md5sum = md5sum
 
     @property
     def fullpath(self):
-        return os.path.join(self.location.path, self.filename)
+        session = DBConn().session().object_session(self)
+        af = session.query(ArchiveFile).join(Archive) \
+                    .filter(ArchiveFile.file == self) \
+                    .order_by(Archive.tainted.desc()).first()
+        return af.path
+
+    @property
+    def component(self):
+        session = DBConn().session().object_session(self)
+        component_id = session.query(ArchiveFile.component_id).filter(ArchiveFile.file == self) \
+                              .group_by(ArchiveFile.component_id).one()
+        return session.query(Component).get(component_id)
+
+    @property
+    def basename(self):
+        return os.path.basename(self.filename)
 
-    def is_valid(self, filesize = -1, md5sum = None):\
-        return self.filesize == filesize and self.md5sum == md5sum
+    def is_valid(self, filesize = -1, md5sum = None):
+        return self.filesize == long(filesize) and self.md5sum == md5sum
 
     def properties(self):
         return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
-            'sha256sum', 'location', 'source', 'last_used']
+            'sha256sum', 'source', 'binary', 'last_used']
 
     def not_null_constraints(self):
-        return ['filename', 'md5sum', 'location']
-
-__all__.append('PoolFile')
-
-@session_wrapper
-def check_poolfile(filename, filesize, md5sum, location_id, session=None):
-    """
-    Returns a tuple:
-    (ValidFileFound [boolean], PoolFile object or None)
-
-    @type filename: string
-    @param filename: the filename of the file to check against the DB
-
-    @type filesize: int
-    @param filesize: the size of the file to check against the DB
-
-    @type md5sum: string
-    @param md5sum: the md5sum of the file to check against the DB
-
-    @type location_id: int
-    @param location_id: the id of the location to look in
-
-    @rtype: tuple
-    @return: Tuple of length 2.
-                 - If valid pool file found: (C{True}, C{PoolFile object})
-                 - If valid pool file not found:
-                     - (C{False}, C{None}) if no file found
-                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
-    """
-
-    poolfile = session.query(Location).get(location_id). \
-        files.filter_by(filename=filename).first()
-    valid = False
-    if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
-        valid = True
-
-    return (valid, poolfile)
-
-__all__.append('check_poolfile')
-
-# TODO: the implementation can trivially be inlined at the place where the
-# function is called
-@session_wrapper
-def get_poolfile_by_id(file_id, session=None):
-    """
-    Returns a PoolFile objects or None for the given id
-
-    @type file_id: int
-    @param file_id: the id of the file to look for
-
-    @rtype: PoolFile or None
-    @return: either the PoolFile object or None
-    """
-
-    return session.query(PoolFile).get(file_id)
-
-__all__.append('get_poolfile_by_id')
-
-@session_wrapper
-def get_poolfile_like_name(filename, session=None):
-    """
-    Returns an array of PoolFile objects which are like the given name
-
-    @type filename: string
-    @param filename: the filename of the file to check against the DB
-
-    @rtype: array
-    @return: array of PoolFile objects
-    """
-
-    # TODO: There must be a way of properly using bind parameters with %FOO%
-    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
-
-    return q.all()
+        return ['filename', 'md5sum']
 
-__all__.append('get_poolfile_like_name')
-
-@session_wrapper
-def add_poolfile(filename, datadict, location_id, session=None):
-    """
-    Add a new file to the pool
-
-    @type filename: string
-    @param filename: filename
-
-    @type datadict: dict
-    @param datadict: dict with needed data
-
-    @type location_id: int
-    @param location_id: database id of the location
+    def identical_to(self, filename):
+        """
+        Compare size and hash with the given file.
 
-    @rtype: PoolFile
-    @return: the PoolFile object created
-    """
-    poolfile = PoolFile()
-    poolfile.filename = filename
-    poolfile.filesize = datadict["size"]
-    poolfile.md5sum = datadict["md5sum"]
-    poolfile.sha1sum = datadict["sha1sum"]
-    poolfile.sha256sum = datadict["sha256sum"]
-    poolfile.location_id = location_id
+        @rtype: bool
+        @return: true if the given file has the same size and hash as this object; false otherwise
+        """
+        st = os.stat(filename)
+        if self.filesize != st.st_size:
+            return False
 
-    session.add(poolfile)
-    # Flush to get a file id (NB: This is not a commit)
-    session.flush()
+        f = open(filename, "r")
+        sha256sum = apt_pkg.sha256sum(f)
+        if sha256sum != self.sha256sum:
+            return False
 
-    return poolfile
+        return True
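+
+    # Hedged usage sketch (path is illustrative): verify an on-disk upload
+    # against the database record before treating it as a duplicate.
+    #
+    #   if poolfile.identical_to('/srv/queue/hello_1.0-1.dsc'):
+    #       pass  # same size and SHA-256 as the recorded pool file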
 
-__all__.append('add_poolfile')
+__all__.append('PoolFile')
 
 ################################################################################
 
@@ -1461,9 +953,6 @@ def get_ldap_name(entry):
 ################################################################################
 
 class Keyring(object):
-    gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
-                     " --with-colons --fingerprint --fingerprint"
-
     keys = {}
     fpr_lookup = {}
 
@@ -1493,11 +982,14 @@ class Keyring(object):
         if not self.keyring_id:
             raise Exception('Must be initialized with database information')
 
-        k = os.popen(self.gpg_invocation % keyring, "r")
+        cmd = ["gpg", "--no-default-keyring", "--keyring", keyring,
+               "--with-colons", "--fingerprint", "--fingerprint"]
+        p = daklib.daksubprocess.Popen(cmd, stdout=subprocess.PIPE)
+
         key = None
-        signingkey = False
+        need_fingerprint = False
 
-        for line in k.xreadlines():
+        for line in p.stdout:
             field = line.split(":")
             if field[0] == "pub":
                 key = field[4]
@@ -1506,18 +998,20 @@ class Keyring(object):
                 if "@" in addr:
                     self.keys[key]["email"] = addr
                     self.keys[key]["name"] = name
-                self.keys[key]["fingerprints"] = []
-                signingkey = True
-            elif key and field[0] == "sub" and len(field) >= 12:
-                signingkey = ("s" in field[11])
+                need_fingerprint = True
             elif key and field[0] == "uid":
                 (name, addr) = self.parse_address(field[9])
                 if "email" not in self.keys[key] and "@" in addr:
                     self.keys[key]["email"] = addr
                     self.keys[key]["name"] = name
-            elif signingkey and field[0] == "fpr":
-                self.keys[key]["fingerprints"].append(field[9])
+            elif need_fingerprint and field[0] == "fpr":
+                self.keys[key]["fingerprints"] = [field[9]]
                 self.fpr_lookup[field[9]] = key
+                need_fingerprint = False
+
+        r = p.wait()
+        if r != 0:
+            raise subprocess.CalledProcessError(r, cmd)
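+
+    # Sample of the --with-colons records parsed above (key data is made up;
+    # field[0] is the record type, field[4] the key id, field[9] the user id
+    # or fingerprint):
+    #
+    #   pub:u:4096:1:0123456789ABCDEF:1136041200:::u:::scESC:
+    #   uid:u::::1136041200::HASH::Ada Lovelace <ada@example.org>:
+    #   fpr:::::::::76543210FEDCBA980123456789ABCDEF01234567: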
 
     def import_users_from_ldap(self, session):
         import ldap
@@ -1525,11 +1019,19 @@ class Keyring(object):
 
         LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
         LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
+        ca_cert_file = cnf.get('Import-LDAP-Fingerprints::CACertFile')
 
         l = ldap.open(LDAPServer)
+
+        if ca_cert_file:
+            l.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
+            l.set_option(ldap.OPT_X_TLS_CACERTFILE, ca_cert_file)
+            l.set_option(ldap.OPT_X_TLS_NEWCTX, True)
+            l.start_tls_s()
+
         l.simple_bind_s("","")
         Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
-               "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
+               "(&(keyfingerprint=*)(supplementaryGid=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
                ["uid", "keyfingerprint", "cn", "mn", "sn"])
 
         ldap_fin_uid_id = {}
@@ -1604,16 +1106,15 @@ def get_keyring(keyring, session=None):
 
 __all__.append('get_keyring')
 
-################################################################################
-
-class KeyringACLMap(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<KeyringACLMap %s>' % self.keyring_acl_map_id
+@session_wrapper
+def get_active_keyring_paths(session=None):
+    """
+    @rtype: list
+    @return: list of active keyring paths
+    """
+    return [ x.keyring_name for x in session.query(Keyring).filter(Keyring.active == True).order_by(desc(Keyring.priority)).all() ]
 
-__all__.append('KeyringACLMap')
+__all__.append('get_active_keyring_paths')
 
 ################################################################################
 
@@ -1624,19 +1125,6 @@ class DBChange(object):
     def __repr__(self):
         return '<DBChange %s>' % self.changesname
 
-    def clean_from_queue(self):
-        session = DBConn().session().object_session(self)
-
-        # Remove changes_pool_files entries
-        self.poolfiles = []
-
-        # Remove changes_pending_files references
-        self.files = []
-
-        # Clear out of queue
-        self.in_queue = None
-        self.approved_for_id = None
-
 __all__.append('DBChange')
 
 @session_wrapper
@@ -1666,56 +1154,6 @@ __all__.append('get_dbchange')
 
 ################################################################################
 
-class Location(ORMObject):
-    def __init__(self, path = None):
-        self.path = path
-        # the column 'type' should go away, see comment at mapper
-        self.archive_type = 'pool'
-
-    def properties(self):
-        return ['path', 'archive_type', 'component', 'files_count']
-
-    def not_null_constraints(self):
-        return ['path', 'archive_type']
-
-__all__.append('Location')
-
-@session_wrapper
-def get_location(location, component=None, archive=None, session=None):
-    """
-    Returns Location object for the given combination of location, component
-    and archive
-
-    @type location: string
-    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
-
-    @type component: string
-    @param component: the component name (if None, no restriction applied)
-
-    @type archive: string
-    @param archive: the archive name (if None, no restriction applied)
-
-    @rtype: Location / None
-    @return: Either a Location object or None if one can't be found
-    """
-
-    q = session.query(Location).filter_by(path=location)
-
-    if archive is not None:
-        q = q.join(Archive).filter_by(archive_name=archive)
-
-    if component is not None:
-        q = q.join(Component).filter_by(component_name=component)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_location')
-
-################################################################################
-
 class Maintainer(ORMObject):
     def __init__(self, name = None):
         self.name = name
@@ -1797,7 +1235,7 @@ class NewComment(object):
 __all__.append('NewComment')
 
 @session_wrapper
-def has_new_comment(package, version, session=None):
+def has_new_comment(policy_queue, package, version, session=None):
     """
     Returns true if the given combination of C{package}, C{version} has a comment.
 
@@ -1815,7 +1253,7 @@ def has_new_comment(package, version, session=None):
     @return: true/false
     """
 
-    q = session.query(NewComment)
+    q = session.query(NewComment).filter_by(policy_queue=policy_queue)
     q = q.filter_by(package=package)
     q = q.filter_by(version=version)
 
@@ -1824,7 +1262,7 @@ def has_new_comment(package, version, session=None):
 __all__.append('has_new_comment')
 
 @session_wrapper
-def get_new_comments(package=None, version=None, comment_id=None, session=None):
+def get_new_comments(policy_queue, package=None, version=None, comment_id=None, session=None):
     """
     Returns (possibly empty) list of NewComment objects for the given
     parameters
@@ -1846,7 +1284,7 @@ def get_new_comments(package=None, version=None, comment_id=None, session=None):
     @return: A (possibly empty) list of NewComment objects will be returned
     """
 
-    q = session.query(NewComment)
+    q = session.query(NewComment).filter_by(policy_queue=policy_queue)
     if package is not None: q = q.filter_by(package=package)
     if version is not None: q = q.filter_by(version=version)
     if comment_id is not None: q = q.filter_by(comment_id=comment_id)
@@ -1857,12 +1295,22 @@ __all__.append('get_new_comments')
 
 ################################################################################
 
-class Override(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class Override(ORMObject):
+    def __init__(self, package = None, suite = None, component = None, overridetype = None, \
+        section = None, priority = None):
+        self.package = package
+        self.suite = suite
+        self.component = component
+        self.overridetype = overridetype
+        self.section = section
+        self.priority = priority
 
-    def __repr__(self):
-        return '<Override %s (%s)>' % (self.package, self.suite_id)
+    def properties(self):
+        return ['package', 'suite', 'component', 'overridetype', 'section', \
+            'priority']
+
+    def not_null_constraints(self):
+        return ['package', 'suite', 'component', 'overridetype', 'section']
 
 __all__.append('Override')
 
@@ -1916,12 +1364,15 @@ __all__.append('get_override')
 
 ################################################################################
 
-class OverrideType(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class OverrideType(ORMObject):
+    def __init__(self, overridetype = None):
+        self.overridetype = overridetype
 
-    def __repr__(self):
-        return '<OverrideType %s>' % self.overridetype
+    def properties(self):
+        return ['overridetype', 'overridetype_id', 'overrides_count']
+
+    def not_null_constraints(self):
+        return ['overridetype']
 
 __all__.append('OverrideType')
 
@@ -1952,111 +1403,6 @@ __all__.append('get_override_type')
 
 ################################################################################
 
-class DebContents(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<DebConetnts %s: %s>' % (self.package.package,self.file)
-
-__all__.append('DebContents')
-
-
-class UdebContents(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<UdebConetnts %s: %s>' % (self.package.package,self.file)
-
-__all__.append('UdebContents')
-
-class PendingBinContents(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<PendingBinContents %s>' % self.contents_id
-
-__all__.append('PendingBinContents')
-
-def insert_pending_content_paths(package,
-                                 is_udeb,
-                                 fullpaths,
-                                 session=None):
-    """
-    Make sure given paths are temporarily associated with given
-    package
-
-    @type package: dict
-    @param package: the package to associate with should have been read in from the binary control file
-    @type fullpaths: list
-    @param fullpaths: the list of paths of the file being associated with the binary
-    @type session: SQLAlchemy session
-    @param session: Optional SQLAlchemy session.  If this is passed, the caller
-    is responsible for ensuring a transaction has begun and committing the
-    results or rolling back based on the result code.  If not passed, a commit
-    will be performed at the end of the function
-
-    @return: True upon success, False if there is a problem
-    """
-
-    privatetrans = False
-
-    if session is None:
-        session = DBConn().session()
-        privatetrans = True
-
-    try:
-        arch = get_architecture(package['Architecture'], session)
-        arch_id = arch.arch_id
-
-        # Remove any already existing recorded files for this package
-        q = session.query(PendingBinContents)
-        q = q.filter_by(package=package['Package'])
-        q = q.filter_by(version=package['Version'])
-        q = q.filter_by(architecture=arch_id)
-        q.delete()
-
-        for fullpath in fullpaths:
-
-            if fullpath.startswith( "./" ):
-                fullpath = fullpath[2:]
-
-            pca = PendingBinContents()
-            pca.package = package['Package']
-            pca.version = package['Version']
-            pca.file = fullpath
-            pca.architecture = arch_id
-
-            if isudeb:
-                pca.type = 8 # gross
-            else:
-                pca.type = 7 # also gross
-            session.add(pca)
-
-        # Only commit if we set up the session ourself
-        if privatetrans:
-            session.commit()
-            session.close()
-        else:
-            session.flush()
-
-        return True
-    except Exception, e:
-        traceback.print_exc()
-
-        # Only rollback if we set up the session ourself
-        if privatetrans:
-            session.rollback()
-            session.close()
-
-        return False
-
-__all__.append('insert_pending_content_paths')
-
-################################################################################
-
 class PolicyQueue(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -2091,36 +1437,43 @@ def get_policy_queue(queuename, session=None):
 
 __all__.append('get_policy_queue')
 
-@session_wrapper
-def get_policy_queue_from_path(pathname, session=None):
-    """
-    Returns PolicyQueue object for given C{path name}
+################################################################################
 
-    @type queuename: string
-    @param queuename: The path
+class PolicyQueueUpload(object):
+    def __cmp__(self, other):
+        ret = cmp(self.changes.source, other.changes.source)
+        if ret == 0:
+            ret = apt_pkg.version_compare(self.changes.version, other.changes.version)
+        if ret == 0:
+            if self.source is not None and other.source is None:
+                ret = -1
+            elif self.source is None and other.source is not None:
+                ret = 1
+        if ret == 0:
+            ret = cmp(self.changes.changesname, other.changes.changesname)
+        return ret
+
+__all__.append('PolicyQueueUpload')
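
Uploads in a policy queue sort by source name, then by Debian version (via
apt_pkg.version_compare()), with sourceful uploads ahead of binary-only ones,
and finally by .changes file name. A minimal sketch of relying on that
ordering through Python 2's __cmp__ protocol; the queue name "new" is
illustrative:

    session = DBConn().session()
    queue = get_policy_queue('new', session)
    for upload in sorted(queue.uploads):
        print upload.changes.changesname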
 
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
+################################################################################
 
-    @rtype: PolicyQueue
-    @return: PolicyQueue object for the given queue
-    """
+class PolicyQueueByhandFile(object):
+    pass
 
-    q = session.query(PolicyQueue).filter_by(path=pathname)
+__all__.append('PolicyQueueByhandFile')
 
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
+################################################################################
 
-__all__.append('get_policy_queue_from_path')
+class Priority(ORMObject):
+    def __init__(self, priority = None, level = None):
+        self.priority = priority
+        self.level = level
 
-################################################################################
+    def properties(self):
+        return ['priority', 'priority_id', 'level', 'overrides_count']
 
-class Priority(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def not_null_constraints(self):
+        return ['priority', 'level']
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -2134,9 +1487,6 @@ class Priority(object):
         # This signals to use the normal comparison operator
         return NotImplemented
 
-    def __repr__(self):
-        return '<Priority %s (%s)>' % (self.priority, self.priority_id)
-
 __all__.append('Priority')
 
 @session_wrapper
@@ -2188,9 +1538,15 @@ __all__.append('get_priorities')
 
 ################################################################################
 
-class Section(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class Section(ORMObject):
+    def __init__(self, section = None):
+        self.section = section
+
+    def properties(self):
+        return ['section', 'section_id', 'overrides_count']
+
+    def not_null_constraints(self):
+        return ['section']
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -2204,9 +1560,6 @@ class Section(object):
         # This signals to use the normal comparison operator
         return NotImplemented
 
-    def __repr__(self):
-        return '<Section %s>' % self.section
-
 __all__.append('Section')
 
 @session_wrapper
@@ -2239,106 +1592,177 @@ def get_sections(session=None):
     """
     Returns dictionary of section names -> id mappings
 
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
+    @type session: Session
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: dictionary
+    @return: dictionary of section names -> id mappings
+    """
+
+    ret = {}
+    q = session.query(Section)
+    for x in q.all():
+        ret[x.section] = x.section_id
+
+    return ret
+
+__all__.append('get_sections')
+
+################################################################################
+
+class SignatureHistory(ORMObject):
+    @classmethod
+    def from_signed_file(cls, signed_file):
+        """signature history entry from signed file
+
+        @type  signed_file: L{daklib.gpg.SignedFile}
+        @param signed_file: signed file
+
+        @rtype: L{SignatureHistory}
+        """
+        self = cls()
+        self.fingerprint = signed_file.primary_fingerprint
+        self.signature_timestamp = signed_file.signature_timestamp
+        self.contents_sha1 = signed_file.contents_sha1()
+        return self
+
+    def query(self, session):
+        return session.query(SignatureHistory).filter_by(fingerprint=self.fingerprint, signature_timestamp=self.signature_timestamp, contents_sha1=self.contents_sha1).first()
+
+__all__.append('SignatureHistory')
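
SignatureHistory exists to catch replayed signatures: build an entry from a
verified upload and look for an identical (fingerprint, timestamp, content
hash) triple before accepting it. A hedged sketch, where signed_file stands
in for a daklib.gpg.SignedFile obtained elsewhere:

    session = DBConn().session()
    entry = SignatureHistory.from_signed_file(signed_file)
    if entry.query(session) is None:
        # not seen before, so record it
        session.add(entry)
        session.commit()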
+
+################################################################################
+
+class SrcContents(ORMObject):
+    def __init__(self, file = None, source = None):
+        self.file = file
+        self.source = source
+
+    def properties(self):
+        return ['file', 'source']
+
+__all__.append('SrcContents')
+
+################################################################################
+
+from debian.debfile import Deb822
+
+# Temporary Deb822 subclass to fix bugs with : handling; see #597249
+class Dak822(Deb822):
+    def _internal_parser(self, sequence, fields=None):
+        # The key is non-whitespace, non-colon characters before any colon.
+        key_part = r"^(?P<key>[^: \t\n\r\f\v]+)\s*:\s*"
+        single = re.compile(key_part + r"(?P<data>\S.*?)\s*$")
+        multi = re.compile(key_part + r"$")
+        multidata = re.compile(r"^\s(?P<data>.+?)\s*$")
+
+        wanted_field = lambda f: fields is None or f in fields
+
+        if isinstance(sequence, basestring):
+            sequence = sequence.splitlines()
+
+        curkey = None
+        content = ""
+        for line in self.gpg_stripped_paragraph(sequence):
+            m = single.match(line)
+            if m:
+                if curkey:
+                    self[curkey] = content
+
+                if not wanted_field(m.group('key')):
+                    curkey = None
+                    continue
+
+                curkey = m.group('key')
+                content = m.group('data')
+                continue
+
+            m = multi.match(line)
+            if m:
+                if curkey:
+                    self[curkey] = content
 
-    @rtype: dictionary
-    @return: dictionary of section names -> id mappings
-    """
+                if not wanted_field(m.group('key')):
+                    curkey = None
+                    continue
 
-    ret = {}
-    q = session.query(Section)
-    for x in q.all():
-        ret[x.section] = x.section_id
+                curkey = m.group('key')
+                content = ""
+                continue
 
-    return ret
+            m = multidata.match(line)
+            if m:
+                content += '\n' + line # XXX not m.group('data')?
+                continue
 
-__all__.append('get_sections')
+        if curkey:
+            self[curkey] = content
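
A short sketch of the parser above on an inline paragraph; continuation
lines (those starting with whitespace) are folded into the preceding field:

    ctrl = Dak822("Source: dak\nBuild-Depends: python,\n debhelper\n")
    print ctrl['Source']         # dak
    print ctrl['Build-Depends']  # python, plus the folded continuation line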
 
-################################################################################
 
 class DBSource(ORMObject):
     def __init__(self, source = None, version = None, maintainer = None, \
-        changedby = None, poolfile = None, install_date = None):
+        changedby = None, poolfile = None, install_date = None, fingerprint = None):
         self.source = source
         self.version = version
         self.maintainer = maintainer
         self.changedby = changedby
         self.poolfile = poolfile
         self.install_date = install_date
+        self.fingerprint = fingerprint
+
+    @property
+    def pkid(self):
+        return self.source_id
 
     def properties(self):
         return ['source', 'source_id', 'maintainer', 'changedby', \
             'fingerprint', 'poolfile', 'version', 'suites_count', \
-            'install_date', 'binaries_count']
+            'install_date', 'binaries_count', 'uploaders_count']
 
     def not_null_constraints(self):
-        return ['source', 'version', 'install_date', 'maintainer', \
-            'changedby', 'poolfile', 'install_date']
-
-__all__.append('DBSource')
-
-@session_wrapper
-def source_exists(source, source_version, suites = ["any"], session=None):
-    """
-    Ensure that source exists somewhere in the archive for the binary
-    upload being processed.
-      1. exact match     => 1.0-3
-      2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1
+        return ['source', 'version', 'maintainer', \
+            'changedby', 'poolfile']
 
-    @type source: string
-    @param source: source name
-
-    @type source_version: string
-    @param source_version: expected source version
-
-    @type suites: list
-    @param suites: list of suites to check in, default I{any}
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
+    def read_control_fields(self):
+        '''
+        Reads the control information from a dsc
 
-    @rtype: int
-    @return: returns 1 if a source with expected version is found, otherwise 0
+        @rtype: Dak822
+        @return: the dsc control information in a dictionary-like object
+        '''
+        fullpath = self.poolfile.fullpath
+        fields = Dak822(open(fullpath, 'r'))
+        return fields
 
-    """
+    metadata = association_proxy('key', 'value')
 
-    cnf = Config()
-    ret = True
-
-    from daklib.regexes import re_bin_only_nmu
-    orig_source_version = re_bin_only_nmu.sub('', source_version)
-
-    for suite in suites:
-        q = session.query(DBSource).filter_by(source=source). \
-            filter(DBSource.version.in_([source_version, orig_source_version]))
-        if suite != "any":
-            # source must exist in suite X, or in some other suite that's
-            # mapped to X, recursively... silent-maps are counted too,
-            # unreleased-maps aren't.
-            maps = cnf.ValueList("SuiteMappings")[:]
-            maps.reverse()
-            maps = [ m.split() for m in maps ]
-            maps = [ (x[1], x[2]) for x in maps
-                            if x[0] == "map" or x[0] == "silent-map" ]
-            s = [suite]
-            for x in maps:
-                if x[1] in s and x[0] not in s:
-                    s.append(x[0])
-
-            q = q.filter(DBSource.suites.any(Suite.suite_name.in_(s)))
-
-        if q.count() > 0:
-            continue
-
-        # No source found so return not ok
-        ret = False
+    def scan_contents(self):
+        '''
+        Returns a set of names for non directories. The path names are
+        normalized after converting them from either utf-8 or iso8859-1
+        encoding.
+        '''
+        fullpath = self.poolfile.fullpath
+        from daklib.contents import UnpackedSource
+        unpacked = UnpackedSource(fullpath)
+        fileset = set()
+        for name in unpacked.get_all_filenames():
+            # enforce proper utf-8 encoding
+            try:
+                name.decode('utf-8')
+            except UnicodeDecodeError:
+                name = name.decode('iso8859-1').encode('utf-8')
+            fileset.add(name)
+        return fileset
 
-    return ret
+    @property
+    def proxy(self):
+        session = object_session(self)
+        query = session.query(SourceMetadata).filter_by(source=self)
+        return MetadataProxy(session, query)
 
-__all__.append('source_exists')
+__all__.append('DBSource')
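
Taken together, a DBSource row now exposes its .dsc both on disk and in the
database. A hedged sketch (assumes a populated database and pool; the source
name is illustrative):

    session = DBConn().session()
    source = session.query(DBSource).filter_by(source='dak').first()
    fields = source.read_control_fields()    # parsed from the pool .dsc
    homepage = source.proxy.get('Homepage')  # from source_metadata, or None
    names = source.scan_contents()           # set of UTF-8 file names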
 
 @session_wrapper
 def get_suites_source_in(source, session=None):
@@ -2356,254 +1780,62 @@ def get_suites_source_in(source, session=None):
 
 __all__.append('get_suites_source_in')
 
-@session_wrapper
-def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None):
-    """
-    Returns list of DBSource objects for given C{source} name and other parameters
-
-    @type source: str
-    @param source: DBSource package name to search for
-
-    @type version: str or None
-    @param version: DBSource version name to search for or None if not applicable
-
-    @type dm_upload_allowed: bool
-    @param dm_upload_allowed: If None, no effect.  If True or False, only
-    return packages with that dm_upload_allowed setting
-
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: list
-    @return: list of DBSource objects for the given name (may be empty)
-    """
-
-    q = session.query(DBSource).filter_by(source=source)
-
-    if version is not None:
-        q = q.filter_by(version=version)
-
-    if dm_upload_allowed is not None:
-        q = q.filter_by(dm_upload_allowed=dm_upload_allowed)
-
-    return q.all()
-
-__all__.append('get_sources_from_name')
-
 # FIXME: This function fails badly if it finds more than 1 source package and
 # its implementation is trivial enough to be inlined.
 @session_wrapper
-def get_source_in_suite(source, suite, session=None):
+def get_source_in_suite(source, suite_name, session=None):
     """
-    Returns a DBSource object for a combination of C{source} and C{suite}.
+    Returns a DBSource object for a combination of C{source} and C{suite_name}.
 
       - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
-      - B{suite} - a suite name, eg. I{unstable}
+      - B{suite_name} - a suite name, eg. I{unstable}
 
     @type source: string
     @param source: source package name
 
-    @type suite: string
+    @type suite_name: string
     @param suite_name: the suite name
 
     @rtype: DBSource
     @return: the DBSource object for I{source} in I{suite_name}, or None if
     no such source (or suite) exists
 
     """
-
-    q = get_suite(suite, session).get_sources(source)
+    suite = get_suite(suite_name, session)
+    if suite is None:
+        return None
     try:
-        return q.one()
+        return suite.get_sources(source).one()
     except NoResultFound:
         return None
 
 __all__.append('get_source_in_suite')
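
A quick sketch; note that the helper now returns None rather than raising
when the suite itself is unknown:

    src = get_source_in_suite('glibc', 'unstable')
    if src is not None:
        print src.version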
 
-################################################################################
-
-@session_wrapper
-def add_dsc_to_db(u, filename, session=None):
-    entry = u.pkg.files[filename]
-    source = DBSource()
-    pfs = []
-
-    source.source = u.pkg.dsc["source"]
-    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
-    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
-    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
-    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    source.install_date = datetime.now().date()
-
-    dsc_component = entry["component"]
-    dsc_location_id = entry["location id"]
-
-    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
-
-    # Set up a new poolfile if necessary
-    if not entry.has_key("files id") or not entry["files id"]:
-        filename = entry["pool name"] + filename
-        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
-        session.flush()
-        pfs.append(poolfile)
-        entry["files id"] = poolfile.file_id
-
-    source.poolfile_id = entry["files id"]
-    session.add(source)
-
-    suite_names = u.pkg.changes["distribution"].keys()
-    source.suites = session.query(Suite). \
-        filter(Suite.suite_name.in_(suite_names)).all()
-
-    # Add the source files to the DB (files and dsc_files)
-    dscfile = DSCFile()
-    dscfile.source_id = source.source_id
-    dscfile.poolfile_id = entry["files id"]
-    session.add(dscfile)
-
-    for dsc_file, dentry in u.pkg.dsc_files.items():
-        df = DSCFile()
-        df.source_id = source.source_id
-
-        # If the .orig tarball is already in the pool, it's
-        # files id is stored in dsc_files by check_dsc().
-        files_id = dentry.get("files id", None)
-
-        # Find the entry in the files hash
-        # TODO: Bail out here properly
-        dfentry = None
-        for f, e in u.pkg.files.items():
-            if f == dsc_file:
-                dfentry = e
-                break
-
-        if files_id is None:
-            filename = dfentry["pool name"] + dsc_file
-
-            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
-            # FIXME: needs to check for -1/-2 and or handle exception
-            if found and obj is not None:
-                files_id = obj.file_id
-                pfs.append(obj)
-
-            # If still not found, add it
-            if files_id is None:
-                # HACK: Force sha1sum etc into dentry
-                dentry["sha1sum"] = dfentry["sha1sum"]
-                dentry["sha256sum"] = dfentry["sha256sum"]
-                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
-                pfs.append(poolfile)
-                files_id = poolfile.file_id
-        else:
-            poolfile = get_poolfile_by_id(files_id, session)
-            if poolfile is None:
-                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
-            pfs.append(poolfile)
-
-        df.poolfile_id = files_id
-        session.add(df)
-
-    # Add the src_uploaders to the DB
-    uploader_ids = [source.maintainer_id]
-    if u.pkg.dsc.has_key("uploaders"):
-        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
-            up = up.strip()
-            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
-
-    added_ids = {}
-    for up_id in uploader_ids:
-        if added_ids.has_key(up_id):
-            import utils
-            utils.warn("Already saw uploader %s for source %s" % (up_id, source.source))
-            continue
-
-        added_ids[up_id]=1
-
-        su = SrcUploader()
-        su.maintainer_id = up_id
-        su.source_id = source.source_id
-        session.add(su)
-
-    session.flush()
-
-    return source, dsc_component, dsc_location_id, pfs
-
-__all__.append('add_dsc_to_db')
-
 @session_wrapper
-def add_deb_to_db(u, filename, session=None):
+def import_metadata_into_db(obj, session=None):
     """
-    Contrary to what you might expect, this routine deals with both
-    debs and udebs.  That info is in 'dbtype', whilst 'type' is
-    'deb' for both of them
+    This routine works on either DBBinary or DBSource objects and imports
+    their metadata into the database
     """
-    cnf = Config()
-    entry = u.pkg.files[filename]
-
-    bin = DBBinary()
-    bin.package = entry["package"]
-    bin.version = entry["version"]
-    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
-    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
-    bin.binarytype = entry["dbtype"]
-
-    # Find poolfile id
-    filename = entry["pool name"] + filename
-    fullpath = os.path.join(cnf["Dir::Pool"], filename)
-    if not entry.get("location id", None):
-        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
-
-    if entry.get("files id", None):
-        poolfile = get_poolfile_by_id(bin.poolfile_id)
-        bin.poolfile_id = entry["files id"]
-    else:
-        poolfile = add_poolfile(filename, entry, entry["location id"], session)
-        bin.poolfile_id = entry["files id"] = poolfile.file_id
-
-    # Find source id
-    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
-    if len(bin_sources) != 1:
-        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                  (bin.package, bin.version, entry["architecture"],
-                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
-
-    bin.source_id = bin_sources[0].source_id
-
-    # Add and flush object so it has an ID
-    session.add(bin)
-    session.flush()
-
-    # Add BinAssociations
-    for suite_name in u.pkg.changes["distribution"].keys():
-        ba = BinAssociation()
-        ba.binary_id = bin.binary_id
-        ba.suite_id = get_suite(suite_name).suite_id
-        session.add(ba)
-
-    session.flush()
-
-    # Deal with contents - disabled for now
-    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
-    #if not contents:
-    #    print "REJECT\nCould not determine contents of package %s" % bin.package
-    #    session.rollback()
-    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
-
-    return poolfile
-
-__all__.append('add_deb_to_db')
-
-################################################################################
+    fields = obj.read_control_fields()
+    for k in fields.keys():
+        try:
+            # Try raw ASCII
+            val = str(fields[k])
+        except UnicodeEncodeError:
+            # Fall back to UTF-8
+            try:
+                val = fields[k].encode('utf-8')
+            except UnicodeEncodeError:
+                # Finally try iso8859-1
+                val = fields[k].encode('iso8859-1')
+                # Otherwise we allow the exception to percolate up and we cause
+                # a reject as someone is playing silly buggers
 
-class SourceACL(object):
-    def __init__(self, *args, **kwargs):
-        pass
+        obj.metadata[get_or_set_metadatakey(k, session)] = val
 
-    def __repr__(self):
-        return '<SourceACL %s>' % self.source_acl_id
+    session.commit_or_flush()
 
-__all__.append('SourceACL')
+__all__.append('import_metadata_into_db')
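
A hedged sketch of the intended call site: once a DBSource (or DBBinary) and
its pool file exist, copy the control fields into the *_metadata tables.
Note the function commits (or flushes) through the session itself:

    session = DBConn().session()
    source = get_source_in_suite('dak', 'unstable', session)
    if source is not None:
        import_metadata_into_db(source, session)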
 
 ################################################################################
 
@@ -2618,17 +1850,6 @@ __all__.append('SrcFormat')
 
 ################################################################################
 
-class SrcUploader(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<SrcUploader %s>' % self.uploader_id
-
-__all__.append('SrcUploader')
-
-################################################################################
-
 SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                  ('SuiteID', 'suite_id'),
                  ('Version', 'version'),
@@ -2653,10 +1874,11 @@ class Suite(ORMObject):
         self.version = version
 
     def properties(self):
-        return ['suite_name', 'version', 'sources_count', 'binaries_count']
+        return ['suite_name', 'version', 'sources_count', 'binaries_count', \
+            'overrides_count']
 
     def not_null_constraints(self):
-        return ['suite_name', 'version']
+        return ['suite_name']
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -2720,6 +1942,25 @@ class Suite(ORMObject):
         return session.query(DBSource).filter_by(source = source). \
             with_parent(self)
 
+    def get_overridesuite(self):
+        if self.overridesuite is None:
+            return self
+        else:
+            return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()
+
+    def update_last_changed(self):
+        self.last_changed = sqlalchemy.func.now()
+
+    @property
+    def path(self):
+        return os.path.join(self.archive.path, 'dists', self.suite_name)
+
+    @property
+    def release_suite_output(self):
+        if self.release_suite is not None:
+            return self.release_suite
+        return self.suite_name
+
 __all__.append('Suite')
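
The new Suite helpers in a short sketch (suite name illustrative):

    session = DBConn().session()
    suite = get_suite('unstable', session)
    print suite.path                  # <archive path>/dists/unstable
    print suite.release_suite_output  # release_suite if set, else suite_name
    overrides = suite.get_overridesuite()  # suite whose override table applies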
 
 @session_wrapper
@@ -2738,8 +1979,22 @@ def get_suite(suite, session=None):
     @return: Suite object for the requested suite name (None if not present)
     """
 
+    # Start by looking for the dak internal name
     q = session.query(Suite).filter_by(suite_name=suite)
+    try:
+        return q.one()
+    except NoResultFound:
+        pass
+
+    # Now try codename
+    q = session.query(Suite).filter_by(codename=suite)
+    try:
+        return q.one()
+    except NoResultFound:
+        pass
 
+    # Finally give release_suite a try
+    q = session.query(Suite).filter_by(release_suite=suite)
     try:
         return q.one()
     except NoResultFound:
@@ -2749,11 +2004,11 @@ __all__.append('get_suite')
 
 ################################################################################
 
-# TODO: should be removed because the implementation is too trivial
 @session_wrapper
 def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     """
-    Returns list of Architecture objects for given C{suite} name
+    Returns list of Architecture objects for given C{suite} name. The list is
+    empty if suite does not exist.
 
     @type suite: str
     @param suite: Suite name to search for
@@ -2774,48 +2029,15 @@ def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     @return: list of Architecture objects for the given name (may be empty)
     """
 
-    return get_suite(suite, session).get_architectures(skipsrc, skipall)
+    try:
+        return get_suite(suite, session).get_architectures(skipsrc, skipall)
+    except AttributeError:
+        return []
 
 __all__.append('get_suite_architectures')
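
For example, to list the buildable architectures while dropping "source" and
"all"; an unknown suite now yields an empty list instead of a traceback:

    for arch in get_suite_architectures('unstable', skipsrc=True, skipall=True):
        print arch.arch_string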
 
 ################################################################################
 
-class SuiteSrcFormat(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<SuiteSrcFormat (%s, %s)>' % (self.suite_id, self.src_format_id)
-
-__all__.append('SuiteSrcFormat')
-
-@session_wrapper
-def get_suite_src_formats(suite, session=None):
-    """
-    Returns list of allowed SrcFormat for C{suite}.
-
-    @type suite: str
-    @param suite: Suite name to search for
-
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: list
-    @return: the list of allowed source formats for I{suite}
-    """
-
-    q = session.query(SrcFormat)
-    q = q.join(SuiteSrcFormat)
-    q = q.join(Suite).filter_by(suite_name=suite)
-    q = q.order_by('format_name')
-
-    return q.all()
-
-__all__.append('get_suite_src_formats')
-
-################################################################################
-
 class Uid(ORMObject):
     def __init__(self, uid = None, name = None):
         self.uid = uid
@@ -2889,14 +2111,143 @@ __all__.append('get_uid_from_fingerprint')
 
 ################################################################################
 
-class UploadBlock(object):
+class MetadataKey(ORMObject):
+    def __init__(self, key = None):
+        self.key = key
+
+    def properties(self):
+        return ['key']
+
+    def not_null_constraints(self):
+        return ['key']
+
+__all__.append('MetadataKey')
+
+@session_wrapper
+def get_or_set_metadatakey(keyname, session=None):
+    """
+    Returns MetadataKey object for given keyname.
+
+    If no matching keyname is found, a row is inserted.
+
+    @type keyname: string
+    @param keyname: The keyname to fetch or add
+
+    @type session: Session
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).  If not passed, a commit will be performed at
+    the end of the function, otherwise the caller is responsible for committing.
+
+    @rtype: MetadataKey
+    @return: the metadatakey object for the given keyname
+    """
+
+    q = session.query(MetadataKey).filter_by(key=keyname)
+
+    try:
+        ret = q.one()
+    except NoResultFound:
+        ret = MetadataKey(keyname)
+        session.add(ret)
+        session.commit_or_flush()
+
+    return ret
+
+__all__.append('get_or_set_metadatakey')
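

A small sketch; the key row is created on first use and reused afterwards.
Writing through source.metadata (the association proxy defined on DBSource)
stores a source_metadata row; the key name and source are illustrative:

    session = DBConn().session()
    source = session.query(DBSource).filter_by(source='dak').first()
    key = get_or_set_metadatakey('Homepage', session)
    source.metadata[key] = 'http://example.org/'
    session.commit()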
+
+################################################################################
+
+class BinaryMetadata(ORMObject):
+    def __init__(self, key = None, value = None, binary = None):
+        self.key = key
+        self.value = value
+        if binary is not None:
+            self.binary = binary
+
+    def properties(self):
+        return ['binary', 'key', 'value']
+
+    def not_null_constraints(self):
+        return ['value']
+
+__all__.append('BinaryMetadata')
+
+################################################################################
+
+class SourceMetadata(ORMObject):
+    def __init__(self, key = None, value = None, source = None):
+        self.key = key
+        self.value = value
+        if source is not None:
+            self.source = source
+
+    def properties(self):
+        return ['source', 'key', 'value']
+
+    def not_null_constraints(self):
+        return ['value']
+
+__all__.append('SourceMetadata')
+
+################################################################################
+
+class MetadataProxy(object):
+    def __init__(self, session, query):
+        self.session = session
+        self.query = query
+
+    def _get(self, key):
+        metadata_key = self.session.query(MetadataKey).filter_by(key=key).first()
+        if metadata_key is None:
+            return None
+        metadata = self.query.filter_by(key=metadata_key).first()
+        return metadata
+
+    def __contains__(self, key):
+        if self._get(key) is not None:
+            return True
+        return False
+
+    def __getitem__(self, key):
+        metadata = self._get(key)
+        if metadata is None:
+            raise KeyError
+        return metadata.value
+
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
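
The proxy gives dict-style access by key name without loading the whole
metadata collection. A sketch against an existing DBSource (field names
illustrative):

    session = DBConn().session()
    source = session.query(DBSource).filter_by(source='dak').first()
    if 'Homepage' in source.proxy:
        print source.proxy['Homepage']
    print source.proxy.get('Section', 'unknown')
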
+################################################################################
+
+class VersionCheck(ORMObject):
     def __init__(self, *args, **kwargs):
-        pass
+        pass
 
-    def __repr__(self):
-        return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
+    def properties(self):
+        #return ['suite_id', 'check', 'reference_id']
+        return ['check']
+
+    def not_null_constraints(self):
+        return ['suite', 'check', 'reference']
 
-__all__.append('UploadBlock')
+__all__.append('VersionCheck')
+
+@session_wrapper
+def get_version_checks(suite_name, check = None, session = None):
+    suite = get_suite(suite_name, session)
+    if not suite:
+        # Make sure that what we return is iterable so that list comprehensions
+        # involving this don't cause a traceback
+        return []
+    q = session.query(VersionCheck).filter_by(suite=suite)
+    if check:
+        q = q.filter_by(check=check)
+    return q.all()
+
+__all__.append('get_version_checks')
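
A hedged sketch; "MustBeNewerThan" is one of the check names dak uses in
practice, and each returned VersionCheck carries the referenced suite:

    for vc in get_version_checks('unstable', 'MustBeNewerThan'):
        print vc.reference.suite_name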
 
 ################################################################################
 
@@ -2915,72 +2266,64 @@ class DBConn(object):
             self.__createconn()
 
     def __setuptables(self):
-        tables_with_primary = (
+        tables = (
+            'acl',
+            'acl_architecture_map',
+            'acl_fingerprint_map',
+            'acl_per_source',
             'architecture',
             'archive',
             'bin_associations',
+            'bin_contents',
             'binaries',
-            'binary_acl',
-            'binary_acl_map',
+            'binaries_metadata',
             'build_queue',
             'changelogs_text',
+            'changes',
             'component',
+            'component_suite',
             'config',
-            'changes_pending_binaries',
-            'changes_pending_files',
-            'changes_pending_source',
             'dsc_files',
+            'external_overrides',
+            'extra_src_references',
             'files',
+            'files_archive_map',
             'fingerprint',
             'keyrings',
-            'keyring_acl_map',
-            'location',
             'maintainer',
+            'metadata_keys',
             'new_comments',
+            # TODO: the maintainer column in table override should be removed.
+            'override',
             'override_type',
-            'pending_bin_contents',
             'policy_queue',
+            'policy_queue_upload',
+            'policy_queue_upload_binaries_map',
+            'policy_queue_byhand_file',
             'priority',
             'section',
+            'signature_history',
             'source',
-            'source_acl',
+            'source_metadata',
             'src_associations',
+            'src_contents',
             'src_format',
             'src_uploaders',
             'suite',
-            'uid',
-            'upload_blocks',
-            # The following tables have primary keys but sqlalchemy
-            # version 0.5 fails to reflect them correctly with database
-            # versions before upgrade #41.
-            #'changes',
-            #'build_queue_files',
-        )
-
-        tables_no_primary = (
-            'bin_contents',
-            'changes_pending_files_map',
-            'changes_pending_source_files',
-            'changes_pool_files',
-            'deb_contents',
-            'override',
+            'suite_acl_map',
             'suite_architectures',
-            'suite_src_formats',
             'suite_build_queue_copy',
-            'udeb_contents',
-            # see the comment above
-            'changes',
-            'build_queue_files',
+            'suite_src_formats',
+            'uid',
+            'version_check',
         )
 
         views = (
             'almost_obsolete_all_associations',
             'almost_obsolete_src_associations',
             'any_associations_source',
-            'bin_assoc_by_arch',
             'bin_associations_binaries',
             'binaries_suite_arch',
-            'binfiles_suite_component_arch',
             'changelogs',
             'file_arch_suite',
             'newest_all_associations',
@@ -2991,25 +2334,18 @@ class DBConn(object):
             'obsolete_any_associations',
             'obsolete_any_by_all_associations',
             'obsolete_src_associations',
+            'package_list',
             'source_suite',
             'src_associations_bin',
             'src_associations_src',
             'suite_arch_by_name',
         )
 
-        # Sqlalchemy version 0.5 fails to reflect the SERIAL type
-        # correctly and that is why we have to use a workaround. It can
-        # be removed as soon as we switch to version 0.6.
-        for table_name in tables_with_primary:
+        for table_name in tables:
             table = Table(table_name, self.db_meta, \
-                Column('id', Integer, primary_key = True), \
                 autoload=True, useexisting=True)
             setattr(self, 'tbl_%s' % table_name, table)
 
-        for table_name in tables_no_primary:
-            table = Table(table_name, self.db_meta, autoload=True)
-            setattr(self, 'tbl_%s' % table_name, table)
-
         for view_name in views:
             view = Table(view_name, self.db_meta, autoload=True)
             setattr(self, 'view_%s' % view_name, view)
@@ -3018,51 +2354,37 @@ class DBConn(object):
         mapper(Architecture, self.tbl_architecture,
             properties = dict(arch_id = self.tbl_architecture.c.id,
                suites = relation(Suite, secondary=self.tbl_suite_architectures,
-                   order_by='suite_name',
-                   backref=backref('architectures', order_by='arch_string'))),
+                   order_by=self.tbl_suite.c.suite_name,
+                   backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
             extension = validator)
 
+        mapper(ACL, self.tbl_acl,
+               properties = dict(
+                architectures = relation(Architecture, secondary=self.tbl_acl_architecture_map, collection_class=set),
+                fingerprints = relation(Fingerprint, secondary=self.tbl_acl_fingerprint_map, collection_class=set),
+                match_keyring = relation(Keyring, primaryjoin=(self.tbl_acl.c.match_keyring_id == self.tbl_keyrings.c.id)),
+                per_source = relation(ACLPerSource, collection_class=set),
+                ))
+
+        mapper(ACLPerSource, self.tbl_acl_per_source,
+               properties = dict(
+                acl = relation(ACL),
+                fingerprint = relation(Fingerprint, primaryjoin=(self.tbl_acl_per_source.c.fingerprint_id == self.tbl_fingerprint.c.id)),
+                created_by = relation(Fingerprint, primaryjoin=(self.tbl_acl_per_source.c.created_by_id == self.tbl_fingerprint.c.id)),
+                ))
+
         mapper(Archive, self.tbl_archive,
                properties = dict(archive_id = self.tbl_archive.c.id,
                                  archive_name = self.tbl_archive.c.name))
 
-        mapper(BinAssociation, self.tbl_bin_associations,
-               properties = dict(ba_id = self.tbl_bin_associations.c.id,
-                                 suite_id = self.tbl_bin_associations.c.suite,
-                                 suite = relation(Suite),
-                                 binary_id = self.tbl_bin_associations.c.bin,
-                                 binary = relation(DBBinary)))
-
-        mapper(PendingBinContents, self.tbl_pending_bin_contents,
-               properties = dict(contents_id =self.tbl_pending_bin_contents.c.id,
-                                 filename = self.tbl_pending_bin_contents.c.filename,
-                                 package = self.tbl_pending_bin_contents.c.package,
-                                 version = self.tbl_pending_bin_contents.c.version,
-                                 arch = self.tbl_pending_bin_contents.c.arch,
-                                 otype = self.tbl_pending_bin_contents.c.type))
-
-        mapper(DebContents, self.tbl_deb_contents,
-               properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
-                                 package=self.tbl_deb_contents.c.package,
-                                 suite=self.tbl_deb_contents.c.suite,
-                                 arch=self.tbl_deb_contents.c.arch,
-                                 section=self.tbl_deb_contents.c.section,
-                                 filename=self.tbl_deb_contents.c.filename))
-
-        mapper(UdebContents, self.tbl_udeb_contents,
-               properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
-                                 package=self.tbl_udeb_contents.c.package,
-                                 suite=self.tbl_udeb_contents.c.suite,
-                                 arch=self.tbl_udeb_contents.c.arch,
-                                 section=self.tbl_udeb_contents.c.section,
-                                 filename=self.tbl_udeb_contents.c.filename))
+        mapper(ArchiveFile, self.tbl_files_archive_map,
+               properties = dict(archive = relation(Archive, backref='files'),
+                                 component = relation(Component),
+                                 file = relation(PoolFile, backref='archives')))
 
         mapper(BuildQueue, self.tbl_build_queue,
-               properties = dict(queue_id = self.tbl_build_queue.c.id))
-
-        mapper(BuildQueueFile, self.tbl_build_queue_files,
-               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
-                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))
+               properties = dict(queue_id = self.tbl_build_queue.c.id,
+                                 suite = relation(Suite, primaryjoin=(self.tbl_build_queue.c.suite_id==self.tbl_suite.c.id))))
 
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
@@ -3082,21 +2404,16 @@ class DBConn(object):
                                  install_date = self.tbl_binaries.c.install_date,
                                  suites = relation(Suite, secondary=self.tbl_bin_associations,
                                      backref=backref('binaries', lazy='dynamic')),
-                                 binassociations = relation(BinAssociation,
-                                                            primaryjoin=(self.tbl_binaries.c.id==self.tbl_bin_associations.c.bin))),
+                                 extra_sources = relation(DBSource, secondary=self.tbl_extra_src_references,
+                                     backref=backref('extra_binary_references', lazy='dynamic')),
+                                 key = relation(BinaryMetadata, cascade='all',
+                                     collection_class=attribute_mapped_collection('key'))),
                 extension = validator)
 
-        mapper(BinaryACL, self.tbl_binary_acl,
-               properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
-
-        mapper(BinaryACLMap, self.tbl_binary_acl_map,
-               properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
-                                 fingerprint = relation(Fingerprint, backref="binary_acl_map"),
-                                 architecture = relation(Architecture)))
-
         mapper(Component, self.tbl_component,
                properties = dict(component_id = self.tbl_component.c.id,
-                                 component_name = self.tbl_component.c.name))
+                                 component_name = self.tbl_component.c.name),
+               extension = validator)
 
         mapper(DBConfig, self.tbl_config,
                properties = dict(config_id = self.tbl_config.c.id))
@@ -3108,15 +2425,16 @@ class DBConn(object):
                                  poolfile_id = self.tbl_dsc_files.c.file,
                                  poolfile = relation(PoolFile)))
 
+        mapper(ExternalOverride, self.tbl_external_overrides,
+                properties = dict(
+                    suite_id = self.tbl_external_overrides.c.suite,
+                    suite = relation(Suite),
+                    component_id = self.tbl_external_overrides.c.component,
+                    component = relation(Component)))
+
         mapper(PoolFile, self.tbl_files,
                properties = dict(file_id = self.tbl_files.c.id,
-                                 filesize = self.tbl_files.c.size,
-                                 location_id = self.tbl_files.c.location,
-                                 location = relation(Location,
-                                     # using lazy='dynamic' in the back
-                                     # reference because we have A LOT of
-                                     # files in one location
-                                     backref=backref('files', lazy='dynamic'))),
+                                 filesize = self.tbl_files.c.size),
                 extension = validator)
 
         mapper(Fingerprint, self.tbl_fingerprint,
@@ -3125,19 +2443,16 @@ class DBConn(object):
                                  uid = relation(Uid),
                                  keyring_id = self.tbl_fingerprint.c.keyring,
                                  keyring = relation(Keyring),
-                                 source_acl = relation(SourceACL),
-                                 binary_acl = relation(BinaryACL)),
+                                 acl = relation(ACL)),
                extension = validator)
 
         mapper(Keyring, self.tbl_keyrings,
                properties = dict(keyring_name = self.tbl_keyrings.c.name,
-                                 keyring_id = self.tbl_keyrings.c.id))
+                                 keyring_id = self.tbl_keyrings.c.id,
+                                 acl = relation(ACL, primaryjoin=(self.tbl_keyrings.c.acl_id == self.tbl_acl.c.id))))
 
         mapper(DBChange, self.tbl_changes,
                properties = dict(change_id = self.tbl_changes.c.id,
-                                 poolfiles = relation(PoolFile,
-                                                      secondary=self.tbl_changes_pool_files,
-                                                      backref="changeslinks"),
                                  seen = self.tbl_changes.c.seen,
                                  source = self.tbl_changes.c.source,
                                  binaries = self.tbl_changes.c.binaries,
@@ -3147,54 +2462,7 @@ class DBConn(object):
                                  maintainer = self.tbl_changes.c.maintainer,
                                  changedby = self.tbl_changes.c.changedby,
                                  date = self.tbl_changes.c.date,
-                                 version = self.tbl_changes.c.version,
-                                 files = relation(ChangePendingFile,
-                                                  secondary=self.tbl_changes_pending_files_map,
-                                                  backref="changesfile"),
-                                 in_queue_id = self.tbl_changes.c.in_queue,
-                                 in_queue = relation(PolicyQueue,
-                                                     primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
-                                 approved_for_id = self.tbl_changes.c.approved_for))
-
-        mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
-               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
-
-        mapper(ChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
-                                 filename = self.tbl_changes_pending_files.c.filename,
-                                 size = self.tbl_changes_pending_files.c.size,
-                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
-                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
-                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
-
-        mapper(ChangePendingSource, self.tbl_changes_pending_source,
-               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
-                                 change = relation(DBChange),
-                                 maintainer = relation(Maintainer,
-                                                       primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
-                                 changedby = relation(Maintainer,
-                                                      primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
-                                 fingerprint = relation(Fingerprint),
-                                 source_files = relation(ChangePendingFile,
-                                                         secondary=self.tbl_changes_pending_source_files,
-                                                         backref="pending_sources")))
-
-
-        mapper(KeyringACLMap, self.tbl_keyring_acl_map,
-               properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
-                                 keyring = relation(Keyring, backref="keyring_acl_map"),
-                                 architecture = relation(Architecture)))
-
-        mapper(Location, self.tbl_location,
-               properties = dict(location_id = self.tbl_location.c.id,
-                                 component_id = self.tbl_location.c.component,
-                                 component = relation(Component),
-                                 archive_id = self.tbl_location.c.archive,
-                                 archive = relation(Archive),
-                                 # FIXME: the 'type' column is old cruft and
-                                 # should be removed in the future.
-                                 archive_type = self.tbl_location.c.type),
-               extension = validator)
+                                 version = self.tbl_changes.c.version))
 
         mapper(Maintainer, self.tbl_maintainer,
                properties = dict(maintainer_id = self.tbl_maintainer.c.id,
@@ -3205,27 +2473,49 @@ class DBConn(object):
                 extension = validator)
 
         mapper(NewComment, self.tbl_new_comments,
-               properties = dict(comment_id = self.tbl_new_comments.c.id))
+               properties = dict(comment_id = self.tbl_new_comments.c.id,
+                                 policy_queue = relation(PolicyQueue)))
 
         mapper(Override, self.tbl_override,
                properties = dict(suite_id = self.tbl_override.c.suite,
-                                 suite = relation(Suite),
+                                 suite = relation(Suite, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  package = self.tbl_override.c.package,
                                  component_id = self.tbl_override.c.component,
-                                 component = relation(Component),
+                                 component = relation(Component, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  priority_id = self.tbl_override.c.priority,
-                                 priority = relation(Priority),
+                                 priority = relation(Priority, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  section_id = self.tbl_override.c.section,
-                                 section = relation(Section),
+                                 section = relation(Section, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  overridetype_id = self.tbl_override.c.type,
-                                 overridetype = relation(OverrideType)))
+                                 overridetype = relation(OverrideType, \
+                                    backref=backref('overrides', lazy='dynamic'))))
 
         mapper(OverrideType, self.tbl_override_type,
                properties = dict(overridetype = self.tbl_override_type.c.type,
                                  overridetype_id = self.tbl_override_type.c.id))
 
         mapper(PolicyQueue, self.tbl_policy_queue,
-               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id,
+                                 suite = relation(Suite, primaryjoin=(self.tbl_policy_queue.c.suite_id == self.tbl_suite.c.id))))
+
+        mapper(PolicyQueueUpload, self.tbl_policy_queue_upload,
+               properties = dict(
+                   changes = relation(DBChange),
+                   policy_queue = relation(PolicyQueue, backref='uploads'),
+                   target_suite = relation(Suite),
+                   source = relation(DBSource),
+                   binaries = relation(DBBinary, secondary=self.tbl_policy_queue_upload_binaries_map),
+                ))
+
+        mapper(PolicyQueueByhandFile, self.tbl_policy_queue_byhand_file,
+               properties = dict(
+                   upload = relation(PolicyQueueUpload, backref='byhand'),
+                   )
+               )
 
         mapper(Priority, self.tbl_priority,
                properties = dict(priority_id = self.tbl_priority.c.id))
@@ -3234,12 +2524,14 @@ class DBConn(object):
                properties = dict(section_id = self.tbl_section.c.id,
                                  section=self.tbl_section.c.section))
 
+        mapper(SignatureHistory, self.tbl_signature_history)
+
         mapper(DBSource, self.tbl_source,
                properties = dict(source_id = self.tbl_source.c.id,
                                  version = self.tbl_source.c.version,
                                  maintainer_id = self.tbl_source.c.maintainer,
                                  poolfile_id = self.tbl_source.c.file,
-                                 poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
+                                 poolfile = relation(PoolFile),
                                  fingerprint_id = self.tbl_source.c.sig_fpr,
                                  fingerprint = relation(Fingerprint),
                                  changedby_id = self.tbl_source.c.changedby,
@@ -3247,77 +2539,149 @@ class DBConn(object):
                                                      primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
                                  suites = relation(Suite, secondary=self.tbl_src_associations,
                                      backref=backref('sources', lazy='dynamic')),
-                                 srcuploaders = relation(SrcUploader)),
+                                 uploaders = relation(Maintainer,
+                                     secondary=self.tbl_src_uploaders),
+                                 key = relation(SourceMetadata, cascade='all',
+                                     collection_class=attribute_mapped_collection('key'))),
                extension = validator)
 
-        mapper(SourceACL, self.tbl_source_acl,
-               properties = dict(source_acl_id = self.tbl_source_acl.c.id))
-
         mapper(SrcFormat, self.tbl_src_format,
                properties = dict(src_format_id = self.tbl_src_format.c.id,
                                  format_name = self.tbl_src_format.c.format_name))
 
-        mapper(SrcUploader, self.tbl_src_uploaders,
-               properties = dict(uploader_id = self.tbl_src_uploaders.c.id,
-                                 source_id = self.tbl_src_uploaders.c.source,
-                                 source = relation(DBSource,
-                                                   primaryjoin=(self.tbl_src_uploaders.c.source==self.tbl_source.c.id)),
-                                 maintainer_id = self.tbl_src_uploaders.c.maintainer,
-                                 maintainer = relation(Maintainer,
-                                                       primaryjoin=(self.tbl_src_uploaders.c.maintainer==self.tbl_maintainer.c.id))))
-
         mapper(Suite, self.tbl_suite,
                properties = dict(suite_id = self.tbl_suite.c.id,
-                                 policy_queue = relation(PolicyQueue),
+                                 policy_queue = relation(PolicyQueue, primaryjoin=(self.tbl_suite.c.policy_queue_id == self.tbl_policy_queue.c.id)),
+                                 new_queue = relation(PolicyQueue, primaryjoin=(self.tbl_suite.c.new_queue_id == self.tbl_policy_queue.c.id)),
+                                 debug_suite = relation(Suite, remote_side=[self.tbl_suite.c.id]),
                                  copy_queues = relation(BuildQueue,
-                                     secondary=self.tbl_suite_build_queue_copy)),
+                                     secondary=self.tbl_suite_build_queue_copy),
+                                 srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
+                                     backref=backref('suites', lazy='dynamic')),
+                                 archive = relation(Archive, backref='suites'),
+                                 acls = relation(ACL, secondary=self.tbl_suite_acl_map, collection_class=set),
+                                 components = relation(Component, secondary=self.tbl_component_suite,
+                                                   order_by=self.tbl_component.c.ordering,
+                                                   backref=backref('suites'))),
                 extension = validator)
 
-        mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
-               properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
-                                 suite = relation(Suite, backref='suitesrcformats'),
-                                 src_format_id = self.tbl_suite_src_formats.c.src_format,
-                                 src_format = relation(SrcFormat)))
-
         mapper(Uid, self.tbl_uid,
                properties = dict(uid_id = self.tbl_uid.c.id,
                                  fingerprint = relation(Fingerprint)),
                extension = validator)
 
-        mapper(UploadBlock, self.tbl_upload_blocks,
-               properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
-                                 fingerprint = relation(Fingerprint, backref="uploadblocks"),
-                                 uid = relation(Uid, backref="uploadblocks")))
+        mapper(BinContents, self.tbl_bin_contents,
+            properties = dict(
+                binary = relation(DBBinary,
+                    backref=backref('contents', lazy='dynamic', cascade='all')),
+                file = self.tbl_bin_contents.c.file))
+
+        mapper(SrcContents, self.tbl_src_contents,
+            properties = dict(
+                source = relation(DBSource,
+                    backref=backref('contents', lazy='dynamic', cascade='all')),
+                file = self.tbl_src_contents.c.file))
+
+        mapper(MetadataKey, self.tbl_metadata_keys,
+            properties = dict(
+                key_id = self.tbl_metadata_keys.c.key_id,
+                key = self.tbl_metadata_keys.c.key))
+
+        mapper(BinaryMetadata, self.tbl_binaries_metadata,
+            properties = dict(
+                binary_id = self.tbl_binaries_metadata.c.bin_id,
+                binary = relation(DBBinary),
+                key_id = self.tbl_binaries_metadata.c.key_id,
+                key = relation(MetadataKey),
+                value = self.tbl_binaries_metadata.c.value))
+
+        mapper(SourceMetadata, self.tbl_source_metadata,
+            properties = dict(
+                source_id = self.tbl_source_metadata.c.src_id,
+                source = relation(DBSource),
+                key_id = self.tbl_source_metadata.c.key_id,
+                key = relation(MetadataKey),
+                value = self.tbl_source_metadata.c.value))
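+
+        # A rough lookup sketch using these mappers, assuming a DBSource
+        # object 'source', an open session 'session' and a metadata key
+        # named 'Build-Depends':
+        #
+        #   md = session.query(SourceMetadata).join(MetadataKey) \
+        #       .filter(SourceMetadata.source == source) \
+        #       .filter(MetadataKey.key == 'Build-Depends').first()
+        #   value = md.value if md is not None else None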
+
+        mapper(VersionCheck, self.tbl_version_check,
+            properties = dict(
+                suite_id = self.tbl_version_check.c.suite,
+                suite = relation(Suite, primaryjoin=self.tbl_version_check.c.suite==self.tbl_suite.c.id),
+                reference_id = self.tbl_version_check.c.reference,
+                reference = relation(Suite, primaryjoin=self.tbl_version_check.c.reference==self.tbl_suite.c.id, lazy='joined')))
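+
+        # Illustrative query, assuming a Suite object 's': fetch all version
+        # checks defined for that suite.
+        #
+        #   session.query(VersionCheck).filter(VersionCheck.suite == s).all()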
 
     ## Connection functions
     def __createconn(self):
         from config import Config
         cnf = Config()
-        if cnf["DB::Host"]:
+        if cnf.has_key("DB::Service"):
+            connstr = "postgresql://service=%s" % cnf["DB::Service"]
+        elif cnf.has_key("DB::Host"):
             # TCP/IP
-            connstr = "postgres://%s" % cnf["DB::Host"]
-            if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+            connstr = "postgresql://%s" % cnf["DB::Host"]
+            if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
                 connstr += ":%s" % cnf["DB::Port"]
             connstr += "/%s" % cnf["DB::Name"]
         else:
             # Unix Socket
-            connstr = "postgres:///%s" % cnf["DB::Name"]
-            if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+            connstr = "postgresql:///%s" % cnf["DB::Name"]
+            if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
                 connstr += "?port=%s" % cnf["DB::Port"]
 
-        self.db_pg   = create_engine(connstr, echo=self.debug)
-        self.db_meta = MetaData()
-        self.db_meta.bind = self.db_pg
-        self.db_smaker = sessionmaker(bind=self.db_pg,
-                                      autoflush=True,
-                                      autocommit=False)
+        engine_args = { 'echo': self.debug }
+        if cnf.has_key('DB::PoolSize'):
+            engine_args['pool_size'] = int(cnf['DB::PoolSize'])
+        if cnf.has_key('DB::MaxOverflow'):
+            engine_args['max_overflow'] = int(cnf['DB::MaxOverflow'])
+        if sa_major_version != '0.5' and cnf.has_key('DB::Unicode') and \
+            cnf['DB::Unicode'] == 'false':
+            engine_args['use_native_unicode'] = False
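+
+        # For example, a dak.conf stanza like the following (values assumed)
+        #
+        #   DB
+        #   {
+        #     Service "projectb";
+        #     PoolSize "5";
+        #     MaxOverflow "10";
+        #   };
+        #
+        # results in engine_args containing pool_size=5 and max_overflow=10.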
+
+        # Monkey-patch in a new dialect to support the service= syntax
+        import sqlalchemy.dialects.postgresql
+        from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
+        class PGDialect_psycopg2_dak(PGDialect_psycopg2):
+            def create_connect_args(self, url):
+                if str(url).startswith('postgresql://service='):
+                    # Eww: strip the 21-character 'postgresql://service='
+                    # prefix by hand to recover the service name
+                    servicename = str(url)[21:]
+                    return (['service=%s' % servicename], {})
+                else:
+                    return PGDialect_psycopg2.create_connect_args(self, url)
+
+        sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak
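+
+        # With this dialect installed, create_connect_args() for a URL such
+        # as 'postgresql://service=projectb' (service name assumed) returns
+        # (['service=projectb'], {}), so psycopg2.connect() receives a plain
+        # libpq DSN and resolves the parameters via pg_service.conf.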
 
-        self.__setuptables()
-        self.__setupmappers()
+        try:
+            self.db_pg   = create_engine(connstr, **engine_args)
+            self.db_meta = MetaData()
+            self.db_meta.bind = self.db_pg
+            self.db_smaker = sessionmaker(bind=self.db_pg,
+                                          autoflush=True,
+                                          autocommit=False)
 
-    def session(self):
-        return self.db_smaker()
+            self.__setuptables()
+            self.__setupmappers()
 
-__all__.append('DBConn')
+        except OperationalError as e:
+            import utils
+            utils.fubar("Cannot connect to database (%s)" % str(e))
+
+        self.pid = os.getpid()
 
+    def session(self, work_mem = 0):
+        '''
+        Returns a new session object. If a work_mem parameter is provided, a
+        new transaction is started and work_mem is set to that number of
+        megabytes for the duration of the transaction; otherwise the server's
+        default value is used.
+        '''
+        # reinitialize DBConn in new processes
+        if self.pid != os.getpid():
+            clear_mappers()
+            self.__createconn()
+        session = self.db_smaker()
+        if work_mem > 0:
+            session.execute("SET LOCAL work_mem TO '%d MB'" % work_mem)
+        return session
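+
+    # Example usage (512 MB is an arbitrary illustrative value): run
+    # memory-hungry queries with extra work_mem for one transaction only.
+    #
+    #   session = DBConn().session(work_mem = 512)
+    #   # ... run queries ...
+    #   session.commit()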
 
+__all__.append('DBConn')