Merge remote-tracking branch 'origin/master' into version-checks
diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index 18f427d424222d02fd9e74b4ade827950679003f..fe04ebc3df4c90e1f1bf13a4c0ddb3966bab3d1c 100755
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -5,7 +5,7 @@
 @contact: Debian FTPMaster <ftpmaster@debian.org>
 @copyright: 2000, 2001, 2002, 2003, 2004, 2006  James Troup <james@nocrew.org>
 @copyright: 2008-2009  Mark Hymers <mhy@debian.org>
-@copyright: 2009  Joerg Jaspert <joerg@debian.org>
+@copyright: 2009, 2010  Joerg Jaspert <joerg@debian.org>
 @copyright: 2009  Mike O'Connor <stew@debian.org>
 @license: GNU General Public License version 2 or later
 """
 ################################################################################
 
 import os
+from os.path import normpath
+import re
 import psycopg2
 import traceback
+import commands
+
+try:
+    # python >= 2.6
+    import json
+except ImportError:
+    # python <= 2.5
+    import simplejson as json
+
+from datetime import datetime, timedelta
+from errno import ENOENT
+from tempfile import mkstemp, mkdtemp
+from subprocess import Popen, PIPE
+from tarfile import TarFile
 
 from inspect import getargspec
 
-from sqlalchemy import create_engine, Table, MetaData
-from sqlalchemy.orm import sessionmaker, mapper, relation
+import sqlalchemy
+from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
+    Text, ForeignKey
+from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
+    backref, MapperExtension, EXT_CONTINUE, object_mapper, clear_mappers
+from sqlalchemy import types as sqltypes
+from sqlalchemy.orm.collections import attribute_mapped_collection
+from sqlalchemy.ext.associationproxy import association_proxy
 
 # Don't remove this, we re-export the exceptions to scripts which import us
 from sqlalchemy.exc import *
@@ -49,12 +71,49 @@ from sqlalchemy.orm.exc import NoResultFound
 # Only import Config until Queue stuff is changed to store its config
 # in the database
 from config import Config
-from singleton import Singleton
 from textutils import fix_maintainer
+from dak_exceptions import DBUpdateError, NoSourceFieldError
+
+# suppress some deprecation warnings in squeeze related to sqlalchemy
+import warnings
+warnings.filterwarnings('ignore', \
+    "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
+    SADeprecationWarning)
+
+
+################################################################################
+
+# Patch in support for the debversion field type so that it works during
+# reflection
+
+try:
+    # this is for sqlalchemy 0.6
+    UserDefinedType = sqltypes.UserDefinedType
+except AttributeError:
+    # this one for sqlalchemy 0.5
+    UserDefinedType = sqltypes.TypeEngine
+
+class DebVersion(UserDefinedType):
+    def get_col_spec(self):
+        return "DEBVERSION"
+
+    def bind_processor(self, dialect):
+        return None
+
+    # ' = None' is needed for sqlalchemy 0.5:
+    def result_processor(self, dialect, coltype = None):
+        return None
+
+sa_major_version = sqlalchemy.__version__[0:3]
+if sa_major_version in ["0.5", "0.6"]:
+    from sqlalchemy.databases import postgres
+    postgres.ischema_names['debversion'] = DebVersion
+else:
+    raise Exception("dak only ported to SQLA versions 0.5 and 0.6.  See daklib/dbconn.py")
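+
+# With the type registered for reflection, reflected tables pick up DEBVERSION
+# columns automatically; an explicit column definition could look like this
+# (illustrative sketch, not part of this change):
+#   Column('version', DebVersion(), nullable = False)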
 
 ################################################################################
 
-__all__ = ['IntegrityError', 'SQLAlchemyError']
+__all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion']
 
 ################################################################################
 
@@ -105,11 +164,210 @@ def session_wrapper(fn):
 
     return wrapped
 
+__all__.append('session_wrapper')
+
 ################################################################################
 
-class Architecture(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class ORMObject(object):
+    """
+    ORMObject is a base class for all ORM classes mapped by SQLAlchemy. All
+    derived classes should implement the properties() method.
+    """
+
+    def properties(self):
+        '''
+        This method should be implemented by all derived classes and returns a
+        list of the important properties. The properties 'created' and
+        'modified' will be added automatically. A suffix '_count' should be
+        added to properties that are lists or query objects. The most important
+        property name should be returned as the first element in the list
+        because it is used by repr().
+        '''
+        return []
+
+    def json(self):
+        '''
+        Returns a JSON representation of the object based on the properties
+        returned from the properties() method.
+        '''
+        data = {}
+        # add created and modified
+        all_properties = self.properties() + ['created', 'modified']
+        for property in all_properties:
+            # check for list or query
+            if property[-6:] == '_count':
+                real_property = property[:-6]
+                if not hasattr(self, real_property):
+                    continue
+                value = getattr(self, real_property)
+                if hasattr(value, '__len__'):
+                    # list
+                    value = len(value)
+                elif hasattr(value, 'count'):
+                    # query (but not during validation)
+                    if self.in_validation:
+                        continue
+                    value = value.count()
+                else:
+                    raise KeyError('Do not understand property %s.' % property)
+            else:
+                if not hasattr(self, property):
+                    continue
+                # plain object
+                value = getattr(self, property)
+                if value is None:
+                    # skip None
+                    continue
+                elif isinstance(value, ORMObject):
+                    # use repr() for ORMObject types
+                    value = repr(value)
+                else:
+                    # we want a string for all other types because json cannot
+                    # encode everything
+                    value = str(value)
+            data[property] = value
+        return json.dumps(data)
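+
+    # Illustrative output for an Architecture instance (sketch):
+    #   {"arch_string": "amd64", "arch_id": 1, "suites_count": 5}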
+
+    def classname(self):
+        '''
+        Returns the name of the class.
+        '''
+        return type(self).__name__
+
+    def __repr__(self):
+        '''
+        Returns a short string representation of the object using the first
+        element from the properties() method.
+        '''
+        primary_property = self.properties()[0]
+        value = getattr(self, primary_property)
+        return '<%s %s>' % (self.classname(), str(value))
+
+    def __str__(self):
+        '''
+        Returns a human readable form of the object using the properties()
+        method.
+        '''
+        return '<%s %s>' % (self.classname(), self.json())
+
+    def not_null_constraints(self):
+        '''
+        Returns a list of properties that must be not NULL. Derived classes
+        should override this method if needed.
+        '''
+        return []
+
+    validation_message = \
+        "Validation failed because property '%s' must not be empty in object\n%s"
+
+    in_validation = False
+
+    def validate(self):
+        '''
+        This function validates the not NULL constraints as returned by
+        not_null_constraints(). It raises the DBUpdateError exception if
+        validation fails.
+        '''
+        for property in self.not_null_constraints():
+            # TODO: It is a bit awkward that the mapper configuration allow
+            # directly setting the numeric _id columns. We should get rid of it
+            # in the long run.
+            if hasattr(self, property + '_id') and \
+                getattr(self, property + '_id') is not None:
+                continue
+            if not hasattr(self, property) or getattr(self, property) is None:
+                # str() might lead to races due to a 2nd flush
+                self.in_validation = True
+                message = self.validation_message % (property, str(self))
+                self.in_validation = False
+                raise DBUpdateError(message)
+
+    @classmethod
+    @session_wrapper
+    def get(cls, primary_key, session = None):
+        '''
+        This is a support function that allows getting an object by its primary
+        key.
+
+        Architecture.get(3[, session])
+
+        instead of the more verbose
+
+        session.query(Architecture).get(3)
+        '''
+        return session.query(cls).get(primary_key)
+
+    def session(self, replace = False):
+        '''
+        Returns the current session that is associated with the object. May
+        return None if the object is in a detached state.
+        '''
+
+        return object_session(self)
+
+    def clone(self, session = None):
+        '''
+        Clones the current object in a new session and returns the new clone. A
+        fresh session is created if the optional session parameter is not
+        provided. The function will fail if a session is provided and has
+        unflushed changes.
+
+        RATIONALE: SQLAlchemy's session is not thread safe. This method clones
+        an existing object to allow several threads to work with their own
+        instances of an ORMObject.
+
+        WARNING: Only persistent (committed) objects can be cloned. Changes
+        made to the original object that are not committed yet will get lost.
+        The session of the new object will always be rolled back to avoid
+        resource leaks.
+        '''
+
+        if self.session() is None:
+            raise RuntimeError( \
+                'Method clone() failed for detached object:\n%s' % self)
+        self.session().flush()
+        mapper = object_mapper(self)
+        primary_key = mapper.primary_key_from_instance(self)
+        object_class = self.__class__
+        if session is None:
+            session = DBConn().session()
+        elif len(session.new) + len(session.dirty) + len(session.deleted) > 0:
+            raise RuntimeError( \
+                'Method clone() failed due to unflushed changes in session.')
+        new_object = session.query(object_class).get(primary_key)
+        session.rollback()
+        if new_object is None:
+            raise RuntimeError( \
+                'Method clone() failed for non-persistent object:\n%s' % self)
+        return new_object
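+
+    # Illustrative use (sketch): give a worker thread its own instance:
+    #   arch = Architecture.get(3)
+    #   private_arch = arch.clone()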
+
+__all__.append('ORMObject')
+
+################################################################################
+
+class Validator(MapperExtension):
+    '''
+    This class calls the validate() method for each instance for the
+    'before_update' and 'before_insert' events. A global object validator is
+    used for configuring the individual mappers.
+    '''
+
+    def before_update(self, mapper, connection, instance):
+        instance.validate()
+        return EXT_CONTINUE
+
+    def before_insert(self, mapper, connection, instance):
+        instance.validate()
+        return EXT_CONTINUE
+
+validator = Validator()
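+
+# Individual mappers opt in by passing this instance in their configuration,
+# e.g. (illustrative sketch):
+#   mapper(Architecture, tbl_architecture, extension = validator)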
+
+################################################################################
+
+class Architecture(ORMObject):
+    def __init__(self, arch_string = None, description = None):
+        self.arch_string = arch_string
+        self.description = description
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -123,8 +381,11 @@ class Architecture(object):
         # This signals to use the normal comparison operator
         return NotImplemented
 
-    def __repr__(self):
-        return '<Architecture %s>' % self.arch_string
+    def properties(self):
+        return ['arch_string', 'arch_id', 'suites_count']
+
+    def not_null_constraints(self):
+        return ['arch_string']
 
 __all__.append('Architecture')
 
@@ -153,13 +414,14 @@ def get_architecture(architecture, session=None):
 
 __all__.append('get_architecture')
 
+# TODO: should be removed because the implementation is too trivial
 @session_wrapper
 def get_architecture_suites(architecture, session=None):
     """
     Returns list of Suite objects for given C{architecture} name
 
-    @type source: str
-    @param source: Architecture name to search for
+    @type architecture: str
+    @param architecture: Architecture name to search for
 
     @type session: Session
     @param session: Optional SQL session object (a temporary one will be
@@ -169,13 +431,7 @@ def get_architecture_suites(architecture, session=None):
     @return: list of Suite objects for the given name (may be empty)
     """
 
-    q = session.query(Suite)
-    q = q.join(SuiteArchitecture)
-    q = q.join(Architecture).filter_by(arch_string=architecture).order_by('suite_name')
-
-    ret = q.all()
-
-    return ret
+    return get_architecture(architecture, session).suites
 
 __all__.append('get_architecture_suites')
 
@@ -219,34 +475,97 @@ __all__.append('get_archive')
 
 ################################################################################
 
-class BinAssociation(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<BinAssociation %s (%s, %s)>' % (self.ba_id, self.binary, self.suite)
-
-__all__.append('BinAssociation')
-
-################################################################################
-
-class BinContents(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class BinContents(ORMObject):
+    def __init__(self, file = None, binary = None):
+        self.file = file
+        self.binary = binary
 
-    def __repr__(self):
-        return '<BinContents (%s, %s)>' % (self.binary, self.filename)
+    def properties(self):
+        return ['file', 'binary']
 
 __all__.append('BinContents')
 
 ################################################################################
 
-class DBBinary(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<DBBinary %s (%s, %s)>' % (self.package, self.version, self.architecture)
+class DBBinary(ORMObject):
+    def __init__(self, package = None, source = None, version = None, \
+        maintainer = None, architecture = None, poolfile = None, \
+        binarytype = 'deb'):
+        self.package = package
+        self.source = source
+        self.version = version
+        self.maintainer = maintainer
+        self.architecture = architecture
+        self.poolfile = poolfile
+        self.binarytype = binarytype
+
+    @property
+    def pkid(self):
+        return self.binary_id
+
+    def properties(self):
+        return ['package', 'version', 'maintainer', 'source', 'architecture', \
+            'poolfile', 'binarytype', 'fingerprint', 'install_date', \
+            'suites_count', 'binary_id', 'contents_count', 'extra_sources']
+
+    def not_null_constraints(self):
+        return ['package', 'version', 'maintainer', 'source',  'poolfile', \
+            'binarytype']
+
+    metadata = association_proxy('key', 'value')
+
+    def get_component_name(self):
+        return self.poolfile.location.component.component_name
+
+    def scan_contents(self):
+        '''
+        Yields the contents of the package. Only regular files are yielded and
+        the path names are normalized after converting them from either utf-8
+        or iso8859-1 encoding. It yields the string ' <EMPTY PACKAGE>' if the
+        package does not contain any regular file.
+        '''
+        fullpath = self.poolfile.fullpath
+        dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE)
+        tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
+        regular_file_seen = False
+        for member in tar.getmembers():
+            if not member.isdir():
+                regular_file_seen = True
+                name = normpath(member.name)
+                # enforce proper utf-8 encoding
+                try:
+                    name.decode('utf-8')
+                except UnicodeDecodeError:
+                    name = name.decode('iso8859-1').encode('utf-8')
+                yield name
+        tar.close()
+        dpkg.stdout.close()
+        dpkg.wait()
+        if not regular_file_seen:
+            # honour the contract given in the docstring above
+            yield ' <EMPTY PACKAGE>'
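+
+    # Illustrative use (sketch):
+    #   for name in dbbinary.scan_contents():
+    #       print name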
+
+    def read_control(self):
+        '''
+        Reads the control information from a binary.
+
+        @rtype: text
+        @return: stanza text of the control section.
+        '''
+        import apt_inst
+        fullpath = self.poolfile.fullpath
+        deb_file = open(fullpath, 'r')
+        stanza = apt_inst.debExtractControl(deb_file)
+        deb_file.close()
+
+        return stanza
+
+    def read_control_fields(self):
+        '''
+        Reads the control information from a binary and return
+        as a dictionary.
+
+        @rtype: dict
+        @return: fields of the control section as a dictionary.
+        '''
+        import apt_pkg
+        stanza = self.read_control()
+        return apt_pkg.TagSection(stanza)
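+
+    # Illustrative use (sketch):
+    #   fields = dbbinary.read_control_fields()
+    #   section = fields['Section']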
 
 __all__.append('DBBinary')
 
@@ -255,143 +574,395 @@ def get_suites_binary_in(package, session=None):
     """
     Returns list of Suite objects which given C{package} name is in
 
-    @type source: str
-    @param source: DBBinary package name to search for
+    @type package: str
+    @param package: DBBinary package name to search for
 
     @rtype: list
     @return: list of Suite objects for the given package
     """
 
-    return session.query(Suite).join(BinAssociation).join(DBBinary).filter_by(package=package).all()
+    return session.query(Suite).filter(Suite.binaries.any(DBBinary.package == package)).all()
 
 __all__.append('get_suites_binary_in')
 
 @session_wrapper
-def get_binary_from_id(id, session=None):
-    """
-    Returns DBBinary object for given C{id}
+def get_component_by_package_suite(package, suite_list, arch_list=[], session=None):
+    '''
+    Returns the component name of the newest binary package in suite_list or
+    None if no package is found. The result can be optionally filtered by a list
+    of architecture names.
 
-    @type id: int
-    @param id: Id of the required binary
+    @type package: str
+    @param package: DBBinary package name to search for
 
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
+    @type suite_list: list of str
+    @param suite_list: list of suite_name items
 
-    @rtype: DBBinary
-    @return: DBBinary object for the given binary (None if not present)
-    """
+    @type arch_list: list of str
+    @param arch_list: optional list of arch_string items that defaults to []
 
-    q = session.query(DBBinary).filter_by(binary_id=id)
+    @rtype: str or NoneType
+    @return: name of component or None
+    '''
 
-    try:
-        return q.one()
-    except NoResultFound:
+    q = session.query(DBBinary).filter_by(package = package). \
+        join(DBBinary.suites).filter(Suite.suite_name.in_(suite_list))
+    if len(arch_list) > 0:
+        q = q.join(DBBinary.architecture). \
+            filter(Architecture.arch_string.in_(arch_list))
+    binary = q.order_by(desc(DBBinary.version)).first()
+    if binary is None:
         return None
+    else:
+        return binary.get_component_name()
 
-__all__.append('get_binary_from_id')
+__all__.append('get_component_by_package_suite')
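+
+# Illustrative call (sketch): component of the newest 'dpkg' binary in
+# unstable, limited to amd64 and architecture-independent packages:
+#   get_component_by_package_suite('dpkg', ['unstable'],
+#                                  arch_list = ['amd64', 'all'])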
 
-@session_wrapper
-def get_binaries_from_name(package, version=None, architecture=None, session=None):
-    """
-    Returns list of DBBinary objects for given C{package} name
+################################################################################
 
-    @type package: str
-    @param package: DBBinary package name to search for
+class BinaryACL(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-    @type version: str or None
-    @param version: Version to search for (or None)
+    def __repr__(self):
+        return '<BinaryACL %s>' % self.binary_acl_id
 
-    @type package: str, list or None
-    @param package: Architectures to limit to (or None if no limit)
+__all__.append('BinaryACL')
 
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
+################################################################################
 
-    @rtype: list
-    @return: list of DBBinary objects for the given name (may be empty)
-    """
+class BinaryACLMap(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-    q = session.query(DBBinary).filter_by(package=package)
+    def __repr__(self):
+        return '<BinaryACLMap %s>' % self.binary_acl_map_id
 
-    if version is not None:
-        q = q.filter_by(version=version)
+__all__.append('BinaryACLMap')
 
-    if architecture is not None:
-        if not isinstance(architecture, list):
-            architecture = [architecture]
-        q = q.join(Architecture).filter(Architecture.arch_string.in_(architecture))
+################################################################################
 
-    ret = q.all()
+MINIMAL_APT_CONF="""
+Dir
+{
+   ArchiveDir "%(archivepath)s";
+   OverrideDir "%(overridedir)s";
+   CacheDir "%(cachedir)s";
+};
+
+Default
+{
+   Packages::Compress ". bzip2 gzip";
+   Sources::Compress ". bzip2 gzip";
+   DeLinkLimit 0;
+   FileMode 0664;
+}
+
+bindirectory "incoming"
+{
+   Packages "Packages";
+   Contents " ";
+
+   BinOverride "override.sid.all3";
+   BinCacheDB "packages-accepted.db";
+
+   FileList "%(filelist)s";
+
+   PathPrefix "";
+   Packages::Extensions ".deb .udeb";
+};
+
+bindirectory "incoming/"
+{
+   Sources "Sources";
+   BinOverride "override.sid.all3";
+   SrcOverride "override.sid.all3.src";
+   FileList "%(filelist)s";
+};
+"""
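+
+# The %(archivepath)s, %(overridedir)s, %(cachedir)s and %(filelist)s
+# placeholders above are filled in by BuildQueue.write_metadata() below.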
 
-    return ret
+class BuildQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-__all__.append('get_binaries_from_name')
+    def __repr__(self):
+        return '<BuildQueue %s>' % self.queue_name
+
+    def write_metadata(self, starttime, force=False):
+        # Do we write out metafiles?
+        if not (force or self.generate_metadata):
+            return
+
+        session = object_session(self)
+
+        fl_fd = fl_name = ac_fd = ac_name = None
+        tempdir = None
+        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
+        startdir = os.getcwd()
+
+        try:
+            # Grab files we want to include
+            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+            # Write file list with newer files
+            (fl_fd, fl_name) = mkstemp()
+            for n in newer:
+                os.write(fl_fd, '%s\n' % n.fullpath)
+            os.close(fl_fd)
+
+            cnf = Config()
+
+            # Write minimal apt.conf
+            # TODO: Remove hardcoding from template
+            (ac_fd, ac_name) = mkstemp()
+            os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
+                                                'filelist': fl_name,
+                                                'cachedir': cnf["Dir::Cache"],
+                                                'overridedir': cnf["Dir::Override"],
+                                                })
+            os.close(ac_fd)
+
+            # Run apt-ftparchive generate
+            os.chdir(os.path.dirname(ac_name))
+            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
+
+            # Run apt-ftparchive release
+            # TODO: Eww - fix this
+            bname = os.path.basename(self.path)
+            os.chdir(self.path)
+            os.chdir('..')
+
+            # We have to remove the Release file otherwise it'll be included in the
+            # new one
+            try:
+                os.unlink(os.path.join(bname, 'Release'))
+            except OSError:
+                pass
+
+            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
+
+            # Crude hack with open and append, but this whole section
+            # should be redone anyway.
+            if self.notautomatic:
+                release = open("Release", "a")
+                release.write("NotAutomatic: yes\n")
+                release.close()
+
+            # Sign if necessary
+            if self.signingkey:
+                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
+                if cnf.has_key("Dinstall::SigningPubKeyring"):
+                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
+
+                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release" % (keyring, self.signingkey))
+
+            # Move the files if we got this far
+            os.rename('Release', os.path.join(bname, 'Release'))
+            if self.signingkey:
+                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
+
+        # Clean up any left behind files
+        finally:
+            os.chdir(startdir)
+            if fl_fd:
+                try:
+                    os.close(fl_fd)
+                except OSError:
+                    pass
+
+            if fl_name:
+                try:
+                    os.unlink(fl_name)
+                except OSError:
+                    pass
+
+            if ac_fd:
+                try:
+                    os.close(ac_fd)
+                except OSError:
+                    pass
+
+            if ac_name:
+                try:
+                    os.unlink(ac_name)
+                except OSError:
+                    pass
+
+    def clean_and_update(self, starttime, Logger, dryrun=False):
+        """WARNING: This routine commits for you"""
+        session = object_session(self)
+
+        if self.generate_metadata and not dryrun:
+            self.write_metadata(starttime)
+
+        # Grab files older than our execution time
+        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+
+        for o in older:
+            killdb = False
+            try:
+                if dryrun:
+                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
+                else:
+                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
+                    os.unlink(o.fullpath)
+                    killdb = True
+            except OSError, e:
+                # If it wasn't there, don't worry
+                if e.errno == ENOENT:
+                    killdb = True
+                else:
+                    # TODO: Replace with proper logging call
+                    Logger.log(["E: Could not remove %s" % o.fullpath])
+
+            if killdb:
+                session.delete(o)
+
+        session.commit()
+
+        for f in os.listdir(self.path):
+            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
+                continue
+
+            try:
+                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+            except NoResultFound:
+                fp = os.path.join(self.path, f)
+                if dryrun:
+                    Logger.log(["I: Would remove unused link %s" % fp])
+                else:
+                    Logger.log(["I: Removing unused link %s" % fp])
+                    try:
+                        os.unlink(fp)
+                    except OSError:
+                        Logger.log(["E: Failed to unlink unreferenced file %s" % fp])
+
+    def add_file_from_pool(self, poolfile):
+        """Copies a file into the pool.  Assumes that the PoolFile object is
+        attached to the same SQLAlchemy session as the Queue object is.
+
+        The caller is responsible for committing after calling this function."""
+        poolfile_basename = os.path.basename(poolfile.filename)
+
+        # Check if we have a file of this name or this ID already
+        for f in self.queuefiles:
+            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
+               f.poolfile.filename == poolfile_basename:
+                # In this case, update the BuildQueueFile entry so we
+                # don't remove it too early
+                f.lastused = datetime.now()
+                object_session(poolfile).add(f)
+                return f
+
+        # Prepare BuildQueueFile object
+        qf = BuildQueueFile()
+        qf.build_queue_id = self.queue_id
+        qf.lastused = datetime.now()
+        qf.filename = poolfile_basename
+
+        targetpath = poolfile.fullpath
+        queuepath = os.path.join(self.path, poolfile_basename)
+
+        try:
+            if self.copy_files:
+                # We need to copy instead of symlink
+                import utils
+                utils.copy(targetpath, queuepath)
+                # NULL in the fileid field implies a copy
+                qf.fileid = None
+            else:
+                os.symlink(targetpath, queuepath)
+                qf.fileid = poolfile.file_id
+        except OSError:
+            return None
+
+        # Get the same session as the PoolFile is using and add the qf to it
+        object_session(poolfile).add(qf)
+
+        return qf
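+
+    # Illustrative use (sketch); the caller commits afterwards:
+    #   qf = build_queue.add_file_from_pool(poolfile)
+    #   session.commit()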
+
+
+__all__.append('BuildQueue')
 
 @session_wrapper
-def get_binaries_from_source_id(source_id, session=None):
+def get_build_queue(queuename, session=None):
     """
-    Returns list of DBBinary objects for given C{source_id}
+    Returns BuildQueue object for given C{queue name} or None if it does not
+    exist.
 
-    @type source_id: int
-    @param source_id: source_id to search for
+    @type queuename: string
+    @param queuename: The name of the queue
 
     @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
+    @param session: Optional SQLA session object (a temporary one will be
     generated if not supplied)
 
-    @rtype: list
-    @return: list of DBBinary objects for the given name (may be empty)
+    @rtype: BuildQueue
+    @return: BuildQueue object for the given queue (None if not present)
     """
 
-    return session.query(DBBinary).filter_by(source_id=source_id).all()
+    q = session.query(BuildQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_build_queue')
+
+################################################################################
 
-__all__.append('get_binaries_from_source_id')
+class BuildQueueFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-@session_wrapper
-def get_binary_from_name_suite(package, suitename, session=None):
-    ### For dak examine-package
-    ### XXX: Doesn't use object API yet
+    def __repr__(self):
+        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
 
-    sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name
-             FROM binaries b, files fi, location l, component c, bin_associations ba, suite su
-             WHERE b.package=:package
-               AND b.file = fi.id
-               AND fi.location = l.id
-               AND l.component = c.id
-               AND ba.bin=b.id
-               AND ba.suite = su.id
-               AND su.suite_name=:suitename
-          ORDER BY b.version DESC"""
+    @property
+    def fullpath(self):
+        return os.path.join(self.buildqueue.path, self.filename)
 
-    return session.execute(sql, {'package': package, 'suitename': suitename})
 
-__all__.append('get_binary_from_name_suite')
+__all__.append('BuildQueueFile')
 
-@session_wrapper
-def get_binary_components(package, suitename, arch, session=None):
-    # Check for packages that have moved from one component to another
-    query = """SELECT c.name FROM binaries b, bin_associations ba, suite s, location l, component c, architecture a, files f
-    WHERE b.package=:package AND s.suite_name=:suitename
-      AND (a.arch_string = :arch OR a.arch_string = 'all')
-      AND ba.bin = b.id AND ba.suite = s.id AND b.architecture = a.id
-      AND f.location = l.id
-      AND l.component = c.id
-      AND b.file = f.id"""
+################################################################################
+
+class ChangePendingBinary(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
 
-    vals = {'package': package, 'suitename': suitename, 'arch': arch}
+__all__.append('ChangePendingBinary')
 
-    return session.execute(query, vals)
+################################################################################
+
+class ChangePendingFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-__all__.append('get_binary_components')
+    def __repr__(self):
+        return '<ChangePendingFile %s>' % self.change_pending_file_id
+
+__all__.append('ChangePendingFile')
 
 ################################################################################
 
-class Component(object):
+class ChangePendingSource(object):
     def __init__(self, *args, **kwargs):
         pass
 
+    def __repr__(self):
+        return '<ChangePendingSource %s>' % self.change_pending_source_id
+
+__all__.append('ChangePendingSource')
+
+################################################################################
+
+class Component(ORMObject):
+    def __init__(self, component_name = None):
+        self.component_name = component_name
+
     def __eq__(self, val):
         if isinstance(val, str):
             return (self.component_name == val)
@@ -404,8 +975,12 @@ class Component(object):
         # This signals to use the normal comparison operator
         return NotImplemented
 
-    def __repr__(self):
-        return '<Component %s>' % self.component_name
+    def properties(self):
+        return ['component_name', 'component_id', 'description', \
+            'location_count', 'meets_dfsg', 'overrides_count']
+
+    def not_null_constraints(self):
+        return ['component_name']
 
 
 __all__.append('Component')
@@ -548,8 +1123,9 @@ def get_or_set_contents_path_id(filepath, session=None):
 
     If no matching file is found, a row is inserted.
 
-    @type filename: string
-    @param filename: The filepath
+    @type filepath: string
+    @param filepath: The filepath
+
     @type session: SQLAlchemy
     @param session: Optional SQL session object (a temporary one will be
     generated if not supplied).  If not passed, a commit will be performed at
@@ -610,17 +1186,16 @@ def insert_content_paths(binary_id, fullpaths, session=None):
 
     try:
         # Insert paths
-        pathcache = {}
-
         def generate_path_dicts():
             for fullpath in fullpaths:
                 if fullpath.startswith( './' ):
                     fullpath = fullpath[2:]
 
-                yield {'fulename':fullpath, 'id': binary_id }
+                yield {'filename':fullpath, 'id': binary_id }
 
-        session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
-                         generate_path_dicts() )
+        for d in generate_path_dicts():
+            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
+                         d )
 
         session.commit()
         if privatetrans:
@@ -685,12 +1260,27 @@ __all__.append('get_dscfiles')
 
 ################################################################################
 
-class PoolFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class PoolFile(ORMObject):
+    def __init__(self, filename = None, location = None, filesize = -1, \
+        md5sum = None):
+        self.filename = filename
+        self.location = location
+        self.filesize = filesize
+        self.md5sum = md5sum
 
-    def __repr__(self):
-        return '<PoolFile %s>' % self.filename
+    @property
+    def fullpath(self):
+        return os.path.join(self.location.path, self.filename)
+
+    def is_valid(self, filesize = -1, md5sum = None):
+        return self.filesize == long(filesize) and self.md5sum == md5sum
+
+    def properties(self):
+        return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
+            'sha256sum', 'location', 'source', 'binary', 'last_used']
+
+    def not_null_constraints(self):
+        return ['filename', 'md5sum', 'location']
 
 __all__.append('PoolFile')
 
@@ -698,7 +1288,7 @@ __all__.append('PoolFile')
 def check_poolfile(filename, filesize, md5sum, location_id, session=None):
     """
     Returns a tuple:
-     (ValidFileFound [boolean or None], PoolFile object or None)
+    (ValidFileFound [boolean], PoolFile object or None)
 
     @type filename: string
     @param filename: the filename of the file to check against the DB
@@ -714,35 +1304,24 @@ def check_poolfile(filename, filesize, md5sum, location_id, session=None):
 
     @rtype: tuple
     @return: Tuple of length 2.
-             If more than one file found with that name:
-                    (None,  None)
-             If valid pool file found: (True, PoolFile object)
-             If valid pool file not found:
-                    (False, None) if no file found
-                    (False, PoolFile object) if file found with size/md5sum mismatch
+                 - If valid pool file found: (C{True}, C{PoolFile object})
+                 - If valid pool file not found:
+                     - (C{False}, C{None}) if no file found
+                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
     """
 
-    q = session.query(PoolFile).filter_by(filename=filename)
-    q = q.join(Location).filter_by(location_id=location_id)
-
-    ret = None
+    poolfile = session.query(Location).get(location_id). \
+        files.filter_by(filename=filename).first()
+    valid = False
+    if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
+        valid = True
 
-    if q.count() > 1:
-        ret = (None, None)
-    elif q.count() < 1:
-        ret = (False, None)
-    else:
-        obj = q.one()
-        if obj.md5sum != md5sum or obj.filesize != filesize:
-            ret = (False, obj)
-
-    if ret is None:
-        ret = (True, obj)
-
-    return ret
+    return (valid, poolfile)
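+
+# Illustrative call (sketch):
+#   (valid, poolfile) = check_poolfile('main/d/dak/dak_1.0-1.dsc', 1234,
+#                                      'd41d8cd98f00b204e9800998ecf8427e',
+#                                      location_id)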
 
 __all__.append('check_poolfile')
 
+# TODO: the implementation can trivially be inlined at the place where the
+# function is called
 @session_wrapper
 def get_poolfile_by_id(file_id, session=None):
     """
@@ -755,71 +1334,104 @@ def get_poolfile_by_id(file_id, session=None):
     @return: either the PoolFile object or None
     """
 
-    q = session.query(PoolFile).filter_by(file_id=file_id)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
+    return session.query(PoolFile).get(file_id)
 
 __all__.append('get_poolfile_by_id')
 
-
 @session_wrapper
-def get_poolfile_by_name(filename, location_id=None, session=None):
+def get_poolfile_like_name(filename, session=None):
     """
-    Returns an array of PoolFile objects for the given filename and
-    (optionally) location_id
+    Returns an array of PoolFile objects which are like the given name
 
     @type filename: string
     @param filename: the filename of the file to check against the DB
 
-    @type location_id: int
-    @param location_id: the id of the location to look in (optional)
-
     @rtype: array
     @return: array of PoolFile objects
     """
 
-    q = session.query(PoolFile).filter_by(filename=filename)
-
-    if location_id is not None:
-        q = q.join(Location).filter_by(location_id=location_id)
+    # TODO: There must be a way of properly using bind parameters with %FOO%
+    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
 
     return q.all()
 
-__all__.append('get_poolfile_by_name')
+__all__.append('get_poolfile_like_name')
 
 @session_wrapper
-def get_poolfile_like_name(filename, session=None):
+def add_poolfile(filename, datadict, location_id, session=None):
     """
-    Returns an array of PoolFile objects which are like the given name
+    Add a new file to the pool
 
     @type filename: string
-    @param filename: the filename of the file to check against the DB
+    @param filename: filename
 
-    @rtype: array
-    @return: array of PoolFile objects
+    @type datadict: dict
+    @param datadict: dict with 'size', 'md5sum', 'sha1sum' and 'sha256sum'
+    entries for the file
+
+    @type location_id: int
+    @param location_id: database id of the location
+
+    @rtype: PoolFile
+    @return: the PoolFile object created
     """
+    poolfile = PoolFile()
+    poolfile.filename = filename
+    poolfile.filesize = datadict["size"]
+    poolfile.md5sum = datadict["md5sum"]
+    poolfile.sha1sum = datadict["sha1sum"]
+    poolfile.sha256sum = datadict["sha256sum"]
+    poolfile.location_id = location_id
 
-    # TODO: There must be a way of properly using bind parameters with %FOO%
-    q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename))
+    session.add(poolfile)
+    # Flush to get a file id (NB: This is not a commit)
+    session.flush()
 
-    return q.all()
+    return poolfile
 
-__all__.append('get_poolfile_like_name')
+__all__.append('add_poolfile')
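+
+# Illustrative call (sketch), with checksums computed beforehand:
+#   add_poolfile('pool/main/d/dak/dak_1.0-1.dsc',
+#                {'size': size, 'md5sum': md5, 'sha1sum': sha1,
+#                 'sha256sum': sha256},
+#                location_id)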
 
 ################################################################################
 
-class Fingerprint(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class Fingerprint(ORMObject):
+    def __init__(self, fingerprint = None):
+        self.fingerprint = fingerprint
 
-    def __repr__(self):
-        return '<Fingerprint %s>' % self.fingerprint
+    def properties(self):
+        return ['fingerprint', 'fingerprint_id', 'keyring', 'uid', \
+            'binary_reject']
+
+    def not_null_constraints(self):
+        return ['fingerprint']
 
 __all__.append('Fingerprint')
 
+@session_wrapper
+def get_fingerprint(fpr, session=None):
+    """
+    Returns Fingerprint object for given fpr.
+
+    @type fpr: string
+    @param fpr: The fpr to find / add
+
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).
+
+    @rtype: Fingerprint
+    @return: the Fingerprint object for the given fpr or None
+    """
+
+    q = session.query(Fingerprint).filter_by(fingerprint=fpr)
+
+    try:
+        ret = q.one()
+    except NoResultFound:
+        ret = None
+
+    return ret
+
+__all__.append('get_fingerprint')
+
 @session_wrapper
 def get_or_set_fingerprint(fpr, session=None):
     """
@@ -857,20 +1469,143 @@ __all__.append('get_or_set_fingerprint')
 
 ################################################################################
 
+# Helper routine for Keyring class
+def get_ldap_name(entry):
+    name = []
+    for k in ["cn", "mn", "sn"]:
+        ret = entry.get(k)
+        if ret and ret[0] != "" and ret[0] != "-":
+            name.append(ret[0])
+    return " ".join(name)
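+
+# Illustrative: an entry {'cn': ['Joe'], 'sn': ['User']} yields 'Joe User'.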
+
+################################################################################
+
 class Keyring(object):
+    gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
+                     " --with-colons --fingerprint --fingerprint"
+
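+    # Note: both dictionaries below are class attributes; their contents are
+    # shared between all Keyring instances.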
+    keys = {}
+    fpr_lookup = {}
+
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
         return '<Keyring %s>' % self.keyring_name
 
+    def de_escape_gpg_str(self, txt):
+        esclist = re.split(r'(\\x..)', txt)
+        for x in range(1,len(esclist),2):
+            esclist[x] = "%c" % (int(esclist[x][2:],16))
+        return "".join(esclist)
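+
+    # Illustrative: de_escape_gpg_str(r'Joe\x3aUser') returns 'Joe:User'.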
+
+    def parse_address(self, uid):
+        """parses uid and returns a tuple of real name and email address"""
+        import email.Utils
+        (name, address) = email.Utils.parseaddr(uid)
+        name = re.sub(r"\s*[(].*[)]", "", name)
+        name = self.de_escape_gpg_str(name)
+        if name == "":
+            name = uid
+        return (name, address)
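+
+    # Illustrative: parse_address('Joe User <joe@example.org>') returns
+    #   ('Joe User', 'joe@example.org').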
+
+    def load_keys(self, keyring):
+        if not self.keyring_id:
+            raise Exception('Must be initialized with database information')
+
+        k = os.popen(self.gpg_invocation % keyring, "r")
+        key = None
+        signingkey = False
+
+        for line in k.xreadlines():
+            field = line.split(":")
+            if field[0] == "pub":
+                key = field[4]
+                self.keys[key] = {}
+                (name, addr) = self.parse_address(field[9])
+                if "@" in addr:
+                    self.keys[key]["email"] = addr
+                    self.keys[key]["name"] = name
+                self.keys[key]["fingerprints"] = []
+                signingkey = True
+            elif key and field[0] == "sub" and len(field) >= 12:
+                signingkey = ("s" in field[11])
+            elif key and field[0] == "uid":
+                (name, addr) = self.parse_address(field[9])
+                if "email" not in self.keys[key] and "@" in addr:
+                    self.keys[key]["email"] = addr
+                    self.keys[key]["name"] = name
+            elif signingkey and field[0] == "fpr":
+                self.keys[key]["fingerprints"].append(field[9])
+                self.fpr_lookup[field[9]] = key
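+
+    # The loop above parses "gpg --with-colons" output: on a "pub" line the
+    # fifth field is the key ID and the tenth the user ID (assuming the
+    # classic gpg 1.x colon layout this code targets).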
+
+    def import_users_from_ldap(self, session):
+        import ldap
+        cnf = Config()
+
+        LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
+        LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
+
+        l = ldap.open(LDAPServer)
+        l.simple_bind_s("","")
+        Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
+               "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
+               ["uid", "keyfingerprint", "cn", "mn", "sn"])
+
+        ldap_fin_uid_id = {}
+
+        byuid = {}
+        byname = {}
+
+        for i in Attrs:
+            entry = i[1]
+            uid = entry["uid"][0]
+            name = get_ldap_name(entry)
+            fingerprints = entry["keyFingerPrint"]
+            keyid = None
+            for f in fingerprints:
+                key = self.fpr_lookup.get(f, None)
+                if key not in self.keys:
+                    continue
+                self.keys[key]["uid"] = uid
+
+                if keyid is not None:
+                    continue
+                keyid = get_or_set_uid(uid, session).uid_id
+                byuid[keyid] = (uid, name)
+                byname[uid] = (keyid, name)
+
+        return (byname, byuid)
+
+    def generate_users_from_keyring(self, format, session):
+        byuid = {}
+        byname = {}
+        any_invalid = False
+        for x in self.keys.keys():
+            if "email" not in self.keys[x]:
+                any_invalid = True
+                self.keys[x]["uid"] = format % "invalid-uid"
+            else:
+                uid = format % self.keys[x]["email"]
+                keyid = get_or_set_uid(uid, session).uid_id
+                byuid[keyid] = (uid, self.keys[x]["name"])
+                byname[uid] = (keyid, self.keys[x]["name"])
+                self.keys[x]["uid"] = uid
+
+        if any_invalid:
+            uid = format % "invalid-uid"
+            keyid = get_or_set_uid(uid, session).uid_id
+            byuid[keyid] = (uid, "ungeneratable user id")
+            byname[uid] = (keyid, "ungeneratable user id")
+
+        return (byname, byuid)
+
 __all__.append('Keyring')
 
 @session_wrapper
-def get_or_set_keyring(keyring, session=None):
+def get_keyring(keyring, session=None):
     """
-    If C{keyring} does not have an entry in the C{keyrings} table yet, create one
-    and return the new Keyring
+    If C{keyring} does not have an entry in the C{keyrings} table yet, return None
     If C{keyring} already has an entry, simply return the existing Keyring
 
     @type keyring: string
@@ -885,49 +1620,113 @@ def get_or_set_keyring(keyring, session=None):
     try:
         return q.one()
     except NoResultFound:
-        obj = Keyring(keyring_name=keyring)
-        session.add(obj)
-        session.commit_or_flush()
-        return obj
+        return None
 
-__all__.append('get_or_set_keyring')
+__all__.append('get_keyring')
 
 ################################################################################
 
-class Location(object):
+class KeyringACLMap(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<Location %s (%s)>' % (self.path, self.location_id)
+        return '<KeyringACLMap %s>' % self.keyring_acl_map_id
 
-__all__.append('Location')
+__all__.append('KeyringACLMap')
 
-@session_wrapper
-def get_location(location, component=None, archive=None, session=None):
-    """
-    Returns Location object for the given combination of location, component
-    and archive
+################################################################################
 
-    @type location: string
-    @param location: the path of the location, e.g. I{/srv/ftp.debian.org/ftp/pool/}
+class DBChange(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-    @type component: string
-    @param component: the component name (if None, no restriction applied)
+    def __repr__(self):
+        return '<DBChange %s>' % self.changesname
 
-    @type archive: string
-    @param archive_id: the archive name (if None, no restriction applied)
+    def clean_from_queue(self):
+        session = object_session(self)
 
-    @rtype: Location / None
-    @return: Either a Location object or None if one can't be found
-    """
+        # Remove changes_pool_files entries
+        self.poolfiles = []
 
-    q = session.query(Location).filter_by(path=location)
+        # Remove changes_pending_files references
+        self.files = []
 
-    if archive is not None:
-        q = q.join(Archive).filter_by(archive_name=archive)
+        # Clear out of queue
+        self.in_queue = None
+        self.approved_for_id = None
 
-    if component is not None:
+__all__.append('DBChange')
+
+@session_wrapper
+def get_dbchange(filename, session=None):
+    """
+    Returns DBChange object for given C{filename}.
+
+    @type filename: string
+    @param filename: the name of the file
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: DBChange
+    @return:  DBChange object for the given filename (C{None} if not present)
+
+    """
+    q = session.query(DBChange).filter_by(changesname=filename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_dbchange')
+
+################################################################################
+
+class Location(ORMObject):
+    def __init__(self, path = None, component = None):
+        self.path = path
+        self.component = component
+        # the column 'type' should go away, see comment at mapper
+        self.archive_type = 'pool'
+
+    def properties(self):
+        return ['path', 'location_id', 'archive_type', 'component', \
+            'files_count']
+
+    def not_null_constraints(self):
+        return ['path', 'archive_type']
+
+__all__.append('Location')
+
+@session_wrapper
+def get_location(location, component=None, archive=None, session=None):
+    """
+    Returns Location object for the given combination of location, component
+    and archive
+
+    @type location: string
+    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
+
+    @type component: string
+    @param component: the component name (if None, no restriction applied)
+
+    @type archive: string
+    @param archive: the archive name (if None, no restriction applied)
+
+    @rtype: Location / None
+    @return: Either a Location object or None if one can't be found
+    """
+
+    q = session.query(Location).filter_by(path=location)
+
+    if archive is not None:
+        q = q.join(Archive).filter_by(archive_name=archive)
+
+    if component is not None:
         q = q.join(Component).filter_by(component_name=component)
 
     try:
@@ -939,12 +1738,15 @@ __all__.append('get_location')
 
 ################################################################################
 
-class Maintainer(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class Maintainer(ORMObject):
+    def __init__(self, name = None):
+        self.name = name
 
-    def __repr__(self):
-        return '''<Maintainer '%s' (%s)>''' % (self.name, self.maintainer_id)
+    def properties(self):
+        return ['name', 'maintainer_id']
+
+    def not_null_constraints(self):
+        return ['name']
 
     def get_split_maintainer(self):
         if not hasattr(self, 'name') or self.name is None:
@@ -1077,12 +1879,22 @@ __all__.append('get_new_comments')
 
 ################################################################################
 
-class Override(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class Override(ORMObject):
+    def __init__(self, package = None, suite = None, component = None, overridetype = None, \
+        section = None, priority = None):
+        self.package = package
+        self.suite = suite
+        self.component = component
+        self.overridetype = overridetype
+        self.section = section
+        self.priority = priority
 
-    def __repr__(self):
-        return '<Override %s (%s)>' % (self.package, self.suite_id)
+    def properties(self):
+        return ['package', 'suite', 'component', 'overridetype', 'section', \
+            'priority']
+
+    def not_null_constraints(self):
+        return ['package', 'suite', 'component', 'overridetype', 'section']
 
 __all__.append('Override')
 
@@ -1136,12 +1948,15 @@ __all__.append('get_override')
 
 ################################################################################
 
-class OverrideType(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class OverrideType(ORMObject):
+    def __init__(self, overridetype = None):
+        self.overridetype = overridetype
 
-    def __repr__(self):
-        return '<OverrideType %s>' % self.overridetype
+    def properties(self):
+        return ['overridetype', 'overridetype_id', 'overrides_count']
+
+    def not_null_constraints(self):
+        return ['overridetype']
 
 __all__.append('OverrideType')
 
@@ -1172,114 +1987,77 @@ __all__.append('get_override_type')
 
 ################################################################################
 
-class DebContents(object):
+class PolicyQueue(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<DebConetnts %s: %s>' % (self.package.package,self.file)
-
-__all__.append('DebContents')
+        return '<PolicyQueue %s>' % self.queue_name
 
+__all__.append('PolicyQueue')
 
-class UdebContents(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<UdebConetnts %s: %s>' % (self.package.package,self.file)
-
-__all__.append('UdebContents')
-
-class PendingBinContents(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<PendingBinContents %s>' % self.contents_id
-
-__all__.append('PendingBinContents')
-
-def insert_pending_content_paths(package,
-                                 is_udeb,
-                                 fullpaths,
-                                 session=None):
+@session_wrapper
+def get_policy_queue(queuename, session=None):
     """
-    Make sure given paths are temporarily associated with given
-    package
+    Returns PolicyQueue object for given C{queue name}
 
-    @type package: dict
-    @param package: the package to associate with should have been read in from the binary control file
-    @type fullpaths: list
-    @param fullpaths: the list of paths of the file being associated with the binary
-    @type session: SQLAlchemy session
-    @param session: Optional SQLAlchemy session.  If this is passed, the caller
-    is responsible for ensuring a transaction has begun and committing the
-    results or rolling back based on the result code.  If not passed, a commit
-    will be performed at the end of the function
+    @type queuename: string
+    @param queuename: The name of the queue
 
-    @return: True upon success, False if there is a problem
-    """
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
 
-    privatetrans = False
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given queue
+    """
 
-    if session is None:
-        session = DBConn().session()
-        privatetrans = True
+    q = session.query(PolicyQueue).filter_by(queue_name=queuename)
 
     try:
-        arch = get_architecture(package['Architecture'], session)
-        arch_id = arch.arch_id
+        return q.one()
+    except NoResultFound:
+        return None
 
-        # Remove any already existing recorded files for this package
-        q = session.query(PendingBinContents)
-        q = q.filter_by(package=package['Package'])
-        q = q.filter_by(version=package['Version'])
-        q = q.filter_by(architecture=arch_id)
-        q.delete()
+__all__.append('get_policy_queue')
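+
+# Illustrative usage sketch ('new' is an example queue name):
+#
+#     session = DBConn().session()
+#     queue = get_policy_queue('new', session)
+#     if queue is not None:
+#         print queue.queue_name, queue.path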
 
-        for fullpath in fullpaths:
+@session_wrapper
+def get_policy_queue_from_path(pathname, session=None):
+    """
+    Returns PolicyQueue object for given C{path name}
 
-            if fullpath.startswith( "./" ):
-                fullpath = fullpath[2:]
+    @type pathname: string
+    @param pathname: The path of the queue
 
-            pca = PendingBinContents()
-            pca.package = package['Package']
-            pca.version = package['Version']
-            pca.file = fullpath
-            pca.architecture = arch_id
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
 
-            if isudeb:
-                pca.type = 8 # gross
-            else:
-                pca.type = 7 # also gross
-            session.add(pca)
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given path
+    """
 
-        # Only commit if we set up the session ourself
-        if privatetrans:
-            session.commit()
-            session.close()
-        else:
-            session.flush()
+    q = session.query(PolicyQueue).filter_by(path=pathname)
 
-        return True
-    except Exception, e:
-        traceback.print_exc()
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
 
-        # Only rollback if we set up the session ourself
-        if privatetrans:
-            session.rollback()
-            session.close()
+__all__.append('get_policy_queue_from_path')
 
-        return False
+################################################################################
 
-__all__.append('insert_pending_content_paths')
+class Priority(ORMObject):
+    def __init__(self, priority = None, level = None):
+        self.priority = priority
+        self.level = level
 
-################################################################################
+    def properties(self):
+        return ['priority', 'priority_id', 'level', 'overrides_count']
 
-class Priority(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def not_null_constraints(self):
+        return ['priority', 'level']
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -1293,9 +2071,6 @@ class Priority(object):
         # This signals to use the normal comparison operator
         return NotImplemented
 
-    def __repr__(self):
-        return '<Priority %s (%s)>' % (self.priority, self.priority_id)
-
 __all__.append('Priority')
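+
+# Illustrative sketch: Priority (like Section and Suite below) compares
+# equal to its name string, so callers can write e.g.
+#
+#     Priority(priority = 'optional', level = 60) == 'optional'   # True
+#
+# ('optional' and 60 are example values, not taken from the database).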
 
 @session_wrapper
@@ -1347,191 +2122,15 @@ __all__.append('get_priorities')
 
 ################################################################################
 
-class Queue(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class Section(ORMObject):
+    def __init__(self, section = None):
+        self.section = section
 
-    def __repr__(self):
-        return '<Queue %s>' % self.queue_name
+    def properties(self):
+        return ['section', 'section_id', 'overrides_count']
 
-    def autobuild_upload(self, changes, srcpath, session=None):
-        """
-        Update queue_build database table used for incoming autobuild support.
-
-        @type changes: Changes
-        @param changes: changes object for the upload to process
-
-        @type srcpath: string
-        @param srcpath: path for the queue file entries/link destinations
-
-        @type session: SQLAlchemy session
-        @param session: Optional SQLAlchemy session.  If this is passed, the
-        caller is responsible for ensuring a transaction has begun and
-        committing the results or rolling back based on the result code.  If
-        not passed, a commit will be performed at the end of the function,
-        otherwise the caller is responsible for commiting.
-
-        @rtype: NoneType or string
-        @return: None if the operation failed, a string describing the error if not
-        """
-
-        privatetrans = False
-        if session is None:
-            session = DBConn().session()
-            privatetrans = True
-
-        # TODO: Remove by moving queue config into the database
-        conf = Config()
-
-        for suitename in changes.changes["distribution"].keys():
-            # TODO: Move into database as:
-            #       buildqueuedir TEXT DEFAULT NULL (i.e. NULL is no build)
-            #       buildqueuecopy BOOLEAN NOT NULL DEFAULT FALSE (i.e. default is symlink)
-            #       This also gets rid of the SecurityQueueBuild hack below
-            if suitename not in conf.ValueList("Dinstall::QueueBuildSuites"):
-                continue
-
-            # Find suite object
-            s = get_suite(suitename, session)
-            if s is None:
-                return "INTERNAL ERROR: Could not find suite %s" % suitename
-
-            # TODO: Get from database as above
-            dest_dir = conf["Dir::QueueBuild"]
-
-            # TODO: Move into database as above
-            if conf.FindB("Dinstall::SecurityQueueBuild"):
-                dest_dir = os.path.join(dest_dir, suitename)
-
-            for file_entry in changes.files.keys():
-                src = os.path.join(srcpath, file_entry)
-                dest = os.path.join(dest_dir, file_entry)
-
-                # TODO: Move into database as above
-                if conf.FindB("Dinstall::SecurityQueueBuild"):
-                    # Copy it since the original won't be readable by www-data
-                    import utils
-                    utils.copy(src, dest)
-                else:
-                    # Create a symlink to it
-                    os.symlink(src, dest)
-
-                qb = QueueBuild()
-                qb.suite_id = s.suite_id
-                qb.queue_id = self.queue_id
-                qb.filename = dest
-                qb.in_queue = True
-
-                session.add(qb)
-
-            exists, symlinked = utils.ensure_orig_files(changes, dest, session)
-
-            # Add symlinked files to the list of packages for later processing
-            # by apt-ftparchive
-            for filename in symlinked:
-                qb = QueueBuild()
-                qb.suite_id = s.suite_id
-                qb.queue_id = self.queue_id
-                qb.filename = filename
-                qb.in_queue = True
-                session.add(qb)
-
-            # Update files to ensure they are not removed prematurely
-            for filename in exists:
-                qb = get_queue_build(filename, s.suite_id, session)
-                if qb is None:
-                    qb.in_queue = True
-                    qb.last_used = None
-                    session.add(qb)
-
-        if privatetrans:
-            session.commit()
-            session.close()
-
-        return None
-
-__all__.append('Queue')
-
-@session_wrapper
-def get_or_set_queue(queuename, session=None):
-    """
-    Returns Queue object for given C{queue name}, creating it if it does not
-    exist.
-
-    @type queuename: string
-    @param queuename: The name of the queue
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: Queue
-    @return: Queue object for the given queue
-    """
-
-    q = session.query(Queue).filter_by(queue_name=queuename)
-
-    try:
-        ret = q.one()
-    except NoResultFound:
-        queue = Queue()
-        queue.queue_name = queuename
-        session.add(queue)
-        session.commit_or_flush()
-        ret = queue
-
-    return ret
-
-__all__.append('get_or_set_queue')
-
-################################################################################
-
-class QueueBuild(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<QueueBuild %s (%s)>' % (self.filename, self.queue_id)
-
-__all__.append('QueueBuild')
-
-@session_wrapper
-def get_queue_build(filename, suite, session=None):
-    """
-    Returns QueueBuild object for given C{filename} and C{suite}.
-
-    @type filename: string
-    @param filename: The name of the file
-
-    @type suiteid: int or str
-    @param suiteid: Suite name or ID
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: Queue
-    @return: Queue object for the given queue
-    """
-
-    if isinstance(suite, int):
-        q = session.query(QueueBuild).filter_by(filename=filename).filter_by(suite_id=suite)
-    else:
-        q = session.query(QueueBuild).filter_by(filename=filename)
-        q = q.join(Suite).filter_by(suite_name=suite)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_queue_build')
-
-################################################################################
-
-class Section(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def not_null_constraints(self):
+        return ['section']
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -1545,9 +2144,6 @@ class Section(object):
         # This signals to use the normal comparison operator
         return NotImplemented
 
-    def __repr__(self):
-        return '<Section %s>' % self.section
-
 __all__.append('Section')
 
 @session_wrapper
@@ -1599,12 +2195,126 @@ __all__.append('get_sections')
 
 ################################################################################
 
-class DBSource(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class SrcContents(ORMObject):
+    def __init__(self, file = None, source = None):
+        self.file = file
+        self.source = source
 
-    def __repr__(self):
-        return '<DBSource %s (%s)>' % (self.source, self.version)
+    def properties(self):
+        return ['file', 'source']
+
+__all__.append('SrcContents')
+
+################################################################################
+
+from debian.debfile import Deb822
+
+# Temporary Deb822 subclass to fix bugs with : handling; see #597249
+class Dak822(Deb822):
+    def _internal_parser(self, sequence, fields=None):
+        # The key is non-whitespace, non-colon characters before any colon.
+        key_part = r"^(?P<key>[^: \t\n\r\f\v]+)\s*:\s*"
+        single = re.compile(key_part + r"(?P<data>\S.*?)\s*$")
+        multi = re.compile(key_part + r"$")
+        multidata = re.compile(r"^\s(?P<data>.+?)\s*$")
+
+        wanted_field = lambda f: fields is None or f in fields
+
+        if isinstance(sequence, basestring):
+            sequence = sequence.splitlines()
+
+        curkey = None
+        content = ""
+        for line in self.gpg_stripped_paragraph(sequence):
+            m = single.match(line)
+            if m:
+                if curkey:
+                    self[curkey] = content
+
+                if not wanted_field(m.group('key')):
+                    curkey = None
+                    continue
+
+                curkey = m.group('key')
+                content = m.group('data')
+                continue
+
+            m = multi.match(line)
+            if m:
+                if curkey:
+                    self[curkey] = content
+
+                if not wanted_field(m.group('key')):
+                    curkey = None
+                    continue
+
+                curkey = m.group('key')
+                content = ""
+                continue
+
+            m = multidata.match(line)
+            if m:
+                content += '\n' + line # XXX not m.group('data')?
+                continue
+
+        if curkey:
+            self[curkey] = content
+
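+# Illustrative usage sketch (the path is hypothetical): Dak822 parses an
+# RFC822-style paragraph such as a .dsc into a dictionary-like object:
+#
+#     dsc = Dak822(open('/srv/ftp/pool/main/h/hello/hello_2.4-1.dsc', 'r'))
+#     print dsc['Source'], dsc['Version']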
+
+class DBSource(ORMObject):
+    def __init__(self, source = None, version = None, maintainer = None, \
+        changedby = None, poolfile = None, install_date = None):
+        self.source = source
+        self.version = version
+        self.maintainer = maintainer
+        self.changedby = changedby
+        self.poolfile = poolfile
+        self.install_date = install_date
+
+    @property
+    def pkid(self):
+        return self.source_id
+
+    def properties(self):
+        return ['source', 'source_id', 'maintainer', 'changedby', \
+            'fingerprint', 'poolfile', 'version', 'suites_count', \
+            'install_date', 'binaries_count', 'uploaders_count']
+
+    def not_null_constraints(self):
+        return ['source', 'version', 'install_date', 'maintainer', \
+            'changedby', 'poolfile']
+
+    def read_control_fields(self):
+        '''
+        Reads the control information from a dsc
+
+        @rtype: Dak822
+        @return: the dsc control fields as a dictionary-like object
+        '''
+        fullpath = self.poolfile.fullpath
+        fields = Dak822(open(fullpath, 'r'))
+        return fields
+
+    metadata = association_proxy('key', 'value')
+
+    def scan_contents(self):
+        '''
+        Returns a set of names of non-directory entries. Path names are
+        normalized to utf-8; names that fail to decode as utf-8 are assumed
+        to be iso8859-1 and are re-encoded as utf-8.
+        '''
+        fullpath = self.poolfile.fullpath
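+        # deferred import, presumably to avoid a circular dependency
+        # between daklib.contents and this module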
+        from daklib.contents import UnpackedSource
+        unpacked = UnpackedSource(fullpath)
+        fileset = set()
+        for name in unpacked.get_all_filenames():
+            # enforce proper utf-8 encoding
+            try:
+                name.decode('utf-8')
+            except UnicodeDecodeError:
+                name = name.decode('iso8859-1').encode('utf-8')
+            fileset.add(name)
+        return fileset
 
 __all__.append('DBSource')
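+
+# Illustrative sketch (assumes a populated database and pool on disk;
+# 'hello' and 'unstable' are example names):
+#
+#     session = DBConn().session()
+#     src = get_source_in_suite('hello', 'unstable', session)
+#     fields = src.read_control_fields()    # Dak822 of the .dsc
+#     names = src.scan_contents()           # set of utf-8 file names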
 
@@ -1616,8 +2326,8 @@ def source_exists(source, source_version, suites = ["any"], session=None):
       1. exact match     => 1.0-3
       2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1
 
-    @type package: string
-    @param package: package source name
+    @type source: string
+    @param source: source name
 
     @type source_version: string
     @param source_version: expected source version
@@ -1635,10 +2345,14 @@ def source_exists(source, source_version, suites = ["any"], session=None):
     """
 
     cnf = Config()
-    ret = 1
+    ret = True
+
+    from daklib.regexes import re_bin_only_nmu
+    orig_source_version = re_bin_only_nmu.sub('', source_version)
 
     for suite in suites:
-        q = session.query(DBSource).filter_by(source=source)
+        q = session.query(DBSource).filter_by(source=source). \
+            filter(DBSource.version.in_([source_version, orig_source_version]))
         if suite != "any":
             # source must exist in suite X, or in some other suite that's
             # mapped to X, recursively... silent-maps are counted too,
@@ -1649,28 +2363,17 @@ def source_exists(source, source_version, suites = ["any"], session=None):
             maps = [ (x[1], x[2]) for x in maps
                             if x[0] == "map" or x[0] == "silent-map" ]
             s = [suite]
-            for x in maps:
-                if x[1] in s and x[0] not in s:
-                    s.append(x[0])
-
-            q = q.join(SrcAssociation).join(Suite)
-            q = q.filter(Suite.suite_name.in_(s))
+            for (from_, to) in maps:
+                if from_ in s and to not in s:
+                    s.append(to)
 
-        # Reduce the query results to a list of version numbers
-        ql = [ j.version for j in q.all() ]
+            q = q.filter(DBSource.suites.any(Suite.suite_name.in_(s)))
 
-        # Try (1)
-        if source_version in ql:
-            continue
-
-        # Try (2)
-        from daklib.regexes import re_bin_only_nmu
-        orig_source_version = re_bin_only_nmu.sub('', source_version)
-        if orig_source_version in ql:
+        if q.count() > 0:
             continue
 
         # No source found so return not ok
-        ret = 0
+        ret = False
 
     return ret
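+
+# Worked example (illustrative): for a binary-only NMU, re_bin_only_nmu
+# strips the '+bN' suffix, so
+#
+#     source_exists('hello', '1.0-3+b1', suites = ['unstable'], session = session)
+#
+# is True when source 'hello' is at version '1.0-3' (or '1.0-3+b1') in
+# unstable or in a suite mapped to it.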
 
@@ -1688,7 +2391,7 @@ def get_suites_source_in(source, session=None):
     @return: list of Suite objects for the given source
     """
 
-    return session.query(Suite).join(SrcAssociation).join(DBSource).filter_by(source=source).all()
+    return session.query(Suite).filter(Suite.sources.any(source=source)).all()
 
 __all__.append('get_suites_source_in')
 
@@ -1700,8 +2403,8 @@ def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=
     @type source: str
     @param source: DBSource package name to search for
 
-    @type source: str or None
-    @param source: DBSource version name to search for or None if not applicable
+    @type version: str or None
+    @param version: DBSource version name to search for or None if not applicable
 
     @type dm_upload_allowed: bool
     @param dm_upload_allowed: If None, no effect.  If True or False, only
@@ -1727,10 +2430,12 @@ def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=
 
 __all__.append('get_sources_from_name')
 
+# FIXME: This function fails badly if it finds more than 1 source package and
+# its implementation is trivial enough to be inlined.
 @session_wrapper
 def get_source_in_suite(source, suite, session=None):
     """
-    Returns list of DBSource objects for a combination of C{source} and C{suite}.
+    Returns a DBSource object for a combination of C{source} and C{suite}.
 
       - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
       - B{suite} - a suite name, eg. I{unstable}
@@ -1746,49 +2451,229 @@ def get_source_in_suite(source, suite, session=None):
 
     """
 
-    q = session.query(SrcAssociation)
-    q = q.join('source').filter_by(source=source)
-    q = q.join('suite').filter_by(suite_name=suite)
-
+    q = get_suite(suite, session).get_sources(source)
     try:
-        return q.one().source
+        return q.one()
     except NoResultFound:
         return None
 
 __all__.append('get_source_in_suite')
 
+@session_wrapper
+def import_metadata_into_db(obj, session=None):
+    """
+    This routine works on either DBBinary or DBSource objects and imports
+    their metadata into the database
+    """
+    fields = obj.read_control_fields()
+    for k in fields.keys():
+        try:
+            # Try raw ASCII
+            val = str(fields[k])
+        except UnicodeEncodeError:
+            # Fall back to UTF-8
+            try:
+                val = fields[k].encode('utf-8')
+            except UnicodeEncodeError:
+                # Finally try iso8859-1
+                val = fields[k].encode('iso8859-1')
+                # Otherwise we allow the exception to percolate up and we cause
+                # a reject as someone is playing silly buggers
+
+        obj.metadata[get_or_set_metadatakey(k, session)] = val
+
+    session.commit_or_flush()
+
+__all__.append('import_metadata_into_db')
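+
+# Illustrative sketch (assumes 'src' is a DBSource whose .dsc is in the pool):
+#
+#     session = DBConn().session()
+#     import_metadata_into_db(src, session)
+#     maint = src.metadata[get_or_set_metadatakey('Maintainer', session)]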
+
+
 ################################################################################
 
-class SrcAssociation(object):
-    def __init__(self, *args, **kwargs):
-        pass
+@session_wrapper
+def add_dsc_to_db(u, filename, session=None):
+    entry = u.pkg.files[filename]
+    source = DBSource()
+    pfs = []
+
+    source.source = u.pkg.dsc["source"]
+    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
+    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
+    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    source.install_date = datetime.now().date()
+
+    dsc_component = entry["component"]
+    dsc_location_id = entry["location id"]
+
+    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
+
+    # Set up a new poolfile if necessary
+    if not entry.has_key("files id") or not entry["files id"]:
+        filename = entry["pool name"] + filename
+        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
+        session.flush()
+        pfs.append(poolfile)
+        entry["files id"] = poolfile.file_id
+
+    source.poolfile_id = entry["files id"]
+    session.add(source)
+
+    suite_names = u.pkg.changes["distribution"].keys()
+    source.suites = session.query(Suite). \
+        filter(Suite.suite_name.in_(suite_names)).all()
+
+    # Add the source files to the DB (files and dsc_files)
+    dscfile = DSCFile()
+    dscfile.source_id = source.source_id
+    dscfile.poolfile_id = entry["files id"]
+    session.add(dscfile)
+
+    for dsc_file, dentry in u.pkg.dsc_files.items():
+        df = DSCFile()
+        df.source_id = source.source_id
+
+        # If the .orig tarball is already in the pool, its
+        # files id is stored in dsc_files by check_dsc().
+        files_id = dentry.get("files id", None)
+
+        # Find the entry in the files hash
+        # TODO: Bail out here properly
+        dfentry = None
+        for f, e in u.pkg.files.items():
+            if f == dsc_file:
+                dfentry = e
+                break
+
+        if files_id is None:
+            filename = dfentry["pool name"] + dsc_file
+
+            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
+            # FIXME: needs to check for -1/-2 and or handle exception
+            if found and obj is not None:
+                files_id = obj.file_id
+                pfs.append(obj)
+
+            # If still not found, add it
+            if files_id is None:
+                # HACK: Force sha1sum etc into dentry
+                dentry["sha1sum"] = dfentry["sha1sum"]
+                dentry["sha256sum"] = dfentry["sha256sum"]
+                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
+                pfs.append(poolfile)
+                files_id = poolfile.file_id
+        else:
+            poolfile = get_poolfile_by_id(files_id, session)
+            if poolfile is None:
+                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
+            pfs.append(poolfile)
 
-    def __repr__(self):
-        return '<SrcAssociation %s (%s, %s)>' % (self.sa_id, self.source, self.suite)
+        df.poolfile_id = files_id
+        session.add(df)
+
+    # Add the src_uploaders to the DB
+    source.uploaders = [source.maintainer]
+    if u.pkg.dsc.has_key("uploaders"):
+        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
+            up = up.strip()
+            source.uploaders.append(get_or_set_maintainer(up, session))
+
+    session.flush()
+
+    return source, dsc_component, dsc_location_id, pfs
+
+__all__.append('add_dsc_to_db')
+
+@session_wrapper
+def add_deb_to_db(u, filename, session=None):
+    """
+    Contrary to what you might expect, this routine deals with both
+    debs and udebs.  That info is in 'dbtype', whilst 'type' is
+    'deb' for both of them.
+    """
+    cnf = Config()
+    entry = u.pkg.files[filename]
+
+    bin = DBBinary()
+    bin.package = entry["package"]
+    bin.version = entry["version"]
+    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
+    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
+    bin.binarytype = entry["dbtype"]
+
+    # Find poolfile id
+    filename = entry["pool name"] + filename
+    fullpath = os.path.join(cnf["Dir::Pool"], filename)
+    if not entry.get("location id", None):
+        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
+
+    if entry.get("files id", None):
+        bin.poolfile_id = entry["files id"]
+        poolfile = get_poolfile_by_id(bin.poolfile_id, session)
+    else:
+        poolfile = add_poolfile(filename, entry, entry["location id"], session)
+        bin.poolfile_id = entry["files id"] = poolfile.file_id
+
+    # Find source id
+    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+    if len(bin_sources) != 1:
+        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+                                  (bin.package, bin.version, entry["architecture"],
+                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
+
+    bin.source_id = bin_sources[0].source_id
+
+    if entry.has_key("built-using"):
+        for srcname, version in entry["built-using"]:
+            exsources = get_sources_from_name(srcname, version, session=session)
+            if len(exsources) != 1:
+                raise NoSourceFieldError, "Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
+                                          (srcname, version, bin.package, bin.version, entry["architecture"],
+                                           filename, bin.binarytype, u.pkg.changes["fingerprint"])
 
-__all__.append('SrcAssociation')
+            bin.extra_sources.append(exsources[0])
+
+    # Add and flush object so it has an ID
+    session.add(bin)
+
+    suite_names = u.pkg.changes["distribution"].keys()
+    bin.suites = session.query(Suite). \
+        filter(Suite.suite_name.in_(suite_names)).all()
+
+    session.flush()
+
+    # Deal with contents - disabled for now
+    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
+    #if not contents:
+    #    print "REJECT\nCould not determine contents of package %s" % bin.package
+    #    session.rollback()
+    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
+
+    return bin, poolfile
+
+__all__.append('add_deb_to_db')
 
 ################################################################################
 
-class SrcFormat(object):
+class SourceACL(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<SrcFormat %s>' % (self.format_name)
+        return '<SourceACL %s>' % self.source_acl_id
 
-__all__.append('SrcFormat')
+__all__.append('SourceACL')
 
 ################################################################################
 
-class SrcUploader(object):
+class SrcFormat(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<SrcUploader %s>' % self.uploader_id
+        return '<SrcFormat %s>' % (self.format_name)
 
-__all__.append('SrcUploader')
+__all__.append('SrcFormat')
 
 ################################################################################
 
@@ -1806,18 +2691,21 @@ SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                  ('Priority', 'priority'),
                  ('NotAutomatic', 'notautomatic'),
                  ('CopyChanges', 'copychanges'),
-                 ('CopyDotDak', 'copydotdak'),
-                 ('CommentsDir', 'commentsdir'),
-                 ('OverrideSuite', 'overridesuite'),
-                 ('ChangelogBase', 'changelogbase')]
+                 ('OverrideSuite', 'overridesuite')]
 
+# Why the heck don't we have any UNIQUE constraints in table suite?
+# TODO: Add UNIQUE constraints for appropriate columns.
+class Suite(ORMObject):
+    def __init__(self, suite_name = None, version = None):
+        self.suite_name = suite_name
+        self.version = version
 
-class Suite(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def properties(self):
+        return ['suite_name', 'version', 'sources_count', 'binaries_count', \
+            'overrides_count']
 
-    def __repr__(self):
-        return '<Suite %s>' % self.suite_name
+    def not_null_constraints(self):
+        return ['suite_name']
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -1840,38 +2728,48 @@ class Suite(object):
 
         return "\n".join(ret)
 
-__all__.append('Suite')
+    def get_architectures(self, skipsrc=False, skipall=False):
+        """
+        Returns list of Architecture objects
 
-@session_wrapper
-def get_suite_architecture(suite, architecture, session=None):
-    """
-    Returns a SuiteArchitecture object given C{suite} and ${arch} or None if it
-    doesn't exist
+        @type skipsrc: boolean
+        @param skipsrc: Whether to skip returning the 'source' architecture entry
+        (Default False)
 
-    @type suite: str
-    @param suite: Suite name to search for
+        @type skipall: boolean
+        @param skipall: Whether to skip returning the 'all' architecture entry
+        (Default False)
 
-    @type architecture: str
-    @param architecture: Architecture name to search for
+        @rtype: list
+        @return: list of Architecture objects for the given name (may be empty)
+        """
 
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
+        q = object_session(self).query(Architecture).with_parent(self)
+        if skipsrc:
+            q = q.filter(Architecture.arch_string != 'source')
+        if skipall:
+            q = q.filter(Architecture.arch_string != 'all')
+        return q.order_by(Architecture.arch_string).all()
 
-    @rtype: SuiteArchitecture
-    @return: the SuiteArchitecture object or None
-    """
+    def get_sources(self, source):
+        """
+        Returns a query object for DBSource objects that are part of this suite.
 
-    q = session.query(SuiteArchitecture)
-    q = q.join(Architecture).filter_by(arch_string=architecture)
-    q = q.join(Suite).filter_by(suite_name=suite)
+          - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
 
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
+        @type source: string
+        @param source: source package name
 
-__all__.append('get_suite_architecture')
+        @rtype: sqlalchemy.orm.query.Query
+        @return: a query of DBSource
+
+        """
+
+        session = object_session(self)
+        return session.query(DBSource).filter_by(source = source). \
+            with_parent(self)
+
+__all__.append('Suite')
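+
+# Illustrative sketch (assumes a populated database; names are examples):
+#
+#     session = DBConn().session()
+#     suite = get_suite('unstable', session)
+#     archs = suite.get_architectures(skipsrc = True, skipall = True)
+#     glibc = suite.get_sources('glibc').first()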
 
 @session_wrapper
 def get_suite(suite, session=None):
@@ -1900,22 +2798,14 @@ __all__.append('get_suite')
 
 ################################################################################
 
-class SuiteArchitecture(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<SuiteArchitecture (%s, %s)>' % (self.suite_id, self.arch_id)
-
-__all__.append('SuiteArchitecture')
-
+# TODO: should be removed because the implementation is too trivial
 @session_wrapper
 def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     """
     Returns list of Architecture objects for given C{suite} name
 
-    @type source: str
-    @param source: Suite name to search for
+    @type suite: str
+    @param suite: Suite name to search for
 
     @type skipsrc: boolean
     @param skipsrc: Whether to skip returning the 'source' architecture entry
@@ -1933,19 +2823,7 @@ def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     @return: list of Architecture objects for the given name (may be empty)
     """
 
-    q = session.query(Architecture)
-    q = q.join(SuiteArchitecture)
-    q = q.join(Suite).filter_by(suite_name=suite)
-
-    if skipsrc:
-        q = q.filter(Architecture.arch_string != 'source')
-
-    if skipall:
-        q = q.filter(Architecture.arch_string != 'all')
-
-    q = q.order_by('arch_string')
-
-    return q.all()
+    return get_suite(suite, session).get_architectures(skipsrc, skipall)
 
 __all__.append('get_suite_architectures')
 
@@ -1987,9 +2865,10 @@ __all__.append('get_suite_src_formats')
 
 ################################################################################
 
-class Uid(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class Uid(ORMObject):
+    def __init__(self, uid = None, name = None):
+        self.uid = uid
+        self.name = name
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -2003,32 +2882,13 @@ class Uid(object):
         # This signals to use the normal comparison operator
         return NotImplemented
 
-    def __repr__(self):
-        return '<Uid %s (%s)>' % (self.uid, self.name)
-
-__all__.append('Uid')
-
-@session_wrapper
-def add_database_user(uidname, session=None):
-    """
-    Adds a database user
+    def properties(self):
+        return ['uid', 'name', 'fingerprint']
 
-    @type uidname: string
-    @param uidname: The uid of the user to add
+    def not_null_constraints(self):
+        return ['uid']
 
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied).  If not passed, a commit will be performed at
-    the end of the function, otherwise the caller is responsible for commiting.
-
-    @rtype: Uid
-    @return: the uid object for the given uidname
-    """
-
-    session.execute("CREATE USER :uid", {'uid': uidname})
-    session.commit_or_flush()
-
-__all__.append('add_database_user')
+__all__.append('Uid')
 
 @session_wrapper
 def get_or_set_uid(uidname, session=None):
@@ -2078,93 +2938,241 @@ __all__.append('get_uid_from_fingerprint')
 
 ################################################################################
 
-class DBConn(Singleton):
+class UploadBlock(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
+
+__all__.append('UploadBlock')
+
+################################################################################
+
+class MetadataKey(ORMObject):
+    def __init__(self, key = None):
+        self.key = key
+
+    def properties(self):
+        return ['key']
+
+    def not_null_constraints(self):
+        return ['key']
+
+__all__.append('MetadataKey')
+
+@session_wrapper
+def get_or_set_metadatakey(keyname, session=None):
+    """
+    Returns MetadataKey object for given keyname.
+
+    If no matching keyname is found, a row is inserted.
+
+    @type keyname: string
+    @param keyname: The keyname to add
+
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).  If not passed, a commit will be performed at
+    the end of the function, otherwise the caller is responsible for committing.
+
+    @rtype: MetadataKey
+    @return: the metadatakey object for the given keyname
+    """
+
+    q = session.query(MetadataKey).filter_by(key=keyname)
+
+    try:
+        ret = q.one()
+    except NoResultFound:
+        ret = MetadataKey(keyname)
+        session.add(ret)
+        session.commit_or_flush()
+
+    return ret
+
+__all__.append('get_or_set_metadatakey')
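+
+# Illustrative sketch of the get-or-set pattern above (runs in the caller's
+# transaction when a session is passed):
+#
+#     session = DBConn().session()
+#     key = get_or_set_metadatakey('Homepage', session)
+#     assert key.key == 'Homepage'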
+
+################################################################################
+
+class BinaryMetadata(ORMObject):
+    def __init__(self, key = None, value = None, binary = None):
+        self.key = key
+        self.value = value
+        self.binary = binary
+
+    def properties(self):
+        return ['binary', 'key', 'value']
+
+    def not_null_constraints(self):
+        return ['value']
+
+__all__.append('BinaryMetadata')
+
+################################################################################
+
+class SourceMetadata(ORMObject):
+    def __init__(self, key = None, value = None, source = None):
+        self.key = key
+        self.value = value
+        self.source = source
+
+    def properties(self):
+        return ['source', 'key', 'value']
+
+    def not_null_constraints(self):
+        return ['value']
+
+__all__.append('SourceMetadata')
+
+################################################################################
+
+class VersionCheck(ORMObject):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def properties(self):
+        #return ['suite_id', 'check', 'reference_id']
+        return ['check']
+
+    def not_null_constraints(self):
+        return ['suite', 'check', 'reference']
+
+__all__.append('VersionCheck')
+
+@session_wrapper
+def get_version_checks(suite_name, check = None, session = None):
+    suite = get_suite(suite_name, session)
+    if not suite:
+        return None
+    q = session.query(VersionCheck).filter_by(suite=suite)
+    if check:
+        q = q.filter_by(check=check)
+    return q.all()
+
+__all__.append('get_version_checks')
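+
+# Illustrative sketch ('MustBeNewerThan' is an assumed check name; the
+# authoritative names live in the version_check table):
+#
+#     session = DBConn().session()
+#     for vc in get_version_checks('unstable', 'MustBeNewerThan', session):
+#         print vc.reference.suite_name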
+
+################################################################################
+
+class DBConn(object):
     """
     database module init.
     """
+    __shared_state = {}
+
     def __init__(self, *args, **kwargs):
-        super(DBConn, self).__init__(*args, **kwargs)
+        self.__dict__ = self.__shared_state
 
-    def _startup(self, *args, **kwargs):
-        self.debug = False
-        if kwargs.has_key('debug'):
-            self.debug = True
-        self.__createconn()
+        if not getattr(self, 'initialised', False):
+            self.initialised = True
+            self.debug = kwargs.has_key('debug')
+            self.__createconn()
 
     def __setuptables(self):
-        self.tbl_architecture = Table('architecture', self.db_meta, autoload=True)
-        self.tbl_archive = Table('archive', self.db_meta, autoload=True)
-        self.tbl_bin_contents = Table('bin_contents', self.db_meta, autoload=True)
-        self.tbl_bin_associations = Table('bin_associations', self.db_meta, autoload=True)
-        self.tbl_binaries = Table('binaries', self.db_meta, autoload=True)
-        self.tbl_component = Table('component', self.db_meta, autoload=True)
-        self.tbl_config = Table('config', self.db_meta, autoload=True)
-        self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True)
-        self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True)
-        self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True)
-        self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True)
-        self.tbl_deb_contents = Table('deb_contents', self.db_meta, autoload=True)
-        self.tbl_files = Table('files', self.db_meta, autoload=True)
-        self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True)
-        self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True)
-        self.tbl_location = Table('location', self.db_meta, autoload=True)
-        self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True)
-        self.tbl_new_comments = Table('new_comments', self.db_meta, autoload=True)
-        self.tbl_override = Table('override', self.db_meta, autoload=True)
-        self.tbl_override_type = Table('override_type', self.db_meta, autoload=True)
-        self.tbl_pending_bin_contents = Table('pending_bin_contents', self.db_meta, autoload=True)
-        self.tbl_priority = Table('priority', self.db_meta, autoload=True)
-        self.tbl_queue = Table('queue', self.db_meta, autoload=True)
-        self.tbl_queue_build = Table('queue_build', self.db_meta, autoload=True)
-        self.tbl_section = Table('section', self.db_meta, autoload=True)
-        self.tbl_source = Table('source', self.db_meta, autoload=True)
-        self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True)
-        self.tbl_src_format = Table('src_format', self.db_meta, autoload=True)
-        self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True)
-        self.tbl_suite = Table('suite', self.db_meta, autoload=True)
-        self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True)
-        self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True)
-        self.tbl_udeb_contents = Table('udeb_contents', self.db_meta, autoload=True)
-        self.tbl_uid = Table('uid', self.db_meta, autoload=True)
+        tables = (
+            'architecture',
+            'archive',
+            'bin_associations',
+            'bin_contents',
+            'binaries',
+            'binaries_metadata',
+            'binary_acl',
+            'binary_acl_map',
+            'build_queue',
+            'build_queue_files',
+            'changelogs_text',
+            'changes',
+            'component',
+            'config',
+            'changes_pending_binaries',
+            'changes_pending_files',
+            'changes_pending_source',
+            'changes_pending_files_map',
+            'changes_pending_source_files',
+            'changes_pool_files',
+            'dsc_files',
+            'extra_src_references',
+            'files',
+            'fingerprint',
+            'keyrings',
+            'keyring_acl_map',
+            'location',
+            'maintainer',
+            'metadata_keys',
+            'new_comments',
+            # TODO: the maintainer column in table override should be removed.
+            'override',
+            'override_type',
+            'policy_queue',
+            'priority',
+            'section',
+            'source',
+            'source_acl',
+            'source_metadata',
+            'src_associations',
+            'src_contents',
+            'src_format',
+            'src_uploaders',
+            'suite',
+            'suite_architectures',
+            'suite_build_queue_copy',
+            'suite_src_formats',
+            'uid',
+            'upload_blocks',
+            'version_check',
+        )
+
+        views = (
+            'almost_obsolete_all_associations',
+            'almost_obsolete_src_associations',
+            'any_associations_source',
+            'bin_assoc_by_arch',
+            'bin_associations_binaries',
+            'binaries_suite_arch',
+            'binfiles_suite_component_arch',
+            'changelogs',
+            'file_arch_suite',
+            'newest_all_associations',
+            'newest_any_associations',
+            'newest_source',
+            'newest_src_association',
+            'obsolete_all_associations',
+            'obsolete_any_associations',
+            'obsolete_any_by_all_associations',
+            'obsolete_src_associations',
+            'source_suite',
+            'src_associations_bin',
+            'src_associations_src',
+            'suite_arch_by_name',
+        )
+
+        for table_name in tables:
+            table = Table(table_name, self.db_meta, \
+                autoload=True, useexisting=True)
+            setattr(self, 'tbl_%s' % table_name, table)
+
+        for view_name in views:
+            view = Table(view_name, self.db_meta, autoload=True)
+            setattr(self, 'view_%s' % view_name, view)
 
     def __setupmappers(self):
         mapper(Architecture, self.tbl_architecture,
-               properties = dict(arch_id = self.tbl_architecture.c.id))
+            properties = dict(arch_id = self.tbl_architecture.c.id,
+               suites = relation(Suite, secondary=self.tbl_suite_architectures,
+                   order_by='suite_name',
+                   backref=backref('architectures', order_by='arch_string'))),
+            extension = validator)
 
         mapper(Archive, self.tbl_archive,
                properties = dict(archive_id = self.tbl_archive.c.id,
                                  archive_name = self.tbl_archive.c.name))
 
-        mapper(BinAssociation, self.tbl_bin_associations,
-               properties = dict(ba_id = self.tbl_bin_associations.c.id,
-                                 suite_id = self.tbl_bin_associations.c.suite,
-                                 suite = relation(Suite),
-                                 binary_id = self.tbl_bin_associations.c.bin,
-                                 binary = relation(DBBinary)))
-
-        mapper(PendingBinContents, self.tbl_pending_bin_contents,
-               properties = dict(contents_id =self.tbl_pending_bin_contents.c.id,
-                                 filename = self.tbl_pending_bin_contents.c.filename,
-                                 package = self.tbl_pending_bin_contents.c.package,
-                                 version = self.tbl_pending_bin_contents.c.version,
-                                 arch = self.tbl_pending_bin_contents.c.arch,
-                                 otype = self.tbl_pending_bin_contents.c.type))
-
-        mapper(DebContents, self.tbl_deb_contents,
-               properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
-                                 package=self.tbl_deb_contents.c.package,
-                                 component=self.tbl_deb_contents.c.component,
-                                 arch=self.tbl_deb_contents.c.arch,
-                                 section=self.tbl_deb_contents.c.section,
-                                 filename=self.tbl_deb_contents.c.filename))
-
-        mapper(UdebContents, self.tbl_udeb_contents,
-               properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
-                                 package=self.tbl_udeb_contents.c.package,
-                                 component=self.tbl_udeb_contents.c.component,
-                                 arch=self.tbl_udeb_contents.c.arch,
-                                 section=self.tbl_udeb_contents.c.section,
-                                 filename=self.tbl_udeb_contents.c.filename))
+        mapper(BuildQueue, self.tbl_build_queue,
+               properties = dict(queue_id = self.tbl_build_queue.c.id))
+
+        mapper(BuildQueueFile, self.tbl_build_queue_files,
+               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
+                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))
 
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
@@ -2173,21 +3181,35 @@ class DBConn(Singleton):
                                  maintainer_id = self.tbl_binaries.c.maintainer,
                                  maintainer = relation(Maintainer),
                                  source_id = self.tbl_binaries.c.source,
-                                 source = relation(DBSource),
+                                 source = relation(DBSource, backref='binaries'),
                                  arch_id = self.tbl_binaries.c.architecture,
                                  architecture = relation(Architecture),
                                  poolfile_id = self.tbl_binaries.c.file,
-                                 poolfile = relation(PoolFile),
+                                 poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
                                  binarytype = self.tbl_binaries.c.type,
                                  fingerprint_id = self.tbl_binaries.c.sig_fpr,
                                  fingerprint = relation(Fingerprint),
                                  install_date = self.tbl_binaries.c.install_date,
-                                 binassociations = relation(BinAssociation,
-                                                            primaryjoin=(self.tbl_binaries.c.id==self.tbl_bin_associations.c.bin))))
+                                 suites = relation(Suite, secondary=self.tbl_bin_associations,
+                                     backref=backref('binaries', lazy='dynamic')),
+                                 extra_sources = relation(DBSource, secondary=self.tbl_extra_src_references,
+                                     backref=backref('extra_binary_references', lazy='dynamic')),
+                                 key = relation(BinaryMetadata, cascade='all',
+                                     collection_class=attribute_mapped_collection('key'))),
+                extension = validator)
+
+        mapper(BinaryACL, self.tbl_binary_acl,
+               properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
+
+        mapper(BinaryACLMap, self.tbl_binary_acl_map,
+               properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
+                                 fingerprint = relation(Fingerprint, backref="binary_acl_map"),
+                                 architecture = relation(Architecture)))
 
         mapper(Component, self.tbl_component,
                properties = dict(component_id = self.tbl_component.c.id,
-                                 component_name = self.tbl_component.c.name))
+                                 component_name = self.tbl_component.c.name),
+               extension = validator)
 
         mapper(DBConfig, self.tbl_config,
                properties = dict(config_id = self.tbl_config.c.id))
@@ -2203,61 +3225,129 @@ class DBConn(Singleton):
                properties = dict(file_id = self.tbl_files.c.id,
                                  filesize = self.tbl_files.c.size,
                                  location_id = self.tbl_files.c.location,
-                                 location = relation(Location)))
+                                 location = relation(Location,
+                                     # using lazy='dynamic' in the back
+                                     # reference because we have A LOT of
+                                     # files in one location
+                                     backref=backref('files', lazy='dynamic'))),
+                extension = validator)
 
         mapper(Fingerprint, self.tbl_fingerprint,
                properties = dict(fingerprint_id = self.tbl_fingerprint.c.id,
                                  uid_id = self.tbl_fingerprint.c.uid,
                                  uid = relation(Uid),
                                  keyring_id = self.tbl_fingerprint.c.keyring,
-                                 keyring = relation(Keyring)))
+                                 keyring = relation(Keyring),
+                                 source_acl = relation(SourceACL),
+                                 binary_acl = relation(BinaryACL)),
+               extension = validator)
 
         mapper(Keyring, self.tbl_keyrings,
                properties = dict(keyring_name = self.tbl_keyrings.c.name,
                                  keyring_id = self.tbl_keyrings.c.id))
 
+        mapper(DBChange, self.tbl_changes,
+               properties = dict(change_id = self.tbl_changes.c.id,
+                                 poolfiles = relation(PoolFile,
+                                                      secondary=self.tbl_changes_pool_files,
+                                                      backref="changeslinks"),
+                                 seen = self.tbl_changes.c.seen,
+                                 source = self.tbl_changes.c.source,
+                                 binaries = self.tbl_changes.c.binaries,
+                                 architecture = self.tbl_changes.c.architecture,
+                                 distribution = self.tbl_changes.c.distribution,
+                                 urgency = self.tbl_changes.c.urgency,
+                                 maintainer = self.tbl_changes.c.maintainer,
+                                 changedby = self.tbl_changes.c.changedby,
+                                 date = self.tbl_changes.c.date,
+                                 version = self.tbl_changes.c.version,
+                                 files = relation(ChangePendingFile,
+                                                  secondary=self.tbl_changes_pending_files_map,
+                                                  backref="changesfile"),
+                                 in_queue_id = self.tbl_changes.c.in_queue,
+                                 in_queue = relation(PolicyQueue,
+                                                     primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
+                                 approved_for_id = self.tbl_changes.c.approved_for))
+
+        mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
+               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
+
+        mapper(ChangePendingFile, self.tbl_changes_pending_files,
+               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
+                                 filename = self.tbl_changes_pending_files.c.filename,
+                                 size = self.tbl_changes_pending_files.c.size,
+                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
+                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
+                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
+
+        mapper(ChangePendingSource, self.tbl_changes_pending_source,
+               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
+                                 change = relation(DBChange),
+                                 maintainer = relation(Maintainer,
+                                                       primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
+                                 changedby = relation(Maintainer,
+                                                      primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
+                                 fingerprint = relation(Fingerprint),
+                                 source_files = relation(ChangePendingFile,
+                                                         secondary=self.tbl_changes_pending_source_files,
+                                                         backref="pending_sources")))
+
+
+        mapper(KeyringACLMap, self.tbl_keyring_acl_map,
+               properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
+                                 keyring = relation(Keyring, backref="keyring_acl_map"),
+                                 architecture = relation(Architecture)))
+
         mapper(Location, self.tbl_location,
                properties = dict(location_id = self.tbl_location.c.id,
                                  component_id = self.tbl_location.c.component,
-                                 component = relation(Component),
+                                 component = relation(Component, backref='location'),
                                  archive_id = self.tbl_location.c.archive,
                                  archive = relation(Archive),
-                                 archive_type = self.tbl_location.c.type))
+                                 # FIXME: the 'type' column is old cruft and
+                                 # should be removed in the future.
+                                 archive_type = self.tbl_location.c.type),
+               extension = validator)
 
         mapper(Maintainer, self.tbl_maintainer,
-               properties = dict(maintainer_id = self.tbl_maintainer.c.id))
+               properties = dict(maintainer_id = self.tbl_maintainer.c.id,
+                   maintains_sources = relation(DBSource, backref='maintainer',
+                       primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.maintainer)),
+                   changed_sources = relation(DBSource, backref='changedby',
+                       primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.changedby))),
+                extension = validator)
 
         mapper(NewComment, self.tbl_new_comments,
                properties = dict(comment_id = self.tbl_new_comments.c.id))
 
         mapper(Override, self.tbl_override,
                properties = dict(suite_id = self.tbl_override.c.suite,
-                                 suite = relation(Suite),
+                                 suite = relation(Suite, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  package = self.tbl_override.c.package,
                                  component_id = self.tbl_override.c.component,
-                                 component = relation(Component),
+                                 component = relation(Component, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  priority_id = self.tbl_override.c.priority,
-                                 priority = relation(Priority),
+                                 priority = relation(Priority, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  section_id = self.tbl_override.c.section,
-                                 section = relation(Section),
+                                 section = relation(Section, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  overridetype_id = self.tbl_override.c.type,
-                                 overridetype = relation(OverrideType)))
+                                 overridetype = relation(OverrideType, \
+                                    backref=backref('overrides', lazy='dynamic'))))
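+
+        # For illustration only (assumes an open session named 'session'):
+        # lazy='dynamic' makes each 'overrides' backref a Query object, so
+        # the rows can be narrowed in SQL instead of loaded wholesale, e.g.
+        #   suite = session.query(Suite).filter_by(suite_name='unstable').one()
+        #   suite.overrides.filter(Override.package == 'dpkg').count()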
 
         mapper(OverrideType, self.tbl_override_type,
                properties = dict(overridetype = self.tbl_override_type.c.type,
                                  overridetype_id = self.tbl_override_type.c.id))
 
+        mapper(PolicyQueue, self.tbl_policy_queue,
+               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+
         mapper(Priority, self.tbl_priority,
                properties = dict(priority_id = self.tbl_priority.c.id))
 
-        mapper(Queue, self.tbl_queue,
-               properties = dict(queue_id = self.tbl_queue.c.id))
-
-        mapper(QueueBuild, self.tbl_queue_build,
-               properties = dict(suite_id = self.tbl_queue_build.c.suite,
-                                 queue_id = self.tbl_queue_build.c.queue,
-                                 queue = relation(Queue, backref='queuebuild')))
-
         mapper(Section, self.tbl_section,
                properties = dict(section_id = self.tbl_section.c.id,
                                  section=self.tbl_section.c.section))
@@ -2266,48 +3356,34 @@ class DBConn(Singleton):
                properties = dict(source_id = self.tbl_source.c.id,
                                  version = self.tbl_source.c.version,
                                  maintainer_id = self.tbl_source.c.maintainer,
-                                 maintainer = relation(Maintainer,
-                                                       primaryjoin=(self.tbl_source.c.maintainer==self.tbl_maintainer.c.id)),
                                  poolfile_id = self.tbl_source.c.file,
-                                 poolfile = relation(PoolFile),
+                                 poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
                                  fingerprint_id = self.tbl_source.c.sig_fpr,
                                  fingerprint = relation(Fingerprint),
                                  changedby_id = self.tbl_source.c.changedby,
-                                 changedby = relation(Maintainer,
-                                                      primaryjoin=(self.tbl_source.c.changedby==self.tbl_maintainer.c.id)),
                                  srcfiles = relation(DSCFile,
                                                      primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
-                                 srcassociations = relation(SrcAssociation,
-                                                            primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source))))
+                                 suites = relation(Suite, secondary=self.tbl_src_associations,
+                                     backref=backref('sources', lazy='dynamic')),
+                                 uploaders = relation(Maintainer,
+                                     secondary=self.tbl_src_uploaders),
+                                 key = relation(SourceMetadata, cascade='all',
+                                     collection_class=attribute_mapped_collection('key'))),
+               extension = validator)
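+
+        # For illustration only (assumes an open session): 'suites' walks
+        # the src_associations table transparently, and 'key' exposes the
+        # source's metadata as a dict keyed on MetadataKey objects, e.g.
+        #   src = session.query(DBSource).filter_by(source='dpkg').first()
+        #   names = [s.suite_name for s in src.suites]
+        #   metadata = src.key   # {MetadataKey: SourceMetadata, ...}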
 
-        mapper(SrcAssociation, self.tbl_src_associations,
-               properties = dict(sa_id = self.tbl_src_associations.c.id,
-                                 suite_id = self.tbl_src_associations.c.suite,
-                                 suite = relation(Suite),
-                                 source_id = self.tbl_src_associations.c.source,
-                                 source = relation(DBSource)))
+        mapper(SourceACL, self.tbl_source_acl,
+               properties = dict(source_acl_id = self.tbl_source_acl.c.id))
 
         mapper(SrcFormat, self.tbl_src_format,
                properties = dict(src_format_id = self.tbl_src_format.c.id,
                                  format_name = self.tbl_src_format.c.format_name))
 
-        mapper(SrcUploader, self.tbl_src_uploaders,
-               properties = dict(uploader_id = self.tbl_src_uploaders.c.id,
-                                 source_id = self.tbl_src_uploaders.c.source,
-                                 source = relation(DBSource,
-                                                   primaryjoin=(self.tbl_src_uploaders.c.source==self.tbl_source.c.id)),
-                                 maintainer_id = self.tbl_src_uploaders.c.maintainer,
-                                 maintainer = relation(Maintainer,
-                                                       primaryjoin=(self.tbl_src_uploaders.c.maintainer==self.tbl_maintainer.c.id))))
-
         mapper(Suite, self.tbl_suite,
-               properties = dict(suite_id = self.tbl_suite.c.id))
-
-        mapper(SuiteArchitecture, self.tbl_suite_architectures,
-               properties = dict(suite_id = self.tbl_suite_architectures.c.suite,
-                                 suite = relation(Suite, backref='suitearchitectures'),
-                                 arch_id = self.tbl_suite_architectures.c.architecture,
-                                 architecture = relation(Architecture)))
+               properties = dict(suite_id = self.tbl_suite.c.id,
+                                 policy_queue = relation(PolicyQueue),
+                                 copy_queues = relation(BuildQueue,
+                                     secondary=self.tbl_suite_build_queue_copy)),
+               extension = validator)
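+
+        # For illustration only (queue names are made up): a suite now knows
+        # its policy queue and the build queues its uploads are copied to, e.g.
+        #   suite.policy_queue.queue_name
+        #   [q.queue_name for q in suite.copy_queues]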
 
         mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
                properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
@@ -2317,25 +3393,96 @@ class DBConn(Singleton):
 
         mapper(Uid, self.tbl_uid,
                properties = dict(uid_id = self.tbl_uid.c.id,
-                                 fingerprint = relation(Fingerprint)))
+                                 fingerprint = relation(Fingerprint)),
+               extension = validator)
+
+        mapper(UploadBlock, self.tbl_upload_blocks,
+               properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
+                                 fingerprint = relation(Fingerprint, backref="uploadblocks"),
+                                 uid = relation(Uid, backref="uploadblocks")))
+
+        mapper(BinContents, self.tbl_bin_contents,
+            properties = dict(
+                binary = relation(DBBinary,
+                    backref=backref('contents', lazy='dynamic', cascade='all')),
+                file = self.tbl_bin_contents.c.file))
+
+        mapper(SrcContents, self.tbl_src_contents,
+            properties = dict(
+                source = relation(DBSource,
+                    backref=backref('contents', lazy='dynamic', cascade='all')),
+                file = self.tbl_src_contents.c.file))
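+
+        # For illustration only: both 'contents' backrefs are lazy='dynamic'
+        # Query objects, so file lists can be narrowed in the database, e.g.
+        #   binary.contents.filter(BinContents.file.like('usr/bin/%')).all()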
+
+        mapper(MetadataKey, self.tbl_metadata_keys,
+            properties = dict(
+                key_id = self.tbl_metadata_keys.c.key_id,
+                key = self.tbl_metadata_keys.c.key))
+
+        mapper(BinaryMetadata, self.tbl_binaries_metadata,
+            properties = dict(
+                binary_id = self.tbl_binaries_metadata.c.bin_id,
+                binary = relation(DBBinary),
+                key_id = self.tbl_binaries_metadata.c.key_id,
+                key = relation(MetadataKey),
+                value = self.tbl_binaries_metadata.c.value))
+
+        mapper(SourceMetadata, self.tbl_source_metadata,
+            properties = dict(
+                source_id = self.tbl_source_metadata.c.src_id,
+                source = relation(DBSource),
+                key_id = self.tbl_source_metadata.c.key_id,
+                key = relation(MetadataKey),
+                value = self.tbl_source_metadata.c.value))
+
+        mapper(VersionCheck, self.tbl_version_check,
+            properties = dict(
+                suite_id = self.tbl_version_check.c.suite,
+                suite = relation(Suite, primaryjoin=self.tbl_version_check.c.suite==self.tbl_suite.c.id),
+                reference_id = self.tbl_version_check.c.reference,
+                reference = relation(Suite, primaryjoin=self.tbl_version_check.c.reference==self.tbl_suite.c.id, lazy='joined')))
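+
+        # For illustration only (assumes an open session and a Suite object
+        # 'the_suite'): the two primaryjoins disambiguate the two foreign
+        # keys into the suite table, e.g.
+        #   for vc in session.query(VersionCheck).filter_by(suite=the_suite):
+        #       print vc.check, vc.reference.suite_name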
 
     ## Connection functions
     def __createconn(self):
         from config import Config
         cnf = Config()
-        if cnf["DB::Host"]:
+        if cnf.has_key("DB::Service"):
+            connstr = "postgresql://service=%s" % cnf["DB::Service"]
+        elif cnf.has_key("DB::Host"):
             # TCP/IP
-            connstr = "postgres://%s" % cnf["DB::Host"]
-            if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+            connstr = "postgresql://%s" % cnf["DB::Host"]
+            if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
                 connstr += ":%s" % cnf["DB::Port"]
             connstr += "/%s" % cnf["DB::Name"]
         else:
             # Unix Socket
-            connstr = "postgres:///%s" % cnf["DB::Name"]
-            if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+            connstr = "postgresql:///%s" % cnf["DB::Name"]
+            if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
                 connstr += "?port=%s" % cnf["DB::Port"]
 
-        self.db_pg   = create_engine(connstr, echo=self.debug)
+        engine_args = { 'echo': self.debug }
+        if cnf.has_key('DB::PoolSize'):
+            engine_args['pool_size'] = int(cnf['DB::PoolSize'])
+        if cnf.has_key('DB::MaxOverflow'):
+            engine_args['max_overflow'] = int(cnf['DB::MaxOverflow'])
+        if sa_major_version == '0.6' and cnf.has_key('DB::Unicode') and \
+                cnf['DB::Unicode'] == 'false':
+            engine_args['use_native_unicode'] = False
+
+        # Monkey patch in a new dialect in order to support the service= syntax
+        import sqlalchemy.dialects.postgresql
+        from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
+        class PGDialect_psycopg2_dak(PGDialect_psycopg2):
+            def create_connect_args(self, url):
+                if str(url).startswith('postgresql://service='):
+                    # Eww: slice off the 21-character 'postgresql://service='
+                    # prefix and hand psycopg2 a bare 'service=<name>' DSN.
+                    servicename = str(url)[21:]
+                    return (['service=%s' % servicename], {})
+                else:
+                    return PGDialect_psycopg2.create_connect_args(self, url)
+
+        sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak
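+
+        # For illustration: with the patched dialect in place, a URL such as
+        # 'postgresql://service=projectb' yields the connect args
+        # (['service=projectb'], {}), so psycopg2/libpq read host, port and
+        # database from the named service in pg_service.conf.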
+
+        self.db_pg   = create_engine(connstr, **engine_args)
         self.db_meta = MetaData()
         self.db_meta.bind = self.db_pg
         self.db_smaker = sessionmaker(bind=self.db_pg,
@@ -2344,8 +3491,13 @@ class DBConn(Singleton):
 
         self.__setuptables()
         self.__setupmappers()
+        self.pid = os.getpid()
 
     def session(self):
+        # Reinitialize DBConn in new processes: a forked child must not
+        # reuse the parent's engine or its pooled connections.
+        if self.pid != os.getpid():
+            clear_mappers()
+            self.__createconn()
         return self.db_smaker()
 
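+# For illustration only (sketch): after a fork the child's first session()
+# call sees a changed pid, throws away the inherited mappers and engine and
+# reconnects, so it is safe to do e.g.
+#   if os.fork() == 0:
+#       session = DBConn().session()   # fresh engine in the child
+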
 __all__.append('DBConn')