Suppress warnings in the most suitable files.
index 47f4e5f2758de6536b1d2886126b18ed2dc984a5..72453f643d26dc35b768395d5d9886264d49b3eb 100755 (executable)
@@ -5,7 +5,7 @@
 @contact: Debian FTPMaster <ftpmaster@debian.org>
 @copyright: 2000, 2001, 2002, 2003, 2004, 2006  James Troup <james@nocrew.org>
 @copyright: 2008-2009  Mark Hymers <mhy@debian.org>
-@copyright: 2009  Joerg Jaspert <joerg@debian.org>
+@copyright: 2009, 2010  Joerg Jaspert <joerg@debian.org>
 @copyright: 2009  Mike O'Connor <stew@debian.org>
 @license: GNU General Public License version 2 or later
 """
 ################################################################################
 
 import os
+import re
 import psycopg2
 import traceback
+import commands
+from datetime import datetime, timedelta
+from errno import ENOENT
+from tempfile import mkstemp, mkdtemp
 
 from inspect import getargspec
 
-from sqlalchemy import create_engine, Table, MetaData
-from sqlalchemy.orm import sessionmaker, mapper, relation
+import sqlalchemy
+from sqlalchemy import create_engine, Table, MetaData, Column, Integer
+from sqlalchemy.orm import sessionmaker, mapper, relation, object_session
+from sqlalchemy import types as sqltypes
 
 # Don't remove this, we re-export the exceptions to scripts which import us
 from sqlalchemy.exc import *
@@ -49,12 +56,52 @@ from sqlalchemy.orm.exc import NoResultFound
 # Only import Config until Queue stuff is changed to store its config
 # in the database
 from config import Config
-from singleton import Singleton
 from textutils import fix_maintainer
+from dak_exceptions import NoSourceFieldError
+
+# suppress some deprecation warnings in squeeze related to sqlalchemy
+import warnings
+warnings.filterwarnings('ignore',
+    "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*",
+    SADeprecationWarning)
+# TODO: sqlalchemy needs some extra configuration to correctly reflect
+# the ind_deb_contents_* indexes - we ignore the warnings at the moment
+warnings.filterwarnings("ignore", 'Predicate of partial index', SAWarning)
+
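A standalone sketch (not dak code) of the mechanism used above: a message regex plus a warning category suppresses only the matching warnings, while everything else still surfaces.

    import warnings

    class FakeSAWarning(UserWarning):
        # stand-in for sqlalchemy's SAWarning so the sketch runs anywhere
        pass

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        warnings.filterwarnings("ignore", "Predicate of partial index", FakeSAWarning)
        warnings.warn("Predicate of partial index foo", FakeSAWarning)   # suppressed
        warnings.warn("some unrelated warning", FakeSAWarning)           # kept
    assert len(caught) == 1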
 
 ################################################################################
 
-__all__ = ['IntegrityError', 'SQLAlchemyError']
+# Patch in support for the debversion field type so that it works during
+# reflection
+
+try:
+    # this is for sqlalchemy 0.6
+    UserDefinedType = sqltypes.UserDefinedType
+except AttributeError:
+    # this one is for sqlalchemy 0.5
+    UserDefinedType = sqltypes.TypeEngine
+
+class DebVersion(UserDefinedType):
+    def get_col_spec(self):
+        return "DEBVERSION"
+
+    def bind_processor(self, dialect):
+        return None
+
+    # ' = None' is needed for sqlalchemy 0.5:
+    def result_processor(self, dialect, coltype = None):
+        return None
+
+sa_major_version = sqlalchemy.__version__[0:3]
+if sa_major_version in ["0.5", "0.6"]:
+    from sqlalchemy.databases import postgres
+    postgres.ischema_names['debversion'] = DebVersion
+else:
+    raise Exception("dak only ported to SQLA versions 0.5 and 0.6.  See daklib/dbconn.py")
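A hedged sketch of what this buys (assumes a reachable projectb database whose source table declares its version column with the custom debversion PostgreSQL type):

    engine = create_engine("postgres:///projectb")
    meta = MetaData(bind=engine)
    # Reflection now resolves 'debversion' columns to DebVersion instead
    # of complaining about an unknown type.
    source_table = Table("source", meta, autoload=True)
    assert isinstance(source_table.c.version.type, DebVersion)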
+
+################################################################################
+
+__all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion']
 
 ################################################################################
 
@@ -105,11 +152,14 @@ def session_wrapper(fn):
 
     return wrapped
 
+__all__.append('session_wrapper')
+
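For orientation, the decorator exported above is roughly equivalent to this sketch (not the exact dak implementation; it glosses over a session passed positionally):

    def session_wrapper_sketch(fn):
        def wrapped(*args, **kwargs):
            # Open a private session when the caller did not supply one,
            # and make sure it is closed again afterwards.
            private = kwargs.get("session") is None
            if private:
                kwargs["session"] = DBConn().session()
            try:
                return fn(*args, **kwargs)
            finally:
                if private:
                    kwargs["session"].close()
        return wrapped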
 ################################################################################
 
 class Architecture(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def __init__(self, arch_string = None, description = None):
+        self.arch_string = arch_string
+        self.description = description
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -153,13 +203,14 @@ def get_architecture(architecture, session=None):
 
 __all__.append('get_architecture')
 
+# TODO: should be removed because the implementation is too trivial
 @session_wrapper
 def get_architecture_suites(architecture, session=None):
     """
     Returns list of Suite objects for given C{architecture} name
 
-    @type source: str
-    @param source: Architecture name to search for
+    @type architecture: str
+    @param architecture: Architecture name to search for
 
     @type session: Session
     @param session: Optional SQL session object (a temporary one will be
@@ -169,13 +220,7 @@ def get_architecture_suites(architecture, session=None):
     @return: list of Suite objects for the given name (may be empty)
     """
 
-    q = session.query(Suite)
-    q = q.join(SuiteArchitecture)
-    q = q.join(Architecture).filter_by(arch_string=architecture).order_by('suite_name')
-
-    ret = q.all()
-
-    return ret
+    return get_architecture(architecture, session).suites
 
 __all__.append('get_architecture_suites')
 
@@ -255,8 +300,8 @@ def get_suites_binary_in(package, session=None):
     """
     Returns list of Suite objects which given C{package} name is in
 
-    @type source: str
-    @param source: DBBinary package name to search for
+    @type package: str
+    @param package: DBBinary package name to search for
 
     @rtype: list
     @return: list of Suite objects for the given package
@@ -267,12 +312,12 @@ def get_suites_binary_in(package, session=None):
 __all__.append('get_suites_binary_in')
 
 @session_wrapper
-def get_binary_from_id(id, session=None):
+def get_binary_from_id(binary_id, session=None):
     """
     Returns DBBinary object for given C{id}
 
-    @type id: int
-    @param id: Id of the required binary
+    @type binary_id: int
+    @param binary_id: Id of the required binary
 
     @type session: Session
     @param session: Optional SQLA session object (a temporary one will be
@@ -282,7 +327,7 @@ def get_binary_from_id(id, session=None):
     @return: DBBinary object for the given binary (None if not present)
     """
 
-    q = session.query(DBBinary).filter_by(binary_id=id)
+    q = session.query(DBBinary).filter_by(binary_id=binary_id)
 
     try:
         return q.one()
@@ -302,8 +347,8 @@ def get_binaries_from_name(package, version=None, architecture=None, session=Non
     @type version: str or None
     @param version: Version to search for (or None)
 
-    @type package: str, list or None
-    @param package: Architectures to limit to (or None if no limit)
+    @type architecture: str, list or None
+    @param architecture: Architectures to limit to (or None if no limit)
 
     @type session: Session
     @param session: Optional SQL session object (a temporary one will be
@@ -356,16 +401,16 @@ def get_binary_from_name_suite(package, suitename, session=None):
 
     sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name
              FROM binaries b, files fi, location l, component c, bin_associations ba, suite su
-             WHERE b.package=:package
+             WHERE b.package='%(package)s'
                AND b.file = fi.id
                AND fi.location = l.id
                AND l.component = c.id
                AND ba.bin=b.id
                AND ba.suite = su.id
-               AND su.suite_name=:suitename
+               AND su.suite_name %(suitename)s
           ORDER BY b.version DESC"""
 
-    return session.execute(sql, {'package': package, 'suitename': suitename})
+    return session.execute(sql % {'package': package, 'suitename': suitename})
 
 __all__.append('get_binary_from_name_suite')
 
@@ -388,6 +433,345 @@ __all__.append('get_binary_components')
 
 ################################################################################
 
+class BinaryACL(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BinaryACL %s>' % self.binary_acl_id
+
+__all__.append('BinaryACL')
+
+################################################################################
+
+class BinaryACLMap(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BinaryACLMap %s>' % self.binary_acl_map_id
+
+__all__.append('BinaryACLMap')
+
+################################################################################
+
+MINIMAL_APT_CONF="""
+Dir
+{
+   ArchiveDir "%(archivepath)s";
+   OverrideDir "%(overridedir)s";
+   CacheDir "%(cachedir)s";
+};
+
+Default
+{
+   Packages::Compress ". bzip2 gzip";
+   Sources::Compress ". bzip2 gzip";
+   DeLinkLimit 0;
+   FileMode 0664;
+}
+
+bindirectory "incoming"
+{
+   Packages "Packages";
+   Contents " ";
+
+   BinOverride "override.sid.all3";
+   BinCacheDB "packages-accepted.db";
+
+   FileList "%(filelist)s";
+
+   PathPrefix "";
+   Packages::Extensions ".deb .udeb";
+};
+
+bindirectory "incoming/"
+{
+   Sources "Sources";
+   BinOverride "override.sid.all3";
+   SrcOverride "override.sid.all3.src";
+   FileList "%(filelist)s";
+};
+"""
+
+class BuildQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueue %s>' % self.queue_name
+
+    def write_metadata(self, starttime, force=False):
+        # Do we write out metafiles?
+        if not (force or self.generate_metadata):
+            return
+
+        session = object_session(self)
+
+        fl_fd = fl_name = ac_fd = ac_name = None
+        tempdir = None
+        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
+        startdir = os.getcwd()
+
+        try:
+            # Grab files we want to include
+            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+            # Write file list with newer files
+            (fl_fd, fl_name) = mkstemp()
+            for n in newer:
+                os.write(fl_fd, '%s\n' % n.fullpath)
+            os.close(fl_fd)
+
+            cnf = Config()
+
+            # Write minimal apt.conf
+            # TODO: Remove hardcoding from template
+            (ac_fd, ac_name) = mkstemp()
+            os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
+                                                'filelist': fl_name,
+                                                'cachedir': cnf["Dir::Cache"],
+                                                'overridedir': cnf["Dir::Override"],
+                                                })
+            os.close(ac_fd)
+
+            # Run apt-ftparchive generate
+            os.chdir(os.path.dirname(ac_name))
+            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
+
+            # Run apt-ftparchive release
+            # TODO: Eww - fix this
+            bname = os.path.basename(self.path)
+            os.chdir(self.path)
+            os.chdir('..')
+
+            # We have to remove the Release file otherwise it'll be included in the
+            # new one
+            try:
+                os.unlink(os.path.join(bname, 'Release'))
+            except OSError:
+                pass
+
+            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
+
+            # Crude hack with open and append, but this whole section is and should be redone.
+            if self.notautomatic:
+                release = open("Release", "a")
+                release.write("NotAutomatic: yes\n")
+                release.close()
+
+            # Sign if necessary
+            if self.signingkey:
+                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
+                if cnf.has_key("Dinstall::SigningPubKeyring"):
+                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
+
+                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
+
+            # Move the files if we got this far
+            os.rename('Release', os.path.join(bname, 'Release'))
+            if self.signingkey:
+                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
+
+        # Clean up any left behind files
+        finally:
+            os.chdir(startdir)
+            if fl_fd:
+                try:
+                    os.close(fl_fd)
+                except OSError:
+                    pass
+
+            if fl_name:
+                try:
+                    os.unlink(fl_name)
+                except OSError:
+                    pass
+
+            if ac_fd:
+                try:
+                    os.close(ac_fd)
+                except OSError:
+                    pass
+
+            if ac_name:
+                try:
+                    os.unlink(ac_name)
+                except OSError:
+                    pass
+
+    def clean_and_update(self, starttime, Logger, dryrun=False):
+        """WARNING: This routine commits for you"""
+        session = object_session(self)
+
+        if self.generate_metadata and not dryrun:
+            self.write_metadata(starttime)
+
+        # Grab files older than our execution time
+        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+
+        for o in older:
+            killdb = False
+            try:
+                if dryrun:
+                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
+                else:
+                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
+                    os.unlink(o.fullpath)
+                    killdb = True
+            except OSError, e:
+                # If it wasn't there, don't worry
+                if e.errno == ENOENT:
+                    killdb = True
+                else:
+                    # TODO: Replace with proper logging call
+                    Logger.log(["E: Could not remove %s" % o.fullpath])
+
+            if killdb:
+                session.delete(o)
+
+        session.commit()
+
+        for f in os.listdir(self.path):
+            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
+                continue
+
+            try:
+                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+            except NoResultFound:
+                fp = os.path.join(self.path, f)
+                if dryrun:
+                    Logger.log(["I: Would remove unused link %s" % fp])
+                else:
+                    Logger.log(["I: Removing unused link %s" % fp])
+                    try:
+                        os.unlink(fp)
+                    except OSError:
+                        Logger.log(["E: Failed to unlink unreferenced file %s" % fp])
+
+    def add_file_from_pool(self, poolfile):
+        """Copies a file into the pool.  Assumes that the PoolFile object is
+        attached to the same SQLAlchemy session as the Queue object is.
+
+        The caller is responsible for committing after calling this function."""
+        poolfile_basename = os.path.basename(poolfile.filename)
+
+        # Check if we have a file of this name or this ID already
+        for f in self.queuefiles:
+            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
+               f.poolfile.filename == poolfile_basename:
+                   # In this case, update the BuildQueueFile entry so we
+                   # don't remove it too early
+                   f.lastused = datetime.now()
+                   object_session(poolfile).add(f)
+                   return f
+
+        # Prepare BuildQueueFile object
+        qf = BuildQueueFile()
+        qf.build_queue_id = self.queue_id
+        qf.lastused = datetime.now()
+        qf.filename = poolfile_basename
+
+        targetpath = poolfile.fullpath
+        queuepath = os.path.join(self.path, poolfile_basename)
+
+        try:
+            if self.copy_files:
+                # We need to copy instead of symlink
+                import utils
+                utils.copy(targetpath, queuepath)
+                # NULL in the fileid field implies a copy
+                qf.fileid = None
+            else:
+                os.symlink(targetpath, queuepath)
+                qf.fileid = poolfile.file_id
+        except OSError:
+            return None
+
+        # Get the same session as the PoolFile is using and add the qf to it
+        object_session(poolfile).add(qf)
+
+        return qf
+
+
+__all__.append('BuildQueue')
+
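A usage sketch for the class above (some_poolfile is a hypothetical PoolFile attached to the same session; get_build_queue is defined just below):

    session = DBConn().session()
    bq = get_build_queue("buildd", session)
    qf = bq.add_file_from_pool(some_poolfile)   # symlink or copy into the queue dir
    session.commit()                            # add_file_from_pool leaves committing to us
    bq.write_metadata(datetime.now(), force=True)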
+@session_wrapper
+def get_build_queue(queuename, session=None):
+    """
+    Returns BuildQueue object for given C{queue name}.
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: BuildQueue
+    @return: BuildQueue object for the given queue (C{None} if not present)
+    """
+
+    q = session.query(BuildQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_build_queue')
+
+################################################################################
+
+class BuildQueueFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
+
+    @property
+    def fullpath(self):
+        return os.path.join(self.buildqueue.path, self.filename)
+
+
+__all__.append('BuildQueueFile')
+
+################################################################################
+
+class ChangePendingBinary(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
+
+__all__.append('ChangePendingBinary')
+
+################################################################################
+
+class ChangePendingFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingFile %s>' % self.change_pending_file_id
+
+__all__.append('ChangePendingFile')
+
+################################################################################
+
+class ChangePendingSource(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingSource %s>' % self.change_pending_source_id
+
+__all__.append('ChangePendingSource')
+
+################################################################################
+
 class Component(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -548,8 +932,9 @@ def get_or_set_contents_path_id(filepath, session=None):
 
     If no matching file is found, a row is inserted.
 
-    @type filename: string
-    @param filename: The filepath
+    @type filepath: string
+    @param filepath: The filepath
+
     @type session: SQLAlchemy
     @param session: Optional SQL session object (a temporary one will be
     generated if not supplied).  If not passed, a commit will be performed at
@@ -610,17 +995,16 @@ def insert_content_paths(binary_id, fullpaths, session=None):
 
     try:
         # Insert paths
-        pathcache = {}
-
         def generate_path_dicts():
             for fullpath in fullpaths:
                 if fullpath.startswith( './' ):
                     fullpath = fullpath[2:]
 
-                yield {'fulename':fullpath, 'id': binary_id }
+                yield {'filename':fullpath, 'id': binary_id }
 
-        session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
-                         generate_path_dicts() )
+        for d in generate_path_dicts():
+            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
+                             d )
 
         session.commit()
         if privatetrans:
@@ -692,13 +1076,17 @@ class PoolFile(object):
     def __repr__(self):
         return '<PoolFile %s>' % self.filename
 
+    @property
+    def fullpath(self):
+        return os.path.join(self.location.path, self.filename)
+
 __all__.append('PoolFile')
 
 @session_wrapper
 def check_poolfile(filename, filesize, md5sum, location_id, session=None):
     """
     Returns a tuple:
-     (ValidFileFound [boolean or None], PoolFile object or None)
+    (ValidFileFound [boolean or None], PoolFile object or None)
 
     @type filename: string
     @param filename: the filename of the file to check against the DB
@@ -714,12 +1102,11 @@ def check_poolfile(filename, filesize, md5sum, location_id, session=None):
 
     @rtype: tuple
     @return: Tuple of length 2.
-             If more than one file found with that name:
-                    (None,  None)
-             If valid pool file found: (True, PoolFile object)
-             If valid pool file not found:
-                    (False, None) if no file found
-                    (False, PoolFile object) if file found with size/md5sum mismatch
+                 - If more than one file found with that name: (C{None},  C{None})
+                 - If valid pool file found: (C{True}, C{PoolFile object})
+                 - If valid pool file not found:
+                     - (C{False}, C{None}) if no file found
+                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
     """
 
     q = session.query(PoolFile).filter_by(filename=filename)
@@ -733,7 +1120,7 @@ def check_poolfile(filename, filesize, md5sum, location_id, session=None):
         ret = (False, None)
     else:
         obj = q.one()
-        if obj.md5sum != md5sum or obj.filesize != filesize:
+        if obj.md5sum != md5sum or obj.filesize != int(filesize):
             ret = (False, obj)
 
     if ret is None:
@@ -803,23 +1190,83 @@ def get_poolfile_like_name(filename, session=None):
     """
 
     # TODO: There must be a way of properly using bind parameters with %FOO%
-    q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename))
+    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
 
     return q.all()
 
 __all__.append('get_poolfile_like_name')
 
+@session_wrapper
+def add_poolfile(filename, datadict, location_id, session=None):
+    """
+    Add a new file to the pool
+
+    @type filename: string
+    @param filename: filename
+
+    @type datadict: dict
+    @param datadict: dict carrying the file's C{size}, C{md5sum}, C{sha1sum}
+    and C{sha256sum}
+
+    @type location_id: int
+    @param location_id: database id of the location
+
+    @rtype: PoolFile
+    @return: the PoolFile object created
+    """
+    poolfile = PoolFile()
+    poolfile.filename = filename
+    poolfile.filesize = datadict["size"]
+    poolfile.md5sum = datadict["md5sum"]
+    poolfile.sha1sum = datadict["sha1sum"]
+    poolfile.sha256sum = datadict["sha256sum"]
+    poolfile.location_id = location_id
+
+    session.add(poolfile)
+    # Flush to get a file id (NB: This is not a commit)
+    session.flush()
+
+    return poolfile
+
+__all__.append('add_poolfile')
+
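A usage sketch (all values illustrative; the checksums happen to be the well-known empty-input digests, and location_id/session come from elsewhere):

    pf = add_poolfile("pool/main/h/hello/hello_2.4-1.dsc",
                      {"size": 1234,
                       "md5sum": "d41d8cd98f00b204e9800998ecf8427e",
                       "sha1sum": "da39a3ee5e6b4b0d3255bfef95601890afd80709",
                       "sha256sum": "e3b0c44298fc1c149afbf4c8996fb924"
                                    "27ae41e4649b934ca495991b7852b855"},
                      location_id, session)
    # pf.file_id is usable immediately thanks to the flush() above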
 ################################################################################
 
 class Fingerprint(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def __init__(self, fingerprint = None):
+        self.fingerprint = fingerprint
 
     def __repr__(self):
         return '<Fingerprint %s>' % self.fingerprint
 
 __all__.append('Fingerprint')
 
+@session_wrapper
+def get_fingerprint(fpr, session=None):
+    """
+    Returns Fingerprint object for given fpr.
+
+    @type fpr: string
+    @param fpr: The fpr to find / add
+
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).
+
+    @rtype: Fingerprint
+    @return: the Fingerprint object for the given fpr or None
+    """
+
+    q = session.query(Fingerprint).filter_by(fingerprint=fpr)
+
+    try:
+        ret = q.one()
+    except NoResultFound:
+        ret = None
+
+    return ret
+
+__all__.append('get_fingerprint')
+
 @session_wrapper
 def get_or_set_fingerprint(fpr, session=None):
     """
@@ -857,20 +1304,143 @@ __all__.append('get_or_set_fingerprint')
 
 ################################################################################
 
+# Helper routine for Keyring class
+def get_ldap_name(entry):
+    name = []
+    for k in ["cn", "mn", "sn"]:
+        ret = entry.get(k)
+        if ret and ret[0] != "" and ret[0] != "-":
+            name.append(ret[0])
+    return " ".join(name)
+
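For example (middle names recorded as "-" are treated as absent):

    assert get_ldap_name({"cn": ["James"], "mn": ["-"], "sn": ["Troup"]}) == "James Troup"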
+################################################################################
+
 class Keyring(object):
+    gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
+                     " --with-colons --fingerprint --fingerprint"
+
+    keys = {}
+    fpr_lookup = {}
+
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
         return '<Keyring %s>' % self.keyring_name
 
+    def de_escape_gpg_str(self, txt):
+        esclist = re.split(r'(\\x..)', txt)
+        for x in range(1,len(esclist),2):
+            esclist[x] = "%c" % (int(esclist[x][2:],16))
+        return "".join(esclist)
+
+    def parse_address(self, uid):
+        """parses uid and returns a tuple of real name and email address"""
+        import email.Utils
+        (name, address) = email.Utils.parseaddr(uid)
+        name = re.sub(r"\s*[(].*[)]", "", name)
+        name = self.de_escape_gpg_str(name)
+        if name == "":
+            name = uid
+        return (name, address)
+
+    def load_keys(self, keyring):
+        if not self.keyring_id:
+            raise Exception('Must be initialized with database information')
+
+        k = os.popen(self.gpg_invocation % keyring, "r")
+        key = None
+        signingkey = False
+
+        for line in k.xreadlines():
+            field = line.split(":")
+            if field[0] == "pub":
+                key = field[4]
+                self.keys[key] = {}
+                (name, addr) = self.parse_address(field[9])
+                if "@" in addr:
+                    self.keys[key]["email"] = addr
+                    self.keys[key]["name"] = name
+                self.keys[key]["fingerprints"] = []
+                signingkey = True
+            elif key and field[0] == "sub" and len(field) >= 12:
+                signingkey = ("s" in field[11])
+            elif key and field[0] == "uid":
+                (name, addr) = self.parse_address(field[9])
+                if "email" not in self.keys[key] and "@" in addr:
+                    self.keys[key]["email"] = addr
+                    self.keys[key]["name"] = name
+            elif signingkey and field[0] == "fpr":
+                self.keys[key]["fingerprints"].append(field[9])
+                self.fpr_lookup[field[9]] = key
+
+    def import_users_from_ldap(self, session):
+        import ldap
+        cnf = Config()
+
+        LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
+        LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
+
+        l = ldap.open(LDAPServer)
+        l.simple_bind_s("","")
+        Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
+               "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
+               ["uid", "keyfingerprint", "cn", "mn", "sn"])
+
+        ldap_fin_uid_id = {}
+
+        byuid = {}
+        byname = {}
+
+        for i in Attrs:
+            entry = i[1]
+            uid = entry["uid"][0]
+            name = get_ldap_name(entry)
+            fingerprints = entry["keyFingerPrint"]
+            keyid = None
+            for f in fingerprints:
+                key = self.fpr_lookup.get(f, None)
+                if key not in self.keys:
+                    continue
+                self.keys[key]["uid"] = uid
+
+                if keyid is not None:
+                    continue
+                keyid = get_or_set_uid(uid, session).uid_id
+                byuid[keyid] = (uid, name)
+                byname[uid] = (keyid, name)
+
+        return (byname, byuid)
+
+    def generate_users_from_keyring(self, format, session):
+        byuid = {}
+        byname = {}
+        any_invalid = False
+        for x in self.keys.keys():
+            if "email" not in self.keys[x]:
+                any_invalid = True
+                self.keys[x]["uid"] = format % "invalid-uid"
+            else:
+                uid = format % self.keys[x]["email"]
+                keyid = get_or_set_uid(uid, session).uid_id
+                byuid[keyid] = (uid, self.keys[x]["name"])
+                byname[uid] = (keyid, self.keys[x]["name"])
+                self.keys[x]["uid"] = uid
+
+        if any_invalid:
+            uid = format % "invalid-uid"
+            keyid = get_or_set_uid(uid, session).uid_id
+            byuid[keyid] = (uid, "ungeneratable user id")
+            byname[uid] = (keyid, "ungeneratable user id")
+
+        return (byname, byuid)
+
 __all__.append('Keyring')
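Two quick checks of the parsing helpers above; for orientation, gpg --with-colons emits one colon-separated record per line, where field[0] is the record type, field[4] the key id, field[9] the uid (or the fingerprint on fpr records) and field[11] the capability letters tested for "s".

    kr = Keyring()
    # gpg escapes e.g. ":" inside uids as \x3a; de_escape_gpg_str undoes that
    assert kr.de_escape_gpg_str("a\\x3ab") == "a:b"
    # parenthesised comments are stripped from the real name
    assert kr.parse_address("James Troup (work) <james@nocrew.org>") == \
           ("James Troup", "james@nocrew.org")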
 
 @session_wrapper
-def get_or_set_keyring(keyring, session=None):
+def get_keyring(keyring, session=None):
     """
-    If C{keyring} does not have an entry in the C{keyrings} table yet, create one
-    and return the new Keyring
+    If C{keyring} does not have an entry in the C{keyrings} table yet, return None.
     If C{keyring} already has an entry, simply return the existing Keyring
 
     @type keyring: string
@@ -885,12 +1455,69 @@ def get_or_set_keyring(keyring, session=None):
     try:
         return q.one()
     except NoResultFound:
-        obj = Keyring(keyring_name=keyring)
-        session.add(obj)
-        session.commit_or_flush()
-        return obj
+        return None
+
+__all__.append('get_keyring')
+
+################################################################################
+
+class KeyringACLMap(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<KeyringACLMap %s>' % self.keyring_acl_map_id
+
+__all__.append('KeyringACLMap')
+
+################################################################################
+
+class DBChange(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<DBChange %s>' % self.changesname
+
+    def clean_from_queue(self):
+        session = object_session(self)
+
+        # Remove changes_pool_files entries
+        self.poolfiles = []
+
+        # Remove changes_pending_files references
+        self.files = []
+
+        # Clear out of queue
+        self.in_queue = None
+        self.approved_for_id = None
+
+__all__.append('DBChange')
+
+@session_wrapper
+def get_dbchange(filename, session=None):
+    """
+    Returns DBChange object for given C{filename}.
+
+    @type filename: string
+    @param filename: the name of the file
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
 
-__all__.append('get_or_set_keyring')
+    @rtype: DBChange
+    @return:  DBChange object for the given filename (C{None} if not present)
+
+    """
+    q = session.query(DBChange).filter_by(changesname=filename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_dbchange')
 
 ################################################################################
 
@@ -910,13 +1537,13 @@ def get_location(location, component=None, archive=None, session=None):
     and archive
 
     @type location: string
-    @param location: the path of the location, e.g. I{/srv/ftp.debian.org/ftp/pool/}
+    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
 
     @type component: string
     @param component: the component name (if None, no restriction applied)
 
     @type archive: string
-    @param archive_id: the archive name (if None, no restriction applied)
+    @param archive: the archive name (if None, no restriction applied)
 
     @rtype: Location / None
     @return: Either a Location object or None if one can't be found
@@ -1277,255 +1904,134 @@ __all__.append('insert_pending_content_paths')
 
 ################################################################################
 
-class Priority(object):
+class PolicyQueue(object):
     def __init__(self, *args, **kwargs):
         pass
 
-    def __eq__(self, val):
-        if isinstance(val, str):
-            return (self.priority == val)
-        # This signals to use the normal comparison operator
-        return NotImplemented
-
-    def __ne__(self, val):
-        if isinstance(val, str):
-            return (self.priority != val)
-        # This signals to use the normal comparison operator
-        return NotImplemented
-
     def __repr__(self):
-        return '<Priority %s (%s)>' % (self.priority, self.priority_id)
+        return '<PolicyQueue %s>' % self.queue_name
 
-__all__.append('Priority')
+__all__.append('PolicyQueue')
 
 @session_wrapper
-def get_priority(priority, session=None):
+def get_policy_queue(queuename, session=None):
     """
-    Returns Priority object for given C{priority name}.
+    Returns PolicyQueue object for given C{queue name}
 
-    @type priority: string
-    @param priority: The name of the priority
+    @type queuename: string
+    @param queuename: The name of the queue
 
     @type session: Session
     @param session: Optional SQLA session object (a temporary one will be
     generated if not supplied)
 
-    @rtype: Priority
-    @return: Priority object for the given priority
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given queue (C{None} if not present)
     """
 
-    q = session.query(Priority).filter_by(priority=priority)
+    q = session.query(PolicyQueue).filter_by(queue_name=queuename)
 
     try:
         return q.one()
     except NoResultFound:
         return None
 
-__all__.append('get_priority')
+__all__.append('get_policy_queue')
 
 @session_wrapper
-def get_priorities(session=None):
+def get_policy_queue_from_path(pathname, session=None):
     """
-    Returns dictionary of priority names -> id mappings
+    Returns PolicyQueue object for given C{path name}
+
+    @type pathname: string
+    @param pathname: The path of the queue
 
     @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
+    @param session: Optional SQLA session object (a temporary one will be
     generated if not supplied)
 
-    @rtype: dictionary
-    @return: dictionary of priority names -> id mappings
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given path (C{None} if not present)
     """
 
-    ret = {}
-    q = session.query(Priority)
-    for x in q.all():
-        ret[x.priority] = x.priority_id
+    q = session.query(PolicyQueue).filter_by(path=pathname)
 
-    return ret
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
 
-__all__.append('get_priorities')
+__all__.append('get_policy_queue_from_path')
 
 ################################################################################
 
-class Queue(object):
+class Priority(object):
     def __init__(self, *args, **kwargs):
         pass
 
-    def __repr__(self):
-        return '<Queue %s>' % self.queue_name
-
-    def autobuild_upload(self, changes, srcpath, session=None):
-        """
-        Update queue_build database table used for incoming autobuild support.
-
-        @type changes: Changes
-        @param changes: changes object for the upload to process
-
-        @type srcpath: string
-        @param srcpath: path for the queue file entries/link destinations
-
-        @type session: SQLAlchemy session
-        @param session: Optional SQLAlchemy session.  If this is passed, the
-        caller is responsible for ensuring a transaction has begun and
-        committing the results or rolling back based on the result code.  If
-        not passed, a commit will be performed at the end of the function,
-        otherwise the caller is responsible for commiting.
-
-        @rtype: NoneType or string
-        @return: None if the operation failed, a string describing the error if not
-        """
-
-        privatetrans = False
-        if session is None:
-            session = DBConn().session()
-            privatetrans = True
-
-        # TODO: Remove by moving queue config into the database
-        conf = Config()
-
-        for suitename in changes.changes["distribution"].keys():
-            # TODO: Move into database as:
-            #       buildqueuedir TEXT DEFAULT NULL (i.e. NULL is no build)
-            #       buildqueuecopy BOOLEAN NOT NULL DEFAULT FALSE (i.e. default is symlink)
-            #       This also gets rid of the SecurityQueueBuild hack below
-            if suitename not in conf.ValueList("Dinstall::QueueBuildSuites"):
-                continue
-
-            # Find suite object
-            s = get_suite(suitename, session)
-            if s is None:
-                return "INTERNAL ERROR: Could not find suite %s" % suitename
-
-            # TODO: Get from database as above
-            dest_dir = conf["Dir::QueueBuild"]
-
-            # TODO: Move into database as above
-            if conf.FindB("Dinstall::SecurityQueueBuild"):
-                dest_dir = os.path.join(dest_dir, suitename)
-
-            for file_entry in changes.files.keys():
-                src = os.path.join(srcpath, file_entry)
-                dest = os.path.join(dest_dir, file_entry)
-
-                # TODO: Move into database as above
-                if conf.FindB("Dinstall::SecurityQueueBuild"):
-                    # Copy it since the original won't be readable by www-data
-                    import utils
-                    utils.copy(src, dest)
-                else:
-                    # Create a symlink to it
-                    os.symlink(src, dest)
-
-                qb = QueueBuild()
-                qb.suite_id = s.suite_id
-                qb.queue_id = self.queue_id
-                qb.filename = dest
-                qb.in_queue = True
-
-                session.add(qb)
-
-            exists, symlinked = utils.ensure_orig_files(changes, dest, session)
-
-            # Add symlinked files to the list of packages for later processing
-            # by apt-ftparchive
-            for filename in symlinked:
-                qb = QueueBuild()
-                qb.suite_id = s.suite_id
-                qb.queue_id = self.queue_id
-                qb.filename = filename
-                qb.in_queue = True
-                session.add(qb)
-
-            # Update files to ensure they are not removed prematurely
-            for filename in exists:
-                qb = get_queue_build(filename, s.suite_id, session)
-                if qb is None:
-                    qb.in_queue = True
-                    qb.last_used = None
-                    session.add(qb)
+    def __eq__(self, val):
+        if isinstance(val, str):
+            return (self.priority == val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
 
-        if privatetrans:
-            session.commit()
-            session.close()
+    def __ne__(self, val):
+        if isinstance(val, str):
+            return (self.priority != val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
 
-        return None
+    def __repr__(self):
+        return '<Priority %s (%s)>' % (self.priority, self.priority_id)
 
-__all__.append('Queue')
+__all__.append('Priority')
 
 @session_wrapper
-def get_or_set_queue(queuename, session=None):
+def get_priority(priority, session=None):
     """
-    Returns Queue object for given C{queue name}, creating it if it does not
-    exist.
+    Returns Priority object for given C{priority name}.
 
-    @type queuename: string
-    @param queuename: The name of the queue
+    @type priority: string
+    @param priority: The name of the priority
 
     @type session: Session
     @param session: Optional SQLA session object (a temporary one will be
     generated if not supplied)
 
-    @rtype: Queue
-    @return: Queue object for the given queue
+    @rtype: Priority
+    @return: Priority object for the given priority (C{None} if not present)
     """
 
-    q = session.query(Queue).filter_by(queue_name=queuename)
+    q = session.query(Priority).filter_by(priority=priority)
 
     try:
-        ret = q.one()
+        return q.one()
     except NoResultFound:
-        queue = Queue()
-        queue.queue_name = queuename
-        session.add(queue)
-        session.commit_or_flush()
-        ret = queue
-
-    return ret
-
-__all__.append('get_or_set_queue')
-
-################################################################################
-
-class QueueBuild(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<QueueBuild %s (%s)>' % (self.filename, self.queue_id)
+        return None
 
-__all__.append('QueueBuild')
+__all__.append('get_priority')
 
 @session_wrapper
-def get_queue_build(filename, suite, session=None):
+def get_priorities(session=None):
     """
-    Returns QueueBuild object for given C{filename} and C{suite}.
-
-    @type filename: string
-    @param filename: The name of the file
-
-    @type suiteid: int or str
-    @param suiteid: Suite name or ID
+    Returns dictionary of priority names -> id mappings
 
     @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
+    @param session: Optional SQL session object (a temporary one will be
     generated if not supplied)
 
-    @rtype: Queue
-    @return: Queue object for the given queue
+    @rtype: dictionary
+    @return: dictionary of priority names -> id mappings
     """
 
-    if isinstance(suite, int):
-        q = session.query(QueueBuild).filter_by(filename=filename).filter_by(suite_id=suite)
-    else:
-        q = session.query(QueueBuild).filter_by(filename=filename)
-        q = q.join(Suite).filter_by(suite_name=suite)
+    ret = {}
+    q = session.query(Priority)
+    for x in q.all():
+        ret[x.priority] = x.priority_id
 
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
+    return ret
 
-__all__.append('get_queue_build')
+__all__.append('get_priorities')
 
 ################################################################################
 
@@ -1616,8 +2122,8 @@ def source_exists(source, source_version, suites = ["any"], session=None):
       1. exact match     => 1.0-3
       2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1
 
-    @type package: string
-    @param package: package source name
+    @type source: string
+    @param source: source name
 
     @type source_version: string
     @param source_version: expected source version
@@ -1700,8 +2206,8 @@ def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=
     @type source: str
     @param source: DBSource package name to search for
 
-    @type source: str or None
-    @param source: DBSource version name to search for or None if not applicable
+    @type version: str or None
+    @param version: DBSource version name to search for or None if not applicable
 
     @type dm_upload_allowed: bool
     @param dm_upload_allowed: If None, no effect.  If True or False, only
@@ -1759,6 +2265,198 @@ __all__.append('get_source_in_suite')
 
 ################################################################################
 
+@session_wrapper
+def add_dsc_to_db(u, filename, session=None):
+    entry = u.pkg.files[filename]
+    source = DBSource()
+    pfs = []
+
+    source.source = u.pkg.dsc["source"]
+    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
+    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
+    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    source.install_date = datetime.now().date()
+
+    dsc_component = entry["component"]
+    dsc_location_id = entry["location id"]
+
+    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
+
+    # Set up a new poolfile if necessary
+    if not entry.has_key("files id") or not entry["files id"]:
+        filename = entry["pool name"] + filename
+        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
+        session.flush()
+        pfs.append(poolfile)
+        entry["files id"] = poolfile.file_id
+
+    source.poolfile_id = entry["files id"]
+    session.add(source)
+    session.flush()
+
+    for suite_name in u.pkg.changes["distribution"].keys():
+        sa = SrcAssociation()
+        sa.source_id = source.source_id
+        sa.suite_id = get_suite(suite_name, session).suite_id
+        session.add(sa)
+
+    session.flush()
+
+    # Add the source files to the DB (files and dsc_files)
+    dscfile = DSCFile()
+    dscfile.source_id = source.source_id
+    dscfile.poolfile_id = entry["files id"]
+    session.add(dscfile)
+
+    for dsc_file, dentry in u.pkg.dsc_files.items():
+        df = DSCFile()
+        df.source_id = source.source_id
+
+        # If the .orig tarball is already in the pool, it's
+        # files id is stored in dsc_files by check_dsc().
+        files_id = dentry.get("files id", None)
+
+        # Find the entry in the files hash
+        # TODO: Bail out here properly
+        dfentry = None
+        for f, e in u.pkg.files.items():
+            if f == dsc_file:
+                dfentry = e
+                break
+
+        if files_id is None:
+            filename = dfentry["pool name"] + dsc_file
+
+            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id, session=session)
+            # FIXME: needs to check for -1/-2 and or handle exception
+            if found and obj is not None:
+                files_id = obj.file_id
+                pfs.append(obj)
+
+            # If still not found, add it
+            if files_id is None:
+                # HACK: Force sha1sum etc into dentry
+                dentry["sha1sum"] = dfentry["sha1sum"]
+                dentry["sha256sum"] = dfentry["sha256sum"]
+                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
+                pfs.append(poolfile)
+                files_id = poolfile.file_id
+        else:
+            poolfile = get_poolfile_by_id(files_id, session)
+            if poolfile is None:
+                # Local import, as done elsewhere in this module
+                import utils
+                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
+            pfs.append(poolfile)
+
+        df.poolfile_id = files_id
+        session.add(df)
+
+    session.flush()
+
+    # Add the src_uploaders to the DB
+    uploader_ids = [source.maintainer_id]
+    if u.pkg.dsc.has_key("uploaders"):
+        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
+            up = up.strip()
+            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
+
+    added_ids = {}
+    for up_id in uploader_ids:
+        if added_ids.has_key(up_id):
+            import utils
+            utils.warn("Already saw uploader %s for source %s" % (up_id, source.source))
+            continue
+
+        added_ids[up_id]=1
+
+        su = SrcUploader()
+        su.maintainer_id = up_id
+        su.source_id = source.source_id
+        session.add(su)
+
+    session.flush()
+
+    return source, dsc_component, dsc_location_id, pfs
+
+__all__.append('add_dsc_to_db')
+
+@session_wrapper
+def add_deb_to_db(u, filename, session=None):
+    """
+    Contrary to what you might expect, this routine deals with both
+    debs and udebs.  That info is in 'dbtype', whilst 'type' is
+    'deb' for both of them.
+    """
+    cnf = Config()
+    entry = u.pkg.files[filename]
+
+    bin = DBBinary()
+    bin.package = entry["package"]
+    bin.version = entry["version"]
+    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
+    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
+    bin.binarytype = entry["dbtype"]
+
+    # Find poolfile id
+    filename = entry["pool name"] + filename
+    fullpath = os.path.join(cnf["Dir::Pool"], filename)
+    if not entry.get("location id", None):
+        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
+
+    if entry.get("files id", None):
+        poolfile = get_poolfile_by_id(entry["files id"], session)
+        bin.poolfile_id = entry["files id"]
+    else:
+        poolfile = add_poolfile(filename, entry, entry["location id"], session)
+        bin.poolfile_id = entry["files id"] = poolfile.file_id
+
+    # Find source id
+    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+    if len(bin_sources) != 1:
+        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+                                  (bin.package, bin.version, entry["architecture"],
+                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
+
+    bin.source_id = bin_sources[0].source_id
+
+    # Add and flush object so it has an ID
+    session.add(bin)
+    session.flush()
+
+    # Add BinAssociations
+    for suite_name in u.pkg.changes["distribution"].keys():
+        ba = BinAssociation()
+        ba.binary_id = bin.binary_id
+        ba.suite_id = get_suite(suite_name, session).suite_id
+        session.add(ba)
+
+    session.flush()
+
+    # Deal with contents - disabled for now
+    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
+    #if not contents:
+    #    print "REJECT\nCould not determine contents of package %s" % bin.package
+    #    session.rollback()
+    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
+
+    return poolfile
+
+__all__.append('add_deb_to_db')
+
+################################################################################
+
+class SourceACL(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<SourceACL %s>' % self.source_acl_id
+
+__all__.append('SourceACL')
+
+################################################################################
+
 class SrcAssociation(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1806,15 +2504,12 @@ SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                  ('Priority', 'priority'),
                  ('NotAutomatic', 'notautomatic'),
                  ('CopyChanges', 'copychanges'),
-                 ('CopyDotDak', 'copydotdak'),
-                 ('CommentsDir', 'commentsdir'),
-                 ('OverrideSuite', 'overridesuite'),
-                 ('ChangelogBase', 'changelogbase')]
-
+                 ('OverrideSuite', 'overridesuite')]
 
 class Suite(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def __init__(self, suite_name = None, version = None):
+        self.suite_name = suite_name
+        self.version = version
 
     def __repr__(self):
         return '<Suite %s>' % self.suite_name
@@ -1840,38 +2535,31 @@ class Suite(object):
 
         return "\n".join(ret)
 
-__all__.append('Suite')
-
-@session_wrapper
-def get_suite_architecture(suite, architecture, session=None):
-    """
-    Returns a SuiteArchitecture object given C{suite} and ${arch} or None if it
-    doesn't exist
+    def get_architectures(self, skipsrc=False, skipall=False):
+        """
+        Returns list of Architecture objects
 
-    @type suite: str
-    @param suite: Suite name to search for
+        @type skipsrc: boolean
+        @param skipsrc: Whether to skip returning the 'source' architecture entry
+        (Default False)
 
-    @type architecture: str
-    @param architecture: Architecture name to search for
+        @type skipall: boolean
+        @param skipall: Whether to skip returning the 'all' architecture entry
+        (Default False)
 
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: SuiteArchitecture
-    @return: the SuiteArchitecture object or None
-    """
-
-    q = session.query(SuiteArchitecture)
-    q = q.join(Architecture).filter_by(arch_string=architecture)
-    q = q.join(Suite).filter_by(suite_name=suite)
+        @rtype: list
+        @return: list of Architecture objects for the given name (may be empty)
+        """
 
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
+        q = object_session(self).query(Architecture). \
+            filter(Architecture.suites.contains(self))
+        if skipsrc:
+            q = q.filter(Architecture.arch_string != 'source')
+        if skipall:
+            q = q.filter(Architecture.arch_string != 'all')
+        return q.order_by(Architecture.arch_string).all()
 
-__all__.append('get_suite_architecture')
+__all__.append('Suite')
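A usage sketch for the new relation-based helper (it replaces the SuiteArchitecture join dropped above):

    suite = get_suite("unstable", session)
    for arch in suite.get_architectures(skipsrc=True, skipall=True):
        print arch.arch_string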
 
 @session_wrapper
 def get_suite(suite, session=None):
@@ -1900,22 +2588,14 @@ __all__.append('get_suite')
 
 ################################################################################
 
-class SuiteArchitecture(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<SuiteArchitecture (%s, %s)>' % (self.suite_id, self.arch_id)
-
-__all__.append('SuiteArchitecture')
-
+# TODO: should be removed because the implementation is too trivial
 @session_wrapper
 def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     """
     Returns list of Architecture objects for given C{suite} name
 
-    @type source: str
-    @param source: Suite name to search for
+    @type suite: str
+    @param suite: Suite name to search for
 
     @type skipsrc: boolean
     @param skipsrc: Whether to skip returning the 'source' architecture entry
@@ -1933,19 +2613,7 @@ def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     @return: list of Architecture objects for the given name (may be empty)
     """
 
-    q = session.query(Architecture)
-    q = q.join(SuiteArchitecture)
-    q = q.join(Suite).filter_by(suite_name=suite)
-
-    if skipsrc:
-        q = q.filter(Architecture.arch_string != 'source')
-
-    if skipall:
-        q = q.filter(Architecture.arch_string != 'all')
-
-    q = q.order_by('arch_string')
-
-    return q.all()
+    return get_suite(suite, session).get_architectures(skipsrc, skipall)
 
 __all__.append('get_suite_architectures')
 
@@ -1988,8 +2656,9 @@ __all__.append('get_suite_src_formats')
 ################################################################################
 
 class Uid(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def __init__(self, uid = None, name = None):
+        self.uid = uid
+        self.name = name
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -2008,28 +2677,6 @@ class Uid(object):
 
 __all__.append('Uid')
 
-@session_wrapper
-def add_database_user(uidname, session=None):
-    """
-    Adds a database user
-
-    @type uidname: string
-    @param uidname: The uid of the user to add
-
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied).  If not passed, a commit will be performed at
-    the end of the function, otherwise the caller is responsible for commiting.
-
-    @rtype: Uid
-    @return: the uid object for the given uidname
-    """
-
-    session.execute("CREATE USER :uid", {'uid': uidname})
-    session.commit_or_flush()
-
-__all__.append('add_database_user')
-
 @session_wrapper
 def get_or_set_uid(uidname, session=None):
     """
@@ -2078,58 +2725,135 @@ __all__.append('get_uid_from_fingerprint')
 
 ################################################################################
 
-class DBConn(Singleton):
+class UploadBlock(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
+
+__all__.append('UploadBlock')
+
+################################################################################
+
+class DBConn(object):
     """
     database module init.
     """
+    __shared_state = {}
+
     def __init__(self, *args, **kwargs):
-        super(DBConn, self).__init__(*args, **kwargs)
+        self.__dict__ = self.__shared_state
 
-    def _startup(self, *args, **kwargs):
-        self.debug = False
-        if kwargs.has_key('debug'):
-            self.debug = True
-        self.__createconn()
+        if not getattr(self, 'initialised', False):
+            self.initialised = True
+            self.debug = 'debug' in kwargs
+            self.__createconn()
 
     def __setuptables(self):
-        self.tbl_architecture = Table('architecture', self.db_meta, autoload=True)
-        self.tbl_archive = Table('archive', self.db_meta, autoload=True)
-        self.tbl_bin_contents = Table('bin_contents', self.db_meta, autoload=True)
-        self.tbl_bin_associations = Table('bin_associations', self.db_meta, autoload=True)
-        self.tbl_binaries = Table('binaries', self.db_meta, autoload=True)
-        self.tbl_component = Table('component', self.db_meta, autoload=True)
-        self.tbl_config = Table('config', self.db_meta, autoload=True)
-        self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True)
-        self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True)
-        self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True)
-        self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True)
-        self.tbl_deb_contents = Table('deb_contents', self.db_meta, autoload=True)
-        self.tbl_files = Table('files', self.db_meta, autoload=True)
-        self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True)
-        self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True)
-        self.tbl_location = Table('location', self.db_meta, autoload=True)
-        self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True)
-        self.tbl_new_comments = Table('new_comments', self.db_meta, autoload=True)
-        self.tbl_override = Table('override', self.db_meta, autoload=True)
-        self.tbl_override_type = Table('override_type', self.db_meta, autoload=True)
-        self.tbl_pending_bin_contents = Table('pending_bin_contents', self.db_meta, autoload=True)
-        self.tbl_priority = Table('priority', self.db_meta, autoload=True)
-        self.tbl_queue = Table('queue', self.db_meta, autoload=True)
-        self.tbl_queue_build = Table('queue_build', self.db_meta, autoload=True)
-        self.tbl_section = Table('section', self.db_meta, autoload=True)
-        self.tbl_source = Table('source', self.db_meta, autoload=True)
-        self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True)
-        self.tbl_src_format = Table('src_format', self.db_meta, autoload=True)
-        self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True)
-        self.tbl_suite = Table('suite', self.db_meta, autoload=True)
-        self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True)
-        self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True)
-        self.tbl_udeb_contents = Table('udeb_contents', self.db_meta, autoload=True)
-        self.tbl_uid = Table('uid', self.db_meta, autoload=True)
+        tables_with_primary = (
+            'architecture',
+            'archive',
+            'bin_associations',
+            'binaries',
+            'binary_acl',
+            'binary_acl_map',
+            'build_queue',
+            'changelogs_text',
+            'component',
+            'config',
+            'changes_pending_binaries',
+            'changes_pending_files',
+            'changes_pending_source',
+            'dsc_files',
+            'files',
+            'fingerprint',
+            'keyrings',
+            'keyring_acl_map',
+            'location',
+            'maintainer',
+            'new_comments',
+            'override_type',
+            'pending_bin_contents',
+            'policy_queue',
+            'priority',
+            'section',
+            'source',
+            'source_acl',
+            'src_associations',
+            'src_format',
+            'src_uploaders',
+            'suite',
+            'uid',
+            'upload_blocks',
+            # The following tables have primary keys but sqlalchemy
+            # version 0.5 fails to reflect them correctly with database
+            # versions before upgrade #41.
+            #'changes',
+            #'build_queue_files',
+        )
+
+        tables_no_primary = (
+            'bin_contents',
+            'changes_pending_files_map',
+            'changes_pending_source_files',
+            'changes_pool_files',
+            'deb_contents',
+            'override',
+            'suite_architectures',
+            'suite_src_formats',
+            'suite_build_queue_copy',
+            'udeb_contents',
+            # see the comment above
+            'changes',
+            'build_queue_files',
+        )
+
+        views = (
+            'almost_obsolete_all_associations',
+            'almost_obsolete_src_associations',
+            'any_associations_source',
+            'bin_assoc_by_arch',
+            'bin_associations_binaries',
+            'binaries_suite_arch',
+            'binfiles_suite_component_arch',
+            'changelogs',
+            'file_arch_suite',
+            'newest_all_associations',
+            'newest_any_associations',
+            'newest_source',
+            'newest_src_association',
+            'obsolete_all_associations',
+            'obsolete_any_associations',
+            'obsolete_any_by_all_associations',
+            'obsolete_src_associations',
+            'source_suite',
+            'src_associations_bin',
+            'src_associations_src',
+            'suite_arch_by_name',
+        )
+
+        # Sqlalchemy version 0.5 fails to reflect the SERIAL type
+        # correctly and that is why we have to use a workaround. It can
+        # be removed as soon as we switch to version 0.6.
+        for table_name in tables_with_primary:
+            table = Table(table_name, self.db_meta, \
+                Column('id', Integer, primary_key = True), \
+                autoload=True, useexisting=True)
+            setattr(self, 'tbl_%s' % table_name, table)
+
+        for table_name in tables_no_primary:
+            table = Table(table_name, self.db_meta, autoload=True)
+            setattr(self, 'tbl_%s' % table_name, table)
+
+        for view_name in views:
+            view = Table(view_name, self.db_meta, autoload=True)
+            setattr(self, 'view_%s' % view_name, view)
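+
+        # The primary-key workaround boils down to this reflection pattern
+        # (standalone sketch; 'projectb' stands in for the real DSN):
+        #
+        #     meta = MetaData(bind=create_engine('postgres:///projectb'))
+        #     # the explicit Column supplies the primary key that 0.5's
+        #     # SERIAL reflection misses:
+        #     t = Table('suite', meta, Column('id', Integer, primary_key=True),
+        #               autoload=True, useexisting=True)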
 
     def __setupmappers(self):
         mapper(Architecture, self.tbl_architecture,
-               properties = dict(arch_id = self.tbl_architecture.c.id))
+               properties = dict(arch_id = self.tbl_architecture.c.id,
+                                 suites = relation(Suite, secondary=self.tbl_suite_architectures, backref='architectures', order_by='suite_name')))
 
         mapper(Archive, self.tbl_archive,
                properties = dict(archive_id = self.tbl_archive.c.id,
@@ -2153,7 +2877,7 @@ class DBConn(Singleton):
         mapper(DebContents, self.tbl_deb_contents,
                properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
                                  package=self.tbl_deb_contents.c.package,
-                                 component=self.tbl_deb_contents.c.component,
+                                 suite=self.tbl_deb_contents.c.suite,
                                  arch=self.tbl_deb_contents.c.arch,
                                  section=self.tbl_deb_contents.c.section,
                                  filename=self.tbl_deb_contents.c.filename))
@@ -2161,11 +2885,18 @@ class DBConn(Singleton):
         mapper(UdebContents, self.tbl_udeb_contents,
                properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
                                  package=self.tbl_udeb_contents.c.package,
-                                 component=self.tbl_udeb_contents.c.component,
+                                 suite=self.tbl_udeb_contents.c.suite,
                                  arch=self.tbl_udeb_contents.c.arch,
                                  section=self.tbl_udeb_contents.c.section,
                                  filename=self.tbl_udeb_contents.c.filename))
 
+        mapper(BuildQueue, self.tbl_build_queue,
+               properties = dict(queue_id = self.tbl_build_queue.c.id))
+
+        mapper(BuildQueueFile, self.tbl_build_queue_files,
+               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
+                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))
+
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
                                  package = self.tbl_binaries.c.package,
@@ -2185,6 +2916,14 @@ class DBConn(Singleton):
                                  binassociations = relation(BinAssociation,
                                                             primaryjoin=(self.tbl_binaries.c.id==self.tbl_bin_associations.c.bin))))
 
+        mapper(BinaryACL, self.tbl_binary_acl,
+               properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
+
+        mapper(BinaryACLMap, self.tbl_binary_acl_map,
+               properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
+                                 fingerprint = relation(Fingerprint, backref="binary_acl_map"),
+                                 architecture = relation(Architecture)))
+
         mapper(Component, self.tbl_component,
                properties = dict(component_id = self.tbl_component.c.id,
                                  component_name = self.tbl_component.c.name))
@@ -2210,12 +2949,66 @@ class DBConn(Singleton):
                                  uid_id = self.tbl_fingerprint.c.uid,
                                  uid = relation(Uid),
                                  keyring_id = self.tbl_fingerprint.c.keyring,
-                                 keyring = relation(Keyring)))
+                                 keyring = relation(Keyring),
+                                 source_acl = relation(SourceACL),
+                                 binary_acl = relation(BinaryACL)))
 
         mapper(Keyring, self.tbl_keyrings,
                properties = dict(keyring_name = self.tbl_keyrings.c.name,
                                  keyring_id = self.tbl_keyrings.c.id))
 
+        mapper(DBChange, self.tbl_changes,
+               properties = dict(change_id = self.tbl_changes.c.id,
+                                 poolfiles = relation(PoolFile,
+                                                      secondary=self.tbl_changes_pool_files,
+                                                      backref="changeslinks"),
+                                 seen = self.tbl_changes.c.seen,
+                                 source = self.tbl_changes.c.source,
+                                 binaries = self.tbl_changes.c.binaries,
+                                 architecture = self.tbl_changes.c.architecture,
+                                 distribution = self.tbl_changes.c.distribution,
+                                 urgency = self.tbl_changes.c.urgency,
+                                 maintainer = self.tbl_changes.c.maintainer,
+                                 changedby = self.tbl_changes.c.changedby,
+                                 date = self.tbl_changes.c.date,
+                                 version = self.tbl_changes.c.version,
+                                 files = relation(ChangePendingFile,
+                                                  secondary=self.tbl_changes_pending_files_map,
+                                                  backref="changesfile"),
+                                 in_queue_id = self.tbl_changes.c.in_queue,
+                                 in_queue = relation(PolicyQueue,
+                                                     primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
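+                                 # the explicit primaryjoin is needed because
+                                 # approved_for appears to be a second path
+                                 # into policy_queue, which would otherwise
+                                 # make the join ambiguous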
+                                 approved_for_id = self.tbl_changes.c.approved_for))
+
+        mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
+               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
+
+        mapper(ChangePendingFile, self.tbl_changes_pending_files,
+               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
+                                 filename = self.tbl_changes_pending_files.c.filename,
+                                 size = self.tbl_changes_pending_files.c.size,
+                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
+                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
+                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
+
+        mapper(ChangePendingSource, self.tbl_changes_pending_source,
+               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
+                                 change = relation(DBChange),
+                                 maintainer = relation(Maintainer,
+                                                       primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
+                                 changedby = relation(Maintainer,
+                                                      primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
+                                 fingerprint = relation(Fingerprint),
+                                 source_files = relation(ChangePendingFile,
+                                                         secondary=self.tbl_changes_pending_source_files,
+                                                         backref="pending_sources")))
+
+        mapper(KeyringACLMap, self.tbl_keyring_acl_map,
+               properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
+                                 keyring = relation(Keyring, backref="keyring_acl_map"),
+                                 architecture = relation(Architecture)))
+
         mapper(Location, self.tbl_location,
                properties = dict(location_id = self.tbl_location.c.id,
                                  component_id = self.tbl_location.c.component,
@@ -2233,6 +3026,7 @@ class DBConn(Singleton):
         mapper(Override, self.tbl_override,
                properties = dict(suite_id = self.tbl_override.c.suite,
                                  suite = relation(Suite),
+                                 package = self.tbl_override.c.package,
                                  component_id = self.tbl_override.c.component,
                                  component = relation(Component),
                                  priority_id = self.tbl_override.c.priority,
@@ -2246,17 +3040,12 @@ class DBConn(Singleton):
                properties = dict(overridetype = self.tbl_override_type.c.type,
                                  overridetype_id = self.tbl_override_type.c.id))
 
+        mapper(PolicyQueue, self.tbl_policy_queue,
+               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+
         mapper(Priority, self.tbl_priority,
                properties = dict(priority_id = self.tbl_priority.c.id))
 
-        mapper(Queue, self.tbl_queue,
-               properties = dict(queue_id = self.tbl_queue.c.id))
-
-        mapper(QueueBuild, self.tbl_queue_build,
-               properties = dict(suite_id = self.tbl_queue_build.c.suite,
-                                 queue_id = self.tbl_queue_build.c.queue,
-                                 queue = relation(Queue, backref='queuebuild')))
-
         mapper(Section, self.tbl_section,
                properties = dict(section_id = self.tbl_section.c.id,
                                  section=self.tbl_section.c.section))
@@ -2277,7 +3066,11 @@ class DBConn(Singleton):
                                  srcfiles = relation(DSCFile,
                                                      primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
                                  srcassociations = relation(SrcAssociation,
-                                                            primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source))))
+                                                            primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source)),
+                                 srcuploaders = relation(SrcUploader)))
+
+        mapper(SourceACL, self.tbl_source_acl,
+               properties = dict(source_acl_id = self.tbl_source_acl.c.id))
 
         mapper(SrcAssociation, self.tbl_src_associations,
                properties = dict(sa_id = self.tbl_src_associations.c.id,
@@ -2300,13 +3093,9 @@ class DBConn(Singleton):
                                                        primaryjoin=(self.tbl_src_uploaders.c.maintainer==self.tbl_maintainer.c.id))))
 
         mapper(Suite, self.tbl_suite,
-               properties = dict(suite_id = self.tbl_suite.c.id))
-
-        mapper(SuiteArchitecture, self.tbl_suite_architectures,
-               properties = dict(suite_id = self.tbl_suite_architectures.c.suite,
-                                 suite = relation(Suite, backref='suitearchitectures'),
-                                 arch_id = self.tbl_suite_architectures.c.architecture,
-                                 architecture = relation(Architecture)))
+               properties = dict(suite_id = self.tbl_suite.c.id,
+                                 policy_queue = relation(PolicyQueue),
+                                 copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy)))
 
         mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
                properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
@@ -2318,6 +3107,11 @@ class DBConn(Singleton):
                properties = dict(uid_id = self.tbl_uid.c.id,
                                  fingerprint = relation(Fingerprint)))
 
+        mapper(UploadBlock, self.tbl_upload_blocks,
+               properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
+                                 fingerprint = relation(Fingerprint, backref="uploadblocks"),
+                                 uid = relation(Uid, backref="uploadblocks")))
+
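+    # The new Suite relations in practice (sketch; attribute names as
+    # mapped above, with a Suite object from e.g. get_suite()):
+    #
+    #     suite.architectures   # backref of Architecture.suites
+    #     suite.policy_queue    # associated PolicyQueue, if any
+    #     suite.copy_queues     # BuildQueues via suite_build_queue_copy
+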
     ## Connection functions
     def __createconn(self):
         from config import Config