Merge branch 'master' into dbtests
diff --git a/daklib/dbconn.py b/daklib/dbconn.py
old mode 100644 (file)
new mode 100755 (executable)
index 8543ab1..786aebd
@@ -5,7 +5,7 @@
 @contact: Debian FTPMaster <ftpmaster@debian.org>
 @copyright: 2000, 2001, 2002, 2003, 2004, 2006  James Troup <james@nocrew.org>
 @copyright: 2008-2009  Mark Hymers <mhy@debian.org>
-@copyright: 2009  Joerg Jaspert <joerg@debian.org>
+@copyright: 2009, 2010  Joerg Jaspert <joerg@debian.org>
 @copyright: 2009  Mike O'Connor <stew@debian.org>
 @license: GNU General Public License version 2 or later
 """
@@ -37,6 +37,7 @@ import os
 import re
 import psycopg2
 import traceback
+import commands
 from datetime import datetime, timedelta
 from errno import ENOENT
 from tempfile import mkstemp, mkdtemp
@@ -44,7 +45,7 @@ from tempfile import mkstemp, mkdtemp
 from inspect import getargspec
 
 import sqlalchemy
-from sqlalchemy import create_engine, Table, MetaData
+from sqlalchemy import create_engine, Table, MetaData, Column, Integer
 from sqlalchemy.orm import sessionmaker, mapper, relation
 from sqlalchemy import types as sqltypes
 
@@ -52,8 +53,11 @@ from sqlalchemy import types as sqltypes
 from sqlalchemy.exc import *
 from sqlalchemy.orm.exc import NoResultFound
 
+# Only import Config until Queue stuff is changed to store its config
+# in the database
 from config import Config
 from textutils import fix_maintainer
+from dak_exceptions import NoSourceFieldError
 
 ################################################################################
 
@@ -61,15 +65,19 @@ from textutils import fix_maintainer
 # reflection
 
 class DebVersion(sqltypes.Text):
+    """
+    Support the debversion type
+    """
+
     def get_col_spec(self):
         return "DEBVERSION"
 
 sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version == "0.5":
+if sa_major_version in ["0.5", "0.6"]:
     from sqlalchemy.databases import postgres
     postgres.ischema_names['debversion'] = DebVersion
 else:
-    raise Exception("dak isn't ported to SQLA versions != 0.5 yet.  See daklib/dbconn.py")
+    raise Exception("dak is only ported to SQLA versions 0.5 and 0.6.  See daklib/dbconn.py")
 
 ################################################################################
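A minimal sketch of what the registration above provides (the table and column
names here are illustrative, not from the diff): reflected columns of the
database type "debversion" come back as DebVersion, and DDL generated for a
DebVersion column uses the string returned by get_col_spec().

    # Illustrative only: a table declared with the custom type would emit
    # "DEBVERSION" as the column type in CREATE TABLE statements.
    from sqlalchemy import MetaData, Table, Column, Integer

    meta = MetaData()
    example = Table('example', meta,
                    Column('id', Integer, primary_key=True),
                    Column('version', DebVersion()))
    # -> CREATE TABLE example (id INTEGER NOT NULL, version DEBVERSION, ...)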
 
@@ -179,8 +187,8 @@ def get_architecture_suites(architecture, session=None):
     """
     Returns list of Suite objects for given C{architecture} name
 
-    @type source: str
-    @param source: Architecture name to search for
+    @type architecture: str
+    @param architecture: Architecture name to search for
 
     @type session: Session
     @param session: Optional SQL session object (a temporary one will be
@@ -276,8 +284,8 @@ def get_suites_binary_in(package, session=None):
     """
     Returns list of Suite objects which given C{package} name is in
 
-    @type source: str
-    @param source: DBBinary package name to search for
+    @type package: str
+    @param package: DBBinary package name to search for
 
     @rtype: list
     @return: list of Suite objects for the given package
@@ -323,8 +331,8 @@ def get_binaries_from_name(package, version=None, architecture=None, session=Non
     @type version: str or None
     @param version: Version to search for (or None)
 
-    @type package: str, list or None
-    @param package: Architectures to limit to (or None if no limit)
+    @type architecture: str, list or None
+    @param architecture: Architectures to limit to (or None if no limit)
 
     @type session: Session
     @param session: Optional SQL session object (a temporary one will be
@@ -377,16 +385,16 @@ def get_binary_from_name_suite(package, suitename, session=None):
 
     sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name
              FROM binaries b, files fi, location l, component c, bin_associations ba, suite su
-             WHERE b.package=:package
+             WHERE b.package='%(package)s'
                AND b.file = fi.id
                AND fi.location = l.id
                AND l.component = c.id
                AND ba.bin=b.id
                AND ba.suite = su.id
-               AND su.suite_name=:suitename
+               AND su.suite_name %(suitename)s
           ORDER BY b.version DESC"""
 
-    return session.execute(sql, {'package': package, 'suitename': suitename})
+    return session.execute(sql % {'package': package, 'suitename': suitename})
 
 __all__.append('get_binary_from_name_suite')
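Note the semantic change in this hunk: package and suitename are interpolated
directly into the SQL string instead of being passed as bind parameters, and
the query leaves the comparison operator on suite_name to the caller. A hedged
usage sketch based on that reading (the suite string is an example):

    # Illustrative call: suitename is assumed to supply its own operator,
    # e.g. "= 'unstable'" or "IN ('unstable', 'experimental')".  Because the
    # values are interpolated rather than bound, callers must pass trusted,
    # pre-quoted input.
    rows = get_binary_from_name_suite('dpkg', "= 'unstable'")
    for package, version, component, suite_name in rows:
        print package, version, component, suite_name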
 
@@ -435,8 +443,8 @@ MINIMAL_APT_CONF="""
 Dir
 {
    ArchiveDir "%(archivepath)s";
-   OverrideDir "/srv/ftp.debian.org/scripts/override/";
-   CacheDir "/srv/ftp.debian.org/database/";
+   OverrideDir "%(overridedir)s";
+   CacheDir "%(cachedir)s";
 };
 
 Default
@@ -477,7 +485,7 @@ class BuildQueue(object):
     def __repr__(self):
         return '<BuildQueue %s>' % self.queue_name
 
-    def write_metadata(self, ourtime, force=False):
+    def write_metadata(self, starttime, force=False):
         # Do we write out metafiles?
         if not (force or self.generate_metadata):
             return
@@ -491,35 +499,52 @@ class BuildQueue(object):
 
         try:
             # Grab files we want to include
-            newer = session.query(BuildQueueFile).filter_by(build_queue_id = 1).filter(BuildQueueFile.lastused > ourtime).all()
-
+            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
             # Write file list with newer files
             (fl_fd, fl_name) = mkstemp()
             for n in newer:
                 os.write(fl_fd, '%s\n' % n.fullpath)
             os.close(fl_fd)
 
+            cnf = Config()
+
             # Write minimal apt.conf
             # TODO: Remove hardcoding from template
             (ac_fd, ac_name) = mkstemp()
             os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
-                                                'filelist': fl_name})
-            os.close()
+                                                'filelist': fl_name,
+                                                'cachedir': cnf["Dir::Cache"],
+                                                'overridedir': cnf["Dir::Override"],
+                                                })
+            os.close(ac_fd)
 
             # Run apt-ftparchive generate
-            os.chdir(os.path.dirname(fl_name))
-            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(fl_name))
+            os.chdir(os.path.dirname(ac_name))
+            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
 
             # Run apt-ftparchive release
             # TODO: Eww - fix this
             bname = os.path.basename(self.path)
             os.chdir(self.path)
             os.chdir('..')
-            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="${archs}" release %s > Release""", [self.origin, self.label, self.releasedescription, arches, bname])
+
+            # We have to remove the Release file otherwise it'll be included in the
+            # new one
+            try:
+                os.unlink(os.path.join(bname, 'Release'))
+            except OSError:
+                pass
+
+            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
+
+            # Crude hack with open and append; this whole section should be redone.
+            if self.notautomatic:
+                release = open("Release", "a")
+                release.write("NotAutomatic: yes\n")
+                release.close()
 
             # Sign if necessary
             if self.signingkey:
-                cnf = Config()
                 keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
                 if cnf.has_key("Dinstall::SigningPubKeyring"):
                     keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
@@ -558,24 +583,23 @@ class BuildQueue(object):
                 except OSError:
                     pass
 
-    def clean_and_update(self, starttime, dryrun=False):
+    def clean_and_update(self, starttime, Logger, dryrun=False):
         """WARNING: This routine commits for you"""
         session = DBConn().session().object_session(self)
 
-        ourtime = starttime + timedelta(seconds=self.stay_of_execution)
-
-        if self.generate_metadata:
-            self.write_metadata(ourtime)
+        if self.generate_metadata and not dryrun:
+            self.write_metadata(starttime)
 
         # Grab files older than our execution time
-        older = session.query(BuildQueueFile).filter_by(build_queue_id = 1).filter(BuildQueueFile.lastused <= ourtime).all()
+        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
 
         for o in older:
             killdb = False
             try:
                 if dryrun:
-                    print "I: Would have removed %s from the queue"
+                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
                 else:
+                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
                     os.unlink(o.fullpath)
                     killdb = True
             except OSError, e:
@@ -584,13 +608,29 @@ class BuildQueue(object):
                     killdb = True
                 else:
                     # TODO: Replace with proper logging call
-                    print "E: Could not remove %s" % o.fullpath
+                    Logger.log(["E: Could not remove %s" % o.fullpath])
 
             if killdb:
                 session.delete(o)
 
         session.commit()
 
+        for f in os.listdir(self.path):
+            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
+                continue
+
+            try:
+                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+            except NoResultFound:
+                fp = os.path.join(self.path, f)
+                if dryrun:
+                    Logger.log(["I: Would remove unused link %s" % fp])
+                else:
+                    Logger.log(["I: Removing unused link %s" % fp])
+                    try:
+                        os.unlink(fp)
+                    except OSError:
+                        Logger.log(["E: Failed to unlink unreferenced file %s" % fp])
 
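Both write_metadata() and the query above now apply the same grace-period
cutoff in SQL. A small sketch of the equivalent Python-side test (the helper
name is illustrative, not part of dak):

    # Illustrative only: a queue file becomes eligible for removal once its
    # stay of execution has elapsed relative to the run's start time.
    from datetime import timedelta

    def past_stay_of_execution(lastused, stay_of_execution, starttime):
        # lastused: datetime of last use; stay_of_execution: grace in seconds
        return lastused + timedelta(seconds=stay_of_execution) <= starttime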
     def add_file_from_pool(self, poolfile):
         """Copies a file into the pool.  Assumes that the PoolFile object is
@@ -876,8 +916,9 @@ def get_or_set_contents_path_id(filepath, session=None):
 
     If no matching file is found, a row is inserted.
 
-    @type filename: string
-    @param filename: The filepath
+    @type filepath: string
+    @param filepath: The filepath
+
     @type session: SQLAlchemy
     @param session: Optional SQL session object (a temporary one will be
     generated if not supplied).  If not passed, a commit will be performed at
@@ -938,12 +979,16 @@ def insert_content_paths(binary_id, fullpaths, session=None):
 
     try:
         # Insert paths
-        pathcache = {}
-        for fullpath in fullpaths:
-            if fullpath.startswith( './' ):
-                fullpath = fullpath[2:]
+        def generate_path_dicts():
+            for fullpath in fullpaths:
+                if fullpath.startswith( './' ):
+                    fullpath = fullpath[2:]
+
+                yield {'filename':fullpath, 'id': binary_id }
 
-            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )", { 'filename': fullpath, 'id': binary_id}  )
+        for d in generate_path_dicts():
+            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
+                         d )
 
         session.commit()
         if privatetrans:
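The rewrite replaces the pathcache dict with a generator; rows are still
inserted one statement at a time, the generator merely avoids building an
intermediate structure. An illustrative call (the id and paths are sample
data):

    # Illustrative only: leading "./" prefixes are stripped before each path
    # is recorded in bin_contents for the given binary id.
    insert_content_paths(42, ['./usr/bin/hello',
                              'usr/share/doc/hello/copyright'])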
@@ -1025,7 +1070,7 @@ __all__.append('PoolFile')
 def check_poolfile(filename, filesize, md5sum, location_id, session=None):
     """
     Returns a tuple:
-     (ValidFileFound [boolean or None], PoolFile object or None)
+    (ValidFileFound [boolean or None], PoolFile object or None)
 
     @type filename: string
     @param filename: the filename of the file to check against the DB
@@ -1041,12 +1086,11 @@ def check_poolfile(filename, filesize, md5sum, location_id, session=None):
 
     @rtype: tuple
     @return: Tuple of length 2.
-             If more than one file found with that name:
-                    (None,  None)
-             If valid pool file found: (True, PoolFile object)
-             If valid pool file not found:
-                    (False, None) if no file found
-                    (False, PoolFile object) if file found with size/md5sum mismatch
+                 - If more than one file found with that name: (C{None},  C{None})
+                 - If valid pool file found: (C{True}, C{PoolFile object})
+                 - If valid pool file not found:
+                     - (C{False}, C{None}) if no file found
+                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
     """
 
     q = session.query(PoolFile).filter_by(filename=filename)
@@ -1130,7 +1174,7 @@ def get_poolfile_like_name(filename, session=None):
     """
 
     # TODO: There must be a way of properly using bind parameters with %FOO%
-    q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename))
+    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
 
     return q.all()
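The new pattern narrows the match: the old '%%%s%%' found the filename
anywhere in the path, while '%%/%s' only matches it as a complete trailing
path component. For example (sample filename):

    # Illustrative only, for filename 'hello_1.0-1.dsc':
    #   old pattern: '%hello_1.0-1.dsc%'  -- substring match anywhere
    #   new pattern: '%/hello_1.0-1.dsc'  -- must follow '/' and end the path
    matches = get_poolfile_like_name('hello_1.0-1.dsc')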
 
@@ -1274,9 +1318,17 @@ class Keyring(object):
             esclist[x] = "%c" % (int(esclist[x][2:],16))
         return "".join(esclist)
 
-    def load_keys(self, keyring):
+    def parse_address(self, uid):
+        """parses uid and returns a tuple of real name and email address"""
         import email.Utils
+        (name, address) = email.Utils.parseaddr(uid)
+        name = re.sub(r"\s*[(].*[)]", "", name)
+        name = self.de_escape_gpg_str(name)
+        if name == "":
+            name = uid
+        return (name, address)
 
+    def load_keys(self, keyring):
         if not self.keyring_id:
             raise Exception('Must be initialized with database information')
 
@@ -1288,24 +1340,20 @@ class Keyring(object):
             field = line.split(":")
             if field[0] == "pub":
                 key = field[4]
-                (name, addr) = email.Utils.parseaddr(field[9])
-                name = re.sub(r"\s*[(].*[)]", "", name)
-                if name == "" or addr == "" or "@" not in addr:
-                    name = field[9]
-                    addr = "invalid-uid"
-                name = self.de_escape_gpg_str(name)
-                self.keys[key] = {"email": addr}
-                if name != "":
+                self.keys[key] = {}
+                (name, addr) = self.parse_address(field[9])
+                if "@" in addr:
+                    self.keys[key]["email"] = addr
                     self.keys[key]["name"] = name
-                self.keys[key]["aliases"] = [name]
                 self.keys[key]["fingerprints"] = []
                 signingkey = True
             elif key and field[0] == "sub" and len(field) >= 12:
                 signingkey = ("s" in field[11])
             elif key and field[0] == "uid":
-                (name, addr) = email.Utils.parseaddr(field[9])
-                if name and name not in self.keys[key]["aliases"]:
-                    self.keys[key]["aliases"].append(name)
+                (name, addr) = self.parse_address(field[9])
+                if "email" not in self.keys[key] and "@" in addr:
+                    self.keys[key]["email"] = addr
+                    self.keys[key]["name"] = name
             elif signingkey and field[0] == "fpr":
                 self.keys[key]["fingerprints"].append(field[9])
                 self.fpr_lookup[field[9]] = key
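The uid-parsing logic is factored into parse_address(), and a key now only
gains "email"/"name" entries when the address plausibly contains an email.
Roughly, with sample data and assuming an existing Keyring instance k:

    # Illustrative only: gpg comments are stripped from the real-name part;
    # if no name survives, the whole uid string is used as the name.
    name, addr = k.parse_address("Jane Example (Debian) <jane@example.org>")
    # name == "Jane Example", addr == "jane@example.org"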
@@ -1353,7 +1401,7 @@ class Keyring(object):
         byname = {}
         any_invalid = False
         for x in self.keys.keys():
-            if self.keys[x]["email"] == "invalid-uid":
+            if "email" not in self.keys[x]:
                 any_invalid = True
                 self.keys[x]["uid"] = format % "invalid-uid"
             else:
@@ -1415,6 +1463,19 @@ class DBChange(object):
     def __repr__(self):
         return '<DBChange %s>' % self.changesname
 
+    def clean_from_queue(self):
+        session = DBConn().session().object_session(self)
+
+        # Remove changes_pool_files entries
+        self.poolfiles = []
+
+        # Remove changes_pending_files references
+        self.files = []
+
+        # Clear out of queue
+        self.in_queue = None
+        self.approved_for_id = None
+
 __all__.append('DBChange')
 
 @session_wrapper
@@ -1422,15 +1483,15 @@ def get_dbchange(filename, session=None):
     """
     returns DBChange object for given C{filename}.
 
-    @type archive: string
-    @param archive: the name of the arhive
+    @type filename: string
+    @param filename: the name of the file
 
     @type session: Session
     @param session: Optional SQLA session object (a temporary one will be
     generated if not supplied)
 
-    @rtype: Archive
-    @return: Archive object for the given name (None if not present)
+    @rtype: DBChange
+    @return: DBChange object for the given filename (C{None} if not present)
 
     """
     q = session.query(DBChange).filter_by(changesname=filename)
@@ -1460,13 +1521,13 @@ def get_location(location, component=None, archive=None, session=None):
     and archive
 
     @type location: string
-    @param location: the path of the location, e.g. I{/srv/ftp.debian.org/ftp/pool/}
+    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
 
     @type component: string
     @param component: the component name (if None, no restriction applied)
 
     @type archive: string
-    @param archive_id: the archive name (if None, no restriction applied)
+    @param archive: the archive name (if None, no restriction applied)
 
     @rtype: Location / None
     @return: Either a Location object or None if one can't be found
@@ -1722,16 +1783,38 @@ __all__.append('get_override_type')
 
 ################################################################################
 
-class PendingContentAssociation(object):
+class DebContents(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<DebContents %s: %s>' % (self.package, self.filename)
+
+__all__.append('DebContents')
+
+
+class UdebContents(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<PendingContentAssociation %s>' % self.pca_id
+        return '<UdebContents %s: %s>' % (self.package, self.filename)
 
-__all__.append('PendingContentAssociation')
+__all__.append('UdebContents')
 
-def insert_pending_content_paths(package, fullpaths, session=None):
+class PendingBinContents(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<PendingBinContents %s>' % self.contents_id
+
+__all__.append('PendingBinContents')
+
+def insert_pending_content_paths(package,
+                                 is_udeb,
+                                 fullpaths,
+                                 session=None):
     """
     Make sure given paths are temporarily associated with given
     package
@@ -1760,32 +1843,27 @@ def insert_pending_content_paths(package, fullpaths, session=None):
         arch_id = arch.arch_id
 
         # Remove any already existing recorded files for this package
-        q = session.query(PendingContentAssociation)
+        q = session.query(PendingBinContents)
         q = q.filter_by(package=package['Package'])
         q = q.filter_by(version=package['Version'])
         q = q.filter_by(architecture=arch_id)
         q.delete()
 
-        # Insert paths
-        pathcache = {}
         for fullpath in fullpaths:
-            (path, filename) = os.path.split(fullpath)
-
-            if path.startswith( "./" ):
-                path = path[2:]
 
-            filepath_id = get_or_set_contents_path_id(path, session)
-            filename_id = get_or_set_contents_file_id(filename, session)
-
-            pathcache[fullpath] = (filepath_id, filename_id)
+            if fullpath.startswith( "./" ):
+                fullpath = fullpath[2:]
 
-        for fullpath, dat in pathcache.items():
-            pca = PendingContentAssociation()
+            pca = PendingBinContents()
             pca.package = package['Package']
             pca.version = package['Version']
-            pca.filepath_id = dat[0]
-            pca.filename_id = dat[1]
+            pca.filename = fullpath
             pca.architecture = arch_id
+
+            if is_udeb:
+                pca.type = 8 # gross
+            else:
+                pca.type = 7 # also gross
             session.add(pca)
 
         # Only commit if we set up the session ourself
@@ -1844,6 +1922,31 @@ def get_policy_queue(queuename, session=None):
 
 __all__.append('get_policy_queue')
 
+@session_wrapper
+def get_policy_queue_from_path(pathname, session=None):
+    """
+    Returns PolicyQueue object for given C{pathname}
+
+    @type pathname: string
+    @param pathname: The path
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given path (C{None} if not present)
+    """
+
+    q = session.query(PolicyQueue).filter_by(path=pathname)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_policy_queue_from_path')
+
 ################################################################################
 
 class Priority(object):
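An illustrative lookup for the new helper above (the path is an example): it
complements get_policy_queue() by resolving a queue from its on-disk path
rather than its name.

    # Illustrative only:
    queue = get_policy_queue_from_path('/srv/ftp-master.debian.org/queue/new')
    if queue is None:
        pass  # no policy queue is configured for that path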
@@ -2003,8 +2106,8 @@ def source_exists(source, source_version, suites = ["any"], session=None):
       1. exact match     => 1.0-3
       2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1
 
-    @type package: string
-    @param package: package source name
+    @type source: string
+    @param source: source name
 
     @type source_version: string
     @param source_version: expected source version
@@ -2087,8 +2190,8 @@ def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=
     @type source: str
     @param source: DBSource package name to search for
 
-    @type source: str or None
-    @param source: DBSource version name to search for or None if not applicable
+    @type version: str or None
+    @param version: DBSource version name to search for or None if not applicable
 
     @type dm_upload_allowed: bool
     @param dm_upload_allowed: If None, no effect.  If True or False, only
@@ -2237,26 +2340,27 @@ def add_dsc_to_db(u, filename, session=None):
     # Add the src_uploaders to the DB
     uploader_ids = [source.maintainer_id]
     if u.pkg.dsc.has_key("uploaders"):
-        for up in u.pkg.dsc["uploaders"].split(","):
+        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
             up = up.strip()
             uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
 
     added_ids = {}
-    for up in uploader_ids:
-        if added_ids.has_key(up):
-            utils.warn("Already saw uploader %s for source %s" % (up, source.source))
+    for up_id in uploader_ids:
+        if added_ids.has_key(up_id):
+            import utils
+            utils.warn("Already saw uploader %s for source %s" % (up_id, source.source))
             continue
 
-        added_ids[u]=1
+        added_ids[up_id]=1
 
         su = SrcUploader()
-        su.maintainer_id = up
+        su.maintainer_id = up_id
         su.source_id = source.source_id
         session.add(su)
 
     session.flush()
 
-    return dsc_component, dsc_location_id, pfs
+    return source, dsc_component, dsc_location_id, pfs
 
 __all__.append('add_dsc_to_db')
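The uploaders split in this hunk (.replace(">, ", ">\t").split("\t")) avoids
breaking on commas inside maintainer names such as "Doe, Jane <...>": complete
addresses are separated on the ">, " boundary first. A quick sketch with
sample data:

    # Illustrative only: a naive split(",") would cut the first entry in
    # half; splitting on ">, " keeps each address intact.
    uploaders = "Doe, Jane <jane@example.org>, John Doe <john@example.org>"
    parts = uploaders.replace(">, ", ">\t").split("\t")
    # parts == ["Doe, Jane <jane@example.org>",
    #           "John Doe <john@example.org>"]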
 
@@ -2295,7 +2399,7 @@ def add_deb_to_db(u, filename, session=None):
     bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
     if len(bin_sources) != 1:
         raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                  (bin.package, bin.version, bin.architecture.arch_string,
+                                  (bin.package, bin.version, entry["architecture"],
                                    filename, bin.binarytype, u.pkg.changes["fingerprint"])
 
     bin.source_id = bin_sources[0].source_id
@@ -2384,11 +2488,7 @@ SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                  ('Priority', 'priority'),
                  ('NotAutomatic', 'notautomatic'),
                  ('CopyChanges', 'copychanges'),
-                 ('CopyDotDak', 'copydotdak'),
-                 ('CommentsDir', 'commentsdir'),
-                 ('OverrideSuite', 'overridesuite'),
-                 ('ChangelogBase', 'changelogbase')]
-
+                 ('OverrideSuite', 'overridesuite')]
 
 class Suite(object):
     def __init__(self, *args, **kwargs):
@@ -2492,8 +2592,8 @@ def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     """
     Returns list of Architecture objects for given C{suite} name
 
-    @type source: str
-    @param source: Suite name to search for
+    @type suite: str
+    @param suite: Suite name to search for
 
     @type skipsrc: boolean
     @param skipsrc: Whether to skip returning the 'source' architecture entry
@@ -2586,28 +2686,6 @@ class Uid(object):
 
 __all__.append('Uid')
 
-@session_wrapper
-def add_database_user(uidname, session=None):
-    """
-    Adds a database user
-
-    @type uidname: string
-    @param uidname: The uid of the user to add
-
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied).  If not passed, a commit will be performed at
-    the end of the function, otherwise the caller is responsible for commiting.
-
-    @rtype: Uid
-    @return: the uid object for the given uidname
-    """
-
-    session.execute("CREATE USER :uid", {'uid': uidname})
-    session.commit_or_flush()
-
-__all__.append('add_database_user')
-
 @session_wrapper
 def get_or_set_uid(uidname, session=None):
     """
@@ -2682,7 +2760,7 @@ class DBConn(object):
             self.__createconn()
 
     def __setuptables(self):
-        tables = (
+        tables_with_primary = (
             'architecture',
             'archive',
             'bin_associations',
@@ -2691,17 +2769,12 @@ class DBConn(object):
             'binary_acl_map',
             'build_queue',
             'build_queue_files',
+            'changelogs_text',
             'component',
             'config',
-            'content_associations',
-            'content_file_names',
-            'content_file_paths',
             'changes_pending_binaries',
             'changes_pending_files',
-            'changes_pending_files_map',
             'changes_pending_source',
-            'changes_pending_source_files',
-            'changes_pool_files',
             'dsc_files',
             'files',
             'fingerprint',
@@ -2711,9 +2784,8 @@ class DBConn(object):
             'location',
             'maintainer',
             'new_comments',
-            'override',
             'override_type',
-            'pending_content_associations',
+            'pending_bin_contents',
             'policy_queue',
             'priority',
             'section',
@@ -2723,14 +2795,33 @@ class DBConn(object):
             'src_format',
             'src_uploaders',
             'suite',
+            'uid',
+            'upload_blocks',
+        )
+
+        tables_no_primary = (
+            'bin_contents',
+            'changes_pending_files_map',
+            'changes_pending_source_files',
+            'changes_pool_files',
+            'deb_contents',
+            'override',
             'suite_architectures',
             'suite_src_formats',
             'suite_build_queue_copy',
-            'uid',
-            'upload_blocks',
+            'udeb_contents',
         )
 
-        for table_name in tables:
+        # SQLAlchemy fails to reflect the SERIAL type correctly and that
+        # is why we have to use a workaround. It can be removed as soon
+        # as we switch to version 0.6.
+        for table_name in tables_with_primary:
+            table = Table(table_name, self.db_meta, \
+                Column('id', Integer, primary_key = True), \
+                autoload=True, useexisting=True)
+            setattr(self, 'tbl_%s' % table_name, table)
+
+        for table_name in tables_no_primary:
             table = Table(table_name, self.db_meta, autoload=True)
             setattr(self, 'tbl_%s' % table_name, table)
 
@@ -2749,6 +2840,30 @@ class DBConn(object):
                                  binary_id = self.tbl_bin_associations.c.bin,
                                  binary = relation(DBBinary)))
 
+        mapper(PendingBinContents, self.tbl_pending_bin_contents,
+               properties = dict(contents_id =self.tbl_pending_bin_contents.c.id,
+                                 filename = self.tbl_pending_bin_contents.c.filename,
+                                 package = self.tbl_pending_bin_contents.c.package,
+                                 version = self.tbl_pending_bin_contents.c.version,
+                                 arch = self.tbl_pending_bin_contents.c.arch,
+                                 otype = self.tbl_pending_bin_contents.c.type))
+
+        mapper(DebContents, self.tbl_deb_contents,
+               properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
+                                 package=self.tbl_deb_contents.c.package,
+                                 suite=self.tbl_deb_contents.c.suite,
+                                 arch=self.tbl_deb_contents.c.arch,
+                                 section=self.tbl_deb_contents.c.section,
+                                 filename=self.tbl_deb_contents.c.filename))
+
+        mapper(UdebContents, self.tbl_udeb_contents,
+               properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
+                                 package=self.tbl_udeb_contents.c.package,
+                                 suite=self.tbl_udeb_contents.c.suite,
+                                 arch=self.tbl_udeb_contents.c.arch,
+                                 section=self.tbl_udeb_contents.c.section,
+                                 filename=self.tbl_udeb_contents.c.filename))
+
         mapper(BuildQueue, self.tbl_build_queue,
                properties = dict(queue_id = self.tbl_build_queue.c.id))
 
@@ -2821,6 +2936,16 @@ class DBConn(object):
                                  poolfiles = relation(PoolFile,
                                                       secondary=self.tbl_changes_pool_files,
                                                       backref="changeslinks"),
+                                 seen = self.tbl_changes.c.seen,
+                                 source = self.tbl_changes.c.source,
+                                 binaries = self.tbl_changes.c.binaries,
+                                 architecture = self.tbl_changes.c.architecture,
+                                 distribution = self.tbl_changes.c.distribution,
+                                 urgency = self.tbl_changes.c.urgency,
+                                 maintainer = self.tbl_changes.c.maintainer,
+                                 changedby = self.tbl_changes.c.changedby,
+                                 date = self.tbl_changes.c.date,
+                                 version = self.tbl_changes.c.version,
                                  files = relation(ChangePendingFile,
                                                   secondary=self.tbl_changes_pending_files_map,
                                                   backref="changesfile"),
@@ -2833,7 +2958,12 @@ class DBConn(object):
                properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
 
         mapper(ChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
+                                 filename = self.tbl_changes_pending_files.c.filename,
+                                 size = self.tbl_changes_pending_files.c.size,
+                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
+                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
+                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
 
         mapper(ChangePendingSource, self.tbl_changes_pending_source,
                properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
@@ -2846,6 +2976,8 @@ class DBConn(object):
                                  source_files = relation(ChangePendingFile,
                                                          secondary=self.tbl_changes_pending_source_files,
                                                          backref="pending_sources")))
+
+
         mapper(KeyringACLMap, self.tbl_keyring_acl_map,
                properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
                                  keyring = relation(Keyring, backref="keyring_acl_map"),
@@ -2868,6 +3000,7 @@ class DBConn(object):
         mapper(Override, self.tbl_override,
                properties = dict(suite_id = self.tbl_override.c.suite,
                                  suite = relation(Suite),
+                                 package = self.tbl_override.c.package,
                                  component_id = self.tbl_override.c.component,
                                  component = relation(Component),
                                  priority_id = self.tbl_override.c.priority,
@@ -2888,7 +3021,8 @@ class DBConn(object):
                properties = dict(priority_id = self.tbl_priority.c.id))
 
         mapper(Section, self.tbl_section,
-               properties = dict(section_id = self.tbl_section.c.id))
+               properties = dict(section_id = self.tbl_section.c.id,
+                                 section=self.tbl_section.c.section))
 
         mapper(DBSource, self.tbl_source,
                properties = dict(source_id = self.tbl_source.c.id,