Add by-hash support
[dak.git] / daklib / dbconn.py
old mode 100644 (file)
new mode 100755 (executable)
index d9ab4cb..58ad041
 ################################################################################
 
 import apt_pkg
+import daklib.daksubprocess
 import os
 from os.path import normpath
 import re
 import psycopg2
+import subprocess
 import traceback
-import commands
-import signal
-
-from daklib.gpg import SignedFile
 
 try:
     # python >= 2.6
@@ -54,7 +52,6 @@ except:
 from datetime import datetime, timedelta
 from errno import ENOENT
 from tempfile import mkstemp, mkdtemp
-from subprocess import Popen, PIPE
 from tarfile import TarFile
 
 from inspect import getargspec
@@ -112,11 +109,11 @@ class DebVersion(UserDefinedType):
         return None
 
 sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version in ["0.5", "0.6", "0.7"]:
+if sa_major_version in ["0.5", "0.6", "0.7", "0.8", "0.9", "1.0"]:
     from sqlalchemy.databases import postgres
     postgres.ischema_names['debversion'] = DebVersion
 else:
-    raise Exception("dak only ported to SQLA versions 0.5 to 0.7.  See daklib/dbconn.py")
+    raise Exception("dak only ported to SQLA versions 0.5 to 1.0 (%s installed).  See daklib/dbconn.py" % sa_major_version)
 
 ################################################################################
 
@@ -313,7 +310,7 @@ class ORMObject(object):
         return object_session(self)
 
     def clone(self, session = None):
-        '''
+        """
         Clones the current object in a new session and returns the new clone. A
         fresh session is created if the optional session parameter is not
         provided. The function will fail if a session is provided and has
@@ -326,8 +323,8 @@ class ORMObject(object):
         WARNING: Only persistent (committed) objects can be cloned. Changes
         made to the original object that are not committed yet will get lost.
         The session of the new object will always be rolled back to avoid
-        ressource leaks.
-        '''
+        resource leaks.
+        """
 
         if self.session() is None:
             raise RuntimeError( \
@@ -371,6 +368,20 @@ validator = Validator()
 
 ################################################################################
 
+class ACL(ORMObject):
+    def __repr__(self):
+        return "<ACL {0}>".format(self.name)
+
+__all__.append('ACL')
+
+class ACLPerSource(ORMObject):
+    def __repr__(self):
+        return "<ACLPerSource acl={0} fingerprint={1} source={2} reason={3}>".format(self.acl.name, self.fingerprint.fingerprint, self.source, self.reason)
+
+__all__.append('ACLPerSource')
+
+################################################################################
+
 class Architecture(ORMObject):
     def __init__(self, arch_string = None, description = None):
         self.arch_string = arch_string
@@ -421,27 +432,6 @@ def get_architecture(architecture, session=None):
 
 __all__.append('get_architecture')
 
-# TODO: should be removed because the implementation is too trivial
-@session_wrapper
-def get_architecture_suites(architecture, session=None):
-    """
-    Returns list of Suite objects for given C{architecture} name
-
-    @type architecture: str
-    @param architecture: Architecture name to search for
-
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: list
-    @return: list of Suite objects for the given name (may be empty)
-    """
-
-    return get_architecture(architecture, session).suites
-
-__all__.append('get_architecture_suites')
-
 ################################################################################
 
 class Archive(object):
@@ -507,11 +497,6 @@ __all__.append('BinContents')
 
 ################################################################################
 
-def subprocess_setup():
-    # Python installs a SIGPIPE handler by default. This is usually not what
-    # non-Python subprocesses expect.
-    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
 class DBBinary(ORMObject):
     def __init__(self, package = None, source = None, version = None, \
         maintainer = None, architecture = None, poolfile = None, \
@@ -540,9 +525,6 @@ class DBBinary(ORMObject):
 
     metadata = association_proxy('key', 'value')
 
-    def get_component_name(self):
-        return self.poolfile.location.component.component_name
-
     def scan_contents(self):
         '''
         Yields the contents of the package. Only regular files are yielded and
@@ -551,8 +533,8 @@ class DBBinary(ORMObject):
         package does not contain any regular file.
         '''
         fullpath = self.poolfile.fullpath
-        dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE,
-            preexec_fn = subprocess_setup)
+        dpkg_cmd = ('dpkg-deb', '--fsys-tarfile', fullpath)
+        dpkg = daklib.daksubprocess.Popen(dpkg_cmd, stdout=subprocess.PIPE)
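+        # dpkg-deb streams the package's filesystem tarball to stdout; the
+        # 'r|' mode below reads that pipe strictly sequentially, since a
+        # pipe cannot seek.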
         tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
         for member in tar.getmembers():
             if not member.isdir():
@@ -576,11 +558,8 @@ class DBBinary(ORMObject):
         '''
         import utils
         fullpath = self.poolfile.fullpath
-        deb_file = open(fullpath, 'r')
-        stanza = utils.deb_extract_control(deb_file)
-        deb_file.close()
-
-        return stanza
+        with open(fullpath, 'r') as deb_file:
+            return utils.deb_extract_control(deb_file)
 
     def read_control_fields(self):
         '''
@@ -590,10 +569,15 @@ class DBBinary(ORMObject):
         @rtype: dict
         @return: fields of the control section as a dictionary.
         '''
-        import apt_pkg
         stanza = self.read_control()
         return apt_pkg.TagSection(stanza)
 
+    @property
+    def proxy(self):
+        session = object_session(self)
+        query = session.query(BinaryMetadata).filter_by(binary=self)
+        return MetadataProxy(session, query)
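+    # Sketch of intended use, assuming MetadataProxy (defined elsewhere in
+    # this module) offers dict-style access:
+    #   binary.proxy['Description']   # value of a metadata key
+    #   'Depends' in binary.proxy     # membership test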
+
 __all__.append('DBBinary')
 
 @session_wrapper
@@ -641,73 +625,12 @@ def get_component_by_package_suite(package, suite_list, arch_list=[], session=No
     if binary is None:
         return None
     else:
-        return binary.get_component_name()
+        return binary.poolfile.component.component_name
 
 __all__.append('get_component_by_package_suite')
 
 ################################################################################
 
-class BinaryACL(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<BinaryACL %s>' % self.binary_acl_id
-
-__all__.append('BinaryACL')
-
-################################################################################
-
-class BinaryACLMap(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<BinaryACLMap %s>' % self.binary_acl_map_id
-
-__all__.append('BinaryACLMap')
-
-################################################################################
-
-MINIMAL_APT_CONF="""
-Dir
-{
-   ArchiveDir "%(archivepath)s";
-   OverrideDir "%(overridedir)s";
-   CacheDir "%(cachedir)s";
-};
-
-Default
-{
-   Packages::Compress ". bzip2 gzip";
-   Sources::Compress ". bzip2 gzip";
-   DeLinkLimit 0;
-   FileMode 0664;
-}
-
-bindirectory "incoming"
-{
-   Packages "Packages";
-   Contents " ";
-
-   BinOverride "override.sid.all3";
-   BinCacheDB "packages-accepted.db";
-
-   FileList "%(filelist)s";
-
-   PathPrefix "";
-   Packages::Extensions ".deb .udeb";
-};
-
-bindirectory "incoming/"
-{
-   Sources "Sources";
-   BinOverride "override.sid.all3";
-   SrcOverride "override.sid.all3.src";
-   FileList "%(filelist)s";
-};
-"""
-
 class BuildQueue(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -715,390 +638,8 @@ class BuildQueue(object):
     def __repr__(self):
         return '<BuildQueue %s>' % self.queue_name
 
-    def write_metadata(self, starttime, force=False):
-        # Do we write out metafiles?
-        if not (force or self.generate_metadata):
-            return
-
-        session = DBConn().session().object_session(self)
-
-        fl_fd = fl_name = ac_fd = ac_name = None
-        tempdir = None
-        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
-        startdir = os.getcwd()
-
-        try:
-            # Grab files we want to include
-            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
-            newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
-            # Write file list with newer files
-            (fl_fd, fl_name) = mkstemp()
-            for n in newer:
-                os.write(fl_fd, '%s\n' % n.fullpath)
-            os.close(fl_fd)
-
-            cnf = Config()
-
-            # Write minimal apt.conf
-            # TODO: Remove hardcoding from template
-            (ac_fd, ac_name) = mkstemp()
-            os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
-                                                'filelist': fl_name,
-                                                'cachedir': cnf["Dir::Cache"],
-                                                'overridedir': cnf["Dir::Override"],
-                                                })
-            os.close(ac_fd)
-
-            # Run apt-ftparchive generate
-            os.chdir(os.path.dirname(ac_name))
-            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
-
-            # Run apt-ftparchive release
-            # TODO: Eww - fix this
-            bname = os.path.basename(self.path)
-            os.chdir(self.path)
-            os.chdir('..')
-
-            # We have to remove the Release file otherwise it'll be included in the
-            # new one
-            try:
-                os.unlink(os.path.join(bname, 'Release'))
-            except OSError:
-                pass
-
-            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
-
-            # Crude hack with open and append, but this whole section is and should be redone.
-            if self.notautomatic:
-                release=open("Release", "a")
-                release.write("NotAutomatic: yes\n")
-                release.close()
-
-            # Sign if necessary
-            if self.signingkey:
-                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
-                if cnf.has_key("Dinstall::SigningPubKeyring"):
-                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
-
-                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
-
-            # Move the files if we got this far
-            os.rename('Release', os.path.join(bname, 'Release'))
-            if self.signingkey:
-                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
-
-        # Clean up any left behind files
-        finally:
-            os.chdir(startdir)
-            if fl_fd:
-                try:
-                    os.close(fl_fd)
-                except OSError:
-                    pass
-
-            if fl_name:
-                try:
-                    os.unlink(fl_name)
-                except OSError:
-                    pass
-
-            if ac_fd:
-                try:
-                    os.close(ac_fd)
-                except OSError:
-                    pass
-
-            if ac_name:
-                try:
-                    os.unlink(ac_name)
-                except OSError:
-                    pass
-
-    def clean_and_update(self, starttime, Logger, dryrun=False):
-        """WARNING: This routine commits for you"""
-        session = DBConn().session().object_session(self)
-
-        if self.generate_metadata and not dryrun:
-            self.write_metadata(starttime)
-
-        # Grab files older than our execution time
-        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
-        older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
-
-        for o in older:
-            killdb = False
-            try:
-                if dryrun:
-                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
-                else:
-                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
-                    os.unlink(o.fullpath)
-                    killdb = True
-            except OSError as e:
-                # If it wasn't there, don't worry
-                if e.errno == ENOENT:
-                    killdb = True
-                else:
-                    # TODO: Replace with proper logging call
-                    Logger.log(["E: Could not remove %s" % o.fullpath])
-
-            if killdb:
-                session.delete(o)
-
-        session.commit()
-
-        for f in os.listdir(self.path):
-            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
-                continue
-
-            if not self.contains_filename(f):
-                fp = os.path.join(self.path, f)
-                if dryrun:
-                    Logger.log(["I: Would remove unused link %s" % fp])
-                else:
-                    Logger.log(["I: Removing unused link %s" % fp])
-                    try:
-                        os.unlink(fp)
-                    except OSError:
-                        Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
-
-    def contains_filename(self, filename):
-        """
-        @rtype Boolean
-        @returns True if filename is supposed to be in the queue; False otherwise
-        """
-        session = DBConn().session().object_session(self)
-        if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
-            return True
-        elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
-            return True
-        return False
-
-    def add_file_from_pool(self, poolfile):
-        """Copies a file into the pool.  Assumes that the PoolFile object is
-        attached to the same SQLAlchemy session as the Queue object is.
-
-        The caller is responsible for committing after calling this function."""
-        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-
-        # Check if we have a file of this name or this ID already
-        for f in self.queuefiles:
-            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
-               (f.poolfile is not None and f.poolfile.filename == poolfile_basename):
-                   # In this case, update the BuildQueueFile entry so we
-                   # don't remove it too early
-                   f.lastused = datetime.now()
-                   DBConn().session().object_session(poolfile).add(f)
-                   return f
-
-        # Prepare BuildQueueFile object
-        qf = BuildQueueFile()
-        qf.build_queue_id = self.queue_id
-        qf.filename = poolfile_basename
-
-        targetpath = poolfile.fullpath
-        queuepath = os.path.join(self.path, poolfile_basename)
-
-        try:
-            if self.copy_files:
-                # We need to copy instead of symlink
-                import utils
-                utils.copy(targetpath, queuepath)
-                # NULL in the fileid field implies a copy
-                qf.fileid = None
-            else:
-                os.symlink(targetpath, queuepath)
-                qf.fileid = poolfile.file_id
-        except FileExistsError:
-            if not poolfile.identical_to(queuepath):
-                raise
-        except OSError:
-            return None
-
-        # Get the same session as the PoolFile is using and add the qf to it
-        DBConn().session().object_session(poolfile).add(qf)
-
-        return qf
-
-    def add_changes_from_policy_queue(self, policyqueue, changes):
-        """
-        Copies a changes from a policy queue together with its poolfiles.
-
-        @type policyqueue: PolicyQueue
-        @param policyqueue: policy queue to copy the changes from
-
-        @type changes: DBChange
-        @param changes: changes to copy to this build queue
-        """
-        for policyqueuefile in changes.files:
-            self.add_file_from_policy_queue(policyqueue, policyqueuefile)
-        for poolfile in changes.poolfiles:
-            self.add_file_from_pool(poolfile)
-
-    def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
-        """
-        Copies a file from a policy queue.
-        Assumes that the policyqueuefile is attached to the same SQLAlchemy
-        session as the Queue object is.  The caller is responsible for
-        committing after calling this function.
-
-        @type policyqueue: PolicyQueue
-        @param policyqueue: policy queue to copy the file from
-
-        @type policyqueuefile: ChangePendingFile
-        @param policyqueuefile: file to be added to the build queue
-        """
-        session = DBConn().session().object_session(policyqueuefile)
-
-        # Is the file already there?
-        try:
-            f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
-            f.lastused = datetime.now()
-            return f
-        except NoResultFound:
-            pass # continue below
-
-        # We have to add the file.
-        f = BuildQueuePolicyFile()
-        f.build_queue = self
-        f.file = policyqueuefile
-        f.filename = policyqueuefile.filename
-
-        source = os.path.join(policyqueue.path, policyqueuefile.filename)
-        target = f.fullpath
-        try:
-            # Always copy files from policy queues as they might move around.
-            import utils
-            utils.copy(source, target)
-        except FileExistsError:
-            if not policyqueuefile.identical_to(target):
-                raise
-        except OSError:
-            return None
-
-        session.add(f)
-        return f
-
 __all__.append('BuildQueue')
 
-@session_wrapper
-def get_build_queue(queuename, session=None):
-    """
-    Returns BuildQueue object for given C{queue name}, creating it if it does not
-    exist.
-
-    @type queuename: string
-    @param queuename: The name of the queue
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: BuildQueue
-    @return: BuildQueue object for the given queue
-    """
-
-    q = session.query(BuildQueue).filter_by(queue_name=queuename)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_build_queue')
-
-################################################################################
-
-class BuildQueueFile(object):
-    """
-    BuildQueueFile represents a file in a build queue coming from a pool.
-    """
-
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
-
-    @property
-    def fullpath(self):
-        return os.path.join(self.buildqueue.path, self.filename)
-
-
-__all__.append('BuildQueueFile')
-
-################################################################################
-
-class BuildQueuePolicyFile(object):
-    """
-    BuildQueuePolicyFile represents a file in a build queue that comes from a
-    policy queue (and not a pool).
-    """
-
-    def __init__(self, *args, **kwargs):
-        pass
-
-    #@property
-    #def filename(self):
-    #    return self.file.filename
-
-    @property
-    def fullpath(self):
-        return os.path.join(self.build_queue.path, self.filename)
-
-__all__.append('BuildQueuePolicyFile')
-
-################################################################################
-
-class ChangePendingBinary(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
-
-__all__.append('ChangePendingBinary')
-
-################################################################################
-
-class ChangePendingFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingFile %s>' % self.change_pending_file_id
-
-    def identical_to(self, filename):
-        """
-        compare size and hash with the given file
-
-        @rtype: bool
-        @return: true if the given file has the same size and hash as this object; false otherwise
-        """
-        st = os.stat(filename)
-        if self.size != st.st_size:
-            return False
-
-        f = open(filename, "r")
-        sha256sum = apt_pkg.sha256sum(f)
-        if sha256sum != self.sha256sum:
-            return False
-
-        return True
-
-__all__.append('ChangePendingFile')
-
-################################################################################
-
-class ChangePendingSource(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingSource %s>' % self.change_pending_source_id
-
-__all__.append('ChangePendingSource')
-
 ################################################################################
 
 class Component(ORMObject):
@@ -1119,7 +660,7 @@ class Component(ORMObject):
 
     def properties(self):
         return ['component_name', 'component_id', 'description', \
-            'location_count', 'meets_dfsg', 'overrides_count']
+            'meets_dfsg', 'overrides_count']
 
     def not_null_constraints(self):
         return ['component_name']
@@ -1150,224 +691,63 @@ def get_component(component, session=None):
 
 __all__.append('get_component')
 
-@session_wrapper
-def get_component_names(session=None):
-    """
-    Returns list of strings of component names.
-
-    @rtype: list
-    @return: list of strings of component names
-    """
-
-    return [ x.component_name for x in session.query(Component).all() ]
-
-__all__.append('get_component_names')
-
-################################################################################
-
-class DBConfig(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<DBConfig %s>' % self.name
-
-__all__.append('DBConfig')
-
-################################################################################
-
-@session_wrapper
-def get_or_set_contents_file_id(filename, session=None):
-    """
-    Returns database id for given filename.
-
-    If no matching file is found, a row is inserted.
-
-    @type filename: string
-    @param filename: The filename
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied).  If not passed, a commit will be performed at
-    the end of the function, otherwise the caller is responsible for commiting.
-
-    @rtype: int
-    @return: the database id for the given component
-    """
-
-    q = session.query(ContentFilename).filter_by(filename=filename)
-
-    try:
-        ret = q.one().cafilename_id
-    except NoResultFound:
-        cf = ContentFilename()
-        cf.filename = filename
-        session.add(cf)
-        session.commit_or_flush()
-        ret = cf.cafilename_id
-
-    return ret
+def get_mapped_component_name(component_name):
+    cnf = Config()
+    for m in cnf.value_list("ComponentMappings"):
+        (src, dst) = m.split()
+        if component_name == src:
+            component_name = dst
+    return component_name
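+# Example: a hypothetical 'ComponentMappings { "contrib main"; };' entry in
+# dak.conf makes get_mapped_component_name('contrib') return 'main'; names
+# without a mapping pass through unchanged.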
 
-__all__.append('get_or_set_contents_file_id')
+__all__.append('get_mapped_component_name')
 
 @session_wrapper
-def get_contents(suite, overridetype, section=None, session=None):
-    """
-    Returns contents for a suite / overridetype combination, limiting
-    to a section if not None.
+def get_mapped_component(component_name, session=None):
+    """get component after mappings
 
-    @type suite: Suite
-    @param suite: Suite object
+    Evaluate component mappings from ComponentMappings in dak.conf for the
+    given component name.
 
-    @type overridetype: OverrideType
-    @param overridetype: OverrideType object
+    @todo: ansgar wants to get rid of this. It's currently only used for
+           the security archive
 
-    @type section: Section
-    @param section: Optional section object to limit results to
+    @type  component_name: str
+    @param component_name: component name
 
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
+    @param session: database session
 
-    @rtype: ResultsProxy
-    @return: ResultsProxy object set up to return tuples of (filename, section,
-    package, arch_id)
+    @rtype:  L{daklib.dbconn.Component} or C{None}
+    @return: component after applying maps or C{None}
     """
+    component_name = get_mapped_component_name(component_name)
+    component = session.query(Component).filter_by(component_name=component_name).first()
+    return component
 
-    # find me all of the contents for a given suite
-    contents_q = """SELECT (p.path||'/'||n.file) AS fn,
-                            s.section,
-                            b.package,
-                            b.architecture
-                   FROM content_associations c join content_file_paths p ON (c.filepath=p.id)
-                   JOIN content_file_names n ON (c.filename=n.id)
-                   JOIN binaries b ON (b.id=c.binary_pkg)
-                   JOIN override o ON (o.package=b.package)
-                   JOIN section s ON (s.id=o.section)
-                   WHERE o.suite = :suiteid AND o.type = :overridetypeid
-                   AND b.type=:overridetypename"""
-
-    vals = {'suiteid': suite.suite_id,
-            'overridetypeid': overridetype.overridetype_id,
-            'overridetypename': overridetype.overridetype}
-
-    if section is not None:
-        contents_q += " AND s.id = :sectionid"
-        vals['sectionid'] = section.section_id
-
-    contents_q += " ORDER BY fn"
-
-    return session.execute(contents_q, vals)
-
-__all__.append('get_contents')
-
-################################################################################
-
-class ContentFilepath(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ContentFilepath %s>' % self.filepath
-
-__all__.append('ContentFilepath')
+__all__.append('get_mapped_component')
 
 @session_wrapper
-def get_or_set_contents_path_id(filepath, session=None):
+def get_component_names(session=None):
     """
-    Returns database id for given path.
-
-    If no matching file is found, a row is inserted.
-
-    @type filepath: string
-    @param filepath: The filepath
-
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied).  If not passed, a commit will be performed at
-    the end of the function, otherwise the caller is responsible for commiting.
+    Returns list of strings of component names.
 
-    @rtype: int
-    @return: the database id for the given path
+    @rtype: list
+    @return: list of strings of component names
     """
 
-    q = session.query(ContentFilepath).filter_by(filepath=filepath)
-
-    try:
-        ret = q.one().cafilepath_id
-    except NoResultFound:
-        cf = ContentFilepath()
-        cf.filepath = filepath
-        session.add(cf)
-        session.commit_or_flush()
-        ret = cf.cafilepath_id
-
-    return ret
+    return [ x.component_name for x in session.query(Component).all() ]
 
-__all__.append('get_or_set_contents_path_id')
+__all__.append('get_component_names')
 
 ################################################################################
 
-class ContentAssociation(object):
+class DBConfig(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<ContentAssociation %s>' % self.ca_id
-
-__all__.append('ContentAssociation')
-
-def insert_content_paths(binary_id, fullpaths, session=None):
-    """
-    Make sure given path is associated with given binary id
-
-    @type binary_id: int
-    @param binary_id: the id of the binary
-    @type fullpaths: list
-    @param fullpaths: the list of paths of the file being associated with the binary
-    @type session: SQLAlchemy session
-    @param session: Optional SQLAlchemy session.  If this is passed, the caller
-    is responsible for ensuring a transaction has begun and committing the
-    results or rolling back based on the result code.  If not passed, a commit
-    will be performed at the end of the function, otherwise the caller is
-    responsible for commiting.
-
-    @return: True upon success
-    """
-
-    privatetrans = False
-    if session is None:
-        session = DBConn().session()
-        privatetrans = True
-
-    try:
-        # Insert paths
-        def generate_path_dicts():
-            for fullpath in fullpaths:
-                if fullpath.startswith( './' ):
-                    fullpath = fullpath[2:]
-
-                yield {'filename':fullpath, 'id': binary_id }
-
-        for d in generate_path_dicts():
-            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
-                         d )
-
-        session.commit()
-        if privatetrans:
-            session.close()
-        return True
-
-    except:
-        traceback.print_exc()
-
-        # Only rollback if we set up the session ourself
-        if privatetrans:
-            session.rollback()
-            session.close()
-
-        return False
+        return '<DBConfig %s>' % self.name
 
-__all__.append('insert_content_paths')
+__all__.append('DBConfig')
 
 ################################################################################
 
@@ -1427,17 +807,18 @@ __all__.append('ExternalOverride')
 ################################################################################
 
 class PoolFile(ORMObject):
-    def __init__(self, filename = None, location = None, filesize = -1, \
+    def __init__(self, filename = None, filesize = -1, \
         md5sum = None):
         self.filename = filename
-        self.location = location
         self.filesize = filesize
         self.md5sum = md5sum
 
     @property
     def fullpath(self):
         session = DBConn().session().object_session(self)
-        af = session.query(ArchiveFile).join(Archive).filter(ArchiveFile.file == self).first()
+        af = session.query(ArchiveFile).join(Archive) \
+                    .filter(ArchiveFile.file == self) \
+                    .order_by(Archive.tainted.desc()).first()
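+        # A file can exist in several archives; tainted archives sort first
+        # under desc(), presumably so a local (tainted) copy wins over the
+        # pristine pool copy.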
         return af.path
 
     @property
@@ -1458,134 +839,28 @@ class PoolFile(ORMObject):
         return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
             'sha256sum', 'source', 'binary', 'last_used']
 
-    def not_null_constraints(self):
-        return ['filename', 'md5sum']
-
-    def identical_to(self, filename):
-        """
-        compare size and hash with the given file
-
-        @rtype: bool
-        @return: true if the given file has the same size and hash as this object; false otherwise
-        """
-        st = os.stat(filename)
-        if self.filesize != st.st_size:
-            return False
-
-        f = open(filename, "r")
-        sha256sum = apt_pkg.sha256sum(f)
-        if sha256sum != self.sha256sum:
-            return False
-
-        return True
-
-__all__.append('PoolFile')
-
-@session_wrapper
-def check_poolfile(filename, filesize, md5sum, location_id, session=None):
-    """
-    Returns a tuple:
-    (ValidFileFound [boolean], PoolFile object or None)
-
-    @type filename: string
-    @param filename: the filename of the file to check against the DB
-
-    @type filesize: int
-    @param filesize: the size of the file to check against the DB
-
-    @type md5sum: string
-    @param md5sum: the md5sum of the file to check against the DB
-
-    @type location_id: int
-    @param location_id: the id of the location to look in
-
-    @rtype: tuple
-    @return: Tuple of length 2.
-                 - If valid pool file found: (C{True}, C{PoolFile object})
-                 - If valid pool file not found:
-                     - (C{False}, C{None}) if no file found
-                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
-    """
-
-    poolfile = session.query(Location).get(location_id). \
-        files.filter_by(filename=filename).first()
-    valid = False
-    if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
-        valid = True
-
-    return (valid, poolfile)
-
-__all__.append('check_poolfile')
-
-# TODO: the implementation can trivially be inlined at the place where the
-# function is called
-@session_wrapper
-def get_poolfile_by_id(file_id, session=None):
-    """
-    Returns a PoolFile objects or None for the given id
-
-    @type file_id: int
-    @param file_id: the id of the file to look for
-
-    @rtype: PoolFile or None
-    @return: either the PoolFile object or None
-    """
-
-    return session.query(PoolFile).get(file_id)
-
-__all__.append('get_poolfile_by_id')
-
-@session_wrapper
-def get_poolfile_like_name(filename, session=None):
-    """
-    Returns an array of PoolFile objects which are like the given name
-
-    @type filename: string
-    @param filename: the filename of the file to check against the DB
-
-    @rtype: array
-    @return: array of PoolFile objects
-    """
-
-    # TODO: There must be a way of properly using bind parameters with %FOO%
-    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
-
-    return q.all()
-
-__all__.append('get_poolfile_like_name')
-
-@session_wrapper
-def add_poolfile(filename, datadict, location_id, session=None):
-    """
-    Add a new file to the pool
-
-    @type filename: string
-    @param filename: filename
-
-    @type datadict: dict
-    @param datadict: dict with needed data
+    def not_null_constraints(self):
+        return ['filename', 'md5sum']
 
-    @type location_id: int
-    @param location_id: database id of the location
+    def identical_to(self, filename):
+        """
+        compare size and hash with the given file
 
-    @rtype: PoolFile
-    @return: the PoolFile object created
-    """
-    poolfile = PoolFile()
-    poolfile.filename = filename
-    poolfile.filesize = datadict["size"]
-    poolfile.md5sum = datadict["md5sum"]
-    poolfile.sha1sum = datadict["sha1sum"]
-    poolfile.sha256sum = datadict["sha256sum"]
-    poolfile.location_id = location_id
+        @rtype: bool
+        @return: true if the given file has the same size and hash as this object; false otherwise
+        """
+        st = os.stat(filename)
+        if self.filesize != st.st_size:
+            return False
 
-    session.add(poolfile)
-    # Flush to get a file id (NB: This is not a commit)
-    session.flush()
+        f = open(filename, "r")
+        sha256sum = apt_pkg.sha256sum(f)
+        if sha256sum != self.sha256sum:
+            return False
 
-    return poolfile
+        return True
 
-__all__.append('add_poolfile')
+__all__.append('PoolFile')
 
 ################################################################################
 
@@ -1678,9 +953,6 @@ def get_ldap_name(entry):
 ################################################################################
 
 class Keyring(object):
-    gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
-                     " --with-colons --fingerprint --fingerprint"
-
     keys = {}
     fpr_lookup = {}
 
@@ -1710,11 +982,14 @@ class Keyring(object):
         if not self.keyring_id:
             raise Exception('Must be initialized with database information')
 
-        k = os.popen(self.gpg_invocation % keyring, "r")
+        cmd = ["gpg", "--no-default-keyring", "--keyring", keyring,
+               "--with-colons", "--fingerprint", "--fingerprint"]
+        p = daklib.daksubprocess.Popen(cmd, stdout=subprocess.PIPE)
+
         key = None
-        signingkey = False
+        need_fingerprint = False
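+        # "gpg --with-colons" emits one ':'-separated record per line:
+        # field[0] is the record type (pub, uid, fpr, ...), field[4] the
+        # long key id on 'pub' records, and field[9] the user id or, on
+        # 'fpr' records, the fingerprint itself.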
 
-        for line in k:
+        for line in p.stdout:
             field = line.split(":")
             if field[0] == "pub":
                 key = field[4]
@@ -1723,18 +998,20 @@ class Keyring(object):
                 if "@" in addr:
                     self.keys[key]["email"] = addr
                     self.keys[key]["name"] = name
-                self.keys[key]["fingerprints"] = []
-                signingkey = True
-            elif key and field[0] == "sub" and len(field) >= 12:
-                signingkey = ("s" in field[11])
+                need_fingerprint = True
             elif key and field[0] == "uid":
                 (name, addr) = self.parse_address(field[9])
                 if "email" not in self.keys[key] and "@" in addr:
                     self.keys[key]["email"] = addr
                     self.keys[key]["name"] = name
-            elif signingkey and field[0] == "fpr":
-                self.keys[key]["fingerprints"].append(field[9])
+            elif need_fingerprint and field[0] == "fpr":
+                self.keys[key]["fingerprints"] = [field[9]]
                 self.fpr_lookup[field[9]] = key
+                need_fingerprint = False
+
+        r = p.wait()
+        if r != 0:
+            raise subprocess.CalledProcessError(r, cmd)
 
     def import_users_from_ldap(self, session):
         import ldap
@@ -1742,11 +1019,19 @@ class Keyring(object):
 
         LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
         LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
+        ca_cert_file = cnf.get('Import-LDAP-Fingerprints::CACertFile')
 
         l = ldap.open(LDAPServer)
+
+        if ca_cert_file:
+            l.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
+            l.set_option(ldap.OPT_X_TLS_CACERTFILE, ca_cert_file)
+            l.set_option(ldap.OPT_X_TLS_NEWCTX, True)
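+            # OPT_X_TLS_NEWCTX is set last so libldap builds a fresh TLS
+            # context that picks up the options above before STARTTLS.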
+            l.start_tls_s()
+
         l.simple_bind_s("","")
         Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
-               "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
+               "(&(keyfingerprint=*)(supplementaryGid=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
                ["uid", "keyfingerprint", "cn", "mn", "sn"])
 
         ldap_fin_uid_id = {}
@@ -1831,35 +1116,6 @@ def get_active_keyring_paths(session=None):
 
 __all__.append('get_active_keyring_paths')
 
-@session_wrapper
-def get_primary_keyring_path(session=None):
-    """
-    Get the full path to the highest priority active keyring
-
-    @rtype: str or None
-    @return: path to the active keyring with the highest priority or None if no
-             keyring is configured
-    """
-    keyrings = get_active_keyring_paths()
-
-    if len(keyrings) > 0:
-        return keyrings[0]
-    else:
-        return None
-
-__all__.append('get_primary_keyring_path')
-
-################################################################################
-
-class KeyringACLMap(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<KeyringACLMap %s>' % self.keyring_acl_map_id
-
-__all__.append('KeyringACLMap')
-
 ################################################################################
 
 class DBChange(object):
@@ -1869,19 +1125,6 @@ class DBChange(object):
     def __repr__(self):
         return '<DBChange %s>' % self.changesname
 
-    def clean_from_queue(self):
-        session = DBConn().session().object_session(self)
-
-        # Remove changes_pool_files entries
-        self.poolfiles = []
-
-        # Remove changes_pending_files references
-        self.files = []
-
-        # Clear out of queue
-        self.in_queue = None
-        self.approved_for_id = None
-
 __all__.append('DBChange')
 
 @session_wrapper
@@ -1911,58 +1154,6 @@ __all__.append('get_dbchange')
 
 ################################################################################
 
-class Location(ORMObject):
-    def __init__(self, path = None, component = None):
-        self.path = path
-        self.component = component
-        # the column 'type' should go away, see comment at mapper
-        self.archive_type = 'pool'
-
-    def properties(self):
-        return ['path', 'location_id', 'archive_type', 'component', \
-            'files_count']
-
-    def not_null_constraints(self):
-        return ['path', 'archive_type']
-
-__all__.append('Location')
-
-@session_wrapper
-def get_location(location, component=None, archive=None, session=None):
-    """
-    Returns Location object for the given combination of location, component
-    and archive
-
-    @type location: string
-    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
-
-    @type component: string
-    @param component: the component name (if None, no restriction applied)
-
-    @type archive: string
-    @param archive: the archive name (if None, no restriction applied)
-
-    @rtype: Location / None
-    @return: Either a Location object or None if one can't be found
-    """
-
-    q = session.query(Location).filter_by(path=location)
-
-    if archive is not None:
-        q = q.join(Archive).filter_by(archive_name=archive)
-
-    if component is not None:
-        q = q.join(Component).filter_by(component_name=component)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_location')
-
-################################################################################
-
 class Maintainer(ORMObject):
     def __init__(self, name = None):
         self.name = name
@@ -2044,7 +1235,7 @@ class NewComment(object):
 __all__.append('NewComment')
 
 @session_wrapper
-def has_new_comment(package, version, session=None):
+def has_new_comment(policy_queue, package, version, session=None):
     """
-    Returns true if the given combination of C{package}, C{version} has a comment.
+    Returns true if the given combination of C{policy_queue}, C{package},
+    C{version} has a comment.
 
@@ -2062,7 +1253,7 @@ def has_new_comment(package, version, session=None):
     @return: true/false
     """
 
-    q = session.query(NewComment)
+    q = session.query(NewComment).filter_by(policy_queue=policy_queue)
     q = q.filter_by(package=package)
     q = q.filter_by(version=version)
 
@@ -2071,7 +1262,7 @@ def has_new_comment(package, version, session=None):
 __all__.append('has_new_comment')
 
 @session_wrapper
-def get_new_comments(package=None, version=None, comment_id=None, session=None):
+def get_new_comments(policy_queue, package=None, version=None, comment_id=None, session=None):
     """
     Returns (possibly empty) list of NewComment objects for the given
     parameters
@@ -2093,7 +1284,7 @@ def get_new_comments(package=None, version=None, comment_id=None, session=None):
     @return: A (possibly empty) list of NewComment objects will be returned
     """
 
-    q = session.query(NewComment)
+    q = session.query(NewComment).filter_by(policy_queue=policy_queue)
     if package is not None: q = q.filter_by(package=package)
     if version is not None: q = q.filter_by(version=version)
     if comment_id is not None: q = q.filter_by(comment_id=comment_id)
@@ -2246,31 +1437,6 @@ def get_policy_queue(queuename, session=None):
 
 __all__.append('get_policy_queue')
 
-@session_wrapper
-def get_policy_queue_from_path(pathname, session=None):
-    """
-    Returns PolicyQueue object for given C{path name}
-
-    @type queuename: string
-    @param queuename: The path
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: PolicyQueue
-    @return: PolicyQueue object for the given queue
-    """
-
-    q = session.query(PolicyQueue).filter_by(path=pathname)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_policy_queue_from_path')
-
 ################################################################################
 
 class PolicyQueueUpload(object):
@@ -2445,6 +1611,29 @@ __all__.append('get_sections')
 
 ################################################################################
 
+class SignatureHistory(ORMObject):
+    @classmethod
+    def from_signed_file(cls, signed_file):
+        """signature history entry from signed file
+
+        @type  signed_file: L{daklib.gpg.SignedFile}
+        @param signed_file: signed file
+
+        @rtype: L{SignatureHistory}
+        """
+        self = cls()
+        self.fingerprint = signed_file.primary_fingerprint
+        self.signature_timestamp = signed_file.signature_timestamp
+        self.contents_sha1 = signed_file.contents_sha1()
+        return self
+
+    def query(self, session):
+        return session.query(SignatureHistory).filter_by(fingerprint=self.fingerprint, signature_timestamp=self.signature_timestamp, contents_sha1=self.contents_sha1).first()
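+    # Sketch of intended use (hypothetical caller): store each signature the
+    # first time it is seen, so replayed uploads can be detected:
+    #   sh = SignatureHistory.from_signed_file(signed_file)
+    #   if sh.query(session) is None:
+    #       session.add(sh)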
+
+__all__.append('SignatureHistory')
+
+################################################################################
+
 class SrcContents(ORMObject):
     def __init__(self, file = None, source = None):
         self.file = file
@@ -2457,6 +1646,60 @@ __all__.append('SrcContents')
 
 ################################################################################
 
+from debian.debfile import Deb822
+
+# Temporary Deb822 subclass to fix bugs with : handling; see #597249
+class Dak822(Deb822):
+    def _internal_parser(self, sequence, fields=None):
+        # The key is non-whitespace, non-colon characters before any colon.
+        key_part = r"^(?P<key>[^: \t\n\r\f\v]+)\s*:\s*"
+        single = re.compile(key_part + r"(?P<data>\S.*?)\s*$")
+        multi = re.compile(key_part + r"$")
+        multidata = re.compile(r"^\s(?P<data>.+?)\s*$")
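+        # key_part tolerates "Key:value" with no whitespace after the colon
+        # (presumably the ':' handling that #597249 refers to); the
+        # non-greedy data group strips trailing whitespace from values.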
+
+        wanted_field = lambda f: fields is None or f in fields
+
+        if isinstance(sequence, basestring):
+            sequence = sequence.splitlines()
+
+        curkey = None
+        content = ""
+        for line in self.gpg_stripped_paragraph(sequence):
+            m = single.match(line)
+            if m:
+                if curkey:
+                    self[curkey] = content
+
+                if not wanted_field(m.group('key')):
+                    curkey = None
+                    continue
+
+                curkey = m.group('key')
+                content = m.group('data')
+                continue
+
+            m = multi.match(line)
+            if m:
+                if curkey:
+                    self[curkey] = content
+
+                if not wanted_field(m.group('key')):
+                    curkey = None
+                    continue
+
+                curkey = m.group('key')
+                content = ""
+                continue
+
+            m = multidata.match(line)
+            if m:
+                content += '\n' + line # XXX not m.group('data')?
+                continue
+
+        if curkey:
+            self[curkey] = content
+
+
 class DBSource(ORMObject):
     def __init__(self, source = None, version = None, maintainer = None, \
         changedby = None, poolfile = None, install_date = None, fingerprint = None):
@@ -2478,7 +1721,7 @@ class DBSource(ORMObject):
             'install_date', 'binaries_count', 'uploaders_count']
 
     def not_null_constraints(self):
-        return ['source', 'version', 'install_date', 'maintainer', \
+        return ['source', 'version', 'maintainer', \
             'changedby', 'poolfile']
 
     def read_control_fields(self):
@@ -2489,16 +1732,11 @@ class DBSource(ORMObject):
         @return: fields is the dsc information in a dictionary form
         '''
         fullpath = self.poolfile.fullpath
-        contents = open(fullpath, 'r').read()
-        signed_file = SignedFile(contents, keyrings=[], require_signature=False)
-        fields = apt_pkg.TagSection(signed_file.contents)
+        with open(fullpath, 'r') as fd:
+            fields = Dak822(fd)
         return fields
 
     metadata = association_proxy('key', 'value')
 
-    def get_component_name(self):
-        return self.poolfile.location.component.component_name
-
     def scan_contents(self):
         '''
         Returns a set of names for non directories. The path names are
@@ -2518,62 +1756,13 @@ class DBSource(ORMObject):
             fileset.add(name)
         return fileset
 
-__all__.append('DBSource')
-
-@session_wrapper
-def source_exists(source, source_version, suites = ["any"], session=None):
-    """
-    Ensure that source exists somewhere in the archive for the binary
-    upload being processed.
-      1. exact match     => 1.0-3
-      2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1
-
-    @type source: string
-    @param source: source name
-
-    @type source_version: string
-    @param source_version: expected source version
-
-    @type suites: list
-    @param suites: list of suites to check in, default I{any}
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: int
-    @return: returns 1 if a source with expected version is found, otherwise 0
-
-    """
-
-    cnf = Config()
-    ret = True
-
-    from daklib.regexes import re_bin_only_nmu
-    orig_source_version = re_bin_only_nmu.sub('', source_version)
-
-    for suite in suites:
-        q = session.query(DBSource).filter_by(source=source). \
-            filter(DBSource.version.in_([source_version, orig_source_version]))
-        if suite != "any":
-            # source must exist in 'suite' or a suite that is enhanced by 'suite'
-            s = get_suite(suite, session)
-            if s:
-                enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
-                considered_suites = [ vc.reference for vc in enhances_vcs ]
-                considered_suites.append(s)
-
-                q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))
-
-        if q.count() > 0:
-            continue
-
-        # No source found so return not ok
-        ret = False
-
-    return ret
+    @property
+    def proxy(self):
+        session = object_session(self)
+        query = session.query(SourceMetadata).filter_by(source=self)
+        return MetadataProxy(session, query)
 
-__all__.append('source_exists')
+__all__.append('DBSource')
 
 @session_wrapper
 def get_suites_source_in(source, session=None):
@@ -2591,65 +1780,31 @@ def get_suites_source_in(source, session=None):
 
 __all__.append('get_suites_source_in')
 
-@session_wrapper
-def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None):
-    """
-    Returns list of DBSource objects for given C{source} name and other parameters
-
-    @type source: str
-    @param source: DBSource package name to search for
-
-    @type version: str or None
-    @param version: DBSource version name to search for or None if not applicable
-
-    @type dm_upload_allowed: bool
-    @param dm_upload_allowed: If None, no effect.  If True or False, only
-    return packages with that dm_upload_allowed setting
-
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: list
-    @return: list of DBSource objects for the given name (may be empty)
-    """
-
-    q = session.query(DBSource).filter_by(source=source)
-
-    if version is not None:
-        q = q.filter_by(version=version)
-
-    if dm_upload_allowed is not None:
-        q = q.filter_by(dm_upload_allowed=dm_upload_allowed)
-
-    return q.all()
-
-__all__.append('get_sources_from_name')
-
 # FIXME: This function fails badly if it finds more than 1 source package and
 # its implementation is trivial enough to be inlined.
 @session_wrapper
-def get_source_in_suite(source, suite, session=None):
+def get_source_in_suite(source, suite_name, session=None):
     """
-    Returns a DBSource object for a combination of C{source} and C{suite}.
+    Returns a DBSource object for a combination of C{source} and C{suite_name}.
 
       - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
-      - B{suite} - a suite name, eg. I{unstable}
+      - B{suite_name} - a suite name, eg. I{unstable}
 
     @type source: string
     @param source: source package name
 
-    @type suite: string
-    @param suite: the suite name
+    @type suite_name: string
+    @param suite_name: the suite name
 
-    @rtype: string
-    @return: the version for I{source} in I{suite}
+    @rtype: DBSource or None
+    @return: the DBSource object for I{source} in I{suite_name}, or None
 
     """
-
-    q = get_suite(suite, session).get_sources(source)
+    suite = get_suite(suite_name, session)
+    if suite is None:
+        return None
     try:
-        return q.one()
+        return suite.get_sources(source).one()
     except NoResultFound:
         return None
 
@@ -2682,213 +1837,6 @@ def import_metadata_into_db(obj, session=None):
 
 __all__.append('import_metadata_into_db')
 
-
-################################################################################
-
-def split_uploaders(uploaders_list):
-    '''
-    Split the Uploaders field into the individual uploaders and yield each of
-    them. Beware: email addresses might contain commas.
-    '''
-    import re
-    for uploader in re.sub(">[ ]*,", ">\t", uploaders_list).split("\t"):
-        yield uploader.strip()
-
-@session_wrapper
-def add_dsc_to_db(u, filename, session=None):
-    entry = u.pkg.files[filename]
-    source = DBSource()
-    pfs = []
-
-    source.source = u.pkg.dsc["source"]
-    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
-    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
-    # If Changed-By isn't available, fall back to maintainer
-    if u.pkg.changes.has_key("changed-by"):
-        source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
-    else:
-        source.changedby_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
-    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    source.install_date = datetime.now().date()
-
-    dsc_component = entry["component"]
-    dsc_location_id = entry["location id"]
-
-    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
-
-    # Set up a new poolfile if necessary
-    if not entry.has_key("files id") or not entry["files id"]:
-        filename = entry["pool name"] + filename
-        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
-        session.flush()
-        pfs.append(poolfile)
-        entry["files id"] = poolfile.file_id
-
-    source.poolfile_id = entry["files id"]
-    session.add(source)
-
-    suite_names = u.pkg.changes["distribution"].keys()
-    source.suites = session.query(Suite). \
-        filter(Suite.suite_name.in_(suite_names)).all()
-
-    # Add the source files to the DB (files and dsc_files)
-    dscfile = DSCFile()
-    dscfile.source_id = source.source_id
-    dscfile.poolfile_id = entry["files id"]
-    session.add(dscfile)
-
-    for dsc_file, dentry in u.pkg.dsc_files.items():
-        df = DSCFile()
-        df.source_id = source.source_id
-
-        # If the .orig tarball is already in the pool, it's
-        # files id is stored in dsc_files by check_dsc().
-        files_id = dentry.get("files id", None)
-
-        # Find the entry in the files hash
-        # TODO: Bail out here properly
-        dfentry = None
-        for f, e in u.pkg.files.items():
-            if f == dsc_file:
-                dfentry = e
-                break
-
-        if files_id is None:
-            filename = dfentry["pool name"] + dsc_file
-
-            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
-            # FIXME: needs to check for -1/-2 and or handle exception
-            if found and obj is not None:
-                files_id = obj.file_id
-                pfs.append(obj)
-
-            # If still not found, add it
-            if files_id is None:
-                # HACK: Force sha1sum etc into dentry
-                dentry["sha1sum"] = dfentry["sha1sum"]
-                dentry["sha256sum"] = dfentry["sha256sum"]
-                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
-                pfs.append(poolfile)
-                files_id = poolfile.file_id
-        else:
-            poolfile = get_poolfile_by_id(files_id, session)
-            if poolfile is None:
-                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
-            pfs.append(poolfile)
-
-        df.poolfile_id = files_id
-        session.add(df)
-
-    # Add the src_uploaders to the DB
-    session.flush()
-    session.refresh(source)
-    source.uploaders = [source.maintainer]
-    if u.pkg.dsc.has_key("uploaders"):
-        for up in split_uploaders(u.pkg.dsc["uploaders"]):
-            source.uploaders.append(get_or_set_maintainer(up, session))
-
-    session.flush()
-
-    return source, dsc_component, dsc_location_id, pfs
-
-__all__.append('add_dsc_to_db')
-
-@session_wrapper
-def add_deb_to_db(u, filename, session=None):
-    """
-    Contrary to what you might expect, this routine deals with both
-    debs and udebs.  That info is in 'dbtype', whilst 'type' is
-    'deb' for both of them
-    """
-    cnf = Config()
-    entry = u.pkg.files[filename]
-
-    bin = DBBinary()
-    bin.package = entry["package"]
-    bin.version = entry["version"]
-    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
-    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
-    bin.binarytype = entry["dbtype"]
-
-    # Find poolfile id
-    filename = entry["pool name"] + filename
-    fullpath = os.path.join(cnf["Dir::Pool"], filename)
-    if not entry.get("location id", None):
-        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
-
-    if entry.get("files id", None):
-        poolfile = get_poolfile_by_id(bin.poolfile_id)
-        bin.poolfile_id = entry["files id"]
-    else:
-        poolfile = add_poolfile(filename, entry, entry["location id"], session)
-        bin.poolfile_id = entry["files id"] = poolfile.file_id
-
-    # Find source id
-    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
-
-    # If we couldn't find anything and the upload contains Arch: source,
-    # fall back to trying the source package, source version uploaded
-    # This maintains backwards compatibility with previous dak behaviour
-    # and deals with slightly broken binary debs which don't properly
-    # declare their source package name
-    if len(bin_sources) == 0:
-        if u.pkg.changes["architecture"].has_key("source") \
-           and u.pkg.dsc.has_key("source") and u.pkg.dsc.has_key("version"):
-            bin_sources = get_sources_from_name(u.pkg.dsc["source"], u.pkg.dsc["version"], session=session)
-
-    # If we couldn't find a source here, we reject
-    # TODO: Fix this so that it doesn't kill process-upload and instead just
-    #       performs a reject.  To be honest, we should probably spot this
-    #       *much* earlier than here
-    if len(bin_sources) != 1:
-        raise NoSourceFieldError("Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                  (bin.package, bin.version, entry["architecture"],
-                                   filename, bin.binarytype, u.pkg.changes["fingerprint"]))
-
-    bin.source_id = bin_sources[0].source_id
-
-    if entry.has_key("built-using"):
-        for srcname, version in entry["built-using"]:
-            exsources = get_sources_from_name(srcname, version, session=session)
-            if len(exsources) != 1:
-                raise NoSourceFieldError("Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                          (srcname, version, bin.package, bin.version, entry["architecture"],
-                                           filename, bin.binarytype, u.pkg.changes["fingerprint"]))
-
-            bin.extra_sources.append(exsources[0])
-
-    # Add and flush object so it has an ID
-    session.add(bin)
-
-    suite_names = u.pkg.changes["distribution"].keys()
-    bin.suites = session.query(Suite). \
-        filter(Suite.suite_name.in_(suite_names)).all()
-
-    session.flush()
-
-    # Deal with contents - disabled for now
-    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
-    #if not contents:
-    #    print "REJECT\nCould not determine contents of package %s" % bin.package
-    #    session.rollback()
-    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
-
-    return bin, poolfile
-
-__all__.append('add_deb_to_db')
-
-################################################################################
-
-class SourceACL(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<SourceACL %s>' % self.source_acl_id
-
-__all__.append('SourceACL')
-
 ################################################################################
 
 class SrcFormat(object):
@@ -3000,10 +1948,19 @@ class Suite(ORMObject):
         else:
             return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()
 
+    def update_last_changed(self):
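+        """Set last_changed to the current database time."""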
+        self.last_changed = sqlalchemy.func.now()
+
     @property
     def path(self):
         return os.path.join(self.archive.path, 'dists', self.suite_name)
 
+    @property
+    def release_suite_output(self):
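+        """Name to use in output: the release_suite override if set, otherwise suite_name."""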
+        if self.release_suite is not None:
+            return self.release_suite
+        return self.suite_name
+
 __all__.append('Suite')
 
 @session_wrapper
@@ -3022,8 +1979,22 @@ def get_suite(suite, session=None):
-    @return: Suite object for the requested suite name (None if not present)
+    @return: Suite object for the requested suite name, codename or
+        release_suite (None if not present)
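+
+    The lookup tries suite_name first, then codename, then release_suite.
+    A minimal sketch (the names are illustrative only)::
+
+        suite = get_suite('jessie')   # matched via codename if no suite is named 'jessie'
+        if suite is not None:
+            print suite.suite_name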
     """
 
+    # Start by looking for the dak internal name
     q = session.query(Suite).filter_by(suite_name=suite)
+    try:
+        return q.one()
+    except NoResultFound:
+        pass
+
+    # Now try codename
+    q = session.query(Suite).filter_by(codename=suite)
+    try:
+        return q.one()
+    except NoResultFound:
+        pass
 
+    # Finally give release_suite a try
+    q = session.query(Suite).filter_by(release_suite=suite)
     try:
         return q.one()
     except NoResultFound:
@@ -3140,17 +2111,6 @@ __all__.append('get_uid_from_fingerprint')
 
 ################################################################################
 
-class UploadBlock(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
-
-__all__.append('UploadBlock')
-
-################################################################################
-
 class MetadataKey(ORMObject):
     def __init__(self, key = None):
         self.key = key
@@ -3201,7 +2161,8 @@ class BinaryMetadata(ORMObject):
     def __init__(self, key = None, value = None, binary = None):
         self.key = key
         self.value = value
-        self.binary = binary
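+        # Leave the relation untouched when no binary is given instead of
+        # explicitly assigning None.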
+        if binary is not None:
+            self.binary = binary
 
     def properties(self):
         return ['binary', 'key', 'value']
@@ -3217,7 +2178,8 @@ class SourceMetadata(ORMObject):
     def __init__(self, key = None, value = None, source = None):
         self.key = key
         self.value = value
-        self.source = source
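+        # As in BinaryMetadata.__init__: only assign the relation when given.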
+        if source is not None:
+            self.source = source
 
     def properties(self):
         return ['source', 'key', 'value']
@@ -3229,6 +2191,37 @@ __all__.append('SourceMetadata')
 
 ################################################################################
 
+class MetadataProxy(object):
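+    """Read-only, dict-like access to the metadata rows selected by C{query}.
+
+    A minimal usage sketch (the key names are illustrative only)::
+
+        metadata = MetadataProxy(session, query)
+        if 'Description' in metadata:
+            description = metadata['Description']
+        section = metadata.get('Section', 'misc')
+    """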
+    def __init__(self, session, query):
+        self.session = session
+        self.query = query
+
+    def _get(self, key):
+        metadata_key = self.session.query(MetadataKey).filter_by(key=key).first()
+        if metadata_key is None:
+            return None
+        metadata = self.query.filter_by(key=metadata_key).first()
+        return metadata
+
+    def __contains__(self, key):
+        return self._get(key) is not None
+
+    def __getitem__(self, key):
+        metadata = self._get(key)
+        if metadata is None:
+            raise KeyError
+        return metadata.value
+
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+################################################################################
+
 class VersionCheck(ORMObject):
     def __init__(self, *args, **kwargs):
         pass
@@ -3274,27 +2267,22 @@ class DBConn(object):
 
     def __setuptables(self):
         tables = (
+            'acl',
+            'acl_architecture_map',
+            'acl_fingerprint_map',
+            'acl_per_source',
             'architecture',
             'archive',
             'bin_associations',
             'bin_contents',
             'binaries',
             'binaries_metadata',
-            'binary_acl',
-            'binary_acl_map',
             'build_queue',
-            'build_queue_files',
-            'build_queue_policy_files',
             'changelogs_text',
             'changes',
             'component',
+            'component_suite',
             'config',
-            'changes_pending_binaries',
-            'changes_pending_files',
-            'changes_pending_source',
-            'changes_pending_files_map',
-            'changes_pending_source_files',
-            'changes_pool_files',
             'dsc_files',
             'external_overrides',
             'extra_src_references',
@@ -3302,8 +2290,6 @@ class DBConn(object):
             'files_archive_map',
             'fingerprint',
             'keyrings',
-            'keyring_acl_map',
-            'location',
             'maintainer',
             'metadata_keys',
             'new_comments',
@@ -3316,19 +2302,19 @@ class DBConn(object):
             'policy_queue_byhand_file',
             'priority',
             'section',
+            'signature_history',
             'source',
-            'source_acl',
             'source_metadata',
             'src_associations',
             'src_contents',
             'src_format',
             'src_uploaders',
             'suite',
+            'suite_acl_map',
             'suite_architectures',
             'suite_build_queue_copy',
             'suite_src_formats',
             'uid',
-            'upload_blocks',
             'version_check',
         )
 
@@ -3348,6 +2334,7 @@ class DBConn(object):
             'obsolete_any_associations',
             'obsolete_any_by_all_associations',
             'obsolete_src_associations',
+            'package_list',
             'source_suite',
             'src_associations_bin',
             'src_associations_src',
@@ -3371,6 +2358,21 @@ class DBConn(object):
                    backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
             extension = validator)
 
+        mapper(ACL, self.tbl_acl,
+               properties = dict(
+                architectures = relation(Architecture, secondary=self.tbl_acl_architecture_map, collection_class=set),
+                fingerprints = relation(Fingerprint, secondary=self.tbl_acl_fingerprint_map, collection_class=set),
+                match_keyring = relation(Keyring, primaryjoin=(self.tbl_acl.c.match_keyring_id == self.tbl_keyrings.c.id)),
+                per_source = relation(ACLPerSource, collection_class=set),
+                ))
+
+        mapper(ACLPerSource, self.tbl_acl_per_source,
+               properties = dict(
+                acl = relation(ACL),
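+                # fingerprint and created_by both reference tbl_fingerprint, hence the explicit join conditions: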
+                fingerprint = relation(Fingerprint, primaryjoin=(self.tbl_acl_per_source.c.fingerprint_id == self.tbl_fingerprint.c.id)),
+                created_by = relation(Fingerprint, primaryjoin=(self.tbl_acl_per_source.c.created_by_id == self.tbl_fingerprint.c.id)),
+                ))
+
         mapper(Archive, self.tbl_archive,
                properties = dict(archive_id = self.tbl_archive.c.id,
                                  archive_name = self.tbl_archive.c.name))
@@ -3384,15 +2386,6 @@ class DBConn(object):
                properties = dict(queue_id = self.tbl_build_queue.c.id,
                                  suite = relation(Suite, primaryjoin=(self.tbl_build_queue.c.suite_id==self.tbl_suite.c.id))))
 
-        mapper(BuildQueueFile, self.tbl_build_queue_files,
-               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
-                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))
-
-        mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
-               properties = dict(
-                build_queue = relation(BuildQueue, backref='policy_queue_files'),
-                file = relation(ChangePendingFile, lazy='joined')))
-
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
                                  package = self.tbl_binaries.c.package,
@@ -3417,14 +2410,6 @@ class DBConn(object):
                                      collection_class=attribute_mapped_collection('key'))),
                 extension = validator)
 
-        mapper(BinaryACL, self.tbl_binary_acl,
-               properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
-
-        mapper(BinaryACLMap, self.tbl_binary_acl_map,
-               properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
-                                 fingerprint = relation(Fingerprint, backref="binary_acl_map"),
-                                 architecture = relation(Architecture)))
-
         mapper(Component, self.tbl_component,
                properties = dict(component_id = self.tbl_component.c.id,
                                  component_name = self.tbl_component.c.name),
@@ -3458,19 +2443,16 @@ class DBConn(object):
                                  uid = relation(Uid),
                                  keyring_id = self.tbl_fingerprint.c.keyring,
                                  keyring = relation(Keyring),
-                                 source_acl = relation(SourceACL),
-                                 binary_acl = relation(BinaryACL)),
+                                 acl = relation(ACL)),
                extension = validator)
 
         mapper(Keyring, self.tbl_keyrings,
                properties = dict(keyring_name = self.tbl_keyrings.c.name,
-                                 keyring_id = self.tbl_keyrings.c.id))
+                                 keyring_id = self.tbl_keyrings.c.id,
+                                 acl = relation(ACL, primaryjoin=(self.tbl_keyrings.c.acl_id == self.tbl_acl.c.id))))
 
         mapper(DBChange, self.tbl_changes,
                properties = dict(change_id = self.tbl_changes.c.id,
-                                 poolfiles = relation(PoolFile,
-                                                      secondary=self.tbl_changes_pool_files,
-                                                      backref="changeslinks"),
                                  seen = self.tbl_changes.c.seen,
                                  source = self.tbl_changes.c.source,
                                  binaries = self.tbl_changes.c.binaries,
@@ -3480,54 +2462,7 @@ class DBConn(object):
                                  maintainer = self.tbl_changes.c.maintainer,
                                  changedby = self.tbl_changes.c.changedby,
                                  date = self.tbl_changes.c.date,
-                                 version = self.tbl_changes.c.version,
-                                 files = relation(ChangePendingFile,
-                                                  secondary=self.tbl_changes_pending_files_map,
-                                                  backref="changesfile"),
-                                 in_queue_id = self.tbl_changes.c.in_queue,
-                                 in_queue = relation(PolicyQueue,
-                                                     primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
-                                 approved_for_id = self.tbl_changes.c.approved_for))
-
-        mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
-               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
-
-        mapper(ChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
-                                 filename = self.tbl_changes_pending_files.c.filename,
-                                 size = self.tbl_changes_pending_files.c.size,
-                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
-                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
-                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
-
-        mapper(ChangePendingSource, self.tbl_changes_pending_source,
-               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
-                                 change = relation(DBChange),
-                                 maintainer = relation(Maintainer,
-                                                       primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
-                                 changedby = relation(Maintainer,
-                                                      primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
-                                 fingerprint = relation(Fingerprint),
-                                 source_files = relation(ChangePendingFile,
-                                                         secondary=self.tbl_changes_pending_source_files,
-                                                         backref="pending_sources")))
-
-
-        mapper(KeyringACLMap, self.tbl_keyring_acl_map,
-               properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
-                                 keyring = relation(Keyring, backref="keyring_acl_map"),
-                                 architecture = relation(Architecture)))
-
-        mapper(Location, self.tbl_location,
-               properties = dict(location_id = self.tbl_location.c.id,
-                                 component_id = self.tbl_location.c.component,
-                                 component = relation(Component, backref='location'),
-                                 archive_id = self.tbl_location.c.archive,
-                                 archive = relation(Archive),
-                                 # FIXME: the 'type' column is old cruft and
-                                 # should be removed in the future.
-                                 archive_type = self.tbl_location.c.type),
-               extension = validator)
+                                 version = self.tbl_changes.c.version))
 
         mapper(Maintainer, self.tbl_maintainer,
                properties = dict(maintainer_id = self.tbl_maintainer.c.id,
@@ -3538,7 +2473,8 @@ class DBConn(object):
                 extension = validator)
 
         mapper(NewComment, self.tbl_new_comments,
-               properties = dict(comment_id = self.tbl_new_comments.c.id))
+               properties = dict(comment_id = self.tbl_new_comments.c.id,
+                                 policy_queue = relation(PolicyQueue)))
 
         mapper(Override, self.tbl_override,
                properties = dict(suite_id = self.tbl_override.c.suite,
@@ -3563,7 +2499,8 @@ class DBConn(object):
                                  overridetype_id = self.tbl_override_type.c.id))
 
         mapper(PolicyQueue, self.tbl_policy_queue,
-               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id,
+                                 suite = relation(Suite, primaryjoin=(self.tbl_policy_queue.c.suite_id == self.tbl_suite.c.id))))
 
         mapper(PolicyQueueUpload, self.tbl_policy_queue_upload,
                properties = dict(
@@ -3587,6 +2524,8 @@ class DBConn(object):
                properties = dict(section_id = self.tbl_section.c.id,
                                  section=self.tbl_section.c.section))
 
+        mapper(SignatureHistory, self.tbl_signature_history)
+
         mapper(DBSource, self.tbl_source,
                properties = dict(source_id = self.tbl_source.c.id,
                                  version = self.tbl_source.c.version,
@@ -3606,21 +2545,24 @@ class DBConn(object):
                                      collection_class=attribute_mapped_collection('key'))),
                extension = validator)
 
-        mapper(SourceACL, self.tbl_source_acl,
-               properties = dict(source_acl_id = self.tbl_source_acl.c.id))
-
         mapper(SrcFormat, self.tbl_src_format,
                properties = dict(src_format_id = self.tbl_src_format.c.id,
                                  format_name = self.tbl_src_format.c.format_name))
 
         mapper(Suite, self.tbl_suite,
                properties = dict(suite_id = self.tbl_suite.c.id,
-                                 policy_queue = relation(PolicyQueue),
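+                                 # Two relations now target tbl_policy_queue, so each needs an explicit join condition: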
+                                 policy_queue = relation(PolicyQueue, primaryjoin=(self.tbl_suite.c.policy_queue_id == self.tbl_policy_queue.c.id)),
+                                 new_queue = relation(PolicyQueue, primaryjoin=(self.tbl_suite.c.new_queue_id == self.tbl_policy_queue.c.id)),
+                                 debug_suite = relation(Suite, remote_side=[self.tbl_suite.c.id]),
                                  copy_queues = relation(BuildQueue,
                                      secondary=self.tbl_suite_build_queue_copy),
                                  srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
                                      backref=backref('suites', lazy='dynamic')),
-                                 archive = relation(Archive, backref='suites')),
+                                 archive = relation(Archive, backref='suites'),
+                                 acls = relation(ACL, secondary=self.tbl_suite_acl_map, collection_class=set),
+                                 components = relation(Component, secondary=self.tbl_component_suite,
+                                                   order_by=self.tbl_component.c.ordering,
+                                                   backref=backref('suites'))),
                 extension = validator)
 
         mapper(Uid, self.tbl_uid,
@@ -3628,11 +2570,6 @@ class DBConn(object):
                                  fingerprint = relation(Fingerprint)),
                extension = validator)
 
-        mapper(UploadBlock, self.tbl_upload_blocks,
-               properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
-                                 fingerprint = relation(Fingerprint, backref="uploadblocks"),
-                                 uid = relation(Uid, backref="uploadblocks")))
-
         mapper(BinContents, self.tbl_bin_contents,
             properties = dict(
                 binary = relation(DBBinary,
@@ -3696,7 +2633,7 @@ class DBConn(object):
             engine_args['pool_size'] = int(cnf['DB::PoolSize'])
         if cnf.has_key('DB::MaxOverflow'):
             engine_args['max_overflow'] = int(cnf['DB::MaxOverflow'])
-        if sa_major_version == '0.6' and cnf.has_key('DB::Unicode') and \
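+        # use_native_unicode is accepted by SQLAlchemy 0.6 and later, so only 0.5 is excluded.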
+        if sa_major_version != '0.5' and cnf.has_key('DB::Unicode') and \
             cnf['DB::Unicode'] == 'false':
             engine_args['use_native_unicode'] = False
 
@@ -3748,5 +2685,3 @@ class DBConn(object):
         return session
 
 __all__.append('DBConn')
-
-