git.decadent.org.uk Git - dak.git/commitdiff
merge from ftp-master
author Mike O'Connor <stew@vireo.org>
Thu, 5 Nov 2009 00:53:31 +0000 (19:53 -0500)
committer Mike O'Connor <stew@vireo.org>
Thu, 5 Nov 2009 00:53:31 +0000 (19:53 -0500)
Signed-off-by: Mike O'Connor <stew@vireo.org>
dak/dakdb/update25.py
daklib/binary.py
daklib/dbconn.py
daklib/utils.py

diff --combined dak/dakdb/update25.py
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..a61deb61352f6b1464309147655142f485b3e7be
new file mode 100644 (file)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,268 @@@
++#!/usr/bin/env python
++# coding=utf8
++
++"""
++Adding the contents tables and triggers (pending_bin_contents, deb_contents, udeb_contents)
++
++@contact: Debian FTP Master <ftpmaster@debian.org>
++@copyright: 2009  Mike O'Connor <stew@debian.org>
++@license: GNU General Public License version 2 or later
++"""
++
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the License, or
++# (at your option) any later version.
++
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software
++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++
++################################################################################
++
++
++################################################################################
++
++import psycopg2
++
++from daklib.config import Config
++from daklib.dak_exceptions import DBUpdateError
++from daklib.dbconn import DBConn
++from daklib import utils
++
++################################################################################
++
++options_prefix = "Contents"  # config prefix consulted below; assumed to match dak/contents.py
++
++def suites():
++    """
++    Return the list of suites to operate on: the Contents::Suite
++    configuration entry if present, otherwise a built-in default.
++    """
++    if Config().has_key("%s::%s" % (options_prefix, "Suite")):
++        suites = utils.split_args(Config()["%s::%s" % (options_prefix, "Suite")])
++    else:
++        suites = ['unstable', 'testing']
++
++    return suites
++
++def arches(cursor, suite):
++    """
++    Return a list of (id, name) tuples for the architectures in a
++    suite, excluding "source" and "all".
++    """
++    arch_list = []
++    cursor.execute("""SELECT s.architecture, a.arch_string
++    FROM suite_architectures s
++    JOIN architecture a ON (s.architecture=a.id)
++    WHERE suite = %(suite)s""", {'suite' : suite })
++
++    while True:
++        r = cursor.fetchone()
++        if not r:
++            break
++
++        if r[1] != "source" and r[1] != "all":
++            arch_list.append((r[0], r[1]))
++
++    return arch_list
++
++def do_update(self):
++    """
++    Add the contents tables and triggers as a first step towards
++    eventually getting rid of apt-ftparchive
++    """
++
++    print __doc__
++
++    try:
++        c = self.db.cursor()
++
++        c.execute("""CREATE TABLE pending_bin_contents (
++        id serial NOT NULL,
++        package text NOT NULL,
++        version debversion NOT NULL,
++        arch int NOT NULL,
++        filename text NOT NULL,
++        type int NOT NULL,
++        PRIMARY KEY(id))""" );
++
++        c.execute("""CREATE TABLE deb_contents (
++        filename text,
++        section text,
++        package text,
++        binary_id integer,
++        arch integer,
++        suite integer)""" )
++
++        c.execute("""CREATE TABLE udeb_contents (
++        filename text,
++        section text,
++        package text,
++        binary_id integer,
++        suite integer,
++        arch integer)""" )
++
++        c.execute("""ALTER TABLE ONLY deb_contents
++        ADD CONSTRAINT deb_contents_arch_fkey
++        FOREIGN KEY (arch) REFERENCES architecture(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""ALTER TABLE ONLY udeb_contents
++        ADD CONSTRAINT udeb_contents_arch_fkey
++        FOREIGN KEY (arch) REFERENCES architecture(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""ALTER TABLE ONLY deb_contents
++        ADD CONSTRAINT deb_contents_pkey
++        PRIMARY KEY (filename,package,arch,suite);""")
++
++        c.execute("""ALTER TABLE ONLY udeb_contents
++        ADD CONSTRAINT udeb_contents_pkey
++        PRIMARY KEY (filename,package,arch,suite);""")
++
++        c.execute("""ALTER TABLE ONLY deb_contents
++        ADD CONSTRAINT deb_contents_suite_fkey
++        FOREIGN KEY (suite) REFERENCES suite(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""ALTER TABLE ONLY udeb_contents
++        ADD CONSTRAINT udeb_contents_suite_fkey
++        FOREIGN KEY (suite) REFERENCES suite(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""ALTER TABLE ONLY deb_contents
++        ADD CONSTRAINT deb_contents_binary_fkey
++        FOREIGN KEY (binary_id) REFERENCES binaries(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""ALTER TABLE ONLY udeb_contents
++        ADD CONSTRAINT udeb_contents_binary_fkey
++        FOREIGN KEY (binary_id) REFERENCES binaries(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""CREATE INDEX ind_deb_contents_binary ON deb_contents(binary_id);""" )
++
++
++        suite_list = suites()
++
++        for suite in [i.lower() for i in suite_list]:
++            suite_id = DBConn().get_suite_id(suite)
++            arch_list = arches(c, suite_id)
++
++            for (arch_id,arch_str) in arch_list:
++                c.execute( "CREATE INDEX ind_deb_contents_%s_%s ON deb_contents (arch,suite) WHERE (arch=2 OR arch=%d) AND suite=$d"%(arch_str,suite,arch_id,suite_id) )
++
++            for section, sname in [("debian-installer","main"),
++                                  ("non-free/debian-installer", "nonfree")]:
++                c.execute( "CREATE INDEX ind_udeb_contents_%s_%s ON udeb_contents (section,suite) WHERE section=%s AND suite=$d"%(sname,suite,section,suite_id) )
++
++
++        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_bin_a() RETURNS trigger AS  $$
++    event = TD["event"]
++    if event == "DELETE" or event == "UPDATE":
++
++        plpy.execute(plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 and suite=$2",
++                                  ["int","int"]),
++                                  [TD["old"]["bin"], TD["old"]["suite"]])
++
++    if event == "INSERT" or event == "UPDATE":
++
++       content_data = plpy.execute(plpy.prepare(
++            """SELECT s.section, b.package, b.architecture, ot.type
++            FROM override o
++            JOIN override_type ot on o.type=ot.id
++            JOIN binaries b on b.package=o.package
++            JOIN files f on b.file=f.id
++            JOIN location l on l.id=f.location
++            JOIN section s on s.id=o.section
++            WHERE b.id=$1
++            AND o.suite=$2
++            """,
++            ["int", "int"]),
++            [TD["new"]["bin"], TD["new"]["suite"]])[0]
++
++       tablename="%s_contents" % content_data['type']
++
++       plpy.execute(plpy.prepare("""DELETE FROM %s
++                   WHERE package=$1 and arch=$2 and suite=$3""" % tablename,
++                   ['text','int','int']),
++                   [content_data['package'],
++                   content_data['architecture'],
++                   TD["new"]["suite"]])
++
++       filenames = plpy.execute(plpy.prepare(
++           "SELECT bc.file FROM bin_contents bc where bc.binary_id=$1",
++           ["int"]),
++           [TD["new"]["bin"]])
++
++       for filename in filenames:
++           plpy.execute(plpy.prepare(
++               """INSERT INTO %s
++                   (filename,section,package,binary_id,arch,suite)
++                   VALUES($1,$2,$3,$4,$5,$6)""" % tablename,
++               ["text","text","text","int","int","int"]),
++               [filename["file"],
++                content_data["section"],
++                content_data["package"],
++                TD["new"]["bin"],
++                content_data["architecture"],
++                TD["new"]["suite"]] )
++$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
++""")
++
++
++        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_override() RETURNS trigger AS  $$
++    event = TD["event"]
++    if event == "UPDATE":
++
++        otype = plpy.execute(plpy.prepare("SELECT type from override_type where id=$1",["int"]),[TD["new"]["type"]] )[0];
++        if otype["type"].endswith("deb"):
++            section = plpy.execute(plpy.prepare("SELECT section from section where id=$1",["int"]),[TD["new"]["section"]] )[0];
++
++            table_name = "%s_contents" % otype["type"]
++            plpy.execute(plpy.prepare("UPDATE %s set section=$1 where package=$2 and suite=$3" % table_name,
++                                      ["text","text","int"]),
++                                      [section["section"],
++                                      TD["new"]["package"],
++                                      TD["new"]["suite"]])
++
++$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
++""")
++
++        c.execute("""CREATE OR REPLACE FUNCTION update_contents_for_override()
++                      RETURNS trigger AS  $$
++    event = TD["event"]
++    if event == "UPDATE" or event == "INSERT":
++        row = TD["new"]
++        r = plpy.execute(plpy.prepare( """SELECT 1 from suite_architectures sa
++                  JOIN binaries b ON b.architecture = sa.architecture
++                  WHERE b.id = $1 and sa.suite = $2""",
++                ["int", "int"]),
++                [row["bin"], row["suite"]])
++        if not len(r):
++            plpy.error("Illegal architecture for this suite")
++
++$$ LANGUAGE plpythonu VOLATILE;""")
++
++        c.execute( """CREATE TRIGGER illegal_suite_arch_bin_associations_trigger
++                      BEFORE INSERT OR UPDATE ON bin_associations
++                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
++
++        c.execute( """CREATE TRIGGER bin_associations_contents_trigger
++                      AFTER INSERT OR UPDATE OR DELETE ON bin_associations
++                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_bin_a();""")
++        c.execute("""CREATE TRIGGER override_contents_trigger
++                      AFTER UPDATE ON override
++                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
++
++
++        c.execute( "CREATE INDEX ind_deb_contents_name ON deb_contents(package);");
++        c.execute( "CREATE INDEX ind_udeb_contents_name ON udeb_contents(package);");
++
++        self.db.commit()
++
++    except psycopg2.ProgrammingError, msg:
++        self.db.rollback()
++        raise DBUpdateError, "Unable to apply update 25, rollback issued. Error message : %s" % (str(msg))
++
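For context, a minimal sketch of how a schema update module like the one
above gets applied. The driver below is an assumption for illustration
(in dak this role is played by the update-db command): each
dak/dakdb/updateNN.py exposes do_update(self), receives an object whose
.db attribute is the open psycopg2 connection, and raises DBUpdateError
on failure.

    # Hypothetical driver sketch, not part of this commit.
    from daklib.dak_exceptions import DBUpdateError

    def apply_update(dbholder, number):
        # dbholder is assumed to expose the connection as .db, matching
        # the do_update(self) convention used by update25.py above.
        name = "dak.dakdb.update%d" % number
        module = __import__(name, fromlist=['do_update'])
        module.do_update(dbholder)  # raises DBUpdateError on failure
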
diff --combined daklib/binary.py
index a70aadb943fb2e713a5771f113782384e612d251,c6ee96f86d5f1eed8b210720697e1620e2448ead..a70aadb943fb2e713a5771f113782384e612d251
mode 100755,100644..100644
@@@ -204,10 -204,7 +204,10 @@@ class Binary(object)
                      else:
                          pkgs = deb822.Packages.iter_paragraphs(file(os.path.join(self.tmpdir,'control')))
                          pkg = pkgs.next()
 -                        result = insert_pending_content_paths(pkg, [tarinfo.name for tarinfo in data if not tarinfo.isdir()], session)
 +                        result = insert_pending_content_paths(pkg,
 +                                                              self.filename.endswith('.udeb'),
 +                                                              [tarinfo.name for tarinfo in data if not tarinfo.isdir()],
 +                                                              session)
  
                  except:
                      traceback.print_exc()
  
  __all__.append('Binary')
  
 -def copy_temporary_contents(package, version, archname, deb, reject, session=None):
 +
 +def copy_temporary_contents(binary, bin_association, reject, session=None):
      """
      copy the previously stored contents from the temp table to the permanent one
  
  
   arch = binary.architecture
  
 -    # first see if contents exist:
 -    in_pcaq = """SELECT 1 FROM pending_content_associations
 -                               WHERE package=:package
 -                               AND version=:version
 -                               AND architecture=:archid LIMIT 1"""
 -
 -    vals = {'package': package,
 -            'version': version,
 -            'archid': arch.arch_id}
 -
 -    exists = None
 -    check = session.execute(in_pcaq, vals)
 +    pending = session.query(PendingBinContents).filter_by(package=binary.package,
 +                                                          version=binary.version,
 +                                                          arch=binary.arch).first()
  
 -    if check.rowcount > 0:
 +    if pending:
          # This should NOT happen.  We should have added contents
          # during process-unchecked.  if it did, log an error, and send
          # an email.
          message = utils.TemplateSubst(subst, cnf["Dir::Templates"]+"/missing-contents")
          utils.send_mail(message)
  
 -        # Temporarily disable contents storage until we re-do the table layout
 -        #exists = Binary(deb, reject).scan_package()
 -
 -    if exists:
 -        sql = """INSERT INTO content_associations(binary_pkg,filepath,filename)
 -                 SELECT currval('binaries_id_seq'), filepath, filename FROM pending_content_associations
 -                 WHERE package=:package AND version=:version AND architecture=:archid"""
 -        session.execute(sql, vals)
 -
 -        sql = """DELETE from pending_content_associations
 -                 WHERE package=:package AND version=:version AND architecture=:archid"""
 -        session.execute(sql, vals)
 -        session.commit()
 +        # rescan it now
 +        exists = Binary(deb, reject).scan_package()
 +
 +        if not exists:
 +            # LOG?
 +            return False
 +
 +    component = binary.poolfile.location.component
 +    override = session.query(Override).filter_by(package=binary.package,
 +                                                 suite=bin_association.suite,
 +                                                 component=component.id).first()
 +    if not override:
 +        # LOG?
 +        return False
 +
 +
 +    if not override.overridetype.type.endswith('deb'):
 +        return True
 +
 +    if override.overridetype.type == "udeb":
 +        table = "udeb_contents"
 +    elif override.overridetype.type == "deb":
 +        table = "deb_contents"
 +    else:
 +        return False
 +    
 +
 +    if component.name == "main":
 +        component_str = ""
 +    else:
 +        component_str = component.name + "/"
 +        
 +    vals = { 'package':binary.package,
 +             'version':binary.version,
 +             'arch':binary.architecture,
 +             'binary_id': binary.id,
 +             'component':component_str,
 +             'section':override.section.section
 +             }
 +
 +    session.execute( """INSERT INTO %s
 +    (binary_id,package,version.component,arch,section,filename)
 +    SELECT :binary_id, :package, :version, :component, :arch, :section
 +    FROM pending_bin_contents pbc
 +    WHERE pbc.package=:package
 +    AND pbc.version=:version
 +    AND pbc.arch=:arch""" % table, vals )
 +
 +    session.execute( """DELETE from pending_bin_contents package=:package
 +    AND version=:version
 +    AND arch=:arch""", vals )
  
      if privatetrans:
 +        session.commit()
          session.close()
  
      return True
  
  __all__.append('copy_temporary_contents')
 +
 +
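A hedged usage sketch for the reworked copy_temporary_contents(). The
call site below is assumed for illustration: it would run during upload
processing once the DBBinary row and its BinAssociation exist, moving
the paths recorded at scan time from pending_bin_contents into
deb_contents or udeb_contents.

    # Assumed call site; dbbinary, bin_assoc and session are placeholders.
    from daklib.binary import copy_temporary_contents

    if not copy_temporary_contents(dbbinary, bin_assoc, reject=None,
                                   session=session):
        # Contents could not be copied; the Contents index will lack
        # this package until it is rescanned.
        print "W: no contents recorded for %s" % dbbinary.package
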
diff --combined daklib/dbconn.py
index 921f1daa03af8c35e066c777ba8872ebd4b5bba8,361dcf42cfc3b1eaad30d1f3dbc72785678e8c02..d05dd1599d2d5181605fb9d6b1cabb51a59cfef3
mode 100755,100644..100644
@@@ -37,7 -37,7 +37,7 @@@ import o
  import re
  import psycopg2
  import traceback
- import datetime
+ from datetime import datetime
  
  from inspect import getargspec
  
@@@ -50,10 -50,7 +50,9 @@@ from sqlalchemy import types as sqltype
  from sqlalchemy.exc import *
  from sqlalchemy.orm.exc import NoResultFound
  
 +# Only import Config until Queue stuff is changed to store its config
 +# in the database
  from config import Config
- from singleton import Singleton
  from textutils import fix_maintainer
  
  ################################################################################
@@@ -125,6 -122,8 +124,8 @@@ def session_wrapper(fn)
  
      return wrapped
  
+ __all__.append('session_wrapper')
  ################################################################################
  
  class Architecture(object):
@@@ -430,6 -429,132 +431,132 @@@ __all__.append('BinaryACLMap'
  
  ################################################################################
  
+ class BuildQueue(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<BuildQueue %s>' % self.queue_name
+     def add_file_from_pool(self, poolfile):
+         """Copies a file into the pool.  Assumes that the PoolFile object is
+         attached to the same SQLAlchemy session as the Queue object is.
+         The caller is responsible for committing after calling this function."""
+         poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
+         # Check if we have a file of this name or this ID already
+         for f in self.queuefiles:
+             if f.fileid is not None and f.fileid == poolfile.file_id or \
+                f.poolfile.filename == poolfile_basename:
+                    # In this case, update the BuildQueueFile entry so we
+                    # don't remove it too early
+                    f.lastused = datetime.now()
+                    DBConn().session().object_session(poolfile).add(f)
+                    return f
+         # Prepare BuildQueueFile object
+         qf = BuildQueueFile()
+         qf.build_queue_id = self.queue_id
+         qf.lastused = datetime.now()
+         qf.filename = poolfile_basename
+         targetpath = poolfile.fullpath
+         queuepath = os.path.join(self.path, poolfile_basename)
+         try:
+             if self.copy_files:
+                 # We need to copy instead of symlink
+                 import utils
+                 utils.copy(targetpath, queuepath)
+                 # NULL in the fileid field implies a copy
+                 qf.fileid = None
+             else:
+                 os.symlink(targetpath, queuepath)
+                 qf.fileid = poolfile.file_id
+         except OSError:
+             return None
+         # Get the same session as the PoolFile is using and add the qf to it
+         DBConn().session().object_session(poolfile).add(qf)
+         return qf
+ __all__.append('BuildQueue')
+ @session_wrapper
+ def get_build_queue(queuename, session=None):
+     """
+     Returns BuildQueue object for given C{queue name}, or None if no such
+     queue exists.
+     @type queuename: string
+     @param queuename: The name of the queue
+     @type session: Session
+     @param session: Optional SQLA session object (a temporary one will be
+     generated if not supplied)
+     @rtype: BuildQueue
+     @return: BuildQueue object for the given queue
+     """
+     q = session.query(BuildQueue).filter_by(queue_name=queuename)
+     try:
+         return q.one()
+     except NoResultFound:
+         return None
+ __all__.append('get_build_queue')
+ ################################################################################
+ class BuildQueueFile(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<BuildQueueFile %s (%s)>' % (self.filename, self.queue_id)
+ __all__.append('BuildQueueFile')
+ ################################################################################
+ class ChangePendingBinary(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<ChangePendingBinary %s>' % self.change_pending_binary_id
+ __all__.append('ChangePendingBinary')
+ ################################################################################
+ class ChangePendingFile(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<ChangePendingFile %s>' % self.change_pending_file_id
+ __all__.append('ChangePendingFile')
+ ################################################################################
+ class ChangePendingSource(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<ChangePendingSource %s>' % self.change_pending_source_id
+ __all__.append('ChangePendingSource')
+ ################################################################################
  class Component(object):
      def __init__(self, *args, **kwargs):
          pass
@@@ -653,16 -778,11 +780,16 @@@ def insert_content_paths(binary_id, ful
      try:
          # Insert paths
          pathcache = {}
 -        for fullpath in fullpaths:
 -            if fullpath.startswith( './' ):
 -                fullpath = fullpath[2:]
  
 -            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )", { 'filename': fullpath, 'id': binary_id}  )
 +        def generate_path_dicts():
 +            for fullpath in fullpaths:
 +                if fullpath.startswith( './' ):
 +                    fullpath = fullpath[2:]
 +
 +                yield {'filename':fullpath, 'id': binary_id }
 +
 +        session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
 +                         generate_path_dicts() )
  
          session.commit()
          if privatetrans:
@@@ -855,6 -975,39 +982,39 @@@ def get_poolfile_like_name(filename, se
  
  __all__.append('get_poolfile_like_name')
  
+ @session_wrapper
+ def add_poolfile(filename, datadict, location_id, session=None):
+     """
+     Add a new file to the pool
+     @type filename: string
+     @param filename: filename
+     @type datadict: dict
+     @param datadict: dict with needed data
+     @type location_id: int
+     @param location_id: database id of the location
+     @rtype: PoolFile
+     @return: the PoolFile object created
+     """
+     poolfile = PoolFile()
+     poolfile.filename = filename
+     poolfile.filesize = datadict["size"]
+     poolfile.md5sum = datadict["md5sum"]
+     poolfile.sha1sum = datadict["sha1sum"]
+     poolfile.sha256sum = datadict["sha256sum"]
+     poolfile.location_id = location_id
+     session.add(poolfile)
+     # Flush to get a file id (NB: This is not a commit)
+     session.flush()
+     return poolfile
+ __all__.append('add_poolfile')
  ################################################################################
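A quick usage sketch for the new add_poolfile() helper, assuming the
caller already holds a session and a location id (all values below are
placeholders):

    # Hypothetical checksums and path, for illustration only.
    datadict = {
        'size': 1024,
        'md5sum': '0123456789abcdef0123456789abcdef',
        'sha1sum': 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
        'sha256sum': 'e3b0c44298fc1c149afbf4c8996fb924'
                     '27ae41e4649b934ca495991b7852b855',
    }
    pf = add_poolfile('pool/main/d/dak/dak_1.0.dsc', datadict,
                      location_id, session)
    # add_poolfile() has already flushed, so the id is usable here.
    files_id = pf.file_id
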
  
  class Fingerprint(object):
@@@ -1094,19 -1247,19 +1254,19 @@@ __all__.append('KeyringACLMap'
  
  ################################################################################
  
- class KnownChange(object):
+ class DBChange(object):
      def __init__(self, *args, **kwargs):
          pass
  
      def __repr__(self):
-         return '<KnownChange %s>' % self.changesname
+         return '<DBChange %s>' % self.changesname
  
- __all__.append('KnownChange')
+ __all__.append('DBChange')
  
  @session_wrapper
- def get_knownchange(filename, session=None):
+ def get_dbchange(filename, session=None):
      """
-     returns knownchange object for given C{filename}.
+     returns DBChange object for given C{filename}.
  
      @type filename: string
      @param filename: the name of the changes file
      @return: DBChange object for the given filename (None if not present)
  
      """
-     q = session.query(KnownChange).filter_by(changesname=filename)
+     q = session.query(DBChange).filter_by(changesname=filename)
  
      try:
          return q.one()
      except NoResultFound:
          return None
  
- __all__.append('get_knownchange')
- ################################################################################
- class KnownChangePendingFile(object):
-     def __init__(self, *args, **kwargs):
-         pass
-     def __repr__(self):
-         return '<KnownChangePendingFile %s>' % self.known_change_pending_file_id
- __all__.append('KnownChangePendingFile')
+ __all__.append('get_dbchange')
  
  ################################################################################
  
@@@ -1419,38 -1561,16 +1568,38 @@@ __all__.append('get_override_type'
  
  ################################################################################
  
 -class PendingContentAssociation(object):
 +class DebContents(object):
 +    def __init__(self, *args, **kwargs):
 +        pass
 +
 +    def __repr__(self):
 +        return '<DebContents %s: %s>' % (self.package,self.filename)
 +
 +__all__.append('DebContents')
 +
 +
 +class UdebContents(object):
      def __init__(self, *args, **kwargs):
          pass
  
      def __repr__(self):
 -        return '<PendingContentAssociation %s>' % self.pca_id
 +        return '<UdebContents %s: %s>' % (self.package,self.filename)
  
 -__all__.append('PendingContentAssociation')
 +__all__.append('UdebContents')
 +
 +class PendingBinContents(object):
 +    def __init__(self, *args, **kwargs):
 +        pass
  
 -def insert_pending_content_paths(package, fullpaths, session=None):
 +    def __repr__(self):
 +        return '<PendingBinContents %s>' % self.contents_id
 +
 +__all__.append('PendingBinContents')
 +
 +def insert_pending_content_paths(package,
 +                                 is_udeb,
 +                                 fullpaths,
 +                                 session=None):
      """
      Make sure given paths are temporarily associated with given
      package
          arch_id = arch.arch_id
  
          # Remove any already existing recorded files for this package
 -        q = session.query(PendingContentAssociation)
 +        q = session.query(PendingBinContents)
          q = q.filter_by(package=package['Package'])
          q = q.filter_by(version=package['Version'])
          q = q.filter_by(arch=arch_id)
          q.delete()
  
 -        # Insert paths
 -        pathcache = {}
          for fullpath in fullpaths:
 -            (path, filename) = os.path.split(fullpath)
 -
 -            if path.startswith( "./" ):
 -                path = path[2:]
 -
 -            filepath_id = get_or_set_contents_path_id(path, session)
 -            filename_id = get_or_set_contents_file_id(filename, session)
  
 -            pathcache[fullpath] = (filepath_id, filename_id)
 +            if fullpath.startswith( "./" ):
 +                fullpath = fullpath[2:]
  
 -        for fullpath, dat in pathcache.items():
 -            pca = PendingContentAssociation()
 +            pca = PendingBinContents()
              pca.package = package['Package']
              pca.version = package['Version']
 -            pca.filepath_id = dat[0]
 -            pca.filename_id = dat[1]
 +            pca.filename = fullpath
              pca.arch = arch_id
 +
 +            if is_udeb:
 +                pca.otype = 8 # gross: hardcoded type id for udebs
 +            else:
 +                pca.otype = 7 # also gross: hardcoded type id for debs
              session.add(pca)
  
          # Only commit if we set up the session ourself
@@@ -1524,6 -1649,42 +1673,42 @@@ __all__.append('insert_pending_content_
  
  ################################################################################
  
+ class PolicyQueue(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<PolicyQueue %s>' % self.queue_name
+ __all__.append('PolicyQueue')
+ @session_wrapper
+ def get_policy_queue(queuename, session=None):
+     """
+     Returns PolicyQueue object for given C{queue name}
+     @type queuename: string
+     @param queuename: The name of the queue
+     @type session: Session
+     @param session: Optional SQLA session object (a temporary one will be
+     generated if not supplied)
+     @rtype: PolicyQueue
+     @return: PolicyQueue object for the given queue
+     """
+     q = session.query(PolicyQueue).filter_by(queue_name=queuename)
+     try:
+         return q.one()
+     except NoResultFound:
+         return None
+ __all__.append('get_policy_queue')
+ ################################################################################
  class Priority(object):
      def __init__(self, *args, **kwargs):
          pass
@@@ -1594,99 -1755,6 +1779,6 @@@ __all__.append('get_priorities'
  
  ################################################################################
  
- class Queue(object):
-     def __init__(self, *args, **kwargs):
-         pass
-     def __repr__(self):
-         return '<Queue %s>' % self.queue_name
-     def add_file_from_pool(self, poolfile):
-         """Copies a file into the pool.  Assumes that the PoolFile object is
-         attached to the same SQLAlchemy session as the Queue object is.
-         The caller is responsible for committing after calling this function."""
-         poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-         # Check if we have a file of this name or this ID already
-         for f in self.queuefiles:
-             if f.fileid is not None and f.fileid == poolfile.file_id or \
-                f.poolfile.filename == poolfile_basename:
-                    # In this case, update the QueueFile entry so we
-                    # don't remove it too early
-                    f.lastused = datetime.now()
-                    DBConn().session().object_session(pf).add(f)
-                    return f
-         # Prepare QueueFile object
-         qf = QueueFile()
-         qf.queue_id = self.queue_id
-         qf.lastused = datetime.now()
-         qf.filename = dest
-         targetpath = qf.fullpath
-         queuepath = os.path.join(self.path, poolfile_basename)
-         try:
-             if self.copy_pool_files:
-                 # We need to copy instead of symlink
-                 import utils
-                 utils.copy(targetfile, queuepath)
-                 # NULL in the fileid field implies a copy
-                 qf.fileid = None
-             else:
-                 os.symlink(targetfile, queuepath)
-                 qf.fileid = poolfile.file_id
-         except OSError:
-             return None
-         # Get the same session as the PoolFile is using and add the qf to it
-         DBConn().session().object_session(poolfile).add(qf)
-         return qf
- __all__.append('Queue')
- @session_wrapper
- def get_queue(queuename, session=None):
-     """
-     Returns Queue object for given C{queue name}, creating it if it does not
-     exist.
-     @type queuename: string
-     @param queuename: The name of the queue
-     @type session: Session
-     @param session: Optional SQLA session object (a temporary one will be
-     generated if not supplied)
-     @rtype: Queue
-     @return: Queue object for the given queue
-     """
-     q = session.query(Queue).filter_by(queue_name=queuename)
-     try:
-         return q.one()
-     except NoResultFound:
-         return None
- __all__.append('get_queue')
- ################################################################################
- class QueueFile(object):
-     def __init__(self, *args, **kwargs):
-         pass
-     def __repr__(self):
-         return '<QueueFile %s (%s)>' % (self.filename, self.queue_id)
- __all__.append('QueueFile')
- ################################################################################
  class Section(object):
      def __init__(self, *args, **kwargs):
          pass
@@@ -1917,6 -1985,186 +2009,186 @@@ __all__.append('get_source_in_suite'
  
  ################################################################################
  
+ @session_wrapper
+ def add_dsc_to_db(u, filename, session=None):
+     entry = u.pkg.files[filename]
+     source = DBSource()
+     pfs = []
+     source.source = u.pkg.dsc["source"]
+     source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
+     source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
+     source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+     source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+     source.install_date = datetime.now().date()
+     dsc_component = entry["component"]
+     dsc_location_id = entry["location id"]
+     source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
+     # Set up a new poolfile if necessary
+     if not entry.has_key("files id") or not entry["files id"]:
+         filename = entry["pool name"] + filename
+         poolfile = add_poolfile(filename, entry, dsc_location_id, session)
+         session.flush()
+         pfs.append(poolfile)
+         entry["files id"] = poolfile.file_id
+     source.poolfile_id = entry["files id"]
+     session.add(source)
+     session.flush()
+     for suite_name in u.pkg.changes["distribution"].keys():
+         sa = SrcAssociation()
+         sa.source_id = source.source_id
+         sa.suite_id = get_suite(suite_name).suite_id
+         session.add(sa)
+     session.flush()
+     # Add the source files to the DB (files and dsc_files)
+     dscfile = DSCFile()
+     dscfile.source_id = source.source_id
+     dscfile.poolfile_id = entry["files id"]
+     session.add(dscfile)
+     for dsc_file, dentry in u.pkg.dsc_files.items():
+         df = DSCFile()
+         df.source_id = source.source_id
+         # If the .orig tarball is already in the pool, its
+         # files id is stored in dsc_files by check_dsc().
+         files_id = dentry.get("files id", None)
+         # Find the entry in the files hash
+         # TODO: Bail out here properly
+         dfentry = None
+         for f, e in u.pkg.files.items():
+             if f == dsc_file:
+                 dfentry = e
+                 break
+         if files_id is None:
+             filename = dfentry["pool name"] + dsc_file
+             (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
+             # FIXME: needs to check for -1/-2 and or handle exception
+             if found and obj is not None:
+                 files_id = obj.file_id
+                 pfs.append(obj)
+             # If still not found, add it
+             if files_id is None:
+                 # HACK: Force sha1sum etc into dentry
+                 dentry["sha1sum"] = dfentry["sha1sum"]
+                 dentry["sha256sum"] = dfentry["sha256sum"]
+                 poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
+                 pfs.append(poolfile)
+                 files_id = poolfile.file_id
+         else:
+             poolfile = get_poolfile_by_id(files_id, session)
+             if poolfile is None:
+                 utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
+             pfs.append(poolfile)
+         df.poolfile_id = files_id
+         session.add(df)
+     session.flush()
+     # Add the src_uploaders to the DB
+     uploader_ids = [source.maintainer_id]
+     if u.pkg.dsc.has_key("uploaders"):
+         for up in u.pkg.dsc["uploaders"].split(","):
+             up = up.strip()
+             uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
+     added_ids = {}
+     for up in uploader_ids:
+         if added_ids.has_key(up):
+             utils.warn("Already saw uploader %s for source %s" % (up, source.source))
+             continue
+         added_ids[up]=1
+         su = SrcUploader()
+         su.maintainer_id = up
+         su.source_id = source.source_id
+         session.add(su)
+     session.flush()
+     return dsc_component, dsc_location_id, pfs
+ __all__.append('add_dsc_to_db')
+ @session_wrapper
+ def add_deb_to_db(u, filename, session=None):
+     """
+     Contrary to what you might expect, this routine deals with both
+     debs and udebs.  That info is in 'dbtype', whilst 'type' is
+     'deb' for both of them
+     """
+     cnf = Config()
+     entry = u.pkg.files[filename]
+     bin = DBBinary()
+     bin.package = entry["package"]
+     bin.version = entry["version"]
+     bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
+     bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+     bin.arch_id = get_architecture(entry["architecture"], session).arch_id
+     bin.binarytype = entry["dbtype"]
+     # Find poolfile id
+     filename = entry["pool name"] + filename
+     fullpath = os.path.join(cnf["Dir::Pool"], filename)
+     if not entry.get("location id", None):
+         entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
+     if entry.get("files id", None):
+         bin.poolfile_id = entry["files id"]
+         poolfile = get_poolfile_by_id(bin.poolfile_id, session)
+     else:
+         poolfile = add_poolfile(filename, entry, entry["location id"], session)
+         bin.poolfile_id = entry["files id"] = poolfile.file_id
+     # Find source id
+     bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+     if len(bin_sources) != 1:
+         raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+                                   (bin.package, bin.version, bin.architecture.arch_string,
+                                    filename, bin.binarytype, u.pkg.changes["fingerprint"])
+     bin.source_id = bin_sources[0].source_id
+     # Add and flush object so it has an ID
+     session.add(bin)
+     session.flush()
+     # Add BinAssociations
+     for suite_name in u.pkg.changes["distribution"].keys():
+         ba = BinAssociation()
+         ba.binary_id = bin.binary_id
+         ba.suite_id = get_suite(suite_name).suite_id
+         session.add(ba)
+     session.flush()
+     # Deal with contents - disabled for now
+     #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
+     #if not contents:
+     #    print "REJECT\nCould not determine contents of package %s" % bin.package
+     #    session.rollback()
+     #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
+     return poolfile
+ __all__.append('add_deb_to_db')
+ ################################################################################
  class SourceACL(object):
      def __init__(self, *args, **kwargs):
          pass
@@@ -2258,63 -2506,72 +2530,72 @@@ __all__.append('UploadBlock'
  
  ################################################################################
  
- class DBConn(Singleton):
+ class DBConn(object):
      """
      database module init.
      """
+     __shared_state = {}
      def __init__(self, *args, **kwargs):
-         super(DBConn, self).__init__(*args, **kwargs)
+         self.__dict__ = self.__shared_state
  
-     def _startup(self, *args, **kwargs):
-         self.debug = False
-         if kwargs.has_key('debug'):
-             self.debug = True
-         self.__createconn()
+         if not getattr(self, 'initialised', False):
+             self.initialised = True
+             self.debug = kwargs.has_key('debug')
+             self.__createconn()
  
      def __setuptables(self):
-         self.tbl_architecture = Table('architecture', self.db_meta, autoload=True)
-         self.tbl_archive = Table('archive', self.db_meta, autoload=True)
-         self.tbl_bin_contents = Table('bin_contents', self.db_meta, autoload=True)
-         self.tbl_bin_associations = Table('bin_associations', self.db_meta, autoload=True)
-         self.tbl_binaries = Table('binaries', self.db_meta, autoload=True)
-         self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True)
-         self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True)
-         self.tbl_component = Table('component', self.db_meta, autoload=True)
-         self.tbl_config = Table('config', self.db_meta, autoload=True)
-         self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True)
-         self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True)
-         self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True)
-         self.tbl_changes_pending_files = Table('changes_pending_files', self.db_meta, autoload=True)
-         self.tbl_changes_pool_files = Table('changes_pool_files', self.db_meta, autoload=True)
-         self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True)
-         self.tbl_deb_contents = Table('deb_contents', self.db_meta, autoload=True)
-         self.tbl_files = Table('files', self.db_meta, autoload=True)
-         self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True)
-         self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True)
-         self.tbl_known_changes = Table('known_changes', self.db_meta, autoload=True)
-         self.tbl_keyring_acl_map = Table('keyring_acl_map', self.db_meta, autoload=True)
-         self.tbl_location = Table('location', self.db_meta, autoload=True)
-         self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True)
-         self.tbl_new_comments = Table('new_comments', self.db_meta, autoload=True)
-         self.tbl_override = Table('override', self.db_meta, autoload=True)
-         self.tbl_override_type = Table('override_type', self.db_meta, autoload=True)
-         self.tbl_pending_bin_contents = Table('pending_bin_contents', self.db_meta, autoload=True)
-         self.tbl_priority = Table('priority', self.db_meta, autoload=True)
-         self.tbl_queue = Table('queue', self.db_meta, autoload=True)
-         self.tbl_queue_files = Table('queue_files', self.db_meta, autoload=True)
-         self.tbl_section = Table('section', self.db_meta, autoload=True)
-         self.tbl_source = Table('source', self.db_meta, autoload=True)
-         self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True)
-         self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True)
-         self.tbl_src_format = Table('src_format', self.db_meta, autoload=True)
-         self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True)
-         self.tbl_suite = Table('suite', self.db_meta, autoload=True)
-         self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True)
-         self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True)
-         self.tbl_suite_queue_copy = Table('suite_queue_copy', self.db_meta, autoload=True)
-         self.tbl_udeb_contents = Table('udeb_contents', self.db_meta, autoload=True)
-         self.tbl_uid = Table('uid', self.db_meta, autoload=True)
-         self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True)
+         tables = (
+             'architecture',
+             'archive',
+             'bin_associations',
+             'binaries',
+             'binary_acl',
+             'binary_acl_map',
++            'bin_contents',
+             'build_queue',
+             'build_queue_files',
+             'component',
+             'config',
 -            'content_associations',
 -            'content_file_names',
 -            'content_file_paths',
+             'changes_pending_binaries',
+             'changes_pending_files',
+             'changes_pending_files_map',
+             'changes_pending_source',
+             'changes_pending_source_files',
+             'changes_pool_files',
++            'deb_contents',
+             'dsc_files',
+             'files',
+             'fingerprint',
+             'keyrings',
+             'changes',
+             'keyring_acl_map',
+             'location',
+             'maintainer',
+             'new_comments',
+             'override',
+             'override_type',
 -            'pending_content_associations',
++            'pending_bin_contents',
+             'policy_queue',
+             'priority',
+             'section',
+             'source',
+             'source_acl',
+             'src_associations',
+             'src_format',
+             'src_uploaders',
+             'suite',
+             'suite_architectures',
+             'suite_src_formats',
+             'suite_build_queue_copy',
++            'udeb_contents',
+             'uid',
+             'upload_blocks',
+         )
+         for table_name in tables:
+             table = Table(table_name, self.db_meta, autoload=True)
+             setattr(self, 'tbl_%s' % table_name, table)
  
      def __setupmappers(self):
          mapper(Architecture, self.tbl_architecture,
                                   binary_id = self.tbl_bin_associations.c.bin,
                                   binary = relation(DBBinary)))
  
 -        mapper(BuildQueue, self.tbl_build_queue,
 -               properties = dict(queue_id = self.tbl_build_queue.c.id))
 -
 -        mapper(BuildQueueFile, self.tbl_build_queue_files,
 -               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
 -                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))
 +        mapper(PendingBinContents, self.tbl_pending_bin_contents,
 +               properties = dict(contents_id =self.tbl_pending_bin_contents.c.id,
 +                                 filename = self.tbl_pending_bin_contents.c.filename,
 +                                 package = self.tbl_pending_bin_contents.c.package,
 +                                 version = self.tbl_pending_bin_contents.c.version,
 +                                 arch = self.tbl_pending_bin_contents.c.arch,
 +                                 otype = self.tbl_pending_bin_contents.c.type))
 +
 +        mapper(DebContents, self.tbl_deb_contents,
 +               properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
 +                                 package=self.tbl_deb_contents.c.package,
 +                                 component=self.tbl_deb_contents.c.component,
 +                                 arch=self.tbl_deb_contents.c.arch,
 +                                 section=self.tbl_deb_contents.c.section,
 +                                 filename=self.tbl_deb_contents.c.filename))
 +
 +        mapper(UdebContents, self.tbl_udeb_contents,
 +               properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
 +                                 package=self.tbl_udeb_contents.c.package,
 +                                 component=self.tbl_udeb_contents.c.component,
 +                                 arch=self.tbl_udeb_contents.c.arch,
 +                                 section=self.tbl_udeb_contents.c.section,
 +                                 filename=self.tbl_udeb_contents.c.filename))
  
          mapper(DBBinary, self.tbl_binaries,
                 properties = dict(binary_id = self.tbl_binaries.c.id,
                 properties = dict(keyring_name = self.tbl_keyrings.c.name,
                                   keyring_id = self.tbl_keyrings.c.id))
  
-         mapper(KnownChange, self.tbl_known_changes,
-                properties = dict(known_change_id = self.tbl_known_changes.c.id,
+         mapper(DBChange, self.tbl_changes,
+                properties = dict(change_id = self.tbl_changes.c.id,
                                   poolfiles = relation(PoolFile,
                                                        secondary=self.tbl_changes_pool_files,
                                                        backref="changeslinks"),
+                                  files = relation(ChangePendingFile,
+                                                   secondary=self.tbl_changes_pending_files_map,
+                                                   backref="changesfile"),
+                                  in_queue_id = self.tbl_changes.c.in_queue,
+                                  in_queue = relation(PolicyQueue,
+                                                      primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
+                                  approved_for_id = self.tbl_changes.c.approved_for))
+         mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
+                properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
+         mapper(ChangePendingFile, self.tbl_changes_pending_files,
+                properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+         mapper(ChangePendingSource, self.tbl_changes_pending_source,
+                properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
+                                  change = relation(DBChange),
+                                  maintainer = relation(Maintainer,
+                                                        primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
+                                  changedby = relation(Maintainer,
+                                                       primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
+                                  fingerprint = relation(Fingerprint),
+                                  source_files = relation(ChangePendingFile,
+                                                          secondary=self.tbl_changes_pending_source_files,
+                                                          backref="pending_sources")))
 +                                 files = relation(KnownChangePendingFile, backref="changesfile")))
 +
 +        mapper(KnownChangePendingFile, self.tbl_changes_pending_files,
 +               properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.c.id))
 +
          mapper(KeyringACLMap, self.tbl_keyring_acl_map,
                 properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
                                   keyring = relation(Keyring, backref="keyring_acl_map"),
          mapper(Override, self.tbl_override,
                 properties = dict(suite_id = self.tbl_override.c.suite,
                                   suite = relation(Suite),
 +                                 package = self.tbl_override.c.package,
                                   component_id = self.tbl_override.c.component,
                                   component = relation(Component),
                                   priority_id = self.tbl_override.c.priority,
                 properties = dict(overridetype = self.tbl_override_type.c.type,
                                   overridetype_id = self.tbl_override_type.c.id))
  
+         mapper(PolicyQueue, self.tbl_policy_queue,
+                properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
          mapper(Priority, self.tbl_priority,
                 properties = dict(priority_id = self.tbl_priority.c.id))
  
-         mapper(Queue, self.tbl_queue,
-                properties = dict(queue_id = self.tbl_queue.c.id))
-         mapper(QueueFile, self.tbl_queue_files,
-                properties = dict(queue = relation(Queue, backref='queuefiles'),
-                                  poolfile = relation(PoolFile, backref='queueinstances')))
          mapper(Section, self.tbl_section,
 -               properties = dict(section_id = self.tbl_section.c.id))
 +               properties = dict(section_id = self.tbl_section.c.id,
 +                                 section=self.tbl_section.c.section))
  
          mapper(DBSource, self.tbl_source,
                 properties = dict(source_id = self.tbl_source.c.id,
  
          mapper(Suite, self.tbl_suite,
                 properties = dict(suite_id = self.tbl_suite.c.id,
-                                  policy_queue = relation(Queue),
-                                  copy_queues = relation(Queue, secondary=self.tbl_suite_queue_copy)))
+                                  policy_queue = relation(PolicyQueue),
+                                  copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy)))
  
          mapper(SuiteArchitecture, self.tbl_suite_architectures,
                 properties = dict(suite_id = self.tbl_suite_architectures.c.suite,
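The DBConn rewrite above replaces the Singleton base class with the
shared-state ("Borg") idiom: every instance assigns its __dict__ to one
class-level dictionary, so DBConn() can be constructed anywhere and all
instances see the same connection state. A minimal illustration of the
idiom:

    class SharedState(object):
        __shared_state = {}

        def __init__(self):
            # All instances read and write the same attribute store.
            self.__dict__ = self.__shared_state

    a = SharedState()
    b = SharedState()
    a.x = 42
    assert b.x == 42    # state is shared across instances
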
diff --combined daklib/utils.py
index c3e4dbb32169b9aa4c8140943f860cb64593168f,3cc4053861896fa5023780d5b484bab61d6eab40..b7401751429b2c027f3ee89925bf593507a31795
mode 100755,100644..100644
@@@ -36,7 -36,6 +36,7 @@@ import sta
  import apt_pkg
  import time
  import re
 +import string
  import email as modemail
  import subprocess
  
@@@ -45,8 -44,7 +45,8 @@@ from dak_exceptions import 
  from textutils import fix_maintainer
  from regexes import re_html_escaping, html_escaping, re_single_line_field, \
                      re_multi_line_field, re_srchasver, re_taint_free, \
 -                    re_gpg_uid, re_re_mark, re_whitespace_comment, re_issource
 +                    re_gpg_uid, re_re_mark, re_whitespace_comment, re_issource, \
 +                    re_is_orig_source
  
  from formats import parse_format, validate_changes_format
  from srcformats import get_format_from_string
@@@ -714,24 -712,23 +714,23 @@@ def where_am_i ()
          return res[0]
  
  def which_conf_file ():
-     if os.getenv("DAK_CONFIG"):
-         print(os.getenv("DAK_CONFIG"))
-         return os.getenv("DAK_CONFIG")
-     else:
-         res = socket.gethostbyaddr(socket.gethostname())
-         # In case we allow local config files per user, try if one exists
-         if Cnf.FindB("Config::" + res[0] + "::AllowLocalConfig"):
-             homedir = os.getenv("HOME")
-             confpath = os.path.join(homedir, "/etc/dak.conf")
-             if os.path.exists(confpath):
-                 apt_pkg.ReadConfigFileISC(Cnf,default_config)
-         # We are still in here, so there is no local config file or we do
-         # not allow local files. Do the normal stuff.
-         if Cnf.get("Config::" + res[0] + "::DakConfig"):
-             return Cnf["Config::" + res[0] + "::DakConfig"]
-         else:
-             return default_config
+     if os.getenv('DAK_CONFIG'):
+         return os.getenv('DAK_CONFIG')
+     res = socket.gethostbyaddr(socket.gethostname())
+     # In case we allow local config files per user, try if one exists
+     if Cnf.FindB("Config::" + res[0] + "::AllowLocalConfig"):
+         homedir = os.getenv("HOME")
+         confpath = os.path.join(homedir, "etc/dak.conf")
+         if os.path.exists(confpath):
+             apt_pkg.ReadConfigFileISC(Cnf, confpath)
+     # We are still in here, so there is no local config file or we do
+     # not allow local files. Do the normal stuff.
+     if Cnf.get("Config::" + res[0] + "::DakConfig"):
+         return Cnf["Config::" + res[0] + "::DakConfig"]
+     return default_config
  
  def which_apt_conf_file ():
      res = socket.gethostbyaddr(socket.gethostname())
@@@ -1506,7 -1503,8 +1505,8 @@@ def get_changes_files(from_dir)
  apt_pkg.init()
  
  Cnf = apt_pkg.newConfiguration()
- apt_pkg.ReadConfigFileISC(Cnf,default_config)
+ if not os.getenv("DAK_TEST"):
+     apt_pkg.ReadConfigFileISC(Cnf,default_config)
  
  if which_conf_file() != default_config:
      apt_pkg.ReadConfigFileISC(Cnf,which_conf_file())
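
The straightened-out which_conf_file() above resolves the configuration
file in a fixed order: the DAK_CONFIG environment variable wins
outright, then a per-host Config::<host>::DakConfig entry, then the
built-in default. A quick illustration (the path is made up):

    import os
    os.environ['DAK_CONFIG'] = '/srv/dak/config/dak.conf'
    assert which_conf_file() == '/srv/dak/config/dak.conf'
    del os.environ['DAK_CONFIG']
    # Now falls through to Config::<hostname>::DakConfig, if set,
    # and otherwise to default_config.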