git.decadent.org.uk Git - dak.git/commitdiff
merge from ftp-master
author Mike O'Connor <stew@dhcp-101.dfw1.kickstart.lan>
Sat, 31 Oct 2009 10:29:34 +0000 (10:29 +0000)
committer Mike O'Connor <stew@dhcp-101.dfw1.kickstart.lan>
Sat, 31 Oct 2009 10:29:34 +0000 (10:29 +0000)
dak/contents.py
dak/dakdb/update17.py
dak/dakdb/update19.py
dak/dakdb/update23.py
daklib/binary.py
daklib/dbconn.py
daklib/utils.py

diff --combined dak/contents.py
index 4211e98e1122fd6f5aee8201e75a109b9d15cd7b,58c3aa6b668d364c23177b18b0e1b2bb9dc198ef..53d742270dda4a87f47f6ef1dc3bb1b31a375501
@@@ -39,10 -39,9 +39,11 @@@ import o
  import logging
  import gzip
  import threading
+ import traceback
  import Queue
  import apt_pkg
 +import datetime
 +import traceback
  from daklib import utils
  from daklib.binary import Binary
  from daklib.config import Config
@@@ -60,9 -59,6 +61,9 @@@ COMMAND
      bootstrap_bin
          scan the debs in the existing pool and load contents into the bin_contents table
  
 +    bootstrap
 +        copy data from the bin_contents table into the deb_contents / udeb_contents tables
 +
      cruft
          remove files/paths which are no longer referenced by a binary
  
@@@ -98,273 -94,107 +99,273 @@@ class EndOfContents(object)
      """
      pass
  
 -class GzippedContentWriter(object):
 +class OneAtATime(object):
      """
 -    An object which will write contents out to a Contents-$arch.gz
 -    file on a separate thread
 +    a one-slot queue which sits between multiple possible producers
 +    and multiple possible consumers
      """
 +    def __init__(self):
 +        self.next_in_line = None
 +        self.read_lock = threading.Condition()
 +        self.write_lock = threading.Condition()
 +        self.die = False
 +
 +    def enqueue(self, next):
 +        self.write_lock.acquire()
 +        while self.next_in_line:
 +            if self.die:
 +                self.write_lock.release()
 +                return
 +            self.write_lock.wait()
 +
 +        assert( not self.next_in_line )
 +        self.next_in_line = next
 +        self.write_lock.release()
 +        self.read_lock.acquire()
 +        self.read_lock.notify()
 +        self.read_lock.release()
 +
 +    def dequeue(self):
 +        self.read_lock.acquire()
 +        while not self.next_in_line:
 +            if self.die:
 +                self.read_lock.release()
 +                return
 +            self.read_lock.wait()
 +
 +        result = self.next_in_line
 +
 +        self.next_in_line = None
 +        self.read_lock.release()
 +        self.write_lock.acquire()
 +        self.write_lock.notify()
 +        self.write_lock.release()
 +
 +        return result
 +
 +
 +class ContentsWorkThread(threading.Thread):
 +    """
 +    A worker thread which repeatedly takes a ContentFile from its upstream
 +    OneAtATime queue, runs _run() on it, and hands it downstream, stopping
 +    when the EndOfContents sentinel arrives.
 +    """
 +    def __init__(self, upstream, downstream):
 +        threading.Thread.__init__(self)
 +        self.upstream = upstream
 +        self.downstream = downstream
  
 -    header = None # a class object holding the header section of contents file
 -
 -    def __init__(self, filename):
 -        """
 -        @type filename: string
 -        @param filename: the name of the file to write to
 -        """
 -        self.queue = Queue.Queue()
 -        self.current_file = None
 -        self.first_package = True
 -        self.output = self.open_file(filename)
 -        self.thread = threading.Thread(target=self.write_thread,
 -                                       name='Contents writer')
 -        self.thread.start()
 -
 -    def open_file(self, filename):
 +    def run(self):
 +        while True:
 +            try:
 +                contents_file = self.upstream.dequeue()
 +                if isinstance(contents_file,EndOfContents):
 +                    if self.downstream:
 +                        self.downstream.enqueue(contents_file)
 +                    break
 +
 +                s = datetime.datetime.now()
 +                print("%s start: %s" % (self,contents_file) )
 +                self._run(contents_file)
 +                print("%s finished: %s in %d seconds" % (self, contents_file, (datetime.datetime.now()-s).seconds ))
 +                if self.downstream:
 +                    self.downstream.enqueue(contents_file)
 +            except:
 +                traceback.print_exc()
 +
 +class QueryThread(ContentsWorkThread):
 +    def __init__(self, upstream, downstream):
 +        ContentsWorkThread.__init__(self, upstream, downstream)
 +
 +    def __str__(self):
 +        return "QueryThread"
 +    __repr__ = __str__
 +
 +    def _run(self, contents_file):
 +        contents_file.query()
 +
 +class IngestThread(ContentsWorkThread):
 +    def __init__(self, upstream, downstream):
 +        ContentsWorkThread.__init__(self, upstream, downstream)
 +
 +    def __str__(self):
 +        return "IngestThread"
 +    __repr__ = __str__
 +
 +    def _run(self, contents_file):
 +        contents_file.ingest()
 +
 +class SortThread(ContentsWorkThread):
 +    def __init__(self, upstream, downstream):
 +        ContentsWorkThread.__init__(self, upstream, downstream)
 +
 +    def __str__(self):
 +        return "SortThread"
 +    __repr__ = __str__
 +
 +    def _run(self, contents_file):
 +        contents_file.sorted_keys = sorted(contents_file.filenames.keys())
 +
 +class OutputThread(ContentsWorkThread):
 +    def __init__(self, upstream, downstream):
 +        ContentsWorkThread.__init__(self, upstream, downstream)
 +
 +    def __str__(self):
 +        return "OutputThread"
 +    __repr__ = __str__
 +
 +    def _run(self, contents_file):
 +        contents_file.open_file()
 +        for fname in contents_file.sorted_keys:
 +            contents_file.filehandle.write("%s\t%s\n" % (fname,contents_file.filenames[fname]))
 +        contents_file.sorted_keys = None
 +        contents_file.filenames.clear()
 +    
 +class GzipThread(ContentsWorkThread):
 +    def __init__(self, upstream, downstream):
 +        ContentsWorkThread.__init__(self, upstream, downstream)
 +
 +    def __str__(self):
 +        return "GzipThread"
 +    __repr__ = __str__
 +
 +    def _run(self, contents_file):
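 +        # shells out to gzip(1); assumes the filename contains no shell metacharacters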
 +        os.system("gzip -f %s" % contents_file.filename)
 +
 +class ContentFile(object):
 +    def __init__(self,
 +                 filename,
 +                 suite_str,
 +                 suite_id):
 +
 +        self.filename = filename
 +        self.filenames = {}
 +        self.sorted_keys = None
 +        self.suite_str = suite_str
 +        self.suite_id = suite_id
 +        self.session = None
 +        self.filehandle = None
 +        self.results = None
 +
 +    def __str__(self):
 +        return self.filename
 +    __repr__ = __str__
 +
 +
 +    def cleanup(self):
 +        self.filenames = None
 +        self.sorted_keys = None
 +        self.filehandle.close()
 +        self.session.close()
 +
 +    def ingest(self):
 +        while True:
 +            r = self.results.fetchone()
 +            if not r:
 +                break
 +            filename, package = r
 +            self.filenames[filename]=package
 +
 +        self.session.close()
 +
 +#     def ingest(self):
 +#         while True:
 +#             r = self.results.fetchone()
 +#             if not r:
 +#                 break
 +#             filename, package = r
 +#             if self.filenames.has_key(filename):
 +#                 self.filenames[filename] += ",%s" % (package)
 +#             else:
 +#                 self.filenames[filename] = "%s" % (package)
 +#         self.session.close()
 +
 +    def open_file(self):
          """
          opens a gzip stream to the contents file
          """
 -        filepath = Config()["Contents::Root"] + filename
 -        filedir = os.path.dirname(filepath)
 +        self.filename = Config()["Contents::Root"] + self.filename
 +        filedir = os.path.dirname(self.filename)
          if not os.path.isdir(filedir):
              os.makedirs(filedir)
 -        return gzip.open(filepath, "w")
 -
 -    def write(self, filename, section, package):
 -        """
 -        enqueue content to be written to the file on a separate thread
 -        """
 -        self.queue.put((filename,section,package))
 -
 -    def write_thread(self):
 -        """
 -        the target of a Thread which will do the actual writing
 -        """
 -        while True:
 -            next = self.queue.get()
 -            if isinstance(next, EndOfContents):
 -                self.output.write('\n')
 -                self.output.close()
 -                break
 +#        self.filehandle = gzip.open(self.filename, "w")
 +        self.filehandle = open(self.filename, "w")
 +        self._write_header()
  
 -            (filename,section,package)=next
 -            if next != self.current_file:
 -                # this is the first file, so write the header first
 -                if not self.current_file:
 -                    self.output.write(self._getHeader())
 +    def _write_header(self):
 +        self._get_header()
 +        self.filehandle.write(ContentFile.header)
  
 -                self.output.write('\n%s\t' % filename)
 -                self.first_package = True
 -
 -            self.current_file=filename
 -
 -            if not self.first_package:
 -                self.output.write(',')
 -            else:
 -                self.first_package=False
 -            self.output.write('%s/%s' % (section,package))
 -
 -    def finish(self):
 -        """
 -        enqueue the sentry object so that writers will know to terminate
 -        """
 -        self.queue.put(EndOfContents())
 +    header=None
  
      @classmethod
 -    def _getHeader(self):
 +    def _get_header(self):
          """
          Internal method to return the header for Contents.gz files
  
          This is boilerplate which explains the contents of the file and how
          it can be used.
          """
 -        if not GzippedContentWriter.header:
 +        if not ContentFile.header:
              if Config().has_key("Contents::Header"):
                  try:
                      h = open(os.path.join( Config()["Dir::Templates"],
                                             Config()["Contents::Header"] ), "r")
 -                    GzippedContentWriter.header = h.read()
 +                    ContentFile.header = h.read()
                      h.close()
                  except:
                     log.error( "error opening header file: %s\n%s" % (Config()["Contents::Header"],
                                                                        traceback.format_exc() ))
 -                    GzippedContentWriter.header = None
 +                    ContentFile.header = None
              else:
 -                GzippedContentWriter.header = None
 -
 -        return GzippedContentWriter.header
 -
 +                ContentFile.header = None
 +
 +        return ContentFile.header
 +
 +
 +class DebContentFile(ContentFile):
 +    def __init__(self,
 +                 filename,
 +                 suite_str,
 +                 suite_id,
 +                 arch_str,
 +                 arch_id):
 +        ContentFile.__init__(self,
 +                             filename,
 +                             suite_str,
 +                             suite_id )
 +        self.arch_str = arch_str
 +        self.arch_id = arch_id
 +
 +    def query(self):
 +        self.session = DBConn().session();
 +
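 +        # arch=2 is assumed to be the id of the 'all' architecture; its rows
 +        # are folded into every per-arch Contents file (update23 hard-codes
 +        # the same id in its partial indexes)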
 +        self.results = self.session.execute("""SELECT filename, comma_separated_list(section || '/' || package)
 +        FROM deb_contents
 +        WHERE ( arch=2 or arch = :arch) AND suite = :suite
 +        """, { 'arch':self.arch_id, 'suite':self.suite_id } )
 +
 +class UdebContentFile(ContentFile):
 +    def __init__(self,
 +                 filename,
 +                 suite_str,
 +                 suite_id,
 +                 section_name,
 +                 section_id):
 +        ContentFile.__init__(self,
 +                             filename,
 +                             suite_str,
 +                             suite_id )
 +
 +    def query(self):
 +        self.session = DBConn().session();
 +
 +        self.results = self.session.execute("""SELECT filename, comma_separated_list(section || '/' || package)
 +        FROM udeb_contents
 +        WHERE suite = :suite
 +        group by filename
 +        """ , { 'suite': self.suite_id } )
  
  class Contents(object):
      """
      Class capable of generating Contents-$arch.gz files
      """
 -
      def __init__(self):
          self.header = None
  
  
          s = DBConn().session()
  
 -        #        for binary in s.query(DBBinary).all() ):
 -        binary = s.query(DBBinary).first()
 -        if binary:
 +        for binary in s.query(DBBinary).yield_per(100):
 +            print( "binary: %s" % binary.package )
              filename = binary.poolfile.filename
               # Check for existing contents
              existingq = s.execute( "select 1 from bin_contents where binary_id=:id", {'id':binary.binary_id} );
          """
          scan the existing debs in the pool to populate the contents database tables
          """
 -        pooldir = Config()[ 'Dir::Pool' ]
 -
          s = DBConn().session()
  
 -        for suite in s.query(Suite).all():
 -            for arch in get_suite_architectures(suite.suite_name, skipsrc=True, skipall=True, session=s):
 -                q = s.query(BinAssociation).join(Suite)
 -                q = q.join(Suite).filter_by(suite_name=suite.suite_name)
 -                q = q.join(DBBinary).join(Architecture).filter_by(arch.arch_string)
 -                for ba in q:
 -                    filename = ba.binary.poolfile.filename
 -                    # Check for existing contents
 -                    existingq = s.query(ContentAssociations).filter_by(binary_pkg=ba.binary_id).limit(1)
 -                    if existingq.count() > 0:
 -                        log.debug( "already imported: %s" % (filename))
 -                    else:
 -                        # We don't have existing contents so import them
 -                        log.debug( "scanning: %s" % (filename) )
 -                        debfile = os.path.join(pooldir, filename)
 -                        if os.path.exists(debfile):
 -                            Binary(debfile, self.reject).scan_package(ba.binary_id, True)
 -                        else:
 -                            log.error("missing .deb: %s" % filename)
 +
 +        # get a mapping of all the override types we care about (right now .deb and .udeb)
 +        override_type_map = {};
 +        for override_type in s.query(OverrideType).all():
 +            if override_type.overridetype.endswith('deb' ):
 +                override_type_map[override_type.overridetype_id] = override_type.overridetype;
 +
 +        for override in s.query(Override).yield_per(100):
 +            if not override_type_map.has_key(override.overridetype_id):
 +                #this isn't an override we care about
 +                continue
 +
 +            binaries = s.execute("""SELECT b.id, b.architecture
 +                                    FROM binaries b
 +                                    JOIN bin_associations ba ON ba.bin=b.id
 +                                    WHERE ba.suite=:suite
 +                                    AND b.package=:package""", {'suite':override.suite_id, 'package':override.package})
 +            while True:
 +                binary = binaries.fetchone()
 +                if not binary:
 +                    break
 +
 +                exists = s.execute("SELECT 1 FROM %s_contents WHERE binary_id=:id limit 1" % override_type_map[override.overridetype_id], {'id':binary.id})
 +
 +
 +                if exists.fetchone():
 +                    print '.',
 +                    continue
 +                else:
 +                    print '+',
 +
 +                s.execute( """INSERT INTO %s_contents (filename,section,package,binary_id,arch,suite)
 +                              SELECT file, :section, :package, :binary_id, :arch, :suite
 +                              FROM bin_contents
 +                              WHERE binary_id=:binary_id;""" % override_type_map[override.overridetype_id],
 +                           { 'section' : override.section_id,
 +                             'package' : override.package,
 +                             'binary_id' : binary.id,
 +                             'arch' : binary.architecture,
 +                             'suite' : override.suite_id } )
 +                s.commit()
 +
 +    def generate(self):
 +        """
 +        Generate contents files for both deb and udeb
 +        """
 +        self.deb_generate()
 +#        self.udeb_generate()
 +
 +    def deb_generate(self):
 +        """
 +        Generate Contents-$arch.gz files for every available arch in each given suite.
 +        """
 +        session = DBConn().session()
 +        debtype_id = get_override_type("deb", session)
 +        suites = self._suites()
 +
 +        inputtoquery = OneAtATime()
 +        querytoingest = OneAtATime()
 +        ingesttosort = OneAtATime()
 +        sorttooutput = OneAtATime()
 +        outputtogzip = OneAtATime()
 +
 +        qt = QueryThread(inputtoquery,querytoingest)
 +        it = IngestThread(querytoingest,ingesttosort)
 +# these actually make things worse
 +#        it2 = IngestThread(querytoingest,ingesttosort)
 +#        it3 = IngestThread(querytoingest,ingesttosort)
 +#        it4 = IngestThread(querytoingest,ingesttosort)
 +        st = SortThread(ingesttosort,sorttooutput)
 +        ot = OutputThread(sorttooutput,outputtogzip)
 +        gt = GzipThread(outputtogzip, None)
 +
 +        qt.start()
 +        it.start()
 +#        it2.start()
 +#        it3.start()
 +#        it2.start()
 +        st.start()
 +        ot.start()
 +        gt.start()
 +        
 +        # Get our suites, and the architectures
 +        for suite in [i.lower() for i in suites]:
 +            suite_id = get_suite(suite, session).suite_id
 +            print( "got suite_id: %s for suite: %s" % (suite_id, suite ) )
 +            arch_list = self._arches(suite_id, session)
 +
 +            for (arch_id,arch_str) in arch_list:
 +                print( "suite: %s, arch: %s time: %s" %(suite_id, arch_id, datetime.datetime.now().isoformat()) )
 +
 +#                filename = "dists/%s/Contents-%s.gz" % (suite, arch_str)
 +                filename = "dists/%s/Contents-%s" % (suite, arch_str)
 +                cf = DebContentFile(filename, suite, suite_id, arch_str, arch_id)
 +                inputtoquery.enqueue( cf )
 +
 +        inputtoquery.enqueue( EndOfContents() )
 +        gt.join()
 +
 +    def udeb_generate(self):
 +        """
 +        Generate Contents-$arch.gz files for every available arch in each given suite.
 +        """
 +        session = DBConn().session()
 +        udebtype_id=DBConn().get_override_type_id("udeb")
 +        suites = self._suites()
 +
 +        inputtoquery = OneAtATime()
 +        querytoingest = OneAtATime()
 +        ingesttosort = OneAtATime()
 +        sorttooutput = OneAtATime()
 +        outputtogzip = OneAtATime()
 +
 +        qt = QueryThread(inputtoquery,querytoingest)
 +        it = IngestThread(querytoingest,ingesttosort)
 +# these actually make things worse
 +#        it2 = IngestThread(querytoingest,ingesttosort)
 +#        it3 = IngestThread(querytoingest,ingesttosort)
 +#        it4 = IngestThread(querytoingest,ingesttosort)
 +        st = SortThread(ingesttosort,sorttooutput)
 +        ot = OutputThread(sorttooutput,outputtogzip)
 +        gt = GzipThread(outputtogzip, None)
 +
 +        qt.start()
 +        it.start()
 +#        it2.start()
 +#        it3.start()
 +#        it2.start()
 +        st.start()
 +        ot.start()
 +        gt.start()
 +        
 +#        for section, fn_pattern in [("debian-installer","dists/%s/Contents-udeb-%s"),
 +#                                     ("non-free/debian-installer", "dists/%s/Contents-udeb-nf-%s")]:
 +
 +#             section_id = DBConn().get_section_id(section) # all udebs should be here)
 +#             if section_id != -1:
 +
 +                
 +
 +#                 # Get our suites, and the architectures
 +#                 for suite in [i.lower() for i in suites]:
 +#                     suite_id = DBConn().get_suite_id(suite)
 +#                     arch_list = self._arches(suite_id, session)
 +
 +#                     for arch_id in arch_list:
 +
 +#                         writer = GzippedContentWriter(fn_pattern % (suite, arch_id[1]))
 +#                         try:
 +
 +#                             results = session.execute("EXECUTE udeb_contents_q(%d,%d,%d)" % (suite_id, udebtype_id, section_id, arch_id))
 +
 +#                             while True:
 +#                                 r = cursor.fetchone()
 +#                                 if not r:
 +#                                     break
 +
 +#                                 filename, section, package, arch = r
 +#                                 writer.write(filename, section, package)
 +#                         finally:
 +#                             writer.close()
 +
 +
  
  
      def generate(self):
                  section = get_section(section, session)
  
              # Get our suites
 -            for suite in which_suites():
 +            for suite in which_suites(session):
                  # Which architectures do we need to work on
                  arch_list = get_suite_architectures(suite.suite_name, skipsrc=True, skipall=True, session=session)
  
                      # close all the files
                      for writer in file_writers.values():
                          writer.finish()
 +    def _suites(self):
 +        """
 +        return a list of suites to operate on
 +        """
 +        if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
 +            suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
 +        else:
 +            suites = [ 'unstable', 'testing' ]
 +#            suites = Config().SubTree("Suite").List()
 +
 +        return suites
 +
 +    def _arches(self, suite, session):
 +        """
 +        return a list of archs to operate on
 +        """
 +        arch_list = []
 +        arches = session.execute(
 +            """SELECT s.architecture, a.arch_string
 +            FROM suite_architectures s
 +            JOIN architecture a ON (s.architecture=a.id)
 +            WHERE suite = :suite_id""",
 +            {'suite_id':suite } )
 +
 +        while True:
 +            r = arches.fetchone()
 +            if not r:
 +                break
 +
 +            if r[1] != "source" and r[1] != "all":
 +                arch_list.append((r[0], r[1]))
 +
 +        return arch_list
 +
  
  ################################################################################
  
@@@ -693,9 -348,8 +694,9 @@@ def main()
                   ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")),
                  ]
  
 -    commands = {'generate' : Contents.generate,
 +    commands = {'generate' : Contents.deb_generate,
                  'bootstrap_bin' : Contents.bootstrap_bin,
 +                'bootstrap' : Contents.bootstrap,
                  'cruft' : Contents.cruft,
                  }
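The OneAtATime class above is the heart of this pipeline: a one-slot handoff queue whose enqueue() blocks until the slot is empty and whose dequeue() blocks until it is filled, so each stage (query, ingest, sort, output, gzip) holds exactly one ContentFile at a time. A minimal standalone sketch of the same rendezvous, using a single Condition where the code above uses separate read and write locks (illustrative only, not the shipped class):

    import threading

    class OneSlotQueue(object):
        # a one-slot handoff queue: at most one item in flight at a time
        def __init__(self):
            self.slot = None
            self.cond = threading.Condition()

        def enqueue(self, item):
            self.cond.acquire()
            try:
                while self.slot is not None:
                    self.cond.wait()       # block until a consumer empties the slot
                self.slot = item
                self.cond.notifyAll()      # wake a waiting consumer
            finally:
                self.cond.release()

        def dequeue(self):
            self.cond.acquire()
            try:
                while self.slot is None:
                    self.cond.wait()       # block until a producer fills the slot
                item = self.slot
                self.slot = None
                self.cond.notifyAll()      # wake a waiting producer
                return item
            finally:
                self.cond.release()

Stages are then chained exactly as deb_generate() does: each thread dequeues from its upstream queue and enqueues the same ContentFile downstream, with EndOfContents passed through as the shutdown sentinel.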
  
diff --combined dak/dakdb/update17.py
index b5bbb3cce22bd805cdfc92a39bc1a55caf7438c9,d75bdb5b2575beb2686659c7f46389d4d72008c9..beca9425020e6b3df6004eccdf6dea1299a54541
mode 100644,100755..100755
@@@ -44,7 -44,7 +44,7 @@@ def do_update(self)
          file text,
          binary_id integer,
          UNIQUE(file,binary_id))""" )
-         
          c.execute("""ALTER TABLE ONLY bin_contents
          ADD CONSTRAINT bin_contents_bin_fkey
          FOREIGN KEY (binary_id) REFERENCES binaries(id)
  
          c.execute("""CREATE INDEX ind_bin_contents_binary ON bin_contents(binary_id);""" )
  
 +        c.execute("GRANT ALL ON bin_contents TO ftpmaster;")
 +        c.execute("GRANT SELECT ON bin_contents TO public;")
+         c.execute("UPDATE config SET value = '17' WHERE name = 'db_revision'")
 +
          self.db.commit()
  
      except psycopg2.ProgrammingError, msg:
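For orientation, every dak/dakdb/updateNN.py follows the convention visible above: a do_update(self) entry point runs its DDL on self.db's cursor, bumps the db_revision row in the config table, and commits, rolling back and raising DBUpdateError on failure. A minimal sketch of that shape (the GRANT is a placeholder statement):

    import psycopg2

    from daklib.dak_exceptions import DBUpdateError

    def do_update(self):
        try:
            c = self.db.cursor()

            # ... schema changes for this revision ...
            c.execute("GRANT SELECT ON bin_contents TO public")  # placeholder

            c.execute("UPDATE config SET value = '17' WHERE name = 'db_revision'")
            self.db.commit()

        except psycopg2.ProgrammingError, msg:
            self.db.rollback()
            raise DBUpdateError, "Unable to apply update, rollback issued. Error message : %s" % (str(msg))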
diff --combined dak/dakdb/update19.py
index f530375c6035a719453cacc65d818fae10bc9e71,49a4dbc71f24b5234e63e15fdabaf65b4b9826d5..49a4dbc71f24b5234e63e15fdabaf65b4b9826d5
mode 100644,100755..100644
@@@ -2,10 -2,10 +2,10 @@@
  # coding=utf8
  
  """
- Adding a trainee field to the process-new notes
+ Move to using the C version of debversion
  
  @contact: Debian FTP Master <ftpmaster@debian.org>
- @copyright: 2009  Mike O'Connor <stew@debian.org>
+ @copyright: 2009  Mark Hymers <mhy@debian.org>
  @license: GNU General Public License version 2 or later
  """
  
  
  import psycopg2
  import time
+ import os
+ import datetime
+ import traceback
  from daklib.dak_exceptions import DBUpdateError
+ from daklib.config import Config
  
  ################################################################################
  
- def suites():
-     """
-     return a list of suites to operate on
-     """
-     if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
-         suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
-     else:
-         suites = [ 'unstable', 'testing' ]
- #            suites = Config().SubTree("Suite").List()
-     return suites
- def arches(cursor, suite):
-     """
-     return a list of archs to operate on
-     """
-     arch_list = []
-     cursor.execute("""SELECT s.architecture, a.arch_string
-     FROM suite_architectures s
-     JOIN architecture a ON (s.architecture=a.id)
-     WHERE suite = :suite""", {'suite' : suite })
-     while True:
-         r = cursor.fetchone()
-         if not r:
-             break
-         if r[1] != "source" and r[1] != "all":
-             arch_list.append((r[0], r[1]))
-     return arch_list
  def do_update(self):
-     """
-     Adding contents table as first step to maybe, finally getting rid
-     of apt-ftparchive
-     """
-     print __doc__
+     print "Converting database to use new C based debversion type"
  
      try:
          c = self.db.cursor()
  
-         c.execute("""CREATE TABLE pending_bin_contents (
-         id serial NOT NULL,
-         package text NOT NULL,
-         version debversion NOT NULL,
-         arch int NOT NULL,
-         filename text NOT NULL,
-         type int NOT NULL,
-         PRIMARY KEY(id))""" );
-         c.execute("""CREATE TABLE deb_contents (
-         filename text,
-         section text,
-         package text,
-         binary_id integer,
-         arch integer,
-         suite integer,
-         component text)""" )
-         c.execute("""CREATE TABLE udeb_contents (
-         filename text,
-         section text,
-         package text,
-         binary_id integer,
-         suite integer,
-         arch integer,
-         component text )""" )
-         c.execute("""ALTER TABLE ONLY deb_contents
-         ADD CONSTRAINT deb_contents_arch_fkey
-         FOREIGN KEY (arch) REFERENCES architecture(id)
-         ON DELETE CASCADE;""")
-         c.execute("""ALTER TABLE ONLY udeb_contents
-         ADD CONSTRAINT udeb_contents_arch_fkey
-         FOREIGN KEY (arch) REFERENCES architecture(id)
-         ON DELETE CASCADE;""")
-         c.execute("""ALTER TABLE ONLY deb_contents
-         ADD CONSTRAINT deb_contents_pkey
-         PRIMARY KEY (filename,package,arch,suite);""")
-         c.execute("""ALTER TABLE ONLY udeb_contents
-         ADD CONSTRAINT udeb_contents_pkey
-         PRIMARY KEY (filename,package,arch,suite);""")
-         c.execute("""ALTER TABLE ONLY deb_contents
-         ADD CONSTRAINT deb_contents_suite_fkey
-         FOREIGN KEY (suite) REFERENCES suite(id)
-         ON DELETE CASCADE;""")
-         c.execute("""ALTER TABLE ONLY udeb_contents
-         ADD CONSTRAINT udeb_contents_suite_fkey
-         FOREIGN KEY (suite) REFERENCES suite(id)
-         ON DELETE CASCADE;""")
-         c.execute("""ALTER TABLE ONLY deb_contents
-         ADD CONSTRAINT deb_contents_binary_fkey
-         FOREIGN KEY (binary_id) REFERENCES binaries(id)
-         ON DELETE CASCADE;""")
-         c.execute("""ALTER TABLE ONLY udeb_contents
-         ADD CONSTRAINT udeb_contents_binary_fkey
-         FOREIGN KEY (binary_id) REFERENCES binaries(id)
-         ON DELETE CASCADE;""")
-         c.execute("""CREATE INDEX ind_deb_contents_binary ON deb_contents(binary_id);""" )
-         suites = self.suites()
-         for suite in [i.lower() for i in suites]:
-             suite_id = DBConn().get_suite_id(suite)
-             arch_list = arches(c, suite_id)
-             arch_list = arches(c, suite_id)
-             for (arch_id,arch_str) in arch_list:
-                 c.execute( "CREATE INDEX ind_deb_contents_%s_%s ON deb_contents (arch,suite) WHERE (arch=2 OR arch=%d) AND suite=$d"%(arch_str,suite,arch_id,suite_id) )
-             for section, sname in [("debian-installer","main"),
-                                   ("non-free/debian-installer", "nonfree")]:
-                 c.execute( "CREATE INDEX ind_udeb_contents_%s_%s ON udeb_contents (section,suite) WHERE section=%s AND suite=$d"%(sname,suite,section,suite_id) )
-         c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_bin_a() RETURNS trigger AS  $$
-     event = TD["event"]
-     if event == "DELETE" or event == "UPDATE":
-         plpy.execute(plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 and suite=$2",
-                                   ["int","int"]),
-                                   [TD["old"]["bin"], TD["old"]["suite"]])
-     if event == "INSERT" or event == "UPDATE":
-        content_data = plpy.execute(plpy.prepare(
-             """SELECT s.section, b.package, b.architecture, c.name, ot.type
-             FROM override o
-             JOIN override_type ot on o.type=ot.id
-             JOIN binaries b on b.package=o.package
-             JOIN files f on b.file=f.id
-             JOIN location l on l.id=f.location
-             JOIN section s on s.id=o.section
-             JOIN component c on c.id=l.component
-             WHERE b.id=$1
-             AND o.suite=$2
-             """,
-             ["int", "int"]),
-             [TD["new"]["bin"], TD["new"]["suite"]])[0]
-        component_str = "";
-        if not content_data["name"] === "main":
-            component_str=content_data["name"]+"/"
-        
-        filenames = plpy.execute(plpy.prepare(
-            "SELECT bc.file FROM bin_contents bc where bc.binary_id=$1",
-            ["int"]),
-            [TD["new"]["bin"]])
-        for filename in filenames:
-            plpy.execute(plpy.prepare(
-                """INSERT INTO deb_contents
-                    (file,section,package,binary_id,arch,suite,component)
-                    VALUES($1,$2,$3,$4,$5,$6,$7)""",
-                ["text","text","text","int","int","int","text"]),
-                [filename["filename"],
-                 content_data["section"],
-                 content_data["package"],
-                 TD["new"]["bin"],
-                 content_data["architecture"],
-                 TD["new"]["suite"],
-                 component_str])
- $$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
- """)
-         c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_override() RETURNS trigger AS  $$
-     event = TD["event"]
-     if event == "UPDATE":
-         otype = plpy.execute(plpy.prepare("SELECT type from override_type where id=$1",["int"]),TD["new"]["type"] )[0];
-         if otype["type"].endswith("deb"):
-             table_name = "%s_contents" % otype["type"]
-             plpy.execute(plpy.prepare("UPDATE %s set sections=$1" % table_name
-                                       ["text"]),
-                                       [TD["new"]["section"]])
- $$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
- """)
-         c.execute( """CREATE TRIGGER bin_associations_contents_trigger
-                       AFTER INSERT OR UPDATE OR DELETE ON bin_associations
-                       FOR EACH ROW EXECUTE PROCEDURE update_contents_for_bin_a();""")
-         c.execute("""CREATE TRIGGER override_contents_trigger
-                       AFTER UPDATE ON override
-                       FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
+         print "Temporarily converting columns to TEXT"
+         c.execute("ALTER TABLE binaries ALTER COLUMN version TYPE TEXT")
+         c.execute("ALTER TABLE source ALTER COLUMN version TYPE TEXT")
+         c.execute("ALTER TABLE upload_blocks ALTER COLUMN version TYPE TEXT")
+         c.execute("ALTER TABLE pending_content_associations ALTER COLUMN version TYPE TEXT")
+         print "Dropping old debversion type"
+         c.execute("DROP OPERATOR >(debversion, debversion)")
+         c.execute("DROP OPERATOR <(debversion, debversion)")
+         c.execute("DROP OPERATOR <=(debversion, debversion)")
+         c.execute("DROP OPERATOR >=(debversion, debversion)")
+         c.execute("DROP OPERATOR =(debversion, debversion)")
+         c.execute("DROP OPERATOR <>(debversion, debversion)")
+         c.execute("DROP FUNCTION debversion_eq(debversion,debversion)")
+         c.execute("DROP FUNCTION debversion_ge(debversion,debversion)")
+         c.execute("DROP FUNCTION debversion_gt(debversion,debversion)")
+         c.execute("DROP FUNCTION debversion_le(debversion,debversion)")
+         c.execute("DROP FUNCTION debversion_lt(debversion,debversion)")
+         c.execute("DROP FUNCTION debversion_ne(debversion,debversion)")
+         c.execute("DROP FUNCTION debversion_compare(debversion,debversion)")
+         c.execute("DROP FUNCTION debversion_revision(debversion)")
+         c.execute("DROP FUNCTION debversion_version(debversion)")
+         c.execute("DROP FUNCTION debversion_epoch(debversion)")
+         c.execute("DROP FUNCTION debversion_split(debversion)")
+         c.execute("DROP TYPE debversion")
+         # URGH - kill me now
+         print "Importing new debversion type"
+         f = open('/usr/share/postgresql/8.4/contrib/debversion.sql', 'r')
+         cmds = []
+         curcmd = ''
+         for j in f.readlines():
+             j = j.replace('\t', '').replace('\n', '').split('--')[0]
+             if not j.startswith('--'):
+                 jj = j.split(';')
+                 curcmd += " " + jj[0]
+                 if len(jj) > 1:
+                     for jjj in jj[1:]:
+                         if jjj.strip() == '':
+                             cmds.append(curcmd)
+                             curcmd = ''
+                         else:
+                             curcmd += " " + jjj
+         for cm in cmds:
+             c.execute(cm)
+         print "Converting columns to new debversion type"
+         c.execute("ALTER TABLE binaries ALTER COLUMN version TYPE debversion")
+         c.execute("ALTER TABLE source ALTER COLUMN version TYPE debversion")
+         c.execute("ALTER TABLE upload_blocks ALTER COLUMN version TYPE debversion")
+         c.execute("ALTER TABLE pending_content_associations ALTER COLUMN version TYPE debversion")
+         print "Committing"
+         c.execute("UPDATE config SET value = '19' WHERE name = 'db_revision'")
          self.db.commit()
  
-     except psycopg2.ProgrammingError, msg:
+     except psycopg2.InternalError, msg:
          self.db.rollback()
-         raise DBUpdateError, "Unable to apply process-new update 14, rollback issued. Error message : %s" % (str(msg))
+         raise DBUpdateError, "Unable to apply debversion update 19, rollback issued. Error message : %s" % (str(msg))
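The import loop above re-assembles /usr/share/postgresql/8.4/contrib/debversion.sql into executable statements: it strips tabs and newlines, drops '--' comments, and splits on ';' while carrying partial statements across lines. The same idea reads more directly as a generator, assuming (as holds for that file) that no ';' occurs inside a string literal:

    def split_sql_statements(path):
        # yields one SQL statement per ';', ignoring '--' comments
        current = []
        for line in open(path):
            line = line.split('--')[0].strip()   # drop comments and whitespace
            if not line:
                continue
            pieces = line.split(';')
            for piece in pieces[:-1]:
                current.append(piece)
                yield ' '.join(current).strip()
                current = []
            if pieces[-1].strip():
                current.append(pieces[-1])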
diff --combined dak/dakdb/update23.py
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..9d97172b9352059672ff1624be9caf90602eb049
new file mode 100644 (file)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,239 @@@
++#!/usr/bin/env python
++# coding=utf8
++
++"""
++Adding the contents tables as a first step towards getting rid of apt-ftparchive
++
++@contact: Debian FTP Master <ftpmaster@debian.org>
++@copyright: 2009  Mike O'Connor <stew@debian.org>
++@license: GNU General Public License version 2 or later
++"""
++
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the License, or
++# (at your option) any later version.
++
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software
++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++
++################################################################################
++
++
++################################################################################
++
++import psycopg2
++import time
++from daklib.dak_exceptions import DBUpdateError
++from daklib.config import Config
++from daklib.dbconn import DBConn
++from daklib import utils
++
++# assumed to match the option prefix used by dak/contents.py
++options_prefix = "Contents"
++
++################################################################################
++
++def suites():
++    """
++    return a list of suites to operate on
++    """
++    if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
++        suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
++    else:
++        suites = [ 'unstable', 'testing' ]
++#            suites = Config().SubTree("Suite").List()
++
++    return suites
++
++def arches(cursor, suite):
++    """
++    return a list of archs to operate on
++    """
++    arch_list = []
++    cursor.execute("""SELECT s.architecture, a.arch_string
++    FROM suite_architectures s
++    JOIN architecture a ON (s.architecture=a.id)
++    WHERE suite = :suite""", {'suite' : suite })
++
++    while True:
++        r = cursor.fetchone()
++        if not r:
++            break
++
++        if r[1] != "source" and r[1] != "all":
++            arch_list.append((r[0], r[1]))
++
++    return arch_list
++
++def do_update(self):
++    """
++    Adding contents table as first step to maybe, finally getting rid
++    of apt-ftparchive
++    """
++
++    print __doc__
++
++    try:
++        c = self.db.cursor()
++
++        c.execute("""CREATE TABLE pending_bin_contents (
++        id serial NOT NULL,
++        package text NOT NULL,
++        version debversion NOT NULL,
++        arch int NOT NULL,
++        filename text NOT NULL,
++        type int NOT NULL,
++        PRIMARY KEY(id))""" );
++
++        c.execute("""CREATE TABLE deb_contents (
++        filename text,
++        section text,
++        package text,
++        binary_id integer,
++        arch integer,
++        suite integer,
++        component text)""" )
++
++        c.execute("""CREATE TABLE udeb_contents (
++        filename text,
++        section text,
++        package text,
++        binary_id integer,
++        suite integer,
++        arch integer,
++        component text )""" )
++
++        c.execute("""ALTER TABLE ONLY deb_contents
++        ADD CONSTRAINT deb_contents_arch_fkey
++        FOREIGN KEY (arch) REFERENCES architecture(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""ALTER TABLE ONLY udeb_contents
++        ADD CONSTRAINT udeb_contents_arch_fkey
++        FOREIGN KEY (arch) REFERENCES architecture(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""ALTER TABLE ONLY deb_contents
++        ADD CONSTRAINT deb_contents_pkey
++        PRIMARY KEY (filename,package,arch,suite);""")
++
++        c.execute("""ALTER TABLE ONLY udeb_contents
++        ADD CONSTRAINT udeb_contents_pkey
++        PRIMARY KEY (filename,package,arch,suite);""")
++
++        c.execute("""ALTER TABLE ONLY deb_contents
++        ADD CONSTRAINT deb_contents_suite_fkey
++        FOREIGN KEY (suite) REFERENCES suite(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""ALTER TABLE ONLY udeb_contents
++        ADD CONSTRAINT udeb_contents_suite_fkey
++        FOREIGN KEY (suite) REFERENCES suite(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""ALTER TABLE ONLY deb_contents
++        ADD CONSTRAINT deb_contents_binary_fkey
++        FOREIGN KEY (binary_id) REFERENCES binaries(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""ALTER TABLE ONLY udeb_contents
++        ADD CONSTRAINT udeb_contents_binary_fkey
++        FOREIGN KEY (binary_id) REFERENCES binaries(id)
++        ON DELETE CASCADE;""")
++
++        c.execute("""CREATE INDEX ind_deb_contents_binary ON deb_contents(binary_id);""" )
++
++
++        suite_list = suites()
++
++        for suite in [i.lower() for i in suite_list]:
++            suite_id = DBConn().get_suite_id(suite)
++            arch_list = arches(c, suite_id)
++
++            for (arch_id,arch_str) in arch_list:
++                c.execute( "CREATE INDEX ind_deb_contents_%s_%s ON deb_contents (arch,suite) WHERE (arch=2 OR arch=%d) AND suite=%d" % (arch_str,suite,arch_id,suite_id) )
++
++            for section, sname in [("debian-installer","main"),
++                                  ("non-free/debian-installer", "nonfree")]:
++                c.execute( "CREATE INDEX ind_udeb_contents_%s_%s ON udeb_contents (section,suite) WHERE section='%s' AND suite=%d" % (sname,suite,section,suite_id) )
++
++
++        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_bin_a() RETURNS trigger AS  $$
++    event = TD["event"]
++    if event == "DELETE" or event == "UPDATE":
++
++        plpy.execute(plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 and suite=$2",
++                                  ["int","int"]),
++                                  [TD["old"]["bin"], TD["old"]["suite"]])
++
++    if event == "INSERT" or event == "UPDATE":
++
++       content_data = plpy.execute(plpy.prepare(
++            """SELECT s.section, b.package, b.architecture, c.name, ot.type
++            FROM override o
++            JOIN override_type ot on o.type=ot.id
++            JOIN binaries b on b.package=o.package
++            JOIN files f on b.file=f.id
++            JOIN location l on l.id=f.location
++            JOIN section s on s.id=o.section
++            JOIN component c on c.id=l.component
++            WHERE b.id=$1
++            AND o.suite=$2
++            """,
++            ["int", "int"]),
++            [TD["new"]["bin"], TD["new"]["suite"]])[0]
++
++       component_str = "";
++       if content_data["name"] != "main":
++           component_str=content_data["name"]+"/"
++
++       filenames = plpy.execute(plpy.prepare(
++           "SELECT bc.file FROM bin_contents bc where bc.binary_id=$1",
++           ["int"]),
++           [TD["new"]["bin"]])
++
++       for filename in filenames:
++           plpy.execute(plpy.prepare(
++               """INSERT INTO deb_contents
++                   (file,section,package,binary_id,arch,suite,component)
++                   VALUES($1,$2,$3,$4,$5,$6,$7)""",
++               ["text","text","text","int","int","int","text"]),
++               [filename["filename"],
++                content_data["section"],
++                content_data["package"],
++                TD["new"]["bin"],
++                content_data["architecture"],
++                TD["new"]["suite"],
++                component_str])
++$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
++""")
++
++
++        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_override() RETURNS trigger AS  $$
++    event = TD["event"]
++    if event == "UPDATE":
++
++        otype = plpy.execute(plpy.prepare("SELECT type from override_type where id=$1",["int"]),TD["new"]["type"] )[0];
++        if otype["type"].endswith("deb"):
++            table_name = "%s_contents" % otype["type"]
++            plpy.execute(plpy.prepare("UPDATE %s SET section=$1" % table_name,
++                                      ["text"]),
++                                      [TD["new"]["section"]])
++
++$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
++""")
++        c.execute( """CREATE TRIGGER bin_associations_contents_trigger
++                      AFTER INSERT OR UPDATE OR DELETE ON bin_associations
++                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_bin_a();""")
++        c.execute("""CREATE TRIGGER override_contents_trigger
++                      AFTER UPDATE ON override
++                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
++
++        c.execute("UPDATE config SET value = '23' WHERE name = 'db_revision'")
++        self.db.commit()
++
++    except psycopg2.ProgrammingError, msg:
++        self.db.rollback()
++        raise DBUpdateError, "Unable to apply contents-tables update 23, rollback issued. Error message : %s" % (str(msg))
++
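Both functions above are PL/Python trigger bodies: PostgreSQL supplies the TD dictionary (TD["event"], TD["old"], TD["new"]) and the plpy module, with plpy.prepare/plpy.execute providing parameterised statements. Stripped to its shape, a trigger of this kind looks like the sketch below (it runs inside CREATE FUNCTION ... LANGUAGE plpythonu, not as a standalone script; the notice is illustrative):

    event = TD["event"]
    if event == "DELETE" or event == "UPDATE":
        # TD["old"] holds the row as it was before the statement fired
        plan = plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 AND suite=$2",
                            ["int", "int"])
        plpy.execute(plan, [TD["old"]["bin"], TD["old"]["suite"]])
    if event == "INSERT" or event == "UPDATE":
        # TD["new"] holds the incoming row's column values
        plpy.notice("bin %s entered suite %s" % (TD["new"]["bin"], TD["new"]["suite"]))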
diff --combined daklib/binary.py
index 8a0cf0921371a8e0a79c9e2f3727ef9df56490eb,c6ee96f86d5f1eed8b210720697e1620e2448ead..a70aadb943fb2e713a5771f113782384e612d251
@@@ -204,10 -204,7 +204,10 @@@ class Binary(object)
                      else:
                          pkgs = deb822.Packages.iter_paragraphs(file(os.path.join(self.tmpdir,'control')))
                          pkg = pkgs.next()
 -                        result = insert_pending_content_paths(pkg, [tarinfo.name for tarinfo in data if not tarinfo.isdir()], session)
 +                        result = insert_pending_content_paths(pkg,
 +                                                              self.filename.endswith('.udeb'),
 +                                                              [tarinfo.name for tarinfo in data if not tarinfo.isdir()],
 +                                                              session)
  
                  except:
                      traceback.print_exc()
                      except:
                          print >> sys.stderr, "E: %s has non-unicode filename: %s" % (package,tarinfo.name)
  
+                 result = True
              except:
                  traceback.print_exc()
                  result = False
  
              os.chdir(cwd)
  
+         return result
  __all__.append('Binary')
  
 -def copy_temporary_contents(package, version, archname, deb, reject, session=None):
 +
 +def copy_temporary_contents(binary, bin_association, reject, session=None):
      """
      copy the previously stored contents from the temp table to the permanant one
  
  
      arch = get_architecture(archname, session=session)
  
 -    # first see if contents exist:
 -    in_pcaq = """SELECT 1 FROM pending_content_associations
 -                               WHERE package=:package
 -                               AND version=:version
 -                               AND architecture=:archid LIMIT 1"""
 -
 -    vals = {'package': package,
 -            'version': version,
 -            'archid': arch.arch_id}
 -
 -    exists = None
 -    check = session.execute(in_pcaq, vals)
 +    pending = session.query(PendingBinContents).filter_by(package=binary.package,
 +                                                          version=binary.version,
 +                                                          arch=binary.arch).first()
  
 -    if check.rowcount > 0:
 +    if pending:
          # This should NOT happen.  We should have added contents
          # during process-unchecked.  If it did, log an error and send
          # an email.
          message = utils.TemplateSubst(subst, cnf["Dir::Templates"]+"/missing-contents")
          utils.send_mail(message)
  
 -        # Temporarily disable contents storage until we re-do the table layout
 -        #exists = Binary(deb, reject).scan_package()
 -
 -    if exists:
 -        sql = """INSERT INTO content_associations(binary_pkg,filepath,filename)
 -                 SELECT currval('binaries_id_seq'), filepath, filename FROM pending_content_associations
 -                 WHERE package=:package AND version=:version AND architecture=:archid"""
 -        session.execute(sql, vals)
 -
 -        sql = """DELETE from pending_content_associations
 -                 WHERE package=:package AND version=:version AND architecture=:archid"""
 -        session.execute(sql, vals)
 -        session.commit()
 +        # rescan it now
 +        exists = Binary(binary.poolfile.fullpath, reject).scan_package()
 +
 +        if not exists:
 +            # LOG?
 +            return False
 +
 +    component = binary.poolfile.location.component
 +    override = session.query(Override).filter_by(package=binary.package,
 +                                                 suite=bin_association.suite,
 +                                                 component=component.id).first()
 +    if not override:
 +        # LOG?
 +        return False
 +
 +
 +    if not override.overridetype.type.endswith('deb'):
 +        return True
 +
 +    if override.overridetype.type == "udeb":
 +        table = "udeb_contents"
 +    elif override.overridetype.type == "deb":
 +        table = "deb_contents"
 +    else:
 +        return False
 +    
 +
 +    if component.name == "main":
 +        component_str = ""
 +    else:
 +        component_str = component.name + "/"
 +        
 +    vals = { 'package':binary.package,
 +             'version':binary.version,
 +             'arch':binary.architecture,
 +             'binary_id': binary.id,
 +             'component':component_str,
 +             'section':override.section.section
 +             }
 +
 +    session.execute( """INSERT INTO %s
 +    (binary_id,package,version,component,arch,section,filename)
 +    SELECT :binary_id, :package, :version, :component, :arch, :section, pbc.filename
 +    FROM pending_bin_contents pbc
 +    WHERE pbc.package=:package
 +    AND pbc.version=:version
 +    AND pbc.arch=:arch""" % table, vals )
 +
 +    session.execute( """DELETE FROM pending_bin_contents WHERE package=:package
 +    AND version=:version
 +    AND arch=:arch""", vals )
  
      if privatetrans:
 +        session.commit()
          session.close()
  
      return True
  
  __all__.append('copy_temporary_contents')
 +
 +
diff --combined daklib/dbconn.py
index 18f427d424222d02fd9e74b4ade827950679003f,9e5afec7444cb36dfbbfc6632e38a4c91ecaf84f..921f1daa03af8c35e066c777ba8872ebd4b5bba8
  ################################################################################
  
  import os
+ import re
  import psycopg2
  import traceback
+ import datetime
  
  from inspect import getargspec
  
+ import sqlalchemy
  from sqlalchemy import create_engine, Table, MetaData
  from sqlalchemy.orm import sessionmaker, mapper, relation
+ from sqlalchemy import types as sqltypes
  
  # Don't remove this, we re-export the exceptions to scripts which import us
  from sqlalchemy.exc import *
@@@ -54,6 -58,22 +58,22 @@@ from textutils import fix_maintaine
  
  ################################################################################
  
+ # Patch in support for the debversion field type so that it works during
+ # reflection
+ class DebVersion(sqltypes.Text):
+     def get_col_spec(self):
+         return "DEBVERSION"
+ sa_major_version = sqlalchemy.__version__[0:3]
+ if sa_major_version == "0.5":
+     from sqlalchemy.databases import postgres
+     postgres.ischema_names['debversion'] = DebVersion
+ else:
+     raise Exception("dak isn't ported to SQLA versions != 0.5 yet.  See daklib/dbconn.py")
+ ################################################################################
  __all__ = ['IntegrityError', 'SQLAlchemyError']
  
  ################################################################################
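Registering 'debversion' in postgres.ischema_names is what lets SQLAlchemy 0.5's reflection resolve that column type instead of falling back to NullType when dak autoloads its tables. A hypothetical illustration (the DSN is made up):

    from sqlalchemy import create_engine, MetaData, Table

    engine = create_engine("postgres://dak@localhost/projectb")  # assumed DSN
    meta = MetaData(bind=engine)

    # reflection now maps the version column onto DebVersion
    binaries = Table('binaries', meta, autoload=True)
    print binaries.c.version.type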
@@@ -267,12 -287,12 +287,12 @@@ def get_suites_binary_in(package, sessi
  __all__.append('get_suites_binary_in')
  
  @session_wrapper
- def get_binary_from_id(id, session=None):
+ def get_binary_from_id(binary_id, session=None):
      """
      Returns DBBinary object for given C{id}
  
-     @type id: int
-     @param id: Id of the required binary
+     @type binary_id: int
+     @param binary_id: Id of the required binary
  
      @type session: Session
      @param session: Optional SQLA session object (a temporary one will be
      @return: DBBinary object for the given binary (None if not present)
      """
  
-     q = session.query(DBBinary).filter_by(binary_id=id)
+     q = session.query(DBBinary).filter_by(binary_id=binary_id)
  
      try:
          return q.one()
@@@ -388,6 -408,28 +408,28 @@@ __all__.append('get_binary_components'
  
  ################################################################################
  
+ class BinaryACL(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<BinaryACL %s>' % self.binary_acl_id
+ __all__.append('BinaryACL')
+ ################################################################################
+ class BinaryACLMap(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<BinaryACLMap %s>' % self.binary_acl_map_id
+ __all__.append('BinaryACLMap')
+ ################################################################################
  class Component(object):
      def __init__(self, *args, **kwargs):
          pass
@@@ -611,16 -653,11 +653,16 @@@ def insert_content_paths(binary_id, ful
      try:
          # Insert paths
          pathcache = {}
 -        for fullpath in fullpaths:
 -            if fullpath.startswith( './' ):
 -                fullpath = fullpath[2:]
  
 -            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )", { 'filename': fullpath, 'id': binary_id}  )
 +        def generate_path_dicts():
 +            for fullpath in fullpaths:
 +                if fullpath.startswith( './' ):
 +                    fullpath = fullpath[2:]
 +
 +                yield {'filename':fullpath, 'id': binary_id }
 +
 +        session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
 +                         generate_path_dicts() )
  
          session.commit()
          if privatetrans:
@@@ -692,6 -729,10 +734,10 @@@ class PoolFile(object)
      def __repr__(self):
          return '<PoolFile %s>' % self.filename
  
+     @property
+     def fullpath(self):
+         return os.path.join(self.location.path, self.filename)
  __all__.append('PoolFile')
  
  @session_wrapper
@@@ -733,7 -774,7 +779,7 @@@ def check_poolfile(filename, filesize, 
          ret = (False, None)
      else:
          obj = q.one()
-         if obj.md5sum != md5sum or obj.filesize != filesize:
+         if obj.md5sum != md5sum or obj.filesize != int(filesize):
              ret = (False, obj)
  
      if ret is None:
@@@ -820,6 -861,33 +866,33 @@@ class Fingerprint(object)
  
  __all__.append('Fingerprint')
  
+ @session_wrapper
+ def get_fingerprint(fpr, session=None):
+     """
+     Returns Fingerprint object for given fpr.
+     @type fpr: string
+     @param fpr: The fpr to find / add
+     @type session: SQLAlchemy
+     @param session: Optional SQL session object (a temporary one will be
+     generated if not supplied).
+     @rtype: Fingerprint
+     @return: the Fingerprint object for the given fpr or None
+     """
+     q = session.query(Fingerprint).filter_by(fingerprint=fpr)
+     try:
+         ret = q.one()
+     except NoResultFound:
+         ret = None
+     return ret
+ __all__.append('get_fingerprint')
  @session_wrapper
  def get_or_set_fingerprint(fpr, session=None):
      """
@@@ -857,20 -925,139 +930,139 @@@ __all__.append('get_or_set_fingerprint'
  
  ################################################################################
  
+ # Helper routine for Keyring class
+ def get_ldap_name(entry):
+     name = []
+     for k in ["cn", "mn", "sn"]:
+         ret = entry.get(k)
+         if ret and ret[0] != "" and ret[0] != "-":
+             name.append(ret[0])
+     return " ".join(name)
+ ################################################################################
  class Keyring(object):
+     gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
+                      " --with-colons --fingerprint --fingerprint"
+     keys = {}
+     fpr_lookup = {}
      def __init__(self, *args, **kwargs):
          pass
  
      def __repr__(self):
          return '<Keyring %s>' % self.keyring_name
  
+     def de_escape_gpg_str(self, txt):
+         esclist = re.split(r'(\\x..)', txt)
+         for x in range(1,len(esclist),2):
+             esclist[x] = "%c" % (int(esclist[x][2:],16))
+         return "".join(esclist)
+     def load_keys(self, keyring):
+         import email.Utils
+         if not self.keyring_id:
+             raise Exception('Must be initialized with database information')
+         k = os.popen(self.gpg_invocation % keyring, "r")
+         key = None
+         signingkey = False
+         for line in k.xreadlines():
+             field = line.split(":")
+             if field[0] == "pub":
+                 key = field[4]
+                 (name, addr) = email.Utils.parseaddr(field[9])
+                 name = re.sub(r"\s*[(].*[)]", "", name)
+                 if name == "" or addr == "" or "@" not in addr:
+                     name = field[9]
+                     addr = "invalid-uid"
+                 name = self.de_escape_gpg_str(name)
+                 self.keys[key] = {"email": addr}
+                 if name != "":
+                     self.keys[key]["name"] = name
+                 self.keys[key]["aliases"] = [name]
+                 self.keys[key]["fingerprints"] = []
+                 signingkey = True
+             elif key and field[0] == "sub" and len(field) >= 12:
+                 signingkey = ("s" in field[11])
+             elif key and field[0] == "uid":
+                 (name, addr) = email.Utils.parseaddr(field[9])
+                 if name and name not in self.keys[key]["aliases"]:
+                     self.keys[key]["aliases"].append(name)
+             elif signingkey and field[0] == "fpr":
+                 self.keys[key]["fingerprints"].append(field[9])
+                 self.fpr_lookup[field[9]] = key
+
+     def import_users_from_ldap(self, session):
+         import ldap
+         cnf = Config()
+         LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
+         LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
+         l = ldap.open(LDAPServer)
+         l.simple_bind_s("","")
+         Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
+                "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
+                ["uid", "keyfingerprint", "cn", "mn", "sn"])
+         ldap_fin_uid_id = {}
+         byuid = {}
+         byname = {}
+         for i in Attrs:
+             entry = i[1]
+             uid = entry["uid"][0]
+             name = get_ldap_name(entry)
+             fingerprints = entry["keyFingerPrint"]
+             keyid = None
+             for f in fingerprints:
+                 key = self.fpr_lookup.get(f, None)
+                 if key not in self.keys:
+                     continue
+                 self.keys[key]["uid"] = uid
+                 if keyid != None:
+                     continue
+                 keyid = get_or_set_uid(uid, session).uid_id
+                 byuid[keyid] = (uid, name)
+                 byname[uid] = (keyid, name)
+         return (byname, byuid)
+
+     def generate_users_from_keyring(self, format, session):
+         byuid = {}
+         byname = {}
+         any_invalid = False
+         for x in self.keys.keys():
+             if self.keys[x]["email"] == "invalid-uid":
+                 any_invalid = True
+                 self.keys[x]["uid"] = format % "invalid-uid"
+             else:
+                 uid = format % self.keys[x]["email"]
+                 keyid = get_or_set_uid(uid, session).uid_id
+                 byuid[keyid] = (uid, self.keys[x]["name"])
+                 byname[uid] = (keyid, self.keys[x]["name"])
+                 self.keys[x]["uid"] = uid
+         if any_invalid:
+             uid = format % "invalid-uid"
+             keyid = get_or_set_uid(uid, session).uid_id
+             byuid[keyid] = (uid, "ungeneratable user id")
+             byname[uid] = (keyid, "ungeneratable user id")
+         return (byname, byuid)
+
  __all__.append('Keyring')
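load_keys() above walks gpg's --with-colons output: field 5 of a pub record is
the key id, field 10 carries the uid (on pub/uid records) or the fingerprint
(on fpr records), and field 12 of a sub record lists capabilities ('s' means
a signing subkey). A simplified illustration of the records it expects (not
verbatim gpg output):

    pub:-:1024:17:DEADBEEF00000000:2004-01-01:::-:John Doe <jd@example.org>:
    fpr:::::::::0123456789ABCDEF0123456789ABCDEF01234567:
    uid:-::::2005-01-01::0000::John Doe (work) <jd@example.com>:
    sub:-:2048:16:CAFEBABE11111111:2004-01-01::::::e: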
  
  @session_wrapper
- def get_or_set_keyring(keyring, session=None):
+ def get_keyring(keyring, session=None):
      """
-     If C{keyring} does not have an entry in the C{keyrings} table yet, create one
-     and return the new Keyring
+     If C{keyring} does not have an entry in the C{keyrings} table yet, return None.
      If C{keyring} already has an entry, simply return the existing Keyring
  
      @type keyring: string
      try:
          return q.one()
      except NoResultFound:
-         obj = Keyring(keyring_name=keyring)
-         session.add(obj)
-         session.commit_or_flush()
-         return obj
+         return None
+ __all__.append('get_keyring')
+ ################################################################################
+ class KeyringACLMap(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<KeyringACLMap %s>' % self.keyring_acl_map_id
+ __all__.append('KeyringACLMap')
+ ################################################################################
+ class KnownChange(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<KnownChange %s>' % self.changesname
+ __all__.append('KnownChange')
+ @session_wrapper
+ def get_knownchange(filename, session=None):
+     """
+     Returns the KnownChange object for given C{filename}.
+
+     @type filename: string
+     @param filename: the name of the .changes file
+
+     @type session: Session
+     @param session: Optional SQLA session object (a temporary one will be
+     generated if not supplied)
+
+     @rtype: KnownChange
+     @return: KnownChange object for the given filename (None if not present)
  
- __all__.append('get_or_set_keyring')
+     """
+     q = session.query(KnownChange).filter_by(changesname=filename)
+     try:
+         return q.one()
+     except NoResultFound:
+         return None
+ __all__.append('get_knownchange')
+ ################################################################################
+ class KnownChangePendingFile(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<KnownChangePendingFile %s>' % self.known_change_pending_file_id
+ __all__.append('KnownChangePendingFile')
  
  ################################################################################
  
@@@ -1172,38 -1414,16 +1419,38 @@@ __all__.append('get_override_type'
  
  ################################################################################
  
 -class PendingContentAssociation(object):
 +class DebContents(object):
 +    def __init__(self, *args, **kwargs):
 +        pass
 +
 +    def __repr__(self):
 +        return '<DebContents %s: %s>' % (self.package.package, self.file)
 +
 +__all__.append('DebContents')
 +
 +
 +class UdebContents(object):
 +    def __init__(self, *args, **kwargs):
 +        pass
 +
 +    def __repr__(self):
 +        return '<UdebContents %s: %s>' % (self.package.package, self.file)
 +
 +__all__.append('UdebContents')
 +
 +class PendingBinContents(object):
      def __init__(self, *args, **kwargs):
          pass
  
      def __repr__(self):
 -        return '<PendingContentAssociation %s>' % self.pca_id
 +        return '<PendingBinContents %s>' % self.contents_id
  
 -__all__.append('PendingContentAssociation')
 +__all__.append('PendingBinContents')
  
 -def insert_pending_content_paths(package, fullpaths, session=None):
 +def insert_pending_content_paths(package,
 +                                 is_udeb,
 +                                 fullpaths,
 +                                 session=None):
      """
      Make sure given paths are temporarily associated with given
      package
          arch_id = arch.arch_id
  
          # Remove any already existing recorded files for this package
 -        q = session.query(PendingContentAssociation)
 +        q = session.query(PendingBinContents)
          q = q.filter_by(package=package['Package'])
          q = q.filter_by(version=package['Version'])
          q = q.filter_by(architecture=arch_id)
          q.delete()
  
 -        # Insert paths
 -        pathcache = {}
          for fullpath in fullpaths:
 -            (path, filename) = os.path.split(fullpath)
 -
 -            if path.startswith( "./" ):
 -                path = path[2:]
 -
 -            filepath_id = get_or_set_contents_path_id(path, session)
 -            filename_id = get_or_set_contents_file_id(filename, session)
  
 -            pathcache[fullpath] = (filepath_id, filename_id)
 +            if fullpath.startswith( "./" ):
 +                fullpath = fullpath[2:]
  
 -        for fullpath, dat in pathcache.items():
 -            pca = PendingContentAssociation()
 +            pca = PendingBinContents()
              pca.package = package['Package']
              pca.version = package['Version']
 -            pca.filepath_id = dat[0]
 -            pca.filename_id = dat[1]
 +            pca.file = fullpath
              pca.architecture = arch_id
 +
 +            if is_udeb:
 +                pca.type = 8 # gross
 +            else:
 +                pca.type = 7 # also gross
              session.add(pca)
  
          # Only commit if we set up the session ourself
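The 7/8 values written to pca.type above are magic numbers; if they do
correspond to the deb/udeb override types (an assumption, not confirmed by
this hunk), named constants would make the intent clearer:

    # Hypothetical constants; the values are taken from the hunk above.
    PENDING_CONTENTS_TYPE_DEB = 7
    PENDING_CONTENTS_TYPE_UDEB = 8

    pca.type = PENDING_CONTENTS_TYPE_UDEB if is_udeb else PENDING_CONTENTS_TYPE_DEB
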
@@@ -1354,106 -1579,55 +1601,55 @@@ class Queue(object)
      def __repr__(self):
          return '<Queue %s>' % self.queue_name
  
-     def autobuild_upload(self, changes, srcpath, session=None):
-         """
-         Update queue_build database table used for incoming autobuild support.
+     def add_file_from_pool(self, poolfile):
+         """Copies a file from the pool into the queue directory.  Assumes that the PoolFile object is
+         attached to the same SQLAlchemy session as the Queue object is.
  
-         @type changes: Changes
-         @param changes: changes object for the upload to process
+         The caller is responsible for committing after calling this function."""
+         poolfile_basename = os.path.basename(poolfile.filename)
  
-         @type srcpath: string
-         @param srcpath: path for the queue file entries/link destinations
+         # Check if we have a file of this name or this ID already
+         for f in self.queuefiles:
+             if (f.fileid is not None and f.fileid == poolfile.file_id) or \
+                f.poolfile.filename == poolfile_basename:
+                 # In this case, update the QueueFile entry so we
+                 # don't remove it too early
+                 f.lastused = datetime.now()
+                 DBConn().session().object_session(poolfile).add(f)
+                 return f
  
-         @type session: SQLAlchemy session
-         @param session: Optional SQLAlchemy session.  If this is passed, the
-         caller is responsible for ensuring a transaction has begun and
-         committing the results or rolling back based on the result code.  If
-         not passed, a commit will be performed at the end of the function,
-         otherwise the caller is responsible for commiting.
+         # Prepare QueueFile object
+         qf = QueueFile()
+         qf.queue_id = self.queue_id
+         qf.lastused = datetime.now()
+         qf.filename = poolfile_basename
  
-         @rtype: NoneType or string
-         @return: None if the operation failed, a string describing the error if not
-         """
+         targetpath = poolfile.fullpath
+         queuepath = os.path.join(self.path, poolfile_basename)
  
-         privatetrans = False
-         if session is None:
-             session = DBConn().session()
-             privatetrans = True
-         # TODO: Remove by moving queue config into the database
-         conf = Config()
-         for suitename in changes.changes["distribution"].keys():
-             # TODO: Move into database as:
-             #       buildqueuedir TEXT DEFAULT NULL (i.e. NULL is no build)
-             #       buildqueuecopy BOOLEAN NOT NULL DEFAULT FALSE (i.e. default is symlink)
-             #       This also gets rid of the SecurityQueueBuild hack below
-             if suitename not in conf.ValueList("Dinstall::QueueBuildSuites"):
-                 continue
-             # Find suite object
-             s = get_suite(suitename, session)
-             if s is None:
-                 return "INTERNAL ERROR: Could not find suite %s" % suitename
-             # TODO: Get from database as above
-             dest_dir = conf["Dir::QueueBuild"]
-             # TODO: Move into database as above
-             if conf.FindB("Dinstall::SecurityQueueBuild"):
-                 dest_dir = os.path.join(dest_dir, suitename)
-             for file_entry in changes.files.keys():
-                 src = os.path.join(srcpath, file_entry)
-                 dest = os.path.join(dest_dir, file_entry)
-                 # TODO: Move into database as above
-                 if conf.FindB("Dinstall::SecurityQueueBuild"):
-                     # Copy it since the original won't be readable by www-data
-                     import utils
-                     utils.copy(src, dest)
-                 else:
-                     # Create a symlink to it
-                     os.symlink(src, dest)
-                 qb = QueueBuild()
-                 qb.suite_id = s.suite_id
-                 qb.queue_id = self.queue_id
-                 qb.filename = dest
-                 qb.in_queue = True
-                 session.add(qb)
-             exists, symlinked = utils.ensure_orig_files(changes, dest, session)
-             # Add symlinked files to the list of packages for later processing
-             # by apt-ftparchive
-             for filename in symlinked:
-                 qb = QueueBuild()
-                 qb.suite_id = s.suite_id
-                 qb.queue_id = self.queue_id
-                 qb.filename = filename
-                 qb.in_queue = True
-                 session.add(qb)
-             # Update files to ensure they are not removed prematurely
-             for filename in exists:
-                 qb = get_queue_build(filename, s.suite_id, session)
-                 if qb is None:
-                     qb.in_queue = True
-                     qb.last_used = None
-                     session.add(qb)
+         try:
+             if self.copy_pool_files:
+                 # We need to copy instead of symlink
+                 import utils
+                 utils.copy(targetpath, queuepath)
+                 # NULL in the fileid field implies a copy
+                 qf.fileid = None
+             else:
+                 os.symlink(targetpath, queuepath)
+                 qf.fileid = poolfile.file_id
+         except OSError:
+             return None
  
-         if privatetrans:
-             session.commit()
-             session.close()
+         # Get the same session as the PoolFile is using and add the qf to it
+         DBConn().session().object_session(poolfile).add(qf)
+         return qf
  
-         return None
  
  __all__.append('Queue')
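A hedged usage sketch for the new helper (queue and poolfile must be attached
to the same session, per the docstring; names are illustrative):

    qf = queue.add_file_from_pool(poolfile)
    if qf is None:
        # the copy or symlink failed with OSError
        utils.fubar("could not add %s to queue %s"
                    % (poolfile.filename, queue.queue_name))
    session.commit()  # the caller commits, per the docstring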
  
  @session_wrapper
- def get_or_set_queue(queuename, session=None):
+ def get_queue(queuename, session=None):
      """
      Returns Queue object for given C{queue name} (None if it does not
      exist).
      q = session.query(Queue).filter_by(queue_name=queuename)
  
      try:
-         ret = q.one()
+         return q.one()
      except NoResultFound:
-         queue = Queue()
-         queue.queue_name = queuename
-         session.add(queue)
-         session.commit_or_flush()
-         ret = queue
-     return ret
+         return None
  
- __all__.append('get_or_set_queue')
+ __all__.append('get_queue')
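Unlike the old get_or_set_queue(), get_queue() no longer creates missing rows,
so callers now have to handle a None result themselves; a minimal sketch:

    q = get_queue('byhand', session)  # 'byhand' is an illustrative name
    if q is None:
        raise Exception("queue 'byhand' not known to the database")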
  
  ################################################################################
  
- class QueueBuild(object):
+ class QueueFile(object):
      def __init__(self, *args, **kwargs):
          pass
  
      def __repr__(self):
-         return '<QueueBuild %s (%s)>' % (self.filename, self.queue_id)
- __all__.append('QueueBuild')
- @session_wrapper
- def get_queue_build(filename, suite, session=None):
-     """
-     Returns QueueBuild object for given C{filename} and C{suite}.
+         return '<QueueFile %s (%s)>' % (self.filename, self.queue_id)
  
-     @type filename: string
-     @param filename: The name of the file
-     @type suiteid: int or str
-     @param suiteid: Suite name or ID
-     @type session: Session
-     @param session: Optional SQLA session object (a temporary one will be
-     generated if not supplied)
-     @rtype: Queue
-     @return: Queue object for the given queue
-     """
-     if isinstance(suite, int):
-         q = session.query(QueueBuild).filter_by(filename=filename).filter_by(suite_id=suite)
-     else:
-         q = session.query(QueueBuild).filter_by(filename=filename)
-         q = q.join(Suite).filter_by(suite_name=suite)
-     try:
-         return q.one()
-     except NoResultFound:
-         return None
- __all__.append('get_queue_build')
+ __all__.append('QueueFile')
  
  ################################################################################
  
@@@ -1759,6 -1895,17 +1917,17 @@@ __all__.append('get_source_in_suite'
  
  ################################################################################
  
+ class SourceACL(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<SourceACL %s>' % self.source_acl_id
+ __all__.append('SourceACL')
+ ################################################################################
  class SrcAssociation(object):
      def __init__(self, *args, **kwargs):
          pass
@@@ -2078,6 -2225,17 +2247,17 @@@ __all__.append('get_uid_from_fingerprin
  
  ################################################################################
  
+ class UploadBlock(object):
+     def __init__(self, *args, **kwargs):
+         pass
+     def __repr__(self):
+         return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
+ __all__.append('UploadBlock')
+ ################################################################################
  class DBConn(Singleton):
      """
      database module init.
      def __setuptables(self):
          self.tbl_architecture = Table('architecture', self.db_meta, autoload=True)
          self.tbl_archive = Table('archive', self.db_meta, autoload=True)
 +        self.tbl_bin_contents = Table('bin_contents', self.db_meta, autoload=True)
          self.tbl_bin_associations = Table('bin_associations', self.db_meta, autoload=True)
          self.tbl_binaries = Table('binaries', self.db_meta, autoload=True)
+         self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True)
+         self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True)
          self.tbl_component = Table('component', self.db_meta, autoload=True)
          self.tbl_config = Table('config', self.db_meta, autoload=True)
          self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True)
          self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True)
          self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True)
+         self.tbl_changes_pending_files = Table('changes_pending_files', self.db_meta, autoload=True)
+         self.tbl_changes_pool_files = Table('changes_pool_files', self.db_meta, autoload=True)
          self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True)
 +        self.tbl_deb_contents = Table('deb_contents', self.db_meta, autoload=True)
          self.tbl_files = Table('files', self.db_meta, autoload=True)
          self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True)
          self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True)
+         self.tbl_known_changes = Table('known_changes', self.db_meta, autoload=True)
+         self.tbl_keyring_acl_map = Table('keyring_acl_map', self.db_meta, autoload=True)
          self.tbl_location = Table('location', self.db_meta, autoload=True)
          self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True)
          self.tbl_new_comments = Table('new_comments', self.db_meta, autoload=True)
          self.tbl_override = Table('override', self.db_meta, autoload=True)
          self.tbl_override_type = Table('override_type', self.db_meta, autoload=True)
 -        self.tbl_pending_content_associations = Table('pending_content_associations', self.db_meta, autoload=True)
 +        self.tbl_pending_bin_contents = Table('pending_bin_contents', self.db_meta, autoload=True)
          self.tbl_priority = Table('priority', self.db_meta, autoload=True)
          self.tbl_queue = Table('queue', self.db_meta, autoload=True)
-         self.tbl_queue_build = Table('queue_build', self.db_meta, autoload=True)
+         self.tbl_queue_files = Table('queue_files', self.db_meta, autoload=True)
          self.tbl_section = Table('section', self.db_meta, autoload=True)
          self.tbl_source = Table('source', self.db_meta, autoload=True)
+         self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True)
          self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True)
          self.tbl_src_format = Table('src_format', self.db_meta, autoload=True)
          self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True)
          self.tbl_suite = Table('suite', self.db_meta, autoload=True)
          self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True)
          self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True)
+         self.tbl_suite_queue_copy = Table('suite_queue_copy', self.db_meta, autoload=True)
 +        self.tbl_udeb_contents = Table('udeb_contents', self.db_meta, autoload=True)
          self.tbl_uid = Table('uid', self.db_meta, autoload=True)
+         self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True)
  
      def __setupmappers(self):
          mapper(Architecture, self.tbl_architecture,
                                   binary_id = self.tbl_bin_associations.c.bin,
                                   binary = relation(DBBinary)))
  
 +        mapper(PendingBinContents, self.tbl_pending_bin_contents,
 +               properties = dict(contents_id =self.tbl_pending_bin_contents.c.id,
 +                                 filename = self.tbl_pending_bin_contents.c.filename,
 +                                 package = self.tbl_pending_bin_contents.c.package,
 +                                 version = self.tbl_pending_bin_contents.c.version,
 +                                 arch = self.tbl_pending_bin_contents.c.arch,
 +                                 otype = self.tbl_pending_bin_contents.c.type))
 +
 +        mapper(DebContents, self.tbl_deb_contents,
 +               properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
 +                                 package=self.tbl_deb_contents.c.package,
 +                                 component=self.tbl_deb_contents.c.component,
 +                                 arch=self.tbl_deb_contents.c.arch,
 +                                 section=self.tbl_deb_contents.c.section,
 +                                 filename=self.tbl_deb_contents.c.filename))
 +
 +        mapper(UdebContents, self.tbl_udeb_contents,
 +               properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
 +                                 package=self.tbl_udeb_contents.c.package,
 +                                 component=self.tbl_udeb_contents.c.component,
 +                                 arch=self.tbl_udeb_contents.c.arch,
 +                                 section=self.tbl_udeb_contents.c.section,
 +                                 filename=self.tbl_udeb_contents.c.filename))
  
          mapper(DBBinary, self.tbl_binaries,
                 properties = dict(binary_id = self.tbl_binaries.c.id,
                                   binassociations = relation(BinAssociation,
                                                              primaryjoin=(self.tbl_binaries.c.id==self.tbl_bin_associations.c.bin))))
  
+         mapper(BinaryACL, self.tbl_binary_acl,
+                properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
+         mapper(BinaryACLMap, self.tbl_binary_acl_map,
+                properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
+                                  fingerprint = relation(Fingerprint, backref="binary_acl_map"),
+                                  architecture = relation(Architecture)))
          mapper(Component, self.tbl_component,
                 properties = dict(component_id = self.tbl_component.c.id,
                                   component_name = self.tbl_component.c.name))
                                   uid_id = self.tbl_fingerprint.c.uid,
                                   uid = relation(Uid),
                                   keyring_id = self.tbl_fingerprint.c.keyring,
-                                  keyring = relation(Keyring)))
+                                  keyring = relation(Keyring),
+                                  source_acl = relation(SourceACL),
+                                  binary_acl = relation(BinaryACL)))
  
          mapper(Keyring, self.tbl_keyrings,
                 properties = dict(keyring_name = self.tbl_keyrings.c.name,
                                   keyring_id = self.tbl_keyrings.c.id))
  
+         mapper(KnownChange, self.tbl_known_changes,
+                properties = dict(known_change_id = self.tbl_known_changes.c.id,
+                                  poolfiles = relation(PoolFile,
+                                                       secondary=self.tbl_changes_pool_files,
+                                                       backref="changeslinks"),
+                                  files = relation(KnownChangePendingFile, backref="changesfile")))
+         mapper(KnownChangePendingFile, self.tbl_changes_pending_files,
+                properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.c.id))
+         mapper(KeyringACLMap, self.tbl_keyring_acl_map,
+                properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
+                                  keyring = relation(Keyring, backref="keyring_acl_map"),
+                                  architecture = relation(Architecture)))
          mapper(Location, self.tbl_location,
                 properties = dict(location_id = self.tbl_location.c.id,
                                   component_id = self.tbl_location.c.component,
          mapper(Override, self.tbl_override,
                 properties = dict(suite_id = self.tbl_override.c.suite,
                                   suite = relation(Suite),
 +                                 package = self.tbl_override.c.package,
                                   component_id = self.tbl_override.c.component,
                                   component = relation(Component),
                                   priority_id = self.tbl_override.c.priority,
          mapper(Queue, self.tbl_queue,
                 properties = dict(queue_id = self.tbl_queue.c.id))
  
-         mapper(QueueBuild, self.tbl_queue_build,
-                properties = dict(suite_id = self.tbl_queue_build.c.suite,
-                                  queue_id = self.tbl_queue_build.c.queue,
-                                  queue = relation(Queue, backref='queuebuild')))
+         mapper(QueueFile, self.tbl_queue_files,
+                properties = dict(queue = relation(Queue, backref='queuefiles'),
+                                  poolfile = relation(PoolFile, backref='queueinstances')))
  
          mapper(Section, self.tbl_section,
 -               properties = dict(section_id = self.tbl_section.c.id))
 +               properties = dict(section_id = self.tbl_section.c.id,
 +                                 section=self.tbl_section.c.section))
  
          mapper(DBSource, self.tbl_source,
                 properties = dict(source_id = self.tbl_source.c.id,
                                   srcfiles = relation(DSCFile,
                                                       primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
                                   srcassociations = relation(SrcAssociation,
-                                                             primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source))))
+                                                             primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source)),
+                                  srcuploaders = relation(SrcUploader)))
+         mapper(SourceACL, self.tbl_source_acl,
+                properties = dict(source_acl_id = self.tbl_source_acl.c.id))
  
          mapper(SrcAssociation, self.tbl_src_associations,
                 properties = dict(sa_id = self.tbl_src_associations.c.id,
                                                         primaryjoin=(self.tbl_src_uploaders.c.maintainer==self.tbl_maintainer.c.id))))
  
          mapper(Suite, self.tbl_suite,
-                properties = dict(suite_id = self.tbl_suite.c.id))
+                properties = dict(suite_id = self.tbl_suite.c.id,
+                                  policy_queue = relation(Queue),
+                                  copy_queues = relation(Queue, secondary=self.tbl_suite_queue_copy)))
  
          mapper(SuiteArchitecture, self.tbl_suite_architectures,
                 properties = dict(suite_id = self.tbl_suite_architectures.c.suite,
                 properties = dict(uid_id = self.tbl_uid.c.id,
                                   fingerprint = relation(Fingerprint)))
  
+         mapper(UploadBlock, self.tbl_upload_blocks,
+                properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
+                                  fingerprint = relation(Fingerprint, backref="uploadblocks"),
+                                  uid = relation(Uid, backref="uploadblocks")))
      ## Connection functions
      def __createconn(self):
          from config import Config
diff --combined daklib/utils.py
index accf5fdb36b9157c4ad1eb5b86dcb1bbce760e2e,05287483a52147ed19fac447fd7b2a8393f43ef5..c3e4dbb32169b9aa4c8140943f860cb64593168f
@@@ -36,7 -36,6 +36,7 @@@ import sta
  import apt_pkg
  import time
  import re
 +import string
  import email as modemail
  import subprocess
  
@@@ -45,8 -44,7 +45,8 @@@ from dak_exceptions import 
  from textutils import fix_maintainer
  from regexes import re_html_escaping, html_escaping, re_single_line_field, \
                      re_multi_line_field, re_srchasver, re_taint_free, \
 -                    re_gpg_uid, re_re_mark, re_whitespace_comment, re_issource
 +                    re_gpg_uid, re_re_mark, re_whitespace_comment, re_issource, \
 +                    re_is_orig_source
  
  from formats import parse_format, validate_changes_format
  from srcformats import get_format_from_string
@@@ -64,15 -62,18 +64,18 @@@ key_uid_email_cache = {}  #: Cache for 
  known_hashes = [("sha1", apt_pkg.sha1sum, (1, 8)),
                  ("sha256", apt_pkg.sha256sum, (1, 8))] #: hashes we accept for entries in .changes/.dsc
  
- # Monkeypatch commands.getstatusoutput as it returns a "0" exit code in
- # all situations under lenny's Python.
- import commands
+ # Monkeypatch commands.getstatusoutput as it may not return the correct exit
+ # code in lenny's Python. This also affects commands.getoutput and
+ # commands.getstatus.
  def dak_getstatusoutput(cmd):
      pipe = subprocess.Popen(cmd, shell=True, universal_newlines=True,
          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  
      output = "".join(pipe.stdout.readlines())
  
+     if output[-1:] == '\n':
+         output = output[:-1]
      ret = pipe.wait()
      if ret is None:
          ret = 0
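For context, the replacement presumably gets installed over the stdlib
function in the usual monkeypatch style, as the comment above suggests (the
assignment itself falls outside this hunk):

    import commands
    commands.getstatusoutput = dak_getstatusoutput
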
@@@ -305,13 -306,13 +308,13 @@@ def check_hash(where, files, hashname, 
          try:
              try:
                  file_handle = open_file(f)
-     
                  # Check for the hash entry, to not trigger a KeyError.
                  if not files[f].has_key(hash_key(hashname)):
                      rejmsg.append("%s: misses %s checksum in %s" % (f, hashname,
                          where))
                      continue
-     
                  # Actually check the hash for correctness.
                  if hashfunc(file_handle) != files[f][hash_key(hashname)]:
                      rejmsg.append("%s: %s check failed in %s" % (f, hashname,
@@@ -529,7 -530,8 +532,8 @@@ def build_file_list(changes, is_a_dsc=0
          raise NoFilesFieldError
  
      # Validate .changes Format: field
-     validate_changes_format(parse_format(changes['format']), field)
+     if not is_a_dsc:
+         validate_changes_format(parse_format(changes['format']), field)
  
      includes_section = (not is_a_dsc) and field == "files"
  
  
          (section, component) = extract_component_from_section(section)
  
-         files[name] = Dict(size=size, section=section,
+         files[name] = dict(size=size, section=section,
                             priority=priority, component=component)
          files[name][hashname] = md5
  
@@@ -612,7 -614,7 +616,7 @@@ def send_mail (message, filename="")
                  if len(match) == 0:
                      del message_raw[field]
                  else:
-                     message_raw.replace_header(field, string.join(match, ", "))
+                     message_raw.replace_header(field, ', '.join(match))
  
          # Change message fields in order if we don't have a To header
          if not message_raw.has_key("To"):
@@@ -753,12 -755,12 +757,12 @@@ def which_alias_file()
  
  ################################################################################
  
- def TemplateSubst(map, filename):
+ def TemplateSubst(subst_map, filename):
      """ Perform a substition of template """
      templatefile = open_file(filename)
      template = templatefile.read()
-     for x in map.keys():
-         template = template.replace(x, str(map[x]))
+     for k, v in subst_map.iteritems():
+         template = template.replace(k, str(v))
      templatefile.close()
      return template
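A short usage sketch for the renamed parameter (marker names and the template
path are illustrative, in the __MARKER__ style dak templates use):

    subst = {'__DAK_ADDRESS__': 'dak@example.org',
             '__MAINTAINER__': 'Jane Doe <jd@example.org>'}
    body = TemplateSubst(subst, '/srv/dak/templates/process-new.prod')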
  
@@@ -1091,10 -1093,6 +1095,6 @@@ def split_args (s, dwim=1)
  
  ################################################################################
  
- def Dict(**dict): return dict
- ########################################
  def gpgv_get_status_output(cmd, status_read, status_write):
      """
      Our very own version of commands.getouputstatus(), hacked to support
@@@ -1362,9 -1360,9 +1362,9 @@@ def check_signature (sig_filename, data
          rejects.append("signature on %s does not appear to be valid [No SIG_ID]." % (sig_filename))
  
      # Finally ensure there's not something we don't recognise
-     known_keywords = Dict(VALIDSIG="",SIG_ID="",GOODSIG="",BADSIG="",ERRSIG="",
+     known_keywords = dict(VALIDSIG="",SIG_ID="",GOODSIG="",BADSIG="",ERRSIG="",
                            SIGEXPIRED="",KEYREVOKED="",NO_PUBKEY="",BADARMOR="",
-                           NODATA="",NOTATION_DATA="",NOTATION_NAME="",KEYEXPIRED="")
+                           NODATA="",NOTATION_DATA="",NOTATION_NAME="",KEYEXPIRED="",POLICY_URL="")
  
      for keyword in keywords.keys():
          if not known_keywords.has_key(keyword):
@@@ -1484,7 -1482,7 +1484,7 @@@ def is_email_alias(email)
  
  ################################################################################
  
- def get_changes_files(dir):
+ def get_changes_files(from_dir):
      """
      Takes a directory and lists all .changes files in it (as well as chdir'ing
      to the directory; this is due to broken behaviour on the part of p-u/p-a).
      """
      try:
          # Much of the rest of p-u/p-a depends on being in the right place
-         os.chdir(dir)
-         changes_files = [x for x in os.listdir(dir) if x.endswith('.changes')]
+         os.chdir(from_dir)
+         changes_files = [x for x in os.listdir(from_dir) if x.endswith('.changes')]
      except OSError, e:
-         fubar("Failed to read list from directory %s (%s)" % (dir, e))
+         fubar("Failed to read list from directory %s (%s)" % (from_dir, e))
  
      return changes_files
  
@@@ -1510,50 -1508,3 +1510,3 @@@ apt_pkg.ReadConfigFileISC(Cnf,default_c
  
  if which_conf_file() != default_config:
      apt_pkg.ReadConfigFileISC(Cnf,which_conf_file())
- ###############################################################################
- def ensure_orig_files(changes, dest_dir, session):
-     """
-     Ensure that dest_dir contains all the orig tarballs for the specified
-     changes. If it does not, symlink them into place.
-     Returns a 2-tuple (already_exists, symlinked) containing a list of files
-     that were already there and a list of files that were symlinked into place.
-     """
-     exists, symlinked = [], []
-     for dsc_file in changes.dsc_files:
-         # Skip all files that are not orig tarballs
-         if not re_is_orig_source.match(dsc_file):
-             continue
-         # Skip orig files not identified in the pool
-         if not (dsc_file in changes.orig_files and
-                 'id' in changes.orig_files[dsc_file]):
-             continue
-         dest = os.path.join(dest_dir, dsc_file)
-         if os.path.exists(dest):
-             exists.append(dest)
-             continue
-         orig_file_id = changes.orig_files[dsc_file]['id']
-         c = session.execute(
-             'SELECT l.path, f.filename FROM location l, files f WHERE f.id = :id and f.location = l.id',
-             {'id': orig_file_id}
-         )
-         res = c.fetchone()
-         if not res:
-             return "[INTERNAL ERROR] Couldn't find id %s in files table." % orig_file_id
-         src = os.path.join(res[0], res[1])
-         os.symlink(src, dest)
-         symlinked.append(dest)
-     return (exists, symlinked)