fill in the changes files tables when moving to a queue
diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index 96eacd7b6a3bde2d350224bf202dd8bc6ec40bee..4e54a3272ea75a4ba1f7c71b85ad0f7affb3b0eb 100644 (file)
@@ -37,7 +37,9 @@ import os
 import re
 import psycopg2
 import traceback
-from datetime import datetime
+from datetime import datetime, timedelta
+from errno import ENOENT
+from tempfile import mkstemp, mkdtemp
 
 from inspect import getargspec
 
@@ -429,12 +431,188 @@ __all__.append('BinaryACLMap')
 
 ################################################################################
 
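+# Minimal apt.conf used by BuildQueue.write_metadata() below to drive
+# apt-ftparchive; %(archivepath)s and %(filelist)s are substituted at
+# generation time.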
+MINIMAL_APT_CONF="""
+Dir
+{
+   ArchiveDir "%(archivepath)s";
+   OverrideDir "/srv/ftp.debian.org/scripts/override/";
+   CacheDir "/srv/ftp.debian.org/database/";
+};
+
+Default
+{
+   Packages::Compress ". bzip2 gzip";
+   Sources::Compress ". bzip2 gzip";
+   DeLinkLimit 0;
+   FileMode 0664;
+}
+
+bindirectory "incoming"
+{
+   Packages "Packages";
+   Contents " ";
+
+   BinOverride "override.sid.all3";
+   BinCacheDB "packages-accepted.db";
+
+   FileList "%(filelist)s";
+
+   PathPrefix "";
+   Packages::Extensions ".deb .udeb";
+};
+
+bindirectory "incoming/"
+{
+   Sources "Sources";
+   BinOverride "override.sid.all3";
+   SrcOverride "override.sid.all3.src";
+   FileList "%(filelist)s";
+};
+"""
+
 class BuildQueue(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<Queue %s>' % self.queue_name
+        return '<BuildQueue %s>' % self.queue_name
+
+    def write_metadata(self, starttime, force=False):
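+        """Write Packages, Sources and a (possibly signed) Release file for
+        this build queue by running apt-ftparchive over the files still
+        within their stay of execution."""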
+        # Do we write out metafiles?
+        if not (force or self.generate_metadata):
+            return
+
+        session = DBConn().session().object_session(self)
+
+        fl_fd = fl_name = ac_fd = ac_name = None
+        tempdir = None
+        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
+        startdir = os.getcwd()
+
+        try:
+            # Grab files we want to include
+            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+            # Write file list with newer files
+            (fl_fd, fl_name) = mkstemp()
+            for n in newer:
+                os.write(fl_fd, '%s\n' % n.fullpath)
+            os.close(fl_fd)
+            # Mark as closed so the cleanup below doesn't close a reused fd
+            fl_fd = None
+
+            # Write minimal apt.conf
+            # TODO: Remove hardcoding from template
+            (ac_fd, ac_name) = mkstemp()
+            os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
+                                                'filelist': fl_name})
+            os.close(ac_fd)
+            ac_fd = None
+
+            # Run apt-ftparchive generate
+            os.chdir(os.path.dirname(ac_name))
+            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
+
+            # Run apt-ftparchive release
+            # TODO: Eww - fix this
+            bname = os.path.basename(self.path)
+            os.chdir(self.path)
+            os.chdir('..')
+
+            # We have to remove the Release file otherwise it'll be included in the
+            # new one
+            try:
+                os.unlink(os.path.join(bname, 'Release'))
+            except OSError:
+                pass
+
+            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
+
+            # Sign if necessary
+            if self.signingkey:
+                cnf = Config()
+                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
+                if cnf.has_key("Dinstall::SigningPubKeyring"):
+                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
+
+                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
+
+            # Move the files if we got this far
+            os.rename('Release', os.path.join(bname, 'Release'))
+            if self.signingkey:
+                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
+
+        # Clean up any left behind files
+        finally:
+            os.chdir(startdir)
+            if fl_fd:
+                try:
+                    os.close(fl_fd)
+                except OSError:
+                    pass
+
+            if fl_name:
+                try:
+                    os.unlink(fl_name)
+                except OSError:
+                    pass
+
+            if ac_fd:
+                try:
+                    os.close(ac_fd)
+                except OSError:
+                    pass
+
+            if ac_name:
+                try:
+                    os.unlink(ac_name)
+                except OSError:
+                    pass
+
+    def clean_and_update(self, starttime, Logger, dryrun=False):
+        """WARNING: This routine commits for you"""
+        session = DBConn().session().object_session(self)
+
+        if self.generate_metadata and not dryrun:
+            self.write_metadata(starttime)
+
+        # Grab files older than our execution time
+        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+
+        for o in older:
+            killdb = False
+            try:
+                if dryrun:
+                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
+                else:
+                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
+                    os.unlink(o.fullpath)
+                    killdb = True
+            except OSError, e:
+                # If it wasn't there, don't worry
+                if e.errno == ENOENT:
+                    killdb = True
+                else:
+                    # TODO: Replace with proper logging call
+                    Logger.log(["E: Could not remove %s" % o.fullpath])
+
+            if killdb:
+                session.delete(o)
+
+        session.commit()
+
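+        # Finally, remove anything left in the queue directory (other than
+        # the metadata files we generate) that the database no longer
+        # references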
+        for f in os.listdir(self.path):
+            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release'):
+                continue
+
+            try:
+                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+            except NoResultFound:
+                fp = os.path.join(self.path, f)
+                if dryrun:
+                    Logger.log(["I: Would remove unused link %s" % fp])
+                else:
+                    Logger.log(["I: Removing unused link %s" % fp])
+                    try:
+                        os.unlink(fp)
+                    except OSError:
+                        Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
 
     def add_file_from_pool(self, poolfile):
         """Copies a file into the pool.  Assumes that the PoolFile object is
@@ -450,7 +628,7 @@ class BuildQueue(object):
                    # In this case, update the BuildQueueFile entry so we
                    # don't remove it too early
                    f.lastused = datetime.now()
-                   DBConn().session().object_session(pf).add(f)
+                   DBConn().session().object_session(poolfile).add(f)
                    return f
 
         # Prepare BuildQueueFile object
@@ -518,6 +696,11 @@ class BuildQueueFile(object):
     def __repr__(self):
         return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
 
+    @property
+    def fullpath(self):
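+        """Absolute path of this file inside its build queue directory."""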
+        return os.path.join(self.buildqueue.path, self.filename)
+
+
 __all__.append('BuildQueueFile')
 
 ################################################################################
@@ -969,7 +1152,7 @@ def get_poolfile_like_name(filename, session=None):
     """
 
     # TODO: There must be a way of properly using bind parameters with %FOO%
-    q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename))
+    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
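+    # (anchoring the pattern with "%/" matches on the file's basename
+    # rather than on any substring of its path)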
 
     return q.all()
 
@@ -1254,6 +1437,75 @@ class DBChange(object):
     def __repr__(self):
         return '<DBChange %s>' % self.changesname
 
+    def upload_into_db(self, u, path):
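+        """Store the file list of the upload 'u' as ChangePendingFile
+        entries linked to this DBChange, falling back to a pool lookup
+        for files that are no longer present under 'path'."""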
+        cnf = Config()
+        session = DBConn().session().object_session(self)
+
+        files = []
+        for chg_fn, entry in u.pkg.files.items():
+            try:
+                f = open(os.path.join(path, chg_fn))
+                cpf = ChangePendingFile()
+                cpf.filename = chg_fn
+                cpf.size = entry['size']
+                cpf.md5sum = entry['md5sum']
+
+                if entry.has_key('sha1sum'):
+                    cpf.sha1sum = entry['sha1sum']
+                else:
+                    f.seek(0)
+                    cpf.sha1sum = apt_pkg.sha1sum(f)
+
+                if entry.has_key('sha256sum'):
+                    cpf.sha256sum = entry['sha256sum']
+                else:
+                    f.seek(0)
+                    cpf.sha256sum = apt_pkg.sha256sum(f)
+
+                session.add(cpf)
+                files.append(cpf)
+                f.close()
+
+            except IOError:
+                # Can't find the file, try to look it up in the pool
+                # (no Logger is available here, so use utils.warn for errors)
+                from utils import poolify, warn
+                poolname = poolify(entry["source"], entry["component"])
+                l = get_location(cnf["Dir::Pool"], entry["component"], session=session)
+
+                found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
+                                                 entry['size'],
+                                                 entry["md5sum"],
+                                                 l.location_id,
+                                                 session=session)
+
+                if found is None:
+                    warn("Found multiple files in pool for %s (component %s)" % (chg_fn, entry["component"]))
+                elif found is False and poolfile is not None:
+                    warn("md5sum/size mismatch for %s in pool" % chg_fn)
+                else:
+                    if poolfile is None:
+                        warn("Could not find %s in pool" % chg_fn)
+                    else:
+                        self.poolfiles.append(poolfile)
+
+        self.files = files
+
+
+    def clean_from_queue(self):
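+        """Disassociate this change from its queue: drop the links to its
+        pool and pending files and clear its queue/approval state."""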
+        session = DBConn().session().object_session(self)
+
+        # Remove changes_pool_files entries; iterate over a copy since we
+        # mutate the list as we go
+        for pf in self.poolfiles[:]:
+            self.poolfiles.remove(pf)
+
+        # Likewise for the pending-file associations
+        for cf in self.files[:]:
+            self.files.remove(cf)
+
+        # Clear out of queue
+        self.in_queue = None
+        self.approved_for_id = None
+
 __all__.append('DBChange')
 
 @session_wrapper
@@ -2062,6 +2314,11 @@ def add_dsc_to_db(u, filename, session=None):
                 poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
                 pfs.append(poolfile)
                 files_id = poolfile.file_id
+        else:
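+            # The file is already in the pool; fetch its PoolFile entry so
+            # that callers get a complete list back in pfs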
+            poolfile = get_poolfile_by_id(files_id, session)
+            if poolfile is None:
+                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
+            pfs.append(poolfile)
 
         df.poolfile_id = files_id
         session.add(df)
@@ -2655,6 +2912,16 @@ class DBConn(object):
                                  poolfiles = relation(PoolFile,
                                                       secondary=self.tbl_changes_pool_files,
                                                       backref="changeslinks"),
+                                 seen = self.tbl_changes.c.seen,
+                                 source = self.tbl_changes.c.source,
+                                 binaries = self.tbl_changes.c.binaries,
+                                 architecture = self.tbl_changes.c.architecture,
+                                 distribution = self.tbl_changes.c.distribution,
+                                 urgency = self.tbl_changes.c.urgency,
+                                 maintainer = self.tbl_changes.c.maintainer,
+                                 changedby = self.tbl_changes.c.changedby,
+                                 date = self.tbl_changes.c.date,
+                                 version = self.tbl_changes.c.version,
                                  files = relation(ChangePendingFile,
                                                   secondary=self.tbl_changes_pending_files_map,
                                                   backref="changesfile"),
@@ -2667,7 +2934,12 @@ class DBConn(object):
                properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
 
         mapper(ChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
+                                 filename = self.tbl_changes_pending_files.c.filename,
+                                 size = self.tbl_changes_pending_files.c.size,
+                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
+                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
+                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
 
         mapper(ChangePendingSource, self.tbl_changes_pending_source,
                properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,