]> git.decadent.org.uk Git - dak.git/blobdiff - daklib/queue.py
Fix various errors caused by moving stuff around
[dak.git] / daklib / queue.py
index 3e19ac18722c8115f4ed16621b5d379ae66bd9e3..bfacbfaf197b5b89d115150fa3eb0d6396e24f50 100755 (executable)
@@ -38,6 +38,8 @@ import commands
 import shutil
 import textwrap
 from types import *
+from sqlalchemy.sql.expression import desc
+from sqlalchemy.orm.exc import NoResultFound
 
 import yaml
 
@@ -46,6 +48,7 @@ from changes import *
 from regexes import *
 from config import Config
 from holding import Holding
+from urgencylog import UrgencyLog
 from dbconn import *
 from summarystats import SummaryStats
 from utils import parse_changes, check_dsc_files
@@ -796,17 +799,11 @@ class Upload(object):
             entry["othercomponents"] = res.fetchone()[0]
 
     def check_files(self, action=True):
-        archive = utils.where_am_i()
         file_keys = self.pkg.files.keys()
         holding = Holding()
         cnf = Config()
 
-        # XXX: As far as I can tell, this can no longer happen - see
-        #      comments by AJ in old revisions - mhy
-        # if reprocess is 2 we've already done this and we're checking
-        # things again for the new .orig.tar.gz.
-        # [Yes, I'm fully aware of how disgusting this is]
-        if action and self.reprocess < 2:
+        if action:
             cwd = os.getcwd()
             os.chdir(self.pkg.directory)
             for f in file_keys:
@@ -817,11 +814,10 @@ class Upload(object):
 
             os.chdir(cwd)
 
-        # Check there isn't already a .changes or .dak file of the same name in
-        # the proposed-updates "CopyChanges" or "CopyDotDak" storage directories.
+        # Check there isn't already a .changes file of the same name in
+        # any of the proposed-updates "CopyChanges" storage directories.
         # [NB: this check must be done post-suite mapping]
         base_filename = os.path.basename(self.pkg.changes_file)
-        dot_dak_filename = base_filename[:-8] + ".dak"
 
         for suite in self.pkg.changes["distribution"].keys():
             copychanges = "Suite::%s::CopyChanges" % (suite)
@@ -830,13 +826,6 @@ class Upload(object):
                 self.rejects.append("%s: a file with this name already exists in %s" \
                            % (base_filename, cnf[copychanges]))
 
-            copy_dot_dak = "Suite::%s::CopyDotDak" % (suite)
-            if cnf.has_key(copy_dot_dak) and \
-                   os.path.exists(os.path.join(cnf[copy_dot_dak], dot_dak_filename)):
-                self.rejects.append("%s: a file with this name already exists in %s" \
-                           % (dot_dak_filename, Cnf[copy_dot_dak]))
-
-        self.reprocess = 0
         has_binaries = False
         has_source = False
 
@@ -1084,15 +1073,10 @@ class Upload(object):
             self.rejects.append("%s: changelog format not recognised (empty version tree)." % (dsc_filename))
 
     def check_source(self):
-        # XXX: I'm fairly sure reprocess == 2 can never happen
-        #      AJT disabled the is_incoming check years ago - mhy
-        #      We should probably scrap or rethink the whole reprocess thing
         # Bail out if:
         #    a) there's no source
-        # or b) reprocess is 2 - we will do this check next time when orig
-        #       tarball is in 'files'
         # or c) the orig files are MIA
-        if not self.pkg.changes["architecture"].has_key("source") or self.reprocess == 2 \
+        if not self.pkg.changes["architecture"].has_key("source") \
            or len(self.pkg.orig_files) == 0:
             return
 
@@ -1493,7 +1477,7 @@ class Upload(object):
         #  or binary, whereas keys with no access might be able to
         #  upload some binaries)
         if fpr.source_acl.access_level == 'dm':
-            self.check_dm_source_upload(fpr, session)
+            self.check_dm_upload(fpr, session)
         else:
             # Check source-based permissions for other types
             if self.pkg.changes["architecture"].has_key("source"):
@@ -1838,12 +1822,12 @@ distribution."""
 
     ###########################################################################
 
-    def accept (self, summary, short_summary, targetdir=None):
+    def accept (self, summary, short_summary, session):
         """
         Accept an upload.
 
-        This moves all files referenced from the .changes into the I{accepted}
-        queue, sends the accepted mail, announces to lists, closes bugs and
+        This moves all files referenced from the .changes into the pool,
+        sends the accepted mail, announces to lists, closes bugs and
         also checks for override disparities. If enabled it will write out
         the version history for the BTS Version Tracking and will finally call
         L{queue_build}.
@@ -1853,31 +1837,84 @@ distribution."""
 
         @type short_summary: string
         @param short_summary: Short summary
-
+
+        @type session: SQLAlchemy ORM session
+        @param session: database session to use for the install
         """
 
         cnf = Config()
         stats = SummaryStats()
 
-        accepttemplate = os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted')
+        print "Installing."
+        self.logger.log(["installing changes", self.pkg.changes_file])
 
-        if targetdir is None:
-            targetdir = cnf["Dir::Queue::Accepted"]
+        # Add the .dsc file to the DB first
+        for newfile, entry in self.pkg.files.items():
+            if entry["type"] == "dsc":
+                dsc_component, dsc_location_id = add_dsc_to_db(self, newfile, session)
 
-        print "Accepting."
-        if self.logger:
-            self.logger.log(["Accepting changes", self.pkg.changes_file])
+        # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
+        for newfile, entry in self.pkg.files.items():
+            if entry["type"] == "deb":
+                add_deb_to_db(self, newfile, session)
+
+        # If this is a sourceful diff only upload that is moving
+        # cross-component we need to copy the .orig files into the new
+        # component too for the same reasons as above.
+        if self.pkg.changes["architecture"].has_key("source"):
+            for orig_file in self.pkg.orig_files.keys():
+                if not self.pkg.orig_files[orig_file].has_key("id"):
+                    continue # Skip if it's not in the pool
+                orig_file_id = self.pkg.orig_files[orig_file]["id"]
+                if self.pkg.orig_files[orig_file]["location"] == dsc_location_id:
+                    continue # Skip if the location didn't change
+
+                # Do the move
+                oldf = get_poolfile_by_id(orig_file_id, session)
+                old_filename = os.path.join(oldf.location.path, oldf.filename)
+                old_dat = {'size': oldf.filesize,   'md5sum': oldf.md5sum,
+                           'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
+
+                new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
+
+                # TODO: Care about size/md5sum collisions etc
+                (found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
+
+                if newf is None:
+                    utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
+                    newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
+
+                    # TODO: Check that there's only 1 here
+                    source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0]
+                    dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
+                    dscf.poolfile_id = newf.file_id
+                    session.add(dscf)
+                    session.flush()
+
+        # Install the files into the pool
+        for newfile, entry in self.pkg.files.items():
+            destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
+            utils.move(newfile, destination)
+            self.logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
+            stats.accept_bytes += float(entry["size"])
 
-        self.pkg.write_dot_dak(targetdir)
+        # Copy the .changes file across for suite which need it.
+        copy_changes = {}
+        for suite_name in self.pkg.changes["distribution"].keys():
+            if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)):
+                copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = ""
 
-        # Move all the files into the accepted directory
-        utils.move(self.pkg.changes_file, targetdir)
+        for dest in copy_changes.keys():
+            utils.copy(self.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
 
-        for name, entry in sorted(self.pkg.files.items()):
-            utils.move(name, targetdir)
-            stats.accept_bytes += float(entry["size"])
+        # We're done - commit the database changes
+        session.commit()
+        # Our SQL session will automatically start a new transaction after
+        # the last commit
 
-        stats.accept_count += 1
+        # Move the .changes into the 'done' directory
+        utils.move(self.pkg.changes_file,
+                   os.path.join(cnf["Dir::Queue::Done"], os.path.basename(self.pkg.changes_file)))
+
+        if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
+            UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
 
         # Send accept mail, announce to lists, close bugs and check for
         # override disparities
@@ -1885,7 +1922,8 @@ distribution."""
             self.update_subst()
             self.Subst["__SUITE__"] = ""
             self.Subst["__SUMMARY__"] = summary
-            mail_message = utils.TemplateSubst(self.Subst, accepttemplate)
+            mail_message = utils.TemplateSubst(self.Subst,
+                                               os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
             utils.send_mail(mail_message)
             self.announce(short_summary, 1)
 
@@ -1923,24 +1961,16 @@ distribution."""
             os.rename(temp_filename, filename)
             os.chmod(filename, 0644)
 
-        # Its is Cnf["Dir::Queue::Accepted"] here, not targetdir!
-        # <Ganneff> we do call queue_build too
-        # <mhy> well yes, we'd have had to if we were inserting into accepted
-        # <Ganneff> now. thats database only.
-        # <mhy> urgh, that's going to get messy
-        # <Ganneff> so i make the p-n call to it *also* using accepted/
-        # <mhy> but then the packages will be in the queue_build table without the files being there
-        # <Ganneff> as the buildd queue is only regenerated whenever unchecked runs
-        # <mhy> ah, good point
-        # <Ganneff> so it will work out, as unchecked move it over
-        # <mhy> that's all completely sick
-        # <Ganneff> yes
-
-        # This routine returns None on success or an error on failure
-        res = get_or_set_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"])
-        if res:
-            utils.fubar(res)
+        # auto-build queue
+#        res = get_or_set_queue('buildd', session).autobuild_upload(self.pkg, session)
+#        if res:
+#            utils.fubar(res)
+#            now_date = datetime.now()
 
+        session.commit()
+
+        # Finally...
+        stats.accept_count += 1
 
     def check_override(self):
         """
@@ -1979,15 +2009,21 @@ distribution."""
     def remove(self, from_dir=None):
         """
         Used (for instance) in p-u to remove the package from unchecked
+
+        Also removes the package from holding area.
         """
         if from_dir is None:
-            os.chdir(self.pkg.directory)
-        else:
-            os.chdir(from_dir)
+            from_dir = self.pkg.directory
+        h = Holding()
 
         for f in self.pkg.files.keys():
-            os.unlink(f)
-        os.unlink(self.pkg.changes_file)
+            os.unlink(os.path.join(from_dir, f))
+            if os.path.exists(os.path.join(h.holding_dir, f)):
+                os.unlink(os.path.join(h.holding_dir, f))
+
+        os.unlink(os.path.join(from_dir, self.pkg.changes_file))
+        if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
+            os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
 
     ###########################################################################
 
@@ -1995,9 +2031,11 @@ distribution."""
         """
         Move files to dest with certain perms/changesperms
         """
-        utils.move(self.pkg.changes_file, dest, perms=changesperms)
+        h = Holding()
+        utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
+                   dest, perms=changesperms)
         for f in self.pkg.files.keys():
-            utils.move(f, dest, perms=perms)
+            utils.move(os.path.join(h.holding_dir, f), dest, perms=perms)
 
     ###########################################################################