Holding isn't a queue either, it's a tmpdir
dak/process_upload.py
index 3e923ea05825211c71e12746eb1756256817496e..bf74baa65be064e356abaa8459b39a6688c85ad9 100755 (executable)
@@ -125,24 +125,58 @@ Checks Debian packages from Incoming
 ##       pu: create files for BTS
 ##       pu: create entry in queue_build
 ##       pu: check overrides
-import errno
+
+# Integrity checks
+## GPG
+## Parsing changes (check for duplicates)
+## Parse dsc
+## file list checks
+
+# New check layout (TODO: Implement)
+## Permission checks
+### suite mappings
+### ACLs
+### version checks (suite)
+### override checks
+
+## Source checks
+### copy orig
+### unpack
+### BTS changelog
+### src contents
+### lintian
+### urgency log
+
+## Binary checks
+### timestamps
+### control checks
+### src relation check
+### contents
+
+## Database insertion (? copy from stuff)
+### BYHAND / NEW / Policy queues
+### Pool
+
+## Queue builds
+
+from errno import EACCES, EAGAIN
 import fcntl
 import os
 import sys
-#from datetime import datetime
 import traceback
 import apt_pkg
+from sqlalchemy.orm.exc import NoResultFound
 
 from daklib import daklog
 from daklib.queue import *
+from daklib.queue_install import *
 from daklib import utils
 from daklib.dbconn import *
-#from daklib.dak_exceptions import *
-#from daklib.regexes import re_default_answer, re_issource, re_fdnic
 from daklib.urgencylog import UrgencyLog
 from daklib.summarystats import SummaryStats
 from daklib.holding import Holding
 from daklib.config import Config
+from daklib.regexes import re_match_expired
 
 ###############################################################################
 
@@ -163,7 +197,137 @@ def usage (exit_code=0):
 
 ###############################################################################
 
-def process_it(changes_file):
+def byebye():
+    if not Options["No-Action"]:
+        # Clean out the queue files
+        session = DBConn().session()
+        session.execute("DELETE FROM changes_pending_files WHERE id NOT IN (SELECT file_id FROM changes_pending_files_map )")
+        session.commit()
+
+
+
+def action(u, session):
+    global Logger
+
+    cnf = Config()
+    holding = Holding()
+
+    # changes["distribution"] may not exist in corner cases
+    # (e.g. unreadable changes files)
+    if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], dict):
+        u.pkg.changes["distribution"] = {}
+
+    (summary, short_summary) = u.build_summaries()
+
+    (prompt, answer) = ("", "XXX")
+    if Options["No-Action"] or Options["Automatic"]:
+        answer = 'S'
+
+    queuekey = ''
+
+    pi = u.package_info()
+
+    try:
+        chg = session.query(DBChange).filter_by(changesname=os.path.basename(u.pkg.changes_file)).one()
+    except NoResultFound, e:
+        chg = None
+
+    if len(u.rejects) > 0:
+        if u.upload_too_new():
+            print "SKIP (too new)\n" + pi,
+            prompt = "[S]kip, Quit ?"
+        else:
+            print "REJECT\n" + pi
+            prompt = "[R]eject, Skip, Quit ?"
+            if Options["Automatic"]:
+                answer = 'R'
+    else:
+        # Are we headed for NEW / BYHAND / AUTOBYHAND?
+        # Note that policy queues are no longer handled here
+        qu = determine_target(u)
+        if qu:
+            print "%s for %s\n%s%s" % ( qu.upper(), ", ".join(u.pkg.changes["distribution"].keys()), pi, summary)
+            queuekey = qu[0].upper()
+            if queuekey in "RQSA":
+                queuekey = "D"
+                prompt = "[D]ivert, Skip, Quit ?"
+            else:
+                prompt = "[%s]%s, Skip, Quit ?" % (queuekey, qu[1:].lower())
+            if Options["Automatic"]:
+                answer = queuekey
+        else:
+            # Does suite have a policy_queue configured
+            divert = False
+            for s in u.pkg.changes["distribution"].keys():
+                suite = get_suite(s, session)
+                if suite.policy_queue:
+                    if not chg or chg.approved_for_id != suite.policy_queue.policy_queue_id:
+                        # This routine will check whether the upload is a binary
+                        # upload when the source is already in the target suite.  If
+                        # so, we skip the policy queue, otherwise we go there.
+                        divert = package_to_suite(u, suite.suite_name, session=session)
+                        if divert:
+                            print "%s for %s\n%s%s" % ( suite.policy_queue.queue_name.upper(),
+                                                        ", ".join(u.pkg.changes["distribution"].keys()),
+                                                        pi, summary)
+                            queuekey = "P"
+                            prompt = "[P]olicy, Skip, Quit ?"
+                            policyqueue = suite.policy_queue
+                            if Options["Automatic"]:
+                                answer = 'P'
+                            break
+
+            if not divert:
+                print "ACCEPT\n" + pi + summary,
+                prompt = "[A]ccept, Skip, Quit ?"
+                if Options["Automatic"]:
+                    answer = 'A'
+
+    while prompt.find(answer) == -1:
+        answer = utils.our_raw_input(prompt)
+        m = re_default_answer.match(prompt)
+        if answer == "":
+            answer = m.group(1)
+        answer = answer[:1].upper()
+
+    if answer == 'R':
+        os.chdir(u.pkg.directory)
+        u.do_reject(0, pi)
+    elif answer == 'A':
+        if not chg:
+            chg = u.pkg.add_known_changes(holding.holding_dir, session=session, logger=Logger)
+        session.commit()
+        u.accept(summary, short_summary, session)
+        u.check_override()
+        chg.clean_from_queue()
+        session.commit()
+        u.remove()
+    elif answer == 'P':
+        if not chg:
+            chg = u.pkg.add_known_changes(holding.holding_dir, session=session, logger=Logger)
+        package_to_queue(u, summary, short_summary, policyqueue, chg, session)
+        session.commit()
+        u.remove()
+    elif answer == queuekey:
+        if not chg:
+            chg = u.pkg.add_known_changes(holding.holding_dir, session=session, logger=Logger)
+        QueueInfo[qu]["process"](u, summary, short_summary, chg, session)
+        session.commit()
+        u.remove()
+    elif answer == 'Q':
+        byebye()
+        sys.exit(0)
+
+    session.commit()
+
+###############################################################################
+
+def cleanup():
+    h = Holding()
+    if not Options["No-Action"]:
+        h.clean()
+
+def process_it(changes_file, session):
     global Logger
 
     Logger.log(["Processing changes file", changes_file])
@@ -172,6 +336,9 @@ def process_it(changes_file):
 
     holding = Holding()
 
+    # TODO: Actually implement using pending* tables so that we don't lose track
+    #       of what is where
+
     u = Upload()
     u.pkg.changes_file = changes_file
     u.pkg.directory = os.getcwd()
@@ -197,7 +364,7 @@ def process_it(changes_file):
         # If this is the Real Thing(tm), copy things into a private
         # holding directory first to avoid replacable file races.
         if not Options["No-Action"]:
-            os.chdir(cnf["Dir::Queue::Holding"])
+            holding.chdir_to_holding()
 
             # Absolutize the filename to avoid the requirement of being in the
             # same directory as the .changes file.
@@ -214,6 +381,12 @@ def process_it(changes_file):
         if u.pkg.changes["fingerprint"]:
             valid_changes_p = u.load_changes(changespath)
         else:
+            for reason in rejects:
+                if re_match_expired.match(reason):
+                    # Hrm, key expired. Lets see if we can still parse the .changes before
+                    # we reject. Then we would be able to mail the maintainer, instead of
+                    # just silently dropping the upload.
+                    u.load_changes(changespath)
             valid_changes_p = False
             u.rejects.extend(rejects)
 
@@ -223,21 +396,24 @@ def process_it(changes_file):
             valid_dsc_p = u.check_dsc(not Options["No-Action"])
             if valid_dsc_p and not Options["No-Action"]:
                 u.check_source()
-                u.check_lintian()
             u.check_hashes()
+            if valid_dsc_p and not Options["No-Action"] and not len(u.rejects):
+                u.check_lintian()
             u.check_urgency()
             u.check_timestamps()
             u.check_signed_by_key()
 
-#        action(u)
+        action(u, session)
 
     except (SystemExit, KeyboardInterrupt):
+        cleanup()
         raise
 
     except:
         print "ERROR"
         traceback.print_exc(file=sys.stderr)
 
+    cleanup()
     # Restore previous WD
     os.chdir(u.prevdir)
 
@@ -248,7 +424,6 @@ def main():
 
     cnf = Config()
     summarystats = SummaryStats()
-    log_urgency = False
 
     DBConn()
 
@@ -280,7 +455,7 @@ def main():
 
     # Obtain lock if not in no-action mode and initialize the log
     if not Options["No-Action"]:
-        lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
+        lock_fd = os.open(os.path.join(cnf["Dir::Lock"], 'dinstall.lock'), os.O_RDWR | os.O_CREAT)
         try:
             fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
         except IOError, e:
@@ -288,12 +463,12 @@ def main():
                 utils.fubar("Couldn't obtain lock; assuming another 'dak process-upload' is already running.")
             else:
                 raise
-        if cnf.get("Dir::UrgencyLog"):
-            # Initialise UrgencyLog()
-            log_urgency = True
-            UrgencyLog()
 
-    Logger = daklog.Logger(cnf, "process-upload", Options["No-Action"])
+        # Initialise UrgencyLog() - it will deal with the case where we don't
+        # want to log urgencies
+        urgencylog = UrgencyLog()
+
+    Logger = daklog.Logger("process-upload", Options["No-Action"])
 
     # If we have a directory flag, use it to find our files
     if cnf["Dinstall::Options::Directory"] != "":
@@ -316,20 +491,29 @@ def main():
     for changes_file in changes_files:
         print "\n" + changes_file
         session = DBConn().session()
-        process_it(changes_file)
+        process_it(changes_file, session)
         session.close()
 
     if summarystats.accept_count:
         sets = "set"
         if summarystats.accept_count > 1:
             sets = "sets"
-        sys.stderr.write("Installed %d package %s, %s.\n" % (summarystats.accept_count, sets,
-                                                             utils.size_type(int(summarystats.accept_bytes))))
+        print "Installed %d package %s, %s." % (summarystats.accept_count, sets,
+                                                utils.size_type(int(summarystats.accept_bytes)))
         Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
 
+    if summarystats.reject_count:
+        sets = "set"
+        if summarystats.reject_count > 1:
+            sets = "sets"
+        print "Rejected %d package %s." % (summarystats.reject_count, sets)
+        Logger.log(["rejected", summarystats.reject_count])
+
+    byebye()
+
     if not Options["No-Action"]:
-        if log_urgency:
-            UrgencyLog().close()
+        urgencylog.close()
+
     Logger.close()
 
 ###############################################################################
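The new action() code fetches any existing DBChange row for the upload by its .changes filename with query(...).one() and treats NoResultFound as "not known yet". The same lookup, sketched in isolation; find_known_changes is an illustrative helper and the model argument stands in for dak's DBChange mapped class:

    # Sketch of the .one()/NoResultFound lookup used in action().
    import os
    from sqlalchemy.orm.exc import NoResultFound

    def find_known_changes(session, changes_file, model):
        # Return the row for this .changes file, or None if none is recorded.
        try:
            return session.query(model).filter_by(
                changesname=os.path.basename(changes_file)).one()
        except NoResultFound:
            return None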
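The lock handling in main() is a standard non-blocking flock pattern: open (or create) the lock file, try to take an exclusive lock with LOCK_NB, and give up if EACCES/EAGAIN indicates another process-upload run already holds it. A minimal standalone sketch of that pattern; the acquire_lock helper and its error message are illustrative only, and dak itself builds the path from cnf["Dir::Lock"]:

    # Sketch of the non-blocking lock taken in main() (illustrative helper).
    import fcntl
    import os
    from errno import EACCES, EAGAIN

    def acquire_lock(lockfile):
        # Open the lock file, creating it if it does not exist yet.
        fd = os.open(lockfile, os.O_RDWR | os.O_CREAT)
        try:
            # LOCK_NB makes lockf() fail immediately instead of blocking.
            fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as e:
            if e.errno in (EACCES, EAGAIN):
                raise SystemExit("Couldn't obtain lock; another run probably holds it.")
            raise
        return fd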