git.decadent.org.uk Git - dak.git/commitdiff
merge from ftp-master
author    Mike O'Connor <stew@vireo.org>
          Sat, 19 Dec 2009 13:41:19 +0000 (08:41 -0500)
committer Mike O'Connor <stew@vireo.org>
          Sat, 19 Dec 2009 13:41:19 +0000 (08:41 -0500)
Signed-off-by: Mike O'Connor <stew@vireo.org>
36 files changed:
config/debian/common
config/debian/cron.dinstall
config/debian/cron.hourly
config/debian/cron.unchecked
config/debian/dak.conf
config/debian/dinstall.functions
dak/cruft_report.py
dak/dak.py
dak/dakdb/update25.py
dak/dakdb/update26.py [new file with mode: 0755]
dak/dakdb/update27.py [new file with mode: 0755]
dak/dakdb/update28.py [new file with mode: 0644]
dak/dominate.py [new file with mode: 0755]
dak/examine_package.py
dak/import_new_files.py [new file with mode: 0755]
dak/import_users_from_passwd.py
dak/override.py
dak/process_new.py
dak/process_policy.py [new file with mode: 0755]
dak/process_upload.py
dak/queue_report.py
dak/show_new.py
dak/transitions.py
dak/update_db.py
daklib/changes.py
daklib/changesutils.py [new file with mode: 0644]
daklib/dbconn.py
daklib/holding.py
daklib/queue.py [changed mode: 0644->0755]
daklib/queue_install.py [changed mode: 0644->0755]
daklib/utils.py
tools/debianqueued-0.9/config
tools/debianqueued-0.9/config-security
tools/debianqueued-0.9/config-upload
tools/logs.py
web/index.html

index 70f92fdd52464718331cc8adceef09fba6ff1538..212d84ad4b07adc456e76580bebcf9a74b779093 100644 (file)
@@ -49,6 +49,15 @@ function make_buildd_dir () {
     cp -al ${incoming}/buildd/. tree/${STAMP}/
     ln -sfT tree/${STAMP} ${incoming}/builddweb
     find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
+
+    for dist in experimental
+    do
+        cd ${incoming}/dists/${dist}
+        mkdir -p tree/${STAMP}
+        cp -al ${incoming}/dists/${dist}/buildd/. tree/${STAMP}/
+        ln -sfT tree/${STAMP} ${incoming}/dists/${dist}/current
+        find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
+    done
 }
 
 # Do the unchecked processing, in case we have files.
@@ -64,6 +73,19 @@ function do_unchecked () {
     dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$unchecked" >> $report
 }
 
+# Do the newstage processing, in case we have files.
+function do_newstage () {
+    cd $newstage
+
+    changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs)
+    report=$queuedir/REPORT
+    timestamp=$(date "+%Y-%m-%d %H:%M")
+    UNCHECKED_WITHOUT_LOCK=${UNCHECKED_WITHOUT_LOCK:-""}
+
+    echo "$timestamp": ${changes:-"Nothing to do in newstage"}  >> $report
+    dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$newstage" >> $report
+}
+
 function sync_debbugs () {
     # sync with debbugs
     echo "--" >> $report
index e3c6f4e672290c8e07ee96d6354b649ec4d49f0a..747db2f1333b2c7bec2d9c6313930e1b184fa87b 100755 (executable)
@@ -244,8 +244,8 @@ rm -f "$LOCK_ACCEPTED"
 rm -f "$LOCK_NEW"
 
 GO=(
-    FUNC="msfl"
-    TIME="make-suite-file-list"
+    FUNC="dominate"
+    TIME="dominate"
     ARGS=""
     ERR=""
 )
index 9dd1450473a24d674db70207cc722419097e435a..6d0efb2cbdebf94dea432aba1b49b711be027845 100755 (executable)
@@ -15,7 +15,7 @@ dak queue-report -n > $webdir/new.html
 # We used to have accepted in here, but it doesn't exist in that form any more
 dak queue-report -8 -d new,byhand,proposedupdates,oldproposedupdates
 dak show-deferred > ${webdir}/deferred.html
-#cd $queuedir/new ; dak show-new *.changes > /dev/null
+dak show-new > /dev/null
 $base/dak/tools/queue_rss.py -q $queuedir/new -o $webdir/rss/ -d $base/misc
 $base/dak/tools/removals.pl > $webdir/rss/removals.rss
 
index 008b8243abec079291bb62692cd70d5ad2f56280..a9e59f2f373a7b7c006d08f49c17c011b1f55c40 100755 (executable)
@@ -90,6 +90,7 @@ export LC_ALL=C
 lockfile -r3 $LOCKFILE || exit 0
 trap cleanup 0
 
+do_newstage
 do_unchecked
 
 if [ ! -z "$changes" ]; then
index ceafaeda9e9a783ba73133e722685a0b4498535e..bb7682b2e0ec48acb0e1b7a051e60b9b1d72873b 100644 (file)
@@ -49,7 +49,6 @@ Dinstall
 Transitions
 {
    Notifications "debian-devel@lists.debian.org";
-   TempPath "/srv/ftp.debian.org/tmp/";
 };
 
 Generate-Index-Diffs
@@ -555,6 +554,7 @@ Dir
   Override "/srv/ftp.debian.org/scripts/override/";
   QueueBuild "/srv/incoming.debian.org/buildd/";
   UrgencyLog "/srv/release.debian.org/britney/input/urgencies/";
+  TempPath "/srv/ftp.debian.org/tmp/";
   Queue
   {
     Byhand "/srv/ftp.debian.org/queue/byhand/";
index ddce96920c45b73add7b903c0612b1c03e2b9668..26c1c8b8bd33c5a0fc30480468c82991cefbcd12 100644 (file)
@@ -129,9 +129,9 @@ function cruft() {
     dak check-overrides
 }
 
-function msfl() {
-    log "Generating suite file lists for apt-ftparchive"
-    dak make-suite-file-list
+function dominate() {
+    log "Removing obsolete source and binary associations"
+    dak dominate
 }
 
 function filelist() {
index 4541bf6eb6477c8c09cc91022b8c41636d031ebc..ef9362fda3235bc6f548d11a66a849c06d8bebbe 100755 (executable)
@@ -238,7 +238,7 @@ def do_nbs(real_nbs):
             output += "        o %s: %s\n" % (version, ", ".join(packages))
         if all_packages:
             all_packages.sort()
-            cmd_output += " dak rm -m \"[auto-cruft] NBS (was built by %s)\" -s %s -b %s\n\n" % (source, suite.suite_name, " ".join(all_packages))
+            cmd_output += " dak rm -m \"[auto-cruft] NBS (was built by %s)\" -s %s -b %s -R\n\n" % (source, suite.suite_name, " ".join(all_packages))
 
         output += "\n"
 
@@ -272,47 +272,84 @@ def do_dubious_nbs(dubious_nbs):
 
 ################################################################################
 
-def do_obsolete_source(duplicate_bins, bin2source):
-    obsolete = {}
-    for key in duplicate_bins.keys():
-        (source_a, source_b) = key.split('_')
-        for source in [ source_a, source_b ]:
-            if not obsolete.has_key(source):
-                if not source_binaries.has_key(source):
-                    # Source has already been removed
-                    continue
-                else:
-                    obsolete[source] = [ i.strip() for i in source_binaries[source].split(',') ]
-            for binary in duplicate_bins[key]:
-                if bin2source.has_key(binary) and bin2source[binary]["source"] == source:
-                    continue
-                if binary in obsolete[source]:
-                    obsolete[source].remove(binary)
-
-    to_remove = []
-    output = "Obsolete source package\n"
-    output += "-----------------------\n\n"
-    obsolete_keys = obsolete.keys()
-    obsolete_keys.sort()
-    for source in obsolete_keys:
-        if not obsolete[source]:
-            to_remove.append(source)
-            output += " * %s (%s)\n" % (source, source_versions[source])
-            for binary in [ i.strip() for i in source_binaries[source].split(',') ]:
-                if bin2source.has_key(binary):
-                    output += "    o %s (%s) is built by %s.\n" \
-                          % (binary, bin2source[binary]["version"],
-                             bin2source[binary]["source"])
-                else:
-                    output += "    o %s is not built.\n" % binary
-            output += "\n"
-
-    if to_remove:
-        print output
-
-        print "Suggested command:"
-        print " dak rm -S -p -m \"[auto-cruft] obsolete source package\" %s" % (" ".join(to_remove))
-        print
+def obsolete_source(suite_name, session):
+    """returns obsolete source packages for suite_name without binaries
+    in the same suite sorted by install_date; install_date should help
+    detecting source only (or binary throw away) uploads; duplicates in
+    the suite are skipped
+
+    subquery 'source_suite_unique' returns source package names from
+    suite without duplicates; the rationale behind is that neither
+    cruft-report nor rm cannot handle duplicates (yet)"""
+
+    query = """
+WITH source_suite_unique AS
+    (SELECT source, suite
+        FROM source_suite GROUP BY source, suite HAVING count(*) = 1)
+SELECT ss.src, ss.source, ss.version,
+    to_char(ss.install_date, 'YYYY-MM-DD') AS install_date
+    FROM source_suite ss
+    JOIN source_suite_unique ssu
+       ON ss.source = ssu.source AND ss.suite = ssu.suite
+    JOIN suite s ON s.id = ss.suite
+    LEFT JOIN bin_associations_binaries bab
+       ON ss.src = bab.source AND ss.suite = bab.suite
+    WHERE s.suite_name = :suite_name AND bab.id IS NULL
+    ORDER BY install_date"""
+    args = { 'suite_name': suite_name }
+    return session.execute(query, args)
+
+def source_bin(source, session):
+    """returns binaries built by source for all or no suite grouped and
+    ordered by package name"""
+
+    query = """
+SELECT b.package
+    FROM binaries b
+    JOIN src_associations_src sas ON b.source = sas.src
+    WHERE sas.source = :source
+    GROUP BY b.package
+    ORDER BY b.package"""
+    args = { 'source': source }
+    return session.execute(query, args)
+
+def newest_source_bab(suite_name, package, session):
+    """returns newest source that builds binary package in suite grouped
+    and sorted by source and package name"""
+
+    query = """
+SELECT sas.source, MAX(sas.version) AS srcver
+    FROM src_associations_src sas
+    JOIN bin_associations_binaries bab ON sas.src = bab.source
+    JOIN suite s on s.id = bab.suite
+    WHERE s.suite_name = :suite_name AND bab.package = :package
+       GROUP BY sas.source, bab.package
+        ORDER BY sas.source, bab.package"""
+    args = { 'suite_name': suite_name, 'package': package }
+    return session.execute(query, args)
+
+def report_obsolete_source(suite_name, session):
+    rows = obsolete_source(suite_name, session)
+    if rows.rowcount == 0:
+        return
+    print \
+"""Obsolete source packages in suite %s
+----------------------------------%s\n""" % \
+        (suite_name, '-' * len(suite_name))
+    for os_row in rows.fetchall():
+        (src, old_source, version, install_date) = os_row
+        print " * obsolete source %s version %s installed at %s" % \
+            (old_source, version, install_date)
+        for sb_row in source_bin(old_source, session):
+            (package, ) = sb_row
+            print "   - has built binary %s" % package
+            for nsb_row in newest_source_bab(suite_name, package, session):
+                (new_source, srcver) = nsb_row
+                print "     currently built by source %s version %s" % \
+                    (new_source, srcver)
+        print "   - suggested command:"
+        rm_opts = "-S -p -m \"[auto-cruft] obsolete source package\""
+        print "     dak rm -s %s %s %s\n" % (suite_name, rm_opts, old_source)
 
 def get_suite_binaries(suite, session):
     # Initalize a large hash table of all binary packages
@@ -383,10 +420,13 @@ def main ():
     suite_id = suite.suite_id
     suite_name = suite.suite_name.lower()
 
+    if "obsolete source" in checks:
+        report_obsolete_source(suite_name, session)
+
     bin_not_built = {}
 
     if "bnb" in checks:
-        bins_in_suite = get_suite_binaries(suite_name, session)
+        bins_in_suite = get_suite_binaries(suite, session)
 
     # Checks based on the Sources files
     components = cnf.ValueList("Suite::%s::Components" % (suite_name))
@@ -419,8 +459,8 @@ def main ():
 
             # Check for duplicated packages and build indices for checking "no source" later
             source_index = component + '/' + source
-            if src_pkgs.has_key(source):
-                print " %s is a duplicated source package (%s and %s)" % (source, source_index, src_pkgs[source])
+            #if src_pkgs.has_key(source):
+            #    print " %s is a duplicated source package (%s and %s)" % (source, source_index, src_pkgs[source])
             src_pkgs[source] = source_index
             for binary in binaries_list:
                 if bin_pkgs.has_key(binary):
@@ -501,9 +541,6 @@ def main ():
             packages.close()
             os.unlink(temp_filename)
 
-    if "obsolete source" in checks:
-        do_obsolete_source(duplicate_bins, bin2source)
-
     # Distinguish dubious (version numbers match) and 'real' NBS (they don't)
     dubious_nbs = {}
     real_nbs = {}
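For illustration, a minimal sketch of how the new helpers above compose (not part of the patch; assumes a configured daklib, and 'unstable' is an example suite name):

    from daklib.dbconn import DBConn

    # Walk obsolete sources and the binaries they build (sketch only).
    session = DBConn().session()
    for src, source, version, install_date in obsolete_source('unstable', session):
        print "obsolete source %s %s (installed %s)" % (source, version, install_date)
        for (package,) in source_bin(source, session):
            print "  builds %s" % package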
index 1d9336dbfb1e5ee9c364726e5b6e3c1d836c87bb..67314ff5bf54718d03141c28ef2bcc9071488ea9 100755 (executable)
@@ -69,9 +69,13 @@ def init():
          "Process NEW and BYHAND packages"),
         ("process-upload",
          "Process packages in queue/unchecked"),
+        ("process-policy",
+         "Process packages in policy queues from COMMENTS files"),
 
+        ("dominate",
+         "Remove obsolete source and binary associations from suites"),
         ("make-suite-file-list",
-         "Generate lists of packages per suite for apt-ftparchive"),
+         "OBSOLETE: replaced by dominate and generate-filelist"),
         ("make-pkg-file-mapping",
          "Generate package <-> file mapping"),
         ("generate-filelist",
index a61deb61352f6b1464309147655142f485b3e7be..b2813d9118f820438676a54f9b7bf78aeb718b18 100644 (file)
@@ -1,11 +1,10 @@
 #!/usr/bin/env python
-# coding=utf8
 
 """
-Adding a trainee field to the process-new notes
+Add views for new dominate command.
 
 @contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2009  Mike O'Connor <stew@debian.org>
+@copyright: 2009  Torsten Werner <twerner@debian.org>
 @license: GNU General Public License version 2 or later
 """
 
@@ -23,246 +22,168 @@ Adding a trainee field to the process-new notes
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
-################################################################################
-
-
-################################################################################
-
 import psycopg2
-import time
 from daklib.dak_exceptions import DBUpdateError
-
-################################################################################
-
-def suites():
-    """
-    return a list of suites to operate on
-    """
-    if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
-        suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
-    else:
-        suites = [ 'unstable', 'testing' ]
-#            suites = Config().SubTree("Suite").List()
-
-    return suites
-
-def arches(cursor, suite):
-    """
-    return a list of archs to operate on
-    """
-    arch_list = []
-    cursor.execute("""SELECT s.architecture, a.arch_string
-    FROM suite_architectures s
-    JOIN architecture a ON (s.architecture=a.id)
-    WHERE suite = :suite""", {'suite' : suite })
-
-    while True:
-        r = cursor.fetchone()
-        if not r:
-            break
-
-        if r[1] != "source" and r[1] != "all":
-            arch_list.append((r[0], r[1]))
-
-    return arch_list
 
 def do_update(self):
-    """
-    Adding contents table as first step to maybe, finally getting rid
-    of apt-ftparchive
-    """
-
-    print __doc__
+    print "Add views for generate_filelist to database."
 
     try:
         c = self.db.cursor()
 
-        c.execute("""CREATE TABLE pending_bin_contents (
-        id serial NOT NULL,
-        package text NOT NULL,
-        version debversion NOT NULL,
-        arch int NOT NULL,
-        filename text NOT NULL,
-        type int NOT NULL,
-        PRIMARY KEY(id))""" );
-
-        c.execute("""CREATE TABLE deb_contents (
-        filename text,
-        section text,
-        package text,
-        binary_id integer,
-        arch integer,
-        suite integer)""" )
-
-        c.execute("""CREATE TABLE udeb_contents (
-        filename text,
-        section text,
-        package text,
-        binary_id integer,
-        suite integer,
-        arch integer)""" )
-
-        c.execute("""ALTER TABLE ONLY deb_contents
-        ADD CONSTRAINT deb_contents_arch_fkey
-        FOREIGN KEY (arch) REFERENCES architecture(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""ALTER TABLE ONLY udeb_contents
-        ADD CONSTRAINT udeb_contents_arch_fkey
-        FOREIGN KEY (arch) REFERENCES architecture(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""ALTER TABLE ONLY deb_contents
-        ADD CONSTRAINT deb_contents_pkey
-        PRIMARY KEY (filename,package,arch,suite);""")
-
-        c.execute("""ALTER TABLE ONLY udeb_contents
-        ADD CONSTRAINT udeb_contents_pkey
-        PRIMARY KEY (filename,package,arch,suite);""")
-
-        c.execute("""ALTER TABLE ONLY deb_contents
-        ADD CONSTRAINT deb_contents_suite_fkey
-        FOREIGN KEY (suite) REFERENCES suite(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""ALTER TABLE ONLY udeb_contents
-        ADD CONSTRAINT udeb_contents_suite_fkey
-        FOREIGN KEY (suite) REFERENCES suite(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""ALTER TABLE ONLY deb_contents
-        ADD CONSTRAINT deb_contents_binary_fkey
-        FOREIGN KEY (binary_id) REFERENCES binaries(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""ALTER TABLE ONLY udeb_contents
-        ADD CONSTRAINT udeb_contents_binary_fkey
-        FOREIGN KEY (binary_id) REFERENCES binaries(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""CREATE INDEX ind_deb_contents_binary ON deb_contents(binary_id);""" )
-
-
-        suites = self.suites()
-
-        for suite in [i.lower() for i in suites]:
-            suite_id = DBConn().get_suite_id(suite)
-            arch_list = arches(c, suite_id)
-            arch_list = arches(c, suite_id)
-
-            for (arch_id,arch_str) in arch_list:
-                c.execute( "CREATE INDEX ind_deb_contents_%s_%s ON deb_contents (arch,suite) WHERE (arch=2 OR arch=%d) AND suite=$d"%(arch_str,suite,arch_id,suite_id) )
-
-            for section, sname in [("debian-installer","main"),
-                                  ("non-free/debian-installer", "nonfree")]:
-                c.execute( "CREATE INDEX ind_udeb_contents_%s_%s ON udeb_contents (section,suite) WHERE section=%s AND suite=$d"%(sname,suite,section,suite_id) )
-
-
-        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_bin_a() RETURNS trigger AS  $$
-    event = TD["event"]
-    if event == "DELETE" or event == "UPDATE":
-
-        plpy.execute(plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 and suite=$2",
-                                  ["int","int"]),
-                                  [TD["old"]["bin"], TD["old"]["suite"]])
-
-    if event == "INSERT" or event == "UPDATE":
-
-       content_data = plpy.execute(plpy.prepare(
-            """SELECT s.section, b.package, b.architecture, ot.type
-            FROM override o
-            JOIN override_type ot on o.type=ot.id
-            JOIN binaries b on b.package=o.package
-            JOIN files f on b.file=f.id
-            JOIN location l on l.id=f.location
-            JOIN section s on s.id=o.section
-            WHERE b.id=$1
-            AND o.suite=$2
-            """,
-            ["int", "int"]),
-            [TD["new"]["bin"], TD["new"]["suite"]])[0]
-
-       tablename="%s_contents" % content_data['type']
-
-       plpy.execute(plpy.prepare("""DELETE FROM %s
-                   WHERE package=$1 and arch=$2 and suite=$3""" % tablename,
-                   ['text','int','int']),
-                   [content_data['package'],
-                   content_data['architecture'],
-                   TD["new"]["suite"]])
-
-       filenames = plpy.execute(plpy.prepare(
-           "SELECT bc.file FROM bin_contents bc where bc.binary_id=$1",
-           ["int"]),
-           [TD["new"]["bin"]])
-
-       for filename in filenames:
-           plpy.execute(plpy.prepare(
-               """INSERT INTO %s
-                   (filename,section,package,binary_id,arch,suite)
-                   VALUES($1,$2,$3,$4,$5,$6)""" % tablename,
-               ["text","text","text","int","int","int"]),
-               [filename["file"],
-                content_data["section"],
-                content_data["package"],
-                TD["new"]["bin"],
-                content_data["architecture"],
-                TD["new"]["suite"]] )
-$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
-""")
-
-
-        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_override() RETURNS trigger AS  $$
-    event = TD["event"]
-    if event == "UPDATE":
-
-        otype = plpy.execute(plpy.prepare("SELECT type from override_type where id=$1",["int"]),[TD["new"]["type"]] )[0];
-        if otype["type"].endswith("deb"):
-            section = plpy.execute(plpy.prepare("SELECT section from section where id=$1",["int"]),[TD["new"]["section"]] )[0];
-
-            table_name = "%s_contents" % otype["type"]
-            plpy.execute(plpy.prepare("UPDATE %s set section=$1 where package=$2 and suite=$3" % table_name,
-                                      ["text","text","int"]),
-                                      [section["section"],
-                                      TD["new"]["package"],
-                                      TD["new"]["suite"]])
-
-$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
-""")
-
-        c.execute("""CREATE OR REPLACE FUNCTION update_contents_for_override()
-                      RETURNS trigger AS  $$
-    event = TD["event"]
-    if event == "UPDATE" or event == "INSERT":
-        row = TD["new"]
-        r = plpy.execute(plpy.prepare( """SELECT 1 from suite_architectures sa
-                  JOIN binaries b ON b.architecture = sa.architecture
-                  WHERE b.id = $1 and sa.suite = $2""",
-                ["int", "int"]),
-                [row["bin"], row["suite"]])
-        if not len(r):
-            plpy.error("Illegal architecture for this suite")
-
-$$ LANGUAGE plpythonu VOLATILE;""")
-
-        c.execute( """CREATE TRIGGER illegal_suite_arch_bin_associations_trigger
-                      BEFORE INSERT OR UPDATE ON bin_associations
-                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
-
-        c.execute( """CREATE TRIGGER bin_associations_contents_trigger
-                      AFTER INSERT OR UPDATE OR DELETE ON bin_associations
-                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_bin_a();""")
-        c.execute("""CREATE TRIGGER override_contents_trigger
-                      AFTER UPDATE ON override
-                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
-
-
-        c.execute( "CREATE INDEX ind_deb_contents_name ON deb_contents(package);");
-        c.execute( "CREATE INDEX ind_udeb_contents_name ON udeb_contents(package);");
-
+        print "Drop old views."
+        c.execute("DROP VIEW IF EXISTS binaries_suite_arch CASCADE")
+        c.execute("DROP VIEW IF EXISTS newest_all_associations CASCADE")
+        c.execute("DROP VIEW IF EXISTS obsolete_any_by_all_associations CASCADE")
+        c.execute("DROP VIEW IF EXISTS newest_any_associations CASCADE")
+        c.execute("DROP VIEW IF EXISTS obsolete_any_associations CASCADE")
+        c.execute("DROP VIEW IF EXISTS source_suite CASCADE")
+        c.execute("DROP VIEW IF EXISTS newest_source CASCADE")
+        c.execute("DROP VIEW IF EXISTS newest_src_association CASCADE")
+        c.execute("DROP VIEW IF EXISTS any_associations_source CASCADE")
+        c.execute("DROP VIEW IF EXISTS src_associations_src CASCADE")
+        c.execute("DROP VIEW IF EXISTS almost_obsolete_src_associations CASCADE")
+        c.execute("DROP VIEW IF EXISTS obsolete_src_associations CASCADE")
+        c.execute("DROP VIEW IF EXISTS bin_associations_binaries CASCADE")
+        c.execute("DROP VIEW IF EXISTS src_associations_bin CASCADE")
+        c.execute("DROP VIEW IF EXISTS almost_obsolete_all_associations CASCADE")
+        c.execute("DROP VIEW IF EXISTS obsolete_all_associations CASCADE")
+
+        print "Create new views."
+        c.execute("""
+CREATE VIEW binaries_suite_arch AS
+    SELECT bin_associations.id, binaries.id AS bin, binaries.package,
+           binaries.version, binaries.source, bin_associations.suite,
+           suite.suite_name, binaries.architecture, architecture.arch_string
+        FROM binaries JOIN bin_associations ON binaries.id = bin_associations.bin
+        JOIN suite ON suite.id = bin_associations.suite
+        JOIN architecture ON binaries.architecture = architecture.id;
+           """)
+        c.execute("""
+CREATE VIEW newest_all_associations AS
+    SELECT package, max(version) AS version, suite, architecture
+        FROM binaries_suite_arch
+        WHERE architecture = 2 GROUP BY package, suite, architecture;
+           """)
+        c.execute("""
+CREATE VIEW obsolete_any_by_all_associations AS
+    SELECT binaries_suite_arch.id, binaries_suite_arch.package,
+           binaries_suite_arch.version, binaries_suite_arch.suite,
+           binaries_suite_arch.architecture
+        FROM binaries_suite_arch
+        JOIN newest_all_associations
+            ON (binaries_suite_arch.package = newest_all_associations.package AND
+                binaries_suite_arch.version < newest_all_associations.version AND
+                binaries_suite_arch.suite = newest_all_associations.suite AND
+                binaries_suite_arch.architecture > 2);
+           """)
+        c.execute("""
+CREATE VIEW newest_any_associations AS
+    SELECT package, max(version) AS version, suite, architecture
+        FROM binaries_suite_arch
+        WHERE architecture > 2 GROUP BY package, suite, architecture;
+           """)
+        c.execute("""
+CREATE VIEW obsolete_any_associations AS
+    SELECT id, binaries_suite_arch.architecture, binaries_suite_arch.version,
+           binaries_suite_arch.package, binaries_suite_arch.suite
+        FROM binaries_suite_arch
+        JOIN newest_any_associations
+            ON binaries_suite_arch.architecture = newest_any_associations.architecture AND
+               binaries_suite_arch.package = newest_any_associations.package AND
+               binaries_suite_arch.suite = newest_any_associations.suite AND
+               binaries_suite_arch.version != newest_any_associations.version;
+           """)
+        c.execute("""
+CREATE VIEW source_suite AS
+    SELECT src_associations.id, source.id AS src, source.source, source.version,
+           src_associations.suite, suite.suite_name
+        FROM source
+        JOIN src_associations ON source.id = src_associations.source
+        JOIN suite ON suite.id = src_associations.suite;
+           """)
+        c.execute("""
+CREATE VIEW newest_source AS
+    SELECT source, max(version) AS version, suite
+        FROM source_suite
+        GROUP BY source, suite;
+           """)
+        c.execute("""
+CREATE VIEW newest_src_association AS
+    SELECT id, src, source, version, suite
+        FROM source_suite
+        JOIN newest_source USING (source, version, suite);
+           """)
+        c.execute("""
+CREATE VIEW any_associations_source AS
+    SELECT bin_associations.id, bin_associations.suite, binaries.id AS bin,
+           binaries.package, binaries.version AS binver, binaries.architecture,
+           source.id AS src, source.source, source.version AS srcver
+        FROM bin_associations
+        JOIN binaries ON bin_associations.bin = binaries.id AND architecture != 2
+        JOIN source ON binaries.source = source.id;
+           """)
+        c.execute("""
+CREATE VIEW src_associations_src AS
+    SELECT src_associations.id, src_associations.suite, source.id AS src,
+           source.source, source.version
+        FROM src_associations
+        JOIN source ON src_associations.source = source.id;
+           """)
+        c.execute("""
+CREATE VIEW almost_obsolete_src_associations AS
+    SELECT src_associations_src.id, src_associations_src.src,
+           src_associations_src.source, src_associations_src.version, suite
+        FROM src_associations_src
+        LEFT JOIN any_associations_source USING (src, suite)
+        WHERE bin IS NULL;
+           """)
+        c.execute("""
+CREATE VIEW obsolete_src_associations AS
+    SELECT almost.id, almost.src, almost.source, almost.version, almost.suite
+        FROM almost_obsolete_src_associations as almost
+    JOIN newest_src_association AS newest
+        ON almost.source  = newest.source AND
+           almost.version < newest.version AND
+           almost.suite   = newest.suite;
+           """)
+        c.execute("""
+CREATE VIEW bin_associations_binaries AS
+    SELECT bin_associations.id, bin_associations.bin, binaries.package,
+           binaries.version, bin_associations.suite, binaries.architecture
+        FROM bin_associations
+        JOIN binaries ON bin_associations.bin = binaries.id;
+           """)
+        c.execute("""
+CREATE VIEW src_associations_bin AS
+    SELECT src_associations.id, src_associations.source, src_associations.suite,
+           binaries.id AS bin, binaries.architecture
+        FROM src_associations
+        JOIN source ON src_associations.source = source.id
+        JOIN binaries ON source.id = binaries.source;
+           """)
+        c.execute("""
+CREATE VIEW almost_obsolete_all_associations AS
+    SELECT bin_associations_binaries.id AS id, bin, bin_associations_binaries.package,
+           bin_associations_binaries.version, suite
+        FROM bin_associations_binaries
+        LEFT JOIN src_associations_bin USING (bin, suite, architecture)
+        WHERE source IS NULL AND architecture = 2;
+           """)
+        c.execute("""
+CREATE VIEW obsolete_all_associations AS
+    SELECT almost.id, almost.bin, almost.package, almost.version, almost.suite
+        FROM almost_obsolete_all_associations AS almost
+        JOIN newest_all_associations AS newest
+            ON almost.package = newest.package AND
+               almost.version < newest.version AND
+               almost.suite   = newest.suite;
+           """)
+
+        print "Committing"
+        c.execute("UPDATE config SET value = '25' WHERE name = 'db_revision'")
         self.db.commit()
 
-    except psycopg2.ProgrammingError, msg:
+    except psycopg2.InternalError, msg:
         self.db.rollback()
-        raise DBUpdateError, "Unable to apply process-new update 14, rollback issued. Error message : %s" % (str(msg))
+        raise DBUpdateError, "Database error, rollback issued. Error message : %s" % (str(msg))
 
diff --git a/dak/dakdb/update26.py b/dak/dakdb/update26.py
new file mode 100755 (executable)
index 0000000..1b9a7fc
--- /dev/null
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+"""
+Add created,modified columns for all tables.
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009  Barry deFreese <bdefreese@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+
+def do_update(self):
+    print "Add created, modified fields for all tables."
+
+    updatetables = ['architecture', 'archive', 'bin_associations', 'bin_contents',
+        'binaries', 'binary_acl', 'binary_acl_map', 'build_queue', 'build_queue_files',
+        'changes', 'changes_pending_binaries', 'changes_pending_files',
+        'changes_pending_files_map', 'changes_pending_source', 'changes_pending_source_files',
+        'changes_pool_files', 'component', 'config', 'dsc_files', 'files', 'fingerprint',
+        'keyring_acl_map', 'keyrings', 'location', 'maintainer', 'new_comments', 'override',
+        'override_type', 'policy_queue', 'priority', 'section', 'source', 'source_acl',
+        'src_associations', 'src_format', 'src_uploaders', 'suite', 'suite_architectures',
+        'suite_build_queue_copy', 'suite_src_formats', 'uid', 'upload_blocks']
+
+    c = self.db.cursor()
+
+    print "Create trigger function."
+    c.execute("""CREATE OR REPLACE FUNCTION tfunc_set_modified() RETURNS trigger AS $$
+    BEGIN NEW.modified = now(); return NEW; END;
+    $$ LANGUAGE 'plpgsql'""")
+
+    try:
+        for updatetable in updatetables:
+
+            print "Add created field to %s." % updatetable
+            c.execute("ALTER TABLE %s ADD COLUMN created TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now()" % updatetable)
+
+            print "Add modified field to %s." % updatetable
+            c.execute("ALTER TABLE %s ADD COLUMN modified TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now()" % updatetable)
+
+            print "Create modified trigger."
+            c.execute("""CREATE TRIGGER modified_%s BEFORE UPDATE ON %s
+            FOR EACH ROW EXECUTE PROCEDURE tfunc_set_modified()""" % (updatetable, updatetable))
+
+        print "Committing"
+        c.execute("UPDATE config SET value = '26' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.InternalError, msg:
+        self.db.rollback()
+        raise DBUpdateError, "Database error, rollback issued. Error message : %s" % (str(msg))
+
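To see the new trigger at work, any UPDATE on a listed table should bump modified past created (a sketch; the suite row used is an arbitrary example):

    # Sketch of the modified-timestamp trigger firing (arbitrary row).
    c.execute("UPDATE suite SET suite_name = suite_name WHERE id = 1")
    c.execute("SELECT modified >= created FROM suite WHERE id = 1")
    print c.fetchone()  # expect (True,) once tfunc_set_modified has run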
diff --git a/dak/dakdb/update27.py b/dak/dakdb/update27.py
new file mode 100755 (executable)
index 0000000..b06932b
--- /dev/null
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+"""
+Add views for new obsolete source detection.
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009  Torsten Werner <twerner@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+
+def do_update(self):
+    print "Add/modify views for obsolete source detection."
+
+    try:
+        c = self.db.cursor()
+
+        print "Replace old views."
+        # joins src_associations and source
+        c.execute("""
+CREATE OR REPLACE VIEW source_suite AS
+    SELECT src_associations.id, source.id AS src, source.source, source.version,
+           src_associations.suite, suite.suite_name, source.install_date
+        FROM source
+        JOIN src_associations ON source.id = src_associations.source
+        JOIN suite ON suite.id = src_associations.suite;
+            """)
+        # joins bin_associations and binaries
+        c.execute("""
+CREATE OR REPLACE VIEW bin_associations_binaries AS
+    SELECT bin_associations.id, bin_associations.bin, binaries.package,
+           binaries.version, bin_associations.suite, binaries.architecture,
+           binaries.source
+        FROM bin_associations
+        JOIN binaries ON bin_associations.bin = binaries.id;
+            """)
+
+        print "Grant permissions to views."
+        c.execute("GRANT SELECT ON binfiles_suite_component_arch TO PUBLIC;");
+        c.execute("GRANT SELECT ON srcfiles_suite_component TO PUBLIC;");
+        c.execute("GRANT SELECT ON binaries_suite_arch TO PUBLIC;");
+        c.execute("GRANT SELECT ON newest_all_associations TO PUBLIC;");
+        c.execute("GRANT SELECT ON obsolete_any_by_all_associations TO PUBLIC;");
+        c.execute("GRANT SELECT ON newest_any_associations TO PUBLIC;");
+        c.execute("GRANT SELECT ON obsolete_any_associations TO PUBLIC;");
+        c.execute("GRANT SELECT ON source_suite TO PUBLIC;");
+        c.execute("GRANT SELECT ON newest_source TO PUBLIC;");
+        c.execute("GRANT SELECT ON newest_src_association TO PUBLIC;");
+        c.execute("GRANT SELECT ON any_associations_source TO PUBLIC;");
+        c.execute("GRANT SELECT ON src_associations_src TO PUBLIC;");
+        c.execute("GRANT SELECT ON almost_obsolete_src_associations TO PUBLIC;");
+        c.execute("GRANT SELECT ON obsolete_src_associations TO PUBLIC;");
+        c.execute("GRANT SELECT ON bin_associations_binaries TO PUBLIC;");
+        c.execute("GRANT SELECT ON src_associations_bin TO PUBLIC;");
+        c.execute("GRANT SELECT ON almost_obsolete_all_associations TO PUBLIC;");
+        c.execute("GRANT SELECT ON obsolete_all_associations TO PUBLIC;");
+
+        print "Committing"
+        c.execute("UPDATE config SET value = '27' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.InternalError, msg:
+        self.db.rollback()
+        raise DBUpdateError, "Database error, rollback issued. Error message : %s" % (str(msg))
+
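The install_date column that source_suite now carries is what cruft-report's obsolete_source query orders by; a direct peek (sketch only; 'unstable' is an example suite name):

    # Peek at install_date via the redefined view (example suite name).
    c.execute("""
    SELECT source, version, to_char(install_date, 'YYYY-MM-DD')
        FROM source_suite
        WHERE suite_name = %s
        ORDER BY install_date""", ('unstable',))
    for (source, version, installed) in c.fetchall():
        print "%s %s installed %s" % (source, version, installed)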
diff --git a/dak/dakdb/update28.py b/dak/dakdb/update28.py
new file mode 100644 (file)
index 0000000..9e5c066
--- /dev/null
@@ -0,0 +1,270 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Adding a trainee field to the process-new notes
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009  Mike O'Connor <stew@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+
+################################################################################
+
+import psycopg2
+import time
+from daklib.dak_exceptions import DBUpdateError
+from daklib.dbconn import DBConn
+from daklib.config import Config
+from daklib import utils
+
+################################################################################
+
+def suites():
+    """
+    return a list of suites to operate on
+    """
+    if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
+        suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
+    else:
+        suites = [ 'unstable', 'testing' ]
+#            suites = Config().SubTree("Suite").List()
+
+    return suites
+
+def arches(cursor, suite):
+    """
+    return a list of archs to operate on
+    """
+    arch_list = []
+    cursor.execute("""SELECT s.architecture, a.arch_string
+    FROM suite_architectures s
+    JOIN architecture a ON (s.architecture=a.id)
+    WHERE suite = :suite""", {'suite' : suite })
+
+    while True:
+        r = cursor.fetchone()
+        if not r:
+            break
+
+        if r[1] != "source" and r[1] != "all":
+            arch_list.append((r[0], r[1]))
+
+    return arch_list
+
+def do_update(self):
+    """
+    Adding contents table as first step to maybe, finally getting rid
+    of apt-ftparchive
+    """
+
+    print __doc__
+
+    try:
+        c = self.db.cursor()
+
+        c.execute("""CREATE TABLE pending_bin_contents (
+        id serial NOT NULL,
+        package text NOT NULL,
+        version debversion NOT NULL,
+        arch int NOT NULL,
+        filename text NOT NULL,
+        type int NOT NULL,
+        PRIMARY KEY(id))""" );
+
+        c.execute("""CREATE TABLE deb_contents (
+        filename text,
+        section text,
+        package text,
+        binary_id integer,
+        arch integer,
+        suite integer)""" )
+
+        c.execute("""CREATE TABLE udeb_contents (
+        filename text,
+        section text,
+        package text,
+        binary_id integer,
+        suite integer,
+        arch integer)""" )
+
+        c.execute("""ALTER TABLE ONLY deb_contents
+        ADD CONSTRAINT deb_contents_arch_fkey
+        FOREIGN KEY (arch) REFERENCES architecture(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""ALTER TABLE ONLY udeb_contents
+        ADD CONSTRAINT udeb_contents_arch_fkey
+        FOREIGN KEY (arch) REFERENCES architecture(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""ALTER TABLE ONLY deb_contents
+        ADD CONSTRAINT deb_contents_pkey
+        PRIMARY KEY (filename,package,arch,suite);""")
+
+        c.execute("""ALTER TABLE ONLY udeb_contents
+        ADD CONSTRAINT udeb_contents_pkey
+        PRIMARY KEY (filename,package,arch,suite);""")
+
+        c.execute("""ALTER TABLE ONLY deb_contents
+        ADD CONSTRAINT deb_contents_suite_fkey
+        FOREIGN KEY (suite) REFERENCES suite(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""ALTER TABLE ONLY udeb_contents
+        ADD CONSTRAINT udeb_contents_suite_fkey
+        FOREIGN KEY (suite) REFERENCES suite(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""ALTER TABLE ONLY deb_contents
+        ADD CONSTRAINT deb_contents_binary_fkey
+        FOREIGN KEY (binary_id) REFERENCES binaries(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""ALTER TABLE ONLY udeb_contents
+        ADD CONSTRAINT udeb_contents_binary_fkey
+        FOREIGN KEY (binary_id) REFERENCES binaries(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""CREATE INDEX ind_deb_contents_binary ON deb_contents(binary_id);""" )
+
+
+        suite_list = suites()
+
+        for suite in [i.lower() for i in suite_list]:
+            suite_id = DBConn().get_suite_id(suite)
+            arch_list = arches(c, suite_id)
+
+            for (arch_id,arch_str) in arch_list:
+                c.execute( "CREATE INDEX ind_deb_contents_%s_%s ON deb_contents (arch,suite) WHERE (arch=2 OR arch=%d) AND suite=$d"%(arch_str,suite,arch_id,suite_id) )
+
+            for section, sname in [("debian-installer","main"),
+                                  ("non-free/debian-installer", "nonfree")]:
+                c.execute( "CREATE INDEX ind_udeb_contents_%s_%s ON udeb_contents (section,suite) WHERE section=%s AND suite=$d"%(sname,suite,section,suite_id) )
+
+
+        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_bin_a() RETURNS trigger AS  $$
+    event = TD["event"]
+    if event == "DELETE" or event == "UPDATE":
+
+        plpy.execute(plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 and suite=$2",
+                                  ["int","int"]),
+                                  [TD["old"]["bin"], TD["old"]["suite"]])
+
+    if event == "INSERT" or event == "UPDATE":
+
+       content_data = plpy.execute(plpy.prepare(
+            """SELECT s.section, b.package, b.architecture, ot.type
+            FROM override o
+            JOIN override_type ot on o.type=ot.id
+            JOIN binaries b on b.package=o.package
+            JOIN files f on b.file=f.id
+            JOIN location l on l.id=f.location
+            JOIN section s on s.id=o.section
+            WHERE b.id=$1
+            AND o.suite=$2
+            """,
+            ["int", "int"]),
+            [TD["new"]["bin"], TD["new"]["suite"]])[0]
+
+       tablename="%s_contents" % content_data['type']
+
+       plpy.execute(plpy.prepare("""DELETE FROM %s
+                   WHERE package=$1 and arch=$2 and suite=$3""" % tablename,
+                   ['text','int','int']),
+                   [content_data['package'],
+                   content_data['architecture'],
+                   TD["new"]["suite"]])
+
+       filenames = plpy.execute(plpy.prepare(
+           "SELECT bc.file FROM bin_contents bc where bc.binary_id=$1",
+           ["int"]),
+           [TD["new"]["bin"]])
+
+       for filename in filenames:
+           plpy.execute(plpy.prepare(
+               """INSERT INTO %s
+                   (filename,section,package,binary_id,arch,suite)
+                   VALUES($1,$2,$3,$4,$5,$6)""" % tablename,
+               ["text","text","text","int","int","int"]),
+               [filename["file"],
+                content_data["section"],
+                content_data["package"],
+                TD["new"]["bin"],
+                content_data["architecture"],
+                TD["new"]["suite"]] )
+$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
+""")
+
+
+        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_override() RETURNS trigger AS  $$
+    event = TD["event"]
+    if event == "UPDATE":
+
+        otype = plpy.execute(plpy.prepare("SELECT type from override_type where id=$1",["int"]),[TD["new"]["type"]] )[0];
+        if otype["type"].endswith("deb"):
+            section = plpy.execute(plpy.prepare("SELECT section from section where id=$1",["int"]),[TD["new"]["section"]] )[0];
+
+            table_name = "%s_contents" % otype["type"]
+            plpy.execute(plpy.prepare("UPDATE %s set section=$1 where package=$2 and suite=$3" % table_name,
+                                      ["text","text","int"]),
+                                      [section["section"],
+                                      TD["new"]["package"],
+                                      TD["new"]["suite"]])
+
+$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
+""")
+
+        c.execute("""CREATE OR REPLACE FUNCTION update_contents_for_override()
+                      RETURNS trigger AS  $$
+    event = TD["event"]
+    if event == "UPDATE" or event == "INSERT":
+        row = TD["new"]
+        r = plpy.execute(plpy.prepare( """SELECT 1 from suite_architectures sa
+                  JOIN binaries b ON b.architecture = sa.architecture
+                  WHERE b.id = $1 and sa.suite = $2""",
+                ["int", "int"]),
+                [row["bin"], row["suite"]])
+        if not len(r):
+            plpy.error("Illegal architecture for this suite")
+
+$$ LANGUAGE plpythonu VOLATILE;""")
+
+        c.execute( """CREATE TRIGGER illegal_suite_arch_bin_associations_trigger
+                      BEFORE INSERT OR UPDATE ON bin_associations
+                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
+
+        c.execute( """CREATE TRIGGER bin_associations_contents_trigger
+                      AFTER INSERT OR UPDATE OR DELETE ON bin_associations
+                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_bin_a();""")
+        c.execute("""CREATE TRIGGER override_contents_trigger
+                      AFTER UPDATE ON override
+                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
+
+
+        c.execute( "CREATE INDEX ind_deb_contents_name ON deb_contents(package);");
+        c.execute( "CREATE INDEX ind_udeb_contents_name ON udeb_contents(package);");
+
+        c.execute("UPDATE config SET value = '28' WHERE name = 'db_revision'")
+
+        self.db.commit()
+
+    except psycopg2.ProgrammingError, msg:
+        self.db.rollback()
+        raise DBUpdateError, "Unable to apply process-new update 28, rollback issued. Error message : %s" % (str(msg))
+
diff --git a/dak/dominate.py b/dak/dominate.py
new file mode 100755 (executable)
index 0000000..7bb74c6
--- /dev/null
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+
+"""
+Remove obsolete source and binary associations from suites.
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009  Torsten Werner <twerner@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+from daklib.dbconn import *
+from daklib.config import Config
+from daklib import daklog, utils
+import apt_pkg, sys
+
+Options = None
+Logger = None
+
+def fetch(reason, query, args, session):
+    idList = []
+    for row in session.execute(query, args).fetchall():
+        (id, package, version, suite_name, architecture) = row
+        if Options['No-Action']:
+            print "Delete %s %s from %s architecture %s (%s, %d)" % \
+                (package, version, suite_name, architecture, reason, id)
+        else:
+            Logger.log([reason, package, version, suite_name, \
+               architecture, id])
+        idList.append(id)
+    return idList
+
+def obsoleteAnyByAllAssociations(suite, session):
+    query = """
+        SELECT obsolete.id, package, obsolete.version, suite_name, arch_string
+            FROM obsolete_any_by_all_associations AS obsolete
+            JOIN architecture ON obsolete.architecture = architecture.id
+            JOIN suite ON obsolete.suite = suite.id
+            WHERE suite = :suite
+    """
+    return fetch('newer_all', query, { 'suite': suite }, session)
+
+def obsoleteAnyAssociations(suite, session):
+    query = """
+        SELECT obsolete.id, package, obsolete.version, suite_name, arch_string
+            FROM obsolete_any_associations AS obsolete
+            JOIN architecture ON obsolete.architecture = architecture.id
+            JOIN suite ON obsolete.suite = suite.id
+            WHERE suite = :suite
+    """
+    return fetch('newer_any', query, { 'suite': suite }, session)
+
+def obsoleteSrcAssociations(suite, session):
+    query = """
+        SELECT obsolete.id, source, obsolete.version, suite_name,
+           'source' AS arch_string
+            FROM obsolete_src_associations AS obsolete
+            JOIN suite ON obsolete.suite = suite.id
+            WHERE suite = :suite
+    """
+    return fetch('old_and_unreferenced', query, { 'suite': suite }, session)
+
+def obsoleteAllAssociations(suite, session):
+    query = """
+        SELECT obsolete.id, package, obsolete.version, suite_name,
+           'all' AS arch_string
+            FROM obsolete_all_associations AS obsolete
+            JOIN suite ON obsolete.suite = suite.id
+            WHERE suite = :suite
+    """
+    return fetch('old_and_unreferenced', query, { 'suite': suite }, session)
+
+def deleteAssociations(table, idList, session):
+    query = """
+        DELETE
+            FROM %s
+            WHERE id = :id
+    """ % table
+    session.execute(query, [{'id': id} for id in idList])
+
+def doDaDoDa(suite, session):
+    # keep this part disabled because it is too dangerous
+    #idList = obsoleteAnyByAllAssociations(suite, session)
+    #deleteAssociations('bin_associations', idList, session)
+
+    idList = obsoleteAnyAssociations(suite, session)
+    deleteAssociations('bin_associations', idList, session)
+
+    idList = obsoleteSrcAssociations(suite, session)
+    deleteAssociations('src_associations', idList, session)
+
+    idList = obsoleteAllAssociations(suite, session)
+    deleteAssociations('bin_associations', idList, session)
+
+def usage():
+    print """Usage: dak dominate [OPTIONS]
+Remove obsolete source and binary associations from suites.
+
+    -s, --suite=SUITE          act on this suite
+    -h, --help                 show this help and exit
+    -n, --no-action            don't commit changes
+    -f, --force                also clean up untouchable suites
+
+SUITE can be a comma- (or space-) separated list, e.g.
+    --suite=testing,unstable"""
+    sys.exit()
+
+def main():
+    global Options, Logger
+    cnf = Config()
+    Arguments = [('h', "help",      "Obsolete::Options::Help"),
+                 ('s', "suite",     "Obsolete::Options::Suite", "HasArg"),
+                 ('n', "no-action", "Obsolete::Options::No-Action"),
+                 ('f', "force",     "Obsolete::Options::Force")]
+    query_suites = DBConn().session().query(Suite)
+    suites = [suite.suite_name for suite in query_suites.all()]
+    if not cnf.has_key('Obsolete::Options::Suite'):
+        cnf['Obsolete::Options::Suite'] = ','.join(suites)
+    cnf['Obsolete::Options::Help'] = ''
+    cnf['Obsolete::Options::No-Action'] = ''
+    cnf['Obsolete::Options::Force'] = ''
+    apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
+    Options = cnf.SubTree("Obsolete::Options")
+    if Options['Help']:
+        usage()
+    Logger = daklog.Logger(cnf.Cnf, "dominate")
+    session = DBConn().session()
+    for suite_name in utils.split_args(Options['Suite']):
+        suite = session.query(Suite).filter_by(suite_name = suite_name).one()
+        if not suite.untouchable or Options['Force']:
+            doDaDoDa(suite.suite_id, session)
+    if Options['No-Action']:
+        session.rollback()
+    else:
+        session.commit()
+
+if __name__ == '__main__':
+    main()
+
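A programmatic dry run mirroring main() above would look like this (sketch only; 'unstable' is an example suite, and the rollback keeps the run side-effect free):

    from daklib.dbconn import DBConn, Suite
    import dak.dominate as dominate

    # fetch() consults the module-global Options, so set it first.
    dominate.Options = {'No-Action': True}
    session = DBConn().session()
    suite = session.query(Suite).filter_by(suite_name='unstable').one()
    dominate.doDaDoDa(suite.suite_id, session)
    session.rollback()  # equivalent to --no-action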
index 55b55aa9a7ce4fd447b85b71657e0caeea97a919..ea865ed839f38e44ee5726351e38fffcf47439d6 100755 (executable)
@@ -306,9 +306,9 @@ def read_changes_or_dsc (suite, filename):
 def create_depends_string (suite, depends_tree):
     result = ""
     if suite == 'experimental':
-        suite_where = " in ('experimental','unstable')"
+        suite_where = "in ('experimental','unstable')"
     else:
-        suite_where = " ='%s'" % suite
+        suite_where = "'%s'" % suite
 
     comma_count = 1
     session = DBConn().session()
diff --git a/dak/import_new_files.py b/dak/import_new_files.py
new file mode 100755 (executable)
index 0000000..5b132c8
--- /dev/null
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Import known_changes files
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009  Mike O'Connor <stew@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+
+################################################################################
+
+import sys
+import os
+import logging
+import threading
+import glob
+import apt_pkg
+from daklib.dbconn import DBConn, get_dbchange, get_policy_queue, session_wrapper, ChangePendingFile, get_location, check_poolfile
+from daklib.config import Config
+from daklib.queue import Upload
+from daklib.utils import poolify
+
+# where in dak.conf all of our configuration will be stowed
+options_prefix = "NewFiles"
+options_prefix = "%s::Options" % options_prefix
+
+log = logging.getLogger()
+
+################################################################################
+
+
+def usage (exit_code=0):
+    print """Usage: dak import-new-files [options]
+
+OPTIONS
+     -v, --verbose
+        show verbose information messages
+
+     -q, --quiet
+        suppress all output but errors
+
+"""
+    sys.exit(exit_code)
+
+class ImportNewFiles(object):
+    @session_wrapper
+    def __init__(self, session=None):
+        cnf = Config()
+        try:
+            newq = get_policy_queue('new', session)
+            for changes_fn in glob.glob(newq.path + "/*.changes"):
+                changes_bn = os.path.basename(changes_fn)
+                chg = get_dbchange(changes_bn, session)
+
+                u = Upload()
+                success = u.load_changes(changes_fn)
+                u.pkg.changes_file = changes_bn
+                u.check_hashes()
+
+                if not chg:
+                    chg = u.pkg.add_known_changes(newq.path, newq.policy_queue_id, session)
+                    session.add(chg)
+
+                if not success:
+                    log.critical("failed to load %s" % changes_fn)
+                    sys.exit(1)
+                else:
+                    log.critical("ACCLAIM: %s" % changes_fn)
+
+                files=[]
+                for chg_fn in u.pkg.files.keys():
+                    try:
+                        f = open(os.path.join(newq.path, chg_fn))
+                        cpf = ChangePendingFile()
+                        cpf.filename = chg_fn
+                        cpf.size = u.pkg.files[chg_fn]['size']
+                        cpf.md5sum = u.pkg.files[chg_fn]['md5sum']
+
+                        if u.pkg.files[chg_fn].has_key('sha1sum'):
+                            cpf.sha1sum = u.pkg.files[chg_fn]['sha1sum']
+                        else:
+                            log.warning("Having to generate sha1sum for %s" % chg_fn)
+                            f.seek(0)
+                            cpf.sha1sum = apt_pkg.sha1sum(f)
+
+                        if u.pkg.files[chg_fn].has_key('sha256sum'):
+                            cpf.sha256sum = u.pkg.files[chg_fn]['sha256sum']
+                        else:
+                            log.warning("Having to generate sha256sum for %s" % chg_fn)
+                            f.seek(0)
+                            cpf.sha256sum = apt_pkg.sha256sum(f)
+
+                        session.add(cpf)
+                        files.append(cpf)
+                        f.close()
+                    except IOError:
+                        # Can't find the file, try to look it up in the pool
+                        poolname = poolify(u.pkg.changes["source"], u.pkg.files[chg_fn]["component"])
+                        l = get_location(cnf["Dir::Pool"], u.pkg.files[chg_fn]["component"], session=session)
+                        if not l:
+                            log.critical("ERROR: Can't find location for %s (component %s)" % (chg_fn, u.pkg.files[chg_fn]["component"]))
+                            sys.exit(1)
+
+                        found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
+                                                         u.pkg.files[chg_fn]['size'],
+                                                         u.pkg.files[chg_fn]["md5sum"],
+                                                         l.location_id,
+                                                         session=session)
+
+                        if found is None:
+                            log.critical("ERROR: Found multiple files for %s in pool" % chg_fn)
+                            sys.exit(1)
+                        elif found is False and poolfile is not None:
+                            log.critical("ERROR: md5sum / size mismatch for %s in pool" % chg_fn)
+                            sys.exit(1)
+                        else:
+                            if poolfile is None:
+                                log.critical("ERROR: Could not find %s in pool" % chg_fn)
+                                sys.exit(1)
+                            else:
+                                chg.poolfiles.append(poolfile)
+
+
+                chg.files = files
+
+
+            session.commit()
+
+        except KeyboardInterrupt:
+            print("Caught C-c; terminating.")
+            utils.warn("Caught C-c; terminating.")
+            self.plsDie()
+
+
+def main():
+    cnf = Config()
+
+    arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")),
+                 ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")),
+                 ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")),
+                ]
+
+    args = apt_pkg.ParseCommandLine(cnf.Cnf, arguments, sys.argv)
+
+    if len(args) > 0:
+        usage(1)
+
+    if cnf.has_key("%s::%s" % (options_prefix,"Help")):
+        usage(0)
+
+    level=logging.INFO
+    if cnf.has_key("%s::%s" % (options_prefix,"Quiet")):
+        level=logging.ERROR
+
+    elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")):
+        level=logging.DEBUG
+
+
+    logging.basicConfig( level=level,
+                         format='%(asctime)s %(levelname)s %(message)s',
+                         stream = sys.stderr )
+
+    ImportNewFiles()
+
+
+if __name__ == '__main__':
+    main()
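
The checksum fallback above leans on apt_pkg's hashing helpers, which in the python-apt API this tree targets accept an open file object and hash from the current offset. A minimal sketch of the pattern, with a hypothetical filename:

    import apt_pkg

    f = open("example_1.0_amd64.deb")   # hypothetical file from a .changes
    f.seek(0)
    sha1 = apt_pkg.sha1sum(f)
    f.seek(0)                           # rewind before the second pass
    sha256 = apt_pkg.sha256sum(f)
    f.close()
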
index 07c6193aede7a23c8efaf8e7765a8d023d870d58..958486710251b99e5ac5e2ebf5a6cf75efef37b2 100755 (executable)
@@ -99,8 +99,9 @@ def main ():
     keys = postgres_unames.keys()
     keys.sort()
     for uname in keys:
-        if not passwd_unames.has_key(uname)and not known_postgres_unames.has_key(uname):
-            print "W: %s is in Postgres but not the passwd file or list of known Postgres users." % (uname)
+        if not passwd_unames.has_key(uname) and not known_postgres_unames.has_key(uname):
+            print "I: Deleting %s from Postgres, no longer in passwd or list of known Postgres users" % (uname)
+            q = session.execute('DROP USER "%s"' % (uname))
 
     keys = passwd_unames.keys()
     keys.sort()
index e253967749c948a3d20fe2c6e28053ecad12a7dd..ce35d925ff978fca8bcb11ff06942c06a9e5ed19 100755 (executable)
@@ -240,9 +240,9 @@ def main ():
             Subst["__BCC__"] = "Bcc: " + ", ".join(bcc)
         else:
             Subst["__BCC__"] = "X-Filler: 42"
-        Subst["__CC__"] = "Cc: " + package + "@" + Cnf["Dinstall::PackagesServer"] + "\nX-DAK: dak override"
-        Subst["__ADMIN_ADDRESS__"] = Cnf["Dinstall::MyAdminAddress"]
-        Subst["__DISTRO__"] = Cnf["Dinstall::MyDistribution"]
+        Subst["__CC__"] = "Cc: " + package + "@" + cnf["Dinstall::PackagesServer"] + "\nX-DAK: dak override"
+        Subst["__ADMIN_ADDRESS__"] = cnf["Dinstall::MyAdminAddress"]
+        Subst["__DISTRO__"] = cnf["Dinstall::MyDistribution"]
         Subst["__WHOAMI__"] = utils.whoami()
         Subst["__SOURCE__"] = package
 
index 3028edbbf48ce175bfafce7c9c2f60a912cbbd3f..5a88ffcfab435fd1bb281b0e3d625044870caf16 100755 (executable)
@@ -60,10 +60,11 @@ from daklib.dbconn import *
 from daklib.queue import *
 from daklib import daklog
 from daklib import utils
-from daklib.regexes import re_no_epoch, re_default_answer, re_isanum
+from daklib.regexes import re_no_epoch, re_default_answer, re_isanum, re_package
 from daklib.dak_exceptions import CantOpenError, AlreadyLockedError, CantGetLockError
 from daklib.summarystats import SummaryStats
 from daklib.config import Config
+from daklib.changesutils import *
 
 # Globals
 Options = None
@@ -95,6 +96,8 @@ def recheck(upload, session):
 
         if answer == 'R':
             upload.do_reject(manual=0, reject_message='\n'.join(upload.rejects))
+            upload.pkg.remove_known_changes(session=session)
+            session.commit()
             return 0
         elif answer == 'S':
             return 0
@@ -106,104 +109,6 @@ def recheck(upload, session):
 
 ################################################################################
 
-def indiv_sg_compare (a, b):
-    """Sort by source name, source, version, 'have source', and
-       finally by filename."""
-    # Sort by source version
-    q = apt_pkg.VersionCompare(a["version"], b["version"])
-    if q:
-        return -q
-
-    # Sort by 'have source'
-    a_has_source = a["architecture"].get("source")
-    b_has_source = b["architecture"].get("source")
-    if a_has_source and not b_has_source:
-        return -1
-    elif b_has_source and not a_has_source:
-        return 1
-
-    return cmp(a["filename"], b["filename"])
-
-############################################################
-
-def sg_compare (a, b):
-    a = a[1]
-    b = b[1]
-    """Sort by have note, source already in database and time of oldest upload."""
-    # Sort by have note
-    a_note_state = a["note_state"]
-    b_note_state = b["note_state"]
-    if a_note_state < b_note_state:
-        return -1
-    elif a_note_state > b_note_state:
-        return 1
-    # Sort by source already in database (descending)
-    source_in_database = cmp(a["source_in_database"], b["source_in_database"])
-    if source_in_database:
-        return -source_in_database
-
-    # Sort by time of oldest upload
-    return cmp(a["oldest"], b["oldest"])
-
-def sort_changes(changes_files, session):
-    """Sort into source groups, then sort each source group by version,
-    have source, filename.  Finally, sort the source groups by have
-    note, time of oldest upload of each source upload."""
-    if len(changes_files) == 1:
-        return changes_files
-
-    sorted_list = []
-    cache = {}
-    # Read in all the .changes files
-    for filename in changes_files:
-        u = Upload()
-        try:
-            u.pkg.changes_file = filename
-            u.load_changes(filename)
-            u.update_subst()
-            cache[filename] = copy.copy(u.pkg.changes)
-            cache[filename]["filename"] = filename
-        except:
-            sorted_list.append(filename)
-            break
-    # Divide the .changes into per-source groups
-    per_source = {}
-    for filename in cache.keys():
-        source = cache[filename]["source"]
-        if not per_source.has_key(source):
-            per_source[source] = {}
-            per_source[source]["list"] = []
-        per_source[source]["list"].append(cache[filename])
-    # Determine oldest time and have note status for each source group
-    for source in per_source.keys():
-        q = session.query(DBSource).filter_by(source = source).all()
-        per_source[source]["source_in_database"] = len(q)>0
-        source_list = per_source[source]["list"]
-        first = source_list[0]
-        oldest = os.stat(first["filename"])[stat.ST_MTIME]
-        have_note = 0
-        for d in per_source[source]["list"]:
-            mtime = os.stat(d["filename"])[stat.ST_MTIME]
-            if mtime < oldest:
-                oldest = mtime
-            have_note += has_new_comment(d["source"], d["version"], session)
-        per_source[source]["oldest"] = oldest
-        if not have_note:
-            per_source[source]["note_state"] = 0; # none
-        elif have_note < len(source_list):
-            per_source[source]["note_state"] = 1; # some
-        else:
-            per_source[source]["note_state"] = 2; # all
-        per_source[source]["list"].sort(indiv_sg_compare)
-    per_source_items = per_source.items()
-    per_source_items.sort(sg_compare)
-    for i in per_source_items:
-        for j in i[1]["list"]:
-            sorted_list.append(j["filename"])
-    return sorted_list
-
-################################################################################
-
 class Section_Completer:
     def __init__ (self, session):
         self.sections = []
@@ -588,9 +493,8 @@ def prod_maintainer (note, upload):
     prod_mail_message = utils.TemplateSubst(
         Subst,cnf["Dir::Templates"]+"/process-new.prod")
 
-    # Send the prod mail if appropriate
-    if not cnf["Dinstall::Options::No-Mail"]:
-        utils.send_mail(prod_mail_message)
+    # Send the prod mail
+    utils.send_mail(prod_mail_message)
 
     print "Sent proding message"
 
@@ -603,28 +507,12 @@ def do_new(upload, session):
     changes = upload.pkg.changes
     cnf = Config()
 
+    # Check for a valid distribution
+    upload.check_distributions()
+
     # Make a copy of distribution we can happily trample on
     changes["suite"] = copy.copy(changes["distribution"])
 
-    # Fix up the list of target suites
-    for suite in changes["suite"].keys():
-        override = cnf.Find("Suite::%s::OverrideSuite" % (suite))
-        if override:
-            (olderr, newerr) = (get_suite(suite, session) == None,
-                                get_suite(override, session) == None)
-            if olderr or newerr:
-                (oinv, newinv) = ("", "")
-                if olderr: oinv = "invalid "
-                if newerr: ninv = "invalid "
-                print "warning: overriding %ssuite %s to %ssuite %s" % (
-                        oinv, suite, ninv, override)
-            del changes["suite"][suite]
-            changes["suite"][override] = 1
-    # Validate suites
-    for suite in changes["suite"].keys():
-        if get_suite(suite, session) is None:
-            utils.fubar("%s has invalid suite '%s' (possibly overriden).  say wha?" % (changes, suite))
-
     # The main NEW processing loop
     done = 0
     while not done:
@@ -666,6 +554,7 @@ def do_new(upload, session):
             try:
                 check_daily_lock()
                 done = add_overrides (new, upload, session)
+                new_accept(upload, Options["No-Action"], session)
                 Logger.log(["NEW ACCEPT: %s" % (upload.pkg.changes_file)])
             except CantGetLockError:
                 print "Hello? Operator! Give me the number for 911!"
@@ -675,11 +564,12 @@ def do_new(upload, session):
         elif answer == 'E' and not Options["Trainee"]:
             new = edit_overrides (new, upload, session)
         elif answer == 'M' and not Options["Trainee"]:
-            upload.pkg.remove_known_changes()
             aborted = upload.do_reject(manual=1,
                                        reject_message=Options["Manual-Reject"],
-                                       note=get_new_comments(changes.get("source", ""), session=session))
+                                       notes=get_new_comments(changes.get("source", ""), session=session))
             if not aborted:
+                upload.pkg.remove_known_changes(session=session)
+                session.commit()
                 Logger.log(["NEW REJECT: %s" % (upload.pkg.changes_file)])
                 done = 1
         elif answer == 'N':
@@ -769,6 +659,8 @@ def do_byhand(upload, session):
         elif answer == 'M':
             Logger.log(["BYHAND REJECT: %s" % (upload.pkg.changes_file)])
             upload.do_reject(manual=1, reject_message=Options["Manual-Reject"])
+            upload.pkg.remove_known_changes(session=session)
+            session.commit()
             done = 1
         elif answer == 'S':
             done = 1
@@ -816,55 +708,26 @@ def lock_package(package):
     finally:
         os.unlink(path)
 
-def move_file_to_queue(to_q, f, session):
-    """mark a file as being in the unchecked queue"""
-    # update the queue_file entry for the existing queue
-    qf = session.query(QueueFile).filter_by(queueid=to_q.queueid,
-                                            filename=f.filename)
-    qf.queue = to_q
-
-    # update the changes_pending_files row
-    f.queue = to_q
+class clean_holding(object):
+    def __init__(self,pkg):
+        self.pkg = pkg
 
-def changes_to_unchecked(changes, session):
-    """move a changes file to unchecked"""
-    unchecked = get_policy_queue('unchecked', session );
-    changes.in_queue = unchecked
+    def __enter__(self):
+        pass
 
-    for f in changes.pkg.files:
-        move_file_to_queue(unchecked, f)
+    def __exit__(self, type, value, traceback):
+        h = Holding()
 
-    # actually move files
-    changes.move_to_queue(unchecked)
+        for f in self.pkg.files.keys():
+            if os.path.exists(os.path.join(h.holding_dir, f)):
+                os.unlink(os.path.join(h.holding_dir, f))
 
-def _accept(upload):
-    if Options["No-Action"]:
-        return
-    (summary, short_summary) = upload.build_summaries()
-#    upload.accept(summary, short_summary, targetqueue)
-#    os.unlink(upload.pkg.changes_file[:-8]+".dak")
-    changes_to_unchecked(upload)
-
-def do_accept(upload):
-    print "ACCEPT"
-    cnf = Config()
-    if not Options["No-Action"]:
-        (summary, short_summary) = upload.build_summaries()
-
-        if cnf.FindB("Dinstall::SecurityQueueHandling"):
-            upload.dump_vars(cnf["Dir::Queue::Embargoed"])
-            upload.move_to_queue(get_policy_queue('embargoed'))
-            upload.queue_build("embargoed", cnf["Dir::Queue::Embargoed"])
-            # Check for override disparities
-            upload.Subst["__SUMMARY__"] = summary
-        else:
-            # Just a normal upload, accept it...
-            _accept(upload)
 
 def do_pkg(changes_file, session):
+    new_queue = get_policy_queue('new', session)
     u = Upload()
     u.pkg.changes_file = changes_file
+    (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changes_file)
     u.load_changes(changes_file)
     u.pkg.directory = new_queue.path
     u.update_subst()
@@ -879,14 +742,33 @@ def do_pkg(changes_file, session):
         u.Subst["__BCC__"] = bcc
 
     files = u.pkg.files
+    for deb_filename, f in files.items():
+        if deb_filename.endswith(".udeb") or deb_filename.endswith(".deb"):
+            u.binary_file_checks(deb_filename, session)
+            u.check_binary_against_db(deb_filename, session)
+        else:
+            u.source_file_checks(deb_filename, session)
+            u.check_source_against_db(deb_filename, session)
+
+        u.pkg.changes["suite"] = copy.copy(u.pkg.changes["distribution"])
 
     try:
         with lock_package(u.pkg.changes["source"]):
-            if not recheck(u, session):
-                return
-
-            do_new(u,session)
-
+            with clean_holding(u.pkg):
+                if not recheck(u, session):
+                    return
+
+                # FIXME: This does need byhand checks added!
+                new = determine_new(u.pkg.changes, files)
+                if new:
+                    do_new(u, session)
+                else:
+                    try:
+                        check_daily_lock()
+                        new_accept(u, Options["No-Action"], session)
+                    except CantGetLockError:
+                        print "Hello? Operator! Give me the number for 911!"
+                        print "Dinstall in the locked area, cant process packages, come back later"
 #             (new, byhand) = check_status(files)
 #             if new or byhand:
 #                 if new:
@@ -963,9 +845,6 @@ def main():
         sys.stderr.write("Sorting changes...\n")
     changes_files = sort_changes(changes_files, session)
 
-    # Kill me now? **FIXME**
-    cnf["Dinstall::Options::No-Mail"] = ""
-
     for changes_file in changes_files:
         changes_file = utils.validate_changes_file_arg(changes_file, 0)
         if not changes_file:
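
clean_holding above spells out the context-manager protocol by hand so that holding copies are swept even when recheck() or do_new() returns early. Under Python 2.5+ the same cleanup could equally be sketched with contextlib (a hypothetical alternative, not what this commit ships):

    import os
    from contextlib import contextmanager

    @contextmanager
    def clean_holding_cm(pkg, holding_dir):
        try:
            yield
        finally:
            # mirrors clean_holding.__exit__: drop any holding copies
            for f in pkg.files.keys():
                path = os.path.join(holding_dir, f)
                if os.path.exists(path):
                    os.unlink(path)
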
diff --git a/dak/process_policy.py b/dak/process_policy.py
new file mode 100755 (executable)
index 0000000..16a8810
--- /dev/null
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+# vim:set et ts=4 sw=4:
+
+""" Handles packages from policy queues
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2001, 2002, 2003, 2004, 2005, 2006  James Troup <james@nocrew.org>
+@copyright: 2009 Joerg Jaspert <joerg@debian.org>
+@copyright: 2009 Frank Lichtenheld <djpig@debian.org>
+@copyright: 2009 Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+# <mhy> So how do we handle that at the moment?
+# <stew> Probably incorrectly.
+
+################################################################################
+
+import os
+import copy
+import sys
+import apt_pkg
+
+from daklib.dbconn import *
+from daklib.queue import *
+from daklib import daklog
+from daklib import utils
+from daklib.dak_exceptions import CantOpenError, AlreadyLockedError, CantGetLockError
+from daklib.config import Config
+from daklib.changesutils import *
+
+# Globals
+Options = None
+Logger = None
+
+################################################################################
+
+def usage (exit_code=0):
+    print """Usage: dak process-policy QUEUE
+Process uploads sitting in policy queue QUEUE.
+
+  -h, --help                 show this help and exit.
+  -n, --no-action            don't commit changes.
+"""
+    sys.exit(exit_code)
+
+################################################################################
+
+def do_comments(dir, srcqueue, opref, npref, line, fn, session):
+    for comm in [ x for x in os.listdir(dir) if x.startswith(opref) ]:
+        lines = open("%s/%s" % (dir, comm)).readlines()
+        if len(lines) == 0 or lines[0] != line + "\n": continue
+        changes_files = [ x for x in os.listdir(".") if x.startswith(comm[len(opref):]+"_")
+                                and x.endswith(".changes") ]
+        changes_files = sort_changes(changes_files, session)
+        for f in changes_files:
+            print "Processing changes file: %s" % f
+            changes_f = utils.validate_changes_file_arg(f, 0)
+            if not changes_f:
+                print "Couldn't validate changes file %s" % f
+                continue
+            fn(changes_f, srcqueue, "".join(lines[1:]), session)
+
+        if opref != npref and not Options["No-Action"]:
+            newcomm = npref + comm[len(opref):]
+            os.rename("%s/%s" % (dir, comm), "%s/%s" % (dir, newcomm))
+
+################################################################################
+
+def comment_accept(changes_file, srcqueue, comments, session):
+    u = Upload()
+    u.pkg.changes_file = changes_file
+    u.load_changes(changes_file)
+    u.update_subst()
+
+    if not Options["No-Action"]:
+        destqueue = get_policy_queue('newstage', session)
+        if changes_to_queue(u, srcqueue, destqueue, session):
+            Logger.log(["Policy Queue ACCEPT: %s:  %s" % (srcqueue.queue_name, u.pkg.changes_file)])
+        else:
+            print "E: Failed to migrate %s" % u.pkg.changes_file
+
+################################################################################
+
+def comment_reject(changes_file, srcqueue, comments, session):
+    u = Upload()
+    u.pkg.changes_file = changes_file
+    u.load_changes(changes_file)
+    u.update_subst()
+
+    u.rejects.append(comments)
+
+    cnf = Config()
+    bcc = "X-DAK: dak process-policy"
+    if cnf.has_key("Dinstall::Bcc"):
+        u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
+    else:
+        u.Subst["__BCC__"] = bcc
+
+    if not Options["No-Action"]:
+        u.do_reject(manual=0, reject_message='\n'.join(u.rejects))
+        u.pkg.remove_known_changes(session=session)
+        session.commit()
+
+        Logger.log(["Policy Queue REJECT: %s:  %s" % (srcqueue.queue_name, u.pkg.changes_file)])
+
+
+################################################################################
+
+def main():
+    global Options, Logger
+
+    cnf = Config()
+    session = DBConn().session()
+
+    Arguments = [('h',"help","Process-Policy::Options::Help"),
+                 ('n',"no-action","Process-Policy::Options::No-Action")]
+
+    for i in ["help", "no-action"]:
+        if not cnf.has_key("Process-Policy::Options::%s" % (i)):
+            cnf["Process-Policy::Options::%s" % (i)] = ""
+
+    queue_name = apt_pkg.ParseCommandLine(cnf.Cnf,Arguments,sys.argv)
+
+    if len(queue_name) != 1:
+        print "E: Specify exactly one policy queue"
+        sys.exit(1)
+
+    queue_name = queue_name[0]
+
+    Options = cnf.SubTree("Process-Policy::Options")
+
+    if Options["Help"]:
+        usage()
+
+    if not Options["No-Action"]:
+        try:
+            Logger = daklog.Logger(cnf, "process-policy")
+        except CantOpenError, e:
+            Logger = None
+
+    # Find policy queue
+
+    try:
+        pq = session.query(PolicyQueue).filter_by(queue_name=queue_name).one()
+    except NoResultFound:
+        print "E: Cannot find policy queue %s" % queue_name
+        sys.exit(1)
+
+    commentsdir = os.path.join(pq.path, 'COMMENTS')
+    # The comments stuff relies on being in the right directory
+    os.chdir(pq.path)
+    do_comments(commentsdir, pq, "ACCEPT.", "ACCEPTED.", "OK", comment_accept, session)
+    do_comments(commentsdir, pq, "REJECT.", "REJECTED.", "NOTOK", comment_reject, session)
+
+
+################################################################################
+
+if __name__ == '__main__':
+    main()
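
do_comments() above is the whole ftpmaster-facing protocol: a flag file under COMMENTS/ named ACCEPT.<prefix> (or REJECT.<prefix>) whose first line is exactly OK (or NOTOK), with any remaining lines passed through as the comment; unless -n is given, processed flags are renamed to ACCEPTED.*/REJECTED.* so they are not picked up twice. A sketch of queueing an accept by hand, with a hypothetical queue path and package prefix:

    import os

    queue_path = "/srv/queue/new"     # hypothetical policy queue path
    flag = os.path.join(queue_path, "COMMENTS", "ACCEPT.hello")
    f = open(flag, "w")
    f.write("OK\n")                   # must match the 'line' argument exactly
    f.write("Looks sane, accepting.\n")
    f.close()
    # the next 'dak process-policy new' run matches hello_*.changes
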
index ec7fbd35a19a70e05be39d803516bc0fccf6ca1d..4d1cbc2f6677da8d5ae58fe4c05ea2beef6b97f6 100755 (executable)
@@ -196,6 +196,15 @@ def usage (exit_code=0):
 
 ###############################################################################
 
+def byebye():
+    if not Options["No-Action"]:
+        # Clean out the queue files
+        session = DBConn().session()
+        session.execute("DELETE FROM changes_pending_files WHERE id NOT IN (SELECT file_id FROM changes_pending_files_map )")
+        session.commit()
+
+
+
 def action(u, session):
     cnf = Config()
     holding = Holding()
@@ -249,7 +258,7 @@ def action(u, session):
             for s in u.pkg.changes["distribution"].keys():
                 suite = get_suite(s, session)
                 if suite.policy_queue:
-                    if not chg or chg.approved_for_id != su.policy_queue.policy_queue_id:
+                    if not chg or chg.approved_for_id != suite.policy_queue.policy_queue_id:
                         # This routine will check whether the upload is a binary
                         # upload when the source is already in the target suite.  If
                         # so, we skip the policy queue, otherwise we go there.
@@ -284,8 +293,10 @@ def action(u, session):
     elif answer == 'A':
         if not chg:
             chg = u.pkg.add_known_changes(holding.holding_dir, session=session)
+        session.commit()
         u.accept(summary, short_summary, session)
         u.check_override()
+        chg.clean_from_queue()
         session.commit()
         u.remove()
     elif answer == 'P':
@@ -301,6 +312,7 @@ def action(u, session):
         session.commit()
         u.remove()
     elif answer == 'Q':
+        byebye()
         sys.exit(0)
 
     session.commit()
@@ -481,9 +493,12 @@ def main():
                                                 utils.size_type(int(summarystats.accept_bytes)))
         Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
 
+    byebye()
+
     if not Options["No-Action"]:
         if log_urgency:
             UrgencyLog().close()
+
     Logger.close()
 
 ###############################################################################
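
byebye() above garbage-collects changes_pending_files rows once nothing in changes_pending_files_map references them any more. The same predicate is useful for checking by hand how many rows a run has orphaned; a sketch, assuming a configured dak database:

    from daklib.dbconn import DBConn

    session = DBConn().session()
    orphans = session.execute(
        """SELECT count(*) FROM changes_pending_files
            WHERE id NOT IN (SELECT file_id FROM changes_pending_files_map)""").fetchone()
    print "%d orphaned pending-file rows" % orphans[0]
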
index e270cc71e109a124b77e0ffbb76abaa042c33137..f1d805650b1397c4b06e0f23591f2c7f0fadd020 100755 (executable)
@@ -40,7 +40,7 @@ import apt_pkg
 
 from daklib import utils
 from daklib.queue import Upload
-from daklib.dbconn import DBConn, has_new_comment, DBChange
+from daklib.dbconn import DBConn, has_new_comment, DBChange, get_uid_from_fingerprint
 from daklib.textutils import fix_maintainer
 from daklib.dak_exceptions import *
 
@@ -360,6 +360,8 @@ def process_changes_files(changes_files, type, log):
         source = i[1]["list"][0]["source"]
         if len(source) > max_source_len:
             max_source_len = len(source)
+        binary_list = i[1]["list"][0]["binary"].keys()
+        binary = ', '.join(binary_list)
         arches = {}
         versions = {}
         for j in i[1]["list"]:
@@ -395,12 +397,11 @@ def process_changes_files(changes_files, type, log):
                 closes=j["closes"].keys()
                 if dbc:
                     fingerprint = dbc.fingerprint
-
-                # TODO: This won't work now as it never gets set
-                #       Fix so that we compare the changed-by/maintainer and the signing key
-                #       Should probably be done somewhere more central
-                #if j.has_key("sponsoremail"):
-                #    sponsor=j["sponsoremail"]
+                    uid = get_uid_from_fingerprint(fingerprint)
+                    sponsor_name = uid.name
+                    sponsor_email = uid.uid + "@debian.org"
+                    if sponsor_name != maintainer["maintainername"] and sponsor_name != changeby["changedbyname"] and \
+                       sponsor_email != maintainer["maintaineremail"] and sponsor_email != changeby["changedbyemail"]:
+                        sponsor = sponsor_email
 
             for arch in j["architecture"].keys():
                 arches[arch] = ""
@@ -418,7 +419,7 @@ def process_changes_files(changes_files, type, log):
             note = " | [N]"
         else:
             note = ""
-        entries.append([source, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, filename])
+        entries.append([source, binary, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, filename])
 
     # direction entry consists of "Which field, which direction, time-consider" where
     # time-consider says how we should treat last_modified. Thats all.
@@ -460,11 +461,12 @@ def process_changes_files(changes_files, type, log):
     if Cnf.has_key("Queue-Report::Options::822"):
         # print stuff out in 822 format
         for entry in entries:
-            (source, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, changes_file) = entry
+            (source, binary, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, changes_file) = entry
 
             # We'll always have Source, Version, Arch, Maintainer, and Dist
             # For the rest, check to see if we have them, then print them out
             log.write("Source: " + source + "\n")
+            log.write("Binary: " + binary + "\n")
             log.write("Version: " + version_list + "\n")
             log.write("Architectures: ")
             log.write( (", ".join(arch_list.split(" "))) + "\n")
@@ -502,7 +504,7 @@ def process_changes_files(changes_files, type, log):
             source_count = len(per_source_items)
             table_header(type.upper(), source_count, total_count)
             for entry in entries:
-                (source, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, undef) = entry
+                (source, binary, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, undef) = entry
                 table_row(source, version_list, arch_list, time_pp(last_modified), maint, distribution, closes, fingerprint, sponsor, changedby)
             table_footer(type.upper())
     elif not Cnf.has_key("Queue-Report::Options::822"):
@@ -511,7 +513,7 @@ def process_changes_files(changes_files, type, log):
 
         msg = ""
         for entry in entries:
-            (source, version_list, arch_list, note, last_modified, undef, undef, undef, undef, undef, undef, undef) = entry
+            (source, binary, version_list, arch_list, note, last_modified, undef, undef, undef, undef, undef, undef, undef) = entry
             msg += format % (source, version_list, arch_list, note, time_pp(last_modified))
 
         if msg:
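
The sponsor detection above treats an upload as sponsored when the signing key's uid matches neither the Maintainer nor the Changed-By identity. A worked illustration with hypothetical values:

    # hypothetical identities
    sponsor_name = "Jane Uploader"
    sponsor_email = "jane@debian.org"
    maintainer = {"maintainername": "John Maint", "maintaineremail": "john@example.org"}
    changeby = {"changedbyname": "John Maint", "changedbyemail": "john@example.org"}

    sponsor = ""
    if sponsor_name != maintainer["maintainername"] and sponsor_name != changeby["changedbyname"] and \
       sponsor_email != maintainer["maintaineremail"] and sponsor_email != changeby["changedbyemail"]:
        sponsor = sponsor_email    # signed by a third party: a sponsored upload
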
index b21efcce43ac336797034dc84bcdf2743bcc15c2..bcf1300e155fcdb0b0077b68b95366f503bc0b87 100755 (executable)
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 """ Output html for packages in NEW """
-# Copyright (C) 2007 Joerg Jaspert <joerg@debian.org>
+# Copyright (C) 2007, 2009 Joerg Jaspert <joerg@debian.org>
 
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -30,9 +30,13 @@ import os, sys, time
 import apt_pkg
 import examine_package
 
-from daklib.queue import determine_new, check_valid
+from daklib.dbconn import *
+from daklib.queue import determine_new, check_valid, Upload, get_policy_queue
 from daklib import utils
 from daklib.regexes import re_source_ext
+from daklib.config import Config
+from daklib import daklog
+from daklib.changesutils import *
 
 # Globals
 Cnf = None
@@ -135,41 +139,53 @@ def html_footer():
   </body>
 </html>
 """
-
+#"""
 ################################################################################
 
 
-def do_pkg(changes_file):
-    c = Changes()
-    c.load_dot_dak(changes_file)
-    files = c.files
-    changes = c.changes
-
-    c.changes["suite"] = copy(c.changes["distribution"])
-    distribution = c.changes["distribution"].keys()[0]
-    # Find out what's new
-    new = determine_new(c.changes, c.files, 0)
+def do_pkg(changes_file, session):
+    u = Upload()
+    u.pkg.changes_file = changes_file
+    (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changes_file)
+    u.load_changes(changes_file)
+    new_queue = get_policy_queue('new', session)
+    u.pkg.directory = new_queue.path
+    u.update_subst()
+    origchanges = os.path.abspath(u.pkg.changes_file)
+    files = u.pkg.files
+    changes = u.pkg.changes
+
+    for deb_filename, f in files.items():
+        if deb_filename.endswith(".udeb") or deb_filename.endswith(".deb"):
+            u.binary_file_checks(deb_filename, session)
+            u.check_binary_against_db(deb_filename, session)
+        else:
+            u.source_file_checks(deb_filename, session)
+            u.check_source_against_db(deb_filename, session)
+    u.pkg.changes["suite"] = u.pkg.changes["distribution"]
+
+    new = determine_new(u.pkg.changes, files, 0)
 
     stdout_fd = sys.stdout
 
-    htmlname = c.changes["source"] + "_" + c.changes["version"] + ".html"
+    htmlname = changes["source"] + "_" + changes["version"] + ".html"
     sources.add(htmlname)
     # do not generate html output if that source/version already has one.
-    if not os.path.exists(os.path.join(Cnf["Show-New::HTMLPath"],htmlname)):
-        sys.stdout = open(os.path.join(Cnf["Show-New::HTMLPath"],htmlname),"w")
+    if not os.path.exists(os.path.join(cnf["Show-New::HTMLPath"],htmlname)):
+        sys.stdout = open(os.path.join(cnf["Show-New::HTMLPath"],htmlname),"w")
 
         filestoexamine = []
         for pkg in new.keys():
             for fn in new[pkg]["files"]:
-                if (c.files[fn].has_key("new") and
-                    (c.files[fn]["type"] == "dsc" or
-                     not re_source_ext.match(c.files[fn]["type"]))):
+                if (files[fn].has_key("new") and
+                    (files[fn]["type"] == "dsc" or
+                     not re_source_ext.match(files[fn]["type"]))):
                     filestoexamine.append(fn)
 
-        html_header(c.changes["source"], filestoexamine)
+        html_header(changes["source"], filestoexamine)
 
         check_valid(new)
-        examine_package.display_changes( distribution, changes_file)
+        examine_package.display_changes( u.pkg.changes["distribution"], changes_file)
 
         for fn in filter(lambda fn: fn.endswith(".dsc"), filestoexamine):
-            examine_package.check_dsc(distribution, fn)
+            examine_package.check_dsc(u.pkg.changes["distribution"], fn)
@@ -192,20 +208,24 @@ def usage (exit_code=0):
 
 ################################################################################
 
-def init():
-    global Cnf, Options
+def init(session):
+    global cnf, Options
 
-    Cnf = utils.get_conf()
+    cnf = Config()
 
     Arguments = [('h',"help","Show-New::Options::Help"),
                  ("p","html-path","Show-New::HTMLPath","HasArg")]
 
     for i in ["help"]:
-        if not Cnf.has_key("Show-New::Options::%s" % (i)):
-            Cnf["Show-New::Options::%s" % (i)] = ""
+        if not cnf.has_key("Show-New::Options::%s" % (i)):
+            cnf["Show-New::Options::%s" % (i)] = ""
 
-    changes_files = apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv)
-    Options = Cnf.SubTree("Show-New::Options")
+    changes_files = apt_pkg.ParseCommandLine(cnf.Cnf,Arguments,sys.argv)
+    if len(changes_files) == 0:
+        new_queue = get_policy_queue('new', session)
+        changes_files = utils.get_changes_files(new_queue.path)
+
+    Options = cnf.SubTree("Show-New::Options")
 
     if Options["help"]:
         usage()
@@ -217,7 +237,8 @@ def init():
 ################################################################################
 
 def main():
-    changes_files = init()
+    session = DBConn().session()
+    changes_files = init(session)
 
     examine_package.use_html=1
 
@@ -226,11 +247,12 @@ def main():
         if not changes_file:
             continue
         print "\n" + changes_file
-        do_pkg (changes_file)
-    files = set(os.listdir(Cnf["Show-New::HTMLPath"]))
+        do_pkg (changes_file, session)
+
+    files = set(os.listdir(cnf["Show-New::HTMLPath"]))
     to_delete = filter(lambda x: x.endswith(".html"), files.difference(sources))
     for f in to_delete:
-        os.remove(os.path.join(Cnf["Show-New::HTMLPath"],f))
+        os.remove(os.path.join(cnf["Show-New::HTMLPath"],f))
 
 ################################################################################
 
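
With init() falling back to the NEW policy queue, show-new no longer needs explicit .changes arguments. The fallback lookup in isolation, assuming a configured dak environment:

    from daklib.dbconn import DBConn, get_policy_queue
    from daklib import utils

    session = DBConn().session()
    new_queue = get_policy_queue('new', session)
    for changes_file in utils.get_changes_files(new_queue.path):
        print changes_file
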
index 4c4ac78ee8a0ed5cb4ad44d9deaf3894a66a87fb..157e1c0a0febbcf6074f03ad49115c88ac583373 100755 (executable)
@@ -287,8 +287,8 @@ def write_transitions_from_file(from_file):
     """
 
     # Lets check if from_file is in the directory we expect it to be in
-    if not os.path.abspath(from_file).startswith(Cnf["Transitions::TempPath"]):
-        print "Will not accept transitions file outside of %s" % (Cnf["Transitions::TempPath"])
+    if not os.path.abspath(from_file).startswith(Cnf["Dir::TempPath"]):
+        print "Will not accept transitions file outside of %s" % (Cnf["Dir::TempPath"])
         sys.exit(3)
 
     if Options["sudo"]:
@@ -318,7 +318,7 @@ def temp_transitions_file(transitions):
            sudo-ed script and would be unreadable if it has default mkstemp mode
     """
 
-    (fd, path) = tempfile.mkstemp("", "transitions", Cnf["Transitions::TempPath"])
+    (fd, path) = tempfile.mkstemp("", "transitions", Cnf["Dir::TempPath"])
     os.chmod(path, 0644)
     f = open(path, "w")
     yaml.dump(transitions, f, default_flow_style=False)
@@ -577,13 +577,13 @@ def main():
                           (Cnf["Dinstall::Reject::ReleaseTransitions"]))
         sys.exit(1)
     # Also check if our temp directory is defined and existant
-    temppath = Cnf.get("Transitions::TempPath", "")
+    temppath = Cnf.get("Dir::TempPath", "")
     if temppath == "":
-        utils.warn("Transitions::TempPath not defined")
+        utils.warn("Dir::TempPath not defined")
         sys.exit(1)
     if not os.path.exists(temppath):
         utils.warn("Temporary path %s not found." %
-                          (Cnf["Transitions::TempPath"]))
+                          (Cnf["Dir::TempPath"]))
         sys.exit(1)
 
     if Options["import"]:
index ecdd99a79bf2ef0d76d96be91ba446c27ce8ec30..2d962dcb5d91ee7fda6ea838b2547feb3cf334ba 100755 (executable)
@@ -45,7 +45,7 @@ from daklib.dak_exceptions import DBUpdateError
 ################################################################################
 
 Cnf = None
-required_database_schema = 24
+required_database_schema = 28
 
 ################################################################################
 
index 6b7762681e8febb39b8531363e4261124c7eec79..e016638cd1db36296d67f4ea0505723106ac057e 100644 (file)
@@ -204,30 +204,75 @@ class Changes(object):
             else:
                 multivalues[key] = self.changes[key]
 
-        # TODO: Use ORM
-        session.execute(
-            """INSERT INTO changes
-              (changesname, in_queue, seen, source, binaries, architecture, version,
-              distribution, urgency, maintainer, fingerprint, changedby, date)
-              VALUES (:changesfile,:in_queue,:filetime,:source,:binary, :architecture,
-              :version,:distribution,:urgency,:maintainer,:fingerprint,:changedby,:date)""",
-              { 'changesfile':  self.changes_file,
-                'filetime':     filetime,
-                'in_queue':     in_queue,
-                'source':       self.changes["source"],
-                'binary':       multivalues["binary"],
-                'architecture': multivalues["architecture"],
-                'version':      self.changes["version"],
-                'distribution': multivalues["distribution"],
-                'urgency':      self.changes["urgency"],
-                'maintainer':   self.changes["maintainer"],
-                'fingerprint':  self.changes["fingerprint"],
-                'changedby':    self.changes["changed-by"],
-                'date':         self.changes["date"]} )
+        chg = DBChange()
+        chg.changesname = self.changes_file
+        chg.seen = filetime
+        chg.in_queue_id = in_queue
+        chg.source = self.changes["source"]
+        chg.binaries = multivalues["binary"]
+        chg.architecture = multivalues["architecture"]
+        chg.version = self.changes["version"]
+        chg.distribution = multivalues["distribution"]
+        chg.urgency = self.changes["urgency"]
+        chg.maintainer = self.changes["maintainer"]
+        chg.fingerprint = self.changes["fingerprint"]
+        chg.changedby = self.changes["changed-by"]
+        chg.date = self.changes["date"]
+
+        session.add(chg)
+
+        files = []
+        for chg_fn, entry in self.files.items():
+            try:
+                f = open(os.path.join(dirpath, chg_fn))
+                cpf = ChangePendingFile()
+                cpf.filename = chg_fn
+                cpf.size = entry['size']
+                cpf.md5sum = entry['md5sum']
+
+                if entry.has_key('sha1sum'):
+                    cpf.sha1sum = entry['sha1sum']
+                else:
+                    f.seek(0)
+                    cpf.sha1sum = apt_pkg.sha1sum(f)
+
+                if entry.has_key('sha256sum'):
+                    cpf.sha256sum = entry['sha256sum']
+                else:
+                    f.seek(0)
+                    cpf.sha256sum = apt_pkg.sha256sum(f)
+
+                session.add(cpf)
+                files.append(cpf)
+                f.close()
+
+            except IOError:
+                # Can't find the file, try to look it up in the pool
+                poolname = poolify(entry["source"], entry["component"])
+                l = get_location(cnf["Dir::Pool"], entry["component"], session=session)
+
+                found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
+                                                 entry['size'],
+                                                 entry["md5sum"],
+                                                 l.location_id,
+                                                 session=session)
+
+                if found is None:
+                    Logger.log(["E: Found multiple files for pool (%s) for %s" % (chg_fn, entry["component"])])
+                elif found is False and poolfile is not None:
+                    Logger.log(["E: md5sum/size mismatch for %s in pool" % (chg_fn)])
+                else:
+                    if poolfile is None:
+                        Logger.log(["E: Could not find %s in pool" % (chg_fn)])
+                    else:
+                        chg.poolfiles.append(poolfile)
+
+        chg.files = files
 
         session.commit()
+        chg = session.query(DBChange).filter_by(changesname = self.changes_file).one()
 
-        return session.query(DBChange).filter_by(changesname = self.changes_file).one()
+        return chg
 
     def unknown_files_fields(self, name):
         return sorted(list( set(self.files[name].keys()) -
diff --git a/daklib/changesutils.py b/daklib/changesutils.py
new file mode 100644 (file)
index 0000000..0aca121
--- /dev/null
@@ -0,0 +1,196 @@
+#!/usr/bin/env python
+# vim:set et ts=4 sw=4:
+
+"""Utilities for handling changes files
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2001, 2002, 2003, 2004, 2005, 2006  James Troup <james@nocrew.org>
+@copyright: 2009 Joerg Jaspert <joerg@debian.org>
+@copyright: 2009 Frank Lichtenheld <djpig@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import copy
+import os
+import stat
+import apt_pkg
+
+from daklib.dbconn import *
+from daklib.queue import *
+from daklib import utils
+from daklib.config import Config
+
+################################################################################
+
+__all__ = []
+
+################################################################################
+
+def indiv_sg_compare (a, b):
+    """Sort by source name, source, version, 'have source', and
+       finally by filename."""
+    # Sort by source version
+    q = apt_pkg.VersionCompare(a["version"], b["version"])
+    if q:
+        return -q
+
+    # Sort by 'have source'
+    a_has_source = a["architecture"].get("source")
+    b_has_source = b["architecture"].get("source")
+    if a_has_source and not b_has_source:
+        return -1
+    elif b_has_source and not a_has_source:
+        return 1
+
+    return cmp(a["filename"], b["filename"])
+
+__all__.append('indiv_sg_compare')
+
+############################################################
+
+def sg_compare (a, b):
+    a = a[1]
+    b = b[1]
+    """Sort by have note, source already in database and time of oldest upload."""
+    # Sort by have note
+    a_note_state = a["note_state"]
+    b_note_state = b["note_state"]
+    if a_note_state < b_note_state:
+        return -1
+    elif a_note_state > b_note_state:
+        return 1
+    # Sort by source already in database (descending)
+    source_in_database = cmp(a["source_in_database"], b["source_in_database"])
+    if source_in_database:
+        return -source_in_database
+
+    # Sort by time of oldest upload
+    return cmp(a["oldest"], b["oldest"])
+
+__all__.append('sg_compare')
+
+def sort_changes(changes_files, session):
+    """Sort into source groups, then sort each source group by version,
+    have source, filename.  Finally, sort the source groups by have
+    note, time of oldest upload of each source upload."""
+    if len(changes_files) == 1:
+        return changes_files
+
+    sorted_list = []
+    cache = {}
+    # Read in all the .changes files
+    for filename in changes_files:
+        u = Upload()
+        try:
+            u.pkg.changes_file = filename
+            u.load_changes(filename)
+            u.update_subst()
+            cache[filename] = copy.copy(u.pkg.changes)
+            cache[filename]["filename"] = filename
+        except:
+            sorted_list.append(filename)
+            break
+    # Divide the .changes into per-source groups
+    per_source = {}
+    for filename in cache.keys():
+        source = cache[filename]["source"]
+        if not per_source.has_key(source):
+            per_source[source] = {}
+            per_source[source]["list"] = []
+        per_source[source]["list"].append(cache[filename])
+    # Determine oldest time and have note status for each source group
+    for source in per_source.keys():
+        q = session.query(DBSource).filter_by(source = source).all()
+        per_source[source]["source_in_database"] = len(q)>0
+        source_list = per_source[source]["list"]
+        first = source_list[0]
+        oldest = os.stat(first["filename"])[stat.ST_MTIME]
+        have_note = 0
+        for d in per_source[source]["list"]:
+            mtime = os.stat(d["filename"])[stat.ST_MTIME]
+            if mtime < oldest:
+                oldest = mtime
+            have_note += has_new_comment(d["source"], d["version"], session)
+        per_source[source]["oldest"] = oldest
+        if not have_note:
+            per_source[source]["note_state"] = 0; # none
+        elif have_note < len(source_list):
+            per_source[source]["note_state"] = 1; # some
+        else:
+            per_source[source]["note_state"] = 2; # all
+        per_source[source]["list"].sort(indiv_sg_compare)
+    per_source_items = per_source.items()
+    per_source_items.sort(sg_compare)
+    for i in per_source_items:
+        for j in i[1]["list"]:
+            sorted_list.append(j["filename"])
+    return sorted_list
+
+__all__.append('sort_changes')
+
+################################################################################
+
+def changes_to_queue(upload, srcqueue, destqueue, session):
+    """Move a changes file to a different queue and mark as approved for the
+       source queue"""
+
+    try:
+        chg = session.query(DBChange).filter_by(changesname=os.path.basename(upload.pkg.changes_file)).one()
+    except NoResultFound:
+        return False
+
+    chg.approved_for_id = srcqueue.policy_queue_id
+
+    for f in chg.files:
+        # update the changes_pending_files row
+        f.queue = destqueue
+        utils.move(os.path.join(srcqueue.path, f.filename), destqueue.path, perms=int(destqueue.perms, 8))
+
+    utils.move(os.path.join(srcqueue.path, upload.pkg.changes_file), destqueue.path, perms=int(destqueue.perms, 8))
+    chg.in_queue = destqueue
+    session.commit()
+
+    return True
+
+__all__.append('changes_to_queue')
+
+def new_accept(upload, dry_run, session):
+    print "ACCEPT"
+
+    if not dry_run:
+        cnf = Config()
+
+        (summary, short_summary) = upload.build_summaries()
+
+        # XXX: mhy: I think this is wrong as these are all attributes on the
+        # build and policy queues now
+        if cnf.FindB("Dinstall::SecurityQueueHandling"):
+            upload.dump_vars(cnf["Dir::Queue::Embargoed"])
+            upload.move_to_queue(get_policy_queue('embargoed'))
+            upload.queue_build("embargoed", cnf["Dir::Queue::Embargoed"])
+            # Check for override disparities
+            upload.Subst["__SUMMARY__"] = summary
+        else:
+            # Just a normal upload, accept it...
+            srcqueue = get_policy_queue('new', session)
+            destqueue = get_policy_queue('newstage', session)
+
+            changes_to_queue(upload, srcqueue, destqueue, session)
+
+__all__.append('new_accept')
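
changes_to_queue() is now the single primitive that both new_accept() above and process-policy's comment_accept() drive. A hedged sketch of calling it directly, with a hypothetical changes filename:

    import os

    from daklib.dbconn import DBConn, get_policy_queue
    from daklib.queue import Upload
    from daklib.changesutils import changes_to_queue

    session = DBConn().session()
    srcqueue = get_policy_queue('new', session)
    destqueue = get_policy_queue('newstage', session)

    upload = Upload()
    upload.pkg.changes_file = "hello_1.0-1_amd64.changes"   # hypothetical upload
    upload.load_changes(os.path.join(srcqueue.path, upload.pkg.changes_file))

    if not changes_to_queue(upload, srcqueue, destqueue, session):
        print "E: no changes row found for %s" % upload.pkg.changes_file
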
index 0d5f4021dff90497471eaaa3447bb4e78910fe2a..0fe48f109b84bf0eeeee0f1e6731b50e9ace76f3 100644 (file)
@@ -379,16 +379,16 @@ def get_binary_from_name_suite(package, suitename, session=None):
 
     sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name
              FROM binaries b, files fi, location l, component c, bin_associations ba, suite su
-             WHERE b.package=:package
+             WHERE b.package='%(package)s'
                AND b.file = fi.id
                AND fi.location = l.id
                AND l.component = c.id
                AND ba.bin=b.id
                AND ba.suite = su.id
-               AND su.suite_name=:suitename
+               AND su.suite_name %(suitename)s
           ORDER BY b.version DESC"""
 
-    return session.execute(sql, {'package': package, 'suitename': suitename})
+    return session.execute(sql % {'package': package, 'suitename': suitename})
 
 __all__.append('get_binary_from_name_suite')
 
@@ -1444,6 +1444,19 @@ class DBChange(object):
     def __repr__(self):
         return '<DBChange %s>' % self.changesname
 
+    def clean_from_queue(self):
+        session = DBConn().session().object_session(self)
+
+        # Remove changes_pool_files entries
+        self.poolfiles = []
+
+        # Remove changes_pending_files references
+        self.files = []
+
+        # Clear out of queue
+        self.in_queue = None
+        self.approved_for_id = None
+
 __all__.append('DBChange')
 
 @session_wrapper
@@ -2302,7 +2315,7 @@ def add_dsc_to_db(u, filename, session=None):
 
     session.flush()
 
-    return dsc_component, dsc_location_id, pfs
+    return source, dsc_component, dsc_location_id, pfs
 
 __all__.append('add_dsc_to_db')
 
@@ -2884,6 +2897,16 @@ class DBConn(object):
                                  poolfiles = relation(PoolFile,
                                                       secondary=self.tbl_changes_pool_files,
                                                       backref="changeslinks"),
+                                 seen = self.tbl_changes.c.seen,
+                                 source = self.tbl_changes.c.source,
+                                 binaries = self.tbl_changes.c.binaries,
+                                 architecture = self.tbl_changes.c.architecture,
+                                 distribution = self.tbl_changes.c.distribution,
+                                 urgency = self.tbl_changes.c.urgency,
+                                 maintainer = self.tbl_changes.c.maintainer,
+                                 changedby = self.tbl_changes.c.changedby,
+                                 date = self.tbl_changes.c.date,
+                                 version = self.tbl_changes.c.version,
                                  files = relation(ChangePendingFile,
                                                   secondary=self.tbl_changes_pending_files_map,
                                                   backref="changesfile"),
@@ -2896,7 +2919,12 @@ class DBConn(object):
                properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
 
         mapper(ChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
+                                 filename = self.tbl_changes_pending_files.c.filename,
+                                 size = self.tbl_changes_pending_files.c.size,
+                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
+                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
+                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
 
         mapper(ChangePendingSource, self.tbl_changes_pending_source,
                properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
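
Note the contract change in get_binary_from_name_suite(): suitename is now interpolated verbatim, so it must carry its own SQL operator, which is exactly what the create_depends_string() hunk earlier in this diff builds. Both call shapes, with a hypothetical package name:

    from daklib.dbconn import get_binary_from_name_suite

    # single suite: the argument supplies the '=' itself
    rows = get_binary_from_name_suite("dpkg", "= 'unstable'")

    # the experimental view uses an IN list, as create_depends_string() does
    rows = get_binary_from_name_suite("dpkg", "in ('experimental','unstable')")

    for (package, version, component, suite_name) in rows:
        print package, version, component, suite_name
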
index b637738a5af73d86f1b978137727407d7d9bcf5c..f77416cadc224c7a6827608883df8db6037dd2c6 100644 (file)
@@ -46,6 +46,9 @@ class Holding(object):
 
             self.in_holding = {}
             self.holding_dir = Config()["Dir::Queue::Holding"]
+            # ftptrainees don't have write access to holding; fall back to a temp directory
+            if not os.access(self.holding_dir, os.W_OK):
+                self.holding_dir = Config()["Dir::TempPath"]
 
     def copy_to_holding(self, filename):
         base_filename = os.path.basename(filename)
old mode 100644 (file)
new mode 100755 (executable)
index d97b99f..a13c041
@@ -148,6 +148,22 @@ def determine_new(changes, files, warn=1):
         if f.has_key("othercomponents"):
             new[pkg]["othercomponents"] = f["othercomponents"]
 
+    # Fix up the list of target suites
+    cnf = Config()
+    for suite in changes["suite"].keys():
+        override = cnf.Find("Suite::%s::OverrideSuite" % (suite))
+        if override:
+            (olderr, newerr) = (get_suite(suite, session) == None,
+                                get_suite(override, session) == None)
+            if olderr or newerr:
+                (oinv, ninv) = ("", "")
+                if olderr: oinv = "invalid "
+                if newerr: ninv = "invalid "
+                print "warning: overriding %ssuite %s to %ssuite %s" % (
+                        oinv, suite, ninv, override)
+            del changes["suite"][suite]
+            changes["suite"][override] = 1
+
     for suite in changes["suite"].keys():
         for pkg in new.keys():
             ql = get_override(pkg, suite, new[pkg]["component"], new[pkg]["type"], session)
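
The fixup above rewrites an upload's target suites in place whenever dak.conf carries a Suite::<name>::OverrideSuite mapping. A toy illustration of the dict surgery, with hypothetical suite names:

    changes = {"suite": {"example-proposed-updates": 1}}   # hypothetical target
    override = "example"                                   # hypothetical OverrideSuite value

    for suite in changes["suite"].keys():    # keys() copies, so mutation is safe here
        del changes["suite"][suite]
        changes["suite"][override] = 1

    print changes["suite"]                   # {'example': 1}
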
@@ -272,6 +288,8 @@ class Upload(object):
         self.warnings = []
         self.notes = []
 
+        self.later_check_files = []
+
         self.pkg.reset()
 
     def package_info(self):
@@ -289,7 +307,7 @@ class Upload(object):
         for title, messages in msgs:
             if messages:
                 msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
-        msg += '\n'
+        msg += '\n\n'
 
         return msg
 
@@ -427,7 +445,7 @@ class Upload(object):
             self.pkg.changes["changedbyemail"] = ""
 
             self.rejects.append("%s: Changed-By field ('%s') failed to parse: %s" \
-                   % (filename, changes["changed-by"], msg))
+                   % (filename, self.pkg.changes["changed-by"], msg))
 
         # Ensure all the values in Closes: are numbers
         if self.pkg.changes.has_key("closes"):
@@ -804,8 +822,7 @@ class Upload(object):
             for f in file_keys:
                 ret = holding.copy_to_holding(f)
                 if ret is not None:
-                    # XXX: Should we bail out here or try and continue?
-                    self.rejects.append(ret)
+                    self.warnings.append('Could not copy %s to holding; will attempt to find it in the DB later' % f)
 
             os.chdir(cwd)
 
@@ -820,7 +837,7 @@ class Upload(object):
             # if in the pool or in a queue other than unchecked, reject
             if (dbc.in_queue is None) \
                    or (dbc.in_queue is not None
-                       and dbc.in_queue.queue_name != 'unchecked'):
+                       and dbc.in_queue.queue_name not in ["unchecked", "newstage"]):
                 self.rejects.append("%s file already known to dak" % base_filename)
         except NoResultFound, e:
             # not known, good
@@ -847,7 +864,9 @@ class Upload(object):
                     if os.path.exists(f):
                         self.rejects.append("Can't read `%s'. [permission denied]" % (f))
                     else:
-                        self.rejects.append("Can't read `%s'. [file not found]" % (f))
+                        # Don't reject outright; mark the file for a later check, as
+                        # the .orig may turn up in the pool
+                        self.later_check_files.append(f)
                 entry["type"] = "unreadable"
                 continue
 
@@ -992,6 +1011,10 @@ class Upload(object):
         self.check_dsc_against_db(dsc_filename, session)
         session.close()
 
+        # Finally, check if we're missing any files
+        for f in self.later_check_files:
+            self.rejects.append("Could not find file %s references in changes" % f)
+
         return True
 
     ###########################################################################
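
Taken together, the later_check_files hunks turn an immediate rejection into a deferred one: an unreadable file is merely remembered, check_dsc_against_db strikes it off the list if a pool copy turns up, and anything still listed at the end of check_dsc is rejected. A condensed sketch of that flow, with names following the diff:

    later_check_files = []
    rejects = []

    def note_unreadable(f):
        later_check_files.append(f)        # defer, don't reject yet

    def found_in_pool(f):
        try:
            later_check_files.remove(f)    # a pool copy satisfies the reference
        except ValueError:
            pass

    def finish_checks():
        for f in later_check_files:
            rejects.append("Could not find file %s referenced in changes" % f)
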
@@ -1445,16 +1468,15 @@ class Upload(object):
             self.check_dm_upload(fpr, session)
         else:
             # Check source-based permissions for other types
-            if self.pkg.changes["architecture"].has_key("source"):
-                if fpr.source_acl.access_level is None:
-                    rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
-                    rej += '\nPlease contact ftpmaster if you think this is incorrect'
-                    self.rejects.append(rej)
-                    return
-            else:
-                # If not a DM, we allow full upload rights
-                uid_email = "%s@debian.org" % (fpr.uid.uid)
-                self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
+            if self.pkg.changes["architecture"].has_key("source") and \
+                fpr.source_acl.access_level is None:
+                rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
+                rej += '\nPlease contact ftpmaster if you think this is incorrect'
+                self.rejects.append(rej)
+                return
+            # If not a DM, we allow full upload rights
+            uid_email = "%s@debian.org" % (fpr.uid.uid)
+            self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
 
 
         # Check binary upload permissions
@@ -1815,7 +1837,7 @@ distribution."""
         # Add the .dsc file to the DB first
         for newfile, entry in self.pkg.files.items():
             if entry["type"] == "dsc":
-                dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
+                source, dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
                 for j in pfs:
                     poolfiles.append(j)
 
@@ -1827,6 +1849,7 @@ distribution."""
         # If this is a sourceful diff only upload that is moving
         # cross-component we need to copy the .orig files into the new
         # component too for the same reasons as above.
+        # XXX: mhy: I think this should be in add_dsc_to_db
         if self.pkg.changes["architecture"].has_key("source"):
             for orig_file in self.pkg.orig_files.keys():
                 if not self.pkg.orig_files[orig_file].has_key("id"):
@@ -1844,21 +1867,45 @@ distribution."""
                 new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
 
                 # TODO: Care about size/md5sum collisions etc
-                (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session)
+                (found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
 
+                # TODO: what happens if newf isn't None? Something has gone badly wrong and we should cope with it
                 if newf is None:
                     utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
                     newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
 
-                    # TODO: Check that there's only 1 here
-                    source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0]
-                    dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
-                    dscf.poolfile_id = newf.file_id
-                    session.add(dscf)
                     session.flush()
 
+                    # Don't reference the old file from this changes file
+                    for p in poolfiles:
+                        if p.file_id == oldf.file_id:
+                            poolfiles.remove(p)
+
                     poolfiles.append(newf)
 
+                    # Fix up the DSC references
+                    toremove = []
+
+                    for df in source.srcfiles:
+                        if df.poolfile.file_id == oldf.file_id:
+                            # Add a new DSC entry and mark the old one for deletion
+                            # Don't do it in the loop so we don't change the thing we're iterating over
+                            newdscf = DSCFile()
+                            newdscf.source_id = source.source_id
+                            newdscf.poolfile_id = newf.file_id
+                            session.add(newdscf)
+
+                            toremove.append(df)
+
+                    for df in toremove:
+                        session.delete(df)
+
+                    # Flush our changes
+                    session.flush()
+
+                    # Make sure that our source object is up-to-date
+                    session.expire(source)
+
         # Install the files into the pool
         for newfile, entry in self.pkg.files.items():
             destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
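
The DSC fixup above uses a collect-then-delete idiom: matching rows are gathered into toremove first and deleted afterwards, so source.srcfiles is never mutated while being iterated (contrast the poolfiles.remove() loop a few lines earlier, which only works because at most one entry matches). The idiom on a plain list, runnable on its own; with ORM rows the removal becomes session.delete() plus a flush:

    rows = [{"id": 1}, {"id": 2}, {"id": 1}]
    toremove = [r for r in rows if r["id"] == 1]   # decide first...
    for r in toremove:
        rows.remove(r)                             # ...then mutate
    assert rows == [{"id": 2}]
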
@@ -1887,16 +1934,13 @@ distribution."""
         if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
             UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
 
-        # Send accept mail, announce to lists, close bugs and check for
-        # override disparities
-        if not cnf["Dinstall::Options::No-Mail"]:
-            self.update_subst()
-            self.Subst["__SUITE__"] = ""
-            self.Subst["__SUMMARY__"] = summary
-            mail_message = utils.TemplateSubst(self.Subst,
-                                               os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
-            utils.send_mail(mail_message)
-            self.announce(short_summary, 1)
+        self.update_subst()
+        self.Subst["__SUITE__"] = ""
+        self.Subst["__SUMMARY__"] = summary
+        mail_message = utils.TemplateSubst(self.Subst,
+                                           os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
+        utils.send_mail(mail_message)
+        self.announce(short_summary, 1)
 
         ## Helper stuff for DebBugs Version Tracking
         if cnf.Find("Dir::Queue::BTSVersionTrack"):
@@ -1958,11 +2002,8 @@ distribution."""
 
         cnf = Config()
 
-        # Abandon the check if:
-        #  a) override disparity checks have been disabled
-        #  b) we're not sending mail
-        if not cnf.FindB("Dinstall::OverrideDisparityCheck") or \
-           cnf["Dinstall::Options::No-Mail"]:
+        # Abandon the check if override disparity checks have been disabled
+        if not cnf.FindB("Dinstall::OverrideDisparityCheck"):
             return
 
         summary = self.pkg.check_override()
@@ -2062,7 +2103,7 @@ distribution."""
             os.close(dest_fd)
 
     ###########################################################################
-    def do_reject (self, manual=0, reject_message="", note=""):
+    def do_reject (self, manual=0, reject_message="", notes=""):
         """
         Reject an upload. If called without a reject message or C{manual} is
         true, spawn an editor so the user can write one.
@@ -2081,9 +2122,10 @@ distribution."""
         if manual and not reject_message:
             (fd, temp_filename) = utils.temp_filename()
             temp_file = os.fdopen(fd, 'w')
-            if len(note) > 0:
-                for line in note:
-                    temp_file.write(line)
+            if len(notes) > 0:
+                for note in notes:
+                    temp_file.write("\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s" \
+                                    % (note.author, note.version, note.notedate, note.comment))
             temp_file.close()
             editor = os.environ.get("EDITOR","vi")
             answer = 'E'
@@ -2139,6 +2181,7 @@ distribution."""
             user_email_address = utils.whoami() + " <%s>" % (cnf["Dinstall::MyAdminAddress"])
             self.Subst["__REJECTOR_ADDRESS__"] = user_email_address
             self.Subst["__MANUAL_REJECT_MESSAGE__"] = reject_message
+            self.Subst["__REJECT_MESSAGE__"] = ""
             self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]
             reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
             # Write the rejection email out as the <foo>.reason file
@@ -2150,9 +2193,8 @@ distribution."""
 
         os.close(reason_fd)
 
-        # Send the rejection mail if appropriate
-        if not cnf["Dinstall::Options::No-Mail"]:
-            utils.send_mail(reject_mail_message)
+        # Send the rejection mail
+        utils.send_mail(reject_mail_message)
 
         if self.logger:
             self.logger.log(["rejected", self.pkg.changes_file])
@@ -2307,7 +2349,7 @@ distribution."""
                             cansave = 1
 
                     if not cansave:
-                        self.reject.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
+                        self.rejects.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
 
     ################################################################################
     def check_binary_against_db(self, filename, session):
@@ -2404,6 +2446,13 @@ distribution."""
                                 orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
                                 match = 1
 
+                                # Don't complain later that we couldn't find this file
+                                try:
+                                    self.later_check_files.remove(dsc_name)
+                                except ValueError:
+                                    pass
+
                     if not match:
                         self.rejects.append("can not overwrite existing copy of '%s' already in the archive." % (dsc_name))
 
old mode 100644 (file)
new mode 100755 (executable)
index 3283e1e..627df43
@@ -69,7 +69,7 @@ def package_to_queue(u, summary, short_summary, queue, chg, session, announce=No
     u.check_override()
 
     # Send accept mail, announce to lists and close bugs
-    if announce and not cnf["Dinstall::Options::No-Mail"]:
+    if announce:
         template = os.path.join(cnf["Dir::Templates"], announce)
         u.update_subst()
         u.Subst["__SUITE__"] = ""
@@ -166,7 +166,7 @@ def is_autobyhand(u):
 
 def do_autobyhand(u, summary, short_summary, chg, session):
     print "Attempting AUTOBYHAND."
-    byhandleft = True
+    byhandleft = False
     for f, entry in u.pkg.files.items():
         byhandfile = f
 
@@ -188,7 +188,7 @@ def do_autobyhand(u, summary, short_summary, chg, session):
 
         if result == 0:
             os.unlink(byhandfile)
-            del entry
+            del u.pkg.files[f]
         else:
             print "Error processing %s, left as byhand." % (f)
             byhandleft = True
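
Two fixes above are easy to miss: byhandleft now starts out False, so an upload whose byhand files all process cleanly is no longer left flagged as byhand, and del entry becomes del u.pkg.files[f], since deleting the loop variable only unbinds a local name. A small demonstration of the latter:

    files = {"pkg.byhand": {"type": "byhand"}}
    for f, entry in files.items():
        del entry              # unbinds the name 'entry'; the dict is untouched
    assert len(files) == 1
    for f in files.keys():     # Python 2: keys() is a list, safe to mutate under
        del files[f]           # removes the actual entry
    assert len(files) == 0
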
@@ -233,13 +233,12 @@ def acknowledge_new(u, summary, short_summary, chg, session):
     session.add(chg)
     session.commit()
 
-    if not cnf["Dinstall::Options::No-Mail"]:
-        print "Sending new ack."
-        template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
-        u.update_subst()
-        u.Subst["__SUMMARY__"] = summary
-        new_ack_message = utils.TemplateSubst(u.Subst, template)
-        utils.send_mail(new_ack_message)
+    print "Sending new ack."
+    template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
+    u.update_subst()
+    u.Subst["__SUMMARY__"] = summary
+    new_ack_message = utils.TemplateSubst(u.Subst, template)
+    utils.send_mail(new_ack_message)
 
 ################################################################################
 
index b7401751429b2c027f3ee89925bf593507a31795..6642622f9410938753e802cb77c40e3dbb6c378c 100644 (file)
@@ -117,7 +117,12 @@ def open_file(filename, mode='r'):
 
 def our_raw_input(prompt=""):
     if prompt:
-        sys.stdout.write(prompt)
+        while 1:
+            try:
+                sys.stdout.write(prompt)
+                break
+            except IOError:
+                pass
     sys.stdout.flush()
     try:
         ret = raw_input()
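
The retry loop above keeps the prompt alive when a signal interrupts the write (IOError/EINTR on Python 2). As written it retries any IOError, so a persistent stdout failure would spin forever; a slightly more defensive variant, shown here only as a sketch, would re-raise everything else:

    import errno
    import sys

    def write_prompt(prompt):
        while True:
            try:
                sys.stdout.write(prompt)
                return
            except IOError, e:
                if e.errno != errno.EINTR:
                    raise          # only retry genuinely interrupted writes
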
@@ -569,6 +574,10 @@ def build_file_list(changes, is_a_dsc=0, field="files", hashname="md5sum"):
 def send_mail (message, filename=""):
     """sendmail wrapper, takes _either_ a message string or a file as arguments"""
 
+    # Check whether we're supposed to be sending mail
+    if Cnf.has_key("Dinstall::Options::No-Mail") and Cnf["Dinstall::Options::No-Mail"]:
+        return
+
     # If we've been passed a string dump it into a temporary file
     if message:
         (fd, filename) = tempfile.mkstemp()
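
With the No-Mail test folded into send_mail itself, the conditional wrappers removed elsewhere in this commit (the accept, rejection, and NEW-ack hunks above) all collapse to plain calls:

    # Before: every caller checked the switch itself
    #   if not cnf["Dinstall::Options::No-Mail"]:
    #       utils.send_mail(mail_message)
    # After: send_mail is a silent no-op when No-Mail is set
    utils.send_mail(mail_message)
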
@@ -1341,9 +1350,9 @@ def check_signature (sig_filename, data_filename="", keyrings=None, autofetch=No
     if exit_status:
         rejects.append("gpgv failed while checking %s." % (sig_filename))
         if status.strip():
-            rejects.append(prefix_multi_line_string(status, " [GPG status-fd output:] "), "")
+            rejects.append(prefix_multi_line_string(status, " [GPG status-fd output:] "))
         else:
-            rejects.append(prefix_multi_line_string(output, " [GPG output:] "), "")
+            rejects.append(prefix_multi_line_string(output, " [GPG output:] "))
         return (None, rejects)
 
     # Sanity check the good stuff we expect
index e6ba85aec7c06767d7f734459414dbee0b3d58e1..1fe0b0e312987892698400d667e43403ecbf6486 100644 (file)
@@ -47,7 +47,7 @@ $max_delayed = 15;
 $keep_files = '(status|\.message|README)$';
 
 # file patterns that aren't deleted right away
-$valid_files = '(\.changes|\.tar\.gz|\.dsc|\.u?deb|diff\.gz|\.sh)$';
+$valid_files = '(\.changes|\.tar\.(?:gz|bz2)|\.dsc|\.u?deb|diff\.gz|\.sh)$';
 
 # Change files to mode 644 locally (after md5 check) or only on master?
 $chmod_on_target = 0;
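
The widened $valid_files pattern keeps bzip2 tarballs alongside gzip ones; the same change is applied to the -security and upload configs below. For illustration, an equivalent check in Python (the queue daemon itself is Perl):

    import re

    valid_files = re.compile(r'(\.changes|\.tar\.(?:gz|bz2)|\.dsc|\.u?deb|diff\.gz|\.sh)$')
    assert valid_files.search("foo_1.0.orig.tar.bz2")        # now kept
    assert valid_files.search("foo_1.0.orig.tar.gz")         # as before
    assert not valid_files.search("foo_1.0.orig.tar.lzma")   # still deleted right away
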
index 0dcb7db0bb704c219ca373605e1532d6e899086f..2f0d86739be3b480b96c684bd0ef6d824da4f672 100644 (file)
@@ -47,7 +47,7 @@ $max_delayed = -1;
 $keep_files = '(status|\.message|README)$';
 
 # file patterns that aren't deleted right away
-$valid_files = '(\.changes|\.tar\.gz|\.dsc|\.u?deb|diff\.gz|\.sh)$';
+$valid_files = '(\.changes|\.tar\.(?:gz|bz2)|\.dsc|\.u?deb|diff\.gz|\.sh)$';
 
 # Change files to mode 644 locally (after md5 check) or only on master?
 $chmod_on_target = 0;
index 633972fa860ee5e968d99c1b8c0222290c45eb32..f8033936ed958631e34ac04d45c16f673152db08 100644 (file)
@@ -47,7 +47,7 @@ $max_delayed = -1;
 $keep_files = '(status|\.message|README)$';
 
 # file patterns that aren't deleted right away
-$valid_files = '(\.changes|\.tar\.gz|\.dsc|\.u?deb|diff\.gz|\.sh)$';
+$valid_files = '(\.changes|\.tar\.(?:gz|bz2)|\.dsc|\.u?deb|diff\.gz|\.sh)$';
 
 # Change files to mode 644 locally (after md5 check) or only on master?
 $chmod_on_target = 0;
index c4eb6bea15add578d5ed1681db7924e3f35edfd8..2ebb5bc4d722595d7a699001bbe7ada51fc22ee9 100755 (executable)
@@ -14,7 +14,7 @@ LINE = re.compile(r'(?:|.*/)dinstall_(\d{4})\.(\d{2})\.(\d{2})-(\d{2}):(\d{2}):(
                   r'Archive maintenance timestamp \(([^\)]*)\): (\d{2}):(\d{2}):(\d{2})$')
 UNSAFE = re.compile(r'[^a-zA-Z/\._:0-9\- ]')
 
-graphs = {"dinstall1": {"keystolist":["pg_dump1", "i18n 1", "accepted", "make-suite-file-list", "apt-ftparchive",
+graphs = {"dinstall1": {"keystolist":["pg_dump1", "i18n 1", "accepted", "dominate", "generate-filelist", "apt-ftparchive",
                                     "pdiff", "release files", "w-b", "i18n 2", "apt-ftparchive cleanup"],
                         "showothers":True},
           "dinstall2": {"keystolist":['External Updates', 'p-u-new', 'o-p-u-new', 'cruft', 'import-keyring', 'overrides', 'cleanup', 'scripts', 'mirror hardlinks', 'stats', 'compress', "pkg-file-mapping" ],
@@ -46,7 +46,7 @@ if m:
     raise Exception("I don't like command line arguments including char '%s'"%m.group(0))
 
 if args:
-  for l in os.popen('bzgrep -H "Archive maintenance timestamp" "'+'" "'.join(args)+'"'):
+  for l in os.popen('bzgrep -H "^Archive maintenance timestamp" "'+'" "'.join(args)+'"'):
     m = LINE.match(l)
     if not m:
         raise Exception("woops '%s'"%l)
index 9f0adcaed700cfc316e513d7662da3aec6a90512..8b4206062685c8ae4fa51443792b26469ca4c05b 100644 (file)
@@ -41,6 +41,7 @@
                        <li><a href="#new">NEW</a></li>
                        <li><a href="#pending">Pending removals</a></li>
                        <li><a href="#removed">Removed packages</a></li>
+                       <li><a href="#cruft">Cruft Report</a></li>
                        <li><a href="#testing">Testing</a></li>
                        <li><a href="#stable">Stable</a></li>
                        <li><a href="#rejections">Rejections</a></li>
                </p>
         </div>
 
+        <div id="cruft">
+            <h1>Cruft Report</h1>
+           <p>Some packages which needs to be removed manually are found in
+           <a href="cruft-report-daily.txt">the cruft-report</a>.</p>
+        </div>
+
         <div id="testing">
             <h1>Testing</h1>
             <p>Squeeze is testing, sid is unstable. For more details please look