git.decadent.org.uk Git - dak.git/commitdiff
Merge branch 'master' of ssh://ftp-master.debian.org/srv/ftp.debian.org/git/dak
author Mark Hymers <mhy@debian.org>
Sat, 26 Mar 2011 18:09:37 +0000 (18:09 +0000)
committer Mark Hymers <mhy@debian.org>
Sat, 26 Mar 2011 18:09:37 +0000 (18:09 +0000)
config/homedir/syncdd.sh
dak/generate_filelist.py
dak/generate_packages_sources2.py
dak/show_new.py
daklib/dakmultiprocessing.py [new file with mode: 0644]
daklib/filewriter.py
scripts/debian/buildd-add-keys
scripts/debian/buildd-remove-keys

diff --git a/config/homedir/syncdd.sh b/config/homedir/syncdd.sh
index 9260e76402b32162cabed1fc6158b2e75f9dd104..3f6629c7506fecabaab9bb57175addb2d39236fe 100755 (executable)
@@ -77,7 +77,7 @@ if lockfile -r3 ${HOME}/sync.lock; then
     rsync -aH -B8192 \
         --exclude backup/*.xz \
         --exclude backup/dump* \
-        --exclude database/*.db \
+        --exclude database/\*.db \
         ${EXTRA} \
         --exclude mirror \
         --exclude morgue/ \
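The one-character change above presumably guards against the shell expanding database/*.db against the current working directory before rsync ever sees it; escaped, the literal pattern reaches rsync, which does its own exclude matching.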
diff --git a/dak/generate_filelist.py b/dak/generate_filelist.py
index 2a6d218badf2e6e12a6d2b9a43151a4400d4906c..2d2e9499e932581acb2db1c95e76d741b10c4343 100755 (executable)
@@ -39,7 +39,7 @@ Generate file lists for apt-ftparchive.
 from daklib.dbconn import *
 from daklib.config import Config
 from daklib import utils, daklog
-from multiprocessing import Pool
+from daklib.dakmultiprocessing import Pool
 import apt_pkg, os, stat, sys
 
 from daklib.lists import getSources, getBinaries, getArchAll
@@ -72,11 +72,13 @@ def writeSourceList(suite_id, component_id, incremental_mode):
     (file, timestamp) = listPath(suite, component,
             incremental_mode = incremental_mode)
 
+    message = "sources list for %s %s" % (suite.suite_name, component.component_name)
+
     for _, filename in getSources(suite, component, session, timestamp):
         file.write(filename + '\n')
-    session.close()
+    session.rollback()
     file.close()
-    return "sources list for %s %s" % (suite.suite_name, component.component_name)
+    return message
 
 def writeAllList(suite_id, component_id, architecture_id, type, incremental_mode):
     session = DBConn().session()
@@ -86,12 +88,14 @@ def writeAllList(suite_id, component_id, architecture_id, type, incremental_mode):
     (file, timestamp) = listPath(suite, component, architecture, type,
             incremental_mode)
 
+    message = "all list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+
     for _, filename in getArchAll(suite, component, architecture, type,
             session, timestamp):
         file.write(filename + '\n')
-    session.close()
+    session.rollback()
     file.close()
-    return "all list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+    return message
 
 def writeBinaryList(suite_id, component_id, architecture_id, type, incremental_mode):
     session = DBConn().session()
@@ -101,12 +105,14 @@ def writeBinaryList(suite_id, component_id, architecture_id, type, incremental_mode):
     (file, timestamp) = listPath(suite, component, architecture, type,
             incremental_mode)
 
+    message = "binary list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+
     for _, filename in getBinaries(suite, component, architecture, type,
             session, timestamp):
         file.write(filename + '\n')
-    session.close()
+    session.rollback()
     file.close()
-    return "binary list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+    return message
 
 def usage():
     print """Usage: dak generate_filelist [OPTIONS]
diff --git a/dak/generate_packages_sources2.py b/dak/generate_packages_sources2.py
index 8d8a8aefa3f2d51103a6324480726c3101d2e6ee..ea3c0e53b21866ab40dcff27f75171121d6fb341 100755 (executable)
@@ -31,7 +31,9 @@ Generate Packages/Sources files
 from daklib.dbconn import *
 from daklib.config import Config
 from daklib import utils, daklog
-from multiprocessing import Pool
+from daklib.dakmultiprocessing import Pool
+from daklib.filewriter import PackagesFileWriter, SourcesFileWriter
+
 import apt_pkg, os, stat, sys
 
 def usage():
@@ -93,23 +95,6 @@ ORDER BY
 s.source, s.version
 """
 
-def open_sources(suite, component):
-    cnf = Config()
-    dest = os.path.join(cnf['Dir::Root'], 'dists', suite.suite_name, component.component_name, 'source', 'Sources')
-
-    # create queue if it does not exist yet
-    if os.path.exists(dest) and os.path.isdir(dest):
-        dest_dir = dest
-    else:
-        dest_dir = os.path.dirname(dest)
-    if not os.path.exists(dest_dir):
-        umask = os.umask(00000)
-        os.makedirs(dest_dir, 02775)
-        os.umask(umask)
-
-    f = open(dest, 'w')
-    return f
-
 def generate_sources(suite_id, component_id):
     global _sources_query
 
@@ -119,7 +104,8 @@ def generate_sources(suite_id, component_id):
     suite = session.query(Suite).get(suite_id)
     component = session.query(Component).get(component_id)
 
-    output = open_sources(suite, component)
+    writer = SourcesFileWriter(suite=suite.suite_name, component=component.component_name)
+    output = writer.open()
 
     # run query and write Sources
     r = session.execute(_sources_query, {"suite": suite_id, "component": component_id, "dsc_type": dsc_type})
@@ -127,7 +113,11 @@ def generate_sources(suite_id, component_id):
         print >>output, stanza
         print >>output, ""
 
-    return ["generate sources", suite.suite_name, component.component_name]
+    writer.close()
+
+    message = ["generate sources", suite.suite_name, component.component_name]
+    session.rollback()
+    return message
 
 #############################################################################
 
@@ -198,26 +188,6 @@ WHERE
 ORDER BY tmp.package, tmp.version
 """
 
-def open_packages(suite, component, architecture, type_name):
-    cnf = Config()
-    if type_name == 'udeb':
-        dest = os.path.join(cnf['Dir::Root'], 'dists', suite.suite_name, component.component_name, 'debian-installer', 'binary-%s' % architecture.arch_string, 'Packages')
-    else:
-        dest = os.path.join(cnf['Dir::Root'], 'dists', suite.suite_name, component.component_name, 'binary-%s' % architecture.arch_string, 'Packages')
-
-    # create queue if it does not exist yet
-    if os.path.exists(dest) and os.path.isdir(dest):
-        dest_dir = dest
-    else:
-        dest_dir = os.path.dirname(dest)
-    if not os.path.exists(dest_dir):
-        umask = os.umask(00000)
-        os.makedirs(dest_dir, 02775)
-        os.umask(umask)
-
-    f = open(dest, 'w')
-    return f
-
 def generate_packages(suite_id, component_id, architecture_id, type_name):
     global _packages_query
 
@@ -229,7 +199,9 @@ def generate_packages(suite_id, component_id, architecture_id, type_name):
     component = session.query(Component).get(component_id)
     architecture = session.query(Architecture).get(architecture_id)
 
-    output = open_packages(suite, component, architecture, type_name)
+    writer = PackagesFileWriter(suite=suite.suite_name, component=component.component_name,
+            architecture=architecture.arch_string, debtype=type_name)
+    output = writer.open()
 
     r = session.execute(_packages_query, {"suite": suite_id, "component": component_id,
         "arch": architecture_id, "type_id": type_id, "type_name": type_name, "arch_all": arch_all_id})
@@ -237,9 +209,11 @@ def generate_packages(suite_id, component_id, architecture_id, type_name):
         print >>output, stanza
         print >>output, ""
 
-    session.close()
+    writer.close()
 
-    return ["generate-packages", suite.suite_name, component.component_name, architecture.arch_string]
+    message = ["generate-packages", suite.suite_name, component.component_name, architecture.arch_string]
+    session.rollback()
+    return message
 
 #############################################################################
 
@@ -289,9 +263,8 @@ def main():
         for c in component_ids:
             pool.apply_async(generate_sources, [s.suite_id, c], callback=log)
             for a in s.architectures:
-                #pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'deb'], callback=log)
-                apply(generate_packages, [s.suite_id, c, a.arch_id, 'deb'])
-                #pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'udeb'], callback=log)
+                pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'deb'], callback=log)
+                pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'udeb'], callback=log)
 
     pool.close()
     pool.join()
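The deleted open_sources()/open_packages() helpers, with their hand-rolled directory creation and umask dance, give way to the writer classes from daklib.filewriter, and main() re-enables pool.apply_async for the 'deb' run and adds the 'udeb' one. A rough sketch of the new open/write/close cycle; the keyword arguments come from the hunks, while the stanza and what close() does with the '.new' file are assumptions:

    # Hypothetical standalone use of the new writer API (Python 2, as dak is).
    from daklib.filewriter import PackagesFileWriter

    writer = PackagesFileWriter(suite='unstable', component='main',
            architecture='amd64', debtype='deb')
    output = writer.open()    # opens the target path + '.new' for writing
    stanza = "Package: example\nVersion: 1.0"    # placeholder stanza
    print >>output, stanza
    print >>output, ""
    writer.close()            # presumably moves the '.new' file into place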
diff --git a/dak/show_new.py b/dak/show_new.py
index 8405aeef9f0fe2391d9e969169b944db7abf179b..84b45077b1d11733b55cfd89cdc789e727092cac 100755 (executable)
@@ -37,7 +37,7 @@ from daklib.regexes import re_source_ext
 from daklib.config import Config
 from daklib import daklog
 from daklib.changesutils import *
-from daklib.threadpool import ThreadPool
+from daklib.dakmultiprocessing import Pool
 
 # Globals
 Cnf = None
@@ -250,14 +250,15 @@ def main():
 
     examine_package.use_html=1
 
-    threadpool = ThreadPool()
+    pool = Pool()
     for changes_file in changes_files:
         changes_file = utils.validate_changes_file_arg(changes_file, 0)
         if not changes_file:
             continue
         print "\n" + changes_file
-        threadpool.queueTask(do_pkg, changes_file)
-    threadpool.joinAll()
+        pool.apply_async(do_pkg, (changes_file,))
+    pool.close()
+    pool.join()
 
     files = set(os.listdir(cnf["Show-New::HTMLPath"]))
     to_delete = filter(lambda x: x.endswith(".html"), files.difference(sources))
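The ThreadPool-to-Pool migration is mechanical: queueTask() becomes apply_async(), and joinAll() becomes the close()/join() pair, which (see dakmultiprocessing.py below) also re-raises any exception a worker hit. Stripped to its shape:

    # Shape of the new submission loop; do_pkg and changes_files as in show_new.py.
    pool = Pool()
    for changes_file in changes_files:
        pool.apply_async(do_pkg, (changes_file,))
    pool.close()
    pool.join()    # raises here if any do_pkg call failed in a worker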
diff --git a/daklib/dakmultiprocessing.py b/daklib/dakmultiprocessing.py
new file mode 100644 (file)
index 0000000..ded81a2
--- /dev/null
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# vim:set et sw=4:
+
+"""
+multiprocessing for DAK
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2011  Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+###############################################################################
+
+import multiprocessing
+import sqlalchemy.orm.session
+
+def _func_wrapper(func, *args, **kwds):
+    try:
+        return func(*args, **kwds)
+    finally:
+        # Make sure connections are closed. We might die otherwise.
+        sqlalchemy.orm.session.Session.close_all()
+
+class Pool():
+    def __init__(self, *args, **kwds):
+        self.pool = multiprocessing.Pool(*args, **kwds)
+        self.results = []
+
+    def apply_async(self, func, args=(), kwds={}, callback=None):
+        wrapper_args = list(args)
+        wrapper_args.insert(0, func)
+        self.results.append(self.pool.apply_async(_func_wrapper, wrapper_args, kwds, callback))
+
+    def close(self):
+        self.pool.close()
+
+    def join(self):
+        self.pool.join()
+        for r in self.results:
+            # return values were already handled in the callbacks, but asking
+            # for them might raise exceptions which would otherwise be lost
+            r.get()
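Two details carry the weight in this module: _func_wrapper() calls Session.close_all() in the worker, so forked processes never sit on inherited database connections, and join() calls get() on every stored AsyncResult, so exceptions raised in workers resurface in the parent instead of being silently dropped. A small usage sketch; task and print_result are hypothetical:

    from daklib.dakmultiprocessing import Pool

    def print_result(message):
        print message    # callbacks only ever see successful return values

    def task(n):
        if n == 3:
            raise ValueError("boom")    # lost with a plain callback-only Pool
        return "did %d" % n

    pool = Pool()
    for n in range(5):
        pool.apply_async(task, (n,), callback=print_result)
    pool.close()
    pool.join()    # the ValueError from task(3) re-raises here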
diff --git a/daklib/filewriter.py b/daklib/filewriter.py
index a3c16ea828c9f7b10b6881fec336fa9b294542a1..8907fa6d96fa39f7c8e2d7163f3c20e4f361bd2e 100755 (executable)
@@ -54,6 +54,11 @@ class BaseFileWriter(object):
         '''
         Returns a file object for writing.
         '''
+        # create missing directories
+        try:
+            os.makedirs(os.path.dirname(self.path))
+        except:
+            pass
         self.file = open(self.path + '.new', 'w')
         return self.file
 
diff --git a/scripts/debian/buildd-add-keys b/scripts/debian/buildd-add-keys
index 1283838f14ce04c9da5d8e81ad2b3646fc7d88ea..26fc1f3f3e6c65edf0db2ac28d7fe92de0abc0e5 100755 (executable)
@@ -177,55 +177,82 @@ for file in ${KEYS}; do
     # Read in the TEMPKEYDATAFILE, but avoid using a subshell like a
     # while read line otherwise would do
     exec 4<> "${TEMPKEYDATA}"
-    error=""
+    KEYUID=""
+    #pub:-:4096:1:FAB983612A6554FA:2011-03-24:2011-07-22::-:buildd autosigning key poulenc <buildd_powerpc-poulenc@buildd.debian.org>:
+
+    # Of course this sucky gpg crapshit of an "interface" does give you different things depending on how people
+    # created their keys. And of course the buildd people created the test keys differently to what they now do
+    # which just means extra work for nothing. So as they now do other steps, the thing we get back suddenly looks like
+
+    #pub:-:4096:1:99595DC7865BEAD2:2011-03-26:2011-07-24::-:
+    #uid:::::::::buildd autosigning key corelli <buildd_mips-corelli@buildd.debian.org>:
+
+    # Besides fiddling out the data we need to check later, this regex also checks:
+    # - the keytype (:1:, 1 there means RSA)
+    # - the UID
+    # - that the key does have an expiration date (or it won't match; the second
+    #   date field would be empty)
+    regex="^pub:-:([0-9]{4}):1:([0-9A-F]{16}):([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})::-:(buildd autosigning key ${BUILDD} <buildd_${ARCH}-${BUILDD}@buildd.debian.org>):$"
+    regex2="^pub:-:([0-9]{4}):1:([0-9A-F]{16}):([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})::-:$"
+    regex3="^uid:::::::::(buildd autosigning key ${BUILDD} <buildd_${ARCH}-${BUILDD}@buildd.debian.org>):$"
     while read line <&4; do
-        #pub:-:4096:1:FAB983612A6554FA:2011-03-24:2011-07-22::-:buildd autosigning key poulenc <buildd_powerpc-poulenc@buildd.debian.org>:
-
-        # Besides fiddling out the data we need to check later, this regex also checks:
-        # - the keytype (:1:, 1 there means RSA)
-        # - the UID
-        # - that the key does have an expiration date (or it won't match; the second
-        #   date field would be empty)
-        regex="^pub:-:([0-9]{4}):1:([0-9A-F]{16}):([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})::-:buildd autosigning key ${BUILDD} <buildd_${ARCH}-${BUILDD}@buildd.debian.org>:$"
         if [[ $line =~ $regex ]]; then
             KEYSIZE=${BASH_REMATCH[1]}
             KEYID=${BASH_REMATCH[2]}
             KEYCREATE=${BASH_REMATCH[3]}
             KEYEXPIRE=${BASH_REMATCH[4]}
-
-            # We do want 4096 or anything above
-            if [ ${KEYSIZE} -lt 4096 ]; then
-                log "Keysize ${KEYSIZE} too small"
-                error="${error} Keysize ${KEYSIZE} too small"
-                continue
-            fi
-
-            # We want a maximum lifetime of 120 days, so check that.
-            # Easiest to compare in epoch, so let's see: 120 days midnight from now,
-            # compared with their set expiration date at midnight;
-            # maxdate should turn out higher. Just in case, we make it 121 for this check.
-            maxdate=$(date -d '121 day 00:00:00' +%s)
-            theirexpire=$(date -d "${KEYEXPIRE} 00:00:00" +%s)
-            if [ ${theirexpire} -gt ${maxdate} ]; then
-                log "Key expiry ${KEYEXPIRE} wrong"
-                error="${error} Key expiry ${KEYEXPIRE} wrong"
-                continue
-            fi
-        else
-            log "Unknown line $line, sod off"
-            error="${error} Unknown line $line, sod off"
-            continue
-        fi
+            KEYUID=${BASH_REMATCH[5]}
+        elif [[ $line =~ $regex2 ]]; then
+            KEYSIZE=${BASH_REMATCH[1]}
+            KEYID=${BASH_REMATCH[2]}
+            KEYCREATE=${BASH_REMATCH[3]}
+            KEYEXPIRE=${BASH_REMATCH[4]}
+        elif [[ $line =~ $regex3 ]]; then
+            KEYUID=${BASH_REMATCH[1]}
+        else
+            log "Didn't recognize the key. Go kiss gpg"
+            DATE=$(date -Is)
+            mv "${INCOMING}/${file}" "${ERRORS}/badkey.${file}.${DATE}"
+            mv "${GPGSTATUS}" "${ERRORS}/badkey.${file}.gpgstatus.${DATE}"
+            mv "${GPGLOGS}" "${ERRORS}/badkey.${file}.gpglogs.${DATE}"
+            rm -f "${GPGOUTF}"
+            continue
+        fi
     done
-    if [ -n "${error}" ]; then
-        log ${error}
+    if [ -z "${KEYUID}" ]; then
+    log "Did not recognize the UID format"
         DATE=$(date -Is)
-        mv "${INCOMING}/${file}" "${ERRORS}/badkey.${file}.${DATE}"
-        mv "${GPGSTATUS}" "${ERRORS}/badkey.${file}.gpgstatus.${DATE}"
-        mv "${GPGLOGS}" "${ERRORS}/badkey.${file}.gpglogs.${DATE}"
-        echo "${error}" >> "${ERRORS}/badkey.${file}.error.${DATE}"
+        mv "${INCOMING}/${file}" "${ERRORS}/keyuid.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/keyuid.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/keyuid.${file}.gpglogs.${DATE}"
         rm -f "${GPGOUTF}"
-        continue
+        continue
+    fi
+    # We do want 4096 or anything above
+    if [ ${KEYSIZE} -lt 4096 ]; then
+        log "Keysize ${KEYSIZE} too small"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/keysize.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/keysize.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/keysize.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # We want a maximum lifetime of 120 days, so check that.
+    # Easiest to compare in epoch, so let's see: 120 days midnight from now,
+    # compared with their set expiration date at midnight;
+    # maxdate should turn out higher. Just in case, we make it 121 for this check.
+    maxdate=$(date -d '121 day 00:00:00' +%s)
+    theirexpire=$(date -d "${KEYEXPIRE} 00:00:00" +%s)
+    if [ ${theirexpire} -gt ${maxdate} ]; then
+        log "Key expiry ${KEYEXPIRE} wrong"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/keyexpire.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/keyexpire.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/keyexpire.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
     fi
 
     # And now lets check how many keys this buildd already has. 2 is the maximum, so key
@@ -245,7 +272,9 @@ for file in ${KEYS}; do
 
     # Right. At this point everything should be in order, which means we should put the key into
     # the keyring
-    log "Accepting key ${KEYID} for ${ARCH} buildd ${BUILDD}, expire ${KEYEXPIRE}"
+    KEYSUBMITTER=$(cat "${GPGSTATUS}"|grep GOODSIG)
+    KEYSUBMITTER=${KEYSUBMITTER##*GOODSIG}
+    log "${KEYSUBMITTER} added key ${KEYID} for ${ARCH} buildd ${BUILDD}, expire ${KEYEXPIRE}"
     gpg ${DEFGPGOPT} --status-fd 4 --logger-fd 5 --keyring "${ARCHKEYRING}" --import "${GPGOUTF}" 2>/dev/null
 
     mv "${INCOMING}/${file}" "${base}/${ARCH}"
diff --git a/scripts/debian/buildd-remove-keys b/scripts/debian/buildd-remove-keys
index 3591785d6c028769b169a9b22f1f81b3b1f5e2a9..b9b2ecfea7907160bf644cfefb58800dd9fdcdcc 100755 (executable)
@@ -190,7 +190,9 @@ for file in ${KEYS}; do
         # So put it into the removed keyring
         gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --export ${KEYID} | gpg ${DEFGPGOPT} --keyring "${REMOVED}" --import 2>/dev/null
         if gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --yes --delete-keys ${KEYID}; then
-            log "Removed key ${KEYID}, reason: ${COMMENT}"
+            KEYSUBMITTER=$(cat "${GPGSTATUS}"|grep GOODSIG)
+            KEYSUBMITTER=${KEYSUBMITTER##*GOODSIG}
+            log "${KEYSUBMITTER} removed key ${KEYID} for ${ARCH} buildd ${BUILDD}, reason: ${COMMENT}"
             mv "${INCOMING}/${file}" "${base}/${ARCH}"
             continue
         fi
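Both buildd key scripts now log who asked for the change by pulling the GOODSIG line out of the gpg status file. Such a line looks roughly like

    [GNUPG:] GOODSIG 0123456789ABCDEF Some Admin <admin@debian.org>

so the ${KEYSUBMITTER##*GOODSIG} expansion strips everything up to and including GOODSIG, leaving the submitter's key ID and UID for the log entry. (The example key ID and UID here are made up.)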