git.decadent.org.uk Git - dak.git/commitdiff
Merge remote-tracking branch 'ansgar/generate-filelist-multiprocessing' into merge
author Joerg Jaspert <joerg@debian.org>
Fri, 25 Mar 2011 15:59:05 +0000 (16:59 +0100)
committer Joerg Jaspert <joerg@debian.org>
Fri, 25 Mar 2011 15:59:05 +0000 (16:59 +0100)
* ansgar/generate-filelist-multiprocessing:
  generate-filelist: Use multiprocessing

Signed-off-by: Joerg Jaspert <joerg@debian.org>
dak/admin.py
dak/dakdb/update53.py [new file with mode: 0755]
dak/dakdb/update54.py [new file with mode: 0755]
dak/update_db.py
daklib/changes.py
daklib/dbconn.py
daklib/queue.py
daklib/queue_install.py
scripts/debian/buildd-add-keys [new file with mode: 0755]
scripts/debian/buildd-prepare-dir [new file with mode: 0755]
scripts/debian/buildd-remove-keys [new file with mode: 0755]

diff --git a/dak/admin.py b/dak/admin.py
index 808fb88785edb626b99d46881da114a58f276ee5..d159651e75df850904971df8f7b3705b4bbd6837 100755
@@ -59,6 +59,7 @@ Perform administrative work on the dak database.
   config / c:
      c db                   show db config
      c db-shell             show db config in a usable form for psql
+     c NAME                 show option NAME as set in configuration table
 
   architecture / a:
      a list                 show a list of architectures
@@ -456,7 +457,12 @@ def show_config(command):
             e.append('PGPORT')
         print "export " + " ".join(e)
     else:
-        die("E: config command unknown")
+        session = DBConn().session()
+        try:
+            o = session.query(DBConfig).filter_by(name = mode).one()
+            print o.value
+        except NoResultFound:
+            print "W: option '%s' not set" % mode
 
 dispatch['config'] = show_config
 dispatch['c'] = show_config
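
A minimal standalone sketch of what the new "c NAME" branch does, assuming
the same daklib objects the hunk above already uses (DBConn, DBConfig) and
sqlalchemy's NoResultFound:

    from sqlalchemy.orm.exc import NoResultFound
    from daklib.dbconn import DBConn, DBConfig

    def show_option(name):
        # Look the option up in the "config" table; warn if unset.
        session = DBConn().session()
        try:
            print session.query(DBConfig).filter_by(name=name).one().value
        except NoResultFound:
            print "W: option '%s' not set" % name

    show_option('db_revision')  # "dak admin c db_revision" prints e.g. 54
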
diff --git a/dak/dakdb/update53.py b/dak/dakdb/update53.py
new file mode 100755
index 0000000..36a076f
--- /dev/null
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Add table for build queue files from policy queues.
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2011 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+
+################################################################################
+def do_update(self):
+    """
+    Add table for build queue files from policy queues.
+    """
+    print __doc__
+    try:
+        c = self.db.cursor()
+
+        c.execute("""
+            CREATE TABLE build_queue_policy_files (
+                build_queue_id INTEGER NOT NULL REFERENCES build_queue(id) ON DELETE CASCADE,
+                file_id INTEGER NOT NULL REFERENCES changes_pending_files(id) ON DELETE CASCADE,
+                filename TEXT NOT NULL,
+                created TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
+                lastused TIMESTAMP WITHOUT TIME ZONE,
+                PRIMARY KEY (build_queue_id, file_id)
+            )""")
+
+        c.execute("UPDATE config SET value = '53' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError, msg:
+        self.db.rollback()
+        raise DBUpdateError, 'Unable to apply update 53, rollback issued. Error message: %s' % (str(msg))
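
The lastused column added above is what the stay-of-execution clean-up in
daklib/dbconn.py (further down) keys on. An illustrative query of the kind
it enables; this is a sketch, not code from this commit, and the cursor c,
queue_id and the 86400-second interval are all assumptions:

    # Policy-queue files in a build queue that have outlived their stay
    # of execution and may be expired (sketch).
    c.execute("""SELECT filename FROM build_queue_policy_files
                  WHERE build_queue_id = %s
                    AND lastused + interval '86400 seconds' <= now()""",
              (queue_id,))
    for (filename,) in c.fetchall():
        print "would expire %s" % filename
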
diff --git a/dak/dakdb/update54.py b/dak/dakdb/update54.py
new file mode 100755
index 0000000..e7676ef
--- /dev/null
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Add send_to_build_queues to policy_queue table
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2011 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+
+################################################################################
+def do_update(self):
+    """
+    Add send_to_build_queues to policy_queue table
+    """
+    print __doc__
+    try:
+        c = self.db.cursor()
+
+        c.execute("""
+            ALTER TABLE policy_queue ADD COLUMN send_to_build_queues BOOLEAN NOT NULL DEFAULT 'f'
+            """)
+        c.execute("""
+            UPDATE policy_queue SET send_to_build_queues='t' WHERE queue_name IN ('embargo', 'disembargo')
+            """)
+
+        c.execute("UPDATE config SET value = '54' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError, msg:
+        self.db.rollback()
+        raise DBUpdateError, 'Unable to apply update 54, rollback issued. Error message: %s' % (str(msg))
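
After this update only the embargo and disembargo policy queues forward
uploads to build queues; the new flag is what queue_install.py (below)
checks. A quick illustrative query (a sketch, assuming daklib's PolicyQueue
mapping picks up the new column):

    from daklib.dbconn import DBConn, PolicyQueue

    session = DBConn().session()
    for q in session.query(PolicyQueue).filter_by(send_to_build_queues=True):
        print q.queue_name   # 'embargo', 'disembargo'
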
diff --git a/dak/update_db.py b/dak/update_db.py
index 9051704b406ba6140480ee3b8abad23dc1a90d8a..88ff20f52c1e842bb025da4213349c1d013020bc 100755
@@ -46,7 +46,7 @@ from daklib.daklog import Logger
 ################################################################################
 
 Cnf = None
-required_database_schema = 52
+required_database_schema = 54
 
 ################################################################################
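
Bumping required_database_schema to 54 is what makes "dak update-db" apply
the two new revision scripts. A sketch of the general mechanism (not
update_db.py's actual code; current_revision and updater are assumptions):

    # Apply each dak/dakdb/updateNN.py in turn until db_revision
    # reaches required_database_schema.
    for rev in range(current_revision + 1, required_database_schema + 1):
        mod = __import__("dak.dakdb.update%d" % rev, fromlist=['do_update'])
        mod.do_update(updater)  # each do_update bumps db_revision itself
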
 
diff --git a/daklib/changes.py b/daklib/changes.py
index e016638cd1db36296d67f4ea0505723106ac057e..54adb3b06b4d091a0134df5eff7820daee2ba466 100644
@@ -187,6 +187,31 @@ class Changes(object):
             if (not self.changes.has_key(key)) or (not self.changes[key]):
                 self.changes[key]='missing'
 
+    def __get_file_from_pool(self, filename, entry, session):
+        cnf = Config()
+
+        poolname = poolify(entry["source"], entry["component"])
+        l = get_location(cnf["Dir::Pool"], entry["component"], session=session)
+
+        found, poolfile = check_poolfile(os.path.join(poolname, filename),
+                                         entry['size'],
+                                         entry["md5sum"],
+                                         l.location_id,
+                                         session=session)
+
+        if found is None:
+            Logger.log(["E: Found multiple files for pool (%s) for %s" % (chg_fn, entry["component"])])
+            return None
+        elif found is False and poolfile is not None:
+            Logger.log(["E: md5sum/size mismatch for %s in pool" % (chg_fn)])
+            return None
+        else:
+            if poolfile is None:
+                Logger.log(["E: Could not find %s in pool" % (chg_fn)])
+                return None
+            else:
+                return poolfile
+
     @session_wrapper
     def add_known_changes(self, dirpath, in_queue=None, session=None):
         """add "missing" in fields which we will require for the known_changes table"""
@@ -248,27 +273,22 @@ class Changes(object):
 
             except IOError:
                 # Can't find the file, try to look it up in the pool
-                poolname = poolify(entry["source"], entry["component"])
-                l = get_location(cnf["Dir::Pool"], entry["component"], session=session)
-
-                found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
-                                                 entry['size'],
-                                                 entry["md5sum"],
-                                                 l.location_id,
-                                                 session=session)
-
-                if found is None:
-                    Logger.log(["E: Found multiple files for pool (%s) for %s" % (chg_fn, entry["component"])])
-                elif found is False and poolfile is not None:
-                    Logger.log(["E: md5sum/size mismatch for %s in pool" % (chg_fn)])
-                else:
-                    if poolfile is None:
-                        Logger.log(["E: Could not find %s in pool" % (chg_fn)])
-                    else:
-                        chg.poolfiles.append(poolfile)
+                poolfile = self.__get_file_from_pool(chg_fn, entry, session)
+                if poolfile:
+                    chg.poolfiles.append(poolfile)
 
         chg.files = files
 
+        # Add files referenced in .dsc, but not included in .changes
+        for name, entry in self.dsc_files.items():
+            if self.files.has_key(name):
+                continue
+
+            entry['source'] = self.changes['source']
+            poolfile = self.__get_file_from_pool(name, entry, session)
+            if poolfile:
+                chg.poolfiles.append(poolfile)
+
         session.commit()
         chg = session.query(DBChange).filter_by(changesname = self.changes_file).one();
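
The new loop above covers files that a source upload references in its .dsc
but does not ship in the .changes itself, typically an orig tarball already
present in the pool. In effect it walks this set difference (a sketch using
the same attribute names as the hunk):

    # Files listed in the .dsc but absent from the .changes file list;
    # these must already live in the pool (e.g. an unchanged orig tarball).
    missing = [name for name in self.dsc_files.keys()
               if not self.files.has_key(name)]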
 
diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index fe04ebc3df4c90e1f1bf13a4c0ddb3966bab3d1c..ae5a9e0d21b6583712fcc5d7fd3578cb55479876 100755
@@ -703,6 +703,7 @@ class BuildQueue(object):
         try:
             # Grab files we want to include
             newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+            newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
             # Write file list with newer files
             (fl_fd, fl_name) = mkstemp()
             for n in newer:
@@ -795,6 +796,7 @@ class BuildQueue(object):
 
         # Grab files older than our execution time
         older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+        older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
 
         for o in older:
             killdb = False
@@ -822,9 +824,7 @@ class BuildQueue(object):
             if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
                 continue
 
-            try:
-                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
-            except NoResultFound:
+            if not self.contains_filename(f):
                 fp = os.path.join(self.path, f)
                 if dryrun:
                     Logger.log(["I: Would remove unused link %s" % fp])
@@ -835,6 +835,18 @@ class BuildQueue(object):
                     except OSError:
                         Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
 
+    def contains_filename(self, filename):
+        """
+        @rtype: bool
+        @return: True if filename is supposed to be in the queue; False otherwise
+        """
+        session = DBConn().session().object_session(self)
+        if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
+            return True
+        elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
+            return True
+        return False
+
     def add_file_from_pool(self, poolfile):
         """Copies a file into the pool.  Assumes that the PoolFile object is
         attached to the same SQLAlchemy session as the Queue object is.
@@ -879,6 +891,61 @@ class BuildQueue(object):
 
         return qf
 
+    def add_changes_from_policy_queue(self, policyqueue, changes):
+        """
+        Copies a changes object's files from a policy queue together with its poolfiles.
+
+        @type policyqueue: PolicyQueue
+        @param policyqueue: policy queue to copy the changes from
+
+        @type changes: DBChange
+        @param changes: changes to copy to this build queue
+        """
+        for policyqueuefile in changes.files:
+            self.add_file_from_policy_queue(policyqueue, policyqueuefile)
+        for poolfile in changes.poolfiles:
+            self.add_file_from_pool(poolfile)
+
+    def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
+        """
+        Copies a file from a policy queue.
+        Assumes that the policyqueuefile is attached to the same SQLAlchemy
+        session as the Queue object is.  The caller is responsible for
+        committing after calling this function.
+
+        @type policyqueue: PolicyQueue
+        @param policyqueue: policy queue to copy the file from
+
+        @type policyqueuefile: ChangePendingFile
+        @param policyqueuefile: file to be added to the build queue
+        """
+        session = DBConn().session().object_session(policyqueuefile)
+
+        # Is the file already there?
+        try:
+            f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
+            f.lastused = datetime.now()
+            return f
+        except NoResultFound:
+            pass # continue below
+
+        # We have to add the file.
+        f = BuildQueuePolicyFile()
+        f.build_queue = self
+        f.file = policyqueuefile
+        f.filename = policyqueuefile.filename
+
+        source = os.path.join(policyqueue.path, policyqueuefile.filename)
+        target = f.fullpath
+        try:
+            # Always copy files from policy queues as they might move around.
+            import utils
+            utils.copy(source, target)
+        except OSError:
+            return None
+
+        session.add(f)
+        return f
 
 __all__.append('BuildQueue')
 
@@ -911,6 +978,10 @@ __all__.append('get_build_queue')
 ################################################################################
 
 class BuildQueueFile(object):
+    """
+    BuildQueueFile represents a file in a build queue coming from a pool.
+    """
+
     def __init__(self, *args, **kwargs):
         pass
 
@@ -926,6 +997,27 @@ __all__.append('BuildQueueFile')
 
 ################################################################################
 
+class BuildQueuePolicyFile(object):
+    """
+    BuildQueuePolicyFile represents a file in a build queue that comes from a
+    policy queue (and not a pool).
+    """
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    #@property
+    #def filename(self):
+    #    return self.file.filename
+
+    @property
+    def fullpath(self):
+        return os.path.join(self.build_queue.path, self.filename)
+
+__all__.append('BuildQueuePolicyFile')
+
+################################################################################
+
 class ChangePendingBinary(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -3080,6 +3172,7 @@ class DBConn(object):
             'binary_acl_map',
             'build_queue',
             'build_queue_files',
+            'build_queue_policy_files',
             'changelogs_text',
             'changes',
             'component',
@@ -3174,6 +3267,11 @@ class DBConn(object):
                properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
                                  poolfile = relation(PoolFile, backref='buildqueueinstances')))
 
+        mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
+               properties = dict(
+                build_queue = relation(BuildQueue, backref='policy_queue_files'),
+                file = relation(ChangePendingFile, lazy='joined')))
+
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
                                  package = self.tbl_binaries.c.package,
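
With this mapping in place, policy-queue files hang off a BuildQueue just as
pool files do. A hedged usage sketch (bq is an assumed BuildQueue instance):

    # Pool-backed entries (existing) and policy-queue-backed entries (new):
    for qf in bq.queuefiles:            # BuildQueueFile, via backref
        print qf.poolfile.filename
    for pf in bq.policy_queue_files:    # BuildQueuePolicyFile, via new backref
        print pf.filename, pf.file.filename  # .file is the joined ChangePendingFile
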
diff --git a/daklib/queue.py b/daklib/queue.py
index 4ea117b30883abaa2f04a3510e4dd3cdf08af03b..b652f844370f3409c8badd37df2ce86caa3d4998 100755
@@ -105,7 +105,7 @@ def get_type(f, session):
 
 # Determine what parts in a .changes are NEW
 
-def determine_new(filename, changes, files, warn=1, session = None, dsc = None, new = {}):
+def determine_new(filename, changes, files, warn=1, session = None, dsc = None, new = None):
     """
     Determine what parts in a C{changes} file are NEW.
 
@@ -134,6 +134,8 @@ def determine_new(filename, changes, files, warn=1, session = None, dsc = None,
     # TODO: This should all use the database instead of parsing the changes
     # file again
     byhand = {}
+    if new is None:
+        new = {}
 
     dbchg = get_dbchange(filename, session)
     if dbchg is None:
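
The signature change from "new = {}" to "new = None" fixes the classic
Python mutable-default pitfall: a default dict is created once, at function
definition time, so entries leak between calls. A minimal standalone
demonstration (not dak code):

    def broken(key, new={}):       # one shared dict for every call
        new[key] = True
        return new

    def fixed(key, new=None):      # fresh dict per call unless supplied
        if new is None:
            new = {}
        new[key] = True
        return new

    print broken('a'), broken('b')  # {'a': True} {'a': True, 'b': True}
    print fixed('a'), fixed('b')    # {'a': True} {'b': True}
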
diff --git a/daklib/queue_install.py b/daklib/queue_install.py
index b1c2f55e049a7deaab1e87a65b857c0437eb53d8..8878e55c12486437f88e95e26cf75c383150a331 100755
@@ -64,6 +64,14 @@ def package_to_queue(u, summary, short_summary, queue, chg, session, announce=No
     u.move_to_queue(queue)
     chg.in_queue_id = queue.policy_queue_id
     session.add(chg)
+
+    # send to build queues
+    if queue.send_to_build_queues:
+        for suite_name in u.pkg.changes["distribution"].keys():
+            suite = get_suite(suite_name, session)
+            for q in suite.copy_queues:
+                q.add_changes_from_policy_queue(queue, chg)
+
     session.commit()
 
     # Check for override disparities
@@ -126,11 +134,6 @@ def do_unembargo(u, summary, short_summary, chg, session=None):
     package_to_queue(u, summary, short_summary,
                      polq, chg, session,
                      announce=None)
-    for suite_name in u.pkg.changes["distribution"].keys():
-        suite = get_suite(suite_name, session)
-        for q in suite.copy_queues:
-            for f in u.pkg.files.keys():
-                copyfile(os.path.join(polq.path, f), os.path.join(q.path, f))
 #
 #################################################################################
 #
@@ -152,11 +155,6 @@ def do_embargo(u, summary, short_summary, chg, session=None):
     package_to_queue(u, summary, short_summary,
                      polq, chg, session,
                      announce=None)
-    for suite_name in u.pkg.changes["distribution"].keys():
-        suite = get_suite(suite_name, session)
-        for q in suite.copy_queues:
-            for f in u.pkg.files.keys():
-                copyfile(os.path.join(polq.path, f), os.path.join(q.path, f))
 
 ################################################################################
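
The net effect in queue_install.py: do_embargo and do_unembargo no longer
copy raw files into build queues behind the database's back. A sketch of
old versus new behaviour (comments only; queue and chg as in the hunks
above):

    # Old (removed): untracked copies straight into each build queue.
    #   copyfile(os.path.join(polq.path, f), os.path.join(q.path, f))
    # New: tracked copies with one build_queue_policy_files row per file,
    # so the stay-of-execution cleanup in dbconn.py can expire them later.
    q.add_changes_from_policy_queue(queue, chg)
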
 
diff --git a/scripts/debian/buildd-add-keys b/scripts/debian/buildd-add-keys
new file mode 100755
index 0000000..ddb56a4
--- /dev/null
@@ -0,0 +1,241 @@
+#!/bin/bash
+# No way I try to deal with a crippled sh just for POSIX foo.
+
+# Copyright (C) 2011 Joerg Jaspert <joerg@debian.org>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+# exit on errors
+set -e
+# make sure to only use defined variables
+set -u
+# ERR traps should be inherited from functions too.
+set -E
+
+# import the general variable set.
+export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
+. $SCRIPTVARS
+
+umask 027
+
+# And use one locale, no matter what the caller has set
+export LANG=C
+export LC_ALL=C
+PROGRAM="buildd-add-keys"
+
+# common functions are "outsourced"
+. "${configdir}/common"
+
+function cleanup() {
+    ERRVAL=$?
+    trap - ERR EXIT TERM HUP INT QUIT
+
+    for TEMPFILE in GPGSTATUS GPGLOGS GPGOUTF TEMPKEYDATA; do
+        TFILE=${TEMPFILE:=$TEMPFILE}
+        DELF=${!TFILE:-""}
+        if [ -n "${DELF}" ] && [ -f "${DELF}" ]; then
+            rm -f "${DELF}"
+        fi
+    done
+    exit $ERRVAL
+}
+trap cleanup ERR EXIT TERM HUP INT QUIT
+
+base="${base}/scripts/builddkeyrings"
+INCOMING="${base}/incoming"
+ERRORS="${base}/errors"
+ADMINS="${base}/adminkeys.gpg"
+
+# Default options for our gpg calls
+DEFGPGOPT="--no-default-keyring --batch --no-tty --no-options --exit-on-status-write-error --no-greeting"
+
+if ! [ -d "${INCOMING}" ]; then
+    log "Missing incoming dir, nothing to do"
+    exit 1
+fi
+
+# Whenever something goes wrong, it's put in there.
+mkdir -p "${ERRORS}"
+
+# We process all new files in our incoming directory
+for file in $(ls -1 ${INCOMING}/*.key); do
+    file=${file##*/}
+    # First we want to see if we recognize the filename. The buildd people have
+    # to follow a certain schema:
+    # architecture_builddname.YEAR-MONTH-DAY_HOUR:MINUTE.key
+    if [[ $file =~ (.*)_(.*).([0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}:[0-9]{2}).key ]]; then
+        ARCH=${BASH_REMATCH[1]}
+        BUILDD=${BASH_REMATCH[2]}
+        # Right now timestamp is unused
+        TIMESTAMP=${BASH_REMATCH[3]}
+    else
+        log "Unknown file ${file}, not processing"
+        mv "${INCOMING}/${file}" "${ERRORS}/unknown.${file}.$(date -Is)"
+        continue
+    fi
+
+    # Do we know the architecture?
+    found=0
+    for carch in ${archs}; do
+        if [ "${ARCH}" == "${carch}" ]; then
+            log "Known arch ${ARCH}, buildd ${BUILDD}"
+            found=1
+            break
+        fi
+    done
+
+    if [ ${found} -eq 0 ]; then
+        log "Unknown architecture ${ARCH}"
+        mv "${INCOMING}/${file}" "${ERRORS}/unknownarch.${file}.$(date -Is)"
+        continue
+    fi
+
+    # If we already had a file with this name, something's wrong
+    if [ -f "${base}/${ARCH}/${file}" ]; then
+        log "Already processed this file"
+        mv "${INCOMING}/${file}" "${ERRORS}/duplicate.${file}.$(date -Is)"
+        continue
+    fi
+
+    # Where we want the status-fd from gpg to turn up
+    GPGSTATUS=$(mktemp -p "${TMPDIR}" GPGSTATUS.XXXXXX)
+    # Same for the logger-fd
+    GPGLOGS=$(mktemp -p "${TMPDIR}" GPGLOGS.XXXXXX)
+    # And "decrypt" gives us output, the key without the pgp sig around it
+    GPGOUTF=$(mktemp -p "${TMPDIR}" GPGOUTF.XXXXXX)
+
+    # Open the filehandles, assigning them to the two files, so we can let gpg use them
+    exec 4> "${GPGSTATUS}"
+    exec 5> "${GPGLOGS}"
+
+    # So let's run gpg, status/logger into the two files, to "decrypt" the keyfile
+    if ! gpg ${DEFGPGOPT} --keyring "${ADMINS}" --status-fd 4 --logger-fd 5 --decrypt "${INCOMING}/${file}" > "${GPGOUTF}"; then
+        ret=$?
+        log "gpg returned with ${ret}, not adding key from file ${file}"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/gpgerror.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/gpgerror.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/gpgerror.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # Read in the status output
+    GPGSTAT=$(cat "${GPGSTATUS}")
+    # And check if we like the sig. It has to have both GOODSIG and VALIDSIG or we don't accept it
+    if [[ ${GPGSTAT} =~ "GOODSIG" ]] && [[ ${GPGSTAT} =~ "VALIDSIG" ]]; then
+        log "Signature for ${file} accepted"
+    else
+        log "We are missing one of GOODSIG or VALIDSIG"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/badsig.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/badsig.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/badsig.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # So at this point we know we accepted the signature of the file as valid,
+    # that is, it is from a key allowed for this architecture. Which only
+    # leaves us with the task of checking if the key fulfills the requirements
+    # before we add it to the architectures keyring.
+
+    # Those currently are:
+    # - keysize 4096 or larger
+    # - RSA key, no encryption capability
+    # - UID matching "buildd autosigning key BUILDDNAME <buildd_ARCH-BUILDDNAME@buildd.debian.org>"
+    # - expires within 120 days
+    # - maximum 2 keys per architecture and buildd
+
+    TEMPKEYDATA=$(mktemp -p "${TMPDIR}" BDKEYS.XXXXXX)
+
+    gpg ${DEFGPGOPT} --with-colons "${GPGOUTF}" > "${TEMPKEYDATA}"
+
+    # Read in the TEMPKEYDATA file, avoiding the subshell that a plain
+    # "while read" pipeline otherwise would create
+    exec 4<> "${TEMPKEYDATA}"
+    error=""
+    while read line <&4; do
+        #pub:-:4096:1:FAB983612A6554FA:2011-03-24:2011-07-22::-:buildd autosigning key poulenc <buildd_powerpc-poulenc@buildd.debian.org>:
+
+        # Besides fiddling out the data we need to check later, this regex also checks:
+        # - the keytype (:1:, 1 there means RSA)
+        # - the UID
+        # - that the key does have an expiration date (or it won't match; the
+        #   second date field would be empty)
+        regex="^pub:-:([0-9]{4}):1:([0-9A-F]{16}):([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})::-:buildd autosigning key ${BUILDD} <buildd_${ARCH}-${BUILDD}@buildd.debian.org>:$"
+        if [[ $line =~ $regex ]]; then
+            KEYSIZE=${BASH_REMATCH[1]}
+            KEYID=${BASH_REMATCH[2]}
+            KEYCREATE=${BASH_REMATCH[3]}
+            KEYEXPIRE=${BASH_REMATCH[4]}
+
+            # We do want 4096 or anything above
+            if [ ${KEYSIZE} -lt 4096 ]; then
+                log "Keysize ${KEYSIZE} too small"
+                error="${error} Keysize ${KEYSIZE} too small"
+                continue
+            fi
+
+            # We want a maximum lifetime of 120 days, so check that.
+            # Easiest to compare in epoch: 120 days from now at midnight,
+            # compared with their expiration date at midnight; maxdate
+            # should turn out higher. Just in case, we use 121 for this check.
+            maxdate=$(date -d '121 day 00:00:00' +%s)
+            theirexpire=$(date -d "${KEYEXPIRE} 00:00:00" +%s)
+            if [ ${theirexpire} -gt ${maxdate} ]; then
+                log "Key expiry ${KEYEXPIRE} wrong"
+                error="${error} Key expiry ${KEYEXPIRE} wrong"
+                continue
+            fi
+        else
+            log "Unknown line $line, sod off"
+            error="${error} Unknown line $line, sod off"
+            continue
+        fi
+    done
+    if [ -n "${error}" ]; then
+        log ${error}
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/badkey.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/badkey.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/badkey.${file}.gpglogs.${DATE}"
+        echo "${error}" >> "${ERRORS}/badkey.${file}.error.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # And now let's check how many keys this buildd already has. 2 is the
+    # maximum, so key rollover works; with 3 it won't, and they have to
+    # remove one first.
+    ARCHKEYRING="${base}/${ARCH}/keyring.gpg"
+
+    KEYNO=$(gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --with-colons --list-keys "buildd_${ARCH}-${BUILDD}@buildd.debian.org" | grep -c '^pub:')
+    if [ ${KEYNO} -gt 2 ]; then
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/toomany.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/toomany.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/toomany.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # Right. At this point everything should be in order, which means we should put the key into
+    # the keyring
+    log "Accepting key ${KEYID} for ${ARCH} buildd ${BUILDD}, expire ${KEYEXPIRE}"
+    gpg ${DEFGPGOPT} --status-fd 4 --logger-fd 5 --keyring "${ARCHKEYRING}" --import "${GPGOUTF}" 2>/dev/null
+
+    mv "${INCOMING}/${file}" "${base}/${ARCH}"
+done
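
For reference, the filename schema the script enforces, as a hedged Python
equivalent of the bash pattern (the bash version leaves its dots unescaped,
so it is slightly more permissive than this):

    import re

    # architecture_builddname.YEAR-MONTH-DAY_HOUR:MINUTE.key
    KEYFILE = re.compile(
        r'^(?P<arch>.*)_(?P<buildd>.*)\.'
        r'(?P<stamp>\d{4}-\d{2}-\d{2}_\d{2}:\d{2})\.key$')

    m = KEYFILE.match('powerpc_poulenc.2011-03-24_15:30.key')
    if m:
        print m.group('arch'), m.group('buildd'), m.group('stamp')
        # powerpc poulenc 2011-03-24_15:30
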
diff --git a/scripts/debian/buildd-prepare-dir b/scripts/debian/buildd-prepare-dir
new file mode 100755
index 0000000..e0f6053
--- /dev/null
@@ -0,0 +1,62 @@
+#!/bin/bash
+# No way I try to deal with a crippled sh just for POSIX foo.
+
+# Copyright (C) 2011 Joerg Jaspert <joerg@debian.org>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+# exit on errors
+set -e
+# make sure to only use defined variables
+set -u
+# ERR traps should be inherited from functions too.
+set -E
+
+# import the general variable set.
+export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
+. $SCRIPTVARS
+
+umask 027
+
+# And use one locale, no matter what the caller has set
+export LANG=C
+export LC_ALL=C
+PROGRAM="buildd-prepare-dir"
+
+# common functions are "outsourced"
+. "${configdir}/common"
+
+# should be relative to the general base dir later
+COPYTARGET="${base}/keyrings"
+base="${base}/scripts/builddkeyrings"
+TARGET="${base}/keyrings"
+REMOVED="${base}/removed-buildd-keys.gpg"
+
+mkdir -p "${TARGET}/keyrings"
+
+for arch in $archs; do
+    if [ -f ${base}/${arch}/keyring.gpg ]; then
+        cp -al ${base}/${arch}/keyring.gpg ${TARGET}/keyrings/buildd-${arch}-keyring.gpg
+        chmod 0644 ${TARGET}/keyrings/buildd-${arch}-keyring.gpg
+    fi
+done
+
+cd ${TARGET}
+sha512sum keyrings/* > sha512sums
+
+rm -f ${TARGET}/sha512sums.txt
+SIGNINGKEY=$(dak admin c signingkeyids)
+gpg --no-options --batch --no-tty --armour --default-key ${SIGNINGKEY} --clearsign -o "${TARGET}/sha512sums.txt" "${TARGET}/sha512sums"
+rm -f ${TARGET}/sha512sums
diff --git a/scripts/debian/buildd-remove-keys b/scripts/debian/buildd-remove-keys
new file mode 100755
index 0000000..c07ff04
--- /dev/null
@@ -0,0 +1,196 @@
+#!/bin/bash
+# No way I try to deal with a crippled sh just for POSIX foo.
+
+# Copyright (C) 2011 Joerg Jaspert <joerg@debian.org>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+# exit on errors
+set -e
+# make sure to only use defined variables
+set -u
+# ERR traps should be inherited from functions too.
+set -E
+
+# import the general variable set.
+export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
+. $SCRIPTVARS
+
+umask 027
+
+# And use one locale, no matter what the caller has set
+export LANG=C
+export LC_ALL=C
+PROGRAM="buildd-remove-keys"
+
+# common functions are "outsourced"
+. "${configdir}/common"
+
+function cleanup() {
+    ERRVAL=$?
+    trap - ERR EXIT TERM HUP INT QUIT
+
+    for TEMPFILE in GPGSTATUS GPGLOGS GPGOUTF TEMPKEYDATA; do
+        TFILE=${TEMPFILE:=$TEMPFILE}
+        DELF=${!TFILE:-""}
+        if [ -n "${DELF}" ] && [ -f "${DELF}" ]; then
+            rm -f "${DELF}"
+        fi
+    done
+    exit $ERRVAL
+}
+trap cleanup ERR EXIT TERM HUP INT QUIT
+
+base="${base}/scripts/builddkeyrings"
+INCOMING="${base}/incoming"
+ERRORS="${base}/errors"
+ADMINS="${base}/adminkeys.gpg"
+REMOVED="${base}/removed-buildd-keys.gpg"
+
+# Default options for our gpg calls
+DEFGPGOPT="--no-default-keyring --batch --no-tty --no-options --exit-on-status-write-error --no-greeting"
+
+if ! [ -d "${INCOMING}" ]; then
+    log "Missing incoming dir, nothing to do"
+    exit 1
+fi
+
+# Whenever something goes wrong, it's put in there.
+mkdir -p "${ERRORS}"
+
+# We process all new files in our incoming directory
+for file in $(ls -1 ${INCOMING}/*.del ); do
+    file=${file##*/}
+    # First we want to see if we recognize the filename. The buildd people have
+    # to follow a certain schema:
+    # architecture_builddname.YEAR-MONTH-DAY_HOUR:MINUTE.del
+    if [[ $file =~ (.*)_(.*).([0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}:[0-9]{2}).del ]]; then
+        ARCH=${BASH_REMATCH[1]}
+        BUILDD=${BASH_REMATCH[2]}
+        # Right now timestamp is unused
+        TIMESTAMP=${BASH_REMATCH[3]}
+    else
+        log "Unknown file ${file}, not processing"
+        mv "${INCOMING}/${file}" "${ERRORS}/unknown.${file}.$(date -Is)"
+        continue
+    fi
+
+    # Do we know the architecture?
+    found=0
+    for carch in ${archs}; do
+        if [ "${ARCH}" == "${carch}" ]; then
+            log "Known arch ${ARCH}, buildd ${BUILDD}"
+            found=1
+            break
+        fi
+    done
+
+    if [ ${found} -eq 0 ]; then
+        log "Unknown architecture ${ARCH}"
+        mv "${INCOMING}/${file}" "${ERRORS}/unknownarch.${file}.$(date -Is)"
+        continue
+    fi
+
+    # If we already had a file with this name, something's wrong
+    if [ -f "${base}/${ARCH}/${file}" ]; then
+        log "Already processed this file"
+        mv "${INCOMING}/${file}" "${ERRORS}/duplicate.${file}.$(date -Is)"
+        continue
+    fi
+
+    # Where we want the status-fd from gpg to turn up
+    GPGSTATUS=$(mktemp -p "${TMPDIR}" GPGSTATUS.XXXXXX)
+    # Same for the logger-fd
+    GPGLOGS=$(mktemp -p "${TMPDIR}" GPGLOGS.XXXXXX)
+    # And "decrypt" gives us output, the key without the pgp sig around it
+    GPGOUTF=$(mktemp -p "${TMPDIR}" GPGOUTF.XXXXXX)
+
+    # Open the filehandles, assigning them to the two files, so we can let gpg use them
+    exec 4> "${GPGSTATUS}"
+    exec 5> "${GPGLOGS}"
+
+    # So let's run gpg, status/logger into the two files, to "decrypt" the keyfile
+    if ! gpg ${DEFGPGOPT} --keyring "${ADMINS}" --status-fd 4 --logger-fd 5 --decrypt "${INCOMING}/${file}" > "${GPGOUTF}"; then
+        ret=$?
+        log "gpg returned with ${ret}, not removing key using ${file}"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/gpgerror.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/gpgerror.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/gpgerror.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # Read in the status output
+    GPGSTAT=$(cat "${GPGSTATUS}")
+    # And check if we like the sig. It has to have both GOODSIG and VALIDSIG or we don't accept it
+    if [[ ${GPGSTAT} =~ "GOODSIG" ]] && [[ ${GPGSTAT} =~ "VALIDSIG" ]]; then
+        log "Signature for ${file} accepted"
+    else
+        log "We are missing one of GOODSIG or VALIDSIG"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/badsig.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/badsig.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/badsig.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # So at this point we know we accepted the signature of the file as valid,
+    # that is, it is from a key allowed for this architecture. Which only
+    # leaves us with the task of checking if there is a key to remove, and
+    # then removing it. We won't even check they have a key left, so if they
+    # want to they can
+    # empty out the set for an architecture
+
+    # Read in the GPGOUTF, avoiding the subshell that a plain
+    # "while read" pipeline otherwise would create
+    exec 4<> "${GPGOUTF}"
+    error=""
+    while read line <&4; do
+        if [[ $line =~ key:.([0-9A-F]{16}) ]]; then
+            KEYID=${BASH_REMATCH[1]}
+        elif [[ $line =~ comment:.(.*) ]]; then
+            COMMENT=${BASH_REMATCH[1]}
+        else
+            echo "Nay"
+        fi
+    done
+
+    # Right, we have the keyid, know the arch, lets see if we can remove it
+    ARCHKEYRING="${base}/${ARCH}/keyring.gpg"
+
+    # Is the key in there?
+    KEYNO=$(gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --with-colons --list-keys ${KEYID} | grep -c '^pub:')
+
+    if [ $KEYNO -eq 1 ]; then
+        # Right, exactly one there, lets get rid of it
+        # So put it into the removed keyring
+        gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --export ${KEYID} | gpg ${DEFGPGOPT} --keyring "${REMOVED}" --import 2>/dev/null
+        if gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --yes --delete-keys ${KEYID}; then
+            log "Removed key ${KEYID}, reason: ${COMMENT}"
+            mv "${INCOMING}/${file}" "${base}/${ARCH}"
+            continue
+        fi
+    else
+        log "Found more (or less) than one key I could delete. Not doing anything"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/toomanykeys.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/toomanykeys.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/toomanykeys.${file}.gpglogs.${DATE}"
+        echo "${error}" >> "${ERRORS}/toomanykeys.${file}.error.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+done
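
The read loop above implies a small control-file format inside the signed
.del file: a "key:" line carrying the 16-hex-digit key ID and a "comment:"
line giving the removal reason. A hedged Python sketch of a parser for the
same format (the example key ID is made up):

    import re

    def parse_del(text):
        # Extract (keyid, comment) from a decrypted .del body (sketch).
        keyid = comment = None
        for line in text.splitlines():
            m = re.match(r'key:.([0-9A-F]{16})$', line)
            if m:
                keyid = m.group(1)
            m = re.match(r'comment:.(.*)', line)
            if m:
                comment = m.group(1)
        return keyid, comment

    print parse_del("key: DEADBEEFDEADBEEF\ncomment: machine decommissioned")
    # ('DEADBEEFDEADBEEF', 'machine decommissioned')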