From: Joerg Jaspert
Date: Sat, 26 Mar 2011 17:25:26 +0000 (+0100)
Subject: Merge remote-tracking branch 'ansgar/p-s-from-db' into merge
X-Git-Url: https://git.decadent.org.uk/gitweb/?a=commitdiff_plain;h=e8ef4a21ceff20319cde5002cb562ae05d7622c9;hp=a36297dc834fadd8c0e21d362c64df29c60aeb31;p=dak.git

Merge remote-tracking branch 'ansgar/p-s-from-db' into merge

* ansgar/p-s-from-db:
  show-new: Use dakmultiprocessing
  dakmultiprocessing: close all sessions
  Add missing directories in filewriter.py.
  don't have GOODSIG in the output
  mention who submitted the key
  wtf? i hate gpg
  change to using the debian buildd host list
  Fix unicode issue in show-deferred.
  generate-packages-sources2: Use new filewriter
  dakmultiprocessing: Add a helpful comment
  generate-packages-sources2: Use dakmultiprocessing
  generate-filelist: Use dakmultiprocessing
  Add a simple multiprocessing wrapper to catch exceptions

Signed-off-by: Joerg Jaspert
---

diff --git a/config/debian/apache.conf-incoming b/config/debian/apache.conf-incoming
index c1adf761..2181fe17 100644
--- a/config/debian/apache.conf-incoming
+++ b/config/debian/apache.conf-incoming
@@ -22,164 +22,105 @@
     Order allow,deny
-    # buildd.d.o, cimarosa
+
+    Use DebianBuilddHostList
+
+    # buildd.d.o, cimarosa
     allow from 206.12.19.8
-    # franck.d.o
+
+    # franck.d.o
     allow from 128.148.34.3
-    # test access to check functionality, ganneff
+
+    # test access to check functionality, ganneff
     allow from 213.146.108.162
-    # alpha
-    # goetz
-    allow from 193.62.202.26
-    # goedel (temporarily allow two addresses; see RT#1287)
-    allow from 213.188.99.214
-    allow from 213.188.99.208
-    # amd64
-    # barber
-    allow from 194.177.211.203
-    allow from 2001:648:2ffc:deb:214:22ff:feb2:2370
-    # brahms
-    Allow from 206.12.19.115
+
+    # Should be in DSA list
+    # amd64
     # vitry (archive rebuild)
     allow from 194.177.211.206
     allow from 2001:648:2ffc:deb:214:22ff:feb2:122c
     # krenek (archive rebuild)
     allow from 194.177.211.207
     allow from 2001:648:2ffc:deb:214:22ff:feb1:ff56
-    # arm
-    # netwinder
+
+    # Known Extras
+
+    # No idea about
+    # arm
+    ## netwinder
     allow from 192.133.104.24
-    #
+    ##
     allow from 217.147.81.26
-    # toffee
+    ## toffee
     allow from 78.32.9.218
-    #
+    ##
     allow from 86.3.74.169
-    # nw1.xandros
+    ## nw1.xandros
     allow from 67.210.160.89
-    # nw2.xandros
+    ## nw2.xandros
     allow from 67.210.160.90
-    # hdges.billgatliff
+    ## hdges.billgatliff
     allow from 209.251.101.204
-    # armel
-    # arcadelt
-    allow from 82.195.75.87
-    # argento
-    allow from 93.94.130.160
-    # allegri
+
+    # armel
+    ## allegri
     allow from 157.193.39.233
-    # ancina
-    allow from 157.193.39.13
-    # arnold
-    allow from 217.140.96.57
-    # alain
-    allow from 217.140.96.58
-    # alwyn
-    allow from 217.140.96.59
-    # antheil
-    allow from 217.140.96.60
-    # hppa
-    # sarti
-    allow from 193.201.200.199
-    # bld3.mmjgroup
+
+    # hppa
+    ## bld3.mmjgroup
     allow from 192.25.206.243
-    # peri
-    allow from 192.25.206.15
-    #
-    allow from 192.25.206.68
-    # lafayette
-    allow from 147.215.7.160
-    # paer
+    ## paer
     allow from 192.25.206.11
-    # hurd-i386
-    # rossini (NOT .debian.org)
+
+    # hurd-i386
+    ## rossini (NOT .debian.org)
     allow from 192.33.98.55
-    # back / mozart (xen domains; NOT .debian.org)
+    ## back / mozart (xen domains; NOT .debian.org)
     allow from 80.87.129.151
-    # i386
-    # murphy
-    Allow from 70.103.162.31
-    # biber
-    allow from 194.177.211.204
-    allow from 2001:648:2ffc:deb:214:22ff:feb2:1268
-    # ia64
-    # caballero
-    allow from 193.201.200.200
-    # mundi
+
+    # ia64
+    ## mundi
     allow from 192.25.206.62
-    # alkman
-    allow from 192.25.206.63
-    # mips
-    #
+
+    # mips
+    ##
     allow from 217.147.81.21
-    # ball
-    allow from 82.195.75.70
-    allow from 2001:41b8:202:deb:202:4cff:fefe:d09
-    # mayr
-    allow from 140.211.166.58
-    # sigrun, aba
+    ## sigrun, aba
     allow from 82.195.75.68
     allow from 2001:41b8:202:deb:a00:69ff:fe08:30c6
-    # corelli
-    allow from 206.12.19.16
-    # lucatelli
-    allow from 206.12.19.15
-    # mipsel
-    # rem
-    allow from 82.195.75.68
-    allow from 2001:41b8:202:deb:202:4cff:fefe:d06
-    # mayer
-    allow from 140.211.166.78
-    # monteverdi
+
+    # mipsel
+    ## monteverdi
     allow from 78.47.2.111
-    # kritias, aba
+    ## kritias, aba
     allow from 78.46.213.163
-    # powerpc
-    # static-72-66-115-54.washdc.fios.verizon.net
-    allow from 72.66.115.54
-    # praetorius
-    allow from 130.239.18.121
-    # poulenc
-    allow from 144.32.168.77
-    # porpora
-    allow from 144.32.168.78
-    # s390
-    # debian01.zseries
+
+    # s390
+    ## debian01.zseries
     allow from 195.243.109.161
-    # l003092.zseriespenguins.ihost.com
+    ## l003092.zseriespenguins.ihost.com
     allow from 32.97.40.46
-    #
+    ##
     allow from 148.100.96.45
-    #
+    ##
     allow from 148.100.96.52
-    # lxdebian.bfinv
+    ## lxdebian.bfinv
     allow from 80.245.147.60
-    # zandonai
-    allow from 80.245.147.46
-    # sparc
-    # spontini
-    allow from 206.12.19.14
-    # lebrun
-    allow from 193.198.184.10
-    # schroeder
-    allow from 193.198.184.11
-    # titan.ayous.org ('non-standard' buildd; contact HE)
+
+    # sparc
+    ## titan.ayous.org ('non-standard' buildd; contact HE)
     allow from 82.195.75.33
-    # kfreebsd
-    # amd64
-    # fasch
-    allow from 194.177.211.201
-    # fano
-    allow from 206.12.19.110
-    # i386
+
+    # kfreebsd
+    ## i386
     # himalai1, ganymede1
     allow from 129.175.22.65
-    # field
-    allow from 194.177.211.210
-    # luchesi
-    # Password based due to being KVM instance
-    # allow from 137.82.84.78
-# dynamics use password auth
+    ## luchesi
+    ## Password based due to being KVM instance
+    ## allow from 137.82.84.78
+
+    # Dynamics use password auth
+
     AuthType Basic
     AuthName "incoming.debian.org"
     AuthUserFile /srv/incoming.debian.org/htpasswd

diff --git a/config/homedir/syncdd.sh b/config/homedir/syncdd.sh
index 9260e764..3f6629c7 100755
--- a/config/homedir/syncdd.sh
+++ b/config/homedir/syncdd.sh
@@ -77,7 +77,7 @@ if lockfile -r3 ${HOME}/sync.lock; then
         rsync -aH -B8192 \
             --exclude backup/*.xz \
             --exclude backup/dump* \
-            --exclude database/*.db \
+            --exclude database/\*.db \
             ${EXTRA} \
             --exclude mirror \
             --exclude morgue/ \
diff --git a/dak/generate_filelist.py b/dak/generate_filelist.py
index 2a6d218b..2d2e9499 100755
--- a/dak/generate_filelist.py
+++ b/dak/generate_filelist.py
@@ -39,7 +39,7 @@ Generate file lists for apt-ftparchive.
 from daklib.dbconn import *
 from daklib.config import Config
 from daklib import utils, daklog
-from multiprocessing import Pool
+from daklib.dakmultiprocessing import Pool
 import apt_pkg, os, stat, sys
 
 from daklib.lists import getSources, getBinaries, getArchAll
@@ -72,11 +72,13 @@ def writeSourceList(suite_id, component_id, incremental_mode):
     (file, timestamp) = listPath(suite, component,
             incremental_mode = incremental_mode)
 
+    message = "sources list for %s %s" % (suite.suite_name, component.component_name)
+
     for _, filename in getSources(suite, component, session, timestamp):
         file.write(filename + '\n')
-    session.close()
+    session.rollback()
     file.close()
-    return "sources list for %s %s" % (suite.suite_name, component.component_name)
+    return message
 
 def writeAllList(suite_id, component_id, architecture_id, type, incremental_mode):
     session = DBConn().session()
@@ -86,12 +88,14 @@ def writeAllList(suite_id, component_id, architecture_id, type, incremental_mode
     (file, timestamp) = listPath(suite, component, architecture, type,
             incremental_mode)
 
+    message = "all list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+
     for _, filename in getArchAll(suite, component, architecture, type, session, timestamp):
         file.write(filename + '\n')
-    session.close()
+    session.rollback()
     file.close()
-    return "all list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+    return message
 
 def writeBinaryList(suite_id, component_id, architecture_id, type, incremental_mode):
     session = DBConn().session()
@@ -101,12 +105,14 @@ def writeBinaryList(suite_id, component_id, architecture_id, type, incremental_m
     (file, timestamp) = listPath(suite, component, architecture, type,
             incremental_mode)
 
+    message = "binary list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+
     for _, filename in getBinaries(suite, component, architecture, type, session, timestamp):
         file.write(filename + '\n')
-    session.close()
+    session.rollback()
    file.close()
-    return "binary list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+    return message
 
 def usage():
     print """Usage: dak generate_filelist [OPTIONS]
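The same reshuffle appears in all three list writers above: the log message is built while the ORM objects are still live, and the read-only transaction is then released with rollback() instead of close(), since the dakmultiprocessing wrapper (further down) closes all sessions in the worker process anyway. A minimal sketch of the idea, using DBConn/Suite from daklib.dbconn; the function body here is invented for illustration:

    def write_list(suite_id):
        session = DBConn().session()
        suite = session.query(Suite).get(suite_id)
        # touch the attributes we need *before* rollback() expires the instances
        message = "sources list for %s" % suite.suite_name
        # ... write the file here ...
        session.rollback()   # release the transaction; close_all() runs in the worker wrapper
        return message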
diff --git a/dak/generate_packages_sources2.py b/dak/generate_packages_sources2.py
index 8d8a8aef..ea3c0e53 100755
--- a/dak/generate_packages_sources2.py
+++ b/dak/generate_packages_sources2.py
@@ -31,7 +31,9 @@ Generate Packages/Sources files
 from daklib.dbconn import *
 from daklib.config import Config
 from daklib import utils, daklog
-from multiprocessing import Pool
+from daklib.dakmultiprocessing import Pool
+from daklib.filewriter import PackagesFileWriter, SourcesFileWriter
+
 import apt_pkg, os, stat, sys
 
 def usage():
@@ -93,23 +95,6 @@
 ORDER BY s.source, s.version
 """
 
-def open_sources(suite, component):
-    cnf = Config()
-    dest = os.path.join(cnf['Dir::Root'], 'dists', suite.suite_name, component.component_name, 'source', 'Sources')
-
-    # create queue if it does not exist yet
-    if os.path.exists(dest) and os.path.isdir(dest):
-        dest_dir = dest
-    else:
-        dest_dir = os.path.dirname(dest)
-    if not os.path.exists(dest_dir):
-        umask = os.umask(00000)
-        os.makedirs(dest_dir, 02775)
-        os.umask(umask)
-
-    f = open(dest, 'w')
-    return f
-
 def generate_sources(suite_id, component_id):
     global _sources_query
@@ -119,7 +104,8 @@
     suite = session.query(Suite).get(suite_id)
     component = session.query(Component).get(component_id)
 
-    output = open_sources(suite, component)
+    writer = SourcesFileWriter(suite=suite.suite_name, component=component.component_name)
+    output = writer.open()
 
     # run query and write Sources
     r = session.execute(_sources_query, {"suite": suite_id, "component": component_id, "dsc_type": dsc_type})
@@ -127,7 +113,11 @@
         print >>output, stanza
         print >>output, ""
 
-    return ["generate sources", suite.suite_name, component.component_name]
+    writer.close()
+
+    message = ["generate sources", suite.suite_name, component.component_name]
+    session.rollback()
+    return message
 
 #############################################################################
@@ -198,26 +188,6 @@ WHERE
 ORDER BY tmp.package, tmp.version
 """
 
-def open_packages(suite, component, architecture, type_name):
-    cnf = Config()
-    if type_name == 'udeb':
-        dest = os.path.join(cnf['Dir::Root'], 'dists', suite.suite_name, component.component_name, 'debian-installer', 'binary-%s' % architecture.arch_string, 'Packages')
-    else:
-        dest = os.path.join(cnf['Dir::Root'], 'dists', suite.suite_name, component.component_name, 'binary-%s' % architecture.arch_string, 'Packages')
-
-    # create queue if it does not exist yet
-    if os.path.exists(dest) and os.path.isdir(dest):
-        dest_dir = dest
-    else:
-        dest_dir = os.path.dirname(dest)
-    if not os.path.exists(dest_dir):
-        umask = os.umask(00000)
-        os.makedirs(dest_dir, 02775)
-        os.umask(umask)
-
-    f = open(dest, 'w')
-    return f
-
 def generate_packages(suite_id, component_id, architecture_id, type_name):
     global _packages_query
@@ -229,7 +199,9 @@
     component = session.query(Component).get(component_id)
     architecture = session.query(Architecture).get(architecture_id)
 
-    output = open_packages(suite, component, architecture, type_name)
+    writer = PackagesFileWriter(suite=suite.suite_name, component=component.component_name,
+            architecture=architecture.arch_string, debtype=type_name)
+    output = writer.open()
 
     r = session.execute(_packages_query, {"suite": suite_id, "component": component_id, "arch": architecture_id, "type_id": type_id, "type_name": type_name, "arch_all": arch_all_id})
@@ -237,9 +209,11 @@
         print >>output, stanza
         print >>output, ""
 
-    session.close()
+    writer.close()
 
-    return ["generate-packages", suite.suite_name, component.component_name, architecture.arch_string]
+    message = ["generate-packages", suite.suite_name, component.component_name, architecture.arch_string]
+    session.rollback()
+    return message
 
 #############################################################################
@@ -289,9 +263,8 @@ def main():
         for c in component_ids:
             pool.apply_async(generate_sources, [s.suite_id, c], callback=log)
             for a in s.architectures:
-                #pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'deb'], callback=log)
-                apply(generate_packages, [s.suite_id, c, a.arch_id, 'deb'])
-                #pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'udeb'], callback=log)
+                pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'deb'], callback=log)
+                pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'udeb'], callback=log)
     pool.close()
     pool.join()
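The two open_* helpers fall away because the new writer classes derive the output path themselves and, with the filewriter change below, create missing directories on open(). Going only by the calls visible in the hunks above (constructor keywords, open(), close()), usage looks roughly like this; the keyword values are made-up examples and the close() comment is an assumption about the writer's job, not confirmed by this diff:

    from daklib.filewriter import PackagesFileWriter

    writer = PackagesFileWriter(suite='unstable', component='main',
                                architecture='amd64', debtype='deb')
    output = writer.open()          # file object backed by '<path>.new'
    print >>output, "Package: example"
    writer.close()                  # presumably moves the .new file into place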
diff --git a/dak/show_deferred.py b/dak/show_deferred.py
index 6307bd38..dce80987 100755
--- a/dak/show_deferred.py
+++ b/dak/show_deferred.py
@@ -242,8 +242,9 @@ def list_uploads(filelist, rrd_dir):
 Delayed-Until: %s
 Delay-Remaining: %s"""%(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(time.time()+u[0])),u[2])
             print >> f, fields
-            print >> f, str(u[5]).rstrip()
-            open(os.path.join(Cnf["Show-Deferred::LinkPath"],u[1]),"w").write(str(u[5])+fields+'\n')
+            encoded = unicode(u[5]).encode('utf-8')
+            print >> f, encoded.rstrip()
+            open(os.path.join(Cnf["Show-Deferred::LinkPath"],u[1]),"w").write(encoded+fields+'\n')
             print >> f
         f.close()
         os.rename(os.path.join(Cnf["Show-Deferred::LinkPath"],'.status.tmp'),
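This is the "unicode issue in show-deferred" from the merge list: str() on a unicode object raises UnicodeEncodeError as soon as the data contains non-ASCII, because Python 2 implicitly encodes with the ascii codec. Encoding to UTF-8 once yields a plain str that is safe for both output files. The pattern in isolation, with an invented sample value:

    # Python 2: encode once at the output boundary, then treat the result as bytes.
    data = u'Changed-By: J\xf6rg Jaspert'      # hypothetical unicode field
    encoded = unicode(data).encode('utf-8')    # always a str from here on
    print encoded.rstrip()
    open('/tmp/example.changes', 'w').write(encoded + '\n')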
diff --git a/dak/show_new.py b/dak/show_new.py
index 8405aeef..84b45077 100755
--- a/dak/show_new.py
+++ b/dak/show_new.py
@@ -37,7 +37,7 @@ from daklib.regexes import re_source_ext
 from daklib.config import Config
 from daklib import daklog
 from daklib.changesutils import *
-from daklib.threadpool import ThreadPool
+from daklib.dakmultiprocessing import Pool
 
 # Globals
 Cnf = None
@@ -250,14 +250,15 @@
 
     examine_package.use_html=1
 
-    threadpool = ThreadPool()
+    pool = Pool()
     for changes_file in changes_files:
         changes_file = utils.validate_changes_file_arg(changes_file, 0)
         if not changes_file:
             continue
         print "\n" + changes_file
-        threadpool.queueTask(do_pkg, changes_file)
-    threadpool.joinAll()
+        pool.apply_async(do_pkg, (changes_file,))
+    pool.close()
+    pool.join()
 
     files = set(os.listdir(cnf["Show-New::HTMLPath"]))
     to_delete = filter(lambda x: x.endswith(".html"), files.difference(sources))

diff --git a/daklib/dakmultiprocessing.py b/daklib/dakmultiprocessing.py
new file mode 100644
index 00000000..ded81a29
--- /dev/null
+++ b/daklib/dakmultiprocessing.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# vim:set et sw=4:
+
+"""
+multiprocessing for DAK
+
+@contact: Debian FTP Master
+@copyright: 2011 Ansgar Burchardt
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+###############################################################################
+
+import multiprocessing
+import sqlalchemy.orm.session
+
+def _func_wrapper(func, *args, **kwds):
+    try:
+        return func(*args, **kwds)
+    finally:
+        # Make sure connections are closed. We might die otherwise.
+        sqlalchemy.orm.session.Session.close_all()
+
+class Pool():
+    def __init__(self, *args, **kwds):
+        self.pool = multiprocessing.Pool(*args, **kwds)
+        self.results = []
+
+    def apply_async(self, func, args=(), kwds={}, callback=None):
+        wrapper_args = list(args)
+        wrapper_args.insert(0, func)
+        self.results.append(self.pool.apply_async(_func_wrapper, wrapper_args, kwds, callback))
+
+    def close(self):
+        self.pool.close()
+
+    def join(self):
+        self.pool.join()
+        for r in self.results:
+            # return values were already handled in the callbacks, but asking
+            # for them might raise exceptions which would otherwise be lost
+            r.get()
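The wrapper addresses the two problems its comments name: each worker ends with Session.close_all() so no SQLAlchemy connection survives into a dying child process, and join() calls get() on every AsyncResult so worker exceptions resurface instead of being silently dropped, as plain multiprocessing.Pool would do. A sketch of the observable behaviour, with an invented worker function:

    from daklib.dakmultiprocessing import Pool

    def worker(n):
        if n == 3:
            raise ValueError('broken input %d' % n)
        return n * n

    pool = Pool()
    for n in range(5):
        pool.apply_async(worker, (n,))
    pool.close()
    pool.join()   # re-raises the worker's ValueError here, via r.get()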
diff --git a/daklib/filewriter.py b/daklib/filewriter.py
index a3c16ea8..8907fa6d 100755
--- a/daklib/filewriter.py
+++ b/daklib/filewriter.py
@@ -54,6 +54,11 @@ class BaseFileWriter(object):
         '''
         Returns a file object for writing.
         '''
+        # create missing directories
+        try:
+            os.makedirs(os.path.dirname(self.path))
+        except:
+            pass
         self.file = open(self.path + '.new', 'w')
         return self.file
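The bare except: pass covers the race where a parallel worker creates the directory first, but it also swallows real failures such as a permission error. A narrower variant — not what this commit uses, just the usual Python 2 idiom — would check the errno:

    import errno
    import os

    def makedirs_exist_ok(path):
        # create missing directories, ignoring only 'already exists'
        try:
            os.makedirs(path)
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise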
diff --git a/scripts/debian/buildd-add-keys b/scripts/debian/buildd-add-keys
index 1283838f..26fc1f3f 100755
--- a/scripts/debian/buildd-add-keys
+++ b/scripts/debian/buildd-add-keys
@@ -177,55 +177,82 @@ for file in ${KEYS}; do
     # Read in the TEMPKEYDATAFILE, but avoid using a subshell like a
     # while read line otherwise would do
     exec 4<> "${TEMPKEYDATA}"
-    error=""
+    KEYUID=""
+    #pub:-:4096:1:FAB983612A6554FA:2011-03-24:2011-07-22::-:buildd autosigning key poulenc :
+
+    # Of course this sucky gpg crapshit of an "interface" does give you different things depending on how people
+    # created their keys. And of course the buildd people created the test keys differently to what they now do
+    # which just means extra work for nothing. So as they now do other steps, the thing we get back suddenly looks like
+
+    #pub:-:4096:1:99595DC7865BEAD2:2011-03-26:2011-07-24::-:
+    #uid:::::::::buildd autosigning key corelli :
+
+    # Besides fiddling out the data we need to check later, this regex also check:
+    # - the keytype (:1:, 1 there means RSA)
+    # - the UID
+    # - that the key does have an expiration date (or it wont match, the second date
+    #   field would be empty
+    regex="^pub:-:([0-9]{4}):1:([0-9A-F]{16}):([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})::-:(buildd autosigning key ${BUILDD} ):$"
+    regex2="^pub:-:([0-9]{4}):1:([0-9A-F]{16}):([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})::-:$"
+    regex3="^uid:::::::::(buildd autosigning key ${BUILDD} ):$"
     while read line <&4; do
-        #pub:-:4096:1:FAB983612A6554FA:2011-03-24:2011-07-22::-:buildd autosigning key poulenc :
-
-        # Besides fiddling out the data we need to check later, this regex also check:
-        # - the keytype (:1:, 1 there means RSA)
-        # - the UID
-        # - that the key does have an expiration date (or it wont match, the second date
-        #   field would be empty
-        regex="^pub:-:([0-9]{4}):1:([0-9A-F]{16}):([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})::-:buildd autosigning key ${BUILDD} :$"
         if [[ $line =~ $regex ]]; then
             KEYSIZE=${BASH_REMATCH[1]}
             KEYID=${BASH_REMATCH[2]}
             KEYCREATE=${BASH_REMATCH[3]}
             KEYEXPIRE=${BASH_REMATCH[4]}
-
-            # We do want 4096 or anything above
-            if [ ${KEYSIZE} -lt 4096 ]; then
-                log "Keysize ${KEYSIZE} too small"
-                error="${error} Keysize ${KEYSIZE} too small"
-                continue
-            fi
-
-            # We want a maximum lifetime of 120 days, so check that.
-            # Easiest to compare in epoch, so lets see, 120 days midnight from now,
-            # compared with their set expiration date at midnight
-            # maxdate should turn out higher. just in case we make it 121 for this check
-            maxdate=$(date -d '121 day 00:00:00' +%s)
-            theirexpire=$(date -d "${KEYEXPIRE} 00:00:00" +%s)
-            if [ ${theirexpire} -gt ${maxdate} ]; then
-                log "Key expiry ${KEYEXPIRE} wrong"
-                error="${error} Key expiry ${KEYEXPIRE} wrong"
-                continue
-            fi
-        else
-            log "Unknown line $line, sod off"
-            error="${error} Unknown line $line, sod off"
-            continue
-        fi
+            KEYUID=${BASH_REMATCH[5]}
+        elif [[ $line =~ $regex2 ]]; then
+            KEYSIZE=${BASH_REMATCH[1]}
+            KEYID=${BASH_REMATCH[2]}
+            KEYCREATE=${BASH_REMATCH[3]}
+            KEYEXPIRE=${BASH_REMATCH[4]}
+        elif [[ $line =~ $regex3 ]]; then
+            KEYUID=${BASH_REMATCH[1]}
+        else
+            log "Didn't recognize the key. Go kiss gpg"
+            DATE=$(date -Is)
+            mv "${INCOMING}/${file}" "${ERRORS}/badkey.${file}.${DATE}"
+            mv "${GPGSTATUS}" "${ERRORS}/badkey.${file}.gpgstatus.${DATE}"
+            mv "${GPGLOGS}" "${ERRORS}/badkey.${file}.gpglogs.${DATE}"
+            rm -f "${GPGOUTF}"
+            continue
+        fi
     done
-    if [ -n "${error}" ]; then
-        log ${error}
+    if [ -z "${KEYUID}" ]; then
+        log "Did not recognize the UID format"
         DATE=$(date -Is)
-        mv "${INCOMING}/${file}" "${ERRORS}/badkey.${file}.${DATE}"
-        mv "${GPGSTATUS}" "${ERRORS}/badkey.${file}.gpgstatus.${DATE}"
-        mv "${GPGLOGS}" "${ERRORS}/badkey.${file}.gpglogs.${DATE}"
-        echo "${error}" >> "${ERRORS}/badkey.${file}.error.${DATE}"
+        mv "${INCOMING}/${file}" "${ERRORS}/keyuid.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/keyuid.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/keyuid.${file}.gpglogs.${DATE}"
        rm -f "${GPGOUTF}"
-        continue
+        continue
+    fi
+    # We do want 4096 or anything above
+    if [ ${KEYSIZE} -lt 4096 ]; then
+        log "Keysize ${KEYSIZE} too small"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/keysize.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/keysize.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/keysize.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # We want a maximum lifetime of 120 days, so check that.
+    # Easiest to compare in epoch, so lets see, 120 days midnight from now,
+    # compared with their set expiration date at midnight
+    # maxdate should turn out higher. just in case we make it 121 for this check
+    maxdate=$(date -d '121 day 00:00:00' +%s)
+    theirexpire=$(date -d "${KEYEXPIRE} 00:00:00" +%s)
+    if [ ${theirexpire} -gt ${maxdate} ]; then
+        log "Key expiry ${KEYEXPIRE} wrong"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/keyexpire.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/keyexpire.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/keyexpire.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
     fi
 
     # And now lets check how many keys this buildd already has. 2 is the maximum, so key
@@ -245,7 +272,9 @@ for file in ${KEYS}; do
 
     # Right. At this point everything should be in order, which means we should put the key into
     # the keyring
-    log "Accepting key ${KEYID} for ${ARCH} buildd ${BUILDD}, expire ${KEYEXPIRE}"
+    KEYSUBMITTER=$(cat "${GPGSTATUS}"|grep GOODSIG)
+    KEYSUBMITTER=${KEYSUBMITTER##*GOODSIG}
+    log "${KEYSUBMITTER} added key ${KEYID} for ${ARCH} buildd ${BUILDD}, expire ${KEYEXPIRE}"
 
     gpg ${DEFGPGOPT} --status-fd 4 --logger-fd 5 --keyring "${ARCHKEYRING}" --import "${GPGOUTF}" 2>/dev/null
     mv "${INCOMING}/${file}" "${base}/${ARCH}"
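For reference, the colon-delimited records the three regexes take apart: field 1 is the record type (pub/uid), field 3 the key length, field 4 the algorithm (1 = RSA), field 5 the key id, fields 6 and 7 the creation and expiration dates, and field 10 the user id — which newer gpg emits on a separate uid record, hence regex2/regex3. The same extraction sketched in Python, using the sample record quoted in the comments above (field layout as documented in gnupg's DETAILS file):

    record = 'pub:-:4096:1:99595DC7865BEAD2:2011-03-26:2011-07-24::-:'
    f = record.split(':')
    rectype, keysize, algo, keyid = f[0], f[2], f[3], f[4]
    created, expires, uid = f[5], f[6], f[9]
    assert rectype == 'pub' and algo == '1'   # only RSA keys are accepted
    assert expires != ''                      # a key without an expiry date is rejected
    # here uid == '', so the UID has to come from a following 'uid:' record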