rsync -aH -B8192 \
--exclude backup/*.xz \
--exclude backup/dump* \
- --exclude database/*.db \
+ --exclude database/\*.db \
${EXTRA} \
--exclude mirror \
--exclude morgue/ \
from daklib.dbconn import *
from daklib.config import Config
from daklib import utils, daklog
-from multiprocessing import Pool
+from daklib.dakmultiprocessing import Pool
import apt_pkg, os, stat, sys
from daklib.lists import getSources, getBinaries, getArchAll
(file, timestamp) = listPath(suite, component,
incremental_mode = incremental_mode)
+ message = "sources list for %s %s" % (suite.suite_name, component.component_name)
+
for _, filename in getSources(suite, component, session, timestamp):
file.write(filename + '\n')
- session.close()
+ session.rollback()
file.close()
- return "sources list for %s %s" % (suite.suite_name, component.component_name)
+ return message
def writeAllList(suite_id, component_id, architecture_id, type, incremental_mode):
session = DBConn().session()
(file, timestamp) = listPath(suite, component, architecture, type,
incremental_mode)
+ message = "all list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+
for _, filename in getArchAll(suite, component, architecture, type,
session, timestamp):
file.write(filename + '\n')
- session.close()
+ session.rollback()
file.close()
- return "all list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+ return message
def writeBinaryList(suite_id, component_id, architecture_id, type, incremental_mode):
session = DBConn().session()
(file, timestamp) = listPath(suite, component, architecture, type,
incremental_mode)
+ message = "binary list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+
for _, filename in getBinaries(suite, component, architecture, type,
session, timestamp):
file.write(filename + '\n')
- session.close()
+ session.rollback()
file.close()
- return "binary list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
+ return message
def usage():
print """Usage: dak generate_filelist [OPTIONS]
from daklib.dbconn import *
from daklib.config import Config
from daklib import utils, daklog
-from multiprocessing import Pool
+from daklib.dakmultiprocessing import Pool
+from daklib.filewriter import PackagesFileWriter, SourcesFileWriter
+
import apt_pkg, os, stat, sys
def usage():
s.source, s.version
"""
-def open_sources(suite, component):
- cnf = Config()
- dest = os.path.join(cnf['Dir::Root'], 'dists', suite.suite_name, component.component_name, 'source', 'Sources')
-
- # create queue if it does not exist yet
- if os.path.exists(dest) and os.path.isdir(dest):
- dest_dir = dest
- else:
- dest_dir = os.path.dirname(dest)
- if not os.path.exists(dest_dir):
- umask = os.umask(00000)
- os.makedirs(dest_dir, 02775)
- os.umask(umask)
-
- f = open(dest, 'w')
- return f
-
def generate_sources(suite_id, component_id):
global _sources_query
suite = session.query(Suite).get(suite_id)
component = session.query(Component).get(component_id)
- output = open_sources(suite, component)
+ writer = SourcesFileWriter(suite=suite.suite_name, component=component.component_name)
+ output = writer.open()
# run query and write Sources
r = session.execute(_sources_query, {"suite": suite_id, "component": component_id, "dsc_type": dsc_type})
print >>output, stanza
print >>output, ""
- return ["generate sources", suite.suite_name, component.component_name]
+ writer.close()
+
+ message = ["generate sources", suite.suite_name, component.component_name]
+ session.rollback()
+ return message
#############################################################################
ORDER BY tmp.package, tmp.version
"""
-def open_packages(suite, component, architecture, type_name):
- cnf = Config()
- if type_name == 'udeb':
- dest = os.path.join(cnf['Dir::Root'], 'dists', suite.suite_name, component.component_name, 'debian-installer', 'binary-%s' % architecture.arch_string, 'Packages')
- else:
- dest = os.path.join(cnf['Dir::Root'], 'dists', suite.suite_name, component.component_name, 'binary-%s' % architecture.arch_string, 'Packages')
-
- # create queue if it does not exist yet
- if os.path.exists(dest) and os.path.isdir(dest):
- dest_dir = dest
- else:
- dest_dir = os.path.dirname(dest)
- if not os.path.exists(dest_dir):
- umask = os.umask(00000)
- os.makedirs(dest_dir, 02775)
- os.umask(umask)
-
- f = open(dest, 'w')
- return f
-
def generate_packages(suite_id, component_id, architecture_id, type_name):
global _packages_query
component = session.query(Component).get(component_id)
architecture = session.query(Architecture).get(architecture_id)
- output = open_packages(suite, component, architecture, type_name)
+ writer = PackagesFileWriter(suite=suite.suite_name, component=component.component_name,
+ architecture=architecture.arch_string, debtype=type_name)
+ output = writer.open()
r = session.execute(_packages_query, {"suite": suite_id, "component": component_id,
"arch": architecture_id, "type_id": type_id, "type_name": type_name, "arch_all": arch_all_id})
print >>output, stanza
print >>output, ""
- session.close()
+ writer.close()
- return ["generate-packages", suite.suite_name, component.component_name, architecture.arch_string]
+ message = ["generate-packages", suite.suite_name, component.component_name, architecture.arch_string]
+ session.rollback()
+ return message
#############################################################################
for c in component_ids:
pool.apply_async(generate_sources, [s.suite_id, c], callback=log)
for a in s.architectures:
- #pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'deb'], callback=log)
- apply(generate_packages, [s.suite_id, c, a.arch_id, 'deb'])
- #pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'udeb'], callback=log)
+ pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'deb'], callback=log)
+ pool.apply_async(generate_packages, [s.suite_id, c, a.arch_id, 'udeb'], callback=log)
pool.close()
pool.join()
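The worker functions above return short message lists instead of logging from inside the child process; `callback=log` hands each finished result back to the parent. A minimal sketch of a compatible callback, assuming the real dak code forwards these messages to its logger (the `log` below is hypothetical):

    def log(message):
        # Hypothetical callback: runs in the parent process once per finished
        # task, with the message list a worker returned, e.g.
        # ["generate-packages", suite_name, component_name, arch_string].
        print ' '.join(message)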
from daklib.config import Config
from daklib import daklog
from daklib.changesutils import *
-from daklib.threadpool import ThreadPool
+from daklib.dakmultiprocessing import Pool
# Globals
Cnf = None
examine_package.use_html=1
- threadpool = ThreadPool()
+ pool = Pool()
for changes_file in changes_files:
changes_file = utils.validate_changes_file_arg(changes_file, 0)
if not changes_file:
continue
print "\n" + changes_file
- threadpool.queueTask(do_pkg, changes_file)
- threadpool.joinAll()
+ pool.apply_async(do_pkg, (changes_file,))
+ pool.close()
+ pool.join()
files = set(os.listdir(cnf["Show-New::HTMLPath"]))
to_delete = filter(lambda x: x.endswith(".html"), files.difference(sources))
--- /dev/null
+++ b/daklib/dakmultiprocessing.py
+#!/usr/bin/env python
+# vim:set et sw=4:
+
+"""
+multiprocessing for DAK
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2011 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+###############################################################################
+
+import multiprocessing
+import sqlalchemy.orm.session
+
+def _func_wrapper(func, *args, **kwds):
+ try:
+ return func(*args, **kwds)
+ finally:
+ # Make sure connections are closed. We might die otherwise.
+ sqlalchemy.orm.session.Session.close_all()
+
+class Pool(object):
+ def __init__(self, *args, **kwds):
+ self.pool = multiprocessing.Pool(*args, **kwds)
+ self.results = []
+
+ def apply_async(self, func, args=(), kwds={}, callback=None):
+ wrapper_args = list(args)
+ wrapper_args.insert(0, func)
+ self.results.append(self.pool.apply_async(_func_wrapper, wrapper_args, kwds, callback))
+
+ def close(self):
+ self.pool.close()
+
+ def join(self):
+ self.pool.join()
+ for r in self.results:
+ # return values were already handled in the callbacks, but asking
+ # for them might raise exceptions which would otherwise be lost
+ r.get()
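To make the wrapper's contract concrete, a minimal usage sketch (worker and callback are hypothetical): apply_async() prepends the real function to the argument list so _func_wrapper can run it in the child and close all SQLAlchemy sessions afterwards, while join() calls get() on every stored result so an exception raised in a worker resurfaces in the parent instead of being lost.

    from daklib.dakmultiprocessing import Pool

    def work(number):
        # hypothetical worker; runs in a child process
        return ['worked on', str(number)]

    def report(message):
        # runs in the parent for each finished task
        print ' '.join(message)

    pool = Pool()
    for number in range(4):
        pool.apply_async(work, [number], callback=report)
    pool.close()
    pool.join()   # re-raises a worker's exception here, if there was one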
'''
Returns a file object for writing.
'''
+ # create missing parent directories; ignore the error if they
+ # already exist
+ try:
+ os.makedirs(os.path.dirname(self.path))
+ except OSError:
+ pass
self.file = open(self.path + '.new', 'w')
return self.file
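The '.new' suffix suggests the usual write-to-temporary-then-rename pattern, so readers never see a half-written list. A sketch of the matching close() under that assumption (the real daklib.filewriter may do more, for example compression):

    def close(self):
        '''
        Closes the file object and atomically moves the temporary file
        into place.
        '''
        self.file.close()
        os.rename(self.path + '.new', self.path)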
# Read in the TEMPKEYDATAFILE, but avoid using a subshell like a
# while read line otherwise would do
exec 4<> "${TEMPKEYDATA}"
- error=""
+ KEYUID=""
+ #pub:-:4096:1:FAB983612A6554FA:2011-03-24:2011-07-22::-:buildd autosigning key poulenc <buildd_powerpc-poulenc@buildd.debian.org>:
+
+ # Unfortunately gpg's colon-delimited "interface" gives you different
+ # output depending on how people created their keys. The buildd people
+ # created the test keys differently from what they do now, so we have to
+ # handle both variants. With the current steps, what we get back looks like
+
+ #pub:-:4096:1:99595DC7865BEAD2:2011-03-26:2011-07-24::-:
+ #uid:::::::::buildd autosigning key corelli <buildd_mips-corelli@buildd.debian.org>:
+
+ # Besides extracting the data we need to check later, this regex also checks:
+ # - the keytype (:1:, where 1 means RSA)
+ # - the UID
+ # - that the key does have an expiration date (or it won't match: the
+ #   second date field would be empty)
+ regex="^pub:-:([0-9]{4}):1:([0-9A-F]{16}):([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})::-:(buildd autosigning key ${BUILDD} <buildd_${ARCH}-${BUILDD}@buildd.debian.org>):$"
+ regex2="^pub:-:([0-9]{4}):1:([0-9A-F]{16}):([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})::-:$"
+ regex3="^uid:::::::::(buildd autosigning key ${BUILDD} <buildd_${ARCH}-${BUILDD}@buildd.debian.org>):$"
while read line <&4; do
- #pub:-:4096:1:FAB983612A6554FA:2011-03-24:2011-07-22::-:buildd autosigning key poulenc <buildd_powerpc-poulenc@buildd.debian.org>:
-
- # Besides fiddling out the data we need to check later, this regex also check:
- # - the keytype (:1:, 1 there means RSA)
- # - the UID
- # - that the key does have an expiration date (or it wont match, the second date
- # field would be empty
- regex="^pub:-:([0-9]{4}):1:([0-9A-F]{16}):([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})::-:buildd autosigning key ${BUILDD} <buildd_${ARCH}-${BUILDD}@buildd.debian.org>:$"
if [[ $line =~ $regex ]]; then
KEYSIZE=${BASH_REMATCH[1]}
KEYID=${BASH_REMATCH[2]}
KEYCREATE=${BASH_REMATCH[3]}
KEYEXPIRE=${BASH_REMATCH[4]}
-
- # We do want 4096 or anything above
- if [ ${KEYSIZE} -lt 4096 ]; then
- log "Keysize ${KEYSIZE} too small"
- error="${error} Keysize ${KEYSIZE} too small"
- continue
- fi
-
- # We want a maximum lifetime of 120 days, so check that.
- # Easiest to compare in epoch, so lets see, 120 days midnight from now,
- # compared with their set expiration date at midnight
- # maxdate should turn out higher. just in case we make it 121 for this check
- maxdate=$(date -d '121 day 00:00:00' +%s)
- theirexpire=$(date -d "${KEYEXPIRE} 00:00:00" +%s)
- if [ ${theirexpire} -gt ${maxdate} ]; then
- log "Key expiry ${KEYEXPIRE} wrong"
- error="${error} Key expiry ${KEYEXPIRE} wrong"
- continue
- fi
- else
- log "Unknown line $line, sod off"
- error="${error} Unknown line $line, sod off"
- continue
- fi
+ KEYUID=${BASH_REMATCH[5]}
+ elif [[ $line =~ $regex2 ]]; then
+ KEYSIZE=${BASH_REMATCH[1]}
+ KEYID=${BASH_REMATCH[2]}
+ KEYCREATE=${BASH_REMATCH[3]}
+ KEYEXPIRE=${BASH_REMATCH[4]}
+ elif [[ $line =~ $regex3 ]]; then
+ KEYUID=${BASH_REMATCH[1]}
+ else
+ log "Didn't recognize the key. Go kiss gpg"
+ DATE=$(date -Is)
+ mv "${INCOMING}/${file}" "${ERRORS}/badkey.${file}.${DATE}"
+ mv "${GPGSTATUS}" "${ERRORS}/badkey.${file}.gpgstatus.${DATE}"
+ mv "${GPGLOGS}" "${ERRORS}/badkey.${file}.gpglogs.${DATE}"
+ rm -f "${GPGOUTF}"
+ continue
+ fi
done
- if [ -n "${error}" ]; then
- log ${error}
+ if [ -z "${KEYUID}" ]; then
+ log "Did not recognize the UID format"
DATE=$(date -Is)
- mv "${INCOMING}/${file}" "${ERRORS}/badkey.${file}.${DATE}"
- mv "${GPGSTATUS}" "${ERRORS}/badkey.${file}.gpgstatus.${DATE}"
- mv "${GPGLOGS}" "${ERRORS}/badkey.${file}.gpglogs.${DATE}"
- echo "${error}" >> "${ERRORS}/badkey.${file}.error.${DATE}"
+ mv "${INCOMING}/${file}" "${ERRORS}/keyuid.${file}.${DATE}"
+ mv "${GPGSTATUS}" "${ERRORS}/keyuid.${file}.gpgstatus.${DATE}"
+ mv "${GPGLOGS}" "${ERRORS}/keyuid.${file}.gpglogs.${DATE}"
rm -f "${GPGOUTF}"
- continue
+ continue
+ fi
+ # We want a keysize of 4096 bits or anything above
+ if [ ${KEYSIZE} -lt 4096 ]; then
+ log "Keysize ${KEYSIZE} too small"
+ DATE=$(date -Is)
+ mv "${INCOMING}/${file}" "${ERRORS}/keysize.${file}.${DATE}"
+ mv "${GPGSTATUS}" "${ERRORS}/keysize.${file}.gpgstatus.${DATE}"
+ mv "${GPGLOGS}" "${ERRORS}/keysize.${file}.gpglogs.${DATE}"
+ rm -f "${GPGOUTF}"
+ continue
+ fi
+
+ # We want a maximum lifetime of 120 days, so check that.
+ # Easiest to compare in epoch, so let's see: 120 days from now at midnight,
+ # compared with their set expiration date at midnight.
+ # maxdate should turn out higher. Just to be safe we make it 121 days for this check.
+ maxdate=$(date -d '121 day 00:00:00' +%s)
+ theirexpire=$(date -d "${KEYEXPIRE} 00:00:00" +%s)
+ if [ ${theirexpire} -gt ${maxdate} ]; then
+ log "Key expiry ${KEYEXPIRE} wrong"
+ DATE=$(date -Is)
+ mv "${INCOMING}/${file}" "${ERRORS}/keyexpire.${file}.${DATE}"
+ mv "${GPGSTATUS}" "${ERRORS}/keyexpire.${file}.gpgstatus.${DATE}"
+ mv "${GPGLOGS}" "${ERRORS}/keyexpire.${file}.gpglogs.${DATE}"
+ rm -f "${GPGOUTF}"
+ continue
fi
# And now lets check how many keys this buildd already has. 2 is the maximum, so key
# Right. At this point everything should be in order, which means we should put the key into
# the keyring
- log "Accepting key ${KEYID} for ${ARCH} buildd ${BUILDD}, expire ${KEYEXPIRE}"
+ KEYSUBMITTER=$(grep GOODSIG "${GPGSTATUS}")
+ KEYSUBMITTER=${KEYSUBMITTER##*GOODSIG}
+ log "${KEYSUBMITTER} added key ${KEYID} for ${ARCH} buildd ${BUILDD}, expire ${KEYEXPIRE}"
gpg ${DEFGPGOPT} --status-fd 4 --logger-fd 5 --keyring "${ARCHKEYRING}" --import "${GPGOUTF}" 2>/dev/null
mv "${INCOMING}/${file}" "${base}/${ARCH}"
# So put it into the removed keyring
gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --export ${KEYID} | gpg ${DEFGPGOPT} --keyring "${REMOVED}" --import 2>/dev/null
if gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --yes --delete-keys ${KEYID}; then
- log "Removed key ${KEYID}, reason: ${COMMENT}"
+ KEYSUBMITTER=$(grep GOODSIG "${GPGSTATUS}")
+ KEYSUBMITTER=${KEYSUBMITTER##*GOODSIG}
+ log "${KEYSUBMITTER} removed key ${KEYID} for ${ARCH} buildd ${BUILDD}, reason: ${COMMENT}"
mv "${INCOMING}/${file}" "${base}/${ARCH}"
continue
fi