From: Mark Hymers
Date: Fri, 25 Mar 2011 18:16:17 +0000 (+0000)
Subject: Merge remote branch 'origin/master'
X-Git-Url: https://git.decadent.org.uk/gitweb/?a=commitdiff_plain;h=3b0114ce123dc45186d02045233510b777a01b02;hp=fc3ac588d53dabdae2e8cd1678cb9050dd9c8a9e;p=dak.git

Merge remote branch 'origin/master'
---
diff --git a/dak/admin.py b/dak/admin.py
index 808fb887..d159651e 100755
--- a/dak/admin.py
+++ b/dak/admin.py
@@ -59,6 +59,7 @@ Perform administrative work on the dak database.
   config / c:
      c db                   show db config
      c db-shell             show db config in a usable form for psql
+     c NAME                 show option NAME as set in configuration table
 
   architecture / a:
      a list                 show a list of architectures
@@ -456,7 +457,12 @@ def show_config(command):
             e.append('PGPORT')
         print "export " + " ".join(e)
     else:
-        die("E: config command unknown")
+        session = DBConn().session()
+        try:
+            o = session.query(DBConfig).filter_by(name = mode).one()
+            print o.value
+        except NoResultFound:
+            print "W: option '%s' not set" % mode
 
 dispatch['config'] = show_config
 dispatch['c'] = show_config
diff --git a/dak/dakdb/update53.py b/dak/dakdb/update53.py
new file mode 100755
index 00000000..36a076f6
--- /dev/null
+++ b/dak/dakdb/update53.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Add table for build queue files from policy queues.
+
+@contact: Debian FTP Master
+@copyright: 2011 Ansgar Burchardt
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+
+################################################################################
+def do_update(self):
+    """
+    Add table for build queue files from policy queues.
+    """
+    print __doc__
+    try:
+        c = self.db.cursor()
+
+        c.execute("""
+            CREATE TABLE build_queue_policy_files (
+                build_queue_id INTEGER NOT NULL REFERENCES build_queue(id) ON DELETE CASCADE,
+                file_id INTEGER NOT NULL REFERENCES changes_pending_files(id) ON DELETE CASCADE,
+                filename TEXT NOT NULL,
+                created TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
+                lastused TIMESTAMP WITHOUT TIME ZONE,
+                PRIMARY KEY (build_queue_id, file_id)
+            )""")
+
+        c.execute("UPDATE config SET value = '53' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError, msg:
+        self.db.rollback()
+        raise DBUpdateError, 'Unable to apply sick update 53, rollback issued. Error message : %s' % (str(msg))
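
Note: the new "c NAME" branch of show_config() is what lets other tooling read single
options out of the configuration table. A minimal sketch of the same lookup, assuming
DBConfig is mapped onto that table as in daklib/dbconn.py:

    from daklib.dbconn import DBConn, DBConfig
    from sqlalchemy.orm.exc import NoResultFound

    def get_config_option(name):
        # Mirrors "dak admin c NAME": one row from the config table, or None.
        session = DBConn().session()
        try:
            return session.query(DBConfig).filter_by(name = name).one().value
        except NoResultFound:
            return None

The buildd-prepare-dir script added later in this commit relies on exactly this,
calling "dak admin c signingkeyids" to fetch the key used to sign its checksum file.
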
diff --git a/dak/dakdb/update54.py b/dak/dakdb/update54.py
new file mode 100755
index 00000000..e7676ef0
--- /dev/null
+++ b/dak/dakdb/update54.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Add send_to_build_queues to policy_queue table
+
+@contact: Debian FTP Master
+@copyright: 2011 Ansgar Burchardt
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+
+################################################################################
+def do_update(self):
+    """
+    Add send_to_build_queues to policy_queue table
+    """
+    print __doc__
+    try:
+        c = self.db.cursor()
+
+        c.execute("""
+            ALTER TABLE policy_queue ADD COLUMN send_to_build_queues BOOLEAN NOT NULL DEFAULT 'f'
+        """)
+        c.execute("""
+            UPDATE policy_queue SET send_to_build_queues='t' WHERE queue_name IN ('embargo', 'disembargo')
+        """)
+
+        c.execute("UPDATE config SET value = '54' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError, msg:
+        self.db.rollback()
+        raise DBUpdateError, 'Unable to apply sick update 54, rollback issued. Error message : %s' % (str(msg))
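
Note: once update 54 has run, which policy queues forward to build queues can be
checked directly in the database. A hypothetical spot check ("projectb" is dak's
conventional database name; assume appropriate connection settings):

    import psycopg2

    conn = psycopg2.connect("dbname=projectb")
    c = conn.cursor()
    c.execute("SELECT queue_name FROM policy_queue WHERE send_to_build_queues")
    print [row[0] for row in c.fetchall()]   # expected: ['embargo', 'disembargo']
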
diff --git a/dak/generate_filelist.py b/dak/generate_filelist.py
index 1f4d6654..2a6d218b 100755
--- a/dak/generate_filelist.py
+++ b/dak/generate_filelist.py
@@ -5,6 +5,7 @@ Generate file lists for apt-ftparchive.
 
 @contact: Debian FTP Master
 @copyright: 2009 Torsten Werner
+@copyright: 2011 Ansgar Burchardt
 @license: GNU General Public License version 2 or later
 """
 
@@ -37,8 +38,8 @@ Generate file lists for apt-ftparchive.
 
 from daklib.dbconn import *
 from daklib.config import Config
-from daklib.threadpool import ThreadPool
-from daklib import utils
+from daklib import utils, daklog
+from multiprocessing import Pool
 import apt_pkg, os, stat, sys
 
 from daklib.lists import getSources, getBinaries, getArchAll
@@ -64,37 +65,48 @@ def listPath(suite, component, architecture = None, type = None,
         file.truncate()
     return (file, timestamp)
 
-def writeSourceList(args):
-    (suite, component, incremental_mode) = args
+def writeSourceList(suite_id, component_id, incremental_mode):
+    session = DBConn().session()
+    suite = Suite.get(suite_id, session)
+    component = Component.get(component_id, session)
     (file, timestamp) = listPath(suite, component,
             incremental_mode = incremental_mode)
-    session = DBConn().session()
+
     for _, filename in getSources(suite, component, session, timestamp):
         file.write(filename + '\n')
     session.close()
     file.close()
+    return "sources list for %s %s" % (suite.suite_name, component.component_name)
 
-def writeAllList(args):
-    (suite, component, architecture, type, incremental_mode) = args
+def writeAllList(suite_id, component_id, architecture_id, type, incremental_mode):
+    session = DBConn().session()
+    suite = Suite.get(suite_id, session)
+    component = Component.get(component_id, session)
+    architecture = Architecture.get(architecture_id, session)
     (file, timestamp) = listPath(suite, component, architecture, type,
             incremental_mode)
-    session = DBConn().session()
+
     for _, filename in getArchAll(suite, component, architecture, type,
             session, timestamp):
         file.write(filename + '\n')
     session.close()
    file.close()
+    return "all list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
 
-def writeBinaryList(args):
-    (suite, component, architecture, type, incremental_mode) = args
+def writeBinaryList(suite_id, component_id, architecture_id, type, incremental_mode):
+    session = DBConn().session()
+    suite = Suite.get(suite_id, session)
+    component = Component.get(component_id, session)
+    architecture = Architecture.get(architecture_id, session)
     (file, timestamp) = listPath(suite, component, architecture, type,
             incremental_mode)
-    session = DBConn().session()
+
    for _, filename in getBinaries(suite, component, architecture, type,
             session, timestamp):
         file.write(filename + '\n')
     session.close()
     file.close()
+    return "binary list for %s %s (arch=%s, type=%s)" % (suite.suite_name, component.component_name, architecture.arch_string, type)
 
 def usage():
     print """Usage: dak generate_filelist [OPTIONS]
@@ -114,6 +126,7 @@ Incremental mode appends only newer files to existing lists."""
 
 def main():
     cnf = Config()
+    Logger = daklog.Logger(cnf, 'generate-filelist')
     Arguments = [('h', "help",         "Filelist::Options::Help"),
                  ('s', "suite",        "Filelist::Options::Suite", "HasArg"),
                  ('c', "component",    "Filelist::Options::Component", "HasArg"),
@@ -140,36 +153,44 @@ def main():
     Options = cnf.SubTree("Filelist::Options")
     if Options['Help']:
         usage()
-    threadpool = ThreadPool()
+    pool = Pool()
     query_suites = query_suites. \
         filter(Suite.suite_name.in_(utils.split_args(Options['Suite'])))
     query_components = query_components. \
         filter(Component.component_name.in_(utils.split_args(Options['Component'])))
     query_architectures = query_architectures. \
         filter(Architecture.arch_string.in_(utils.split_args(Options['Architecture'])))
+
+    def log(message):
+        Logger.log([message])
+
     for suite in query_suites:
+        suite_id = suite.suite_id
         for component in query_components:
+            component_id = component.component_id
             for architecture in query_architectures:
+                architecture_id = architecture.arch_id
                 if architecture not in suite.architectures:
                     pass
                 elif architecture.arch_string == 'source':
-                    threadpool.queueTask(writeSourceList,
-                        (suite, component, Options['Incremental']))
+                    pool.apply_async(writeSourceList,
+                        (suite_id, component_id, Options['Incremental']), callback=log)
                 elif architecture.arch_string == 'all':
-                    threadpool.queueTask(writeAllList,
-                        (suite, component, architecture, 'deb',
-                            Options['Incremental']))
-                    threadpool.queueTask(writeAllList,
-                        (suite, component, architecture, 'udeb',
-                            Options['Incremental']))
+                    pool.apply_async(writeAllList,
+                        (suite_id, component_id, architecture_id, 'deb',
+                            Options['Incremental']), callback=log)
+                    pool.apply_async(writeAllList,
+                        (suite_id, component_id, architecture_id, 'udeb',
+                            Options['Incremental']), callback=log)
                 else: # arch any
-                    threadpool.queueTask(writeBinaryList,
-                        (suite, component, architecture, 'deb',
-                            Options['Incremental']))
-                    threadpool.queueTask(writeBinaryList,
-                        (suite, component, architecture, 'udeb',
-                            Options['Incremental']))
-    threadpool.joinAll()
+                    pool.apply_async(writeBinaryList,
+                        (suite_id, component_id, architecture_id, 'deb',
+                            Options['Incremental']), callback=log)
+                    pool.apply_async(writeBinaryList,
+                        (suite_id, component_id, architecture_id, 'udeb',
+                            Options['Incremental']), callback=log)
+    pool.close()
+    pool.join()
     # this script doesn't change the database
     session.close()
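
Note: the switch from daklib.threadpool to multiprocessing.Pool is why the writer
functions now take plain integer ids instead of ORM objects: Pool pickles its argument
tuples into worker processes, and session-bound SQLAlchemy instances do not survive
that. A minimal sketch of the pattern under the same assumptions, with 1 standing in
for a real suite id:

    from multiprocessing import Pool
    from daklib.dbconn import DBConn, Suite

    def worker(suite_id):
        session = DBConn().session()           # fresh session in the child process
        suite = Suite.get(suite_id, session)   # re-fetch the object by primary key
        return "done with %s" % suite.suite_name

    def log(message):
        print message

    pool = Pool()
    pool.apply_async(worker, (1,), callback=log)
    pool.close()
    pool.join()   # callbacks have run in the parent by the time this returns
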
diff --git a/dak/update_db.py b/dak/update_db.py
index 9051704b..88ff20f5 100755
--- a/dak/update_db.py
+++ b/dak/update_db.py
@@ -46,7 +46,7 @@ from daklib.daklog import Logger
 ################################################################################
 
 Cnf = None
-required_database_schema = 52
+required_database_schema = 54
 
 ################################################################################
 
diff --git a/daklib/changes.py b/daklib/changes.py
index e016638c..54adb3b0 100644
--- a/daklib/changes.py
+++ b/daklib/changes.py
@@ -187,6 +187,31 @@ class Changes(object):
             if (not self.changes.has_key(key)) or (not self.changes[key]):
                 self.changes[key]='missing'
 
+    def __get_file_from_pool(self, filename, entry, session):
+        cnf = Config()
+
+        poolname = poolify(entry["source"], entry["component"])
+        l = get_location(cnf["Dir::Pool"], entry["component"], session=session)
+
+        found, poolfile = check_poolfile(os.path.join(poolname, filename),
+                                         entry['size'],
+                                         entry["md5sum"],
+                                         l.location_id,
+                                         session=session)
+
+        if found is None:
+            Logger.log(["E: Found multiple files for pool (%s) for %s" % (filename, entry["component"])])
+            return None
+        elif found is False and poolfile is not None:
+            Logger.log(["E: md5sum/size mismatch for %s in pool" % (filename)])
+            return None
+        else:
+            if poolfile is None:
+                Logger.log(["E: Could not find %s in pool" % (filename)])
+                return None
+            else:
+                return poolfile
+
     @session_wrapper
     def add_known_changes(self, dirpath, in_queue=None, session=None):
         """add "missing" in fields which we will require for the known_changes table"""
@@ -248,27 +273,22 @@ class Changes(object):
 
             except IOError:
                 # Can't find the file, try to look it up in the pool
-                poolname = poolify(entry["source"], entry["component"])
-                l = get_location(cnf["Dir::Pool"], entry["component"], session=session)
-
-                found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
-                                                 entry['size'],
-                                                 entry["md5sum"],
-                                                 l.location_id,
-                                                 session=session)
-
-                if found is None:
-                    Logger.log(["E: Found multiple files for pool (%s) for %s" % (chg_fn, entry["component"])])
-                elif found is False and poolfile is not None:
-                    Logger.log(["E: md5sum/size mismatch for %s in pool" % (chg_fn)])
-                else:
-                    if poolfile is None:
-                        Logger.log(["E: Could not find %s in pool" % (chg_fn)])
-                    else:
-                        chg.poolfiles.append(poolfile)
+                poolfile = self.__get_file_from_pool(chg_fn, entry, session)
+                if poolfile:
+                    chg.poolfiles.append(poolfile)
 
         chg.files = files
 
+        # Add files referenced in .dsc, but not included in .changes
+        for name, entry in self.dsc_files.items():
+            if self.files.has_key(name):
+                continue
+
+            entry['source'] = self.changes['source']
+            poolfile = self.__get_file_from_pool(name, entry, session)
+            if poolfile:
+                chg.poolfiles.append(poolfile)
+
         session.commit()
         chg = session.query(DBChange).filter_by(changesname = self.changes_file).one();
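
Note: check_poolfile() drives the three branches in __get_file_from_pool(); it returns
a (found, poolfile) pair. A rough sketch of the contract as used here, with a
hypothetical pool path standing in for a real one:

    # found is None                      -> more than one matching row: ambiguous, give up
    # found is False, poolfile not None  -> a row exists but md5sum/size disagree
    # found is True                      -> poolfile is the PoolFile we want
    found, poolfile = check_poolfile('pool/main/e/example/example_1.0.orig.tar.gz',
                                     entry['size'], entry['md5sum'],
                                     location_id, session=session)
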
diff --git a/daklib/config.py b/daklib/config.py
index 9993ec3a..ed8cf1d0 100755
--- a/daklib/config.py
+++ b/daklib/config.py
@@ -39,12 +39,9 @@ default_config = "/etc/dak/dak.conf" #: default dak config, defines host propert
 # suppress some deprecation warnings in squeeze related to apt_pkg
 # module
 import warnings
-warnings.filterwarnings('ignore', \
-    "Attribute '.*' of the 'apt_pkg\.Configuration' object is deprecated, use '.*' instead\.", \
-    DeprecationWarning)
-warnings.filterwarnings('ignore', \
-    "apt_pkg\.newConfiguration\(\) is deprecated\. Use apt_pkg\.Configuration\(\) instead\.", \
-    DeprecationWarning)
+warnings.filterwarnings('ignore', ".*apt_pkg.* is deprecated.*", DeprecationWarning)
+
+################################################################################
 
 def which_conf_file():
     return os.getenv("DAK_CONFIG", default_config)
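
Note: the single pattern is deliberately broad enough to cover the messages both old
filters matched, which is also what lets daklib/queue.py drop its own copies below.
A quick sanity check (messages paraphrased from apt_pkg's warnings):

    import re

    pattern = ".*apt_pkg.* is deprecated.*"
    for msg in ("Attribute 'Find' of the 'apt_pkg.TagSection' object is deprecated, use 'find' instead.",
                "apt_pkg.newConfiguration() is deprecated. Use apt_pkg.Configuration() instead."):
        assert re.match(pattern, msg)   # warnings.filterwarnings matches from the start
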
diff --git a/daklib/contents.py b/daklib/contents.py
index f3077aab..449fb88e 100755
--- a/daklib/contents.py
+++ b/daklib/contents.py
@@ -285,7 +285,7 @@ def binary_helper(suite_id, arch_id, overridetype_id, component_id = None):
     This function is called in a new subprocess and multiprocessing wants a top
     level function.
     '''
-    session = DBConn().session()
+    session = DBConn().session(work_mem = 1000)
     suite = Suite.get(suite_id, session)
     architecture = Architecture.get(arch_id, session)
     overridetype = OverrideType.get(overridetype_id, session)
@@ -304,7 +304,7 @@ def source_helper(suite_id, component_id):
     This function is called in a new subprocess and multiprocessing wants a top
     level function.
     '''
-    session = DBConn().session()
+    session = DBConn().session(work_mem = 1000)
     suite = Suite.get(suite_id, session)
     component = Component.get(component_id, session)
     log_message = [suite.suite_name, 'source', component.component_name]
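
Note: the helpers now ask for a transaction-local work_mem of 1000 MB so the large
Contents queries can sort in memory; the session() change in daklib/dbconn.py below
shows what this expands to. Roughly:

    session = DBConn().session(work_mem = 1000)
    # behaves like a plain session whose transaction first executed:
    #   SET LOCAL work_mem TO '1000 MB'
    # SET LOCAL lasts only until the current transaction commits or rolls back.
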
diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index fe04ebc3..9c25f670 100755
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -703,6 +703,7 @@ class BuildQueue(object):
         try:
             # Grab files we want to include
             newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+            newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
             # Write file list with newer files
             (fl_fd, fl_name) = mkstemp()
             for n in newer:
@@ -795,6 +796,7 @@ class BuildQueue(object):
 
         # Grab files older than our execution time
         older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+        older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
 
         for o in older:
             killdb = False
@@ -822,9 +824,7 @@ class BuildQueue(object):
             if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
                 continue
 
-            try:
-                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
-            except NoResultFound:
+            if not self.contains_filename(f):
                 fp = os.path.join(self.path, f)
                 if dryrun:
                     Logger.log(["I: Would remove unused link %s" % fp])
@@ -835,6 +835,18 @@ class BuildQueue(object):
                     except OSError:
                         Logger.log(["E: Failed to unlink unreferenced file %s" % fp])
 
+    def contains_filename(self, filename):
+        """
+        @rtype Boolean
+        @returns True if filename is supposed to be in the queue; False otherwise
+        """
+        session = DBConn().session().object_session(self)
+        if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
+            return True
+        elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
+            return True
+        return False
+
     def add_file_from_pool(self, poolfile):
         """Copies a file into the pool.  Assumes that the PoolFile object is
         attached to the same SQLAlchemy session as the Queue object is.
@@ -879,6 +891,61 @@ class BuildQueue(object):
 
         return qf
 
+    def add_changes_from_policy_queue(self, policyqueue, changes):
+        """
+        Copies a changes from a policy queue together with its poolfiles.
+
+        @type policyqueue: PolicyQueue
+        @param policyqueue: policy queue to copy the changes from
+
+        @type changes: DBChange
+        @param changes: changes to copy to this build queue
+        """
+        for policyqueuefile in changes.files:
+            self.add_file_from_policy_queue(policyqueue, policyqueuefile)
+        for poolfile in changes.poolfiles:
+            self.add_file_from_pool(poolfile)
+
+    def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
+        """
+        Copies a file from a policy queue.
+        Assumes that the policyqueuefile is attached to the same SQLAlchemy
+        session as the Queue object is.  The caller is responsible for
+        committing after calling this function.
+
+        @type policyqueue: PolicyQueue
+        @param policyqueue: policy queue to copy the file from
+
+        @type policyqueuefile: ChangePendingFile
+        @param policyqueuefile: file to be added to the build queue
+        """
+        session = DBConn().session().object_session(policyqueuefile)
+
+        # Is the file already there?
+        try:
+            f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
+            f.lastused = datetime.now()
+            return f
+        except NoResultFound:
+            pass # continue below
+
+        # We have to add the file.
+        f = BuildQueuePolicyFile()
+        f.build_queue = self
+        f.file = policyqueuefile
+        f.filename = policyqueuefile.filename
+
+        source = os.path.join(policyqueue.path, policyqueuefile.filename)
+        target = f.fullpath
+        try:
+            # Always copy files from policy queues as they might move around.
+            import utils
+            utils.copy(source, target)
+        except OSError:
+            return None
+
+        session.add(f)
+        return f
+
 __all__.append('BuildQueue')
 
@@ -911,6 +978,10 @@ __all__.append('get_build_queue')
 ################################################################################
 
 class BuildQueueFile(object):
+    """
+    BuildQueueFile represents a file in a build queue coming from a pool.
+    """
+
     def __init__(self, *args, **kwargs):
         pass
 
@@ -926,6 +997,27 @@ __all__.append('BuildQueueFile')
 
 ################################################################################
 
+class BuildQueuePolicyFile(object):
+    """
+    BuildQueuePolicyFile represents a file in a build queue that comes from a
+    policy queue (and not a pool).
+    """
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    #@property
+    #def filename(self):
+    #    return self.file.filename
+
+    @property
+    def fullpath(self):
+        return os.path.join(self.build_queue.path, self.filename)
+
+__all__.append('BuildQueuePolicyFile')
+
+################################################################################
+
 class ChangePendingBinary(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -3080,6 +3172,7 @@ class DBConn(object):
             'binary_acl_map',
             'build_queue',
             'build_queue_files',
+            'build_queue_policy_files',
             'changelogs_text',
             'changes',
             'component',
@@ -3174,6 +3267,11 @@ class DBConn(object):
                properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
                                  poolfile = relation(PoolFile, backref='buildqueueinstances')))
 
+        mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
+               properties = dict(
+                build_queue = relation(BuildQueue, backref='policy_queue_files'),
+                file = relation(ChangePendingFile, lazy='joined')))
+
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
                                  package = self.tbl_binaries.c.package,
@@ -3493,12 +3591,21 @@ class DBConn(object):
         self.__setupmappers()
         self.pid = os.getpid()
 
-    def session(self):
+    def session(self, work_mem = 0):
+        '''
+        Returns a new session object.  If a work_mem parameter is provided a new
+        transaction is started and the work_mem parameter is set for this
+        transaction.  The work_mem parameter is measured in MB.  A default value
+        will be used if the parameter is not set.
+        '''
         # reinitialize DBConn in new processes
         if self.pid != os.getpid():
             clear_mappers()
             self.__createconn()
-        return self.db_smaker()
+        session = self.db_smaker()
+        if work_mem > 0:
+            session.execute("SET LOCAL work_mem TO '%d MB'" % work_mem)
+        return session
 
 __all__.append('DBConn')
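
Note: with pool-backed and policy-queue-backed files now living in two tables,
membership tests have to consult both, which is what contains_filename() bundles up.
A simplified sketch of how the clean-up code above uses it (condensed from
clean_and_update(); dryrun and logging trimmed to a print):

    import os

    def remove_unused_links(queue, dryrun=True):
        # Anything in the on-disk queue directory that neither table knows
        # about is an unused link and can go away.
        for f in os.listdir(queue.path):
            if f.startswith(('Packages', 'Source', 'Release', 'advisory')):
                continue
            if not queue.contains_filename(f):
                fp = os.path.join(queue.path, f)
                if dryrun:
                    print "I: Would remove unused link %s" % fp
                else:
                    os.unlink(fp)
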
diff --git a/daklib/queue.py b/daklib/queue.py
index 4ea117b3..ef781f19 100755
--- a/daklib/queue.py
+++ b/daklib/queue.py
@@ -56,16 +56,6 @@ from textutils import fix_maintainer
 from lintian import parse_lintian_output, generate_reject_messages
 from contents import UnpackedSource
 
-# suppress some deprecation warnings in squeeze related to apt_pkg
-# module
-import warnings
-warnings.filterwarnings('ignore', \
-    "apt_pkg.ParseSection\(\) is deprecated. Please see apt_pkg\.TagSection\(\) for the replacement\.", \
-    DeprecationWarning)
-warnings.filterwarnings('ignore', \
-    "Attribute 'Find' of the 'apt_pkg\.TagSection' object is deprecated, use 'find' instead\.", \
-    DeprecationWarning)
-
 ###############################################################################
 
 def get_type(f, session):
@@ -105,7 +95,7 @@ def get_type(f, session):
 
 # Determine what parts in a .changes are NEW
 
-def determine_new(filename, changes, files, warn=1, session = None, dsc = None, new = {}):
+def determine_new(filename, changes, files, warn=1, session = None, dsc = None, new = None):
     """
     Determine what parts in a C{changes} file are NEW.
 
@@ -134,6 +124,8 @@ def determine_new(filename, changes, files, warn=1, session = None, dsc = None,
     # TODO: This should all use the database instead of parsing the changes
     # file again
     byhand = {}
+    if new is None:
+        new = {}
 
     dbchg = get_dbchange(filename, session)
     if dbchg is None:
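
Note: the determine_new() change fixes the classic Python mutable-default pitfall:
a "new = {}" default is created once at definition time and shared across calls.
A self-contained illustration:

    def broken(key, new = {}):      # one dict, shared by every call
        new[key] = True
        return new

    def fixed(key, new = None):     # fresh dict per call unless one is passed in
        if new is None:
            new = {}
        new[key] = True
        return new

    print broken('a'), broken('b')  # {'a': True, 'b': True} twice: state leaked
    print fixed('a'), fixed('b')    # {'a': True} {'b': True}
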
diff --git a/daklib/queue_install.py b/daklib/queue_install.py
index b1c2f55e..8878e55c 100755
--- a/daklib/queue_install.py
+++ b/daklib/queue_install.py
@@ -64,6 +64,14 @@ def package_to_queue(u, summary, short_summary, queue, chg, session, announce=No
     u.move_to_queue(queue)
     chg.in_queue_id = queue.policy_queue_id
     session.add(chg)
+
+    # send to build queues
+    if queue.send_to_build_queues:
+        for suite_name in u.pkg.changes["distribution"].keys():
+            suite = get_suite(suite_name, session)
+            for q in suite.copy_queues:
+                q.add_changes_from_policy_queue(queue, chg)
+
     session.commit()
 
     # Check for override disparities
@@ -126,11 +134,6 @@ def do_unembargo(u, summary, short_summary, chg, session=None):
 
     package_to_queue(u, summary, short_summary, polq, chg, session, announce=None)
 
-    for suite_name in u.pkg.changes["distribution"].keys():
-        suite = get_suite(suite_name, session)
-        for q in suite.copy_queues:
-            for f in u.pkg.files.keys():
-                copyfile(os.path.join(polq.path, f), os.path.join(q.path, f))
 #
 ################################################################################
 #
@@ -152,11 +155,6 @@ def do_embargo(u, summary, short_summary, chg, session=None):
 
     package_to_queue(u, summary, short_summary, polq, chg, session, announce=None)
 
-    for suite_name in u.pkg.changes["distribution"].keys():
-        suite = get_suite(suite_name, session)
-        for q in suite.copy_queues:
-            for f in u.pkg.files.keys():
-                copyfile(os.path.join(polq.path, f), os.path.join(q.path, f))
 
 ################################################################################
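
Note: package_to_queue() now consults the send_to_build_queues flag added by
update54, so database-tracked copies via add_changes_from_policy_queue() replace
the blind copyfile() loops that do_embargo()/do_unembargo() used to carry. Enabling
the behaviour for a further policy queue would look roughly like this, assuming the
PolicyQueue mapper exposes the new column and with a hypothetical queue name:

    from daklib.dbconn import DBConn, PolicyQueue

    session = DBConn().session()
    q = session.query(PolicyQueue).filter_by(queue_name = 'proposedupdates').one()
    q.send_to_build_queues = True    # hypothetical: forward this queue too
    session.commit()
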
diff --git a/scripts/debian/buildd-add-keys b/scripts/debian/buildd-add-keys
new file mode 100755
index 00000000..ddb56a42
--- /dev/null
+++ b/scripts/debian/buildd-add-keys
@@ -0,0 +1,241 @@
+#!/bin/bash
+# No way I try to deal with a crippled sh just for POSIX foo.
+
+# Copyright (C) 2011 Joerg Jaspert
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+# exit on errors
+set -e
+# make sure to only use defined variables
+set -u
+# ERR traps should be inherited from functions too.
+set -E
+
+# import the general variable set.
+export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
+. $SCRIPTVARS
+
+umask 027
+
+# And use one locale, no matter what the caller has set
+export LANG=C
+export LC_ALL=C
+PROGRAM="buildd-add-keys"
+
+# common functions are "outsourced"
+. "${configdir}/common"
+
+function cleanup() {
+    ERRVAL=$?
+    trap - ERR EXIT TERM HUP INT QUIT
+
+    for TEMPFILE in GPGSTATUS GPGLOGS GPGOUTF TEMPKEYDATA; do
+        TFILE=${TEMPFILE:=$TEMPFILE}
+        DELF=${!TFILE:-""}
+        if [ -n "${DELF}" ] && [ -f "${DELF}" ]; then
+            rm -f "${DELF}"
+        fi
+    done
+    exit $ERRVAL
+}
+trap cleanup ERR EXIT TERM HUP INT QUIT
+
+base="${base}/scripts/builddkeyrings"
+INCOMING="${base}/incoming"
+ERRORS="${base}/errors"
+ADMINS="${base}/adminkeys.gpg"
+
+# Default options for our gpg calls
+DEFGPGOPT="--no-default-keyring --batch --no-tty --no-options --exit-on-status-write-error --no-greeting"
+
+if ! [ -d "${INCOMING}" ]; then
+    log "Missing incoming dir, nothing to do"
+    exit 1
+fi
+
+# Whenever something goes wrong, it's put in there.
+mkdir -p "${ERRORS}"
+
+# We process all new files in our incoming directory
+for file in $(ls -1 ${INCOMING}/*.key); do
+    file=${file##*/}
+    # First we want to see if we recognize the filename. The buildd people have
+    # to follow a certain schema:
+    # architecture_builddname.YEAR-MONTH-DAY_HOUR:MINUTE.key
+    if [[ $file =~ (.*)_(.*).([0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}:[0-9]{2}).key ]]; then
+        ARCH=${BASH_REMATCH[1]}
+        BUILDD=${BASH_REMATCH[2]}
+        # Right now timestamp is unused
+        TIMESTAMP=${BASH_REMATCH[3]}
+    else
+        log "Unknown file ${file}, not processing"
+        mv "${INCOMING}/${file}" "${ERRORS}/unknown.${file}.$(date -Is)"
+        continue
+    fi
+
+    # Do we know the architecture?
+    found=0
+    for carch in ${archs}; do
+        if [ "${ARCH}" == "${carch}" ]; then
+            log "Known arch ${ARCH}, buildd ${BUILDD}"
+            found=1
+            break
+        fi
+    done
+
+    if [ ${found} -eq 0 ]; then
+        log "Unknown architecture ${ARCH}"
+        mv "${INCOMING}/${file}" "${ERRORS}/unknownarch.${file}.$(date -Is)"
+        continue
+    fi
+
+    # If we already have a file with this name, something's wrong
+    if [ -f "${base}/${ARCH}/${file}" ]; then
+        log "Already processed this file"
+        mv "${INCOMING}/${file}" "${ERRORS}/duplicate.${file}.$(date -Is)"
+        continue
+    fi
+
+    # Where we want the status-fd from gpgv to turn up
+    GPGSTATUS=$(mktemp -p "${TMPDIR}" GPGSTATUS.XXXXXX)
+    # Same for the logger-fd
+    GPGLOGS=$(mktemp -p "${TMPDIR}" GPGLOGS.XXXXXX)
+    # And "decrypt" gives us output, the key without the pgp sig around it
+    GPGOUTF=$(mktemp -p "${TMPDIR}" GPGOUTF.XXXXXX)
+
+    # Open the filehandles, assigning them to the two files, so we can let gpg use them
+    exec 4> "${GPGSTATUS}"
+    exec 5> "${GPGLOGS}"
+
+    # So let's run gpg, status/logger into the two files, to "decrypt" the keyfile
+    if ! gpg ${DEFGPGOPT} --keyring "${ADMINS}" --status-fd 4 --logger-fd 5 --decrypt "${INCOMING}/${file}" > "${GPGOUTF}"; then
+        ret=$?
+        log "gpg returned with ${ret}, not adding key from file ${file}"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/gpgerror.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/gpgerror.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/gpgerror.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # Read in the status output
+    GPGSTAT=$(cat "${GPGSTATUS}")
+    # And check if we like the sig. It has to be both, GOODSIG and VALIDSIG, or we don't accept it
+    if [[ ${GPGSTAT} =~ "GOODSIG" ]] && [[ ${GPGSTAT} =~ "VALIDSIG" ]]; then
+        log "Signature for ${file} accepted"
+    else
+        log "We are missing one of GOODSIG or VALIDSIG"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/badsig.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/badsig.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/badsig.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # So at this point we know we accepted the signature of the file as valid,
+    # that is, it is from a key allowed for this architecture. Which only
+    # leaves us with the task of checking if the key fulfills the requirements
+    # before we add it to the architecture's keyring.
+
+    # Those currently are:
+    # - keysize 4096 or larger
+    # - RSA key, no encryption capability
+    # - UID matching "buildd autosigning key BUILDDNAME"
+    # - expires within 120 days
+    # - maximum 2 keys per architecture and buildd
+
+    TEMPKEYDATA=$(mktemp -p "${TMPDIR}" BDKEYS.XXXXXX)
+
+    gpg ${DEFGPGOPT} --with-colons "${GPGOUTF}" > "${TEMPKEYDATA}"
+
+    # Read in the TEMPKEYDATA file, but avoid using a subshell like a
+    # while read line otherwise would do
+    exec 4<> "${TEMPKEYDATA}"
+    error=""
+    while read line <&4; do
+        #pub:-:4096:1:FAB983612A6554FA:2011-03-24:2011-07-22::-:buildd autosigning key poulenc :
+
+        # Besides fiddling out the data we need to check later, this regex also checks:
+        # - the keytype (:1:, 1 there means RSA)
+        # - the UID
+        # - that the key does have an expiration date (or it won't match, the second
+        #   date field would be empty)
+        regex="^pub:-:([0-9]{4}):1:([0-9A-F]{16}):([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})::-:buildd autosigning key ${BUILDD} :$"
+        if [[ $line =~ $regex ]]; then
+            KEYSIZE=${BASH_REMATCH[1]}
+            KEYID=${BASH_REMATCH[2]}
+            KEYCREATE=${BASH_REMATCH[3]}
+            KEYEXPIRE=${BASH_REMATCH[4]}
+
+            # We do want 4096 or anything above
+            if [ ${KEYSIZE} -lt 4096 ]; then
+                log "Keysize ${KEYSIZE} too small"
+                error="${error} Keysize ${KEYSIZE} too small"
+                continue
+            fi
+
+            # We want a maximum lifetime of 120 days, so check that.
+            # Easiest to compare in epoch, so let's see: 120 days midnight from now,
+            # compared with their set expiration date at midnight.
+            # maxdate should turn out higher. Just in case, we make it 121 for this check.
+            maxdate=$(date -d '121 day 00:00:00' +%s)
+            theirexpire=$(date -d "${KEYEXPIRE} 00:00:00" +%s)
+            if [ ${theirexpire} -gt ${maxdate} ]; then
+                log "Key expiry ${KEYEXPIRE} wrong"
+                error="${error} Key expiry ${KEYEXPIRE} wrong"
+                continue
+            fi
+        else
+            log "Unknown line $line, sod off"
+            error="${error} Unknown line $line, sod off"
+            continue
+        fi
+    done
+    if [ -n "${error}" ]; then
+        log ${error}
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/badkey.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/badkey.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/badkey.${file}.gpglogs.${DATE}"
+        echo "${error}" >> "${ERRORS}/badkey.${file}.error.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # And now let's check how many keys this buildd already has. 2 is the maximum, so key
+    # rollover works. 3 won't; they have to rm one first.
+    # We need to check for the amount of keys
+    ARCHKEYRING="${base}/${ARCH}/keyring.gpg"
+
+    KEYNO=$(gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --with-colons --list-keys "buildd_${ARCH}-${BUILDD}@buildd.debian.org" | grep -c '^pub:')
+    if [ ${KEYNO} -gt 2 ]; then
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/toomany.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/toomany.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/toomany.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # Right. At this point everything should be in order, which means we should put the key into
+    # the keyring
+    log "Accepting key ${KEYID} for ${ARCH} buildd ${BUILDD}, expire ${KEYEXPIRE}"
+    gpg ${DEFGPGOPT} --status-fd 4 --logger-fd 5 --keyring "${ARCHKEYRING}" --import "${GPGOUTF}" 2>/dev/null
+
+    mv "${INCOMING}/${file}" "${base}/${ARCH}"
+done
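
Note: the pub-line pattern is strict on purpose. The same match, written with
Python's re for clarity, against the sample line quoted in the script's own comment:

    import re

    BUILDD = "poulenc"
    regex = (r"^pub:-:([0-9]{4}):1:([0-9A-F]{16})"
             r":([0-9]{4}-[0-9]{2}-[0-9]{2}):([0-9]{4}-[0-9]{2}-[0-9]{2})"
             r"::-:buildd autosigning key %s :$" % BUILDD)
    line = "pub:-:4096:1:FAB983612A6554FA:2011-03-24:2011-07-22::-:buildd autosigning key poulenc :"
    m = re.match(regex, line)
    print m.groups()   # ('4096', 'FAB983612A6554FA', '2011-03-24', '2011-07-22')

A key that is not RSA (the ":1:"), has no expiry (the second date field empty), or
carries an unexpected UID simply fails the match and ends up in the error branch.
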
diff --git a/scripts/debian/buildd-prepare-dir b/scripts/debian/buildd-prepare-dir
new file mode 100755
index 00000000..0c1f08ba
--- /dev/null
+++ b/scripts/debian/buildd-prepare-dir
@@ -0,0 +1,62 @@
+#!/bin/bash
+# No way I try to deal with a crippled sh just for POSIX foo.
+
+# Copyright (C) 2011 Joerg Jaspert
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+# exit on errors
+set -e
+# make sure to only use defined variables
+set -u
+# ERR traps should be inherited from functions too.
+set -E
+
+# import the general variable set.
+export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
+. $SCRIPTVARS
+
+umask 027
+
+# And use one locale, no matter what the caller has set
+export LANG=C
+export LC_ALL=C
+PROGRAM="buildd-prepare-dir"
+
+# common functions are "outsourced"
+. "${configdir}/common"
+
+# should be relative to the general base dir later
+COPYTARGET="${base}/keyrings"
+base="${base}/scripts/builddkeyrings"
+TARGET="${base}/keyrings"
+REMOVED="${base}/removed-buildd-keys.gpg"
+
+mkdir -p "${TARGET}/keyrings"
+
+for arch in $archs; do
+    if [ -f ${base}/${arch}/keyring.gpg ]; then
+        cp -al ${base}/${arch}/keyring.gpg ${TARGET}/keyrings/buildd-${arch}-keyring.gpg
+        chmod 0644 ${TARGET}/keyrings/buildd-${arch}-keyring.gpg
+    fi
+done
+
+cd ${TARGET}
+sha512sum keyrings/* > sha512sums
+
+rm -f ${TARGET}/sha512sums.txt
+SIGNINGKEY=$(dak admin c signingkeyids)
+gpg --no-options --batch --no-tty --armour --default-key ${SIGNINGKEY} --clearsign -o "${TARGET}/sha512sums.txt" "${TARGET}/sha512sums"
+rm -f ${TARGET}/sha512sums
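
Note: consumers of the published keyrings are expected to check them against the
clearsigned sha512sums.txt. A hypothetical spot check of one keyring in Python
(the amd64 filename follows the script's buildd-${arch}-keyring.gpg pattern):

    import hashlib

    data = open("keyrings/buildd-amd64-keyring.gpg", "rb").read()
    # Compare this against the corresponding entry in sha512sums.txt
    # after verifying that file's signature with gpg --verify.
    print hashlib.sha512(data).hexdigest()
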
diff --git a/scripts/debian/buildd-remove-keys b/scripts/debian/buildd-remove-keys
new file mode 100755
index 00000000..c07ff04b
--- /dev/null
+++ b/scripts/debian/buildd-remove-keys
@@ -0,0 +1,196 @@
+#!/bin/bash
+# No way I try to deal with a crippled sh just for POSIX foo.
+
+# Copyright (C) 2011 Joerg Jaspert
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+# exit on errors
+set -e
+# make sure to only use defined variables
+set -u
+# ERR traps should be inherited from functions too.
+set -E
+
+# import the general variable set.
+export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
+. $SCRIPTVARS
+
+umask 027
+
+# And use one locale, no matter what the caller has set
+export LANG=C
+export LC_ALL=C
+PROGRAM="buildd-remove-keys"
+
+# common functions are "outsourced"
+. "${configdir}/common"
+
+function cleanup() {
+    ERRVAL=$?
+    trap - ERR EXIT TERM HUP INT QUIT
+
+    for TEMPFILE in GPGSTATUS GPGLOGS GPGOUTF TEMPKEYDATA; do
+        TFILE=${TEMPFILE:=$TEMPFILE}
+        DELF=${!TFILE:-""}
+        if [ -n "${DELF}" ] && [ -f "${DELF}" ]; then
+            rm -f "${DELF}"
+        fi
+    done
+    exit $ERRVAL
+}
+trap cleanup ERR EXIT TERM HUP INT QUIT
+
+base="${base}/scripts/builddkeyrings"
+INCOMING="${base}/incoming"
+ERRORS="${base}/errors"
+ADMINS="${base}/adminkeys.gpg"
+REMOVED="${base}/removed-buildd-keys.gpg"
+
+# Default options for our gpg calls
+DEFGPGOPT="--no-default-keyring --batch --no-tty --no-options --exit-on-status-write-error --no-greeting"
+
+if ! [ -d "${INCOMING}" ]; then
+    log "Missing incoming dir, nothing to do"
+    exit 1
+fi
+
+# Whenever something goes wrong, it's put in there.
+mkdir -p "${ERRORS}"
+
+# We process all new files in our incoming directory
+for file in $(ls -1 ${INCOMING}/*.del ); do
+    file=${file##*/}
+    # First we want to see if we recognize the filename. The buildd people have
+    # to follow a certain schema:
+    # architecture_builddname.YEAR-MONTH-DAY_HOUR:MINUTE.del
+    if [[ $file =~ (.*)_(.*).([0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}:[0-9]{2}).del ]]; then
+        ARCH=${BASH_REMATCH[1]}
+        BUILDD=${BASH_REMATCH[2]}
+        # Right now timestamp is unused
+        TIMESTAMP=${BASH_REMATCH[3]}
+    else
+        log "Unknown file ${file}, not processing"
+        mv "${INCOMING}/${file}" "${ERRORS}/unknown.${file}.$(date -Is)"
+        continue
+    fi
+
+    # Do we know the architecture?
+    found=0
+    for carch in ${archs}; do
+        if [ "${ARCH}" == "${carch}" ]; then
+            log "Known arch ${ARCH}, buildd ${BUILDD}"
+            found=1
+            break
+        fi
+    done
+
+    if [ ${found} -eq 0 ]; then
+        log "Unknown architecture ${ARCH}"
+        mv "${INCOMING}/${file}" "${ERRORS}/unknownarch.${file}.$(date -Is)"
+        continue
+    fi
+
+    # If we already have a file with this name, something's wrong
+    if [ -f "${base}/${ARCH}/${file}" ]; then
+        log "Already processed this file"
+        mv "${INCOMING}/${file}" "${ERRORS}/duplicate.${file}.$(date -Is)"
+        continue
+    fi
+
+    # Where we want the status-fd from gpgv to turn up
+    GPGSTATUS=$(mktemp -p "${TMPDIR}" GPGSTATUS.XXXXXX)
+    # Same for the logger-fd
+    GPGLOGS=$(mktemp -p "${TMPDIR}" GPGLOGS.XXXXXX)
+    # And "decrypt" gives us output, the key without the pgp sig around it
+    GPGOUTF=$(mktemp -p "${TMPDIR}" GPGOUTF.XXXXXX)
+
+    # Open the filehandles, assigning them to the two files, so we can let gpg use them
+    exec 4> "${GPGSTATUS}"
+    exec 5> "${GPGLOGS}"
+
+    # So let's run gpg, status/logger into the two files, to "decrypt" the keyfile
+    if ! gpg ${DEFGPGOPT} --keyring "${ADMINS}" --status-fd 4 --logger-fd 5 --decrypt "${INCOMING}/${file}" > "${GPGOUTF}"; then
+        ret=$?
+        log "gpg returned with ${ret}, not removing key using ${file}"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/gpgerror.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/gpgerror.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/gpgerror.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # Read in the status output
+    GPGSTAT=$(cat "${GPGSTATUS}")
+    # And check if we like the sig. It has to be both, GOODSIG and VALIDSIG, or we don't accept it
+    if [[ ${GPGSTAT} =~ "GOODSIG" ]] && [[ ${GPGSTAT} =~ "VALIDSIG" ]]; then
+        log "Signature for ${file} accepted"
+    else
+        log "We are missing one of GOODSIG or VALIDSIG"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/badsig.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/badsig.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/badsig.${file}.gpglogs.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+
+    # So at this point we know we accepted the signature of the file as valid,
+    # that is, it is from a key allowed for this architecture. Which only
+    # leaves us with the task of checking if there is a key to remove, and then
+    # removing it. We won't even check they have a key left, so if they want to
+    # they can empty out the set for an architecture
+
+    # Read in the GPGOUTF, but avoid using a subshell like a
+    # while read line otherwise would do
+    exec 4<> "${GPGOUTF}"
+    error=""
+    while read line <&4; do
+        if [[ $line =~ key:.([0-9A-F]{16}) ]]; then
+            KEYID=${BASH_REMATCH[1]}
+        elif [[ $line =~ comment:.(.*) ]]; then
+            COMMENT=${BASH_REMATCH[1]}
+        else
+            echo "Nay"
+        fi
+    done
+
+    # Right, we have the keyid, know the arch, let's see if we can remove it
+    ARCHKEYRING="${base}/${ARCH}/keyring.gpg"
+
+    # Is the key in there?
+    KEYNO=$(gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --with-colons --list-keys ${KEYID} | grep -c '^pub:')
+
+    if [ $KEYNO -eq 1 ]; then
+        # Right, exactly one there, let's get rid of it.
+        # So put it into the removed keyring
+        gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --export ${KEYID} | gpg ${DEFGPGOPT} --keyring "${REMOVED}" --import 2>/dev/null
+        if gpg ${DEFGPGOPT} --keyring "${ARCHKEYRING}" --yes --delete-keys ${KEYID}; then
+            log "Removed key ${KEYID}, reason: ${COMMENT}"
+            mv "${INCOMING}/${file}" "${base}/${ARCH}"
+            continue
+        fi
+    else
+        log "Found more (or less) than one key I could delete. Not doing anything"
+        DATE=$(date -Is)
+        mv "${INCOMING}/${file}" "${ERRORS}/toomanykeys.${file}.${DATE}"
+        mv "${GPGSTATUS}" "${ERRORS}/toomanykeys.${file}.gpgstatus.${DATE}"
+        mv "${GPGLOGS}" "${ERRORS}/toomanykeys.${file}.gpglogs.${DATE}"
+        echo "${error}" >> "${ERRORS}/toomanykeys.${file}.error.${DATE}"
+        rm -f "${GPGOUTF}"
+        continue
+    fi
+done
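
Note: both buildd key scripts lean on epoch arithmetic for date checks. The 120-day
lifetime cap that buildd-add-keys enforces translates to Python roughly as follows
(midnight-based in the shell; this sketch keeps the same one-day slack but uses the
current time, so treat it as an approximation):

    import time, calendar

    def expiry_acceptable(keyexpire, now=None):
        # keyexpire is the gpg expiry field, e.g. '2011-07-22'
        now = time.time() if now is None else now
        maxdate = now + 121 * 86400   # 121 days of slack, as in the script
        theirexpire = calendar.timegm(time.strptime(keyexpire, '%Y-%m-%d'))
        return theirexpire <= maxdate
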