X-Git-Url: https://git.decadent.org.uk/gitweb/?a=blobdiff_plain;f=dak%2Fgenerate_releases.py;h=3006364602517c17466c075afa82efc0ee7fed56;hb=475051efae41a30723cdc1ab82c521cd1accf75b;hp=b21f30a5483f11ab5997bb393fbcb603473bd252;hpb=e1156b3b857f5496a299e621d291cff0ba957d23;p=dak.git

diff --git a/dak/generate_releases.py b/dak/generate_releases.py
index b21f30a5..30063646 100755
--- a/dak/generate_releases.py
+++ b/dak/generate_releases.py
@@ -40,7 +40,6 @@ import bz2
 import apt_pkg
 from tempfile import mkstemp, mkdtemp
 import commands
-from multiprocessing import Pool, TimeoutError
 from sqlalchemy.orm import object_session
 
 from daklib import utils, daklog
@@ -48,10 +47,10 @@ from daklib.regexes import re_gensubrelease, re_includeinrelease
 from daklib.dak_exceptions import *
 from daklib.dbconn import *
 from daklib.config import Config
+from daklib.dakmultiprocessing import DakProcessPool, PROC_STATUS_SUCCESS
 
 ################################################################################
 Logger = None                  #: Our logging object
-results = []                   #: Results of the subprocesses
 
 ################################################################################
 
@@ -74,12 +73,7 @@ SUITE can be a space seperated list, e.g.
 
 ########################################################################
 
-def get_result(arg):
-    global results
-    if arg:
-        results.append(arg)
-
-def sign_release_dir(dirname):
+def sign_release_dir(suite, dirname):
     cnf = Config()
 
     if cnf.has_key("Dinstall::SigningKeyring"):
@@ -88,7 +82,6 @@
             keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
 
         arguments = "--no-options --batch --no-tty --armour"
-        signkeyids = cnf.signingkeyids.split()
 
         relname = os.path.join(dirname, 'Release')
 
@@ -100,17 +93,20 @@
         if os.path.exists(inlinedest):
             os.unlink(inlinedest)
 
-        for keyid in signkeyids:
-            if keyid != "":
-                defkeyid = "--default-key %s" % keyid
-            else:
-                defkeyid = ""
+        # We can only use one key for inline signing so use the first one in
+        # the array for consistency
+        firstkey = True
+
+        for keyid in suite.signingkeys:
+            defkeyid = "--default-key %s" % keyid
 
             os.system("gpg %s %s %s --detach-sign <%s >>%s" %
                       (keyring, defkeyid, arguments, relname, dest))
-            os.system("gpg %s %s %s --clearsign <%s >>%s" %
-                      (keyring, defkeyid, arguments, relname, inlinedest))
+
+            if firstkey:
+                os.system("gpg %s %s %s --clearsign <%s >>%s" %
+                          (keyring, defkeyid, arguments, relname, inlinedest))
+                firstkey = False
 
 class ReleaseWriter(object):
     def __init__(self, suite):
@@ -152,11 +148,15 @@
             suite_suffix = "%s" % (cnf.Find("Dinstall::SuiteSuffix"))
 
         outfile = os.path.join(cnf["Dir::Root"], 'dists', "%s/%s" % (suite.suite_name, suite_suffix), "Release")
-        out = open(outfile, "w")
+        out = open(outfile + ".new", "w")
 
         for key, dbfield in attribs:
             if getattr(suite, dbfield) is not None:
-                out.write("%s: %s\n" % (key, getattr(suite, dbfield)))
+                # TEMPORARY HACK HACK HACK until we change the way we store the suite names etc
+                if key == 'Suite' and getattr(suite, dbfield) == 'squeeze-updates':
+                    out.write("Suite: stable-updates\n")
+                else:
+                    out.write("%s: %s\n" % (key, getattr(suite, dbfield)))
 
         out.write("Date: %s\n" % (time.strftime("%a, %d %b %Y %H:%M:%S UTC", time.gmtime(time.time()))))
 
@@ -278,8 +278,9 @@
                 out.write(" %s %8d %s\n" % (fileinfo[filename][h], fileinfo[filename]['len'], filename))
 
         out.close()
+        os.rename(outfile + '.new', outfile)
 
-        sign_release_dir(os.path.dirname(outfile))
+        sign_release_dir(suite, os.path.dirname(outfile))
 
         os.chdir(oldcwd)
 
@@ -287,7 +288,7 @@ class ReleaseWriter(object):
 
 def main ():
-    global Logger, results
+    global Logger
 
     cnf = Config()
 
@@ -322,10 +323,8 @@
         suites = session.query(Suite).filter(Suite.untouchable == False).all()
 
     broken=[]
-    # For each given suite, run one process
-    results = []
 
-    pool = Pool()
+    pool = DakProcessPool()
 
     for s in suites:
         # Setup a multiprocessing Pool. As many workers as we have CPU cores.
@@ -335,18 +334,17 @@
         print "Processing %s" % s.suite_name
         Logger.log(['Processing release file for Suite: %s' % (s.suite_name)])
-        pool.apply_async(generate_helper, (s.suite_id, ), callback=get_result)
+        pool.apply_async(generate_helper, (s.suite_id, ))
 
     # No more work will be added to our pool, close it and then wait for all to finish
     pool.close()
     pool.join()
 
-    retcode = 0
+    retcode = pool.overall_status()
 
-    if len(results) > 0:
-        Logger.log(['Release file generation broken: %s' % (results)])
-        print "Release file generation broken:\n", '\n'.join(results)
-        retcode = 1
+    if retcode > 0:
+        # TODO: CENTRAL FUNCTION FOR THIS / IMPROVE LOGGING
+        Logger.log(['Release file generation broken: %s' % (','.join([str(x[1]) for x in pool.results]))])
 
     Logger.close()
 
@@ -358,13 +356,12 @@ def generate_helper(suite_id):
     '''
     session = DBConn().session()
     suite = Suite.get(suite_id, session)
-    try:
-        rw = ReleaseWriter(suite)
-        rw.generate_release_files()
-    except Exception, e:
-        return str(e)
 
-    return
+    # We allow the process handler to catch and deal with any exceptions
+    rw = ReleaseWriter(suite)
+    rw.generate_release_files()
+
+    return (PROC_STATUS_SUCCESS, 'Release file written for %s' % suite.suite_name)
 
 #######################################################################################
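
The patch drops the module-level results list and the get_result() callback in favour of dak's DakProcessPool: generate_helper() now returns a (status, message) tuple and main() reads pool.overall_status() and pool.results instead. The snippet below is only a rough, self-contained sketch of that pattern built on the standard library's multiprocessing.Pool; the SketchPool class, the numeric status values and PROC_STATUS_EXCEPTION are illustrative assumptions, not the real daklib.dakmultiprocessing API.

    # Sketch only: emulates the (status, message) worker convention used in the patch.
    from multiprocessing import Pool

    PROC_STATUS_SUCCESS = 0      # assumed values; the real constants live in
    PROC_STATUS_EXCEPTION = 1    # daklib.dakmultiprocessing

    def worker(suite_name):
        # Stand-in for generate_helper(): report success or failure per suite.
        if suite_name == 'broken':
            return (PROC_STATUS_EXCEPTION, 'failed for %s' % suite_name)
        return (PROC_STATUS_SUCCESS, 'Release file written for %s' % suite_name)

    class SketchPool(object):
        # Collects each worker's (status, message) tuple and reports the worst
        # status, roughly what overall_status()/results are used for in the patch.
        def __init__(self):
            self.pool = Pool()
            self.results = []

        def apply_async(self, func, args):
            self.pool.apply_async(func, args, callback=self.results.append)

        def close(self):
            self.pool.close()

        def join(self):
            self.pool.join()

        def overall_status(self):
            return max([status for status, msg in self.results] + [PROC_STATUS_SUCCESS])

    if __name__ == '__main__':
        pool = SketchPool()
        for s in ('stable', 'testing', 'broken'):
            pool.apply_async(worker, (s,))
        pool.close()
        pool.join()
        print pool.overall_status(), [msg for status, msg in pool.results]

With this layout the parent process never shares mutable state with the workers; each result travels back through the pool's own callback machinery, which is what lets the patched generate_helper() simply raise on error and leave reporting to the pool.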