From: Mark Hymers Date: Wed, 28 Oct 2009 10:50:53 +0000 (+0000) Subject: Merge commit 'ftpmaster/master' X-Git-Url: https://git.decadent.org.uk/gitweb/?a=commitdiff_plain;h=1fa1f22b70c6ee46aea78ee40b9797a574d7c583;hp=52b14d883272923a4296ac02921ccf350815db83;p=dak.git Merge commit 'ftpmaster/master' --- diff --git a/config/debian/apt.conf b/config/debian/apt.conf index 66ea80a8..4c01d303 100644 --- a/config/debian/apt.conf +++ b/config/debian/apt.conf @@ -50,7 +50,7 @@ tree "dists/testing" FileList "/srv/ftp.debian.org/database/dists/testing_$(SECTION)_binary-$(ARCH).list"; SourceFileList "/srv/ftp.debian.org/database/dists/testing_$(SECTION)_source.list"; Sections "main contrib non-free"; - Architectures "alpha amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source"; + Architectures "amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source"; BinOverride "override.squeeze.$(SECTION)"; ExtraOverride "override.squeeze.extra.$(SECTION)"; SrcOverride "override.squeeze.$(SECTION).src"; @@ -61,7 +61,7 @@ tree "dists/testing-proposed-updates" FileList "/srv/ftp.debian.org/database/dists/testing-proposed-updates_$(SECTION)_binary-$(ARCH).list"; SourceFileList "/srv/ftp.debian.org/database/dists/testing-proposed-updates_$(SECTION)_source.list"; Sections "main contrib non-free"; - Architectures "alpha amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source"; + Architectures "amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source"; BinOverride "override.squeeze.$(SECTION)"; ExtraOverride "override.squeeze.extra.$(SECTION)"; SrcOverride "override.squeeze.$(SECTION).src"; @@ -109,7 +109,7 @@ tree "dists/testing/main" { FileList "/srv/ftp.debian.org/database/dists/testing_main_$(SECTION)_binary-$(ARCH).list"; Sections "debian-installer"; - Architectures "alpha amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64"; + Architectures "amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64"; BinOverride "override.squeeze.main.$(SECTION)"; SrcOverride "override.squeeze.main.src"; BinCacheDB "packages-debian-installer-$(ARCH).db"; @@ -121,7 +121,7 @@ tree "dists/testing/non-free" { FileList "/srv/ftp.debian.org/database/dists/testing_non-free_$(SECTION)_binary-$(ARCH).list"; Sections "debian-installer"; - Architectures "alpha amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64"; + Architectures "amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64"; BinOverride "override.squeeze.main.$(SECTION)"; SrcOverride "override.squeeze.main.src"; BinCacheDB "packages-debian-installer-$(ARCH).db"; @@ -133,7 +133,7 @@ tree "dists/testing-proposed-updates/main" { FileList "/srv/ftp.debian.org/database/dists/testing-proposed-updates_main_$(SECTION)_binary-$(ARCH).list"; Sections "debian-installer"; - Architectures "alpha amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64"; + Architectures "amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64"; BinOverride "override.squeeze.main.$(SECTION)"; SrcOverride "override.squeeze.main.src"; BinCacheDB "packages-debian-installer-$(ARCH).db"; diff --git a/config/debian/cron.dinstall b/config/debian/cron.dinstall index 5a8f1073..1c9fa5af 100755 --- a/config/debian/cron.dinstall +++ b/config/debian/cron.dinstall @@ -355,9 +355,12 @@ 
function merkel3() { ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_ddaccess dak@merkel.debian.org sleep 1 } -function runparts() { - log "Using run-parts to run scripts in $base/scripts/distmnt" - run-parts --report $base/scripts/distmnt +function mirrorpush() { + log "Starting the mirrorpush" + date -u > /srv/ftp.debian.org/web/mirrorstart + echo "Using dak v1" >> /srv/ftp.debian.org/web/mirrorstart + echo "Running on host $(hostname -f)" >> /srv/ftp.debian.org/web/mirrorstart + sudo -H -u archvsync /home/archvsync/runmirrors > ~dak/runmirrors.log 2>&1 & } function i18n2() { @@ -638,7 +641,7 @@ GO=( ARGS="" ERR="false" ) -stage $GO +stage $GO & GO=( FUNC="punew" @@ -662,7 +665,7 @@ GO=( ARGS="" ERR="false" ) -stage $GO +stage $GO & lockfile "$LOCK_ACCEPTED" lockfile "$LOCK_NEW" @@ -717,7 +720,7 @@ GO=( ARGS="" ERR="false" ) -stage $GO +stage $GO & GO=( FUNC="overrides" @@ -797,7 +800,7 @@ GO=( ARGS="" ERR="" ) -stage $GO +stage $GO & rm -f "${NOTICE}" rm -f "${LOCK_DAILY}" @@ -810,7 +813,7 @@ GO=( ARGS="" ERR="" ) -stage $GO +stage $GO & GO=( FUNC="expire" @@ -818,7 +821,7 @@ GO=( ARGS="" ERR="" ) -stage $GO +stage $GO & GO=( FUNC="transitionsclean" @@ -826,7 +829,7 @@ GO=( ARGS="" ERR="" ) -stage $GO +stage $GO & GO=( FUNC="reports" @@ -834,7 +837,7 @@ GO=( ARGS="" ERR="" ) -stage $GO +stage $GO & GO=( FUNC="dm" @@ -842,7 +845,7 @@ GO=( ARGS="" ERR="" ) -stage $GO +stage $GO & GO=( FUNC="bts" @@ -850,7 +853,7 @@ GO=( ARGS="" ERR="false" ) -stage $GO +stage $GO & GO=( FUNC="merkel2" @@ -858,11 +861,11 @@ GO=( ARGS="" ERR="false" ) -stage $GO +stage $GO & GO=( - FUNC="runparts" - TIME="run-parts" + FUNC="mirrorpush" + TIME="mirrorpush" ARGS="" ERR="false" ) @@ -882,7 +885,7 @@ GO=( ARGS="" ERR="false" ) -stage $GO +stage $GO & GO=( FUNC="testingsourcelist" @@ -908,7 +911,7 @@ GO=( ARGS="" ERR="false" ) -stage $GO +stage $GO & GO=( FUNC="merkel3" @@ -916,7 +919,7 @@ GO=( ARGS="" ERR="false" ) -stage $GO +stage $GO & GO=( FUNC="compress" diff --git a/config/debian/dak.conf b/config/debian/dak.conf index 474a4f3d..b254a7ad 100644 --- a/config/debian/dak.conf +++ b/config/debian/dak.conf @@ -26,6 +26,7 @@ Dinstall CloseBugs "true"; OverrideDisparityCheck "true"; DefaultSuite "unstable"; + LintianTags "/srv/ftp.debian.org/dak/config/debian/lintian.tags"; QueueBuildSuites { unstable; diff --git a/config/debian/lintian.tags b/config/debian/lintian.tags new file mode 100644 index 00000000..1c05410c --- /dev/null +++ b/config/debian/lintian.tags @@ -0,0 +1,76 @@ +lintian: + warning: + - statically-linked-binary + - arch-independent-package-contains-binary-or-object + - arch-dependent-file-in-usr-share + - missing-build-dependency + - arch-dependent-file-in-usr-share + - missing-dependency-on-libc + - usr-share-doc-symlink-without-dependency + - binary-with-bad-dynamic-table + - usr-share-doc-symlink-without-dependency + - mknod-in-maintainer-script + error: + - binary-in-etc + - missing-dependency-on-perlapi + - copyright-lists-upstream-authors-with-dh_make-boilerplate + - section-is-dh_make-template + - package-installs-python-pyc + - library-in-debug-or-profile-should-not-be-stripped + - binary-file-compressed-with-upx + - html-changelog-without-text-version + - file-in-usr-marked-as-conffile + - build-info-in-binary-control-file-section + - debian-control-with-duplicate-fields + - not-allowed-control-file + - control-file-has-bad-permissions + - control-file-has-bad-owner + - no-copyright-file + - copyright-refers-to-old-directory + - 
copyright-file-compressed + - copyright-file-is-symlink + - usr-share-doc-symlink-to-foreign-package + - old-style-copyright-file + - copyright-refers-to-incorrect-directory + - package-has-no-description + - description-synopsis-is-empty + - extended-description-is-empty + - description-is-dh_make-template + - file-in-etc-not-marked-as-conffile + - no-package-name + - bad-package-name + - package-not-lowercase + - no-version-field + - bad-version-number + - upstream-version-not-numeric + - no-architecture-field + - magic-arch-in-arch-list + - too-many-architectures + - arch-any-in-binary-pkg + - no-maintainer-field + - maintainer-name-missing + - maintainer-address-missing + - maintainer-address-malformed + - maintainer-address-is-on-localhost + - uploader-name-missing + - uploader-address-malformed + - uploader-address-is-on-localhost + - no-source-field + - source-field-does-not-match-pkg-name + - section-is-dh_make-template + - build-depends-on-essential-package-without-using-version + - depends-on-build-essential-package-without-using-version + - build-depends-on-build-essential + - executable-in-usr-share-doc + - symlink-has-too-many-up-segments + - debian-rules-is-symlink + - debian-rules-not-a-makefile + - debian-rules-missing-required-target + - maintainer-script-removes-device-files + - no-standards-version-field + - invalid-standards-version + - dir-or-file-in-var-www + - dir-or-file-in-tmp + - dir-or-file-in-mnt + - dir-or-file-in-opt + - dir-or-file-in-srv diff --git a/dak/add_user.py b/dak/add_user.py index 8da9dcdf..77de3e3f 100755 --- a/dak/add_user.py +++ b/dak/add_user.py @@ -18,13 +18,9 @@ add his key to the GPGKeyring # I know what I say. I dont know python and I wrote it. So go and read some other stuff. import commands -import re import sys -import time -import os import apt_pkg -from daklib import daklog from daklib import utils from daklib.dbconn import DBConn, add_database_user, get_or_set_uid from daklib.regexes import re_gpg_fingerprint, re_user_address, re_user_mails, re_user_name diff --git a/dak/admin.py b/dak/admin.py index e3d5298a..eb765a66 100755 --- a/dak/admin.py +++ b/dak/admin.py @@ -25,7 +25,6 @@ import apt_pkg from daklib import utils from daklib.dbconn import * -from daklib.config import Config ################################################################################ diff --git a/dak/check_archive.py b/dak/check_archive.py index 6ca84c69..2162068e 100755 --- a/dak/check_archive.py +++ b/dak/check_archive.py @@ -40,7 +40,6 @@ import apt_inst from daklib.dbconn import * from daklib import utils -from daklib.regexes import re_issource from daklib.config import Config ################################################################################ @@ -68,7 +67,7 @@ The following MODEs are available: missing-overrides - check for missing overrides source-in-one-dir - ensure the source for each package is in one directory timestamps - check for future timestamps in .deb's - tar-gz-in-dsc - ensure each .dsc lists a .tar.gz file + files-in-dsc - ensure each .dsc references appropriate Files validate-indices - ensure files mentioned in Packages & Sources exist files-not-symlinks - check files in the database aren't symlinks validate-builddeps - validate build-dependencies of .dsc files in the archive @@ -320,9 +319,10 @@ def check_timestamps(): ################################################################################ -def check_missing_tar_gz_in_dsc(): +def check_files_in_dsc(): """ - Ensure each .dsc lists a .tar.gz file + Ensure each .dsc 
lists appropriate files in its Files field (according + to the format announced in its Format field). """ count = 0 @@ -343,19 +343,11 @@ def check_missing_tar_gz_in_dsc(): except: utils.fubar("error parsing .dsc file '%s'." % (filename)) - dsc_files = utils.build_file_list(dsc, is_a_dsc=1) - has_tar = 0 + reasons = utils.check_dsc_files(filename, dsc) + for r in reasons: + utils.warn(r) - for f in dsc_files.keys(): - m = re_issource.match(f) - if not m: - utils.fubar("%s not recognised as source." % (f)) - ftype = m.group(3) - if ftype == "orig.tar.gz" or ftype == "tar.gz": - has_tar = 1 - - if not has_tar: - utils.warn("%s has no .tar.gz in the .dsc file." % (f)) + if len(reasons) > 0: count += 1 if count: @@ -526,8 +518,8 @@ def main (): check_source_in_one_dir() elif mode == "timestamps": check_timestamps() - elif mode == "tar-gz-in-dsc": - check_missing_tar_gz_in_dsc() + elif mode == "files-in-dsc": + check_files_in_dsc() elif mode == "validate-indices": check_indices_files_exist() elif mode == "files-not-symlinks": diff --git a/dak/check_overrides.py b/dak/check_overrides.py index 5cccfb6b..1e9a6d6b 100755 --- a/dak/check_overrides.py +++ b/dak/check_overrides.py @@ -288,7 +288,7 @@ SELECT s.source FROM source s, src_associations sa, files f, location l, # we can copy packages[package] = 1 Logger.log(["copying missing override", osuite, component, - type, package, priorities[i[1]], sections[i[2]], i[3]]) + otype, package, priorities[i[1]], sections[i[2]], i[3]]) if not Options["No-Action"]: session.execute("""INSERT INTO override (package, suite, component, priority, section, type, maintainer) diff --git a/dak/clean_queues.py b/dak/clean_queues.py index 34d90473..a5b15427 100755 --- a/dak/clean_queues.py +++ b/dak/clean_queues.py @@ -33,14 +33,16 @@ ################################################################################ -import os, stat, sys, time +import os, os.path, stat, sys, time import apt_pkg from daklib import utils +from daklib import daklog +from daklib.config import Config ################################################################################ -Cnf = None Options = None +Logger = None del_dir = None delete_date = None @@ -60,15 +62,15 @@ Clean out incoming directories. ################################################################################ -def init (): +def init (cnf): global delete_date, del_dir delete_date = int(time.time())-(int(Options["Days"])*84600) + date = time.strftime("%Y-%m-%d") + del_dir = os.path.join(cnf["Dir::Morgue"], cnf["Clean-Queues::MorgueSubDir"], date) # Ensure a directory exists to remove files to if not Options["No-Action"]: - date = time.strftime("%Y-%m-%d") - del_dir = Cnf["Dir::Morgue"] + '/' + Cnf["Clean-Queues::MorgueSubDir"] + '/' + date if not os.path.exists(del_dir): os.makedirs(del_dir, 02775) if not os.path.isdir(del_dir): @@ -77,33 +79,38 @@ def init (): # Move to the directory to clean incoming = Options["Incoming"] if incoming == "": - incoming = Cnf["Dir::Queue::Unchecked"] + incoming = cnf["Dir::Queue::Unchecked"] os.chdir(incoming) # Remove a file to the morgue def remove (f): + fname = os.path.basename(f) if os.access(f, os.R_OK): - dest_filename = del_dir + '/' + os.path.basename(f) + Logger.log(["move file to morgue", fname, del_dir]) + if Options["Verbose"]: + print "Removing '%s' (to '%s')." 
% (fname, del_dir) + if Options["No-Action"]: + return + + dest_filename = os.path.join(del_dir, fname) # If the destination file exists; try to find another filename to use if os.path.exists(dest_filename): dest_filename = utils.find_next_free(dest_filename, 10) + Logger.log(["change destination file name", os.path.basename(dest_filename)]) utils.move(f, dest_filename, 0660) else: - utils.warn("skipping '%s', permission denied." % (os.path.basename(f))) + Logger.log(["skipping file because of permission problem", fname]) + utils.warn("skipping '%s', permission denied." % fname) # Removes any old files. # [Used for Incoming/REJECT] # def flush_old (): + Logger.log(["check Incoming/REJECT for old files"]) for f in os.listdir('.'): if os.path.isfile(f): if os.stat(f)[stat.ST_MTIME] < delete_date: - if Options["No-Action"]: - print "I: Would delete '%s'." % (os.path.basename(f)) - else: - if Options["Verbose"]: - print "Removing '%s' (to '%s')." % (os.path.basename(f), del_dir) - remove(f) + remove(f) else: if Options["Verbose"]: print "Skipping, too new, '%s'." % (os.path.basename(f)) @@ -115,6 +122,7 @@ def flush_orphans (): all_files = {} changes_files = [] + Logger.log(["check Incoming for old orphaned files"]) # Build up the list of all files in the directory for i in os.listdir('.'): if os.path.isfile(i): @@ -155,12 +163,7 @@ def flush_orphans (): # a .dsc) and should be deleted if old enough. for f in all_files.keys(): if os.stat(f)[stat.ST_MTIME] < delete_date: - if Options["No-Action"]: - print "I: Would delete '%s'." % (os.path.basename(f)) - else: - if Options["Verbose"]: - print "Removing '%s' (to '%s')." % (os.path.basename(f), del_dir) - remove(f) + remove(f) else: if Options["Verbose"]: print "Skipping, too new, '%s'." % (os.path.basename(f)) @@ -168,15 +171,15 @@ def flush_orphans (): ################################################################################ def main (): - global Cnf, Options + global Options, Logger - Cnf = utils.get_conf() + cnf = Config() for i in ["Help", "Incoming", "No-Action", "Verbose" ]: - if not Cnf.has_key("Clean-Queues::Options::%s" % (i)): - Cnf["Clean-Queues::Options::%s" % (i)] = "" - if not Cnf.has_key("Clean-Queues::Options::Days"): - Cnf["Clean-Queues::Options::Days"] = "14" + if not cnf.has_key("Clean-Queues::Options::%s" % (i)): + cnf["Clean-Queues::Options::%s" % (i)] = "" + if not cnf.has_key("Clean-Queues::Options::Days"): + cnf["Clean-Queues::Options::Days"] = "14" Arguments = [('h',"help","Clean-Queues::Options::Help"), ('d',"days","Clean-Queues::Options::Days", "IntLevel"), @@ -184,25 +187,29 @@ def main (): ('n',"no-action","Clean-Queues::Options::No-Action"), ('v',"verbose","Clean-Queues::Options::Verbose")] - apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv) - Options = Cnf.SubTree("Clean-Queues::Options") + apt_pkg.ParseCommandLine(cnf.Cnf,Arguments,sys.argv) + Options = cnf.SubTree("Clean-Queues::Options") if Options["Help"]: usage() - init() + Logger = daklog.Logger(cnf, 'clean-queues', Options['No-Action']) + + init(cnf) if Options["Verbose"]: print "Processing incoming..." flush_orphans() - reject = Cnf["Dir::Queue::Reject"] + reject = cnf["Dir::Queue::Reject"] if os.path.exists(reject) and os.path.isdir(reject): if Options["Verbose"]: print "Processing incoming/REJECT..." 
os.chdir(reject) flush_old() + Logger.close() + ####################################################################################### if __name__ == '__main__': diff --git a/dak/clean_suites.py b/dak/clean_suites.py index d40f67ec..52b2a8cc 100755 --- a/dak/clean_suites.py +++ b/dak/clean_suites.py @@ -35,10 +35,12 @@ from datetime import datetime, timedelta from daklib.config import Config from daklib.dbconn import * from daklib import utils +from daklib import daklog ################################################################################ Options = None +Logger = None ################################################################################ @@ -59,13 +61,13 @@ def check_binaries(now_date, delete_date, max_delete, session): # Get the list of binary packages not in a suite and mark them for # deletion. - # TODO: This can be a single SQL UPDATE statement q = session.execute(""" -SELECT b.file FROM binaries b, files f +SELECT b.file, f.filename FROM binaries b, files f WHERE f.last_used IS NULL AND b.file = f.id AND NOT EXISTS (SELECT 1 FROM bin_associations ba WHERE ba.bin = b.id)""") for i in q.fetchall(): + Logger.log(["set lastused", i[1]]) session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid AND last_used IS NULL", {'lastused': now_date, 'fileid': i[0]}) session.commit() @@ -73,13 +75,13 @@ SELECT b.file FROM binaries b, files f # Check for any binaries which are marked for eventual deletion # but are now used again. - # TODO: This can be a single SQL UPDATE statement q = session.execute(""" -SELECT b.file FROM binaries b, files f +SELECT b.file, f.filename FROM binaries b, files f WHERE f.last_used IS NOT NULL AND f.id = b.file AND EXISTS (SELECT 1 FROM bin_associations ba WHERE ba.bin = b.id)""") for i in q.fetchall(): + Logger.log(["unset lastused", i[1]]) session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid", {'fileid': i[0]}) session.commit() @@ -91,7 +93,7 @@ def check_sources(now_date, delete_date, max_delete, session): # Get the list of source packages not in a suite and not used by # any binaries. q = session.execute(""" -SELECT s.id, s.file FROM source s, files f +SELECT s.id, s.file, f.filename FROM source s, files f WHERE f.last_used IS NULL AND s.file = f.id AND NOT EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id) AND NOT EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id)""") @@ -103,20 +105,24 @@ SELECT s.id, s.file FROM source s, files f for i in q.fetchall(): source_id = i[0] dsc_file_id = i[1] + dsc_fname = i[2] # Mark the .dsc file for deletion + Logger.log(["set lastused", dsc_fname]) session.execute("""UPDATE files SET last_used = :last_used WHERE id = :dscfileid AND last_used IS NULL""", {'last_used': now_date, 'dscfileid': dsc_file_id}) # Mark all other files references by .dsc too if they're not used by anyone else - x = session.execute("""SELECT f.id FROM files f, dsc_files d + x = session.execute("""SELECT f.id, f.filename FROM files f, dsc_files d WHERE d.source = :sourceid AND d.file = f.id""", {'sourceid': source_id}) for j in x.fetchall(): file_id = j[0] + file_name = j[1] y = session.execute("SELECT id FROM dsc_files d WHERE d.file = :fileid", {'fileid': file_id}) if len(y.fetchall()) == 1: + Logger.log(["set lastused", file_name]) session.execute("""UPDATE files SET last_used = :lastused WHERE id = :fileid AND last_used IS NULL""", {'lastused': now_date, 'fileid': file_id}) @@ -127,7 +133,7 @@ SELECT s.id, s.file FROM source s, files f # are now used again. 
q = session.execute(""" -SELECT f.id FROM source s, files f, dsc_files df +SELECT f.id, f.filename FROM source s, files f, dsc_files df WHERE f.last_used IS NOT NULL AND s.id = df.source AND df.file = f.id AND ((EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id)) OR (EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id)))""") @@ -135,9 +141,8 @@ SELECT f.id FROM source s, files f, dsc_files df #### XXX: this should also handle deleted binaries specially (ie, not #### reinstate sources because of them - # Could be done in SQL; but left this way for hysterical raisins - # [and freedom to innovate don'cha know?] for i in q.fetchall(): + Logger.log(["unset lastused", i[1]]) session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid", {'fileid': i[0]}) @@ -158,13 +163,15 @@ def check_files(now_date, delete_date, max_delete, session): SELECT id, filename FROM files f WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id) AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id) + AND last_used IS NULL ORDER BY filename""") ql = q.fetchall() if len(ql) > 0: - print "WARNING: check_files found something it shouldn't" + utils.warn("check_files found something it shouldn't") for x in ql: - print x + utils.warn("orphaned file: %s" % x) + Logger.log(["set lastused", x[1], "ORPHANED FILE"]) session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid", {'lastused': now_date, 'fileid': x[0]}) @@ -177,12 +184,13 @@ def clean_binaries(now_date, delete_date, max_delete, session): # XXX: why doesn't this remove the files here as well? I don't think it # buys anything keeping this separate print "Cleaning binaries from the DB..." + print "Deleting from binaries table... " + for bin in session.query(DBBinary).join(DBBinary.poolfile).filter(PoolFile.last_used <= delete_date): + Logger.log(["delete binary", bin.poolfile.filename]) + if not Options["No-Action"]: + session.delete(bin) if not Options["No-Action"]: - print "Deleting from binaries table... " - session.execute("""DELETE FROM binaries WHERE EXISTS - (SELECT 1 FROM files WHERE binaries.file = files.id - AND files.last_used <= :deldate)""", - {'deldate': delete_date}) + session.commit() ######################################## @@ -200,41 +208,37 @@ def clean(now_date, delete_date, max_delete, session): os.mkdir(dest) # Delete from source - if not Options["No-Action"]: - print "Deleting from source table... " - session.execute("""DELETE FROM dsc_files - WHERE EXISTS - (SELECT 1 FROM source s, files f, dsc_files df - WHERE f.last_used <= :deletedate - AND s.file = f.id AND s.id = df.source - AND df.id = dsc_files.id)""", {'deletedate': delete_date}) - session.execute("""DELETE FROM source - WHERE EXISTS - (SELECT 1 FROM files - WHERE source.file = files.id - AND files.last_used <= :deletedate)""", {'deletedate': delete_date}) + print "Deleting from source table... 
" + q = session.execute(""" +SELECT s.id, f.filename FROM source s, files f + WHERE f.last_used <= :deletedate + AND s.file = f.id""", {'deletedate': delete_date}) + for s in q.fetchall(): + Logger.log(["delete source", s[1], s[0]]) + if not Options["No-Action"]: + session.execute("DELETE FROM dsc_files WHERE source = :s_id", {"s_id":s[0]}) + session.execute("DELETE FROM source WHERE id = :s_id", {"s_id":s[0]}) + if not Options["No-Action"]: session.commit() # Delete files from the pool - query = """SELECT l.path, f.filename FROM location l, files f - WHERE f.last_used <= :deletedate AND l.id = f.location""" + old_files = session.query(PoolFile).filter(PoolFile.last_used <= delete_date) if max_delete is not None: - query += " LIMIT %d" % max_delete + old_files = old_files.limit(max_delete) print "Limiting removals to %d" % max_delete - q = session.execute(query, {'deletedate': delete_date}) - for i in q.fetchall(): - filename = i[0] + i[1] + for pf in old_files: + filename = os.path.join(pf.location.path, pf.filename) if not os.path.exists(filename): utils.warn("can not find '%s'." % (filename)) continue + Logger.log(["delete pool file", filename]) if os.path.isfile(filename): if os.path.islink(filename): count += 1 - if Options["No-Action"]: - print "Removing symlink %s..." % (filename) - else: + Logger.log(["delete symlink", filename]) + if not Options["No-Action"]: os.unlink(filename) else: size += os.stat(filename)[stat.ST_SIZE] @@ -245,23 +249,21 @@ def clean(now_date, delete_date, max_delete, session): if os.path.exists(dest_filename): dest_filename = utils.find_next_free(dest_filename) - if Options["No-Action"]: - print "Cleaning %s -> %s ..." % (filename, dest_filename) - else: + Logger.log(["move to morgue", filename, dest_filename]) + if not Options["No-Action"]: utils.move(filename, dest_filename) + + if not Options["No-Action"]: + session.delete(pf) + else: utils.fubar("%s is neither symlink nor file?!" % (filename)) - # Delete from the 'files' table - # XXX: I've a horrible feeling that the max_delete stuff breaks here - mhy - # TODO: Change it so we do the DELETEs as we go; it'll be slower but - # more reliable if not Options["No-Action"]: - print "Deleting from files table... " - session.execute("DELETE FROM files WHERE last_used <= :deletedate", {'deletedate': delete_date}) session.commit() if count > 0: + Logger.log(["total", count, utils.size_type(size)]) print "Cleaned %d files, %s." % (count, utils.size_type(size)) ################################################################################ @@ -271,7 +273,7 @@ def clean_maintainers(now_date, delete_date, max_delete, session): # TODO Replace this whole thing with one SQL statement q = session.execute(""" -SELECT m.id FROM maintainer m +SELECT m.id, m.name FROM maintainer m WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.maintainer = m.id) AND NOT EXISTS (SELECT 1 FROM source s WHERE s.maintainer = m.id OR s.changedby = m.id) AND NOT EXISTS (SELECT 1 FROM src_uploaders u WHERE u.maintainer = m.id)""") @@ -280,14 +282,16 @@ SELECT m.id FROM maintainer m for i in q.fetchall(): maintainer_id = i[0] + Logger.log(["delete maintainer", i[1]]) if not Options["No-Action"]: session.execute("DELETE FROM maintainer WHERE id = :maint", {'maint': maintainer_id}) - count += 1 + count += 1 if not Options["No-Action"]: session.commit() if count > 0: + Logger.log(["total", count]) print "Cleared out %d maintainer entries." 
% (count) ################################################################################ @@ -297,7 +301,7 @@ def clean_fingerprints(now_date, delete_date, max_delete, session): # TODO Replace this whole thing with one SQL statement q = session.execute(""" -SELECT f.id FROM fingerprint f +SELECT f.id, f.fingerprint FROM fingerprint f WHERE f.keyring IS NULL AND NOT EXISTS (SELECT 1 FROM binaries b WHERE b.sig_fpr = f.id) AND NOT EXISTS (SELECT 1 FROM source s WHERE s.sig_fpr = f.id)""") @@ -306,14 +310,16 @@ SELECT f.id FROM fingerprint f for i in q.fetchall(): fingerprint_id = i[0] + Logger.log(["delete fingerprint", i[1]]) if not Options["No-Action"]: session.execute("DELETE FROM fingerprint WHERE id = :fpr", {'fpr': fingerprint_id}) - count += 1 + count += 1 if not Options["No-Action"]: session.commit() if count > 0: + Logger.log(["total", count]) print "Cleared out %d fingerprint entries." % (count) ################################################################################ @@ -330,32 +336,58 @@ def clean_queue_build(now_date, delete_date, max_delete, session): our_delete_date = now_date - timedelta(seconds = int(cnf["Clean-Suites::QueueBuildStayOfExecution"])) count = 0 - q = session.execute("SELECT filename FROM queue_build WHERE last_used <= :deletedate", - {'deletedate': our_delete_date}) - for i in q.fetchall(): - filename = i[0] - if not os.path.exists(filename): - utils.warn("%s (from queue_build) doesn't exist." % (filename)) + for qf in session.query(QueueBuild).filter(QueueBuild.last_used <= our_delete_date): + if not os.path.exists(qf.filename): + utils.warn("%s (from queue_build) doesn't exist." % (qf.filename)) continue - if not cnf.FindB("Dinstall::SecurityQueueBuild") and not os.path.islink(filename): - utils.fubar("%s (from queue_build) should be a symlink but isn't." % (filename)) + if not cnf.FindB("Dinstall::SecurityQueueBuild") and not os.path.islink(qf.filename): + utils.fubar("%s (from queue_build) should be a symlink but isn't." % (qf.filename)) - os.unlink(filename) + Logger.log(["delete queue build", qf.filename]) + if not Options["No-Action"]: + os.unlink(qf.filename) + session.delete(qf) count += 1 - session.execute("DELETE FROM queue_build WHERE last_used <= :deletedate", - {'deletedate': our_delete_date}) - - session.commit() + if not Options["No-Action"]: + session.commit() if count: + Logger.log(["total", count]) print "Cleaned %d queue_build files." % (count) ################################################################################ +def clean_empty_directories(session): + """ + Removes empty directories from pool directories. 
+ """ + + count = 0 + + cursor = session.execute( + "SELECT DISTINCT(path) FROM location WHERE type = :type", + {'type': 'pool'}, + ) + bases = [x[0] for x in cursor.fetchall()] + + for base in bases: + for dirpath, dirnames, filenames in os.walk(base, topdown=False): + if not filenames and not dirnames: + to_remove = os.path.join(base, dirpath) + if not Options["No-Action"]: + Logger.log(["removing directory", to_remove]) + os.removedirs(to_remove) + count += 1 + + if count: + Logger.log(["total removed directories", count]) + +################################################################################ + def main(): - global Options + global Options, Logger cnf = Config() @@ -384,6 +416,8 @@ def main(): if Options["Help"]: usage() + Logger = daklog.Logger(cnf, "clean-suites", debug=Options["No-Action"]) + session = DBConn().session() now_date = datetime.now() @@ -397,6 +431,9 @@ def main(): clean_maintainers(now_date, delete_date, max_delete, session) clean_fingerprints(now_date, delete_date, max_delete, session) clean_queue_build(now_date, delete_date, max_delete, session) + clean_empty_directories(session) + + Logger.close() ################################################################################ diff --git a/dak/contents.py b/dak/contents.py index 9ac99951..c435afc5 100755 --- a/dak/contents.py +++ b/dak/contents.py @@ -37,7 +37,6 @@ Create all the contents files import sys import os import logging -import math import gzip import threading import Queue diff --git a/dak/cruft_report.py b/dak/cruft_report.py index cd63c2da..63374859 100755 --- a/dak/cruft_report.py +++ b/dak/cruft_report.py @@ -29,7 +29,7 @@ ################################################################################ -import commands, os, sys, time, re +import commands, os, sys, re import apt_pkg from daklib.config import Config diff --git a/dak/dak.py b/dak/dak.py index f3380091..052f3b3e 100755 --- a/dak/dak.py +++ b/dak/dak.py @@ -34,7 +34,6 @@ G{importgraph} ################################################################################ import sys -import imp import daklib.utils ################################################################################ diff --git a/dak/dakdb/update15.py b/dak/dakdb/update15.py new file mode 100644 index 00000000..535f9e67 --- /dev/null +++ b/dak/dakdb/update15.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# coding=utf8 + +""" +Adding table for allowed source formats + +@contact: Debian FTP Master +@copyright: 2009 Raphael Hertzog +@license: GNU General Public License version 2 or later +""" + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+ +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + + +################################################################################ + +import psycopg2 +import time +from daklib.dak_exceptions import DBUpdateError + +################################################################################ + +def do_update(self): + print "Adding tables listing allowed source formats" + + try: + c = self.db.cursor() + c.execute(""" + CREATE TABLE src_format ( + id SERIAL PRIMARY KEY, + format_name TEXT NOT NULL, + UNIQUE (format_name) + ) + """) + c.execute("INSERT INTO src_format (format_name) VALUES('1.0')") + c.execute("INSERT INTO src_format (format_name) VALUES('3.0 (quilt)')") + c.execute("INSERT INTO src_format (format_name) VALUES('3.0 (native)')") + + c.execute(""" + CREATE TABLE suite_src_formats ( + suite INT4 NOT NULL REFERENCES suite(id), + src_format INT4 NOT NULL REFERENCES src_format(id), + PRIMARY KEY (suite, src_format) + ) + """) + + print "Authorize format 1.0 on all suites by default" + c.execute("SELECT id FROM suite") + suites = c.fetchall() + c.execute("SELECT id FROM src_format WHERE format_name = '1.0'") + formats = c.fetchall() + for s in suites: + for f in formats: + c.execute("INSERT INTO suite_src_formats (suite, src_format) VALUES(%s, %s)", (s[0], f[0])) + + print "Authorize all other formats on tpu, unstable & experimental by default" + c.execute("SELECT id FROM suite WHERE suite_name IN ('testing-proposed-updates', 'unstable', 'experimental')") + suites = c.fetchall() + c.execute("SELECT id FROM src_format WHERE format_name != '1.0'") + formats = c.fetchall() + for s in suites: + for f in formats: + c.execute("INSERT INTO suite_src_formats (suite, src_format) VALUES(%s, %s)", (s[0], f[0])) + + c.execute("UPDATE config SET value = '15' WHERE name = 'db_revision'") + self.db.commit() + + except psycopg2.ProgrammingError, msg: + self.db.rollback() + raise DBUpdateError, "Unable to apply source format update 15, rollback issued. Error message : %s" % (str(msg)) diff --git a/dak/generate_index_diffs.py b/dak/generate_index_diffs.py index 21c631b9..4222c0cf 100755 --- a/dak/generate_index_diffs.py +++ b/dak/generate_index_diffs.py @@ -34,7 +34,6 @@ import sys import os import tempfile -import subprocess import time import apt_pkg diff --git a/dak/import_keyring.py b/dak/import_keyring.py index 06597f85..0b670357 100755 --- a/dak/import_keyring.py +++ b/dak/import_keyring.py @@ -24,7 +24,6 @@ import apt_pkg, ldap, email.Utils from daklib.config import Config from daklib.dbconn import * -from daklib import utils # Globals diff --git a/dak/import_ldap_fingerprints.py b/dak/import_ldap_fingerprints.py index ec27acbe..337edb61 100755 --- a/dak/import_ldap_fingerprints.py +++ b/dak/import_ldap_fingerprints.py @@ -44,7 +44,7 @@ ################################################################################ -import commands, ldap, re, sys +import commands, ldap, sys import apt_pkg from daklib.config import Config diff --git a/dak/make_pkg_file_mapping.py b/dak/make_pkg_file_mapping.py index 38a6bec2..c457820f 100755 --- a/dak/make_pkg_file_mapping.py +++ b/dak/make_pkg_file_mapping.py @@ -31,9 +31,6 @@ and binary package version it has in a standard rfc2822-like format. 
################################################################################ -import os -import sys - from daklib.dbconn import * ################################################################################ diff --git a/dak/new_security_install.py b/dak/new_security_install.py index 24e89b92..3eb19643 100755 --- a/dak/new_security_install.py +++ b/dak/new_security_install.py @@ -25,7 +25,7 @@ import apt_pkg, os, sys, pwd, time, commands from daklib import queue from daklib import daklog from daklib import utils -from daklib import database +from daklib.dbconn import DBConn, get_or_set_queue, get_suite_architectures from daklib.regexes import re_taint_free Cnf = None @@ -387,7 +387,7 @@ def generate_advisory(template): ver, suite) adv += "%s\n%s\n\n" % (suite_header, "-"*len(suite_header)) - arches = database.get_suite_architectures(suite) + arches = [x.arch_name for x in get_suite_architectures(suite)] if "source" in arches: arches.remove("source") if "all" in arches: @@ -492,9 +492,11 @@ def _do_Disembargo(): if os.getcwd() != Cnf["Dir::Queue::Embargoed"].rstrip("/"): utils.fubar("Can only disembargo from %s" % Cnf["Dir::Queue::Embargoed"]) + session = DBConn().session() + dest = Cnf["Dir::Queue::Unembargoed"] - emb_q = database.get_or_set_queue_id("embargoed") - une_q = database.get_or_set_queue_id("unembargoed") + emb_q = get_or_set_queue("embargoed", session) + une_q = get_or_set_queue("unembargoed", session) for c in changes: print "Disembargoing %s" % (c) @@ -505,7 +507,8 @@ def _do_Disembargo(): if "source" in Upload.pkg.changes["architecture"].keys(): print "Adding %s %s to disembargo table" % (Upload.pkg.changes["source"], Upload.pkg.changes["version"]) - Upload.projectB.query("INSERT INTO disembargo (package, version) VALUES ('%s', '%s')" % (Upload.pkg.changes["source"], Upload.pkg.changes["version"])) + session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", + {'package': Upload.pkg.changes["source"], 'version': Upload.pkg.changes["version"]}) files = {} for suite in Upload.pkg.changes["distribution"].keys(): @@ -518,10 +521,10 @@ def _do_Disembargo(): files[os.path.join(dest_dir, file)] = 1 files = files.keys() - Upload.projectB.query("BEGIN WORK") for f in files: - Upload.projectB.query("UPDATE queue_build SET queue = %s WHERE filename = '%s' AND queue = %s" % (une_q, f, emb_q)) - Upload.projectB.query("COMMIT WORK") + session.execute("UPDATE queue_build SET queue = :unembargoed WHERE filename = :filename AND queue = :embargoed", + {'unembargoed': une_q.queue_id, 'filename': f, 'embargoed': emb_q.queue_id}) + session.commit() for file in Upload.pkg.files.keys(): utils.copy(file, os.path.join(dest, file)) @@ -534,9 +537,14 @@ def _do_Disembargo(): utils.copy(k, os.path.join(dest, k)) os.unlink(k) + session.commit() + def do_Reject(): sudo("R", _do_Reject, True) def _do_Reject(): global changes + + session = DBConn().session() + for c in changes: print "Rejecting %s..." % (c) Upload.init_vars() @@ -558,8 +566,8 @@ def _do_Reject(): if not aborted: os.unlink(c[:-8]+".dak") for f in files: - Upload.projectB.query( - "DELETE FROM queue_build WHERE filename = '%s'" % (f)) + session.execute("DELETE FROM queue_build WHERE filename = :filename", + {'filename': f}) os.unlink(f) print "Updating buildd information..." 
@@ -569,6 +577,8 @@ def _do_Reject(): if os.path.exists(adv_file): os.unlink(adv_file) + session.commit() + def do_DropAdvisory(): for c in changes: Upload.init_vars() diff --git a/dak/process_accepted.py b/dak/process_accepted.py index d7db1172..b18346c8 100755 --- a/dak/process_accepted.py +++ b/dak/process_accepted.py @@ -40,14 +40,12 @@ import fcntl import os import sys from datetime import datetime -import re -import apt_pkg, commands +import apt_pkg from daklib import daklog from daklib.queue import * from daklib import utils from daklib.dbconn import * -from daklib.binary import copy_temporary_contents from daklib.dak_exceptions import * from daklib.regexes import re_default_answer, re_issource, re_fdnic from daklib.urgencylog import UrgencyLog @@ -210,7 +208,7 @@ def add_dsc_to_db(u, filename, session): df = DSCFile() df.source_id = source.source_id - # If the .orig.tar.gz is already in the pool, it's + # If the .orig tarball is already in the pool, it's # files id is stored in dsc_files by check_dsc(). files_id = dentry.get("files id", None) @@ -353,32 +351,37 @@ def install(u, session, log_urgency=True): add_deb_to_db(u, newfile, session) # If this is a sourceful diff only upload that is moving - # cross-component we need to copy the .orig.tar.gz into the new + # cross-component we need to copy the .orig files into the new # component too for the same reasons as above. - # - if u.pkg.changes["architecture"].has_key("source") and u.pkg.orig_tar_id and \ - u.pkg.orig_tar_location != dsc_location_id: - - oldf = get_poolfile_by_id(u.pkg.orig_tar_id, session) - old_filename = os.path.join(oldf.location.path, oldf.filename) - old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum, - 'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum} - - new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename)) - - # TODO: Care about size/md5sum collisions etc - (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session) - - if newf is None: - utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename)) - newf = add_poolfile(new_filename, old_dat, dsc_location_id, session) - - # TODO: Check that there's only 1 here - source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0] - dscf = get_dscfiles(source_id = source.source_id, poolfile_id=u.pkg.orig_tar_id, session=session)[0] - dscf.poolfile_id = newf.file_id - session.add(dscf) - session.flush() + if u.pkg.changes["architecture"].has_key("source"): + for orig_file in u.pkg.orig_files.keys(): + if not u.pkg.orig_files[orig_file].has_key("id"): + continue # Skip if it's not in the pool + orig_file_id = u.pkg.orig_files[orig_file]["id"] + if u.pkg.orig_files[orig_file]["location"] == dsc_location_id: + continue # Skip if the location didn't change + + # Do the move + oldf = get_poolfile_by_id(orig_file_id, session) + old_filename = os.path.join(oldf.location.path, oldf.filename) + old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum, + 'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum} + + new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename)) + + # TODO: Care about size/md5sum collisions etc + (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session) + + if newf is None: + utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename)) + newf = add_poolfile(new_filename, old_dat, dsc_location_id, session) + + # TODO: 
Check that there's only 1 here + source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0] + dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0] + dscf.poolfile_id = newf.file_id + session.add(dscf) + session.flush() # Install the files into the pool for newfile, entry in u.pkg.files.items(): @@ -452,15 +455,17 @@ def install(u, session, log_urgency=True): os.unlink(dest) os.symlink(src, dest) - # Update last_used on any non-upload .orig.tar.gz symlink - if u.pkg.orig_tar_id: + # Update last_used on any non-uploaded .orig symlink + for orig_file in u.pkg.orig_files.keys(): # Determine the .orig.tar.gz file name - for dsc_file in u.pkg.dsc_files.keys(): - if dsc_file.endswith(".orig.tar.gz"): - u.pkg.orig_tar_gz = os.path.join(dest_dir, dsc_file) + if not u.pkg.orig_files[orig_file].has_key("id"): + continue # Skip files not in the pool + # XXX: do we really want to update the orig_files dict here + # instead of using a temporary variable? + u.pkg.orig_files[orig_file]["path"] = os.path.join(dest_dir, orig_file) # Remove it from the list of packages for later processing by apt-ftparchive - qb = get_queue_build(u.pkg.orig_tar_gz, suite.suite_id, session) + qb = get_queue_build(u.pkg.orig_files[orig_file]["path"], suite.suite_id, session) if qb: qb.in_queue = False qb.last_used = now_date diff --git a/dak/process_new.py b/dak/process_new.py index 77f6e5af..ddedbe10 100755 --- a/dak/process_new.py +++ b/dak/process_new.py @@ -665,7 +665,7 @@ def do_new(upload, session): try: check_daily_lock() done = add_overrides (new, upload, session) - Logger.log([utils.getusername(), "NEW ACCEPT: %s" % (upload.pkg.changes_file)]) + Logger.log(["NEW ACCEPT: %s" % (upload.pkg.changes_file)]) except CantGetLockError: print "Hello? Operator! Give me the number for 911!" print "Dinstall in the locked area, cant process packages, come back later" @@ -678,7 +678,7 @@ def do_new(upload, session): reject_message=Options["Manual-Reject"], note=get_new_comments(changes.get("source", ""), session=session)) if not aborted: - Logger.log([utils.getusername(), "NEW REJECT: %s" % (upload.pkg.changes_file)]) + Logger.log(["NEW REJECT: %s" % (upload.pkg.changes_file)]) os.unlink(upload.pkg.changes_file[:-8]+".dak") done = 1 elif answer == 'N': @@ -687,7 +687,7 @@ def do_new(upload, session): elif answer == 'P' and not Options["Trainee"]: prod_maintainer(get_new_comments(changes.get("source", ""), session=session), upload) - Logger.log([utils.getusername(), "NEW PROD: %s" % (upload.pkg.changes_file)]) + Logger.log(["NEW PROD: %s" % (upload.pkg.changes_file)]) elif answer == 'R' and not Options["Trainee"]: confirm = utils.our_raw_input("Really clear note (y/N)? ").lower() if confirm == "y": @@ -761,12 +761,12 @@ def do_byhand(upload, session): done = 1 for f in byhand: del files[f] - Logger.log([utils.getusername(), "BYHAND ACCEPT: %s" % (upload.pkg.changes_file)]) + Logger.log(["BYHAND ACCEPT: %s" % (upload.pkg.changes_file)]) except CantGetLockError: print "Hello? Operator! Give me the number for 911!" 
print "Dinstall in the locked area, cant process packages, come back later" elif answer == 'M': - Logger.log([utils.getusername(), "BYHAND REJECT: %s" % (upload.pkg.changes_file)]) + Logger.log(["BYHAND REJECT: %s" % (upload.pkg.changes_file)]) upload.do_reject(manual=1, reject_message=Options["Manual-Reject"]) os.unlink(upload.pkg.changes_file[:-8]+".dak") done = 1 @@ -887,7 +887,7 @@ def end(): if accept_count > 1: sets = "sets" sys.stderr.write("Accepted %d package %s, %s.\n" % (accept_count, sets, utils.size_type(int(accept_bytes)))) - Logger.log([utils.getusername(), "total",accept_count,accept_bytes]) + Logger.log(["total",accept_count,accept_bytes]) if not Options["No-Action"] and not Options["Trainee"]: Logger.close() diff --git a/dak/process_unchecked.py b/dak/process_unchecked.py index cabdbf3f..34020205 100755 --- a/dak/process_unchecked.py +++ b/dak/process_unchecked.py @@ -34,23 +34,14 @@ Checks Debian packages from Incoming ################################################################################ -import commands import errno import fcntl import os -import re -import shutil -import stat import sys -import time import traceback -import tarfile -import apt_inst import apt_pkg -from debian_bundle import deb822 from daklib.dbconn import * -from daklib.binary import Binary from daklib import daklog from daklib.queue import * from daklib import utils @@ -244,7 +235,7 @@ def package_to_queue(u, summary, short_summary, queue, perms=0660, build=True, a u.pkg.write_dot_dak(dir) u.move_to_dir(dir, perms=perms) if build: - get_queue(queue.lower()).autobuild_upload(u.pkg, dir) + get_or_set_queue(queue.lower()).autobuild_upload(u.pkg, dir) # Check for override disparities u.check_override() @@ -508,6 +499,7 @@ def process_it(changes_file): valid_dsc_p = u.check_dsc(not Options["No-Action"]) if valid_dsc_p: u.check_source() + u.check_lintian() u.check_hashes() u.check_urgency() u.check_timestamps() diff --git a/dak/queue_report.py b/dak/queue_report.py index c60358f0..8e338e52 100755 --- a/dak/queue_report.py +++ b/dak/queue_report.py @@ -37,7 +37,6 @@ from copy import copy import glob, os, stat, sys, time import apt_pkg -import cgi from daklib import utils from daklib.changes import Changes diff --git a/dak/rm.py b/dak/rm.py index 8ed03412..51400194 100755 --- a/dak/rm.py +++ b/dak/rm.py @@ -41,7 +41,6 @@ import commands import os -import re import sys import apt_pkg import apt_inst @@ -506,8 +505,8 @@ def main (): logfile.write("----------------------------------------------\n") logfile.flush() - dsc_type_id = get_override_type('dsc', session) - deb_type_id = get_override_type('deb', session) + dsc_type_id = get_override_type('dsc', session).overridetype_id + deb_type_id = get_override_type('deb', session).overridetype_id # Do the actual deletion print "Deleting...", diff --git a/dak/show_deferred.py b/dak/show_deferred.py index d3cf6530..e8e1621d 100755 --- a/dak/show_deferred.py +++ b/dak/show_deferred.py @@ -22,7 +22,6 @@ import sys, os, re, time import apt_pkg -import tempfile from debian_bundle import deb822 from daklib.dbconn import * from daklib import utils diff --git a/dak/show_new.py b/dak/show_new.py index be3d5114..b21efcce 100755 --- a/dak/show_new.py +++ b/dak/show_new.py @@ -32,6 +32,7 @@ import examine_package from daklib.queue import determine_new, check_valid from daklib import utils +from daklib.regexes import re_source_ext # Globals Cnf = None @@ -160,8 +161,9 @@ def do_pkg(changes_file): filestoexamine = [] for pkg in new.keys(): for fn in new[pkg]["files"]: - 
if ( c.files[fn].has_key("new") and not - c.files[fn]["type"] in [ "orig.tar.gz", "orig.tar.bz2", "tar.gz", "tar.bz2", "diff.gz", "diff.bz2"] ): + if (c.files[fn].has_key("new") and + (c.files[fn]["type"] == "dsc" or + not re_source_ext.match(c.files[fn]["type"]))): filestoexamine.append(fn) html_header(c.changes["source"], filestoexamine) diff --git a/dak/stats.py b/dak/stats.py index 583178b2..7c61e2a4 100755 --- a/dak/stats.py +++ b/dak/stats.py @@ -34,8 +34,7 @@ import sys import apt_pkg from daklib import utils -from daklib.dbconn import DBConn, get_suite_architectures, Suite, Architecture, \ - BinAssociation +from daklib.dbconn import DBConn, get_suite_architectures, Suite, Architecture ################################################################################ diff --git a/dak/transitions.py b/dak/transitions.py index acae2072..4c4ac78e 100755 --- a/dak/transitions.py +++ b/dak/transitions.py @@ -34,7 +34,6 @@ import time import errno import fcntl import tempfile -import pwd import apt_pkg from daklib.dbconn import * diff --git a/dak/update_db.py b/dak/update_db.py index 4999af3a..ecf5cd2a 100755 --- a/dak/update_db.py +++ b/dak/update_db.py @@ -44,7 +44,7 @@ from daklib.dak_exceptions import DBUpdateError ################################################################################ Cnf = None -required_database_schema = 14 +required_database_schema = 15 ################################################################################ diff --git a/daklib/changes.py b/daklib/changes.py index 1bb90753..ff232224 100755 --- a/daklib/changes.py +++ b/daklib/changes.py @@ -76,6 +76,10 @@ CHANGESFIELDS_DSCFILES_OPTIONAL = [ "files id" ] __all__.append('CHANGESFIELDS_DSCFILES_OPTIONAL') +CHANGESFIELDS_ORIGFILES = [ "id", "location" ] + +__all__.append('CHANGESFIELDS_ORIGFILES') + ############################################################################### class Changes(object): @@ -91,10 +95,7 @@ class Changes(object): self.dsc = {} self.files = {} self.dsc_files = {} - - self.orig_tar_id = None - self.orig_tar_location = "" - self.orig_tar_gz = None + self.orig_files = {} def file_summary(self): # changes["distribution"] may not exist in corner cases @@ -189,8 +190,24 @@ class Changes(object): self.files.update(p.load()) self.dsc_files.update(p.load()) - self.orig_tar_id = p.load() - self.orig_tar_location = p.load() + next_obj = p.load() + if isinstance(next_obj, dict): + self.orig_files.update(next_obj) + else: + # Auto-convert old dak files to new format supporting + # multiple tarballs + orig_tar_gz = None + for dsc_file in self.dsc_files.keys(): + if dsc_file.endswith(".orig.tar.gz"): + orig_tar_gz = dsc_file + self.orig_files[orig_tar_gz] = {} + if next_obj != None: + self.orig_files[orig_tar_gz]["id"] = next_obj + next_obj = p.load() + if next_obj != None and next_obj != "": + self.orig_files[orig_tar_gz]["location"] = next_obj + if len(self.orig_files[orig_tar_gz]) == 0: + del self.orig_files[orig_tar_gz] dump_file.close() @@ -240,6 +257,17 @@ class Changes(object): return ret + def sanitised_orig_files(self): + ret = {} + for name, entry in self.orig_files.items(): + ret[name] = {} + # Optional orig_files fields + for i in CHANGESFIELDS_ORIGFILES: + if entry.has_key(i): + ret[name][i] = entry[i] + + return ret + def write_dot_dak(self, dest_dir): """ Dump ourself into a cPickle file. 
@@ -281,8 +309,7 @@ class Changes(object): p.dump(self.sanitised_dsc()) p.dump(self.sanitised_files()) p.dump(self.sanitised_dsc_files()) - p.dump(self.orig_tar_id) - p.dump(self.orig_tar_location) + p.dump(self.sanitised_orig_files()) dump_file.close() diff --git a/daklib/daklog.py b/daklib/daklog.py index 0cca205e..dfcae368 100755 --- a/daklib/daklog.py +++ b/daklib/daklog.py @@ -58,13 +58,12 @@ class Logger: logfile = utils.open_file(logfilename, 'a') os.umask(umask) self.logfile = logfile - # Log the start of the program - user = pwd.getpwuid(os.getuid())[0] - self.log(["program start", user]) + self.log(["program start"]) def log (self, details): "Log an event" - # Prepend the timestamp and program name + # Prepend timestamp, program name, and user name + details.insert(0, utils.getusername()) details.insert(0, self.program) timestamp = time.strftime("%Y%m%d%H%M%S") details.insert(0, timestamp) diff --git a/daklib/database.py b/daklib/database.py deleted file mode 100755 index cbdfad04..00000000 --- a/daklib/database.py +++ /dev/null @@ -1,968 +0,0 @@ -#!/usr/bin/env python - -""" DB access functions -@group readonly: get_suite_id, get_section_id, get_priority_id, get_override_type_id, - get_architecture_id, get_archive_id, get_component_id, get_location_id, - get_source_id, get_suite_version, get_files_id, get_maintainer, get_suites, - get_suite_architectures, get_new_comments, has_new_comment -@group read/write: get_or_set*, set_files_id -@group writeonly: add_new_comment, delete_new_comments - -@contact: Debian FTP Master -@copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup -@copyright: 2009 Joerg Jaspert -@license: GNU General Public License version 2 or later -""" - -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
- -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -################################################################################ - -import sys -import time -import types -import utils -import pg - -################################################################################ - -Cnf = None #: Configuration, apt_pkg.Configuration -projectB = None #: database connection, pgobject -suite_id_cache = {} #: cache for suites -section_id_cache = {} #: cache for sections -priority_id_cache = {} #: cache for priorities -override_type_id_cache = {} #: cache for overrides -architecture_id_cache = {} #: cache for architectures -archive_id_cache = {} #: cache for archives -component_id_cache = {} #: cache for components -location_id_cache = {} #: cache for locations -maintainer_id_cache = {} #: cache for maintainers -keyring_id_cache = {} #: cache for keyrings -source_id_cache = {} #: cache for sources - -files_id_cache = {} #: cache for files -maintainer_cache = {} #: cache for maintainer names -fingerprint_id_cache = {} #: cache for fingerprints -queue_id_cache = {} #: cache for queues -uid_id_cache = {} #: cache for uids -suite_version_cache = {} #: cache for suite_versions (packages) -suite_bin_version_cache = {} -cache_preloaded = False - -################################################################################ - -def init (config, sql): - """ - database module init. - - @type config: apt_pkg.Configuration - @param config: apt config, see U{http://apt.alioth.debian.org/python-apt-doc/apt_pkg/cache.html#Configuration} - - @type sql: pgobject - @param sql: database connection - - """ - global Cnf, projectB - - Cnf = config - projectB = sql - -################################################################################ - -def get_suite_id (suite): - """ - Returns database id for given C{suite}. - Results are kept in a cache during runtime to minimize database queries. - - @type suite: string - @param suite: The name of the suite - - @rtype: int - @return: the database id for the given suite - - """ - global suite_id_cache - - if suite_id_cache.has_key(suite): - return suite_id_cache[suite] - - q = projectB.query("SELECT id FROM suite WHERE suite_name = '%s'" % (suite)) - ql = q.getresult() - if not ql: - return -1 - - suite_id = ql[0][0] - suite_id_cache[suite] = suite_id - - return suite_id - -def get_section_id (section): - """ - Returns database id for given C{section}. - Results are kept in a cache during runtime to minimize database queries. - - @type section: string - @param section: The name of the section - - @rtype: int - @return: the database id for the given section - - """ - global section_id_cache - - if section_id_cache.has_key(section): - return section_id_cache[section] - - q = projectB.query("SELECT id FROM section WHERE section = '%s'" % (section)) - ql = q.getresult() - if not ql: - return -1 - - section_id = ql[0][0] - section_id_cache[section] = section_id - - return section_id - -def get_priority_id (priority): - """ - Returns database id for given C{priority}. - Results are kept in a cache during runtime to minimize database queries. 
- - @type priority: string - @param priority: The name of the priority - - @rtype: int - @return: the database id for the given priority - - """ - global priority_id_cache - - if priority_id_cache.has_key(priority): - return priority_id_cache[priority] - - q = projectB.query("SELECT id FROM priority WHERE priority = '%s'" % (priority)) - ql = q.getresult() - if not ql: - return -1 - - priority_id = ql[0][0] - priority_id_cache[priority] = priority_id - - return priority_id - -def get_override_type_id (type): - """ - Returns database id for given override C{type}. - Results are kept in a cache during runtime to minimize database queries. - - @type type: string - @param type: The name of the override type - - @rtype: int - @return: the database id for the given override type - - """ - global override_type_id_cache - - if override_type_id_cache.has_key(type): - return override_type_id_cache[type] - - q = projectB.query("SELECT id FROM override_type WHERE type = '%s'" % (type)) - ql = q.getresult() - if not ql: - return -1 - - override_type_id = ql[0][0] - override_type_id_cache[type] = override_type_id - - return override_type_id - -def get_architecture_id (architecture): - """ - Returns database id for given C{architecture}. - Results are kept in a cache during runtime to minimize database queries. - - @type architecture: string - @param architecture: The name of the override type - - @rtype: int - @return: the database id for the given architecture - - """ - global architecture_id_cache - - if architecture_id_cache.has_key(architecture): - return architecture_id_cache[architecture] - - q = projectB.query("SELECT id FROM architecture WHERE arch_string = '%s'" % (architecture)) - ql = q.getresult() - if not ql: - return -1 - - architecture_id = ql[0][0] - architecture_id_cache[architecture] = architecture_id - - return architecture_id - -def get_archive_id (archive): - """ - Returns database id for given C{archive}. - Results are kept in a cache during runtime to minimize database queries. - - @type archive: string - @param archive: The name of the override type - - @rtype: int - @return: the database id for the given archive - - """ - global archive_id_cache - - archive = archive.lower() - - if archive_id_cache.has_key(archive): - return archive_id_cache[archive] - - q = projectB.query("SELECT id FROM archive WHERE lower(name) = '%s'" % (archive)) - ql = q.getresult() - if not ql: - return -1 - - archive_id = ql[0][0] - archive_id_cache[archive] = archive_id - - return archive_id - -def get_component_id (component): - """ - Returns database id for given C{component}. - Results are kept in a cache during runtime to minimize database queries. - - @type component: string - @param component: The name of the component - - @rtype: int - @return: the database id for the given component - - """ - global component_id_cache - - component = component.lower() - - if component_id_cache.has_key(component): - return component_id_cache[component] - - q = projectB.query("SELECT id FROM component WHERE lower(name) = '%s'" % (component)) - ql = q.getresult() - if not ql: - return -1 - - component_id = ql[0][0] - component_id_cache[component] = component_id - - return component_id - -def get_location_id (location, component, archive): - """ - Returns database id for the location behind the given combination of - - B{location} - the path of the location, eg. 
I{/srv/ftp.debian.org/ftp/pool/} - - B{component} - the id of the component as returned by L{get_component_id} - - B{archive} - the id of the archive as returned by L{get_archive_id} - Results are kept in a cache during runtime to minimize database queries. - - @type location: string - @param location: the path of the location - - @type component: int - @param component: the id of the component - - @type archive: int - @param archive: the id of the archive - - @rtype: int - @return: the database id for the location - - """ - global location_id_cache - - cache_key = location + '_' + component + '_' + location - if location_id_cache.has_key(cache_key): - return location_id_cache[cache_key] - - archive_id = get_archive_id (archive) - if component != "": - component_id = get_component_id (component) - if component_id != -1: - q = projectB.query("SELECT id FROM location WHERE path = '%s' AND component = %d AND archive = %d" % (location, component_id, archive_id)) - else: - q = projectB.query("SELECT id FROM location WHERE path = '%s' AND archive = %d" % (location, archive_id)) - ql = q.getresult() - if not ql: - return -1 - - location_id = ql[0][0] - location_id_cache[cache_key] = location_id - - return location_id - -def get_source_id (source, version): - """ - Returns database id for the combination of C{source} and C{version} - - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc} - - B{version} - Results are kept in a cache during runtime to minimize database queries. - - @type source: string - @param source: source package name - - @type version: string - @param version: the source version - - @rtype: int - @return: the database id for the source - - """ - global source_id_cache - - cache_key = source + '_' + version + '_' - if source_id_cache.has_key(cache_key): - return source_id_cache[cache_key] - - q = projectB.query("SELECT id FROM source s WHERE s.source = '%s' AND s.version = '%s'" % (source, version)) - - if not q.getresult(): - return None - - source_id = q.getresult()[0][0] - source_id_cache[cache_key] = source_id - - return source_id - -def get_suite_version(source, suite): - """ - Returns database id for a combination of C{source} and C{suite}. - - - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc} - - B{suite} - a suite name, eg. I{unstable} - - Results are kept in a cache during runtime to minimize database queries. 
- - @type source: string - @param source: source package name - - @type suite: string - @param suite: the suite name - - @rtype: string - @return: the version for I{source} in I{suite} - - """ - - global suite_version_cache - cache_key = "%s_%s" % (source, suite) - - if suite_version_cache.has_key(cache_key): - return suite_version_cache[cache_key] - - q = projectB.query(""" - SELECT s.version FROM source s, suite su, src_associations sa - WHERE sa.source=s.id - AND sa.suite=su.id - AND su.suite_name='%s' - AND s.source='%s'""" - % (suite, source)) - - if not q.getresult(): - return None - - version = q.getresult()[0][0] - suite_version_cache[cache_key] = version - - return version - -def get_latest_binary_version_id(binary, section, suite, arch): - global suite_bin_version_cache - cache_key = "%s_%s_%s_%s" % (binary, section, suite, arch) - cache_key_all = "%s_%s_%s_%s" % (binary, section, suite, get_architecture_id("all")) - - # Check for the cache hit for its arch, then arch all - if suite_bin_version_cache.has_key(cache_key): - return suite_bin_version_cache[cache_key] - if suite_bin_version_cache.has_key(cache_key_all): - return suite_bin_version_cache[cache_key_all] - if cache_preloaded == True: - return # package does not exist - - q = projectB.query("SELECT DISTINCT b.id FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.package = '%s' AND b.architecture = '%d' AND ba.suite = '%d' AND o.section = '%d'" % (binary, int(arch), int(suite), int(section))) - - if not q.getresult(): - return False - - highest_bid = q.getresult()[0][0] - - suite_bin_version_cache[cache_key] = highest_bid - return highest_bid - -def preload_binary_id_cache(): - global suite_bin_version_cache, cache_preloaded - - # Get suite info - q = projectB.query("SELECT id FROM suite") - suites = q.getresult() - - # Get arch mappings - q = projectB.query("SELECT id FROM architecture") - arches = q.getresult() - - for suite in suites: - for arch in arches: - q = projectB.query("SELECT DISTINCT b.id, b.package, o.section FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.architecture = '%d' AND ba.suite = '%d'" % (int(arch[0]), int(suite[0]))) - - for bi in q.getresult(): - cache_key = "%s_%s_%s_%s" % (bi[1], bi[2], suite[0], arch[0]) - suite_bin_version_cache[cache_key] = int(bi[0]) - - cache_preloaded = True - -def get_suite_architectures(suite): - """ - Returns list of architectures for C{suite}. - - @type suite: string, int - @param suite: the suite name or the suite_id - - @rtype: list - @return: the list of architectures for I{suite} - """ - - suite_id = None - if type(suite) == str: - suite_id = get_suite_id(suite) - elif type(suite) == int: - suite_id = suite - else: - return None - - sql = """ SELECT a.arch_string FROM suite_architectures sa - JOIN architecture a ON (a.id = sa.architecture) - WHERE suite='%s' """ % (suite_id) - - q = projectB.query(sql) - return map(lambda x: x[0], q.getresult()) - -def get_suite_untouchable(suite): - """ - Returns true if the C{suite} is untouchable, otherwise false. 
- - @type suite: string, int - @param suite: the suite name or the suite_id - - @rtype: boolean - @return: status of suite - """ - - suite_id = None - if type(suite) == str: - suite_id = get_suite_id(suite.lower()) - elif type(suite) == int: - suite_id = suite - else: - return None - - sql = """ SELECT untouchable FROM suite WHERE id='%s' """ % (suite_id) - - q = projectB.query(sql) - if q.getresult()[0][0] == "f": - return False - else: - return True - -################################################################################ - -def get_or_set_maintainer_id (maintainer): - """ - If C{maintainer} does not have an entry in the maintainer table yet, create one - and return the new id. - If C{maintainer} already has an entry, simply return the existing id. - - Results are kept in a cache during runtime to minimize database queries. - - @type maintainer: string - @param maintainer: the maintainer name - - @rtype: int - @return: the database id for the maintainer - - """ - global maintainer_id_cache - - if maintainer_id_cache.has_key(maintainer): - return maintainer_id_cache[maintainer] - - q = projectB.query("SELECT id FROM maintainer WHERE name = '%s'" % (maintainer)) - if not q.getresult(): - projectB.query("INSERT INTO maintainer (name) VALUES ('%s')" % (maintainer)) - q = projectB.query("SELECT id FROM maintainer WHERE name = '%s'" % (maintainer)) - maintainer_id = q.getresult()[0][0] - maintainer_id_cache[maintainer] = maintainer_id - - return maintainer_id - -################################################################################ - -def get_or_set_keyring_id (keyring): - """ - If C{keyring} does not have an entry in the C{keyrings} table yet, create one - and return the new id. - If C{keyring} already has an entry, simply return the existing id. - - Results are kept in a cache during runtime to minimize database queries. - - @type keyring: string - @param keyring: the keyring name - - @rtype: int - @return: the database id for the keyring - - """ - global keyring_id_cache - - if keyring_id_cache.has_key(keyring): - return keyring_id_cache[keyring] - - q = projectB.query("SELECT id FROM keyrings WHERE name = '%s'" % (keyring)) - if not q.getresult(): - projectB.query("INSERT INTO keyrings (name) VALUES ('%s')" % (keyring)) - q = projectB.query("SELECT id FROM keyrings WHERE name = '%s'" % (keyring)) - keyring_id = q.getresult()[0][0] - keyring_id_cache[keyring] = keyring_id - - return keyring_id - -################################################################################ - -def get_or_set_uid_id (uid): - """ - If C{uid} does not have an entry in the uid table yet, create one - and return the new id. - If C{uid} already has an entry, simply return the existing id. - - Results are kept in a cache during runtime to minimize database queries. - - @type uid: string - @param uid: the uid. 
- - @rtype: int - @return: the database id for the uid - - """ - - global uid_id_cache - - if uid_id_cache.has_key(uid): - return uid_id_cache[uid] - - q = projectB.query("SELECT id FROM uid WHERE uid = '%s'" % (uid)) - if not q.getresult(): - projectB.query("INSERT INTO uid (uid) VALUES ('%s')" % (uid)) - q = projectB.query("SELECT id FROM uid WHERE uid = '%s'" % (uid)) - uid_id = q.getresult()[0][0] - uid_id_cache[uid] = uid_id - - return uid_id - -################################################################################ - -def get_or_set_fingerprint_id (fingerprint): - """ - If C{fingerprint} does not have an entry in the fingerprint table yet, create one - and return the new id. - If C{fingerprint} already has an entry, simply return the existing id. - - Results are kept in a cache during runtime to minimize database queries. - - @type fingerprint: string - @param fingerprint: the fingerprint - - @rtype: int - @return: the database id for the fingerprint - - """ - global fingerprint_id_cache - - if fingerprint_id_cache.has_key(fingerprint): - return fingerprint_id_cache[fingerprint] - - q = projectB.query("SELECT id FROM fingerprint WHERE fingerprint = '%s'" % (fingerprint)) - if not q.getresult(): - projectB.query("INSERT INTO fingerprint (fingerprint) VALUES ('%s')" % (fingerprint)) - q = projectB.query("SELECT id FROM fingerprint WHERE fingerprint = '%s'" % (fingerprint)) - fingerprint_id = q.getresult()[0][0] - fingerprint_id_cache[fingerprint] = fingerprint_id - - return fingerprint_id - -################################################################################ - -def get_files_id (filename, size, md5sum, location_id): - """ - Returns -1, -2 or the file_id for filename, if its C{size} and C{md5sum} match an - existing copy. - - The database is queried using the C{filename} and C{location_id}. If a file does exist - at that location, the existing size and md5sum are checked against the provided - parameters. A size or checksum mismatch returns -2. If more than one entry is - found within the database, a -1 is returned, no result returns None, otherwise - the file id. - - Results are kept in a cache during runtime to minimize database queries. 
- - @type filename: string - @param filename: the filename of the file to check against the DB - - @type size: int - @param size: the size of the file to check against the DB - - @type md5sum: string - @param md5sum: the md5sum of the file to check against the DB - - @type location_id: int - @param location_id: the id of the location as returned by L{get_location_id} - - @rtype: int / None - @return: Various return values are possible: - - -2: size/checksum error - - -1: more than one file found in database - - None: no file found in database - - int: file id - - """ - global files_id_cache - - cache_key = "%s_%d" % (filename, location_id) - - if files_id_cache.has_key(cache_key): - return files_id_cache[cache_key] - - size = int(size) - q = projectB.query("SELECT id, size, md5sum FROM files WHERE filename = '%s' AND location = %d" % (filename, location_id)) - ql = q.getresult() - if ql: - if len(ql) != 1: - return -1 - ql = ql[0] - orig_size = int(ql[1]) - orig_md5sum = ql[2] - if orig_size != size or orig_md5sum != md5sum: - return -2 - files_id_cache[cache_key] = ql[0] - return files_id_cache[cache_key] - else: - return None - -################################################################################ - -def get_or_set_queue_id (queue): - """ - If C{queue} does not have an entry in the queue table yet, create one - and return the new id. - If C{queue} already has an entry, simply return the existing id. - - Results are kept in a cache during runtime to minimize database queries. - - @type queue: string - @param queue: the queue name (no full path) - - @rtype: int - @return: the database id for the queue - - """ - global queue_id_cache - - if queue_id_cache.has_key(queue): - return queue_id_cache[queue] - - q = projectB.query("SELECT id FROM queue WHERE queue_name = '%s'" % (queue)) - if not q.getresult(): - projectB.query("INSERT INTO queue (queue_name) VALUES ('%s')" % (queue)) - q = projectB.query("SELECT id FROM queue WHERE queue_name = '%s'" % (queue)) - queue_id = q.getresult()[0][0] - queue_id_cache[queue] = queue_id - - return queue_id - -################################################################################ - -def set_files_id (filename, size, md5sum, sha1sum, sha256sum, location_id): - """ - Insert a new entry into the files table and return its id. - - @type filename: string - @param filename: the filename - - @type size: int - @param size: the size in bytes - - @type md5sum: string - @param md5sum: md5sum of the file - - @type sha1sum: string - @param sha1sum: sha1sum of the file - - @type sha256sum: string - @param sha256sum: sha256sum of the file - - @type location_id: int - @param location_id: the id of the location as returned by L{get_location_id} - - @rtype: int - @return: the database id for the new file - - """ - global files_id_cache - - projectB.query("INSERT INTO files (filename, size, md5sum, sha1sum, sha256sum, location) VALUES ('%s', %d, '%s', '%s', '%s', %d)" % (filename, long(size), md5sum, sha1sum, sha256sum, location_id)) - - return get_files_id (filename, size, md5sum, location_id) - - ### currval has issues with postgresql 7.1.3 when the table is big - ### it was taking ~3 seconds to return on auric which is very Not - ### Cool(tm). 
- ## - ##q = projectB.query("SELECT id FROM files WHERE id = currval('files_id_seq')") - ##ql = q.getresult()[0] - ##cache_key = "%s_%d" % (filename, location_id) - ##files_id_cache[cache_key] = ql[0] - ##return files_id_cache[cache_key] - -################################################################################ - -def get_maintainer (maintainer_id): - """ - Return the name of the maintainer behind C{maintainer_id}. - - Results are kept in a cache during runtime to minimize database queries. - - @type maintainer_id: int - @param maintainer_id: the id of the maintainer, eg. from L{get_or_set_maintainer_id} - - @rtype: string - @return: the name of the maintainer - - """ - global maintainer_cache - - if not maintainer_cache.has_key(maintainer_id): - q = projectB.query("SELECT name FROM maintainer WHERE id = %s" % (maintainer_id)) - maintainer_cache[maintainer_id] = q.getresult()[0][0] - - return maintainer_cache[maintainer_id] - -################################################################################ - -def get_suites(pkgname, src=False): - """ - Return the suites in which C{pkgname} can be found. If C{src} is True query for source - package, else binary package. - - @type pkgname: string - @param pkgname: name of the package - - @type src: bool - @param src: if True look for source packages, false (default) looks for binary. - - @rtype: list - @return: list of suites, or empty list if no match - - """ - if src: - sql = """ - SELECT suite_name - FROM source, - src_associations, - suite - WHERE source.id = src_associations.source - AND source.source = '%s' - AND src_associations.suite = suite.id - """ % (pkgname) - else: - sql = """ - SELECT suite_name - FROM binaries, - bin_associations, - suite - WHERE binaries.id = bin_associations.bin - AND package = '%s' - AND bin_associations.suite = suite.id - """ % (pkgname) - - q = projectB.query(sql) - return map(lambda x: x[0], q.getresult()) - - -################################################################################ - -def get_new_comments(package): - """ - Returns all the possible comments attached to C{package} in NEW. All versions. - - @type package: string - @param package: name of the package - - @rtype: list - @return: list of strings containing comments for all versions from all authors for package - """ - - comments = [] - query = projectB.query(""" SELECT version, comment, author, notedate - FROM new_comments - WHERE package = '%s' - ORDER BY notedate - """ % (package)) - - for row in query.getresult(): - comments.append("\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s\n" % (row[2], row[0], row[3], row[1])) - comments.append("-"*72) - - return comments - -def has_new_comment(package, version, ignore_trainee=False): - """ - Returns true if the given combination of C{package}, C{version} has a comment. - If C{ignore_trainee} is true, comments from a trainee are ignored. 
- - @type package: string - @param package: name of the package - - @type version: string - @param version: package version - - @type ignore_trainee: boolean - @param ignore_trainee: ignore trainee comments - - @rtype: boolean - @return: true/false - """ - - trainee="" - if ignore_trainee: - trainee='AND trainee=false' - - exists = projectB.query("""SELECT 1 FROM new_comments - WHERE package='%s' - AND version='%s' - %s - LIMIT 1""" - % (package, version, trainee) ).getresult() - - if not exists: - return False - else: - return True - -def add_new_comment(package, version, comment, author, trainee=False): - """ - Add a new comment for C{package}, C{version} written by C{author} - - @type package: string - @param package: name of the package - - @type version: string - @param version: package version - - @type comment: string - @param comment: the comment - - @type author: string - @param author: the authorname - - @type trainee: boolean - @param trainee: trainee comment - """ - - projectB.query(""" INSERT INTO new_comments (package, version, comment, author, trainee) - VALUES ('%s', '%s', '%s', '%s', '%s') - """ % (package, version, pg.escape_string(comment), pg.escape_string(author), trainee)) - - return - -def delete_new_comments(package, version): - """ - Delete a comment for C{package}, C{version}, if one exists - """ - - projectB.query(""" DELETE FROM new_comments - WHERE package = '%s' AND version = '%s' - """ % (package, version)) - return - -def delete_all_new_comments(package): - """ - Delete all comments for C{package}, if they exist - """ - - projectB.query(""" DELETE FROM new_comments - WHERE package = '%s' - """ % (package)) - return - -################################################################################ -def copy_temporary_contents(package, version, arch, deb, reject): - """ - copy the previously stored contents from the temp table to the permanant one - - during process-unchecked, the deb should have been scanned and the - contents stored in pending_content_associations - """ - - # first see if contents exist: - - arch_id = get_architecture_id (arch) - - exists = projectB.query("""SELECT 1 FROM pending_content_associations - WHERE package='%s' - AND version='%s' - AND architecture=%d LIMIT 1""" - % (package, version, arch_id) ).getresult() - - if not exists: - # This should NOT happen. We should have added contents - # during process-unchecked. if it did, log an error, and send - # an email. 
- subst = { - "__PACKAGE__": package, - "__VERSION__": version, - "__ARCH__": arch, - "__TO_ADDRESS__": Cnf["Dinstall::MyAdminAddress"], - "__DAK_ADDRESS__": Cnf["Dinstall::MyEmailAddress"] } - - message = utils.TemplateSubst(subst, Cnf["Dir::Templates"]+"/missing-contents") - utils.send_mail( message ) - - exists = Binary(deb, reject).scan_package() - - if exists: - sql = """INSERT INTO content_associations(binary_pkg,filepath,filename) - SELECT currval('binaries_id_seq'), filepath, filename FROM pending_content_associations - WHERE package='%s' - AND version='%s' - AND architecture=%d""" % (package, version, arch_id) - projectB.query(sql) - projectB.query("""DELETE from pending_content_associations - WHERE package='%s' - AND version='%s' - AND architecture=%d""" % (package, version, arch_id)) - - return exists diff --git a/daklib/dbconn.py b/daklib/dbconn.py index 8ee90768..6d5497fc 100755 --- a/daklib/dbconn.py +++ b/daklib/dbconn.py @@ -39,7 +39,7 @@ import traceback from inspect import getargspec -from sqlalchemy import create_engine, Table, MetaData, select +from sqlalchemy import create_engine, Table, MetaData from sqlalchemy.orm import sessionmaker, mapper, relation # Don't remove this, we re-export the exceptions to scripts which import us @@ -59,21 +59,46 @@ __all__ = ['IntegrityError', 'SQLAlchemyError'] ################################################################################ def session_wrapper(fn): + """ + Wrapper around common ".., session=None):" handling. If the wrapped + function is called without passing 'session', we create a local one + and destroy it when the function ends. + + Also attaches a commit_or_flush method to the session; if we created a + local session, this is a synonym for session.commit(), otherwise it is a + synonym for session.flush(). + """ + def wrapped(*args, **kwargs): private_transaction = False + + # Find the session object session = kwargs.get('session') - # No session specified as last argument or in kwargs, create one. - if session is None and len(args) <= len(getargspec(fn)[0]) - 1: - private_transaction = True - kwargs['session'] = DBConn().session() + if session is None: + if len(args) <= len(getargspec(fn)[0]) - 1: + # No session specified as last argument or in kwargs + private_transaction = True + session = kwargs['session'] = DBConn().session() + else: + # Session is last argument in args + session = args[-1] + if session is None: + args = list(args) + session = args[-1] = DBConn().session() + private_transaction = True + + if private_transaction: + session.commit_or_flush = session.commit + else: + session.commit_or_flush = session.flush try: return fn(*args, **kwargs) finally: if private_transaction: # We created a session; close it. - kwargs['session'].close() + session.close() wrapped.__doc__ = fn.__doc__ wrapped.func_name = fn.func_name @@ -419,6 +444,7 @@ class ContentFilename(object): __all__.append('ContentFilename') +@session_wrapper def get_or_set_contents_file_id(filename, session=None): """ Returns database id for given filename. 
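The @session_wrapper decorator introduced at the top of this dbconn.py hunk replaces the repeated privatetrans bookkeeping that the following hunks delete. A minimal usage sketch, assuming a configured dak database and an importable daklib (the filename is made up), of how decorated helpers such as get_or_set_contents_file_id are meant to be called:

# Illustrative only, not part of the patch.
from daklib.dbconn import DBConn, get_or_set_contents_file_id

# 1) No session passed: the wrapper opens a private session, so
#    commit_or_flush() commits the INSERT and the session is closed on return.
cf_id = get_or_set_contents_file_id("usr/bin/hello")

# 2) Caller-managed transaction: the wrapper reuses the given session and
#    commit_or_flush() only flushes, leaving the commit to the caller.
session = DBConn().session()
cf_id = get_or_set_contents_file_id("usr/bin/hello", session=session)
session.commit()
session.close()

Callers that manage their own transaction keep control of the final commit because commit_or_flush is bound to session.flush() in that case.
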
@@ -435,10 +461,6 @@ def get_or_set_contents_file_id(filename, session=None): @rtype: int @return: the database id for the given component """ - privatetrans = False - if session is None: - session = DBConn().session() - privatetrans = True q = session.query(ContentFilename).filter_by(filename=filename) @@ -448,15 +470,9 @@ def get_or_set_contents_file_id(filename, session=None): cf = ContentFilename() cf.filename = filename session.add(cf) - if privatetrans: - session.commit() - else: - session.flush() + session.commit_or_flush() ret = cf.cafilename_id - if privatetrans: - session.close() - return ret __all__.append('get_or_set_contents_file_id') @@ -523,6 +539,7 @@ class ContentFilepath(object): __all__.append('ContentFilepath') +@session_wrapper def get_or_set_contents_path_id(filepath, session=None): """ Returns database id for given path. @@ -539,10 +556,6 @@ def get_or_set_contents_path_id(filepath, session=None): @rtype: int @return: the database id for the given path """ - privatetrans = False - if session is None: - session = DBConn().session() - privatetrans = True q = session.query(ContentFilepath).filter_by(filepath=filepath) @@ -552,15 +565,9 @@ def get_or_set_contents_path_id(filepath, session=None): cf = ContentFilepath() cf.filepath = filepath session.add(cf) - if privatetrans: - session.commit() - else: - session.flush() + session.commit_or_flush() ret = cf.cafilepath_id - if privatetrans: - session.close() - return ret __all__.append('get_or_set_contents_path_id') @@ -820,6 +827,7 @@ class Fingerprint(object): __all__.append('Fingerprint') +@session_wrapper def get_or_set_fingerprint(fpr, session=None): """ Returns Fingerprint object for given fpr. @@ -838,10 +846,6 @@ def get_or_set_fingerprint(fpr, session=None): @rtype: Fingerprint @return: the Fingerprint object for the given fpr """ - privatetrans = False - if session is None: - session = DBConn().session() - privatetrans = True q = session.query(Fingerprint).filter_by(fingerprint=fpr) @@ -851,15 +855,9 @@ def get_or_set_fingerprint(fpr, session=None): fingerprint = Fingerprint() fingerprint.fingerprint = fpr session.add(fingerprint) - if privatetrans: - session.commit() - else: - session.flush() + session.commit_or_flush() ret = fingerprint - if privatetrans: - session.close() - return ret __all__.append('get_or_set_fingerprint') @@ -875,6 +873,7 @@ class Keyring(object): __all__.append('Keyring') +@session_wrapper def get_or_set_keyring(keyring, session=None): """ If C{keyring} does not have an entry in the C{keyrings} table yet, create one @@ -886,28 +885,17 @@ def get_or_set_keyring(keyring, session=None): @rtype: Keyring @return: the Keyring object for this keyring - """ - privatetrans = False - if session is None: - session = DBConn().session() - privatetrans = True - - try: - obj = session.query(Keyring).filter_by(keyring_name=keyring).first() - if obj is None: - obj = Keyring(keyring_name=keyring) - session.add(obj) - if privatetrans: - session.commit() - else: - session.flush() + q = session.query(Keyring).filter_by(keyring_name=keyring) + try: + return q.one() + except NoResultFound: + obj = Keyring(keyring_name=keyring) + session.add(obj) + session.commit_or_flush() return obj - finally: - if privatetrans: - session.close() __all__.append('get_or_set_keyring') @@ -973,6 +961,7 @@ class Maintainer(object): __all__.append('Maintainer') +@session_wrapper def get_or_set_maintainer(name, session=None): """ Returns Maintainer object for given maintainer name. 
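Beyond the decorator, the refactor settles on a single get-or-set idiom for these lookup helpers. A schematic of what each of them now boils down to; Thing is a stand-in for ContentFilepath, Fingerprint, Keyring, Maintainer and friends, and the sketch is imagined to live next to the other helpers inside daklib/dbconn.py:

from sqlalchemy.orm.exc import NoResultFound

@session_wrapper
def get_or_set_thing(name, session=None):
    q = session.query(Thing).filter_by(name=name)
    try:
        ret = q.one()                  # already known: just return it
    except NoResultFound:
        ret = Thing()
        ret.name = name
        session.add(ret)
        session.commit_or_flush()      # commit (private session) or flush
    return ret
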
@@ -991,10 +980,6 @@ def get_or_set_maintainer(name, session=None): @rtype: Maintainer @return: the Maintainer object for the given maintainer """ - privatetrans = False - if session is None: - session = DBConn().session() - privatetrans = True q = session.query(Maintainer).filter_by(name=name) try: @@ -1003,19 +988,14 @@ def get_or_set_maintainer(name, session=None): maintainer = Maintainer() maintainer.name = name session.add(maintainer) - if privatetrans: - session.commit() - else: - session.flush() + session.commit_or_flush() ret = maintainer - if privatetrans: - session.close() - return ret __all__.append('get_or_set_maintainer') +@session_wrapper def get_maintainer(maintainer_id, session=None): """ Return the name of the maintainer behind C{maintainer_id} or None if that @@ -1028,16 +1008,7 @@ def get_maintainer(maintainer_id, session=None): @return: the Maintainer with this C{maintainer_id} """ - privatetrans = False - if session is None: - session = DBConn().session() - privatetrans = True - - try: - return session.query(Maintainer).get(maintainer_id) - finally: - if privatetrans: - session.close() + return session.query(Maintainer).get(maintainer_id) __all__.append('get_maintainer') @@ -1443,23 +1414,27 @@ class Queue(object): session.add(qb) - # If the .orig.tar.gz is in the pool, create a symlink to - # it (if one doesn't already exist) - if changes.orig_tar_id: - # Determine the .orig.tar.gz file name - for dsc_file in changes.dsc_files.keys(): - if dsc_file.endswith(".orig.tar.gz"): - filename = dsc_file - - dest = os.path.join(dest_dir, filename) + # If the .orig tarballs are in the pool, create a symlink to + # them (if one doesn't already exist) + for dsc_file in changes.dsc_files.keys(): + # Skip all files except orig tarballs + from daklib.regexes import re_is_orig_source + if not re_is_orig_source.match(dsc_file): + continue + # Skip orig files not identified in the pool + if not (changes.orig_files.has_key(dsc_file) and + changes.orig_files[dsc_file].has_key("id")): + continue + orig_file_id = changes.orig_files[dsc_file]["id"] + dest = os.path.join(dest_dir, dsc_file) # If it doesn't exist, create a symlink if not os.path.exists(dest): q = session.execute("SELECT l.path, f.filename FROM location l, files f WHERE f.id = :id and f.location = l.id", - {'id': changes.orig_tar_id}) + {'id': orig_file_id}) res = q.fetchone() if not res: - return "[INTERNAL ERROR] Couldn't find id %s in files table." % (changes.orig_tar_id) + return "[INTERNAL ERROR] Couldn't find id %s in files table." % (orig_file_id) src = os.path.join(res[0], res[1]) os.symlink(src, dest) @@ -1489,9 +1464,10 @@ class Queue(object): __all__.append('Queue') @session_wrapper -def get_queue(queuename, session=None): +def get_or_set_queue(queuename, session=None): """ - Returns Queue object for given C{queue name}. + Returns Queue object for given C{queue name}, creating it if it does not + exist. 
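The Queue.autobuild_upload hunk above is the first consumer of the per-filename structure that replaces orig_tar_id, orig_tar_gz and orig_tar_location. A sketch of the shape that code (and check_dsc_against_db further down in queue.py) expects, with made-up values; only the key names come from the patch:

# Illustrative shape of pkg.orig_files after check_dsc_against_db has run.
orig_files = {
    "hello_2.2.orig.tar.gz": {
        "id": 123456,       # files.id, when the tarball is already in the pool
        "path": "/srv/ftp.debian.org/ftp/pool/main/h/hello/hello_2.2.orig.tar.gz",
        "location": 42,     # location.id, used when updating dsc_files
    },
    # 3.0 (quilt) uploads may add further "orig-<component>" tarballs here.
}
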
@type queuename: string @param queuename: The name of the queue @@ -1507,11 +1483,17 @@ def get_queue(queuename, session=None): q = session.query(Queue).filter_by(queue_name=queuename) try: - return q.one() + ret = q.one() except NoResultFound: - return None + queue = Queue() + queue.queue_name = queuename + session.add(queue) + session.commit_or_flush() + ret = queue -__all__.append('get_queue') + return ret + +__all__.append('get_or_set_queue') ################################################################################ @@ -1799,6 +1781,17 @@ __all__.append('SrcAssociation') ################################################################################ +class SrcFormat(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % (self.format_name) + +__all__.append('SrcFormat') + +################################################################################ + class SrcUploader(object): def __init__(self, *args, **kwargs): pass @@ -1969,6 +1962,42 @@ __all__.append('get_suite_architectures') ################################################################################ +class SuiteSrcFormat(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % (self.suite_id, self.src_format_id) + +__all__.append('SuiteSrcFormat') + +@session_wrapper +def get_suite_src_formats(suite, session=None): + """ + Returns list of allowed SrcFormat for C{suite}. + + @type suite: str + @param suite: Suite name to search for + + @type session: Session + @param session: Optional SQL session object (a temporary one will be + generated if not supplied) + + @rtype: list + @return: the list of allowed source formats for I{suite} + """ + + q = session.query(SrcFormat) + q = q.join(SuiteSrcFormat) + q = q.join(Suite).filter_by(suite_name=suite) + q = q.order_by('format_name') + + return q.all() + +__all__.append('get_suite_src_formats') + +################################################################################ + class Uid(object): def __init__(self, *args, **kwargs): pass @@ -1990,6 +2019,7 @@ class Uid(object): __all__.append('Uid') +@session_wrapper def add_database_user(uidname, session=None): """ Adds a database user @@ -2006,19 +2036,12 @@ def add_database_user(uidname, session=None): @return: the uid object for the given uidname """ - privatetrans = False - if session is None: - session = DBConn().session() - privatetrans = True - session.execute("CREATE USER :uid", {'uid': uidname}) - - if privatetrans: - session.commit() - session.close() + session.commit_or_flush() __all__.append('add_database_user') +@session_wrapper def get_or_set_uid(uidname, session=None): """ Returns uid object for given uidname. 
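get_suite_src_formats above is what check_dsc in daklib/queue.py (later in this diff) uses to police the Format field per target suite. A rough illustration, assuming the new src_format and suite_src_formats tables have been populated; the parsed .dsc dict is hypothetical:

from daklib.dbconn import get_suite_src_formats

# Which source formats does the target suite accept?
allowed = [x.format_name for x in get_suite_src_formats("unstable")]

dsc = {"format": "3.0 (quilt)"}    # hypothetical parsed .dsc
if dsc["format"] not in allowed:
    print "source format '%s' not allowed in unstable (accepted: %s)" % \
        (dsc["format"], ", ".join(allowed))
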
@@ -2037,11 +2060,6 @@ def get_or_set_uid(uidname, session=None): @return: the uid object for the given uidname """ - privatetrans = False - if session is None: - session = DBConn().session() - privatetrans = True - q = session.query(Uid).filter_by(uid=uidname) try: @@ -2050,15 +2068,9 @@ def get_or_set_uid(uidname, session=None): uid = Uid() uid.uid = uidname session.add(uid) - if privatetrans: - session.commit() - else: - session.flush() + session.commit_or_flush() ret = uid - if privatetrans: - session.close() - return ret __all__.append('get_or_set_uid') @@ -2116,9 +2128,11 @@ class DBConn(Singleton): self.tbl_section = Table('section', self.db_meta, autoload=True) self.tbl_source = Table('source', self.db_meta, autoload=True) self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True) + self.tbl_src_format = Table('src_format', self.db_meta, autoload=True) self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True) self.tbl_suite = Table('suite', self.db_meta, autoload=True) self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True) + self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True) self.tbl_uid = Table('uid', self.db_meta, autoload=True) def __setupmappers(self): @@ -2280,6 +2294,10 @@ class DBConn(Singleton): source_id = self.tbl_src_associations.c.source, source = relation(DBSource))) + mapper(SrcFormat, self.tbl_src_format, + properties = dict(src_format_id = self.tbl_src_format.c.id, + format_name = self.tbl_src_format.c.format_name)) + mapper(SrcUploader, self.tbl_src_uploaders, properties = dict(uploader_id = self.tbl_src_uploaders.c.id, source_id = self.tbl_src_uploaders.c.source, @@ -2298,6 +2316,12 @@ class DBConn(Singleton): arch_id = self.tbl_suite_architectures.c.architecture, architecture = relation(Architecture))) + mapper(SuiteSrcFormat, self.tbl_suite_src_formats, + properties = dict(suite_id = self.tbl_suite_src_formats.c.suite, + suite = relation(Suite, backref='suitesrcformats'), + src_format_id = self.tbl_suite_src_formats.c.src_format, + src_format = relation(SrcFormat))) + mapper(Uid, self.tbl_uid, properties = dict(uid_id = self.tbl_uid.c.id, fingerprint = relation(Fingerprint))) diff --git a/daklib/queue.py b/daklib/queue.py index 489b1ef8..890df374 100755 --- a/daklib/queue.py +++ b/daklib/queue.py @@ -26,7 +26,6 @@ Queue utility functions for dak ############################################################################### -import cPickle import errno import os import pg @@ -39,6 +38,7 @@ import utils import commands import shutil import textwrap +import tempfile from types import * import yaml @@ -50,7 +50,7 @@ from config import Config from holding import Holding from dbconn import * from summarystats import SummaryStats -from utils import parse_changes +from utils import parse_changes, check_dsc_files from textutils import fix_maintainer from binary import Binary @@ -73,7 +73,7 @@ def get_type(f, session): # Determine the type if f.has_key("dbtype"): file_type = f["dbtype"] - elif f["type"] in [ "orig.tar.gz", "orig.tar.bz2", "tar.gz", "tar.bz2", "diff.gz", "diff.bz2", "dsc" ]: + elif re_source_ext.match(f["type"]): file_type = "dsc" else: utils.fubar("invalid type (%s) for new. Dazed, confused and sure as heck not continuing." 
% (file_type)) @@ -724,7 +724,7 @@ class Upload(object): self.rejects.append("%s: changes file doesn't say %s for Source" % (f, entry["package"])) # Ensure the source version matches the version in the .changes file - if entry["type"] == "orig.tar.gz": + if re_is_orig_source.match(f): changes_version = self.pkg.changes["chopversion2"] else: changes_version = self.pkg.changes["chopversion"] @@ -932,7 +932,7 @@ class Upload(object): self.rejects.append("source only uploads are not supported.") ########################################################################### - def check_dsc(self, action=True): + def check_dsc(self, action=True, session=None): """Returns bool indicating whether or not the source changes are valid""" # Ensure there is source to check if not self.pkg.changes["architecture"].has_key("source"): @@ -992,10 +992,11 @@ class Upload(object): if not re_valid_version.match(self.pkg.dsc["version"]): self.rejects.append("%s: invalid version number '%s'." % (dsc_filename, self.pkg.dsc["version"])) - # Bumping the version number of the .dsc breaks extraction by stable's - # dpkg-source. So let's not do that... - if self.pkg.dsc["format"] != "1.0": - self.rejects.append("%s: incompatible 'Format' version produced by a broken version of dpkg-dev 1.9.1{3,4}." % (dsc_filename)) + # Only a limited list of source formats are allowed in each suite + for dist in self.pkg.changes["distribution"].keys(): + allowed = [ x.format_name for x in get_suite_src_formats(dist, session) ] + if self.pkg.dsc["format"] not in allowed: + self.rejects.append("%s: source format '%s' not allowed in %s (accepted: %s) " % (dsc_filename, self.pkg.dsc["format"], dist, ", ".join(allowed))) # Validate the Maintainer field try: @@ -1009,11 +1010,6 @@ class Upload(object): for field_name in [ "build-depends", "build-depends-indep" ]: field = self.pkg.dsc.get(field_name) if field: - # Check for broken dpkg-dev lossage... - if field.startswith("ARRAY"): - self.rejects.append("%s: invalid %s field produced by a broken version of dpkg-dev (1.10.11)" % \ - (dsc_filename, field_name.title())) - # Have apt try to parse them... try: apt_pkg.ParseSrcDepends(field) @@ -1027,19 +1023,8 @@ class Upload(object): if epochless_dsc_version != self.pkg.files[dsc_filename]["version"]: self.rejects.append("version ('%s') in .dsc does not match version ('%s') in .changes." % (epochless_dsc_version, changes_version)) - # Ensure there is a .tar.gz in the .dsc file - has_tar = False - for f in self.pkg.dsc_files.keys(): - m = re_issource.match(f) - if not m: - self.rejects.append("%s: %s in Files field not recognised as source." % (dsc_filename, f)) - continue - ftype = m.group(3) - if ftype == "orig.tar.gz" or ftype == "tar.gz": - has_tar = True - - if not has_tar: - self.rejects.append("%s: no .tar.gz or .orig.tar.gz in 'Files' field." % (dsc_filename)) + # Ensure the Files field contain only what's expected + self.rejects.extend(check_dsc_files(dsc_filename, self.pkg.dsc, self.pkg.dsc_files)) # Ensure source is newer than existing source in target suites session = DBConn().session() @@ -1076,23 +1061,26 @@ class Upload(object): if not os.path.exists(src): return ftype = m.group(3) - if ftype == "orig.tar.gz" and self.pkg.orig_tar_gz: + if re_is_orig_source.match(f) and pkg.orig_files.has_key(f) and \ + pkg.orig_files[f].has_key("path"): continue dest = os.path.join(os.getcwd(), f) os.symlink(src, dest) - # If the orig.tar.gz is not a part of the upload, create a symlink to the - # existing copy. 
- if self.pkg.orig_tar_gz: - dest = os.path.join(os.getcwd(), os.path.basename(self.pkg.orig_tar_gz)) - os.symlink(self.pkg.orig_tar_gz, dest) + # If the orig files are not a part of the upload, create symlinks to the + # existing copies. + for orig_file in self.pkg.orig_files.keys(): + if not self.pkg.orig_files[orig_file].has_key("path"): + continue + dest = os.path.join(os.getcwd(), os.path.basename(orig_file)) + os.symlink(self.pkg.orig_files[orig_file]["path"], dest) # Extract the source cmd = "dpkg-source -sn -x %s" % (dsc_filename) (result, output) = commands.getstatusoutput(cmd) if (result != 0): self.rejects.append("'dpkg-source -x' failed for %s [return code: %s]." % (dsc_filename, result)) - self.rejects.append(utils.prefix_multi_line_string(output, " [dpkg-source output:] "), "") + self.rejects.append(utils.prefix_multi_line_string(output, " [dpkg-source output:] ")) return if not cnf.Find("Dir::Queue::BTSVersionTrack"): @@ -1128,10 +1116,11 @@ class Upload(object): # We should probably scrap or rethink the whole reprocess thing # Bail out if: # a) there's no source - # or b) reprocess is 2 - we will do this check next time when orig.tar.gz is in 'files' - # or c) the orig.tar.gz is MIA + # or b) reprocess is 2 - we will do this check next time when orig + # tarball is in 'files' + # or c) the orig files are MIA if not self.pkg.changes["architecture"].has_key("source") or self.reprocess == 2 \ - or self.pkg.orig_tar_gz == -1: + or len(self.pkg.orig_files) == 0: return tmpdir = utils.temp_dirname() @@ -1217,6 +1206,94 @@ class Upload(object): self.ensure_hashes() + ########################################################################### + def check_lintian(self): + # Only check some distributions + valid_dist = False + for dist in ('unstable', 'experimental'): + if dist in self.pkg.changes['distribution']: + valid_dist = True + break + + if not valid_dist: + return + + cnf = Config() + tagfile = cnf.get("Dinstall::LintianTags") + if tagfile is None: + # We don't have a tagfile, so just don't do anything. + return + # Parse the yaml file + sourcefile = file(tagfile, 'r') + sourcecontent = sourcefile.read() + sourcefile.close() + try: + lintiantags = yaml.load(sourcecontent)['lintian'] + except yaml.YAMLError, msg: + utils.fubar("Can not read the lintian tags file %s, YAML error: %s." % (tagfile, msg)) + return + + # Now setup the input file for lintian. lintian wants "one tag per line" only, + # so put it together like it. We put all types of tags in one file and then sort + # through lintians output later to see if its a fatal tag we detected, or not. + # So we only run lintian once on all tags, even if we might reject on some, but not + # reject on others. + # Additionally build up a set of tags + tags = set() + (fd, temp_filename) = utils.temp_filename() + temptagfile = os.fdopen(fd, 'w') + for tagtype in lintiantags: + for tag in lintiantags[tagtype]: + temptagfile.write("%s\n" % tag) + tags.add(tag) + temptagfile.close() + + # So now we should look at running lintian at the .changes file, capturing output + # to then parse it. + command = "lintian --show-overrides --tags-from-file %s %s" % (temp_filename, self.pkg.changes_file) + (result, output) = commands.getstatusoutput(command) + # We are done with lintian, remove our tempfile + os.unlink(temp_filename) + if (result == 2): + utils.warn("lintian failed for %s [return code: %s]." 
% (self.pkg.changes_file, result)) + utils.warn(utils.prefix_multi_line_string(output, " [possible output:] ")) + + if len(output) == 0: + return + + # We have output of lintian, this package isn't clean. Lets parse it and see if we + # are having a victim for a reject. + # W: tzdata: binary-without-manpage usr/sbin/tzconfig + for line in output.split('\n'): + m = re_parse_lintian.match(line) + if m is None: + continue + + etype = m.group(1) + epackage = m.group(2) + etag = m.group(3) + etext = m.group(4) + + # So lets check if we know the tag at all. + if etag not in tags: + continue + + if etype == 'O': + # We know it and it is overriden. Check that override is allowed. + if etag in lintiantags['warning']: + # The tag is overriden, and it is allowed to be overriden. + # Don't add a reject message. + pass + elif etag in lintiantags['error']: + # The tag is overriden - but is not allowed to be + self.rejects.append("%s: Overriden tag %s found, but this tag may not be overwritten." % (epackage, etag)) + else: + # Tag is known, it is not overriden, direct reject. + self.rejects.append("%s: Found lintian output: '%s %s', automatically rejected package." % (epackage, etag, etext)) + # Now tell if they *might* override it. + if etag in lintiantags['warning']: + self.rejects.append("%s: If you have a good reason, you may override this lintian tag." % (epackage)) + ########################################################################### def check_urgency(self): cnf = Config() @@ -1670,7 +1747,7 @@ distribution.""" # yes # This routine returns None on success or an error on failure - res = get_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"]) + res = get_or_set_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"]) if res: utils.fubar(res) @@ -2068,7 +2145,7 @@ distribution.""" """ @warning: NB: this function can remove entries from the 'files' index [if - the .orig.tar.gz is a duplicate of the one in the archive]; if + the orig tarball is a duplicate of the one in the archive]; if you're iterating over 'files' and call this function as part of the loop, be sure to add a check to the top of the loop to ensure you haven't just tried to dereference the deleted entry. @@ -2076,7 +2153,8 @@ distribution.""" """ Cnf = Config() - self.pkg.orig_tar_gz = None + self.pkg.orig_files = {} # XXX: do we need to clear it? + orig_files = self.pkg.orig_files # Try and find all files mentioned in the .dsc. This has # to work harder to cope with the multiple possible @@ -2110,7 +2188,7 @@ distribution.""" if len(ql) > 0: # Ignore exact matches for .orig.tar.gz match = 0 - if dsc_name.endswith(".orig.tar.gz"): + if re_is_orig_source.match(dsc_name): for i in ql: if self.pkg.files.has_key(dsc_name) and \ int(self.pkg.files[dsc_name]["size"]) == int(i.filesize) and \ @@ -2120,13 +2198,15 @@ distribution.""" # This would fix the stupidity of changing something we often iterate over # whilst we're doing it del self.pkg.files[dsc_name] - self.pkg.orig_tar_gz = os.path.join(i.location.path, i.filename) + if not orig_files.has_key(dsc_name): + orig_files[dsc_name] = {} + orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename) match = 1 if not match: self.rejects.append("can not overwrite existing copy of '%s' already in the archive." 
% (dsc_name)) - elif dsc_name.endswith(".orig.tar.gz"): + elif re_is_orig_source.match(dsc_name): # Check in the pool ql = get_poolfile_like_name(dsc_name, session) @@ -2164,9 +2244,11 @@ distribution.""" # need this for updating dsc_files in install() dsc_entry["files id"] = x.file_id # See install() in process-accepted... - self.pkg.orig_tar_id = x.file_id - self.pkg.orig_tar_gz = old_file - self.pkg.orig_tar_location = x.location.location_id + if not orig_files.has_key(dsc_name): + orig_files[dsc_name] = {} + orig_files[dsc_name]["id"] = x.file_id + orig_files[dsc_name]["path"] = old_file + orig_files[dsc_name]["location"] = x.location.location_id else: # TODO: Record the queues and info in the DB so we don't hardcode all this crap # Not there? Check the queue directories... @@ -2180,11 +2262,12 @@ distribution.""" in_otherdir_fh.close() actual_size = os.stat(in_otherdir)[stat.ST_SIZE] found = in_otherdir - self.pkg.orig_tar_gz = in_otherdir + if not orig_files.has_key(dsc_name): + orig_files[dsc_name] = {} + orig_files[dsc_name]["path"] = in_otherdir if not found: self.rejects.append("%s refers to %s, but I can't find it in the queue or in the pool." % (file, dsc_name)) - self.pkg.orig_tar_gz = -1 continue else: self.rejects.append("%s refers to %s, but I can't find it in the queue." % (file, dsc_name)) diff --git a/daklib/regexes.py b/daklib/regexes.py index d1f0d381..6be99977 100755 --- a/daklib/regexes.py +++ b/daklib/regexes.py @@ -42,7 +42,11 @@ re_arch_from_filename = re.compile(r"/binary-[^/]+/") re_extract_src_version = re.compile (r"(\S+)\s*\((.*)\)") re_isadeb = re.compile (r"(.+?)_(.+?)_(.+)\.u?deb$") -re_issource = re.compile (r"(.+)_(.+?)\.(orig\.tar\.gz|diff\.gz|tar\.gz|dsc)$") +orig_source_ext_re = r"orig(?:-.+)?\.tar\.(?:gz|bz2)" +re_orig_source_ext = re.compile(orig_source_ext_re + "$") +re_source_ext = re.compile("(" + orig_source_ext_re + r"|debian\.tar\.(?:gz|bz2)|diff\.gz|tar\.(?:gz|bz2)|dsc)$") +re_issource = re.compile(r"(.+)_(.+?)\." 
+ re_source_ext.pattern) +re_is_orig_source = re.compile (r"(.+)_(.+?)\.orig(?:-.+)?\.tar\.(?:gz|bz2)$") re_single_line_field = re.compile(r"^(\S*?)\s*:\s*(.*)") re_multi_line_field = re.compile(r"^\s(.*)") @@ -108,3 +112,4 @@ re_user_mails = re.compile(r"^(pub|uid):[^rdin].*<(.*@.*)>.*$", re.MULTILINE); re_user_name = re.compile(r"^pub:.*:(.*)<.*$", re.MULTILINE); re_re_mark = re.compile(r'^RE:') +re_parse_lintian = re.compile(r"^(W|E|O): (.*?): ([^ ]*) ?(.*)$") diff --git a/daklib/srcformats.py b/daklib/srcformats.py new file mode 100644 index 00000000..0a74c192 --- /dev/null +++ b/daklib/srcformats.py @@ -0,0 +1,65 @@ +import re + +srcformats = [] + +class SourceFormat(type): + def __new__(cls, name, bases, attrs): + klass = super(SourceFormat, cls).__new__(cls, name, bases, attrs) + srcformats.append(klass) + + assert str(klass.name) + assert iter(klass.requires) + assert iter(klass.disallowed) + + klass.re_format = re.compile(klass.format) + + return klass + + @classmethod + def reject_msgs(cls, has): + if len(cls.requires) != len([x for x in cls.requires if has[x]]): + yield "lack of required files for format %s" % cls.name + + for key in cls.disallowed: + if has[key]: + yield "contains source files not allowed in format %s" % cls.name + +class FormatOne(SourceFormat): + __metaclass__ = SourceFormat + + name = '1.0' + format = r'1.0' + + requires = () + disallowed = ('debian_tar', 'more_orig_tar') + + @classmethod + def reject_msgs(cls, has): + if not (has['native_tar_gz'] or (has['orig_tar_gz'] and has['debian_diff'])): + yield "no .tar.gz or .orig.tar.gz+.diff.gz in 'Files' field." + if has['native_tar_gz'] and has['debian_diff']: + yield "native package with diff makes no sense" + if (has['orig_tar_gz'] != has['orig_tar']) or \ + (has['native_tar_gz'] != has['native_tar']): + yield "contains source files not allowed in format %s" % cls.name + + for msg in super(FormatOne, cls).reject_msgs(has): + yield msg + +class FormatThree(SourceFormat): + __metaclass__ = SourceFormat + + name = '3.x (native)' + format = r'3\.\d+ \(native\)' + + requires = ('native_tar',) + disallowed = ('orig_tar', 'debian_diff', 'debian_tar', 'more_orig_tar') + +class FormatThreeQuilt(SourceFormat): + __metaclass__ = SourceFormat + + name = '3.x (quilt)' + format = r'3\.\d+ \(quilt\)' + + requires = ('orig_tar', 'debian_tar') + disallowed = ('debian_diff', 'native_tar') diff --git a/daklib/utils.py b/daklib/utils.py index a9dea920..788bcd41 100755 --- a/daklib/utils.py +++ b/daklib/utils.py @@ -22,7 +22,6 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -import codecs import commands import email.Header import os @@ -39,14 +38,18 @@ import time import re import string import email as modemail +import subprocess from dbconn import DBConn, get_architecture, get_component, get_suite from dak_exceptions import * from textutils import fix_maintainer from regexes import re_html_escaping, html_escaping, re_single_line_field, \ re_multi_line_field, re_srchasver, re_verwithext, \ - re_parse_maintainer, re_taint_free, re_gpg_uid, re_re_mark, \ - re_whitespace_comment + re_taint_free, re_gpg_uid, re_re_mark, \ + re_whitespace_comment, re_issource + +from srcformats import srcformats +from collections import defaultdict ################################################################################ @@ -60,6 +63,22 @@ key_uid_email_cache = {} #: Cache for email addresses from gpg key uids known_hashes = [("sha1", apt_pkg.sha1sum, 
(1, 8)), ("sha256", apt_pkg.sha256sum, (1, 8))] #: hashes we accept for entries in .changes/.dsc +# Monkeypatch commands.getstatusoutput as it returns a "0" exit code in +# all situations under lenny's Python. +import commands +def dak_getstatusoutput(cmd): + pipe = subprocess.Popen(cmd, shell=True, universal_newlines=True, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + + output = "".join(pipe.stdout.readlines()) + + ret = pipe.wait() + if ret is None: + ret = 0 + + return ret, output +commands.getstatusoutput = dak_getstatusoutput + ################################################################################ def html_escape(s): @@ -332,6 +351,83 @@ def check_size(where, files): ################################################################################ +def check_dsc_files(dsc_filename, dsc=None, dsc_files=None): + """ + Verify that the files listed in the Files field of the .dsc are + those expected given the announced Format. + + @type dsc_filename: string + @param dsc_filename: path of .dsc file + + @type dsc: dict + @param dsc: the content of the .dsc parsed by C{parse_changes()} + + @type dsc_files: dict + @param dsc_files: the file list returned by C{build_file_list()} + + @rtype: list + @return: all errors detected + """ + rejmsg = [] + + # Parse the file if needed + if dsc is None: + dsc = parse_changes(dsc_filename, signing_rules=1); + + if dsc_files is None: + dsc_files = build_file_list(dsc, is_a_dsc=1) + + # Ensure .dsc lists proper set of source files according to the format + # announced + has = defaultdict(lambda: 0) + + ftype_lookup = ( + (r'orig.tar.gz', ('orig_tar_gz', 'orig_tar')), + (r'diff.gz', ('debian_diff',)), + (r'tar.gz', ('native_tar_gz', 'native_tar')), + (r'debian\.tar\.(gz|bz2)', ('debian_tar',)), + (r'orig\.tar\.(gz|bz2)', ('orig_tar',)), + (r'tar\.(gz|bz2)', ('native_tar',)), + (r'orig-.+\.tar\.(gz|bz2)', ('more_orig_tar',)), + ) + + for f in dsc_files.keys(): + m = re_issource.match(f) + if not m: + rejmsg.append("%s: %s in Files field not recognised as source." + % (dsc_filename, f)) + continue + + # Populate 'has' dictionary by resolving keys in lookup table + matched = False + for regex, keys in ftype_lookup: + if re.match(regex, m.group(3)): + matched = True + for key in keys: + has[key] += 1 + break + + # File does not match anything in lookup table; reject + if not matched: + reject("%s: unexpected source file '%s'" % (dsc_filename, f)) + + # Check for multiple files + for file_type in ('orig_tar', 'native_tar', 'debian_tar', 'debian_diff'): + if has[file_type] > 1: + rejmsg.append("%s: lists multiple %s" % (dsc_filename, file_type)) + + # Source format specific tests + for format in srcformats: + if format.re_format.match(dsc['format']): + rejmsg.extend([ + '%s: %s' % (dsc_filename, x) for x in format.reject_msgs(has) + ]) + break + + return rejmsg + +################################################################################ + def check_hash_fields(what, manifest): """ check_hash_fields ensures that there are no checksum fields in the @@ -442,10 +538,10 @@ def build_file_list(changes, is_a_dsc=0, field="files", hashname="md5sum"): format = format[:2] if is_a_dsc: - # format = (1,0) are the only formats we currently accept, # format = (0,0) are missing format headers of which we still # have some in the archive. 
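check_dsc_files above classifies every entry of the .dsc Files field into the has counters via ftype_lookup and then defers to daklib.srcformats for the per-format rules. A minimal sketch with hypothetical file names, assuming daklib is importable:

from daklib.utils import check_dsc_files

# hello_2.2.orig.tar.gz     -> has['orig_tar_gz'] += 1, has['orig_tar'] += 1
# hello_2.2-1.debian.tar.gz -> has['debian_tar'] += 1
rejects = check_dsc_files("hello_2.2-1.dsc",
    dsc={"format": "3.0 (quilt)"},
    dsc_files={"hello_2.2.orig.tar.gz": {}, "hello_2.2-1.debian.tar.gz": {}})
# -> [] : orig_tar plus debian_tar is exactly what FormatThreeQuilt requires;
#         adding a hello_2.2-1.diff.gz to dsc_files would produce a reject.
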
-        if format != (1,0) and format != (0,0):
+        if format != (1,0) and format != (0,0) and \
+           format != (3,0,"quilt") and format != (3,0,"native"):
             raise UnknownFormatError, "%s" % (changes.get("format","0.0"))
     else:
         if (format < (1,5) or format > (1,8)):
diff --git a/docs/TODO b/docs/TODO
index 5a2b885f..8edd4e4e 100644
--- a/docs/TODO
+++ b/docs/TODO
@@ -4,16 +4,7 @@ Various
 -------
 
-* Lintian based automated rejects
-  - Have a set of lintian tags each package *must* not have. If it does
-    -> reject.
-  - If a tag is overriden by the maintainer, do not reject, but put it
-    into NEW. If the override is ok note that in a table and dont act on
-    it for any future uploads of this package anymore.
-  - possibly have two classes of tags. one for "shouldnt happen by
-    accident" and one "shouldnt happen". the first gets ignored from us
-    if overwritten in the package, the second only us can overwrite.
-  - its a suite option in dak, not active for all at once.
+* Implement autosigning, see ftpmaster_autosigning on ftp-master host in text/.
 
 * Throw away all DD uploaded .debs. (Depend on "Lintian based automated rejects")
 
@@ -27,8 +18,6 @@ Various
   - its a suite option, not active for all at once.
   - should have all buildd machines under dsa control
 
-* Implement autosigning, see ftpmaster_autosigning on ftp-master host in text/.
-
 * Check TODO.old and move still-valid/useful entries over here.
 
 * need a testsuite _badly_
@@ -47,10 +36,6 @@ Various
   - needs updateX.py written and then the rest of the code changed to deal with it.
 
-* Checkout SQL Alchemy and probably use that for our database layer.
-
-* reject on > or < in a version constraint
-
 * use pythonX.Y-tarfile to check orig.tar.gz timestamps too.
 
 * the .dak stuff is fundamentally braindamaged for various reasons, it
diff --git a/tests/test_regexes.py b/tests/test_regexes.py
index 7c43d097..766e73e4 100755
--- a/tests/test_regexes.py
+++ b/tests/test_regexes.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 import unittest
 
 import os, sys
@@ -29,3 +31,33 @@ class re_single_line_field(unittest.TestCase):
         self.assertEqual(self.MATCH(': ::').groups(), ('', '::'))
         self.assertEqual(self.MATCH('Foo::bar').groups(), ('Foo', ':bar'))
         self.assertEqual(self.MATCH('Foo: :bar').groups(), ('Foo', ':bar'))
+
+class re_parse_lintian(unittest.TestCase):
+    MATCH = regexes.re_parse_lintian.match
+
+    def testBinary(self):
+        self.assertEqual(
+            self.MATCH('W: pkgname: some-tag path/to/file').groups(),
+            ('W', 'pkgname', 'some-tag', 'path/to/file')
+        )
+
+    def testBinaryNoDescription(self):
+        self.assertEqual(
+            self.MATCH('W: pkgname: some-tag').groups(),
+            ('W', 'pkgname', 'some-tag', '')
+        )
+
+    def testSource(self):
+        self.assertEqual(
+            self.MATCH('W: pkgname source: some-tag path/to/file').groups(),
+            ('W', 'pkgname source', 'some-tag', 'path/to/file')
+        )
+
+    def testSourceNoDescription(self):
+        self.assertEqual(
+            self.MATCH('W: pkgname source: some-tag').groups(),
+            ('W', 'pkgname source', 'some-tag', '')
+        )
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_srcformats.py b/tests/test_srcformats.py
new file mode 100755
index 00000000..9fec4a87
--- /dev/null
+++ b/tests/test_srcformats.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+
+import unittest
+
+import os, sys
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from collections import defaultdict
+
+from daklib import srcformats
+
+class SourceFormatTestCase(unittest.TestCase):
+    def get_rejects(self, has_vars):
+        has = defaultdict(lambda: 0)
+        has.update(has_vars)
+        return list(self.fmt.reject_msgs(has))
+
+    def assertAccepted(self, has):
+        self.assertEqual(self.get_rejects(has), [])
+
+    def assertRejected(self, has):
+        self.assertNotEqual(self.get_rejects(has), [])
+
+class FormatOneTestCase(SourceFormatTestCase):
+    fmt = srcformats.FormatOne
+
+    def testEmpty(self):
+        self.assertRejected({})
+
+    def testNative(self):
+        self.assertAccepted({'native_tar': 1, 'native_tar_gz': 1})
+
+    def testStandard(self):
+        self.assertAccepted({
+            'orig_tar': 1,
+            'orig_tar_gz': 1,
+            'debian_diff': 1,
+        })
+
+    def testDisallowed(self):
+        self.assertRejected({
+            'native_tar': 1,
+            'native_tar_gz': 1,
+            'debian_tar': 1,
+        })
+        self.assertRejected({
+            'orig_tar': 1,
+            'orig_tar_gz': 1,
+            'debian_diff': 0,
+        })
+        self.assertRejected({
+            'native_tar': 1,
+            'native_tar_gz': 1,
+            'more_orig_tar': 1,
+        })
+        self.assertRejected({
+            'native_tar': 1,
+            'native_tar_gz': 1,
+            'debian_diff': 1,
+        })
+
+class FormatThreeTestCase(SourceFormatTestCase):
+    fmt = srcformats.FormatThree
+
+    def testEmpty(self):
+        self.assertRejected({})
+
+    def testSimple(self):
+        self.assertAccepted({'native_tar': 1})
+
+    def testDisallowed(self):
+        self.assertRejected({'native_tar': 1, 'orig_tar': 1})
+        self.assertRejected({'native_tar': 1, 'debian_diff': 1})
+        self.assertRejected({'native_tar': 1, 'debian_tar': 1})
+        self.assertRejected({'native_tar': 1, 'more_orig_tar': 1})
+
+class FormatThreeQuiltTestCase(SourceFormatTestCase):
+    fmt = srcformats.FormatThreeQuilt
+
+    def testEmpty(self):
+        self.assertRejected({})
+
+    def testSimple(self):
+        self.assertAccepted({'orig_tar': 1, 'debian_tar': 1})
+
+    def testMultipleTarballs(self):
+        self.assertAccepted({
+            'orig_tar': 1,
+            'debian_tar': 1,
+            'more_orig_tar': 42,
+        })
+
+    def testDisallowed(self):
+        self.assertRejected({
+            'orig_tar': 1,
+            'debian_tar': 1,
+            'debian_diff': 1
+        })
+        self.assertRejected({
+            'orig_tar': 1,
+            'debian_tar': 1,
+            'native_tar': 1,
+        })
+
+if __name__ == '__main__':
+    unittest.main()
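
The effect of the new srcformats registry and check_dsc_files() can be seen with a few lines of standalone Python. The snippet below is only an illustrative sketch, not code from this commit: it assumes it is run from the top of a dak checkout (so that daklib is importable), and the package name foo_1.0-1 and its file counters are invented. It builds the kind of 'has' counters that check_dsc_files() derives from the Files field for a 3.0 (quilt) upload that wrongly also ships a .diff.gz, asks the matching format class for its reject messages, and prints them. Because every class created with the SourceFormat metaclass registers itself in srcformats.srcformats, adding a new source format only needs a new class definition; the lookup loop below, like the one in check_dsc_files(), picks it up automatically.

# Illustrative sketch (hypothetical upload, see note above).
from collections import defaultdict

from daklib import srcformats

# Counters as check_dsc_files() would build them for a source package that
# ships foo.orig.tar.gz, foo.debian.tar.gz and (wrongly) foo.diff.gz.
has = defaultdict(lambda: 0)
has.update({'orig_tar': 1, 'orig_tar_gz': 1, 'debian_tar': 1, 'debian_diff': 1})

dsc_format = '3.0 (quilt)'      # value of the Format field in the .dsc

for fmt in srcformats.srcformats:
    if fmt.re_format.match(dsc_format):
        # FormatThreeQuilt disallows debian_diff, so one reject is printed.
        for msg in fmt.reject_msgs(has):
            print '%s: %s' % ('foo_1.0-1.dsc', msg)
        break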