From: Mike O'Connor Date: Sat, 31 Oct 2009 10:29:34 +0000 (+0000) Subject: merge from ftp-master X-Git-Url: https://git.decadent.org.uk/gitweb/?a=commitdiff_plain;h=3b50b545815298b77b8eb68930acb6fde01ea4d4;hp=ebbc5a6d36f10612e6b5c2d112b5081fbc8e0831;p=dak.git merge from ftp-master --- diff --git a/config/debian/cron.dinstall b/config/debian/cron.dinstall index 1c9fa5af..200c7a64 100755 --- a/config/debian/cron.dinstall +++ b/config/debian/cron.dinstall @@ -197,9 +197,6 @@ function accepted() { function cruft() { log "Checking for cruft in overrides" dak check-overrides - - log "Fixing symlinks in $ftpdir" - symlinks -d -r $ftpdir } function msfl() { @@ -252,6 +249,7 @@ function mpfm() { function packages() { log "Generating Packages and Sources files" cd $configdir + GZIP='--rsyncable' ; export GZIP apt-ftparchive generate apt.conf } @@ -905,14 +903,6 @@ GO=( ) stage $GO & -GO=( - FUNC="aptftpcleanup" - TIME="apt-ftparchive cleanup" - ARGS="" - ERR="false" -) -stage $GO & - GO=( FUNC="merkel3" TIME="merkel ddaccessible sync" @@ -927,6 +917,14 @@ GO=( ARGS="" ERR="" ) +stage $GO & + +GO=( + FUNC="aptftpcleanup" + TIME="apt-ftparchive cleanup" + ARGS="" + ERR="false" +) stage $GO log "Daily cron scripts successful, all done" diff --git a/config/debian/cron.weekly b/config/debian/cron.weekly index 5ab9d8b8..34f0c64a 100755 --- a/config/debian/cron.weekly +++ b/config/debian/cron.weekly @@ -57,6 +57,9 @@ apt-ftparchive -q clean apt.conf.buildd echo "Update wanna-build database dump" /org/ftp.debian.org/scripts/nfu/get-w-b-db +echo "Fixing symlinks in $ftpdir" +symlinks -d -r $ftpdir + echo "Finally, all is done, compressing logfile" exec > /dev/null 2>&1 diff --git a/config/debian/lintian.tags b/config/debian/lintian.tags index 1c05410c..0dabaf58 100644 --- a/config/debian/lintian.tags +++ b/config/debian/lintian.tags @@ -10,7 +10,11 @@ lintian: - binary-with-bad-dynamic-table - usr-share-doc-symlink-without-dependency - mknod-in-maintainer-script + - package-contains-info-dir-file error: + - wrong-file-owner-uid-or-gid + - bad-relation + - FSSTND-dir-in-usr - binary-in-etc - missing-dependency-on-perlapi - copyright-lists-upstream-authors-with-dh_make-boilerplate diff --git a/dak/add_user.py b/dak/add_user.py index 77de3e3f..28d31208 100755 --- a/dak/add_user.py +++ b/dak/add_user.py @@ -46,28 +46,28 @@ Adds a new user to the dak databases and keyrings # Stolen from userdir-ldap # Compute a random password using /dev/urandom. def GenPass(): - # Generate a 10 character random string - SaltVals = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/." - Rand = open("/dev/urandom") - Password = "" - for i in range(0,15): - Password = Password + SaltVals[ord(Rand.read(1)[0]) % len(SaltVals)] - return Password + # Generate a 10 character random string + SaltVals = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/." + Rand = open("/dev/urandom") + Password = "" + for i in range(0,15): + Password = Password + SaltVals[ord(Rand.read(1)[0]) % len(SaltVals)] + return Password # Compute the MD5 crypted version of the given password def HashPass(Password): - import crypt - # Hash it telling glibc to use the MD5 algorithm - if you dont have - # glibc then just change Salt = "$1$" to Salt = "" - SaltVals = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789/." 
- Salt = "$1$" - Rand = open("/dev/urandom") - for x in range(0,10): - Salt = Salt + SaltVals[ord(Rand.read(1)[0]) % len(SaltVals)] - Pass = crypt.crypt(Password,Salt) - if len(Pass) < 14: - raise "Password Error", "MD5 password hashing failed, not changing the password!" - return Pass + import crypt + # Hash it telling glibc to use the MD5 algorithm - if you dont have + # glibc then just change Salt = "$1$" to Salt = "" + SaltVals = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789/." + Salt = "$1$" + Rand = open("/dev/urandom") + for x in range(0,10): + Salt = Salt + SaltVals[ord(Rand.read(1)[0]) % len(SaltVals)] + Pass = crypt.crypt(Password,Salt) + if len(Pass) < 14: + raise "Password Error", "MD5 password hashing failed, not changing the password!" + return Pass ################################################################################ @@ -112,8 +112,8 @@ def main(): ] for i in [ "help", "create" ]: - if not Cnf.has_key("Add-User::Options::%s" % (i)): - Cnf["Add-User::Options::%s" % (i)] = "" + if not Cnf.has_key("Add-User::Options::%s" % (i)): + Cnf["Add-User::Options::%s" % (i)] = "" apt_pkg.ParseCommandLine(Cnf, Arguments, sys.argv) @@ -138,10 +138,10 @@ def main(): (result, output) = commands.getstatusoutput(cmd) m = re_gpg_fingerprint.search(output) if not m: - print output + print output utils.fubar("0x%s: (1) No fingerprint found in gpg output but it returned 0?\n%s" \ - % (Cnf["Add-User::Options::Key"], utils.prefix_multi_line_string(output, \ - " [GPG output:] "))) + % (Cnf["Add-User::Options::Key"], utils.prefix_multi_line_string(output, \ + " [GPG output:] "))) primary_key = m.group(1) primary_key = primary_key.replace(" ","") @@ -174,70 +174,69 @@ def main(): yn = utils.our_raw_input(prompt).lower() if yn == "y": -# Create an account for the user? - summary = "" - if Cnf.FindB("Add-User::CreateAccount") or Cnf["Add-User::Options::Create"]: - password = GenPass() - pwcrypt = HashPass(password) - if Cnf.has_key("Add-User::GID"): - cmd = "sudo /usr/sbin/useradd -g users -m -p '%s' -c '%s' -G %s %s" \ - % (pwcrypt, name, Cnf["Add-User::GID"], uid) - else: - cmd = "sudo /usr/sbin/useradd -g users -m -p '%s' -c '%s' %s" \ - % (pwcrypt, name, uid) - (result, output) = commands.getstatusoutput(cmd) - if (result != 0): - utils.fubar("Invocation of '%s' failed:\n%s\n" % (cmd, output), result) - try: - summary+=createMail(uid, password, Cnf["Add-User::Options::Key"], Cnf["Dinstall::GPGKeyring"]) - except: - summary="" - utils.warn("Could not prepare password information for mail, not sending password.") - -# Now add user to the database. - # Note that we provide a session, so we're responsible for committing - uidobj = get_or_set_uid(uid, session=session) - uid_id = uidobj.uid_id - add_database_user(uid) - session.commit() -# The following two are kicked out in rhona, so we don't set them. kelly adds -# them as soon as she installs a package with unknown ones, so no problems to expect here. -# Just leave the comment in, to not think about "Why the hell aren't they added" in -# a year, if we ever touch uma again. -# maint_id = database.get_or_set_maintainer_id(name) -# session.execute("INSERT INTO fingerprint (fingerprint, uid) VALUES (:fingerprint, uid)", -# {'fingerprint': primary_key, 'uid': uid_id}) - -# Lets add user to the email-whitelist file if its configured. 
- if Cnf.has_key("Dinstall::MailWhiteList") and Cnf["Dinstall::MailWhiteList"] != "": - file = utils.open_file(Cnf["Dinstall::MailWhiteList"], "a") - for mail in emails: - file.write(mail+'\n') - file.close() - - print "Added:\nUid:\t %s (ID: %s)\nMaint:\t %s\nFP:\t %s" % (uid, uid_id, \ - name, primary_key) - -# Should we send mail to the newly added user? - if Cnf.FindB("Add-User::SendEmail"): - mail = name + "<" + emails[0] +">" - Subst = {} - Subst["__NEW_MAINTAINER__"] = mail - Subst["__UID__"] = uid - Subst["__KEYID__"] = Cnf["Add-User::Options::Key"] - Subst["__PRIMARY_KEY__"] = primary_key - Subst["__FROM_ADDRESS__"] = Cnf["Dinstall::MyEmailAddress"] - Subst["__HOSTNAME__"] = Cnf["Dinstall::MyHost"] - Subst["__SUMMARY__"] = summary - new_add_message = utils.TemplateSubst(Subst,Cnf["Dir::Templates"]+"/add-user.added") - utils.send_mail(new_add_message) + # Create an account for the user? + summary = "" + if Cnf.FindB("Add-User::CreateAccount") or Cnf["Add-User::Options::Create"]: + password = GenPass() + pwcrypt = HashPass(password) + if Cnf.has_key("Add-User::GID"): + cmd = "sudo /usr/sbin/useradd -g users -m -p '%s' -c '%s' -G %s %s" \ + % (pwcrypt, name, Cnf["Add-User::GID"], uid) + else: + cmd = "sudo /usr/sbin/useradd -g users -m -p '%s' -c '%s' %s" \ + % (pwcrypt, name, uid) + (result, output) = commands.getstatusoutput(cmd) + if (result != 0): + utils.fubar("Invocation of '%s' failed:\n%s\n" % (cmd, output), result) + try: + summary+=createMail(uid, password, Cnf["Add-User::Options::Key"], Cnf["Dinstall::GPGKeyring"]) + except: + summary="" + utils.warn("Could not prepare password information for mail, not sending password.") + + # Now add user to the database. + # Note that we provide a session, so we're responsible for committing + uidobj = get_or_set_uid(uid, session=session) + uid_id = uidobj.uid_id + add_database_user(uid) + session.commit() + + # The following two are kicked out in rhona, so we don't set them. kelly adds + # them as soon as she installs a package with unknown ones, so no problems to expect here. + # Just leave the comment in, to not think about "Why the hell aren't they added" in + # a year, if we ever touch uma again. + # maint_id = database.get_or_set_maintainer_id(name) + # session.execute("INSERT INTO fingerprint (fingerprint, uid) VALUES (:fingerprint, uid)", + # {'fingerprint': primary_key, 'uid': uid_id}) + + # Lets add user to the email-whitelist file if its configured. + if Cnf.has_key("Dinstall::MailWhiteList") and Cnf["Dinstall::MailWhiteList"] != "": + f = utils.open_file(Cnf["Dinstall::MailWhiteList"], "a") + for mail in emails: + f.write(mail+'\n') + f.close() + + print "Added:\nUid:\t %s (ID: %s)\nMaint:\t %s\nFP:\t %s" % (uid, uid_id, \ + name, primary_key) + + # Should we send mail to the newly added user? 
+ if Cnf.FindB("Add-User::SendEmail"): + mail = name + "<" + emails[0] +">" + Subst = {} + Subst["__NEW_MAINTAINER__"] = mail + Subst["__UID__"] = uid + Subst["__KEYID__"] = Cnf["Add-User::Options::Key"] + Subst["__PRIMARY_KEY__"] = primary_key + Subst["__FROM_ADDRESS__"] = Cnf["Dinstall::MyEmailAddress"] + Subst["__HOSTNAME__"] = Cnf["Dinstall::MyHost"] + Subst["__SUMMARY__"] = summary + new_add_message = utils.TemplateSubst(Subst,Cnf["Dir::Templates"]+"/add-user.added") + utils.send_mail(new_add_message) else: - uid = None - + uid = None ####################################################################################### if __name__ == '__main__': main() - diff --git a/dak/check_archive.py b/dak/check_archive.py index 2162068e..b9837d30 100755 --- a/dak/check_archive.py +++ b/dak/check_archive.py @@ -185,8 +185,8 @@ def check_override(): print suite_name print "-" * len(suite_name) print - suite = get_suite(suite) - q = s.execute(""" + suite = get_suite(suite_name) + q = session.execute(""" SELECT DISTINCT b.package FROM binaries b, bin_associations ba WHERE b.id = ba.bin AND ba.suite = :suiteid AND NOT EXISTS (SELECT 1 FROM override o WHERE o.suite = :suiteid AND o.package = b.package)""" @@ -195,7 +195,7 @@ SELECT DISTINCT b.package FROM binaries b, bin_associations ba for j in q.fetchall(): print j[0] - q = s.execute(""" + q = session.execute(""" SELECT DISTINCT s.source FROM source s, src_associations sa WHERE s.id = sa.source AND sa.suite = :suiteid AND NOT EXISTS (SELECT 1 FROM override o WHERE o.suite = :suiteid and o.package = s.source)""" @@ -427,8 +427,8 @@ def check_indices_files_exist(): """ for suite in [ "stable", "testing", "unstable" ]: for component in Cnf.ValueList("Suite::%s::Components" % (suite)): - architectures = database.get_suite_architectures(suite) - for arch in [ i.lower() for i in architectures ]: + architectures = get_suite_architectures(suite) + for arch in [ i.arch_string.lower() for i in architectures ]: if arch == "source": validate_sources(suite, component) elif arch == "all": @@ -475,6 +475,7 @@ def chk_bd_process_dir (unused, dirname, filenames): def check_build_depends(): """ Validate build-dependencies of .dsc files in the archive """ + cnf = Config() os.path.walk(cnf["Dir::Root"], chk_bd_process_dir, None) ################################################################################ diff --git a/dak/clean_queues.py b/dak/clean_queues.py index a5b15427..f30d7f12 100755 --- a/dak/clean_queues.py +++ b/dak/clean_queues.py @@ -83,10 +83,10 @@ def init (cnf): os.chdir(incoming) # Remove a file to the morgue -def remove (f): +def remove (from_dir, f): fname = os.path.basename(f) if os.access(f, os.R_OK): - Logger.log(["move file to morgue", fname, del_dir]) + Logger.log(["move file to morgue", from_dir, fname, del_dir]) if Options["Verbose"]: print "Removing '%s' (to '%s')." % (fname, del_dir) if Options["No-Action"]: @@ -106,11 +106,11 @@ def remove (f): # [Used for Incoming/REJECT] # def flush_old (): - Logger.log(["check Incoming/REJECT for old files"]) + Logger.log(["check Incoming/REJECT for old files", os.getcwd()]) for f in os.listdir('.'): if os.path.isfile(f): if os.stat(f)[stat.ST_MTIME] < delete_date: - remove(f) + remove('Incoming/REJECT', f) else: if Options["Verbose"]: print "Skipping, too new, '%s'." 
% (os.path.basename(f)) @@ -122,7 +122,7 @@ def flush_orphans (): all_files = {} changes_files = [] - Logger.log(["check Incoming for old orphaned files"]) + Logger.log(["check Incoming for old orphaned files", os.getcwd()]) # Build up the list of all files in the directory for i in os.listdir('.'): if os.path.isfile(i): @@ -163,7 +163,7 @@ def flush_orphans (): # a .dsc) and should be deleted if old enough. for f in all_files.keys(): if os.stat(f)[stat.ST_MTIME] < delete_date: - remove(f) + remove('Incoming', f) else: if Options["Verbose"]: print "Skipping, too new, '%s'." % (os.path.basename(f)) diff --git a/dak/clean_suites.py b/dak/clean_suites.py index 52b2a8cc..72a1d5a8 100755 --- a/dak/clean_suites.py +++ b/dak/clean_suites.py @@ -163,6 +163,7 @@ def check_files(now_date, delete_date, max_delete, session): SELECT id, filename FROM files f WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id) AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id) + AND NOT EXISTS (SELECT 1 FROM changes_pool_files cpf WHERE cpf.fileid = f.id) AND last_used IS NULL ORDER BY filename""") diff --git a/dak/contents.py b/dak/contents.py index 4211e98e..53d74227 100755 --- a/dak/contents.py +++ b/dak/contents.py @@ -39,6 +39,7 @@ import os import logging import gzip import threading +import traceback import Queue import apt_pkg import datetime diff --git a/dak/cruft_report.py b/dak/cruft_report.py index 63374859..4541bf6e 100755 --- a/dak/cruft_report.py +++ b/dak/cruft_report.py @@ -92,8 +92,9 @@ def do_anais(architecture, binaries_list, source, session): WHERE ba.suite = :suiteid AND ba.bin = b.id AND b.architecture = a.id AND b.package = :package""", {'suiteid': suite_id, 'package': binary}) + ql = q.fetchall() versions = [] - for i in q.fetchall(): + for i in ql: arch = i[0] version = i[1] if architectures.has_key(arch): @@ -357,9 +358,9 @@ def main (): # Set up checks based on mode if Options["Mode"] == "daily": - checks = [ "nbs", "nviu", "obsolete source" ] + checks = [ "nbs", "nviu", "nvit", "obsolete source" ] elif Options["Mode"] == "full": - checks = [ "nbs", "nviu", "obsolete source", "nfu", "dubious nbs", "bnb", "bms", "anais" ] + checks = [ "nbs", "nviu", "nvit", "obsolete source", "nfu", "dubious nbs", "bnb", "bms", "anais" ] else: utils.warn("%s is not a recognised mode - only 'full' or 'daily' are understood." 
% (Options["Mode"])) usage(1) @@ -520,6 +521,9 @@ def main (): if "nviu" in checks: do_newer_version('unstable', 'experimental', 'NVIU', session) + if "nvit" in checks: + do_newer_version('testing', 'testing-proposed-updates', 'NVIT', session) + if "nbs" in checks: do_nbs(real_nbs) diff --git a/dak/dak.py b/dak/dak.py index 052f3b3e..e424836f 100755 --- a/dak/dak.py +++ b/dak/dak.py @@ -34,8 +34,13 @@ G{importgraph} ################################################################################ import sys +import traceback import daklib.utils +from daklib.daklog import Logger +from daklib.config import Config +from daklib.dak_exceptions import CantOpenError + ################################################################################ def init(): @@ -129,6 +134,8 @@ def init(): "Generate statistics"), ("bts-categorize", "Categorize uncategorized bugs filed against ftp.debian.org"), + ("import-known-changes", + "import old changes files into known_changes table"), ("add-user", "Add a user to the archive"), ] @@ -152,6 +159,12 @@ Available commands:""" def main(): """Launch dak functionality.""" + + try: + logger = Logger(Config(), 'dak top-level', print_starting=False) + except CantOpenError: + logger = None + functionality = init() modules = [ command for (command, _) in functionality ] @@ -189,7 +202,21 @@ def main(): # Invoke the module module = __import__(cmdname.replace("-","_")) - module.main() + try: + module.main() + except KeyboardInterrupt: + msg = 'KeyboardInterrupt caught; exiting' + print msg + if logger: + logger.log([msg]) + sys.exit(1) + except SystemExit: + pass + except: + if logger: + for line in traceback.format_exc().split('\n')[:-1]: + logger.log(['exception', line]) + raise ################################################################################ diff --git a/dak/dakdb/update16.py b/dak/dakdb/update16.py new file mode 100755 index 00000000..eca9b48e --- /dev/null +++ b/dak/dakdb/update16.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python +# coding=utf8 + +""" +Adding tables for key-based ACLs and blocks + +@contact: Debian FTP Master +@copyright: 2009 Mark Hymers +@license: GNU General Public License version 2 or later +""" + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+ +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + + +################################################################################ + +import psycopg2 +import time +from daklib.dak_exceptions import DBUpdateError + +################################################################################ + +def do_update(self): + print "Adding tables for handling key-based ACLs and upload blocks" + + try: + c = self.db.cursor() + + # Fix up some older table permissions + c.execute("GRANT SELECT ON src_format TO public") + c.execute("GRANT ALL ON src_format TO ftpmaster") + c.execute("GRANT USAGE ON src_format_id_seq TO ftpmaster") + + c.execute("GRANT SELECT ON suite_src_formats TO public") + c.execute("GRANT ALL ON suite_src_formats TO ftpmaster") + + # Source ACLs table + print "Source ACLs table" + c.execute(""" + CREATE TABLE source_acl ( + id SERIAL PRIMARY KEY, + access_level TEXT UNIQUE NOT NULL + ) + """) + + ## Can upload all packages + c.execute("INSERT INTO source_acl (access_level) VALUES ('full')") + ## Can upload only packages marked as DM upload allowed + c.execute("INSERT INTO source_acl (access_level) VALUES ('dm')") + + c.execute("GRANT SELECT ON source_acl TO public") + c.execute("GRANT ALL ON source_acl TO ftpmaster") + c.execute("GRANT USAGE ON source_acl_id_seq TO ftpmaster") + + # Binary ACLs table + print "Binary ACLs table" + c.execute(""" + CREATE TABLE binary_acl ( + id SERIAL PRIMARY KEY, + access_level TEXT UNIQUE NOT NULL + ) + """) + + ## Can upload any architectures of binary packages + c.execute("INSERT INTO binary_acl (access_level) VALUES ('full')") + ## Can upload debs where architectures are based on the map table binary_acl_map + c.execute("INSERT INTO binary_acl (access_level) VALUES ('map')") + + c.execute("GRANT SELECT ON binary_acl TO public") + c.execute("GRANT ALL ON binary_acl TO ftpmaster") + c.execute("GRANT USAGE ON binary_acl_id_seq TO ftpmaster") + + # This is only used if binary_acl is 2 for the fingerprint concerned + c.execute(""" + CREATE TABLE binary_acl_map ( + id SERIAL PRIMARY KEY, + fingerprint_id INT4 REFERENCES fingerprint (id) NOT NULL, + architecture_id INT4 REFERENCES architecture (id) NOT NULL, + + UNIQUE (fingerprint_id, architecture_id) + )""") + + c.execute("GRANT SELECT ON binary_acl_map TO public") + c.execute("GRANT ALL ON binary_acl_map TO ftpmaster") + c.execute("GRANT USAGE ON binary_acl_map_id_seq TO ftpmaster") + + ## NULL means no source upload access (i.e. any upload containing source + ## will be rejected) + c.execute("ALTER TABLE fingerprint ADD COLUMN source_acl_id INT4 REFERENCES source_acl(id) DEFAULT NULL") + + ## NULL means no binary upload access + c.execute("ALTER TABLE fingerprint ADD COLUMN binary_acl_id INT4 REFERENCES binary_acl(id) DEFAULT NULL") + + ## TRUE here means that if the person doesn't have binary upload permissions for + ## an architecture, we'll reject the .changes. 
FALSE means that we'll simply + ## dispose of those particular binaries + c.execute("ALTER TABLE fingerprint ADD COLUMN binary_reject BOOLEAN NOT NULL DEFAULT TRUE") + + # Blockage table (replaces the hard coded stuff we used to have in extensions) + print "Adding blockage table" + c.execute(""" + CREATE TABLE upload_blocks ( + id SERIAL PRIMARY KEY, + source TEXT NOT NULL, + version TEXT DEFAULT NULL, + fingerprint_id INT4 REFERENCES fingerprint (id), + uid_id INT4 REFERENCES uid (id), + reason TEXT NOT NULL, + + CHECK (fingerprint_id IS NOT NULL OR uid_id IS NOT NULL) + )""") + + c.execute("GRANT SELECT ON upload_blocks TO public") + c.execute("GRANT ALL ON upload_blocks TO ftpmaster") + c.execute("GRANT USAGE ON upload_blocks_id_seq TO ftpmaster") + + c.execute("ALTER TABLE keyrings ADD COLUMN default_source_acl_id INT4 REFERENCES source_acl (id) DEFAULT NULL") + c.execute("ALTER TABLE keyrings ADD COLUMN default_binary_acl_id INT4 REFERENCES binary_acl (id) DEFAULT NULL") + c.execute("ALTER TABLE keyrings ADD COLUMN default_binary_reject BOOLEAN NOT NULL DEFAULT TRUE") + # Set up keyring priorities + c.execute("ALTER TABLE keyrings ADD COLUMN priority INT4 NOT NULL DEFAULT 100") + # And then we don't need the DM stuff any more + c.execute("ALTER TABLE keyrings DROP COLUMN debian_maintainer") + + # Default ACLs for keyrings + c.execute(""" + CREATE TABLE keyring_acl_map ( + id SERIAL PRIMARY KEY, + keyring_id INT4 REFERENCES keyrings (id) NOT NULL, + architecture_id INT4 REFERENCES architecture (id) NOT NULL, + + UNIQUE (keyring_id, architecture_id) + )""") + + c.execute("GRANT SELECT ON keyring_acl_map TO public") + c.execute("GRANT ALL ON keyring_acl_map TO ftpmaster") + c.execute("GRANT USAGE ON keyring_acl_map_id_seq TO ftpmaster") + + # Set up some default stuff; default to old behaviour + print "Setting up some defaults" + + c.execute("""UPDATE keyrings SET default_source_acl_id = (SELECT id FROM source_acl WHERE access_level = 'full'), + default_binary_acl_id = (SELECT id FROM binary_acl WHERE access_level = 'full')""") + + c.execute("""UPDATE keyrings SET default_source_acl_id = (SELECT id FROM source_acl WHERE access_level = 'dm'), + default_binary_acl_id = (SELECT id FROM binary_acl WHERE access_level = 'full') + WHERE name = 'debian-maintainers.gpg'""") + + c.execute("""UPDATE keyrings SET priority = 90 WHERE name = 'debian-maintainers.gpg'""") + + # Initialize the existing keys + c.execute("""UPDATE fingerprint SET binary_acl_id = (SELECT default_binary_acl_id FROM keyrings + WHERE keyrings.id = fingerprint.keyring)""") + + c.execute("""UPDATE fingerprint SET source_acl_id = (SELECT default_source_acl_id FROM keyrings + WHERE keyrings.id = fingerprint.keyring)""") + + print "Updating config version" + c.execute("UPDATE config SET value = '16' WHERE name = 'db_revision'") + self.db.commit() + + except psycopg2.ProgrammingError, msg: + self.db.rollback() + raise DBUpdateError, "Unable to apply ACLs update (16), rollback issued. 
Error message : %s" % (str(msg)) diff --git a/dak/dakdb/update17.py b/dak/dakdb/update17.py old mode 100644 new mode 100755 index b5bbb3cc..beca9425 --- a/dak/dakdb/update17.py +++ b/dak/dakdb/update17.py @@ -44,7 +44,7 @@ def do_update(self): file text, binary_id integer, UNIQUE(file,binary_id))""" ) - + c.execute("""ALTER TABLE ONLY bin_contents ADD CONSTRAINT bin_contents_bin_fkey FOREIGN KEY (binary_id) REFERENCES binaries(id) @@ -54,6 +54,7 @@ def do_update(self): c.execute("GRANT ALL ON bin_contents TO ftpmaster;") c.execute("GRANT SELECT ON bin_contents TO public;") + c.execute("UPDATE config SET value = '17' WHERE name = 'db_revision'") self.db.commit() diff --git a/dak/dakdb/update18.py b/dak/dakdb/update18.py new file mode 100755 index 00000000..c3ace63d --- /dev/null +++ b/dak/dakdb/update18.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python +# coding=utf8 + +""" +Adding table to get rid of queue/done checks + +@contact: Debian FTP Master +@copyright: 2009 Joerg Jaspert +@license: GNU General Public License version 2 or later +""" + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + + +################################################################################ + +import psycopg2 +import time +import os +import datetime +from daklib.dak_exceptions import DBUpdateError, InvalidDscError, ChangesUnicodeError +from daklib.config import Config +from daklib.changes import Changes +from daklib.utils import parse_changes, warn, gpgv_get_status_output, process_gpgv_output + +################################################################################ + +def check_signature (sig_filename, data_filename=""): + keyrings = [ + "/home/joerg/keyring/keyrings/debian-keyring.gpg", + "/home/joerg/keyring/keyrings/debian-keyring.pgp", + "/home/joerg/keyring/keyrings/debian-maintainers.gpg", + "/home/joerg/keyring/keyrings/debian-role-keys.gpg", + "/home/joerg/keyring/keyrings/emeritus-keyring.pgp", + "/home/joerg/keyring/keyrings/emeritus-keyring.gpg", + "/home/joerg/keyring/keyrings/removed-keys.gpg", + "/home/joerg/keyring/keyrings/removed-keys.pgp" + ] + + keyringargs = " ".join(["--keyring %s" % x for x in keyrings ]) + + # Build the command line + status_read, status_write = os.pipe() + cmd = "gpgv --status-fd %s %s %s" % (status_write, keyringargs, sig_filename) + + # Invoke gpgv on the file + (output, status, exit_status) = gpgv_get_status_output(cmd, status_read, status_write) + + # Process the status-fd output + (keywords, internal_error) = process_gpgv_output(status) + + # If we failed to parse the status-fd output, let's just whine and bail now + if internal_error: + warn("Couldn't parse signature") + return None + + # usually one would check for bad things here. We, however, do not care. 
+ + # Next check gpgv exited with a zero return code + if exit_status: + warn("Couldn't parse signature") + return None + + # Sanity check the good stuff we expect + if not keywords.has_key("VALIDSIG"): + warn("Couldn't parse signature") + else: + args = keywords["VALIDSIG"] + if len(args) < 1: + warn("Couldn't parse signature") + else: + fingerprint = args[0] + + return fingerprint + +################################################################################ + +def do_update(self): + print "Adding known_changes table" + + try: + c = self.db.cursor() + c.execute(""" + CREATE TABLE known_changes ( + id SERIAL PRIMARY KEY, + changesname TEXT NOT NULL, + seen TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), + source TEXT NOT NULL, + binaries TEXT NOT NULL, + architecture TEXT NOT NULL, + version TEXT NOT NULL, + distribution TEXT NOT NULL, + urgency TEXT NOT NULL, + maintainer TEXT NOT NULL, + fingerprint TEXT NOT NULL, + changedby TEXT NOT NULL, + date TEXT NOT NULL, + UNIQUE (changesname) + ) + """) + c.execute("CREATE INDEX changesname_ind ON known_changes(changesname)") + c.execute("CREATE INDEX changestimestamp_ind ON known_changes(seen)") + c.execute("CREATE INDEX changessource_ind ON known_changes(source)") + c.execute("CREATE INDEX changesdistribution_ind ON known_changes(distribution)") + c.execute("CREATE INDEX changesurgency_ind ON known_changes(urgency)") + + c.execute("GRANT ALL ON known_changes TO ftpmaster;") + c.execute("GRANT SELECT ON known_changes TO public;") + + c.execute("UPDATE config SET value = '18' WHERE name = 'db_revision'") + self.db.commit() + + print "Done. Now looking for old changes files" + count = 0 + failure = 0 + cnf = Config() + for directory in [ "Accepted", "Byhand", "Done", "New", "ProposedUpdates", "OldProposedUpdates" ]: + checkdir = cnf["Dir::Queue::%s" % (directory) ] + if os.path.exists(checkdir): + print "Looking into %s" % (checkdir) + for filename in os.listdir(checkdir): + if not filename.endswith(".changes"): + # Only interested in changes files. + continue + try: + count += 1 + print "Directory %s, file %7d, failures %3d. (%s)" % (directory, count, failure, filename) + changes = Changes() + changes.changes_file = filename + changesfile = os.path.join(checkdir, filename) + changes.changes = parse_changes(changesfile, signing_rules=-1) + changes.changes["fingerprint"] = check_signature(changesfile) + changes.add_known_changes(directory) + except InvalidDscError, line: + warn("syntax error in .dsc file '%s', line %s." % (f, line)) + failure += 1 + except ChangesUnicodeError: + warn("found invalid changes file, not properly utf-8 encoded") + failure += 1 + + except psycopg2.ProgrammingError, msg: + self.db.rollback() + raise DBUpdateError, "Unable to apply knownchanges update 18, rollback issued. 
Error message : %s" % (str(msg)) diff --git a/dak/dakdb/update19.py b/dak/dakdb/update19.py index f530375c..49a4dbc7 100644 --- a/dak/dakdb/update19.py +++ b/dak/dakdb/update19.py @@ -2,10 +2,10 @@ # coding=utf8 """ -Adding a trainee field to the process-new notes +Move to using the C version of debversion @contact: Debian FTP Master -@copyright: 2009 Mike O'Connor +@copyright: 2009 Mark Hymers @license: GNU General Public License version 2 or later """ @@ -30,210 +30,78 @@ Adding a trainee field to the process-new notes import psycopg2 import time +import os +import datetime +import traceback + from daklib.dak_exceptions import DBUpdateError +from daklib.config import Config ################################################################################ -def suites(): - """ - return a list of suites to operate on - """ - if Config().has_key( "%s::%s" %(options_prefix,"Suite")): - suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")]) - else: - suites = [ 'unstable', 'testing' ] -# suites = Config().SubTree("Suite").List() - - return suites - -def arches(cursor, suite): - """ - return a list of archs to operate on - """ - arch_list = [] - cursor.execute("""SELECT s.architecture, a.arch_string - FROM suite_architectures s - JOIN architecture a ON (s.architecture=a.id) - WHERE suite = :suite""", {'suite' : suite }) - - while True: - r = cursor.fetchone() - if not r: - break - - if r[1] != "source" and r[1] != "all": - arch_list.append((r[0], r[1])) - - return arch_list - def do_update(self): - """ - Adding contents table as first step to maybe, finally getting rid - of apt-ftparchive - """ - - print __doc__ + print "Converting database to use new C based debversion type" try: c = self.db.cursor() - c.execute("""CREATE TABLE pending_bin_contents ( - id serial NOT NULL, - package text NOT NULL, - version debversion NOT NULL, - arch int NOT NULL, - filename text NOT NULL, - type int NOT NULL, - PRIMARY KEY(id))""" ); - - c.execute("""CREATE TABLE deb_contents ( - filename text, - section text, - package text, - binary_id integer, - arch integer, - suite integer, - component text)""" ) - - c.execute("""CREATE TABLE udeb_contents ( - filename text, - section text, - package text, - binary_id integer, - suite integer, - arch integer, - component text )""" ) - - c.execute("""ALTER TABLE ONLY deb_contents - ADD CONSTRAINT deb_contents_arch_fkey - FOREIGN KEY (arch) REFERENCES architecture(id) - ON DELETE CASCADE;""") - - c.execute("""ALTER TABLE ONLY udeb_contents - ADD CONSTRAINT udeb_contents_arch_fkey - FOREIGN KEY (arch) REFERENCES architecture(id) - ON DELETE CASCADE;""") - - c.execute("""ALTER TABLE ONLY deb_contents - ADD CONSTRAINT deb_contents_pkey - PRIMARY KEY (filename,package,arch,suite);""") - - c.execute("""ALTER TABLE ONLY udeb_contents - ADD CONSTRAINT udeb_contents_pkey - PRIMARY KEY (filename,package,arch,suite);""") - - c.execute("""ALTER TABLE ONLY deb_contents - ADD CONSTRAINT deb_contents_suite_fkey - FOREIGN KEY (suite) REFERENCES suite(id) - ON DELETE CASCADE;""") - - c.execute("""ALTER TABLE ONLY udeb_contents - ADD CONSTRAINT udeb_contents_suite_fkey - FOREIGN KEY (suite) REFERENCES suite(id) - ON DELETE CASCADE;""") - - c.execute("""ALTER TABLE ONLY deb_contents - ADD CONSTRAINT deb_contents_binary_fkey - FOREIGN KEY (binary_id) REFERENCES binaries(id) - ON DELETE CASCADE;""") - - c.execute("""ALTER TABLE ONLY udeb_contents - ADD CONSTRAINT udeb_contents_binary_fkey - FOREIGN KEY (binary_id) REFERENCES binaries(id) - ON DELETE CASCADE;""") - - 
c.execute("""CREATE INDEX ind_deb_contents_binary ON deb_contents(binary_id);""" ) - - - suites = self.suites() - - for suite in [i.lower() for i in suites]: - suite_id = DBConn().get_suite_id(suite) - arch_list = arches(c, suite_id) - arch_list = arches(c, suite_id) - - for (arch_id,arch_str) in arch_list: - c.execute( "CREATE INDEX ind_deb_contents_%s_%s ON deb_contents (arch,suite) WHERE (arch=2 OR arch=%d) AND suite=$d"%(arch_str,suite,arch_id,suite_id) ) - - for section, sname in [("debian-installer","main"), - ("non-free/debian-installer", "nonfree")]: - c.execute( "CREATE INDEX ind_udeb_contents_%s_%s ON udeb_contents (section,suite) WHERE section=%s AND suite=$d"%(sname,suite,section,suite_id) ) - - - c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_bin_a() RETURNS trigger AS $$ - event = TD["event"] - if event == "DELETE" or event == "UPDATE": - - plpy.execute(plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 and suite=$2", - ["int","int"]), - [TD["old"]["bin"], TD["old"]["suite"]]) - - if event == "INSERT" or event == "UPDATE": - - content_data = plpy.execute(plpy.prepare( - """SELECT s.section, b.package, b.architecture, c.name, ot.type - FROM override o - JOIN override_type ot on o.type=ot.id - JOIN binaries b on b.package=o.package - JOIN files f on b.file=f.id - JOIN location l on l.id=f.location - JOIN section s on s.id=o.section - JOIN component c on c.id=l.component - WHERE b.id=$1 - AND o.suite=$2 - """, - ["int", "int"]), - [TD["new"]["bin"], TD["new"]["suite"]])[0] - - component_str = ""; - if not content_data["name"] === "main": - component_str=content_data["name"]+"/" - - filenames = plpy.execute(plpy.prepare( - "SELECT bc.file FROM bin_contents bc where bc.binary_id=$1", - ["int"]), - [TD["new"]["bin"]]) - - for filename in filenames: - plpy.execute(plpy.prepare( - """INSERT INTO deb_contents - (file,section,package,binary_id,arch,suite,component) - VALUES($1,$2,$3,$4,$5,$6,$7)""", - ["text","text","text","int","int","int","text"]), - [filename["filename"], - content_data["section"], - content_data["package"], - TD["new"]["bin"], - content_data["architecture"], - TD["new"]["suite"], - component_str]) -$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER; -""") - - - c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_override() RETURNS trigger AS $$ - event = TD["event"] - if event == "UPDATE": - - otype = plpy.execute(plpy.prepare("SELECT type from override_type where id=$1",["int"]),TD["new"]["type"] )[0]; - if otype["type"].endswith("deb"): - table_name = "%s_contents" % otype["type"] - plpy.execute(plpy.prepare("UPDATE %s set sections=$1" % table_name - ["text"]), - [TD["new"]["section"]]) - -$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER; -""") - c.execute( """CREATE TRIGGER bin_associations_contents_trigger - AFTER INSERT OR UPDATE OR DELETE ON bin_associations - FOR EACH ROW EXECUTE PROCEDURE update_contents_for_bin_a();""") - c.execute("""CREATE TRIGGER override_contents_trigger - AFTER UPDATE ON override - FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""") - + print "Temporarily converting columns to TEXT" + c.execute("ALTER TABLE binaries ALTER COLUMN version TYPE TEXT") + c.execute("ALTER TABLE source ALTER COLUMN version TYPE TEXT") + c.execute("ALTER TABLE upload_blocks ALTER COLUMN version TYPE TEXT") + c.execute("ALTER TABLE pending_content_associations ALTER COLUMN version TYPE TEXT") + + print "Dropping old debversion type" + c.execute("DROP OPERATOR >(debversion, debversion)") + c.execute("DROP OPERATOR 
<(debversion, debversion)") + c.execute("DROP OPERATOR <=(debversion, debversion)") + c.execute("DROP OPERATOR >=(debversion, debversion)") + c.execute("DROP OPERATOR =(debversion, debversion)") + c.execute("DROP OPERATOR <>(debversion, debversion)") + c.execute("DROP FUNCTION debversion_eq(debversion,debversion)") + c.execute("DROP FUNCTION debversion_ge(debversion,debversion)") + c.execute("DROP FUNCTION debversion_gt(debversion,debversion)") + c.execute("DROP FUNCTION debversion_le(debversion,debversion)") + c.execute("DROP FUNCTION debversion_lt(debversion,debversion)") + c.execute("DROP FUNCTION debversion_ne(debversion,debversion)") + c.execute("DROP FUNCTION debversion_compare(debversion,debversion)") + c.execute("DROP FUNCTION debversion_revision(debversion)") + c.execute("DROP FUNCTION debversion_version(debversion)") + c.execute("DROP FUNCTION debversion_epoch(debversion)") + c.execute("DROP FUNCTION debversion_split(debversion)") + c.execute("DROP TYPE debversion") + + # URGH - kill me now + print "Importing new debversion type" + f = open('/usr/share/postgresql/8.4/contrib/debversion.sql', 'r') + cmds = [] + curcmd = '' + for j in f.readlines(): + j = j.replace('\t', '').replace('\n', '').split('--')[0] + if not j.startswith('--'): + jj = j.split(';') + curcmd += " " + jj[0] + if len(jj) > 1: + for jjj in jj[1:]: + if jjj.strip() == '': + cmds.append(curcmd) + curcmd = '' + else: + curcmd += " " + jjj + + for cm in cmds: + c.execute(cm) + + print "Converting columns to new debversion type" + c.execute("ALTER TABLE binaries ALTER COLUMN version TYPE debversion") + c.execute("ALTER TABLE source ALTER COLUMN version TYPE debversion") + c.execute("ALTER TABLE upload_blocks ALTER COLUMN version TYPE debversion") + c.execute("ALTER TABLE pending_content_associations ALTER COLUMN version TYPE debversion") + + print "Committing" + c.execute("UPDATE config SET value = '19' WHERE name = 'db_revision'") self.db.commit() - except psycopg2.ProgrammingError, msg: + except psycopg2.InternalError, msg: self.db.rollback() - raise DBUpdateError, "Unable to apply process-new update 14, rollback issued. Error message : %s" % (str(msg)) - + raise DBUpdateError, "Unable to apply debversion update 19, rollback issued. Error message : %s" % (str(msg)) diff --git a/dak/dakdb/update20.py b/dak/dakdb/update20.py new file mode 100755 index 00000000..f4e34cb9 --- /dev/null +++ b/dak/dakdb/update20.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python +# coding=utf8 + +""" +Add policy queue handling support + +@contact: Debian FTP Master +@copyright: 2009 Mark Hymers +@license: GNU General Public License version 2 or later +""" + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+ +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + + +################################################################################ + +import psycopg2 +import time +import os +import datetime +import traceback + +from daklib.dak_exceptions import DBUpdateError +from daklib.config import Config + +################################################################################ + +def do_update(self): + print "Updating use of queue table" + + try: + c = self.db.cursor() + + cnf = Config() + + print "Adding path to queue table" + c.execute("ALTER TABLE queue ADD COLUMN path TEXT") + c.execute("SELECT * FROM queue") + rows = c.fetchall() + seenqueues = {} + for row in rows: + dir = cnf["Dir::Queue::%s" % row[1]].rstrip('/') + seenqueues[row[1].lower()] = 1 + print "Setting %s queue to use path %s" % (row[1], dir) + c.execute("UPDATE queue SET path = %s WHERE id = %s", (dir, row[0])) + + print "Adding missing queues to the queue table" + for q in cnf.SubTree("Dir::Queue").keys(): + qname = q.lower() + if qname in seenqueues.keys(): + continue + if qname in ["done", "holding", "reject", "newstage", "btsversiontrack"]: + print "Skipping queue %s" % qname + continue + pth = cnf["Dir::Queue::%s" % qname].rstrip('/') + if not os.path.exists(pth): + print "Skipping %s as %s does not exist" % (qname, pth) + continue + + print "Adding %s queue with path %s" % (qname, pth) + c.execute("INSERT INTO queue (queue_name, path) VALUES (%s, %s)", (qname, pth)) + seenqueues[qname] = 1 + + print "Adding queue and approved_for columns to known_changes" + c.execute("ALTER TABLE known_changes ADD COLUMN in_queue INT4 REFERENCES queue(id) DEFAULT NULL") + c.execute("ALTER TABLE known_changes ADD COLUMN approved_for INT4 REFERENCES queue(id) DEFAULT NULL") + + print "Adding policy queue column to suite table" + c.execute("ALTER TABLE suite DROP COLUMN policy_engine") + c.execute("ALTER TABLE suite ADD COLUMN policy_queue_id INT4 REFERENCES queue(id) DEFAULT NULL") + # Handle some of our common cases automatically + if seenqueues.has_key('proposedupdates'): + c.execute("""UPDATE suite SET policy_queue_id = (SELECT id FROM queue WHERE queue_name = 'proposedupdates') + WHERE suite_name = 'proposed-updates'""") + + if seenqueues.has_key('oldproposedupdates'): + c.execute("""UPDATE suite SET policy_queue_id = (SELECT id FROM queue WHERE queue_name = 'oldproposedupdates') + WHERE suite_name = 'oldstable-proposed-updates'""") + + print "Committing" + c.execute("UPDATE config SET value = '20' WHERE name = 'db_revision'") + self.db.commit() + + except psycopg2.InternalError, msg: + self.db.rollback() + raise DBUpdateError, "Unable to apply debversion update 20, rollback issued. 
Error message : %s" % (str(msg)) diff --git a/dak/dakdb/update21.py b/dak/dakdb/update21.py new file mode 100755 index 00000000..8e36883f --- /dev/null +++ b/dak/dakdb/update21.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python +# coding=utf8 + +""" +Modify queue autobuild support + +@contact: Debian FTP Master +@copyright: 2009 Mark Hymers +@license: GNU General Public License version 2 or later +""" + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + + +################################################################################ + +import psycopg2 +import time +import os +import datetime +import traceback + +from daklib.dak_exceptions import DBUpdateError +from daklib.config import Config + +################################################################################ + +def do_update(self): + print "Updating queue_build table" + + try: + c = self.db.cursor() + + cnf = Config() + + print "Adding copy_files field to queue table" + c.execute("ALTER TABLE queue ADD copy_pool_files BOOL NOT NULL DEFAULT FALSE") + + print "Adding queue_files table" + + c.execute("""CREATE TABLE queue_files ( + id SERIAL PRIMARY KEY, + queueid INT4 NOT NULL REFERENCES queue(id) ON DELETE RESTRICT, + insertdate TIMESTAMP NOT NULL DEFAULT now(), + lastused TIMESTAMP DEFAULT NULL, + filename TEXT NOT NULL, + fileid INT4 REFERENCES files(id) ON DELETE CASCADE)""") + + c.execute("""SELECT queue_build.filename, queue_build.last_used, queue_build.queue + FROM queue_build""") + + for r in c.fetchall(): + print r[0] + filename = r[0] + last_used = r[1] + queue = r[2] + try: + endlink = os.readlink(filename) + c.execute("SELECT files.id FROM files WHERE filename LIKE '%%%s'" % endlink[endlink.rindex('/')+1:]) + f = c.fetchone() + c.execute("""INSERT INTO queue_files (queueid, lastused, filename, fileid) VALUES + (%s, now(), %s, %s)""", (queue, filename[filename.rindex('/')+1:], f[0])) + except OSError, e: + print "Can't find file %s (%s)" % (filename, e) + + print "Dropping old queue_build table" + c.execute("DROP TABLE queue_build") + + print "Adding changes_pending_files table" + c.execute("""CREATE TABLE changes_pending_files ( + id SERIAL PRIMARY KEY, + changeid INT4 NOT NULL REFERENCES known_changes(id) ON DELETE CASCADE, + filename TEXT NOT NULL, + source BOOL NOT NULL DEFAULT FALSE, + filesize BIGINT NOT NULL, + md5sum TEXT NOT NULL, + sha1sum TEXT NOT NULL, + sha256sum TEXT NOT NULL)""") + + + print "Adding changes_pool_files table" + c.execute("""CREATE TABLE changes_pool_files ( + changeid INT4 NOT NULL REFERENCES known_changes(id) ON DELETE CASCADE, + fileid INT4 NOT NULL REFERENCES files(id) ON DELETE RESTRICT, + + PRIMARY KEY (changeid, fileid))""") + + print "Adding suite_queue_copy table" + c.execute("""CREATE TABLE suite_queue_copy ( + suite INT4 NOT NULL REFERENCES 
suite(id), + queue INT4 NOT NULL REFERENCES queue(id), + + PRIMARY KEY (suite, queue))""") + + # Link all suites from accepted + c.execute("""SELECT suite.id FROM suite""") + for s in c.fetchall(): + c.execute("""INSERT INTO suite_queue_copy (suite, queue) VALUES (%s, (SELECT id FROM queue WHERE queue_name = 'accepted'))""", s) + + # Parse the config and add any buildd stuff + cnf = Config() + c.execute("""INSERT INTO queue (queue_name, path) VALUES ('buildd', '%s')""" % cnf["Dir::QueueBuild"].rstrip('/')) + + for s in cnf.ValueList("Dinstall::QueueBuildSuites"): + c.execute("""INSERT INTO suite_queue_copy (suite, queue) + VALUES ( (SELECT id FROM suite WHERE suite_name = '%s'), + (SELECT id FROM queue WHERE queue_name = 'buildd'))""" % s.lower()) + + print "Committing" + c.execute("UPDATE config SET value = '21' WHERE name = 'db_revision'") + self.db.commit() + + except psycopg2.InternalError, msg: + self.db.rollback() + raise DBUpdateError, "Unable to apply queue_build 21, rollback issued. Error message : %s" % (str(msg)) diff --git a/dak/dakdb/update23.py b/dak/dakdb/update23.py new file mode 100644 index 00000000..9d97172b --- /dev/null +++ b/dak/dakdb/update23.py @@ -0,0 +1,239 @@ +#!/usr/bin/env python +# coding=utf8 + +""" +Adding a trainee field to the process-new notes + +@contact: Debian FTP Master +@copyright: 2009 Mike O'Connor +@license: GNU General Public License version 2 or later +""" + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+ +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + + +################################################################################ + +import psycopg2 +import time +from daklib.dak_exceptions import DBUpdateError + +################################################################################ + +def suites(): + """ + return a list of suites to operate on + """ + if Config().has_key( "%s::%s" %(options_prefix,"Suite")): + suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")]) + else: + suites = [ 'unstable', 'testing' ] +# suites = Config().SubTree("Suite").List() + + return suites + +def arches(cursor, suite): + """ + return a list of archs to operate on + """ + arch_list = [] + cursor.execute("""SELECT s.architecture, a.arch_string + FROM suite_architectures s + JOIN architecture a ON (s.architecture=a.id) + WHERE suite = :suite""", {'suite' : suite }) + + while True: + r = cursor.fetchone() + if not r: + break + + if r[1] != "source" and r[1] != "all": + arch_list.append((r[0], r[1])) + + return arch_list + +def do_update(self): + """ + Adding contents table as first step to maybe, finally getting rid + of apt-ftparchive + """ + + print __doc__ + + try: + c = self.db.cursor() + + c.execute("""CREATE TABLE pending_bin_contents ( + id serial NOT NULL, + package text NOT NULL, + version debversion NOT NULL, + arch int NOT NULL, + filename text NOT NULL, + type int NOT NULL, + PRIMARY KEY(id))""" ); + + c.execute("""CREATE TABLE deb_contents ( + filename text, + section text, + package text, + binary_id integer, + arch integer, + suite integer, + component text)""" ) + + c.execute("""CREATE TABLE udeb_contents ( + filename text, + section text, + package text, + binary_id integer, + suite integer, + arch integer, + component text )""" ) + + c.execute("""ALTER TABLE ONLY deb_contents + ADD CONSTRAINT deb_contents_arch_fkey + FOREIGN KEY (arch) REFERENCES architecture(id) + ON DELETE CASCADE;""") + + c.execute("""ALTER TABLE ONLY udeb_contents + ADD CONSTRAINT udeb_contents_arch_fkey + FOREIGN KEY (arch) REFERENCES architecture(id) + ON DELETE CASCADE;""") + + c.execute("""ALTER TABLE ONLY deb_contents + ADD CONSTRAINT deb_contents_pkey + PRIMARY KEY (filename,package,arch,suite);""") + + c.execute("""ALTER TABLE ONLY udeb_contents + ADD CONSTRAINT udeb_contents_pkey + PRIMARY KEY (filename,package,arch,suite);""") + + c.execute("""ALTER TABLE ONLY deb_contents + ADD CONSTRAINT deb_contents_suite_fkey + FOREIGN KEY (suite) REFERENCES suite(id) + ON DELETE CASCADE;""") + + c.execute("""ALTER TABLE ONLY udeb_contents + ADD CONSTRAINT udeb_contents_suite_fkey + FOREIGN KEY (suite) REFERENCES suite(id) + ON DELETE CASCADE;""") + + c.execute("""ALTER TABLE ONLY deb_contents + ADD CONSTRAINT deb_contents_binary_fkey + FOREIGN KEY (binary_id) REFERENCES binaries(id) + ON DELETE CASCADE;""") + + c.execute("""ALTER TABLE ONLY udeb_contents + ADD CONSTRAINT udeb_contents_binary_fkey + FOREIGN KEY (binary_id) REFERENCES binaries(id) + ON DELETE CASCADE;""") + + c.execute("""CREATE INDEX ind_deb_contents_binary ON deb_contents(binary_id);""" ) + + + suites = self.suites() + + for suite in [i.lower() for i in suites]: + suite_id = DBConn().get_suite_id(suite) + arch_list = arches(c, suite_id) + arch_list = arches(c, suite_id) + + for 
(arch_id,arch_str) in arch_list:
+            c.execute( "CREATE INDEX ind_deb_contents_%s_%s ON deb_contents (arch,suite) WHERE (arch=2 OR arch=%d) AND suite=%d" % (arch_str, suite, arch_id, suite_id) )
+
+        for section, sname in [("debian-installer","main"),
+                               ("non-free/debian-installer", "nonfree")]:
+            c.execute( "CREATE INDEX ind_udeb_contents_%s_%s ON udeb_contents (section,suite) WHERE section='%s' AND suite=%d" % (sname, suite, section, suite_id) )
+
+
+        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_bin_a() RETURNS trigger AS $$
+    event = TD["event"]
+    if event == "DELETE" or event == "UPDATE":
+
+        plpy.execute(plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 and suite=$2",
+                                  ["int","int"]),
+                     [TD["old"]["bin"], TD["old"]["suite"]])
+
+    if event == "INSERT" or event == "UPDATE":
+
+        content_data = plpy.execute(plpy.prepare(
+            """SELECT s.section, b.package, b.architecture, c.name, ot.type
+            FROM override o
+            JOIN override_type ot on o.type=ot.id
+            JOIN binaries b on b.package=o.package
+            JOIN files f on b.file=f.id
+            JOIN location l on l.id=f.location
+            JOIN section s on s.id=o.section
+            JOIN component c on c.id=l.component
+            WHERE b.id=$1
+            AND o.suite=$2
+            """,
+            ["int", "int"]),
+            [TD["new"]["bin"], TD["new"]["suite"]])[0]
+
+        # Prefix the component unless the package is in main
+        component_str = ""
+        if content_data["name"] != "main":
+            component_str = content_data["name"] + "/"
+
+        filenames = plpy.execute(plpy.prepare(
+            "SELECT bc.file FROM bin_contents bc where bc.binary_id=$1",
+            ["int"]),
+            [TD["new"]["bin"]])
+
+        for filename in filenames:
+            plpy.execute(plpy.prepare(
+                """INSERT INTO deb_contents
+                (file,section,package,binary_id,arch,suite,component)
+                VALUES($1,$2,$3,$4,$5,$6,$7)""",
+                ["text","text","text","int","int","int","text"]),
+                [filename["filename"],
+                 content_data["section"],
+                 content_data["package"],
+                 TD["new"]["bin"],
+                 content_data["architecture"],
+                 TD["new"]["suite"],
+                 component_str])
+$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
+""")
+
+
+        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_override() RETURNS trigger AS $$
+    event = TD["event"]
+    if event == "UPDATE":
+
+        otype = plpy.execute(plpy.prepare("SELECT type from override_type where id=$1",["int"]), [TD["new"]["type"]])[0]
+        if otype["type"].endswith("deb"):
+            table_name = "%s_contents" % otype["type"]
+            plpy.execute(plpy.prepare("UPDATE %s SET section=$1" % table_name,
+                                      ["text"]),
+                         [TD["new"]["section"]])
+
+$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
+""")
+        c.execute( """CREATE TRIGGER bin_associations_contents_trigger
+                      AFTER INSERT OR UPDATE OR DELETE ON bin_associations
+                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_bin_a();""")
+        c.execute("""CREATE TRIGGER override_contents_trigger
+                      AFTER UPDATE ON override
+                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
+
+        self.db.commit()
+
+    except psycopg2.ProgrammingError, msg:
+        self.db.rollback()
+        raise DBUpdateError, "Unable to apply process-new update 23, rollback issued.
Error message : %s" % (str(msg)) + diff --git a/dak/generate_index_diffs.py b/dak/generate_index_diffs.py index 4222c0cf..7e4b0058 100755 --- a/dak/generate_index_diffs.py +++ b/dak/generate_index_diffs.py @@ -254,7 +254,7 @@ def genchanges(Options, outdir, oldfile, origfile, maxdiffs = 14): if not os.path.isdir(outdir): os.mkdir(outdir) - w = os.popen("diff --ed - %s | gzip -c -9 > %s.gz" % + w = os.popen("diff --ed - %s | gzip --rsyncable -c -9 > %s.gz" % (newfile, difffile), "w") pipe_file(oldf, w) oldf.close() diff --git a/dak/generate_releases.py b/dak/generate_releases.py index 9de4614d..31cae490 100755 --- a/dak/generate_releases.py +++ b/dak/generate_releases.py @@ -1,9 +1,12 @@ #!/usr/bin/env python -""" Create all the Release files """ - -# Copyright (C) 2001, 2002, 2006 Anthony Towns +""" Create all the Release files +@contact: Debian FTPMaster +@Copyright: 2001, 2002, 2006 Anthony Towns +@copyright: 2009 Joerg Jaspert +@license: GNU General Public License version 2 or later +""" # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or @@ -22,8 +25,12 @@ ################################################################################ -import sys, os, stat, time -import gzip, bz2 +import sys +import os +import stat +import time +import gzip +import bz2 import apt_pkg from daklib import utils @@ -372,13 +379,21 @@ def main (): dest = Cnf["Dir::Root"] + tree + "/Release.gpg" if os.path.exists(dest): os.unlink(dest) + inlinedest = Cnf["Dir::Root"] + tree + "/InRelease" + if os.path.exists(inlinedest): + os.unlink(inlinedest) for keyid in signkeyids: - if keyid != "": defkeyid = "--default-key %s" % keyid - else: defkeyid = "" + if keyid != "": + defkeyid = "--default-key %s" % keyid + else: + defkeyid = "" os.system("gpg %s %s %s --detach-sign <%s >>%s" % (keyring, defkeyid, arguments, Cnf["Dir::Root"] + tree + "/Release", dest)) + os.system("gpg %s %s %s --clearsign <%s >>%s" % + (keyring, defkeyid, arguments, + Cnf["Dir::Root"] + tree + "/Release", inlinedest)) ####################################################################################### diff --git a/dak/import_keyring.py b/dak/import_keyring.py index 0b670357..e26eb7e5 100755 --- a/dak/import_keyring.py +++ b/dak/import_keyring.py @@ -2,6 +2,7 @@ """ Imports a keyring into the database """ # Copyright (C) 2007 Anthony Towns +# Copyright (C) 2009 Mark Hymers # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -20,12 +21,11 @@ ################################################################################ import sys, os, re -import apt_pkg, ldap, email.Utils +import apt_pkg, ldap from daklib.config import Config from daklib.dbconn import * - # Globals Options = None @@ -38,6 +38,7 @@ def get_uid_info(session): for (keyid, uid, name) in q.fetchall(): byname[uid] = (keyid, name) byid[keyid] = (uid, name) + return (byname, byid) def get_fingerprint_info(session): @@ -49,126 +50,6 @@ def get_fingerprint_info(session): ################################################################################ -def get_ldap_name(entry): - name = [] - for k in ["cn", "mn", "sn"]: - ret = entry.get(k) - if ret and ret[0] != "" and ret[0] != "-": - name.append(ret[0]) - return " ".join(name) - -################################################################################ - -class Keyring(object): 
- gpg_invocation = "gpg --no-default-keyring --keyring %s" +\ - " --with-colons --fingerprint --fingerprint" - keys = {} - fpr_lookup = {} - - def de_escape_gpg_str(self, str): - esclist = re.split(r'(\\x..)', str) - for x in range(1,len(esclist),2): - esclist[x] = "%c" % (int(esclist[x][2:],16)) - return "".join(esclist) - - def __init__(self, keyring): - self.cnf = Config() - k = os.popen(self.gpg_invocation % keyring, "r") - keys = self.keys - key = None - fpr_lookup = self.fpr_lookup - signingkey = False - for line in k.xreadlines(): - field = line.split(":") - if field[0] == "pub": - key = field[4] - (name, addr) = email.Utils.parseaddr(field[9]) - name = re.sub(r"\s*[(].*[)]", "", name) - if name == "" or addr == "" or "@" not in addr: - name = field[9] - addr = "invalid-uid" - name = self.de_escape_gpg_str(name) - keys[key] = {"email": addr} - if name != "": keys[key]["name"] = name - keys[key]["aliases"] = [name] - keys[key]["fingerprints"] = [] - signingkey = True - elif key and field[0] == "sub" and len(field) >= 12: - signingkey = ("s" in field[11]) - elif key and field[0] == "uid": - (name, addr) = email.Utils.parseaddr(field[9]) - if name and name not in keys[key]["aliases"]: - keys[key]["aliases"].append(name) - elif signingkey and field[0] == "fpr": - keys[key]["fingerprints"].append(field[9]) - fpr_lookup[field[9]] = key - - def generate_desired_users(self): - if Options["Generate-Users"]: - format = Options["Generate-Users"] - return self.generate_users_from_keyring(format) - if Options["Import-Ldap-Users"]: - return self.import_users_from_ldap() - return ({}, {}) - - def import_users_from_ldap(self): - LDAPDn = self.cnf["Import-LDAP-Fingerprints::LDAPDn"] - LDAPServer = self.cnf["Import-LDAP-Fingerprints::LDAPServer"] - l = ldap.open(LDAPServer) - l.simple_bind_s("","") - Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL, - "(&(keyfingerprint=*)(gidnumber=%s))" % (self.cnf["Import-Users-From-Passwd::ValidGID"]), - ["uid", "keyfingerprint", "cn", "mn", "sn"]) - - ldap_fin_uid_id = {} - - byuid = {} - byname = {} - keys = self.keys - fpr_lookup = self.fpr_lookup - - for i in Attrs: - entry = i[1] - uid = entry["uid"][0] - name = get_ldap_name(entry) - fingerprints = entry["keyFingerPrint"] - keyid = None - for f in fingerprints: - key = fpr_lookup.get(f, None) - if key not in keys: continue - keys[key]["uid"] = uid - - if keyid != None: continue - keyid = get_or_set_uid(uid).uid - byuid[keyid] = (uid, name) - byname[uid] = (keyid, name) - - return (byname, byuid) - - def generate_users_from_keyring(self, format): - byuid = {} - byname = {} - keys = self.keys - any_invalid = False - for x in keys.keys(): - if keys[x]["email"] == "invalid-uid": - any_invalid = True - keys[x]["uid"] = format % "invalid-uid" - else: - uid = format % keys[x]["email"] - keyid = get_or_set_uid(uid).uid - byuid[keyid] = (uid, keys[x]["name"]) - byname[uid] = (keyid, keys[x]["name"]) - keys[x]["uid"] = uid - if any_invalid: - uid = format % "invalid-uid" - keyid = get_or_set_uid(uid).uid - byuid[keyid] = (uid, "ungeneratable user id") - byname[uid] = (keyid, "ungeneratable user id") - return (byname, byuid) - -################################################################################ - def usage (exit_code=0): print """Usage: dak import-keyring [OPTION]... [KEYRING] -h, --help show this help and exit. 
@@ -197,6 +78,7 @@ def main(): ### Parse options Options = cnf.SubTree("Import-Keyring::Options") + if Options["Help"]: usage() @@ -204,7 +86,6 @@ def main(): usage(1) ### Keep track of changes made - changes = [] # (uid, changes strings) ### Initialise @@ -216,22 +97,21 @@ def main(): ### Parse the keyring keyringname = keyring_names[0] - keyring = Keyring(keyringname) - - is_dm = "false" - if cnf.has_key("Import-Keyring::"+keyringname+"::Debian-Maintainer"): - session.execute("UPDATE keyrings SET debian_maintainer = :dm WHERE name = :name", - {'dm': cnf["Import-Keyring::"+keyringname+"::Debian-Maintainer"], - 'name': keyringname.split("/")[-1]}) + keyring = get_keyring(keyringname, session) + if not keyring: + print "E: Can't load keyring %s from database" % keyringname + sys.exit(1) - is_dm = cnf["Import-Keyring::"+keyringname+"::Debian-Maintainer"] - - keyring_id = get_or_set_keyring( - keyringname.split("/")[-1], session, - ).keyring_id + keyring.load_keys(keyringname) ### Generate new uid entries if they're needed (from LDAP or the keyring) - (desuid_byname, desuid_byid) = keyring.generate_desired_users() + if Options["Generate-Users"]: + format = Options["Generate-Users"] + (desuid_byname, desuid_byid) = keyring.generate_users_from_keyring(Options["Generate-Users"], session) + elif Options["Import-Ldap-Users"]: + (desuid_byname, desuid_byid) = keyring.import_users_from_ldap(session) + else: + (desuid_byname, desuid_byid) = ({}, {}) ### Cache all the existing uid entries (db_uid_byname, db_uid_byid) = get_uid_info(session) @@ -240,7 +120,7 @@ def main(): for keyid in desuid_byid.keys(): uid = (keyid, desuid_byid[keyid][0]) name = desuid_byid[keyid][1] - oname = db_uid_byname[keyid][1] + oname = db_uid_byid[keyid][1] if name and oname != name: changes.append((uid[1], "Full name: %s" % (name))) session.execute("UPDATE uid SET name = :name WHERE id = :keyid", @@ -258,17 +138,28 @@ def main(): if keyid == None: keyid = db_fin_info.get(keyring.keys[z]["fingerprints"][0], [None])[0] for y in keyring.keys[z]["fingerprints"]: - fpr[y] = (keyid,keyring_id) + fpr[y] = (keyid, keyring.keyring_id) # For any keys that used to be in this keyring, disassociate them. # We don't change the uid, leaving that for historical info; if # the id should change, it'll be set when importing another keyring. for f,(u,fid,kr) in db_fin_info.iteritems(): - if kr != keyring_id: continue - if f in fpr: continue + if kr != keyring.keyring_id: + continue + + if f in fpr: + continue + changes.append((db_uid_byid.get(u, [None])[0], "Removed key: %s" % (f))) - session.execute("UPDATE fingerprint SET keyring = NULL WHERE id = :fprid", {'fprid': fid}) + session.execute("""UPDATE fingerprint + SET keyring = NULL, + source_acl_id = NULL, + binary_acl_id = NULL, + binary_reject = TRUE + WHERE id = :fprid""", {'fprid': fid}) + + session.execute("""DELETE FROM binary_acl_map WHERE fingerprint_id = :fprid""", {'fprid': fid}) # For the keys in this keyring, add/update any fingerprints that've # changed. 
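The rework above changes import-keyring's calling convention: the Keyring object now comes from the database via get_keyring() (which, unlike the old get_or_set_keyring(), returns None instead of creating a row), and the on-disk keyring is parsed afterwards with load_keys(). A minimal sketch of that flow in the codebase's Python 2 style; the session handling and the "%s" uid format string are illustrative assumptions, not dak's actual entry point:

    from daklib.dbconn import DBConn, get_keyring

    def import_one_keyring(keyringname):
        session = DBConn().session()
        keyring = get_keyring(keyringname, session)
        if keyring is None:
            # get_keyring() no longer auto-creates keyring rows.
            raise ValueError("keyring %s not in database" % keyringname)
        keyring.load_keys(keyringname)   # parse gpg --with-colons output
        # "%s" would make the uid the bare email address; in dak the
        # format string comes from the --generate-users option.
        (byname, byuid) = keyring.generate_users_from_keyring("%s", session)
        return (byname, byuid)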
@@ -276,19 +167,36 @@ def main(): for f in fpr: newuid = fpr[f][0] newuiduid = db_uid_byid.get(newuid, [None])[0] + (olduid, oldfid, oldkid) = db_fin_info.get(f, [-1,-1,-1]) - if olduid == None: olduid = -1 - if oldkid == None: oldkid = -1 + + if olduid == None: + olduid = -1 + + if oldkid == None: + oldkid = -1 + if oldfid == -1: changes.append((newuiduid, "Added key: %s" % (f))) + fp = Fingerprint() + fp.fingerprint = f + fp.keyring_id = keyring.keyring_id if newuid: - session.execute("""INSERT INTO fingerprint (fingerprint, uid, keyring) - VALUES (:fpr, :uid, :keyring)""", - {'fpr': f, 'uid': uid, 'keyring': keyring_id}) - else: - session.execute("""INSERT INTO fingerprint (fingerprint, keyring) - VALUES (:fpr, :keyring)""", - {'fpr': f, 'keyring': keyring_id}) + fp.uid_id = newuid + + fp.binary_acl_id = keyring.default_binary_acl_id + fp.source_acl_id = keyring.default_source_acl_id + fp.default_binary_reject = keyring.default_binary_reject + session.add(fp) + session.flush() + + for k in keyring.keyring_acl_map: + ba = BinaryACLMap() + ba.fingerprint_id = fp.fingerprint_id + ba.architecture_id = k.architecture_id + session.add(ba) + session.flush() + else: if newuid and olduid != newuid: if olduid != -1: @@ -297,25 +205,62 @@ def main(): else: changes.append((newuiduid, "Linked key: %s" % f)) changes.append((newuiduid, " (formerly unowned)")) + session.execute("UPDATE fingerprint SET uid = :uid WHERE id = :fpr", {'uid': newuid, 'fpr': oldfid}) - if oldkid != keyring_id: + # Don't move a key from a keyring with a higher priority to a lower one + if oldkid != keyring.keyring_id: + movekey = False + if oldkid == -1: + movekey = True + else: + try: + oldkeyring = session.query(Keyring).filter_by(keyring_id=oldkid).one() + except NotFoundError: + print "ERROR: Cannot find old keyring with id %s" % oldkid + sys.exit(1) + + if oldkeyring.priority < keyring.priority: + movekey = True + # Only change the keyring if it won't result in a loss of permissions - q = session.execute("SELECT debian_maintainer FROM keyrings WHERE id = :keyring", - {'keyring': keyring_id}) - if is_dm == "false" and not q.fetchall()[0][0]: - session.execute("UPDATE fingerprint SET keyring = :keyring WHERE id = :fpr", - {'keyring': keyring_id, 'fpr': oldfid}) + if movekey: + session.execute("""DELETE FROM binary_acl_map WHERE fingerprint_id = :fprid""", {'fprid': oldfid}) + + session.execute("""UPDATE fingerprint + SET keyring = :keyring, + source_acl_id = :source_acl_id, + binary_acl_id = :binary_acl_id, + binary_reject = :binary_reject + WHERE id = :fpr""", + {'keyring': keyring.keyring_id, + 'source_acl_id': keyring.default_source_acl_id, + 'binary_acl_id': keyring.default_binary_acl_id, + 'binary_reject': keyring.default_binary_reject, + 'fpr': oldfid}) + + session.flush() + + for k in keyring.keyring_acl_map: + ba = BinaryACLMap() + ba.fingerprint_id = oldfid + ba.architecture_id = k.architecture_id + session.add(ba) + session.flush() + else: - print "Key %s exists in both DM and DD keyrings. Not demoting." % (f) + print "Key %s exists in both %s and %s keyrings. Not demoting." % (oldkeyring.keyring_name, + keyring.keyring_name) # All done! 
session.commit() + # Print a summary changesd = {} for (k, v) in changes: - if k not in changesd: changesd[k] = "" + if k not in changesd: + changesd[k] = "" changesd[k] += " %s\n" % (v) keys = changesd.keys() diff --git a/dak/import_known_changes.py b/dak/import_known_changes.py new file mode 100755 index 00000000..cdb1d3af --- /dev/null +++ b/dak/import_known_changes.py @@ -0,0 +1,346 @@ +#!/usr/bin/env python +# coding=utf8 + +""" +Import known_changes files + +@contact: Debian FTP Master +@copyright: 2009 Mike O'Connor +@license: GNU General Public License version 2 or later +""" + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + + +################################################################################ + +import sys +import os +import logging +import threading +from daklib.dbconn import DBConn,get_knownchange +from daklib.config import Config +import apt_pkg +from daklib.dak_exceptions import DBUpdateError, InvalidDscError, ChangesUnicodeError +from daklib.changes import Changes +from daklib.utils import parse_changes, warn, gpgv_get_status_output, process_gpgv_output +import traceback + +# where in dak.conf all of our configuration will be stowed +options_prefix = "KnownChanges" +options_prefix = "%s::Options" % options_prefix + +log = logging.getLogger() + +################################################################################ + + +def usage (exit_code=0): + print """Usage: dak import-known-changes [options] + +OPTIONS + -j n + run with n threads concurrently + + -v, --verbose + show verbose information messages + + -q, --quiet + supress all output but errors + +""" + sys.exit(exit_code) + +def check_signature (sig_filename, data_filename=""): + fingerprint = None + + keyrings = [ + "/home/joerg/keyring/keyrings/debian-keyring.gpg", + "/home/joerg/keyring/keyrings/debian-keyring.pgp", + "/home/joerg/keyring/keyrings/debian-maintainers.gpg", + "/home/joerg/keyring/keyrings/debian-role-keys.gpg", + "/home/joerg/keyring/keyrings/emeritus-keyring.pgp", + "/home/joerg/keyring/keyrings/emeritus-keyring.gpg", + "/home/joerg/keyring/keyrings/removed-keys.gpg", + "/home/joerg/keyring/keyrings/removed-keys.pgp" + ] + + keyringargs = " ".join(["--keyring %s" % x for x in keyrings ]) + + # Build the command line + status_read, status_write = os.pipe() + cmd = "gpgv --status-fd %s %s %s" % (status_write, keyringargs, sig_filename) + + # Invoke gpgv on the file + (output, status, exit_status) = gpgv_get_status_output(cmd, status_read, status_write) + + # Process the status-fd output + (keywords, internal_error) = process_gpgv_output(status) + + # If we failed to parse the status-fd output, let's just whine and bail now + if internal_error: + warn("Couldn't parse signature") + return None + + # usually one would check for bad things here. 
We, however, do not care. + + # Next check gpgv exited with a zero return code + if exit_status: + warn("Couldn't parse signature") + return None + + # Sanity check the good stuff we expect + if not keywords.has_key("VALIDSIG"): + warn("Couldn't parse signature") + else: + args = keywords["VALIDSIG"] + if len(args) < 1: + warn("Couldn't parse signature") + else: + fingerprint = args[0] + + return fingerprint + + +class EndOfChanges(object): + """something enqueued to signify the last change""" + pass + + +class OneAtATime(object): + """ + a one space queue which sits between multiple possible producers + and multiple possible consumers + """ + def __init__(self): + self.next_in_line = None + self.read_lock = threading.Condition() + self.write_lock = threading.Condition() + self.die = False + + def plsDie(self): + self.die = True + self.write_lock.acquire() + self.write_lock.notifyAll() + self.write_lock.release() + + self.read_lock.acquire() + self.read_lock.notifyAll() + self.read_lock.release() + + def enqueue(self, next): + self.write_lock.acquire() + while self.next_in_line: + if self.die: + return + self.write_lock.wait() + + assert( not self.next_in_line ) + self.next_in_line = next + self.write_lock.release() + self.read_lock.acquire() + self.read_lock.notify() + self.read_lock.release() + + def dequeue(self): + self.read_lock.acquire() + while not self.next_in_line: + if self.die: + return + self.read_lock.wait() + + result = self.next_in_line + + self.next_in_line = None + self.read_lock.release() + self.write_lock.acquire() + self.write_lock.notify() + self.write_lock.release() + + if isinstance(result, EndOfChanges): + return None + + return result + +class ChangesToImport(object): + """A changes file to be enqueued to be processed""" + def __init__(self, checkdir, changesfile, count): + self.dirpath = checkdir + self.changesfile = changesfile + self.count = count + + def __str__(self): + return "#%d: %s in %s" % (self.count, self.changesfile, self.dirpath) + +class ChangesGenerator(threading.Thread): + """enqueues changes files to be imported""" + def __init__(self, parent, queue): + threading.Thread.__init__(self) + self.queue = queue + self.session = DBConn().session() + self.parent = parent + self.die = False + + def plsDie(self): + self.die = True + + def run(self): + cnf = Config() + count = 1 + for directory in [ "Accepted", "Byhand", "Done", "New", "ProposedUpdates", "OldProposedUpdates" ]: + checkdir = cnf["Dir::Queue::%s" % (directory) ] + if os.path.exists(checkdir): + print "Looking into %s" % (checkdir) + + for dirpath, dirnames, filenames in os.walk(checkdir, topdown=True): + if not filenames: + # Empty directory (or only subdirectories), next + continue + + for changesfile in filenames: + try: + if not changesfile.endswith(".changes"): + # Only interested in changes files. + continue + count += 1 + + if not get_knownchange(changesfile, self.session): + to_import = ChangesToImport(dirpath, changesfile, count) + if self.die: + return + self.queue.enqueue(to_import) + except KeyboardInterrupt: + print("got Ctrl-c in enqueue thread. 
terminating") + self.parent.plsDie() + sys.exit(1) + + self.queue.enqueue(EndOfChanges()) + +class ImportThread(threading.Thread): + def __init__(self, parent, queue): + threading.Thread.__init__(self) + self.queue = queue + self.session = DBConn().session() + self.parent = parent + self.die = False + + def plsDie(self): + self.die = True + + def run(self): + while True: + try: + if self.die: + return + to_import = self.queue.dequeue() + if not to_import: + return + + print( "Directory %s, file %7d, (%s)" % (to_import.dirpath[-10:], to_import.count, to_import.changesfile) ) + + changes = Changes() + changes.changes_file = to_import.changesfile + changesfile = os.path.join(to_import.dirpath, to_import.changesfile) + changes.changes = parse_changes(changesfile, signing_rules=-1) + changes.changes["fingerprint"] = check_signature(changesfile) + changes.add_known_changes(to_import.dirpath, self.session) + self.session.commit() + + except InvalidDscError, line: + warn("syntax error in .dsc file '%s', line %s." % (f, line)) + + except ChangesUnicodeError: + warn("found invalid changes file, not properly utf-8 encoded") + + except KeyboardInterrupt: + print("Caught C-c; on ImportThread. terminating.") + self.parent.plsDie() + sys.exit(1) + + except: + self.parent.plsDie() + sys.exit(1) + +class ImportKnownChanges(object): + def __init__(self,num_threads): + self.queue = OneAtATime() + self.threads = [ ChangesGenerator(self,self.queue) ] + + for i in range(num_threads): + self.threads.append( ImportThread(self,self.queue) ) + + try: + for thread in self.threads: + thread.start() + + except KeyboardInterrupt: + print("Caught C-c; terminating.") + utils.warn("Caught C-c; terminating.") + self.plsDie() + + def plsDie(self): + traceback.print_stack90 + for thread in self.threads: + print( "STU: before ask %s to die" % thread ) + thread.plsDie() + print( "STU: after ask %s to die" % thread ) + + self.threads=[] + sys.exit(1) + + +def main(): + cnf = Config() + + arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")), + ('j',"concurrency", "%s::%s" % (options_prefix,"Concurrency"),"HasArg"), + ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")), + ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")), + ] + + args = apt_pkg.ParseCommandLine(cnf.Cnf, arguments,sys.argv) + + num_threads = 1 + + if len(args) > 0: + usage() + + if cnf.has_key("%s::%s" % (options_prefix,"Help")): + usage() + + level=logging.INFO + if cnf.has_key("%s::%s" % (options_prefix,"Quiet")): + level=logging.ERROR + + elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")): + level=logging.DEBUG + + + logging.basicConfig( level=level, + format='%(asctime)s %(levelname)s %(message)s', + stream = sys.stderr ) + + if Config().has_key( "%s::%s" %(options_prefix,"Concurrency")): + num_threads = int(Config()[ "%s::%s" %(options_prefix,"Concurrency")]) + + ImportKnownChanges(num_threads) + + + + +if __name__ == '__main__': + main() diff --git a/dak/make_suite_file_list.py b/dak/make_suite_file_list.py index 3c690a51..349a4ae0 100755 --- a/dak/make_suite_file_list.py +++ b/dak/make_suite_file_list.py @@ -54,10 +54,6 @@ Options = None #: Parsed CommandLine arguments ################################################################################ -def Dict(**dict): return dict - -################################################################################ - def usage (exit_code=0): print """Usage: dak make-suite-file-list [OPTION] Write out file lists suitable for use with apt-ftparchive. 
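The threaded importer added above couples one ChangesGenerator producer to several ImportThread consumers through OneAtATime, a one-slot queue built from two Conditions. The same hand-off can be written with a single Condition, which also makes it easy to guarantee the lock is released on every path (note that OneAtATime.enqueue() returns while still holding write_lock when asked to die). A standalone sketch, not dak code:

    import threading

    class HandOff(object):
        """One-slot producer/consumer queue in the spirit of OneAtATime."""
        def __init__(self):
            self.slot = None
            self.cond = threading.Condition()

        def put(self, item):
            self.cond.acquire()
            try:
                while self.slot is not None:
                    self.cond.wait()        # wait for a consumer to drain the slot
                self.slot = item
                self.cond.notifyAll()
            finally:
                self.cond.release()         # released on every exit path

        def get(self):
            self.cond.acquire()
            try:
                while self.slot is None:
                    self.cond.wait()        # wait for a producer to fill the slot
                item, self.slot = self.slot, None
                self.cond.notifyAll()
                return item
            finally:
                self.cond.release()

As in the importer above, the producer would enqueue a sentinel object (compare EndOfChanges) so consumers know when to stop.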
@@ -309,6 +305,8 @@ def write_filelists(packages, dislocated_files, session): ################################################################################ def do_da_do_da(): + cnf = Config() + # If we're only doing a subset of suites, ensure we do enough to # be able to do arch: all mapping. if Options["Suite"]: @@ -357,7 +355,7 @@ SELECT s.id, s.source, 'source', s.version, l.path, f.filename, c.name, f.id, # 'id' comes from either 'binaries' or 'source', so it's not unique unique_id += 1 - packages[unique_id] = Dict(sourceid=sourceid, pkg=pkg, arch=arch, version=version, + packages[unique_id] = dict(sourceid=sourceid, pkg=pkg, arch=arch, version=version, path=path, filename=filename, component=component, file_id=file_id, suite=suite, filetype = filetype) diff --git a/dak/new_security_install.py b/dak/new_security_install.py index 3eb19643..55a48749 100755 --- a/dak/new_security_install.py +++ b/dak/new_security_install.py @@ -152,9 +152,9 @@ def advisory_info(): svs = srcverarches.keys() svs.sort() for sv in svs: - as = srcverarches[sv].keys() - as.sort() - print " %s (%s)" % (sv, ", ".join(as)) + as_ = srcverarches[sv].keys() + as_.sort() + print " %s (%s)" % (sv, ", ".join(as_)) def prompt(opts, default): p = "" diff --git a/dak/process_new.py b/dak/process_new.py index 9a6c8e33..bec55df5 100755 --- a/dak/process_new.py +++ b/dak/process_new.py @@ -77,43 +77,13 @@ Sections = None ################################################################################ def recheck(upload, session): - files = upload.pkg.files - - cnf = Config() - for f in files.keys(): - # The .orig.tar.gz can disappear out from under us is it's a - # duplicate of one in the archive. - if not files.has_key(f): - continue - # Check that the source still exists - if files[f]["type"] == "deb": - source_version = files[f]["source version"] - source_package = files[f]["source package"] - if not upload.pkg.changes["architecture"].has_key("source") \ - and not upload.source_exists(source_package, source_version, upload.pkg.changes["distribution"].keys()): - source_epochless_version = re_no_epoch.sub('', source_version) - dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version) - found = 0 - for q in ["Accepted", "Embargoed", "Unembargoed", "Newstage"]: - if cnf.has_key("Dir::Queue::%s" % (q)): - if os.path.exists(cnf["Dir::Queue::%s" % (q)] + '/' + dsc_filename): - found = 1 - if not found: - upload.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, f)) - - # Version and file overwrite checks - if files[f]["type"] == "deb": - upload.check_binary_against_db(f, session) - elif files[f]["type"] == "dsc": - upload.check_source_against_db(f, session) - upload.check_dsc_against_db(f, session) - + upload.recheck() if len(upload.rejects) > 0: answer = "XXX" if Options["No-Action"] or Options["Automatic"] or Options["Trainee"]: answer = 'S' - print "REJECT\n" + upload.rejects.join("\n"), + print "REJECT\n%s" % '\n'.join(upload.rejects) prompt = "[R]eject, Skip, Quit ?" 
while prompt.find(answer) == -1: @@ -124,7 +94,7 @@ def recheck(upload, session): answer = answer[:1].upper() if answer == 'R': - upload.do_reject(manual=0, reject_message=upload.rejects.join("\n")) + upload.do_reject(manual=0, reject_message='\n'.join(upload.rejects)) os.unlink(upload.pkg.changes_file[:-8]+".dak") return 0 elif answer == 'S': @@ -704,6 +674,7 @@ def do_new(upload, session): elif answer == 'E' and not Options["Trainee"]: new = edit_overrides (new, upload, session) elif answer == 'M' and not Options["Trainee"]: + upload.pkg.remove_known_changes() aborted = upload.do_reject(manual=1, reject_message=Options["Manual-Reject"], note=get_new_comments(changes.get("source", ""), session=session)) @@ -745,7 +716,6 @@ def usage (exit_code=0): print """Usage: dak process-new [OPTION]... [CHANGES]... -a, --automatic automatic run -h, --help show this help and exit. - -C, --comments-dir=DIR use DIR as comments-dir, for [o-]p-u-new -m, --manual-reject=MSG manual reject with `msg' -n, --no-action don't do anything -t, --trainee FTP Trainee mode @@ -847,39 +817,6 @@ def lock_package(package): finally: os.unlink(path) -# def move_to_dir (upload, dest, perms=0660, changesperms=0664): -# utils.move (upload.pkg.changes_file, dest, perms=changesperms) -# file_keys = upload.pkg.files.keys() -# for f in file_keys: -# utils.move (f, dest, perms=perms) - -# def is_source_in_queue_dir(qdir): -# entries = [ x for x in os.listdir(qdir) if x.startswith(Upload.pkg.changes["source"]) -# and x.endswith(".changes") ] -# for entry in entries: -# # read the .dak -# u = queue.Upload(Cnf) -# u.pkg.changes_file = os.path.join(qdir, entry) -# u.update_vars() -# if not u.pkg.changes["architecture"].has_key("source"): -# # another binary upload, ignore -# continue -# if Upload.pkg.changes["version"] != u.pkg.changes["version"]: -# # another version, ignore -# continue -# # found it! -# return True -# return False - -# def move_to_holding(suite, queue_dir): -# print "Moving to %s holding area." % (suite.upper(),) -# if Options["No-Action"]: -# return -# Logger.log(["Moving to %s" % (suite,), Upload.pkg.changes_file]) -# Upload.dump_vars(queue_dir) -# move_to_dir(queue_dir, perms=0664) -# os.unlink(Upload.pkg.changes_file[:-8]+".dak") - def _accept(upload): if Options["No-Action"]: return @@ -887,87 +824,21 @@ def _accept(upload): upload.accept(summary, short_summary, targetdir=Config()["Dir::Queue::Newstage"]) os.unlink(upload.pkg.changes_file[:-8]+".dak") -# def do_accept_stableupdate(upload,suite, q): -# cnf = Config() -# queue_dir = cnf["Dir::Queue::%s" % (q,)] -# if not upload.pkg.changes["architecture"].has_key("source"): -# # It is not a sourceful upload. So its source may be either in p-u -# # holding, in new, in accepted or already installed. -# if is_source_in_queue_dir(queue_dir): -# # It's in p-u holding, so move it there. -# print "Binary-only upload, source in %s." % (q,) -# move_to_holding(suite, queue_dir) -# elif Upload.source_exists(Upload.pkg.changes["source"], -# Upload.pkg.changes["version"]): -# # dak tells us that there is source available. At time of -# # writing this means that it is installed, so put it into -# # accepted. -# print "Binary-only upload, source installed." -# Logger.log([utils.getusername(), "PUNEW ACCEPT: %s" % (Upload.pkg.changes_file)]) -# _accept() -# elif is_source_in_queue_dir(Cnf["Dir::Queue::Accepted"]): -# # The source is in accepted, the binary cleared NEW: accept it. -# print "Binary-only upload, source in accepted." 
-# Logger.log([utils.getusername(), "PUNEW ACCEPT: %s" % (Upload.pkg.changes_file)]) -# _accept() -# elif is_source_in_queue_dir(Cnf["Dir::Queue::New"]): -# # It's in NEW. We expect the source to land in p-u holding -# # pretty soon. -# print "Binary-only upload, source in new." -# move_to_holding(suite, queue_dir) -# elif is_source_in_queue_dir(Cnf["Dir::Queue::Newstage"]): -# # It's in newstage. Accept into the holding area -# print "Binary-only upload, source in newstage." -# Logger.log([utils.getusername(), "PUNEW ACCEPT: %s" % (Upload.pkg.changes_file)]) -# _accept() -# else: -# # No case applicable. Bail out. Return will cause the upload -# # to be skipped. -# print "ERROR" -# print "Stable update failed. Source not found." -# return -# else: -# # We are handling a sourceful upload. Move to accepted if currently -# # in p-u holding and to p-u holding otherwise. -# if is_source_in_queue_dir(queue_dir): -# print "Sourceful upload in %s, accepting." % (q,) -# _accept() -# else: -# move_to_holding(suite, queue_dir) - def do_accept(upload): print "ACCEPT" cnf = Config() if not Options["No-Action"]: (summary, short_summary) = upload.build_summaries() -# if cnf.FindB("Dinstall::SecurityQueueHandling"): -# upload.dump_vars(cnf["Dir::Queue::Embargoed"]) -# move_to_dir(cnf["Dir::Queue::Embargoed"]) -# upload.queue_build("embargoed", cnf["Dir::Queue::Embargoed"]) -# # Check for override disparities -# upload.Subst["__SUMMARY__"] = summary -# else: - # Stable updates need to be copied to proposed-updates holding - # area instead of accepted. Sourceful uploads need to go - # to it directly, binaries only if the source has not yet been - # accepted into p-u. - for suite, q in [("proposed-updates", "ProposedUpdates"), - ("oldstable-proposed-updates", "OldProposedUpdates")]: - if not upload.pkg.changes["distribution"].has_key(suite): - continue - utils.fubar("stable accept not supported yet") -# return do_accept_stableupdate(suite, q) - # Just a normal upload, accept it... - _accept(upload) - -def check_status(files): - new = byhand = 0 - for f in files.keys(): - if files[f]["type"] == "byhand": - byhand = 1 - elif files[f].has_key("new"): - new = 1 - return (new, byhand) + + if cnf.FindB("Dinstall::SecurityQueueHandling"): + upload.dump_vars(cnf["Dir::Queue::Embargoed"]) + upload.move_to_dir(cnf["Dir::Queue::Embargoed"]) + upload.queue_build("embargoed", cnf["Dir::Queue::Embargoed"]) + # Check for override disparities + upload.Subst["__SUMMARY__"] = summary + else: + # Just a normal upload, accept it... 
+ _accept(upload) def do_pkg(changes_file, session): u = Upload() @@ -1024,58 +895,6 @@ def end(): ################################################################################ -# def do_comments(dir, opref, npref, line, fn): -# for comm in [ x for x in os.listdir(dir) if x.startswith(opref) ]: -# lines = open("%s/%s" % (dir, comm)).readlines() -# if len(lines) == 0 or lines[0] != line + "\n": continue -# changes_files = [ x for x in os.listdir(".") if x.startswith(comm[7:]+"_") -# and x.endswith(".changes") ] -# changes_files = sort_changes(changes_files) -# for f in changes_files: -# f = utils.validate_changes_file_arg(f, 0) -# if not f: continue -# print "\n" + f -# fn(f, "".join(lines[1:])) - -# if opref != npref and not Options["No-Action"]: -# newcomm = npref + comm[len(opref):] -# os.rename("%s/%s" % (dir, comm), "%s/%s" % (dir, newcomm)) - -# ################################################################################ - -# def comment_accept(changes_file, comments): -# Upload.pkg.changes_file = changes_file -# Upload.init_vars() -# Upload.update_vars() -# Upload.update_subst() -# files = Upload.pkg.files - -# if not recheck(): -# return # dak wants to REJECT, crap - -# (new, byhand) = check_status(files) -# if not new and not byhand: -# do_accept() - -# ################################################################################ - -# def comment_reject(changes_file, comments): -# Upload.pkg.changes_file = changes_file -# Upload.init_vars() -# Upload.update_vars() -# Upload.update_subst() - -# if not recheck(): -# pass # dak has its own reasons to reject as well, which is fine - -# reject(comments) -# print "REJECT\n" + reject_message, -# if not Options["No-Action"]: -# Upload.do_reject(0, reject_message) -# os.unlink(Upload.pkg.changes_file[:-8]+".dak") - -################################################################################ - def main(): global Options, Logger, Sections, Priorities @@ -1084,17 +903,16 @@ def main(): Arguments = [('a',"automatic","Process-New::Options::Automatic"), ('h',"help","Process-New::Options::Help"), - ('C',"comments-dir","Process-New::Options::Comments-Dir", "HasArg"), ('m',"manual-reject","Process-New::Options::Manual-Reject", "HasArg"), ('t',"trainee","Process-New::Options::Trainee"), ('n',"no-action","Process-New::Options::No-Action")] - for i in ["automatic", "help", "manual-reject", "no-action", "version", "comments-dir", "trainee"]: + for i in ["automatic", "help", "manual-reject", "no-action", "version", "trainee"]: if not cnf.has_key("Process-New::Options::%s" % (i)): cnf["Process-New::Options::%s" % (i)] = "" changes_files = apt_pkg.ParseCommandLine(cnf.Cnf,Arguments,sys.argv) - if len(changes_files) == 0 and not cnf.get("Process-New::Options::Comments-Dir",""): + if len(changes_files) == 0: changes_files = utils.get_changes_files(cnf["Dir::Queue::New"]) Options = cnf.SubTree("Process-New::Options") @@ -1119,22 +937,13 @@ def main(): # Kill me now? 
**FIXME** cnf["Dinstall::Options::No-Mail"] = "" -# commentsdir = cnf.get("Process-New::Options::Comments-Dir","") -# if commentsdir: -# if changes_files != []: -# sys.stderr.write("Can't specify any changes files if working with comments-dir") -# sys.exit(1) -# do_comments(commentsdir, "ACCEPT.", "ACCEPTED.", "OK", comment_accept) -# do_comments(commentsdir, "REJECT.", "REJECTED.", "NOTOK", comment_reject) -# else: - if True: - for changes_file in changes_files: - changes_file = utils.validate_changes_file_arg(changes_file, 0) - if not changes_file: - continue - print "\n" + changes_file - - do_pkg (changes_file, session) + for changes_file in changes_files: + changes_file = utils.validate_changes_file_arg(changes_file, 0) + if not changes_file: + continue + print "\n" + changes_file + + do_pkg (changes_file, session) end() diff --git a/dak/process_unchecked.py b/dak/process_unchecked.py index 5463f1a6..8a3e49d1 100755 --- a/dak/process_unchecked.py +++ b/dak/process_unchecked.py @@ -102,7 +102,7 @@ def init(): ################################################################################ def usage (exit_code=0): - print """Usage: dinstall [OPTION]... [CHANGES]... + print """Usage: dak process-unchecked [OPTION]... [CHANGES]... -a, --automatic automatic run -h, --help show this help and exit. -n, --no-action don't do anything @@ -191,10 +191,12 @@ def action(u): os.chdir(u.pkg.directory) u.do_reject(0, pi) elif answer == 'A': + u.pkg.add_known_changes( "Accepted" ) u.accept(summary, short_summary) u.check_override() u.remove() elif answer == queuekey: + u.pkg.add_known_changes( qu ) queue_info[qu]["process"](u, summary, short_summary) u.remove() elif answer == 'Q': @@ -507,7 +509,7 @@ def process_it(changes_file): action(u) - except SystemExit: + except (SystemExit, KeyboardInterrupt): raise except: diff --git a/dak/rm.py b/dak/rm.py index be3e1676..69b35971 100755 --- a/dak/rm.py +++ b/dak/rm.py @@ -554,7 +554,10 @@ def main (): if carbon_copy: Subst["__CC__"] += "\nCc: " + ", ".join(carbon_copy) Subst["__SUITE_LIST__"] = suites_list - Subst["__SUMMARY__"] = summary + summarymail = "%s\n------------------- Reason -------------------\n%s\n" % (summary, Options["Reason"]) + summarymail += "----------------------------------------------\n" + Subst["__SUMMARY__"] = summarymail + Subst["__SUBJECT__"] = "Removed package(s) from %s" % (suites_list) Subst["__ADMIN_ADDRESS__"] = cnf["Dinstall::MyAdminAddress"] Subst["__DISTRO__"] = cnf["Dinstall::MyDistribution"] Subst["__WHOAMI__"] = whoami diff --git a/dak/show_deferred.py b/dak/show_deferred.py index e8e1621d..2bb643d1 100755 --- a/dak/show_deferred.py +++ b/dak/show_deferred.py @@ -101,7 +101,6 @@ def table_header(): Closes """ - return res def table_footer(): return '
</table><p>non-NEW uploads are available, see the UploadQueue-README for more information.</p></body></html>
\n' diff --git a/dak/update_db.py b/dak/update_db.py index 88d8e4e6..4e7704e4 100755 --- a/dak/update_db.py +++ b/dak/update_db.py @@ -44,7 +44,7 @@ from daklib.dak_exceptions import DBUpdateError ################################################################################ Cnf = None -required_database_schema = 17 +required_database_schema = 21 ################################################################################ diff --git a/daklib/binary.py b/daklib/binary.py index 8a0cf092..a70aadb9 100755 --- a/daklib/binary.py +++ b/daklib/binary.py @@ -251,12 +251,16 @@ class Binary(object): except: print >> sys.stderr, "E: %s has non-unicode filename: %s" % (package,tarinfo.name) + result = True + except: traceback.print_exc() result = False os.chdir(cwd) + return result + __all__.append('Binary') diff --git a/daklib/changes.py b/daklib/changes.py index ff232224..fd09cb7f 100755 --- a/daklib/changes.py +++ b/daklib/changes.py @@ -29,6 +29,8 @@ Changes class for dak import os import stat + +import datetime from cPickle import Unpickler, Pickler from errno import EPERM @@ -36,6 +38,8 @@ from apt_inst import debExtractControl from apt_pkg import ParseSection from utils import open_file, fubar, poolify +from config import * +from dbconn import * ############################################################################### @@ -173,6 +177,59 @@ class Changes(object): return summary + def remove_known_changes(self, session=None): + if session is None: + session = DBConn().session() + privatetrans = True + + session.delete(get_knownchange(self.changes_file, session)) + + if privatetrans: + session.commit() + session.close() + + + def mark_missing_fields(self): + """add "missing" in fields which we will require for the known_changes table""" + for key in ['urgency', 'maintainer', 'fingerprint', 'changed-by' ]: + if (not self.changes.has_key(key)) or (not self.changes[key]): + self.changes[key]='missing' + + def add_known_changes(self, dirpath, session=None): + """add "missing" in fields which we will require for the known_changes table""" + cnf = Config() + privatetrans = False + if session is None: + session = DBConn().session() + privatetrans = True + + changesfile = os.path.join(dirpath, self.changes_file) + filetime = datetime.datetime.fromtimestamp(os.path.getctime(changesfile)) + + self.mark_missing_fields() + + session.execute( + """INSERT INTO known_changes + (changesname, seen, source, binaries, architecture, version, + distribution, urgency, maintainer, fingerprint, changedby, date) + VALUES (:changesfile,:filetime,:source,:binary, :architecture, + :version,:distribution,:urgency,:maintainer,:fingerprint,:changedby,:date)""", + { 'changesfile':self.changes_file, + 'filetime':filetime, + 'source':self.changes["source"], + 'binary':self.changes["binary"], + 'architecture':self.changes["architecture"], + 'version':self.changes["version"], + 'distribution':self.changes["distribution"], + 'urgency':self.changes["urgency"], + 'maintainer':self.changes["maintainer"], + 'fingerprint':self.changes["fingerprint"], + 'changedby':self.changes["changed-by"], + 'date':self.changes["date"]} ) + + if privatetrans: + session.commit() + session.close() def load_dot_dak(self, changesfile): """ diff --git a/daklib/config.py b/daklib/config.py index c86c1b36..2f24cd3a 100755 --- a/daklib/config.py +++ b/daklib/config.py @@ -36,7 +36,7 @@ from singleton import Singleton ################################################################################ -default_config = "/etc/dak/dak.conf" +default_config 
= "/etc/dak/dak.conf" #: default dak config, defines host properties def which_conf_file(): if os.getenv("DAK_CONFIG"): diff --git a/daklib/daklog.py b/daklib/daklog.py index dfcae368..fb33b0bd 100755 --- a/daklib/daklog.py +++ b/daklib/daklog.py @@ -38,7 +38,7 @@ class Logger: logfile = None program = None - def __init__ (self, Cnf, program, debug=0): + def __init__ (self, Cnf, program, debug=0, print_starting=True): "Initialize a new Logger object" self.Cnf = Cnf self.program = program @@ -58,7 +58,8 @@ class Logger: logfile = utils.open_file(logfilename, 'a') os.umask(umask) self.logfile = logfile - self.log(["program start"]) + if print_starting: + self.log(["program start"]) def log (self, details): "Log an event" diff --git a/daklib/dbconn.py b/daklib/dbconn.py index 18f427d4..921f1daa 100755 --- a/daklib/dbconn.py +++ b/daklib/dbconn.py @@ -34,13 +34,17 @@ ################################################################################ import os +import re import psycopg2 import traceback +import datetime from inspect import getargspec +import sqlalchemy from sqlalchemy import create_engine, Table, MetaData from sqlalchemy.orm import sessionmaker, mapper, relation +from sqlalchemy import types as sqltypes # Don't remove this, we re-export the exceptions to scripts which import us from sqlalchemy.exc import * @@ -54,6 +58,22 @@ from textutils import fix_maintainer ################################################################################ +# Patch in support for the debversion field type so that it works during +# reflection + +class DebVersion(sqltypes.Text): + def get_col_spec(self): + return "DEBVERSION" + +sa_major_version = sqlalchemy.__version__[0:3] +if sa_major_version == "0.5": + from sqlalchemy.databases import postgres + postgres.ischema_names['debversion'] = DebVersion +else: + raise Exception("dak isn't ported to SQLA versions != 0.5 yet. 
See daklib/dbconn.py") + +################################################################################ + __all__ = ['IntegrityError', 'SQLAlchemyError'] ################################################################################ @@ -267,12 +287,12 @@ def get_suites_binary_in(package, session=None): __all__.append('get_suites_binary_in') @session_wrapper -def get_binary_from_id(id, session=None): +def get_binary_from_id(binary_id, session=None): """ Returns DBBinary object for given C{id} - @type id: int - @param id: Id of the required binary + @type binary_id: int + @param binary_id: Id of the required binary @type session: Session @param session: Optional SQLA session object (a temporary one will be @@ -282,7 +302,7 @@ def get_binary_from_id(id, session=None): @return: DBBinary object for the given binary (None if not present) """ - q = session.query(DBBinary).filter_by(binary_id=id) + q = session.query(DBBinary).filter_by(binary_id=binary_id) try: return q.one() @@ -388,6 +408,28 @@ __all__.append('get_binary_components') ################################################################################ +class BinaryACL(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.binary_acl_id + +__all__.append('BinaryACL') + +################################################################################ + +class BinaryACLMap(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.binary_acl_map_id + +__all__.append('BinaryACLMap') + +################################################################################ + class Component(object): def __init__(self, *args, **kwargs): pass @@ -692,6 +734,10 @@ class PoolFile(object): def __repr__(self): return '' % self.filename + @property + def fullpath(self): + return os.path.join(self.location.path, self.filename) + __all__.append('PoolFile') @session_wrapper @@ -733,7 +779,7 @@ def check_poolfile(filename, filesize, md5sum, location_id, session=None): ret = (False, None) else: obj = q.one() - if obj.md5sum != md5sum or obj.filesize != filesize: + if obj.md5sum != md5sum or obj.filesize != int(filesize): ret = (False, obj) if ret is None: @@ -820,6 +866,33 @@ class Fingerprint(object): __all__.append('Fingerprint') +@session_wrapper +def get_fingerprint(fpr, session=None): + """ + Returns Fingerprint object for given fpr. + + @type fpr: string + @param fpr: The fpr to find / add + + @type session: SQLAlchemy + @param session: Optional SQL session object (a temporary one will be + generated if not supplied). 
+ + @rtype: Fingerprint + @return: the Fingerprint object for the given fpr or None + """ + + q = session.query(Fingerprint).filter_by(fingerprint=fpr) + + try: + ret = q.one() + except NoResultFound: + ret = None + + return ret + +__all__.append('get_fingerprint') + @session_wrapper def get_or_set_fingerprint(fpr, session=None): """ @@ -857,20 +930,139 @@ __all__.append('get_or_set_fingerprint') ################################################################################ +# Helper routine for Keyring class +def get_ldap_name(entry): + name = [] + for k in ["cn", "mn", "sn"]: + ret = entry.get(k) + if ret and ret[0] != "" and ret[0] != "-": + name.append(ret[0]) + return " ".join(name) + +################################################################################ + class Keyring(object): + gpg_invocation = "gpg --no-default-keyring --keyring %s" +\ + " --with-colons --fingerprint --fingerprint" + + keys = {} + fpr_lookup = {} + def __init__(self, *args, **kwargs): pass def __repr__(self): return '' % self.keyring_name + def de_escape_gpg_str(self, txt): + esclist = re.split(r'(\\x..)', txt) + for x in range(1,len(esclist),2): + esclist[x] = "%c" % (int(esclist[x][2:],16)) + return "".join(esclist) + + def load_keys(self, keyring): + import email.Utils + + if not self.keyring_id: + raise Exception('Must be initialized with database information') + + k = os.popen(self.gpg_invocation % keyring, "r") + key = None + signingkey = False + + for line in k.xreadlines(): + field = line.split(":") + if field[0] == "pub": + key = field[4] + (name, addr) = email.Utils.parseaddr(field[9]) + name = re.sub(r"\s*[(].*[)]", "", name) + if name == "" or addr == "" or "@" not in addr: + name = field[9] + addr = "invalid-uid" + name = self.de_escape_gpg_str(name) + self.keys[key] = {"email": addr} + if name != "": + self.keys[key]["name"] = name + self.keys[key]["aliases"] = [name] + self.keys[key]["fingerprints"] = [] + signingkey = True + elif key and field[0] == "sub" and len(field) >= 12: + signingkey = ("s" in field[11]) + elif key and field[0] == "uid": + (name, addr) = email.Utils.parseaddr(field[9]) + if name and name not in self.keys[key]["aliases"]: + self.keys[key]["aliases"].append(name) + elif signingkey and field[0] == "fpr": + self.keys[key]["fingerprints"].append(field[9]) + self.fpr_lookup[field[9]] = key + + def import_users_from_ldap(self, session): + import ldap + cnf = Config() + + LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"] + LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"] + + l = ldap.open(LDAPServer) + l.simple_bind_s("","") + Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL, + "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]), + ["uid", "keyfingerprint", "cn", "mn", "sn"]) + + ldap_fin_uid_id = {} + + byuid = {} + byname = {} + + for i in Attrs: + entry = i[1] + uid = entry["uid"][0] + name = get_ldap_name(entry) + fingerprints = entry["keyFingerPrint"] + keyid = None + for f in fingerprints: + key = self.fpr_lookup.get(f, None) + if key not in self.keys: + continue + self.keys[key]["uid"] = uid + + if keyid != None: + continue + keyid = get_or_set_uid(uid, session).uid_id + byuid[keyid] = (uid, name) + byname[uid] = (keyid, name) + + return (byname, byuid) + + def generate_users_from_keyring(self, format, session): + byuid = {} + byname = {} + any_invalid = False + for x in self.keys.keys(): + if self.keys[x]["email"] == "invalid-uid": + any_invalid = True + self.keys[x]["uid"] = format % "invalid-uid" + else: + uid = format % 
self.keys[x]["email"] + keyid = get_or_set_uid(uid, session).uid_id + byuid[keyid] = (uid, self.keys[x]["name"]) + byname[uid] = (keyid, self.keys[x]["name"]) + self.keys[x]["uid"] = uid + + if any_invalid: + uid = format % "invalid-uid" + keyid = get_or_set_uid(uid, session).uid_id + byuid[keyid] = (uid, "ungeneratable user id") + byname[uid] = (keyid, "ungeneratable user id") + + return (byname, byuid) + __all__.append('Keyring') @session_wrapper -def get_or_set_keyring(keyring, session=None): +def get_keyring(keyring, session=None): """ - If C{keyring} does not have an entry in the C{keyrings} table yet, create one - and return the new Keyring + If C{keyring} does not have an entry in the C{keyrings} table yet, return None If C{keyring} already has an entry, simply return the existing Keyring @type keyring: string @@ -885,12 +1077,67 @@ def get_or_set_keyring(keyring, session=None): try: return q.one() except NoResultFound: - obj = Keyring(keyring_name=keyring) - session.add(obj) - session.commit_or_flush() - return obj + return None + +__all__.append('get_keyring') + +################################################################################ + +class KeyringACLMap(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.keyring_acl_map_id + +__all__.append('KeyringACLMap') + +################################################################################ + +class KnownChange(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.changesname + +__all__.append('KnownChange') + +@session_wrapper +def get_knownchange(filename, session=None): + """ + returns knownchange object for given C{filename}. + + @type archive: string + @param archive: the name of the arhive + + @type session: Session + @param session: Optional SQLA session object (a temporary one will be + generated if not supplied) + + @rtype: Archive + @return: Archive object for the given name (None if not present) + + """ + q = session.query(KnownChange).filter_by(changesname=filename) + + try: + return q.one() + except NoResultFound: + return None + +__all__.append('get_knownchange') + +################################################################################ + +class KnownChangePendingFile(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.known_change_pending_file_id -__all__.append('get_or_set_keyring') +__all__.append('KnownChangePendingFile') ################################################################################ @@ -1354,106 +1601,55 @@ class Queue(object): def __repr__(self): return '' % self.queue_name - def autobuild_upload(self, changes, srcpath, session=None): - """ - Update queue_build database table used for incoming autobuild support. + def add_file_from_pool(self, poolfile): + """Copies a file into the pool. Assumes that the PoolFile object is + attached to the same SQLAlchemy session as the Queue object is. 
- @type changes: Changes - @param changes: changes object for the upload to process + The caller is responsible for committing after calling this function.""" + poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:] - @type srcpath: string - @param srcpath: path for the queue file entries/link destinations + # Check if we have a file of this name or this ID already + for f in self.queuefiles: + if f.fileid is not None and f.fileid == poolfile.file_id or \ + f.poolfile.filename == poolfile_basename: + # In this case, update the QueueFile entry so we + # don't remove it too early + f.lastused = datetime.now() + DBConn().session().object_session(pf).add(f) + return f - @type session: SQLAlchemy session - @param session: Optional SQLAlchemy session. If this is passed, the - caller is responsible for ensuring a transaction has begun and - committing the results or rolling back based on the result code. If - not passed, a commit will be performed at the end of the function, - otherwise the caller is responsible for commiting. + # Prepare QueueFile object + qf = QueueFile() + qf.queue_id = self.queue_id + qf.lastused = datetime.now() + qf.filename = dest - @rtype: NoneType or string - @return: None if the operation failed, a string describing the error if not - """ + targetpath = qf.fullpath + queuepath = os.path.join(self.path, poolfile_basename) - privatetrans = False - if session is None: - session = DBConn().session() - privatetrans = True - - # TODO: Remove by moving queue config into the database - conf = Config() - - for suitename in changes.changes["distribution"].keys(): - # TODO: Move into database as: - # buildqueuedir TEXT DEFAULT NULL (i.e. NULL is no build) - # buildqueuecopy BOOLEAN NOT NULL DEFAULT FALSE (i.e. default is symlink) - # This also gets rid of the SecurityQueueBuild hack below - if suitename not in conf.ValueList("Dinstall::QueueBuildSuites"): - continue - - # Find suite object - s = get_suite(suitename, session) - if s is None: - return "INTERNAL ERROR: Could not find suite %s" % suitename - - # TODO: Get from database as above - dest_dir = conf["Dir::QueueBuild"] - - # TODO: Move into database as above - if conf.FindB("Dinstall::SecurityQueueBuild"): - dest_dir = os.path.join(dest_dir, suitename) - - for file_entry in changes.files.keys(): - src = os.path.join(srcpath, file_entry) - dest = os.path.join(dest_dir, file_entry) - - # TODO: Move into database as above - if conf.FindB("Dinstall::SecurityQueueBuild"): - # Copy it since the original won't be readable by www-data - import utils - utils.copy(src, dest) - else: - # Create a symlink to it - os.symlink(src, dest) - - qb = QueueBuild() - qb.suite_id = s.suite_id - qb.queue_id = self.queue_id - qb.filename = dest - qb.in_queue = True - - session.add(qb) - - exists, symlinked = utils.ensure_orig_files(changes, dest, session) - - # Add symlinked files to the list of packages for later processing - # by apt-ftparchive - for filename in symlinked: - qb = QueueBuild() - qb.suite_id = s.suite_id - qb.queue_id = self.queue_id - qb.filename = filename - qb.in_queue = True - session.add(qb) - - # Update files to ensure they are not removed prematurely - for filename in exists: - qb = get_queue_build(filename, s.suite_id, session) - if qb is None: - qb.in_queue = True - qb.last_used = None - session.add(qb) + try: + if self.copy_pool_files: + # We need to copy instead of symlink + import utils + utils.copy(targetfile, queuepath) + # NULL in the fileid field implies a copy + qf.fileid = None + else: + 
os.symlink(targetfile, queuepath) + qf.fileid = poolfile.file_id + except OSError: + return None - if privatetrans: - session.commit() - session.close() + # Get the same session as the PoolFile is using and add the qf to it + DBConn().session().object_session(poolfile).add(qf) + + return qf - return None __all__.append('Queue') @session_wrapper -def get_or_set_queue(queuename, session=None): +def get_queue(queuename, session=None): """ Returns Queue object for given C{queue name}, creating it if it does not exist. @@ -1472,60 +1668,22 @@ def get_or_set_queue(queuename, session=None): q = session.query(Queue).filter_by(queue_name=queuename) try: - ret = q.one() + return q.one() except NoResultFound: - queue = Queue() - queue.queue_name = queuename - session.add(queue) - session.commit_or_flush() - ret = queue - - return ret + return None -__all__.append('get_or_set_queue') +__all__.append('get_queue') ################################################################################ -class QueueBuild(object): +class QueueFile(object): def __init__(self, *args, **kwargs): pass def __repr__(self): - return '' % (self.filename, self.queue_id) - -__all__.append('QueueBuild') - -@session_wrapper -def get_queue_build(filename, suite, session=None): - """ - Returns QueueBuild object for given C{filename} and C{suite}. - - @type filename: string - @param filename: The name of the file + return '' % (self.filename, self.queue_id) - @type suiteid: int or str - @param suiteid: Suite name or ID - - @type session: Session - @param session: Optional SQLA session object (a temporary one will be - generated if not supplied) - - @rtype: Queue - @return: Queue object for the given queue - """ - - if isinstance(suite, int): - q = session.query(QueueBuild).filter_by(filename=filename).filter_by(suite_id=suite) - else: - q = session.query(QueueBuild).filter_by(filename=filename) - q = q.join(Suite).filter_by(suite_name=suite) - - try: - return q.one() - except NoResultFound: - return None - -__all__.append('get_queue_build') +__all__.append('QueueFile') ################################################################################ @@ -1759,6 +1917,17 @@ __all__.append('get_source_in_suite') ################################################################################ +class SourceACL(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.source_acl_id + +__all__.append('SourceACL') + +################################################################################ + class SrcAssociation(object): def __init__(self, *args, **kwargs): pass @@ -2078,6 +2247,17 @@ __all__.append('get_uid_from_fingerprint') ################################################################################ +class UploadBlock(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % (self.source, self.upload_block_id) + +__all__.append('UploadBlock') + +################################################################################ + class DBConn(Singleton): """ database module init. 
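Most of the new ORM classes in this series of hunks (BinaryACL, BinaryACLMap, SourceACL, UploadBlock, KnownChange, ...) follow one pattern: an empty Python class classically mapped onto a table reflected from the database, with the id column aliased to a more descriptive property name. Reflection only works here because the DebVersion patch above registers the debversion column type with SQLAlchemy 0.5, the sole version this commit supports. A self-contained sketch of the reflect-then-map pattern; the connection URL, and using upload_blocks outside DBConn at all, are assumptions for illustration:

    from sqlalchemy import create_engine, MetaData, Table
    from sqlalchemy.orm import mapper, sessionmaker

    class UploadBlock(object):
        def __repr__(self):
            return '<UploadBlock %s>' % self.upload_block_id

    engine = create_engine("postgres:///projectb")      # assumed local projectb DB
    meta = MetaData(bind=engine)
    tbl = Table('upload_blocks', meta, autoload=True)   # reflect columns from the DB

    mapper(UploadBlock, tbl,
           properties=dict(upload_block_id=tbl.c.id))   # alias "id", as dbconn.py does

    session = sessionmaker(bind=engine)()
    for block in session.query(UploadBlock).all():
        print block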
@@ -2097,16 +2277,22 @@ class DBConn(Singleton): self.tbl_bin_contents = Table('bin_contents', self.db_meta, autoload=True) self.tbl_bin_associations = Table('bin_associations', self.db_meta, autoload=True) self.tbl_binaries = Table('binaries', self.db_meta, autoload=True) + self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True) + self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True) self.tbl_component = Table('component', self.db_meta, autoload=True) self.tbl_config = Table('config', self.db_meta, autoload=True) self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True) self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True) self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True) + self.tbl_changes_pending_files = Table('changes_pending_files', self.db_meta, autoload=True) + self.tbl_changes_pool_files = Table('changes_pool_files', self.db_meta, autoload=True) self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True) self.tbl_deb_contents = Table('deb_contents', self.db_meta, autoload=True) self.tbl_files = Table('files', self.db_meta, autoload=True) self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True) self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True) + self.tbl_known_changes = Table('known_changes', self.db_meta, autoload=True) + self.tbl_keyring_acl_map = Table('keyring_acl_map', self.db_meta, autoload=True) self.tbl_location = Table('location', self.db_meta, autoload=True) self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True) self.tbl_new_comments = Table('new_comments', self.db_meta, autoload=True) @@ -2115,17 +2301,20 @@ class DBConn(Singleton): self.tbl_pending_bin_contents = Table('pending_bin_contents', self.db_meta, autoload=True) self.tbl_priority = Table('priority', self.db_meta, autoload=True) self.tbl_queue = Table('queue', self.db_meta, autoload=True) - self.tbl_queue_build = Table('queue_build', self.db_meta, autoload=True) + self.tbl_queue_files = Table('queue_files', self.db_meta, autoload=True) self.tbl_section = Table('section', self.db_meta, autoload=True) self.tbl_source = Table('source', self.db_meta, autoload=True) + self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True) self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True) self.tbl_src_format = Table('src_format', self.db_meta, autoload=True) self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True) self.tbl_suite = Table('suite', self.db_meta, autoload=True) self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True) self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True) + self.tbl_suite_queue_copy = Table('suite_queue_copy', self.db_meta, autoload=True) self.tbl_udeb_contents = Table('udeb_contents', self.db_meta, autoload=True) self.tbl_uid = Table('uid', self.db_meta, autoload=True) + self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True) def __setupmappers(self): mapper(Architecture, self.tbl_architecture, @@ -2185,6 +2374,14 @@ class DBConn(Singleton): binassociations = relation(BinAssociation, primaryjoin=(self.tbl_binaries.c.id==self.tbl_bin_associations.c.bin)))) + mapper(BinaryACL, self.tbl_binary_acl, + properties = dict(binary_acl_id = self.tbl_binary_acl.c.id)) + + mapper(BinaryACLMap, self.tbl_binary_acl_map, + properties = dict(binary_acl_map_id = 
self.tbl_binary_acl_map.c.id, + fingerprint = relation(Fingerprint, backref="binary_acl_map"), + architecture = relation(Architecture))) + mapper(Component, self.tbl_component, properties = dict(component_id = self.tbl_component.c.id, component_name = self.tbl_component.c.name)) @@ -2210,12 +2407,29 @@ class DBConn(Singleton): uid_id = self.tbl_fingerprint.c.uid, uid = relation(Uid), keyring_id = self.tbl_fingerprint.c.keyring, - keyring = relation(Keyring))) + keyring = relation(Keyring), + source_acl = relation(SourceACL), + binary_acl = relation(BinaryACL))) mapper(Keyring, self.tbl_keyrings, properties = dict(keyring_name = self.tbl_keyrings.c.name, keyring_id = self.tbl_keyrings.c.id)) + mapper(KnownChange, self.tbl_known_changes, + properties = dict(known_change_id = self.tbl_known_changes.c.id, + poolfiles = relation(PoolFile, + secondary=self.tbl_changes_pool_files, + backref="changeslinks"), + files = relation(KnownChangePendingFile, backref="changesfile"))) + + mapper(KnownChangePendingFile, self.tbl_changes_pending_files, + properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.c.id)) + + mapper(KeyringACLMap, self.tbl_keyring_acl_map, + properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id, + keyring = relation(Keyring, backref="keyring_acl_map"), + architecture = relation(Architecture))) + mapper(Location, self.tbl_location, properties = dict(location_id = self.tbl_location.c.id, component_id = self.tbl_location.c.component, @@ -2253,10 +2467,9 @@ class DBConn(Singleton): mapper(Queue, self.tbl_queue, properties = dict(queue_id = self.tbl_queue.c.id)) - mapper(QueueBuild, self.tbl_queue_build, - properties = dict(suite_id = self.tbl_queue_build.c.suite, - queue_id = self.tbl_queue_build.c.queue, - queue = relation(Queue, backref='queuebuild'))) + mapper(QueueFile, self.tbl_queue_files, + properties = dict(queue = relation(Queue, backref='queuefiles'), + poolfile = relation(PoolFile, backref='queueinstances'))) mapper(Section, self.tbl_section, properties = dict(section_id = self.tbl_section.c.id, @@ -2278,7 +2491,11 @@ class DBConn(Singleton): srcfiles = relation(DSCFile, primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)), srcassociations = relation(SrcAssociation, - primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source)))) + primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source)), + srcuploaders = relation(SrcUploader))) + + mapper(SourceACL, self.tbl_source_acl, + properties = dict(source_acl_id = self.tbl_source_acl.c.id)) mapper(SrcAssociation, self.tbl_src_associations, properties = dict(sa_id = self.tbl_src_associations.c.id, @@ -2301,7 +2518,9 @@ class DBConn(Singleton): primaryjoin=(self.tbl_src_uploaders.c.maintainer==self.tbl_maintainer.c.id)))) mapper(Suite, self.tbl_suite, - properties = dict(suite_id = self.tbl_suite.c.id)) + properties = dict(suite_id = self.tbl_suite.c.id, + policy_queue = relation(Queue), + copy_queues = relation(Queue, secondary=self.tbl_suite_queue_copy))) mapper(SuiteArchitecture, self.tbl_suite_architectures, properties = dict(suite_id = self.tbl_suite_architectures.c.suite, @@ -2319,6 +2538,11 @@ class DBConn(Singleton): properties = dict(uid_id = self.tbl_uid.c.id, fingerprint = relation(Fingerprint))) + mapper(UploadBlock, self.tbl_upload_blocks, + properties = dict(upload_block_id = self.tbl_upload_blocks.c.id, + fingerprint = relation(Fingerprint, backref="uploadblocks"), + uid = relation(Uid, backref="uploadblocks"))) + ## Connection functions def 
__createconn(self): from config import Config diff --git a/daklib/extensions.py b/daklib/extensions.py deleted file mode 100755 index 88de8700..00000000 --- a/daklib/extensions.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python - -""" -Utility functions for extensions - -@contact: Debian FTP Master -@copyright: 2008 Anthony Towns -@license: GNU General Public License version 2 or later -""" - -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. - -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -dak_functions_to_replace = {} -dak_replaced_functions = {} - -def replace_dak_function(module, name): - """ - Decorator to make a function replace a standard dak function - in a given module. - - @type module: string - @param module: name of module where replaced function is in - - @type name: string - @param name: name of the function to replace - """ - - def x(f): - def myfunc(*a,**kw): - global replaced_funcs - f(dak_replaced_functions[name], *a, **kw) - myfunc.__name__ = f.__name__ - myfunc.__doc__ = f.__doc__ - myfunc.__dict__.update(f.__dict__) - - fnname = "%s:%s" % (module, name) - if fnname in dak_functions_to_replace: - raise Exception, \ - "%s in %s already marked to be replaced" % (name, module) - dak_functions_to_replace["%s:%s" % (module,name)] = myfunc - return f - return x - -################################################################################ - -def init(name, module, userext): - global dak_replaced_functions - - # This bit should be done automatically too - dak_replaced_functions = {} - for f,newfunc in dak_functions_to_replace.iteritems(): - m,f = f.split(":",1) - if len(f) > 0 and m == name: - dak_replaced_functions[f] = module.__dict__[f] - module.__dict__[f] = newfunc diff --git a/daklib/queue.py b/daklib/queue.py index f7d999a3..1694deb4 100755 --- a/daklib/queue.py +++ b/daklib/queue.py @@ -28,7 +28,6 @@ Queue utility functions for dak import errno import os -import pg import stat import sys import time @@ -38,7 +37,6 @@ import utils import commands import shutil import textwrap -import tempfile from types import * import yaml @@ -213,28 +211,14 @@ def check_valid(new): ############################################################################### -def lookup_uid_from_fingerprint(fpr, session): - uid = None - uid_name = "" - # This is a stupid default, but see the comments below - is_dm = False - - user = get_uid_from_fingerprint(fpr, session) - - if user is not None: - uid = user.uid - if user.name is None: - uid_name = '' - else: - uid_name = user.name - - # Check the relevant fingerprint (which we have to have) - for f in user.fingerprint: - if f.fingerprint == fpr: - is_dm = f.keyring.debian_maintainer - break - - return (uid, uid_name, is_dm) +def check_status(files): + new = byhand = 0 + for f in files.keys(): + if files[f]["type"] == "byhand": + byhand = 1 + elif files[f].has_key("new"): + new = 1 + return (new, byhand) 
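A minimal sketch of how the relocated check_status() behaves; the filenames are hypothetical and the dict mirrors the pkg.files structure used above. Any file of type "byhand" flags the upload BYHAND, and any file carrying a "new" key flags it NEW:

    files = {
        "foo_1.0-1.dsc":     {"type": "dsc", "new": ""},
        "foo_1.0-1_all.deb": {"type": "deb"},
        "foo_1.0-1.tar.gz":  {"type": "byhand"},
    }
    (new, byhand) = check_status(files)
    # new == 1 and byhand == 1: the upload needs both NEW and BYHAND handling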
############################################################################### @@ -287,19 +271,20 @@ class Upload(object): self.pkg.reset() def package_info(self): - msg = '' - - if len(self.rejects) > 0: - msg += "Reject Reasons:\n" - msg += "\n".join(self.rejects) + """ + Format various messages from this Upload to send to the maintainer. + """ - if len(self.warnings) > 0: - msg += "Warnings:\n" - msg += "\n".join(self.warnings) + msgs = ( + ('Reject Reasons', self.rejects), + ('Warnings', self.warnings), + ('Notes', self.notes), + ) - if len(self.notes) > 0: - msg += "Notes:\n" - msg += "\n".join(self.notes) + msg = '' + for title, messages in msgs: + if messages: + msg += '\n\n%s:\n%s' % (title, '\n'.join(messages)) return msg @@ -311,7 +296,7 @@ class Upload(object): # If 'dak process-unchecked' crashed out in the right place, architecture may still be a string. if not self.pkg.changes.has_key("architecture") or not \ - isinstance(self.pkg.changes["architecture"], DictType): + isinstance(self.pkg.changes["architecture"], dict): self.pkg.changes["architecture"] = { "Unknown" : "" } # and maintainer2047 may not exist. @@ -421,7 +406,7 @@ class Upload(object): fix_maintainer (self.pkg.changes["maintainer"]) except ParseMaintError, msg: self.rejects.append("%s: Maintainer field ('%s') failed to parse: %s" \ - % (filename, changes["maintainer"], msg)) + % (filename, self.pkg.changes["maintainer"], msg)) # ...likewise for the Changed-By: field if it exists. try: @@ -452,9 +437,8 @@ class Upload(object): # Check there isn't already a changes file of the same name in one # of the queue directories. base_filename = os.path.basename(filename) - for d in [ "Accepted", "Byhand", "Done", "New", "ProposedUpdates", "OldProposedUpdates" ]: - if os.path.exists(os.path.join(Cnf["Dir::Queue::%s" % (d) ], base_filename)): - self.rejects.append("%s: a file with this name already exists in the %s directory." % (base_filename, d)) + if get_knownchange(base_filename): + self.rejects.append("%s: a file with this name already exists." % (base_filename)) # Check the .changes is non-empty if not self.pkg.files: @@ -769,7 +753,7 @@ class Upload(object): # Validate the component if not get_component(entry["component"], session): - self.rejects.append("file '%s' has unknown component '%s'." % (f, component)) + self.rejects.append("file '%s' has unknown component '%s'." % (f, entry["component"])) return # See if the package is NEW @@ -784,7 +768,7 @@ class Upload(object): location = cnf["Dir::Pool"] l = get_location(location, entry["component"], archive, session) if l is None: - self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (component, archive)) + self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (entry["component"], archive)) entry["location id"] = -1 else: entry["location id"] = l.location_id @@ -1025,16 +1009,21 @@ class Upload(object): ########################################################################### - def ensure_all_source_exists(self, dest_dir=None): - """ - Ensure that dest_dir contains all the orig tarballs for the specified - changes. If it does not, symlink them into place. + def get_changelog_versions(self, source_dir): + """Extracts the source package and (optionally) grabs the + version history out of debian/changelog for the BTS.""" - If dest_dir is None, populate the current directory. 
- """ + cnf = Config() - if dest_dir is None: - dest_dir = os.getcwd() + # Find the .dsc (again) + dsc_filename = None + for f in self.pkg.files.keys(): + if self.pkg.files[f]["type"] == "dsc": + dsc_filename = f + + # If there isn't one, we have nothing to do. (We have reject()ed the upload already) + if not dsc_filename: + return # Create a symlink mirror of the source files in our temporary directory for f in self.pkg.files.keys(): @@ -1045,8 +1034,8 @@ class Upload(object): if not os.path.exists(src): return ftype = m.group(3) - if re_is_orig_source.match(f) and pkg.orig_files.has_key(f) and \ - pkg.orig_files[f].has_key("path"): + if re_is_orig_source.match(f) and self.pkg.orig_files.has_key(f) and \ + self.pkg.orig_files[f].has_key("path"): continue dest = os.path.join(os.getcwd(), f) os.symlink(src, dest) @@ -1059,26 +1048,6 @@ class Upload(object): dest = os.path.join(os.getcwd(), os.path.basename(orig_file)) os.symlink(self.pkg.orig_files[orig_file]["path"], dest) - ########################################################################### - - def get_changelog_versions(self, source_dir): - """Extracts a the source package and (optionally) grabs the - version history out of debian/changelog for the BTS.""" - - cnf = Config() - - # Find the .dsc (again) - dsc_filename = None - for f in self.pkg.files.keys(): - if self.pkg.files[f]["type"] == "dsc": - dsc_filename = f - - # If there isn't one, we have nothing to do. (We have reject()ed the upload already) - if not dsc_filename: - return - - self.ensure_all_source_exists() - # Extract the source cmd = "dpkg-source -sn -x %s" % (dsc_filename) (result, output) = commands.getstatusoutput(cmd) @@ -1211,7 +1180,98 @@ class Upload(object): self.ensure_hashes() ########################################################################### + + def ensure_orig(self, target_dir='.', session=None): + """ + Ensures that all orig files mentioned in the changes file are present + in target_dir. If they do not exist, they are symlinked into place. + + An list containing the symlinks that were created are returned (so they + can be removed). 
+ """ + + symlinked = [] + cnf = Config() + + for filename, entry in self.pkg.dsc_files.iteritems(): + if not re_is_orig_source.match(filename): + # File is not an orig; ignore + continue + + if os.path.exists(filename): + # File exists, no need to continue + continue + + def symlink_if_valid(path): + f = utils.open_file(path) + md5sum = apt_pkg.md5sum(f) + f.close() + + fingerprint = (os.stat(path)[stat.ST_SIZE], md5sum) + expected = (int(entry['size']), entry['md5sum']) + + if fingerprint != expected: + return False + + dest = os.path.join(target_dir, filename) + + os.symlink(path, dest) + symlinked.append(dest) + + return True + + session_ = session + if session is None: + session_ = DBConn().session() + + found = False + + # Look in the pool + for poolfile in get_poolfile_like_name('/%s' % filename, session_): + poolfile_path = os.path.join( + poolfile.location.path, poolfile.filename + ) + + if symlink_if_valid(poolfile_path): + found = True + break + + if session is None: + session_.close() + + if found: + continue + + # Look in some other queues for the file + queues = ('Accepted', 'New', 'Byhand', 'ProposedUpdates', + 'OldProposedUpdates', 'Embargoed', 'Unembargoed') + + for queue in queues: + if not cnf.get('Dir::Queue::%s' % queue): + continue + + queuefile_path = os.path.join( + cnf['Dir::Queue::%s' % queue], filename + ) + + if not os.path.exists(queuefile_path): + # Does not exist in this queue + continue + + if symlink_if_valid(queuefile_path): + break + + return symlinked + + ########################################################################### + def check_lintian(self): + cnf = Config() + + # Don't reject binary uploads + if not self.pkg.changes['architecture'].has_key('source'): + return + # Only check some distributions valid_dist = False for dist in ('unstable', 'experimental'): @@ -1222,13 +1282,11 @@ class Upload(object): if not valid_dist: return - self.ensure_all_source_exists() - - cnf = Config() tagfile = cnf.get("Dinstall::LintianTags") if tagfile is None: # We don't have a tagfile, so just don't do anything. return + # Parse the yaml file sourcefile = file(tagfile, 'r') sourcecontent = sourcefile.read() @@ -1239,6 +1297,9 @@ class Upload(object): utils.fubar("Can not read the lintian tags file %s, YAML error: %s." % (tagfile, msg)) return + # Try and find all orig mentioned in the .dsc + symlinked = self.ensure_orig() + # Now setup the input file for lintian. lintian wants "one tag per line" only, # so put it together like it. We put all types of tags in one file and then sort # through lintians output later to see if its a fatal tag we detected, or not. @@ -1258,8 +1319,12 @@ class Upload(object): # to then parse it. command = "lintian --show-overrides --tags-from-file %s %s" % (temp_filename, self.pkg.changes_file) (result, output) = commands.getstatusoutput(command) - # We are done with lintian, remove our tempfile + + # We are done with lintian, remove our tempfile and any symlinks we created os.unlink(temp_filename) + for symlink in symlinked: + os.unlink(symlink) + if (result == 2): utils.warn("lintian failed for %s [return code: %s]." % (self.pkg.changes_file, result)) utils.warn(utils.prefix_multi_line_string(output, " [possible output:] ")) @@ -1267,6 +1332,10 @@ class Upload(object): if len(output) == 0: return + def log(*txt): + if self.logger: + self.logger.log([self.pkg.changes_file, "check_lintian"] + list(txt)) + # We have output of lintian, this package isn't clean. Lets parse it and see if we # are having a victim for a reject. 
# W: tzdata: binary-without-manpage usr/sbin/tzconfig @@ -1293,12 +1362,16 @@ class Upload(object): elif etag in lintiantags['error']: # The tag is overriden - but is not allowed to be self.rejects.append("%s: Overriden tag %s found, but this tag may not be overwritten." % (epackage, etag)) + log("ftpmaster does not allow tag to be overridable", etag) else: # Tag is known, it is not overriden, direct reject. self.rejects.append("%s: Found lintian output: '%s %s', automatically rejected package." % (epackage, etag, etext)) # Now tell if they *might* override it. if etag in lintiantags['warning']: + log("auto rejecting", "overridable", etag) self.rejects.append("%s: If you have a good reason, you may override this lintian tag." % (epackage)) + else: + log("auto rejecting", "not overridable", etag) ########################################################################### def check_urgency(self): @@ -1361,7 +1434,201 @@ class Upload(object): except: self.rejects.append("%s: deb contents timestamp check failed [%s: %s]" % (filename, sys.exc_type, sys.exc_value)) + def check_if_upload_is_sponsored(self, uid_email, uid_name): + if uid_email in [self.pkg.changes["maintaineremail"], self.pkg.changes["changedbyemail"]]: + sponsored = False + elif uid_name in [self.pkg.changes["maintainername"], self.pkg.changes["changedbyname"]]: + sponsored = False + if uid_name == "": + sponsored = True + else: + sponsored = True + if ("source" in self.pkg.changes["architecture"] and uid_email and utils.is_email_alias(uid_email)): + sponsor_addresses = utils.gpg_get_key_addresses(self.pkg.changes["fingerprint"]) + if (self.pkg.changes["maintaineremail"] not in sponsor_addresses and + self.pkg.changes["changedbyemail"] not in sponsor_addresses): + self.pkg.changes["sponsoremail"] = uid_email + + return sponsored + + ########################################################################### + # check_signed_by_key checks + ########################################################################### + + def check_signed_by_key(self): + """Ensure the .changes is signed by an authorized uploader.""" + session = DBConn().session() + + # First of all we check that the person has proper upload permissions + # and that this upload isn't blocked + fpr = get_fingerprint(self.pkg.changes['fingerprint'], session=session) + + if fpr is None: + self.rejects.append("Cannot find fingerprint %s" % self.pkg.changes["fingerprint"]) + return + + # TODO: Check that import-keyring adds UIDs properly + if not fpr.uid: + self.rejects.append("Cannot find uid for fingerprint %s. 
Please contact ftpmaster@debian.org" % fpr.fingerprint) + return + + # Check that the fingerprint which uploaded has permission to do so + self.check_upload_permissions(fpr, session) + + # Check that this package is not in a transition + self.check_transition(session) + + session.close() + + + def check_upload_permissions(self, fpr, session): + # Check any one-off upload blocks + self.check_upload_blocks(fpr, session) + + # Start with DM as a special case + # DM is a special case unfortunately, so we check it first + # (keys with no source access get more access than DMs in one + # way; DMs can only upload for their packages whether source + # or binary, whereas keys with no access might be able to + # upload some binaries) + if fpr.source_acl.access_level == 'dm': + self.check_dm_upload(fpr, session) + else: + # Check source-based permissions for other types + if self.pkg.changes["architecture"].has_key("source"): + if fpr.source_acl.access_level is None: + rej = 'Fingerprint %s may not upload source' % fpr.fingerprint + rej += '\nPlease contact ftpmaster if you think this is incorrect' + self.rejects.append(rej) + return + else: + # If not a DM, we allow full upload rights + uid_email = "%s@debian.org" % (fpr.uid.uid) + self.check_if_upload_is_sponsored(uid_email, fpr.uid.name) + + + # Check binary upload permissions + # By this point we know that DMs can't have got here unless they + # are allowed to deal with the package concerned so just apply + # normal checks + if fpr.binary_acl.access_level == 'full': + return + + # Otherwise we're in the map case + tmparches = self.pkg.changes["architecture"].copy() + tmparches.pop('source', None) + + for bam in fpr.binary_acl_map: + tmparches.pop(bam.architecture.arch_string, None) + + if len(tmparches.keys()) > 0: + if fpr.binary_reject: + rej = ".changes file contains files of architectures not permitted for fingerprint %s" % fpr.fingerprint + rej += "\narchitectures involved are: " + ",".join(tmparches.keys()) + self.rejects.append(rej) + else: + # TODO: This is where we'll implement reject vs throw away binaries later + rej = "Uhm. I'm meant to throw away the binaries now but that's not implemented yet" + rej += "\nPlease complain to ftpmaster@debian.org as this shouldn't have been turned on" + rej += "\nFingerprint: %s" % (fpr.fingerprint) + self.rejects.append(rej) + + + def check_upload_blocks(self, fpr, session): + """Check whether any upload blocks apply to this source, source + version, uid / fpr combination""" + + def block_rej_template(fb): + rej = 'Manual upload block in place for package %s' % fb.source + if fb.version is not None: + rej += ', version %s' % fb.version + return rej + + for fb in session.query(UploadBlock).filter_by(source = self.pkg.changes['source']).all(): + # version is None if the block applies to all versions + if fb.version is None or fb.version == self.pkg.changes['version']: + # Check both fpr and uid - either is enough to cause a reject + if fb.fpr is not None: + if fb.fpr.fingerprint == fpr.fingerprint: + self.rejects.append(block_rej_template(fb) + ' for fingerprint %s\nReason: %s' % (fpr.fingerprint, fb.reason)) + if fb.uid is not None: + if fb.uid == fpr.uid: + self.rejects.append(block_rej_template(fb) + ' for uid %s\nReason: %s' % (fb.uid.uid, fb.reason)) + + + def check_dm_upload(self, fpr, session): + # Quoth the GR (http://www.debian.org/vote/2007/vote_003): + ## none of the uploaded packages are NEW + rej = False + for f in self.pkg.files.keys(): + if self.pkg.files[f].has_key("byhand"): + self.rejects.append("%s may not upload BYHAND file %s" % (fpr.uid.uid, f)) + rej = True + if self.pkg.files[f].has_key("new"): + self.rejects.append("%s may not upload NEW file %s" % (fpr.uid.uid, f)) + rej = True + + if rej: + return + + ## the most recent version of the package uploaded to unstable or + ## experimental includes the field "DM-Upload-Allowed: yes" in the source + ## section of its control file + q = session.query(DBSource).filter_by(source=self.pkg.changes["source"]) + q = q.join(SrcAssociation) + q = q.join(Suite).filter(Suite.suite_name.in_(['unstable', 'experimental'])) + q = q.order_by(desc('source.version')).limit(1) + + r = q.all() + + if len(r) != 1: + rej = "Could not find existing source package %s in unstable or experimental and this is a DM upload" % self.pkg.changes["source"] + self.rejects.append(rej) + return + + r = r[0] + if not r.dm_upload_allowed: + rej = "Source package %s does not have 'DM-Upload-Allowed: yes' in its most recent version (%s)" % (self.pkg.changes["source"], r.version) + self.rejects.append(rej) + return + + ## the Maintainer: field of the uploaded .changes file corresponds with + ## the owner of the key used (ie, non-developer maintainers may not sponsor + ## uploads) + if self.check_if_upload_is_sponsored(fpr.uid.uid, fpr.uid.name): + self.rejects.append("%s (%s) is not authorised to sponsor uploads" % (fpr.uid.uid, fpr.fingerprint)) + + ## the most recent version of the package uploaded to unstable or + ## experimental lists the uploader in the Maintainer: or Uploaders: fields (ie, + ## non-developer maintainers cannot NMU or hijack packages) + + # srcuploaders includes the maintainer + accept = False + for sup in r.srcuploaders: + (rfc822, rfc2047, name, email) = sup.maintainer.get_split_maintainer() + # Eww - I hope we never have two people with the same name in Debian + if email == fpr.uid.uid or name == fpr.uid.name: + accept = True + break + + if not accept: + self.rejects.append("%s is not in Maintainer or Uploaders of source package %s" % (fpr.uid.uid, self.pkg.changes["source"])) + return + + ## none of the packages are being 
taken over from other source packages + for b in self.pkg.changes["binary"].keys(): + for suite in self.pkg.changes["distribution"].keys(): + q = session.query(DBSource) + q = q.join(DBBinary).filter_by(package=b) + q = q.join(BinAssociation).join(Suite).filter_by(suite_name=suite) + + for s in q.all(): + if s.source != self.pkg.changes["source"]: + self.rejects.append("%s may not hijack %s from source package %s in suite %s" % (fpr.uid.uid, b, s, suite)) + + + def check_transition(self, session): cnf = Config() @@ -1434,92 +1701,9 @@ transition is done.""" return ########################################################################### - def check_signed_by_key(self): - """Ensure the .changes is signed by an authorized uploader.""" - session = DBConn().session() - - self.check_transition(session) - - (uid, uid_name, is_dm) = lookup_uid_from_fingerprint(self.pkg.changes["fingerprint"], session=session) - - # match claimed name with actual name: - if uid is None: - # This is fundamentally broken but need us to refactor how we get - # the UIDs/Fingerprints in order for us to fix it properly - uid, uid_email = self.pkg.changes["fingerprint"], uid - may_nmu, may_sponsor = 1, 1 - # XXX by default new dds don't have a fingerprint/uid in the db atm, - # and can't get one in there if we don't allow nmu/sponsorship - elif is_dm is False: - # If is_dm is False, we allow full upload rights - uid_email = "%s@debian.org" % (uid) - may_nmu, may_sponsor = 1, 1 - else: - # Assume limited upload rights unless we've discovered otherwise - uid_email = uid - may_nmu, may_sponsor = 0, 0 - - if uid_email in [self.pkg.changes["maintaineremail"], self.pkg.changes["changedbyemail"]]: - sponsored = 0 - elif uid_name in [self.pkg.changes["maintainername"], self.pkg.changes["changedbyname"]]: - sponsored = 0 - if uid_name == "": sponsored = 1 - else: - sponsored = 1 - if ("source" in self.pkg.changes["architecture"] and - uid_email and utils.is_email_alias(uid_email)): - sponsor_addresses = utils.gpg_get_key_addresses(self.pkg.changes["fingerprint"]) - if (self.pkg.changes["maintaineremail"] not in sponsor_addresses and - self.pkg.changes["changedbyemail"] not in sponsor_addresses): - self.pkg.changes["sponsoremail"] = uid_email - - if sponsored and not may_sponsor: - self.rejects.append("%s is not authorised to sponsor uploads" % (uid)) - - if not sponsored and not may_nmu: - should_reject = True - highest_sid, highest_version = None, None - - # XXX: This reimplements in SQLA what existed before but it's fundamentally fucked - # It ignores higher versions with the dm_upload_allowed flag set to false - # I'm keeping the existing behaviour for now until I've gone back and - # checked exactly what the GR says - mhy - for si in get_sources_from_name(source=self.pkg.changes['source'], dm_upload_allowed=True, session=session): - if highest_version is None or apt_pkg.VersionCompare(si.version, highest_version) == 1: - highest_sid = si.source_id - highest_version = si.version - - if highest_sid is None: - self.rejects.append("Source package %s does not have 'DM-Upload-Allowed: yes' in its most recent version" % self.pkg.changes["source"]) - else: - for sup in session.query(SrcUploader).join(DBSource).filter_by(source_id=highest_sid): - (rfc822, rfc2047, name, email) = sup.maintainer.get_split_maintainer() - if email == uid_email or name == uid_name: - should_reject = False - break - - if should_reject is True: - self.rejects.append("%s is not in Maintainer or Uploaders of source package %s" % (uid, 
self.pkg.changes["source"])) - - for b in self.pkg.changes["binary"].keys(): - for suite in self.pkg.changes["distribution"].keys(): - q = session.query(DBSource) - q = q.join(DBBinary).filter_by(package=b) - q = q.join(BinAssociation).join(Suite).filter_by(suite_name=suite) - - for s in q.all(): - if s.source != self.pkg.changes["source"]: - self.rejects.append("%s may not hijack %s from source package %s in suite %s" % (uid, b, s, suite)) - - for f in self.pkg.files.keys(): - if self.pkg.files[f].has_key("byhand"): - self.rejects.append("%s may not upload BYHAND file %s" % (uid, f)) - if self.pkg.files[f].has_key("new"): - self.rejects.append("%s may not upload NEW file %s" % (uid, f)) - - session.close() - + # End check_signed_by_key checks ########################################################################### + def build_summaries(self): """ Build a summary of changes the upload introduces. """ @@ -1739,23 +1923,12 @@ distribution.""" os.rename(temp_filename, filename) os.chmod(filename, 0644) - # Its is Cnf["Dir::Queue::Accepted"] here, not targetdir! - # we do call queue_build too - # well yes, we'd have had to if we were inserting into accepted - # now. thats database only. - # urgh, that's going to get messy - # so i make the p-n call to it *also* using accepted/ - # but then the packages will be in the queue_build table without the files being there - # as the buildd queue is only regenerated whenever unchecked runs - # ah, good point - # so it will work out, as unchecked move it over - # that's all completely sick - # yes - # This routine returns None on success or an error on failure - res = get_or_set_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"]) - if res: - utils.fubar(res) + # TODO: Replace queue copying using the new queue.add_file_from_pool routine + # and by looking up which queues in suite.copy_queues + #res = get_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"]) + #if res: + # utils.fubar(res) def check_override(self): @@ -1792,14 +1965,14 @@ distribution.""" ########################################################################### - def remove(self, dir=None): + def remove(self, from_dir=None): """ Used (for instance) in p-u to remove the package from unchecked """ - if dir is None: + if from_dir is None: os.chdir(self.pkg.directory) else: - os.chdir(dir) + os.chdir(from_dir) for f in self.pkg.files.keys(): os.unlink(f) @@ -1964,7 +2137,7 @@ distribution.""" return 0 ################################################################################ - def in_override_p(self, package, component, suite, binary_type, file, session): + def in_override_p(self, package, component, suite, binary_type, filename, session): """ Check if a package already has override entries in the DB @@ -1980,8 +2153,8 @@ distribution.""" @type binary_type: string @param binary_type: type of the package - @type file: string - @param file: filename we check + @type filename: string + @param filename: filename we check @return: the database result. But noone cares anyway. 
@@ -2007,8 +2180,8 @@ distribution.""" # Remember the section and priority so we can check them later if appropriate if len(result) > 0: result = result[0] - self.pkg.files[file]["override section"] = result.section.section - self.pkg.files[file]["override priority"] = result.priority.priority + self.pkg.files[filename]["override section"] = result.section.section + self.pkg.files[filename]["override priority"] = result.priority.priority return result return None @@ -2036,13 +2209,13 @@ distribution.""" ################################################################################ - def cross_suite_version_check(self, sv_list, file, new_version, sourceful=False): + def cross_suite_version_check(self, sv_list, filename, new_version, sourceful=False): """ @type sv_list: list @param sv_list: list of (suite, version) tuples to check - @type file: string - @param file: XXX + @type filename: string + @param filename: XXX @type new_version: string @param new_version: XXX @@ -2067,7 +2240,7 @@ distribution.""" vercmp = apt_pkg.VersionCompare(new_version, existent_version) if suite in must_be_newer_than and sourceful and vercmp < 1: - self.rejects.append("%s: old version (%s) in %s >= new version (%s) targeted at %s." % (file, existent_version, suite, new_version, target_suite)) + self.rejects.append("%s: old version (%s) in %s >= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite)) if suite in must_be_older_than and vercmp > -1: cansave = 0 @@ -2100,7 +2273,7 @@ distribution.""" self.rejects.append("Won't propogate NEW packages.") elif apt_pkg.VersionCompare(new_version, add_version) < 0: # propogation would be redundant. no need to reject though. - self.warnings.append("ignoring versionconflict: %s: old version (%s) in %s <= new version (%s) targeted at %s." % (file, existent_version, suite, new_version, target_suite)) + self.warnings.append("ignoring versionconflict: %s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite)) cansave = 1 elif apt_pkg.VersionCompare(new_version, add_version) > 0 and \ apt_pkg.VersionCompare(add_version, target_version) >= 0: @@ -2111,29 +2284,29 @@ distribution.""" cansave = 1 if not cansave: - self.reject.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (file, existent_version, suite, new_version, target_suite)) + self.rejects.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite)) ################################################################################ - def check_binary_against_db(self, file, session): + def check_binary_against_db(self, filename, session): # Ensure version is sane q = session.query(BinAssociation) - q = q.join(DBBinary).filter(DBBinary.package==self.pkg.files[file]["package"]) - q = q.join(Architecture).filter(Architecture.arch_string.in_([self.pkg.files[file]["architecture"], 'all'])) + q = q.join(DBBinary).filter(DBBinary.package==self.pkg.files[filename]["package"]) + q = q.join(Architecture).filter(Architecture.arch_string.in_([self.pkg.files[filename]["architecture"], 'all'])) self.cross_suite_version_check([ (x.suite.suite_name, x.binary.version) for x in q.all() ], - file, self.pkg.files[file]["version"], sourceful=False) + filename, self.pkg.files[filename]["version"], sourceful=False) # Check for any existing copies of the file - q = session.query(DBBinary).filter_by(package=self.pkg.files[file]["package"]) - q = q.filter_by(version=self.pkg.files[file]["version"]) - q = q.join(Architecture).filter_by(arch_string=self.pkg.files[file]["architecture"]) + q = session.query(DBBinary).filter_by(package=self.pkg.files[filename]["package"]) + q = q.filter_by(version=self.pkg.files[filename]["version"]) + q = q.join(Architecture).filter_by(arch_string=self.pkg.files[filename]["architecture"]) if q.count() > 0: - self.rejects.append("%s: can not overwrite existing copy already in the archive." % (file)) + self.rejects.append("%s: can not overwrite existing copy already in the archive." % filename) ################################################################################ - def check_source_against_db(self, file, session): + def check_source_against_db(self, filename, session): """ """ source = self.pkg.dsc.get("source") @@ -2144,10 +2317,10 @@ distribution.""" q = q.join(DBSource).filter(DBSource.source==source) self.cross_suite_version_check([ (x.suite.suite_name, x.source.version) for x in q.all() ], - file, version, sourceful=True) + filename, version, sourceful=True) ################################################################################ - def check_dsc_against_db(self, file, session): + def check_dsc_against_db(self, filename, session): """ @warning: NB: this function can remove entries from the 'files' index [if @@ -2273,15 +2446,53 @@ distribution.""" orig_files[dsc_name]["path"] = in_otherdir if not found: - self.rejects.append("%s refers to %s, but I can't find it in the queue or in the pool." % (file, dsc_name)) + self.rejects.append("%s refers to %s, but I can't find it in the queue or in the pool." % (filename, dsc_name)) continue else: - self.rejects.append("%s refers to %s, but I can't find it in the queue." % (file, dsc_name)) + self.rejects.append("%s refers to %s, but I can't find it in the queue." % (filename, dsc_name)) continue if actual_md5 != dsc_entry["md5sum"]: - self.rejects.append("md5sum for %s doesn't match %s." % (found, file)) + self.rejects.append("md5sum for %s doesn't match %s." % (found, filename)) if actual_size != int(dsc_entry["size"]): - self.rejects.append("size for %s doesn't match %s." % (found, file)) + self.rejects.append("size for %s doesn't match %s." % (found, filename)) + + ################################################################################ + # This is used by process-new and process-holding to recheck a changes file + # at the time we're running. 
It mainly wraps various other internal functions + # and is similar to accepted_checks - these should probably be tidied up + # and combined + def recheck(self, session): + cnf = Config() + for f in self.pkg.files.keys(): + # The .orig.tar.gz can disappear out from under us if it's a + # duplicate of one in the archive. + if not self.pkg.files.has_key(f): + continue + + entry = self.pkg.files[f] + + # Check that the source still exists + if entry["type"] == "deb": + source_version = entry["source version"] + source_package = entry["source package"] + if not self.pkg.changes["architecture"].has_key("source") \ + and not source_exists(source_package, source_version, self.pkg.changes["distribution"].keys(), session): + source_epochless_version = re_no_epoch.sub('', source_version) + dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version) + found = False + for q in ["Accepted", "Embargoed", "Unembargoed", "Newstage"]: + if cnf.has_key("Dir::Queue::%s" % (q)): + if os.path.exists(cnf["Dir::Queue::%s" % (q)] + '/' + dsc_filename): + found = True + if not found: + self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, f)) + + # Version and file overwrite checks + if entry["type"] == "deb": + self.check_binary_against_db(f, session) + elif entry["type"] == "dsc": + self.check_source_against_db(f, session) + self.check_dsc_against_db(f, session) ################################################################################ def accepted_checks(self, overwrite_checks, session): diff --git a/daklib/utils.py b/daklib/utils.py index accf5fdb..c3e4dbb3 100755 --- a/daklib/utils.py +++ b/daklib/utils.py @@ -64,15 +64,18 @@ key_uid_email_cache = {} #: Cache for email addresses from gpg key uids known_hashes = [("sha1", apt_pkg.sha1sum, (1, 8)), ("sha256", apt_pkg.sha256sum, (1, 8))] #: hashes we accept for entries in .changes/.dsc -# Monkeypatch commands.getstatusoutput as it returns a "0" exit code in -# all situations under lenny's Python. -import commands +# Monkeypatch commands.getstatusoutput as it may not return the correct exit +# code in lenny's Python. This also affects commands.getoutput and +# commands.getstatus. def dak_getstatusoutput(cmd): pipe = subprocess.Popen(cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output = "".join(pipe.stdout.readlines()) + if output[-1:] == '\n': + output = output[:-1] + ret = pipe.wait() if ret is None: ret = 0 @@ -305,13 +308,13 @@ def check_hash(where, files, hashname, hashfunc): try: try: file_handle = open_file(f) - + # Check for the hash entry, to not trigger a KeyError. if not files[f].has_key(hash_key(hashname)): rejmsg.append("%s: misses %s checksum in %s" % (f, hashname, where)) continue - + # Actually check the hash for correctness. 
if hashfunc(file_handle) != files[f][hash_key(hashname)]: rejmsg.append("%s: %s check failed in %s" % (f, hashname, @@ -529,7 +532,8 @@ def build_file_list(changes, is_a_dsc=0, field="files", hashname="md5sum"): raise NoFilesFieldError # Validate .changes Format: field - validate_changes_format(parse_format(changes['format']), field) + if not is_a_dsc: + validate_changes_format(parse_format(changes['format']), field) includes_section = (not is_a_dsc) and field == "files" @@ -554,7 +558,7 @@ def build_file_list(changes, is_a_dsc=0, field="files", hashname="md5sum"): (section, component) = extract_component_from_section(section) - files[name] = Dict(size=size, section=section, + files[name] = dict(size=size, section=section, priority=priority, component=component) files[name][hashname] = md5 @@ -612,7 +616,7 @@ def send_mail (message, filename=""): if len(match) == 0: del message_raw[field] else: - message_raw.replace_header(field, string.join(match, ", ")) + message_raw.replace_header(field, ', '.join(match)) # Change message fields in order if we don't have a To header if not message_raw.has_key("To"): @@ -753,12 +757,12 @@ def which_alias_file(): ################################################################################ -def TemplateSubst(map, filename): +def TemplateSubst(subst_map, filename): """ Perform a substition of template """ templatefile = open_file(filename) template = templatefile.read() - for x in map.keys(): - template = template.replace(x, str(map[x])) + for k, v in subst_map.iteritems(): + template = template.replace(k, str(v)) templatefile.close() return template @@ -1091,10 +1095,6 @@ def split_args (s, dwim=1): ################################################################################ -def Dict(**dict): return dict - -######################################## - def gpgv_get_status_output(cmd, status_read, status_write): """ Our very own version of commands.getouputstatus(), hacked to support @@ -1362,9 +1362,9 @@ def check_signature (sig_filename, data_filename="", keyrings=None, autofetch=No rejects.append("signature on %s does not appear to be valid [No SIG_ID]." 
% (sig_filename)) # Finally ensure there's not something we don't recognise - known_keywords = Dict(VALIDSIG="",SIG_ID="",GOODSIG="",BADSIG="",ERRSIG="", + known_keywords = dict(VALIDSIG="",SIG_ID="",GOODSIG="",BADSIG="",ERRSIG="", SIGEXPIRED="",KEYREVOKED="",NO_PUBKEY="",BADARMOR="", - NODATA="",NOTATION_DATA="",NOTATION_NAME="",KEYEXPIRED="") + NODATA="",NOTATION_DATA="",NOTATION_NAME="",KEYEXPIRED="",POLICY_URL="") for keyword in keywords.keys(): if not known_keywords.has_key(keyword): @@ -1484,7 +1484,7 @@ def is_email_alias(email): ################################################################################ -def get_changes_files(dir): +def get_changes_files(from_dir): """ Takes a directory and lists all .changes files in it (as well as chdir'ing to the directory; this is due to broken behaviour on the part of p-u/p-a @@ -1494,10 +1494,10 @@ def get_changes_files(dir): """ try: # Much of the rest of p-u/p-a depends on being in the right place - os.chdir(dir) - changes_files = [x for x in os.listdir(dir) if x.endswith('.changes')] + os.chdir(from_dir) + changes_files = [x for x in os.listdir(from_dir) if x.endswith('.changes')] except OSError, e: - fubar("Failed to read list from directory %s (%s)" % (dir, e)) + fubar("Failed to read list from directory %s (%s)" % (from_dir, e)) return changes_files @@ -1510,50 +1510,3 @@ apt_pkg.ReadConfigFileISC(Cnf,default_config) if which_conf_file() != default_config: apt_pkg.ReadConfigFileISC(Cnf,which_conf_file()) - -############################################################################### - -def ensure_orig_files(changes, dest_dir, session): - """ - Ensure that dest_dir contains all the orig tarballs for the specified - changes. If it does not, symlink them into place. - - Returns a 2-tuple (already_exists, symlinked) containing a list of files - that were already there and a list of files that were symlinked into place. - """ - - exists, symlinked = [], [] - - for dsc_file in changes.dsc_files: - - # Skip all files that are not orig tarballs - if not re_is_orig_source.match(dsc_file): - continue - - # Skip orig files not identified in the pool - if not (dsc_file in changes.orig_files and - 'id' in changes.orig_files[dsc_file]): - continue - - dest = os.path.join(dest_dir, dsc_file) - - if os.path.exists(dest): - exists.append(dest) - continue - - orig_file_id = changes.orig_files[dsc_file]['id'] - - c = session.execute( - 'SELECT l.path, f.filename FROM location l, files f WHERE f.id = :id and f.location = l.id', - {'id': orig_file_id} - ) - - res = c.fetchone() - if not res: - return "[INTERNAL ERROR] Couldn't find id %s in files table." % orig_file_id - - src = os.path.join(res[0], res[1]) - os.symlink(src, dest) - symlinked.append(dest) - - return (exists, symlinked) diff --git a/docs/NEWS b/docs/NEWS deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/README.new-incoming b/docs/README.new-incoming deleted file mode 100644 index 8ebd0e2f..00000000 --- a/docs/README.new-incoming +++ /dev/null @@ -1,123 +0,0 @@ -[An updated version of the proposal sent to debian-devel-announce@l.d.o. - Debian-specific, but useful as a general overview of New Incoming.] - - New Incoming System - =================== - -This document outlines the new system for handling Incoming -directories on ftp-master and non-US. 
- -The old system: ---------------- - - o incoming was a world writable directory - - o incoming was available to everyone through http://incoming.debian.org/ - - o incoming was processed once a day by dinstall - - o uploads in incoming had to have been there > 24 hours before they - were REJECTed. If they were processed before that and had - problems they were SKIPped (with no notification to the maintainer - and/or uploader). - -The new system: ---------------- - - o There's 4 incoming directories: - - @ "unchecked" - where uploads from Queue Daemons and maintainers - initially go. - - @ "accepted" - where accepted packages stay until the daily - dinstall run. - - @ "new" - where NEW packages (and their dependents[1]) requiring - human processing go after being automatically - checked by dinstall. - - @ "byhand" - where BYHAND packages (and their dependents[1]) - requiring human intervention go after being - automatically checked by dinstall. - - In addition there's 3 support directories: - - @ "reject" - where rejected uploads go - - @ "done" - where the .changes files for packages that have been - installed go. - - @ "holding" - a temporary working area for dinstall to hold - packages while checking them. - - o Packages in 'unchecked' are automatically checked every 15 minutes - and are either: REJECT, ACCEPT, NEW or BYHAND. - - o Only 'unchecked' is locally world-writeable. The others are all, - of course, locally world-readable but only 'accepted' and 'byhand' - are publicly visible on http://incoming.debian.org/ - - o 'accepted' and 'byhand' are made available to the auto-builders so - they can build out of them. - - o 'accepted' is processed once a day as before. - - o Maintainer/uploader & list notification and bug closures are - changed to be done for ACCEPTs, not INSTALLs. - [Rationale: this reduces the load both on our list server and our - BTS server; it also gives people better notice of uploads to - avoid duplication of work especially, for example, in the case of - NMUs.] - [NB: see [3] for clarifications of when mails are sent.] - -Why: ----- - - o Security (no more replaceable file races) - o Integrity (new http://i.d.o contains only signed (+installable) uploads[2]) - o Needed for crypto-in-main integration - o Allows safe auto-building out of accepted - o Allows previously-prohibitively-expensive checks to be added to dinstall - o Much faster feedback on packages; no more 48 hour waits before - finding out your package has been REJECTed. - -What breaks: ------------- - - o people who upload packages but then want to retract or replace the - upload. - - * solution: mostly "Don't do that then"; i.e. test your uploads - properly. Uploads can still be replaced, simply by uploading a - higher versioned replacement. Total retraction is harder but - usually only relevant for NEW packages. - -================================================================================ - -[1] For versions of dependents meaning: binaries compiled from the - source of BYHAND or NEW uploads. Due to dak's fascist - source-must-exist checking, these binaries must be held back until - the BYHAND/NEW uploads are processed. - -[2] When this mail was initially written there was still at least one - upload queue which will accept unsigned uploads from any - source. [I've since discovered it's been deactivated, but not, - AFAIK because it allowed unsigned uploads.] 
- -[3] - --> reject - / - / -unchecked -----------------------------[*]------> accepted ---------------> pool - \ ^ ^ - | / / - |--> new -- / - | |[4] / - | V / - |--> byhand --/ - -[4] This is a corner case, included for completeness, ignore - it. [Boring details: NEW trumps BYHAND, so it's possible for a - upload with both BYHAND and NEW components to go from 'unchecked' - -> 'new' -> 'byhand' -> 'accepted'] - diff --git a/docs/README.quotes b/docs/README.quotes index e531a241..d6bd125b 100644 --- a/docs/README.quotes +++ b/docs/README.quotes @@ -367,3 +367,10 @@ Canadians: This is a lighthouse. Your call. <dak> mhy: Error: "!!!11111iiiiiioneoneoneone" is not a valid command. <mhy> dak: oh shut up <dak> mhy: Error: "oh" is not a valid command. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +<sgran> hey, I think something's wrong with your git repo +<sgran> when I git pulled this last time, I got something that looked almost like python instead of dak +<mhy> sgran: slander +<sgran> sorry, I take it back, I've had a better look now diff --git a/docs/manpages/Makefile b/docs/manpages/Makefile deleted file mode 100644 index 75cf3cc0..00000000 --- a/docs/manpages/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/make -f - -SGMLMANPAGES = check-overrides.1.sgml clean-suites.1.sgml control-overrides.1.sgml control-suite.1.sgml import-users-from-passwd.1.sgml ls.1.sgml make-maintainers.1.sgml override.1.sgml poolize.1.sgml process-accepted.1.sgml process-new.1.sgml rm.1.sgml - -MANPAGES = $(patsubst %.sgml, dak_%, $(SGMLMANPAGES)) - - -all: $(MANPAGES) - -dak_%: %.sgml - docbook2man $< > /dev/null - -clean: - rm -f $(MANPAGES) manpage.links manpage.log manpage.refs diff --git a/docs/manpages/check-overrides.1.sgml b/docs/manpages/check-overrides.1.sgml deleted file mode 100644 index a4a7c146..00000000 --- a/docs/manpages/check-overrides.1.sgml +++ /dev/null @@ -1,61 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_check-overrides</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak check-overrides</> - <refpurpose>Utility to alter or display the contents of a suite</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak check-overrides</> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak check-overrides</command> is a cruft checker for overrides. - </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Notes</> - - <Para>dak check-overrides is not a good idea with New Incoming as it doesn't take into account queue/accepted. You can minimize the impact of this by running it immediately after 'dak process-accepted' but that's still racy because 'dak process-new' doesn't lock with 'dak process-accepted'. A better long term fix is the evil plan for accepted to be in the DB.</> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak check-overrides</command> returns zero on normal operation, non-zero on error. 
- </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/clean-suites.1.sgml b/docs/manpages/clean-suites.1.sgml deleted file mode 100644 index 621bbc34..00000000 --- a/docs/manpages/clean-suites.1.sgml +++ /dev/null @@ -1,82 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_clean-suites</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak clean-suites</> - <refpurpose>Utility to clean out old packages</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak clean-suites</> - <arg><option><replaceable>options</replaceable></></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak clean-suites</command> is a utility to clean out old packages. It will clean out any binary packages not referenced by a suite and any source packages not referenced by a suite and not referenced by any binary packages. Cleaning is not actual deletion, but rather, removal of packages from the pool to a 'morgue' directory. The 'morgue' directory is split into dated sub-directories to keep things sane in big archives. - </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <variablelist> - <VarListEntry><term><option>-n/--no-action</option></> - <ListItem> - <Para>Don't actually clean any packages.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <refsect1> - <title>Configuration - dak clean-suites uses dak's configuration file. It follows the typical ISC configuration format as seen in ISC tools like bind 8 and dhcpd. Apart from being able to alter the defaults for command line options, the following configuration items are used: - - - Clean-Suites::StayOfExecution - - This is the number of seconds unreferenced packages are left before being cleaned. - - - - Clean-Suites::MorgueSubDir - - If not blank, this is the subdirectory in the morgue used to hold removed packages. - - - - - - Diagnostics</> - <para> - <command>dak clean-suites</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/control-overrides.1.sgml b/docs/manpages/control-overrides.1.sgml deleted file mode 100644 index 26440ad4..00000000 --- a/docs/manpages/control-overrides.1.sgml +++ /dev/null @@ -1,98 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_control-overrides</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak control-overrides</> - <refpurpose>Utility to manipulate the packages overrides</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak control-overrides</> - <arg><option><replaceable>options</replaceable></option></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak control-overrides</command> is the command line tool to handle override files. Override files can be listed or updated. 
- </para> - </refsect1> - <RefSect1><Title>Options</> - - <VariableList> - <varlistentry> - <term><option>-a/--add</option></term> - <listitem> - <para>Add entries to the override DB. Changes and deletions are ignored.</para> - </listitem> - </varlistentry> - - <VarListEntry><term><option>-c/--component=<replaceable>component</replaceable></option></> - <ListItem><Para>Uses the override DB for the component listed.</para> - </listitem> - </VarListEntry> - - <varlistentry> - <term><option>-h/--help</option></term> - <listitem> - <para>Display usage help and then exit.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-l/--list</option></term> - <listitem> - <para>Lists the override DB to stdout.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-q/--quiet</option></term> - <listitem> - <para>Be less verbose about what has been done.</para> - </listitem> - </varlistentry> - - <VarListEntry><term><option>-s/--suite=<replaceable>suite</replaceable></option></> - <ListItem><Para>Uses the override DB for the suite listed.</para></listitem> - </varlistentry> - - <VarListEntry><term><option>-S/--set</option></term> - <ListItem><Para>Set the override DB to the provided input.</PARA></LISTITEM> - </VarListEntry> - - <varlistentry> - <term><option>-t/--type=<replaceable>type</replaceable></option></term> - <listitem> - <para>Uses the override DB for the type listed. Possible values are: <literal>deb</literal>, <literal>udeb</literal> and <literal>dsc</literal>.</para> - </listitem> - </varlistentry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak control-overrides</command> returns zero on normal operation, non-zero on error. - </para> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/control-suite.1.sgml b/docs/manpages/control-suite.1.sgml deleted file mode 100644 index 12c89c5a..00000000 --- a/docs/manpages/control-suite.1.sgml +++ /dev/null @@ -1,82 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_control-suite</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak control-suite</> - <refpurpose>Utility to alter or display the contents of a suite</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak control-suite</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>file...</replaceable></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak control-suite</command> is a utility to alter or display the contents of a suite. Input for alterations is taken either from filename(s) supplied or stdin. The format for both input and output is lines each with a whitespace separated list of: <literal>package</literal>, <literal>version</literal> and <literal>architecture</literal>. 
- </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-a/--add=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Add to the suite.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-l/--list=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>List the contents of the suite.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-r/--remove=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Remove from the suite.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-s/--set=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Set the suite to exactly the input.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak control-suite</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/dak.ent b/docs/manpages/dak.ent deleted file mode 100644 index 1860e8e5..00000000 --- a/docs/manpages/dak.ent +++ /dev/null @@ -1,20 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> - -<!-- Boiler plate docinfo section --> -<!ENTITY dak-docinfo " - <docinfo> - <address><email>james@nocrew.org</email></address> - <author><firstname>James</firstname> <surname>Troup</surname></author> - <copyright><year>2000-2001</year> <holder>James Troup</holder></copyright> - <date>15 January 2001</date> - </docinfo> -"> - -<!-- Boiler plate Author section --> -<!ENTITY manauthor " - <RefSect1><Title>Author</> - <para> - dak was written by James Troup <email>james@nocrew.org</email>. - </para> - </RefSect1> -"> diff --git a/docs/manpages/import-users-from-passwd.1.sgml b/docs/manpages/import-users-from-passwd.1.sgml deleted file mode 100644 index 0fd48511..00000000 --- a/docs/manpages/import-users-from-passwd.1.sgml +++ /dev/null @@ -1,94 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_import-users-from-passwd</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak import-users-from-passwd</> - <refpurpose>Utility to sync PostgreSQL users with system users</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak import-users-from-passwd</> - <arg><option><replaceable>options</replaceable></></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak import-users-from-passwd</command> is a utility to sync PostgreSQL's user database with the system's users. It is designed to allow the use of 'peer sameuser' authentication. It simply adds any users in the password file into PostgreSQL's pg_user table if they are not already there. It will also warn you about users who are in the pg_user table but not in the password file. - </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-n/--no-action</option></> - <ListItem> - <Para>Don't actually do anything.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-q/--quiet</option></> - <ListItem> - <Para>Be quiet, i.e. display as little output as possible.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-v/--verbose</option></> - <ListItem> - <Para>Be verbose, i.e.
display more output than normal.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <refsect1> - <title>Configuration</title> - <para> - <command>dak import-users-from-passwd</command> uses dak's configuration file. It follows the typical ISC configuration format as seen in ISC tools like bind 8 and dhcpd. Apart from being able to alter the defaults for command line options, the following configuration items are used: - </para> - <variablelist> - <varlistentry><term><option>Import-Users-From-Passwd::ValidGID</option></term> - <listitem> - <para>Each user's primary GID is compared with this, if it's not blank. If they match, the user will be processed; if they don't, the user will be skipped.</para> - </listitem> - </varlistentry> - - <varlistentry><term><option>Import-Users-From-Passwd::KnownPostgresUsers</option></term> - <listitem> - <para>This is a comma-separated list of users who are in PostgreSQL's pg_user table but are not in the password file.</para> - </listitem> - </varlistentry> - </variablelist> - </refsect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak import-users-from-passwd</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/ls.1.sgml b/docs/manpages/ls.1.sgml deleted file mode 100644 index c7c4f29a..00000000 --- a/docs/manpages/ls.1.sgml +++ /dev/null @@ -1,104 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_ls</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak ls</> - <refpurpose>Utility to display information about packages</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak ls</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>package</replaceable></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak ls</command> is a utility to display information about packages, specifically what suites they are in and for which architectures. - </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-a/--architecture=<replaceable>architecture</replaceable></option></> - <ListItem> - <Para>Only list package information for the listed architecture(s).</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-b/--binary-type=<replaceable>binary type</replaceable></option></> - <ListItem> - <Para>Only show package information for the binary type ('deb' or 'udeb').</PARA> - </LISTITEM> - </VarListEntry> - - <varlistentry><term><option>-c/--component=<replaceable>component</replaceable></option></term> - <listitem> - <para>Only list package information for the listed component(s).</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-g/--greaterorequal</option></term> - <term><option>-G/--greaterthan</option></term> - <listitem> - <para>Determine the highest version of each package in the target suite (which is forced to just unstable if one was not specified) and, in addition to the normal output, also print a line suitable for sending in a reply to a buildd as a 'dep-wait' command. For <option>-g/--greaterorequal</option>, the versioned dependency is a >= one, e.g. <literallayout>dep-retry libgdbm-dev (>= 1.8.3-2)</literallayout></para> - <para>And for <option>-G/--greaterthan</option>, the versioned dependency is a >> one, e.g.
<literallayout>dep-retry libflac-dev (>> 1.1.0-10)</literallayout></para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-r/--regex</option></term> - <listitem> - <para>Treat the <replaceable>package</replaceable> argument as a regex, rather than doing an exact search.</para> - </listitem> - </varlistentry> - - <VarListEntry><term><option>-s/--suite=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Only list package information for the suite(s) listed.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-S/--source-and-binary</option></> - <ListItem> - <Para>For each package which is a source package, also show information about the binary packages it produces.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak ls</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/make-maintainers.1.sgml b/docs/manpages/make-maintainers.1.sgml deleted file mode 100644 index 8cc324ce..00000000 --- a/docs/manpages/make-maintainers.1.sgml +++ /dev/null @@ -1,85 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_make-maintainers</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak make-maintainers</> - <refpurpose>Utility to generate an index of packages' maintainers</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak make-maintainers</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>extra file...</replaceable></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak make-maintainers</command> is a utility to generate an index of packages' maintainers. The output format is: -<literallayout>package~version maintainer</literallayout> - The input format of extra files can either be in this form or in the old style, which is similar but lacking the version number, i.e.: -<literallayout>package maintainer</literallayout> - dak make-maintainers will auto-detect the type of layout of the extra file. If the extra file is in the old style format, the records in it are assumed to supersede any that were seen earlier (i.e. either from earlier extra files or generated from the SQL). - </Para> - <para> - dak make-maintainers determines the maintainer of a package by comparing suite priority (see 'Configuration') and then version number. - </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <variablelist> - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <refsect1> - <title>Configuration</title> - <para> - <command>dak make-maintainers</command> uses dak's configuration file. It follows the typical ISC configuration format as seen in ISC tools like bind 8 and dhcpd. Apart from being able to alter the defaults for command line options, the following configuration items are used: - </para> - <variablelist> - <varlistentry><term><option>Suite::<SUITE>::Priority</option></term> - <listitem> - <para>Suite priority overrides the version checks dak make-maintainers does.
A package in a higher priority suite overrides versions in lower priority suites, even if the version number in the higher priority suite is older.</para> - </listitem> - </varlistentry> - </variablelist> - </refsect1> - - <refsect1> - <title>New versus Old output format</title> - <para> - Converting the new output format to the old output format is easy with some simple sed + awk, e.g. -<literallayout>sed -e "s/~[^ ]*\([ ]\)/\1/" | awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}'</literallayout> - </para> - </refsect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak make-maintainers</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/override.1.sgml b/docs/manpages/override.1.sgml deleted file mode 100644 index 12afac55..00000000 --- a/docs/manpages/override.1.sgml +++ /dev/null @@ -1,87 +0,0 @@ -<!-- -*- mode: sgml -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_override</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak override</> - <refpurpose>Make micromodifications or queries to the overrides table</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak override</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>package</replaceable></arg> - <arg><option><replaceable>section</replaceable></></arg> - <arg><option><replaceable>priority</replaceable></></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak override</command> makes micromodifications to, and answers queries about, the overrides table. - </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - <VarListEntry><term><option>-d/--done=<replaceable>BUG#</replaceable></option></> - <ListItem> - <Para>Close the listed bugs as part of adjusting the overrides.</PARA> - </LISTITEM> - </VarListEntry> - <VarListEntry><term><option>-n/--no-action</option></> - <ListItem> - <Para>Show what dak override would do but make no changes.</PARA> - </LISTITEM> - </VarListEntry> - <VarListEntry><term><option>-s/--suite=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Affect the overrides in the suite listed. The default is <literal>unstable</literal>.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Common use</> - <para> - <command>dak override</command> when invoked with only a package name will tell you what section and priority the given package has. - </PARA> - <para> - <command>dak override</command> when invoked with a package and one or two other values will set the section and/or priority to the values given. You may use a single period ('.') to represent "do not change", or you can omit the value you do not want to change. - </PARA> - </RefSect1> - <RefSect1><Title>Notes</> - - <Para><command>dak override</command> essentially lets you do what <command>dak control-overrides</command> does, only on the microscopic scale rather than the macroscopic scale of <command>dak control-overrides</command>. Use with care.</> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak override</command> returns zero on normal operation, non-zero on error.
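(Aside: the common invocations described above, sketched as commands; the package, section and priority names are illustrative.)

    $ dak override hello                # print hello's current section and priority
    $ dak override hello . extra        # '.' keeps the section, priority becomes extra
    $ dak override -n hello devel       # preview a change without applying it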
- </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/process-accepted.1.sgml b/docs/manpages/process-accepted.1.sgml deleted file mode 100644 index 1f3cf4e6..00000000 --- a/docs/manpages/process-accepted.1.sgml +++ /dev/null @@ -1,100 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_process-accepted</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak process-accepted</> - <refpurpose>Installs packages from accepted</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak process-accepted</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>changes_file</replaceable></arg> - <arg><option><replaceable>...</replaceable></option></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak process-accepted</command> is the program which installs packages from the accepted directory into the distribution. - </PARA></REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - - <varlistentry> - <term><option>-a/--automatic</option></term> - <listitem> - <para>Run automatically; i.e. perform the default action if it's possible to do so without user interaction. Intended for use in cron jobs and the like.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-h/--help</option></term> - <listitem> - <para>Display usage help and then exit.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-m/--manual-reject=<replaceable>message</replaceable></option></term> - <listitem> - <para>Perform a manual rejection of the package. The <replaceable>message</replaceable> is included in the rejection notice sent to the uploader. If no <replaceable>message</replaceable> is given, an editor will be spawned so one can be added to the rejection notice.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-n/--no-action</option></term> - <listitem> - <para>Don't actually install anything; just show what would be done.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-p/--no-lock</option></term> - <listitem> - <para>Don't check the lockfile. Obviously dangerous and should only be used for cron jobs (if at all).</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-v/--version</option></term> - <listitem> - <para>Display the version number and then exit.</para> - </listitem> - </varlistentry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak process-accepted</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - <refsect1> - <title>Acknowledgements</title> - <para> - <command>dak process-accepted</command> is based very heavily on dinstall, written by Guy Maor <email>maor@debian.org</email>; in fact it started out life as a dinstall clone.
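(Aside: a sketch of unattended use combining -a with a batch of changes files, as a cron job might; the queue path is an assumption for illustration, not necessarily dak's configured location.)

    $ dak process-accepted -a /srv/ftp.debian.org/queue/accepted/*.changes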
- </para> - </refsect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/process-new.1.sgml b/docs/manpages/process-new.1.sgml deleted file mode 100644 index f99c6cfc..00000000 --- a/docs/manpages/process-new.1.sgml +++ /dev/null @@ -1,95 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_process-new</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak process-new</> - <refpurpose>Processes BYHAND and NEW packages</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak process-new</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>changes_file</replaceable></arg> - <arg><option><replaceable>...</replaceable></option></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak process-new</command> is the program which processes packages in the BYHAND and NEW queues before they can be installed into the distribution. - </PARA></REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - - <varlistentry> - <term><option>-a/--automatic</option></term> - <listitem> - <para>Run automatically; i.e. perform the default action if it's possible to do so without user interaction. Intended for use in cron jobs and the like.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-h/--help</option></term> - <listitem> - <para>Display usage help and then exit.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-m/--manual-reject=<replaceable>message</replaceable></option></term> - <listitem> - <para>Perform a manual rejection of the package. The <replaceable>message</replaceable> is included in the rejection notice sent to the uploader. If no <replaceable>message</replaceable> is given, an editor will be spawned so one can be added to the rejection notice.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-n/--no-action</option></term> - <listitem> - <para>Don't actually install anything; just show what would be done.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-p/--no-lock</option></term> - <listitem> - <para>Don't check the lockfile. Obviously dangerous and should only be used for cron jobs (if at all).</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-v/--version</option></term> - <listitem> - <para>Display the version number and then exit.</para> - </listitem> - </varlistentry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak process-new</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/rm.1.sgml b/docs/manpages/rm.1.sgml deleted file mode 100644 index 5b2eaf93..00000000 --- a/docs/manpages/rm.1.sgml +++ /dev/null @@ -1,215 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_rm</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak rm</> - <refpurpose>Utility to remove/add packages from suites</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak rm</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>package</replaceable></arg> - <arg><option><replaceable>...</replaceable></option></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak rm</command> is the command line tool to add and remove package sets from suites with enforced logging, optional bug closing and override updates.
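(Aside: a cautious-removal sketch combining the -n, -R and -m switches documented below; the package name and reason are illustrative.)

    $ dak rm -n -R -m "Orphaned; dead upstream, uninstallable" foopkg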
- </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-a/--architecture=<replaceable>architecture</replaceable></option></> - <ListItem> - <Para>Restrict the packages being considered to the architecture(s) listed.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-b/--binary</option></> - <ListItem> - <Para>Only look at binary packages.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-c/--component=<replaceable>component</replaceable></option></> - <ListItem> - <Para>Restrict the packages being considered to those found in the component(s) listed. The default is <literal>main</literal>.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-C/--carbon-copy=<replaceable>[ bug number | 'package' | email address ]</replaceable></option></> - <ListItem> - <Para>Carbon copy the bug closing mail to the address(es) given. If the removal was not requested by the maintainer, this option should always be used to inform the maintainer of the package's removal. Three types of address are accepted.</PARA> - <itemizedlist> - <listitem> - <para>number - assumed to be a bug number, and expanded to nnnnn@bugs.debian.org.</para> - </listitem> - <listitem> - <para>'<literal>package</literal>' - carbon copy package@package.debian.org for each package given as an argument.</para> - </listitem> - <listitem> - <para>anything containing '@' - assumed to be an email address, and carbon copied as is.</para> - </listitem> - </itemizedlist> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-d/--done=<replaceable>done</replaceable></option></> - <ListItem> - <Para>Close the bug(s) listed on successful completion.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-m/--reason=<replaceable>reason</replaceable></option></> - <ListItem> - <Para>The reason for the removal or addition of the package(s). This is a required option; if not provided, an editor will be spawned so the reason can be added there.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-n/--no-action</option></> - <ListItem> - <Para>Don't actually do anything; just show what would be done.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-p/--partial</option></> - <ListItem> - <Para>Partial removal of a package, so the package is not removed from the overrides. This option is implied by <option>-a/--architecture</option>.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-R/--rdep-check</option></> - <ListItem> - <Para>Check the reverse dependencies (and build-dependencies) of the packages that are to be removed and warn if anything will break.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-s/--suite=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Only add/remove the packages from the suite(s) listed. The default is <literal>unstable</literal>.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-S/--source-only</option></> - <ListItem> - <Para>Only look at source packages.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <refsect1> - <title>How packages are chosen</title> - <para> - There are three methods for selecting packages. - </para> - <variablelist> - <varlistentry><term>Source + Binaries. (default)</term> - <listitem> - <para>In this mode dak rm will assume each of the package(s) passed as arguments is a source package and will also remove any binary packages built from these source packages.</para> - </listitem> - </varlistentry> - - <varlistentry><term>Binary only.</term>
- <listitem> - <para>Only binary packages are searched; source packages are ignored. This mode is chosen by use of the <option>-b/--binary</option> switch. This should only be used for orphaned binary packages (i.e. those no longer built by source packages); otherwise, in any system (e.g. Debian) which has auto-building, pointless (and uninstallable) recompiles will be triggered.</para> - </listitem> - </varlistentry> - - <varlistentry><term>Source only.</term> - <listitem> - <para>Only source packages are searched; binary packages are ignored. This mode is chosen by use of the <option>-S/--source-only</option> switch.</para> - </listitem> - </varlistentry> - </variablelist> - </refsect1> - - <refsect1> - <title>Configuration</title> - <para> - <command>dak rm</command> uses dak's configuration file. It follows the typical ISC configuration format as seen in ISC tools like bind 8 and dhcpd. Apart from being able to alter the defaults for command line options, the following configuration items are used: - </para> - <variablelist> - <varlistentry><term><option>Rm::MyEmailAddress</option></term> - <listitem> - <para>This is used as the From: line for bug closing mails as per the -d/--done command line switch. It, obviously, must be an RFC-822-valid email address.</para> - </listitem> - </varlistentry> - - <varlistentry><term><option>Rm::LogFile</option></term> - <listitem> - <para>This is the (absolute) file name of the logfile that dak rm unconditionally writes to. It cannot be empty or an invalid file.</para> - </listitem> - </varlistentry> - </variablelist> - </refsect1> - - <refsect1> - <title>Examples</title> - <para>The first example is of a source+binaries package removal.</para> - -<screen> -$ dak rm -d 68136 -m "Requested by tbm@; confirmed with maintainer. Superseded by libgmp2" gmp1 -Working... done. -Will remove the following packages from unstable: - - gmp1 | 1.3.2-8.2 | source, alpha, hppa, arm, i386, m68k, powerpc, sparc - gmp1-dev | 1.3.2-8.2 | alpha, hppa, arm, i386, m68k, powerpc, sparc - - -------------------- Reason ------------------- -Requested by tbm@; confirmed with maintainer. Superseded by libgmp2 ----------------------------------------------- - -Continue (y/N)? y - Deleting... done. -$ -</screen> - - <para>The second example is of a binary-only multi-package removal.</para> - -<screen> -$ dak rm -d 82562 -m "Requested by paul@; NBS." -b libgtkextra{5,9,10} -Working... done. -Will remove the following packages from unstable: - -libgtkextra10 | 0.99.10-2 | alpha, i386, m68k, powerpc, sparc -libgtkextra5 | 0.99.5-1 | alpha, i386, m68k, powerpc, sparc -libgtkextra9 | 0.99.9-1 | alpha, i386, m68k, powerpc, sparc - -Will also close bugs: 82562 - -------------------- Reason ------------------- -Requested by paul@; NBS. ----------------------------------------------- - -Continue (y/N)? y - Deleting... done. -$ -</screen> - </refsect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak rm</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/scripts/debian/byhand-di b/scripts/debian/byhand-di index 0a004f38..67db5516 100755 --- a/scripts/debian/byhand-di +++ b/scripts/debian/byhand-di @@ -95,6 +95,10 @@ mv "$TMPDIR/installer-$ARCH/current" "$TARGET" find "$TARGET/$VERSION" -type d -exec chmod 755 {} + find "$TARGET/$VERSION" -type f -exec chmod 644 {} + +# Make sure nothing symlinks outside of the ftpdir +# Shouldn't happen, but better to be sure. +symlinks -d -r /srv/ftp.debian.org/ftp + trap - EXIT cleanup diff --git a/scripts/debian/mkfilesindices b/scripts/debian/mkfilesindices index b9d31a02..c16fde6a 100755 --- a/scripts/debian/mkfilesindices +++ b/scripts/debian/mkfilesindices @@ -27,7 +27,7 @@ echo "Generating sources list..." cd $base/ftp find ./dists -maxdepth 1 \! -type d find ./dists \! -type d | grep "/source/" -) | sort -u | gzip -9 > source.list.gz +) | sort -u | gzip --rsyncable -9 > source.list.gz echo "Generating arch lists..." @@ -39,7 +39,7 @@ for a in $ARCHES; do cd $base/ftp find ./dists -maxdepth 1 \! -type d find ./dists \!
-type d | grep -E "(proposed-updates.*_$a.changes$|/main/disks-$a/|/main/installer-$a/|/Contents-$a|/binary-$a/)" - ) | sort -u | gzip -9 > arch-$a.list.gz + ) | sort -u | gzip --rsyncable -9 > arch-$a.list.gz done echo "Generating suite lists..." @@ -62,7 +62,7 @@ printf 'SELECT id, suite_name FROM suite\n' | psql -F' ' -At projectb | done ) suite_list $id | tr -d ' ' | sed 's,^/srv/ftp.debian.org/ftp,.,' - ) | sort -u | gzip -9 > suite-${suite}.list.gz + ) | sort -u | gzip --rsyncable -9 > suite-${suite}.list.gz done echo "Finding everything on the ftp site to generate sundries $(date +"%X")..." @@ -83,7 +83,7 @@ done (cd $base/ftp/ for dist in sid squeeze; do - find ./dists/$dist/main/i18n/ \! -type d | sort -u | gzip -9 > $base/ftp/indices/files/components/translation-$dist.list.gz + find ./dists/$dist/main/i18n/ \! -type d | sort -u | gzip --rsyncable -9 > $base/ftp/indices/files/components/translation-$dist.list.gz done ) diff --git a/scripts/debian/mklslar b/scripts/debian/mklslar index 19363f1f..231f7f8c 100755 --- a/scripts/debian/mklslar +++ b/scripts/debian/mklslar @@ -26,11 +26,11 @@ if [ -r ${filename}.gz ] ; then mv -f ${filename}.gz $filename.old.gz mv -f .$filename.new $filename rm -f $filename.patch.gz - zcat $filename.old.gz | diff -u - $filename | gzip -9cfn - >$filename.patch.gz + zcat $filename.old.gz | diff -u - $filename | gzip --rsyncable -9cfn - >$filename.patch.gz rm -f $filename.old.gz else mv -f .$filename.new $filename fi -gzip -9cfN $filename >$filename.gz +gzip --rsyncable -9cfN $filename >$filename.gz rm -f $filename diff --git a/scripts/debian/mkmaintainers b/scripts/debian/mkmaintainers index a0abaa1f..41e8727c 100755 --- a/scripts/debian/mkmaintainers +++ b/scripts/debian/mkmaintainers @@ -17,7 +17,7 @@ set -e if [ $rc = 1 ] || [ ! -f Maintainers ] ; then echo -n "installing Maintainers ... " mv -f .new-maintainers Maintainers - gzip -9v <Maintainers >.new-maintainers.gz + gzip --rsyncable -9v <Maintainers >.new-maintainers.gz mv -f .new-maintainers.gz Maintainers.gz elif [ $rc = 0 ] ; then echo '(same as before)' diff --git a/templates/contents b/templates/contents index 48950314..1021da45 100644 --- a/templates/contents +++ b/templates/contents @@ -12,22 +12,24 @@ the first is listed. As all Contents files are shipped compressed, the best way to search quickly for a file is with the Unix `zgrep' utility, as in: - `zgrep <regular expression> CONTENTS.gz': + `zgrep <regular expression> CONTENTSFILE.gz': - $ zgrep nose Contents.gz - etc/nosendfile net/sendfile - usr/X11R6/bin/noseguy x11/xscreensaver - usr/X11R6/man/man1/noseguy.1x.gz x11/xscreensaver - usr/doc/examples/ucbmpeg/mpeg_encode/nosearch.param graphics/ucbmpeg - usr/lib/cfengine/bin/noseyparker admin/cfengine + $ zgrep -i debian/ Contents-amd64.gz + usr/share/IlohaMail/debian/Ilohamail.apache web/ilohamail + usr/share/R/debian/r-cran.mk devel/r-base-dev + usr/share/apt-listbugs/debian/apt_preferences.rb admin/apt-listbugs + usr/share/apt-listbugs/debian/bts.rb admin/apt-listbugs + usr/share/apt-listbugs/debian/btssoap.rb admin/apt-listbugs + usr/share/apt-listbugs/debian/bug.rb admin/apt-listbugs + usr/share/apt-listbugs/debian/mytempfile.rb admin/apt-listbugs This list contains files in all packages, even though not all of the packages are installed on an actual system at once. 
If you want to find out which packages on an installed Debian system provide a particular file, you can use `dpkg --search <filename>': - $ dpkg --search /usr/bin/dselect - dpkg: /usr/bin/dselect + $ dpkg --search apt-get + apt: /usr/bin/apt-get -FILE LOCATION \ No newline at end of file +FILE LOCATION diff --git a/templates/rm.bug-close b/templates/rm.bug-close index 7e521e8a..78addd22 100644 --- a/templates/rm.bug-close +++ b/templates/rm.bug-close @@ -6,7 +6,7 @@ X-Debian: DAK MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit -Subject: Bug#__BUG_NUMBER__: fixed +Subject: Bug#__BUG_NUMBER__: __SUBJECT__ We believe that the bug you reported is now fixed; the following package(s) have been removed from __SUITE_LIST__: diff --git a/web/index.html b/web/index.html index 1b684159..9f0adcae 100644 --- a/web/index.html +++ b/web/index.html @@ -112,22 +112,6 @@ <p>The source is managed in git and is available from: <a href="http://ftp-master.debian.org/git/">http://ftp-master.debian.org/git/</a></p> - - <p>The old bzr tree is obsolete and no longer available. All - information in it is now in git</p> - - <p>The old CVS tree is obsolete but still available for historical purposes. - It's at <strong>:pserver:anonymous@cvs.debian.org:/cvs/dak</strong>; - the module is 'dak' and the login password is blank. - The old CVS repository can be <a href="http://cvs.debian.org/?cvsroot=dak">browsed</a> - courtesy of viewcvs.</p> - - <p>You can also install the <a href="http://packages.debian.org/unstable/devel/dak">dak Package</a> - if you want to look at it and maybe run your own copy.</p> - - <p>The <strong>really</strong> old dinstall scripts are still available - from <strong>:pserver:anonymous@cvs.debian.org:/cvs/ftp-maint</strong>; - the modules are 'dinstall' and 'masterfiles'.</p> </div> <div id="new"> diff --git a/web/x.png b/web/x.png deleted file mode 100644 index b759b0a3..00000000 Binary files a/web/x.png and /dev/null differ diff --git a/web/x4.png b/web/x4.png deleted file mode 100644 index bc376286..00000000 Binary files a/web/x4.png and /dev/null differ diff --git a/web/x5.png b/web/x5.png deleted file mode 100644 index 022f519c..00000000 Binary files a/web/x5.png and /dev/null differ
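(Aside on the gzip changes above: --rsyncable makes gzip restart its compression state at regular intervals, so unchanged regions of the input compress to identical bytes across runs and rsync can transfer only the changed blocks; the output stays valid gzip at a small size cost. A standalone sketch, file name illustrative:)

    $ gzip --rsyncable -9cn Maintainers > Maintainers.gz
    $ zcat Maintainers.gz | head -1    # still plain gzip, decompresses as usual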