From: James Troup
Date: Fri, 24 Nov 2000 00:20:10 +0000 (+0000)
Subject: Initial revision
X-Git-Url: https://git.decadent.org.uk/gitweb/?a=commitdiff_plain;h=07241fcd65bb2808804fe2a6e53807997ee9025b;p=dak.git

Initial revision
---
07241fcd65bb2808804fe2a6e53807997ee9025b
diff --git a/.cvsignore b/.cvsignore
new file mode 100644
index 00000000..8c195168
--- /dev/null
+++ b/.cvsignore
@@ -0,0 +1,3 @@
+*.pyc
+Packages
+Sources
diff --git a/TODO b/TODO
new file mode 100644
index 00000000..4f1daf86
--- /dev/null
+++ b/TODO
@@ -0,0 +1,74 @@
+Show Stopper
+------------
+
+ o finish new cron.daily file
+ o pg_dump & friends.
+
+ o finish rhona
+
+ o Testing... lots of it.
+
+ o jenna needs to munge files in stable
+ o need to handle (i.e. not crash, but reject) -sa builds.
+
+Non-Show Stopper
+----------------
+
+ o CD building scripts need fixing
+
+ o need a way to sync katie.conf and the DB for things like architecture
+ o need a poolifier that will poolify X mb a day.. (catherine)
+
+ o Optimize all the queries by using EXPLAIN and building some INDEXes
+ o Need CIPE tunnel for pandora<->auric setup. [Culus] (?)
+
+ o enclose all the setting SQL stuff in transactions
+
+ o ?assumes running in incoming? ?problem?
+
+ o project/orphaned should be a timed dist so that things only stay
+   in there temporarily (say 3 months) [aj]
+
+ ==
+
+ o jenna: fix the misfeature of handling sid's binary-hurd-i386 brokenness (? still exist ?)
+ o ability to rebuild all other tables from dists _or_ pools (in the event of disaster) (?)
+ o check errors on apt_pkg calls so we don't bomb out on daily runs (?)
+ o check to see if multi-component binary packages from a single component source are supported
+
+===================================================================================================
+
+Packaging TODO
+--------------
+
+ o Install python libraries (db_access and utils)
+ o Install config file
+ o Fix stuff to look in sensible places for libs and config file in debian package (?)
+ o man pages and/or documentation
+
+Future Enhancements
+-------------------
+
+ o make the --help and --version options do stuff for all scripts
+ o check for .dsc when source is mentioned?
+ o fix parse_changes()/build_file_list() to sanity check filenames
+ o safety check and/or rename debs so they match what they should be
+ o charisma can't handle whitespace-only lines (for the moment, this is a feature)
+ o Fix problems with bad .sig .changes having no Maintainer field to REJECT to
+ o Dep checking
+ o Should use $EDITOR, not hardcode vi
+ o should reject timestamp fucked debs
+ o dpkg 1.7 Changed-By field support
+ o Secure incoming handling
+ o revamp NEW package handling so we don't have to read in the override file
+ o make mkmaintainers obey override changes
+ o Report stuff (? needed ?)
+ o handle the case of 1:1.1 which would overwrite 1.1
+ o aj's binary-all stuff (foo-doc depending on foo) (?)
+ o heidi should report suite name not ID [aj]
+
+ o fubar and warn/error wrappers like in C
+ o generic way of saying isabinary and isadsc.
+ o substitution stuff (cf. userdir-ldap) for announce, reject etc.
+
+ o s/distribution/suite/g
diff --git a/db_access.py b/db_access.py
new file mode 100644
index 00000000..a9fd1d1a
--- /dev/null
+++ b/db_access.py
@@ -0,0 +1,187 @@
+# DB access functions
+# Copyright (C) 2000 James Troup
+# $Id: db_access.py,v 1.1.1.1 2000-11-24 00:20:09 troup Exp $
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import pg, string
+
+Cnf = None
+projectB = None
+suite_id_cache = {}
+architecture_id_cache = {}
+archive_id_cache = {}
+component_id_cache = {}
+location_id_cache = {}
+maintainer_id_cache = {}
+source_id_cache = {}
+files_id_cache = {}
+
+def init (config, sql):
+    global Cnf, projectB
+
+    Cnf = config;
+    projectB = sql;
+
+############################################################################################
+
+def get_suite_id (suite):
+    global suite_id_cache
+
+    if suite_id_cache.has_key(suite):
+        return suite_id_cache[suite]
+
+    q = projectB.query("SELECT id FROM suite WHERE suite_name = '%s'" % (suite))
+    suite_id = q.getresult()[0][0]
+    suite_id_cache[suite] = suite_id
+
+    return suite_id
+
+def get_architecture_id (architecture):
+    global architecture_id_cache
+
+    if architecture_id_cache.has_key(architecture):
+        return architecture_id_cache[architecture]
+
+    q = projectB.query("SELECT id FROM architecture WHERE arch_string = '%s'" % (architecture))
+    architecture_id = q.getresult()[0][0]
+    architecture_id_cache[architecture] = architecture_id
+
+    return architecture_id
+
+def get_archive_id (archive):
+    global archive_id_cache
+
+    if archive_id_cache.has_key(archive):
+        return archive_id_cache[archive]
+
+    q = projectB.query("SELECT id FROM archive WHERE name = '%s'" % (archive))
+    archive_id = q.getresult()[0][0]
+    archive_id_cache[archive] = archive_id
+
+    return archive_id
+
+def get_component_id (component):
+    global component_id_cache
+
+    if component_id_cache.has_key(component):
+        return component_id_cache[component]
+
+    q = projectB.query("SELECT id FROM component WHERE lower(name) = '%s'" % (string.lower(component)))
+    ql = q.getresult();
+    if ql == []:
+        return -1;
+
+    component_id = ql[0][0]
+    component_id_cache[component] = component_id
+
+    return component_id
+
+def get_location_id (location, component, archive):
+    global location_id_cache
+
+    cache_key = location + '~' + component + '~' + archive
+    if location_id_cache.has_key(cache_key):
+        return location_id_cache[cache_key]
+
+    archive_id = get_archive_id (archive)
+    if component != "":
+        component_id = get_component_id (component)
+        if component_id != -1:
+            q = projectB.query("SELECT id FROM location WHERE path = '%s' AND component = %d AND archive = %d" % (location, component_id, archive_id))
+    else:
+        q = projectB.query("SELECT id FROM location WHERE path = '%s' AND archive = %d" % (location, archive_id))
+    location_id = q.getresult()[0][0]
+    location_id_cache[cache_key] = location_id
+
+    return location_id
+
+def get_source_id (source, version):
+    global source_id_cache
+
+    cache_key = source + '~' + version + '~'
+    if source_id_cache.has_key(cache_key):
+        return source_id_cache[cache_key]
+
+    q = projectB.query("SELECT id FROM source s WHERE s.source = '%s' AND s.version = '%s'" % (source, version))
+
+    if not q.getresult():
+        return None
+
+    source_id = q.getresult()[0][0]
+    source_id_cache[cache_key] = source_id
+
+    return source_id
+
+##########################################################################################
+
+def get_or_set_maintainer_id (maintainer):
+    global maintainer_id_cache
+
+    if maintainer_id_cache.has_key(maintainer):
+        return maintainer_id_cache[maintainer]
+
+    q = projectB.query("SELECT id FROM maintainer WHERE name = '%s'" % (maintainer))
+    if not q.getresult():
+        projectB.query("INSERT INTO maintainer (name) VALUES ('%s')" % (maintainer))
+        q = projectB.query("SELECT id FROM maintainer WHERE name = '%s'" % (maintainer))
+    maintainer_id = q.getresult()[0][0]
+    maintainer_id_cache[maintainer] = maintainer_id
+
+    return maintainer_id
+
+##########################################################################################
+
+def get_files_id (filename, size, md5sum, location_id):
+    global files_id_cache
+
+    cache_key = "%s~%d" % (filename, location_id);
+
+    if files_id_cache.has_key(cache_key):
+        return files_id_cache[cache_key]
+
+    q = projectB.query("SELECT id, size, md5sum FROM files WHERE filename = '%s' AND location = %d" % (filename, location_id));
+    ql = q.getresult();
+    if ql:
+        if len(ql) != 1:
+            return -1;
+        ql = ql[0]
+        orig_size = ql[1];
+        orig_md5sum = ql[2];
+        if orig_size != size or orig_md5sum != md5sum:
+            return -2;
+        files_id_cache[cache_key] = ql[0]
+        return files_id_cache[cache_key]
+    else:
+        return None
+
+##########################################################################################
+
+def set_files_id (filename, size, md5sum, location_id):
+    global files_id_cache
+
+    cache_key = "%s~%d" % (filename, location_id);
+
+    #print "INSERT INTO files (filename, size, md5sum, location) VALUES ('%s', %d, '%s', %d)" % (filename, long(size), md5sum, location_id);
+    projectB.query("INSERT INTO files (filename, size, md5sum, location) VALUES ('%s', %d, '%s', %d)" % (filename, long(size), md5sum, location_id));
+    q = projectB.query("SELECT id FROM files WHERE id = currval('files_id_seq')");
+    ql = q.getresult()[0];
+    files_id_cache[cache_key] = ql[0]
+
+    return files_id_cache[cache_key]
+
+##########################################################################################
+
diff --git a/init_pool.sql b/init_pool.sql
new file mode 100644
index 00000000..e7bfb87d
--- /dev/null
+++ b/init_pool.sql
@@ -0,0 +1,108 @@
+DROP DATABASE projectb;
+CREATE DATABASE projectb;
+
+\c projectb
+
+CREATE TABLE archive (
+        id SERIAL PRIMARY KEY,
+        name TEXT UNIQUE NOT NULL,
+        origin_server TEXT,
+        description TEXT
+);
+
+CREATE TABLE component (
+        id SERIAL PRIMARY KEY,
+        name TEXT UNIQUE NOT NULL,
+        description TEXT,
+        meets_dfsg BOOLEAN
+);
+
+CREATE TABLE architecture (
+        id SERIAL PRIMARY KEY,
+        arch_string TEXT UNIQUE NOT NULL,
+        description TEXT
+);
+
+CREATE TABLE maintainer (
+        id SERIAL PRIMARY KEY,
+        name TEXT UNIQUE NOT NULL
+);
+
+CREATE TABLE location (
+        id SERIAL PRIMARY KEY,
+        path TEXT NOT NULL,
+        component INT4 REFERENCES component,
+        archive INT4 REFERENCES archive,
+        type TEXT NOT NULL
+);
+
+-- No references below here to allow sane population; added post-population
+
+CREATE TABLE files (
+        id SERIAL PRIMARY KEY,
+        filename TEXT NOT NULL,
+        size INT8 NOT NULL,
+        md5sum TEXT NOT NULL,
+        location INT4 NOT NULL, -- REFERENCES location
+        last_used TIMESTAMP,
+        unique (filename, location)
+);
+
+CREATE TABLE source (
+        id SERIAL PRIMARY KEY,
+        source TEXT NOT NULL,
+        version TEXT NOT NULL,
+        maintainer INT4 NOT NULL, -- REFERENCES maintainer
+        file INT4 UNIQUE NOT NULL, -- REFERENCES files
+        unique (source, version)
+);
+
+CREATE TABLE dsc_files (
+        id SERIAL PRIMARY KEY,
+        source INT4 NOT NULL, -- REFERENCES source,
+        file INT4 NOT NULL, -- REFERENCES files
+        unique (source, file)
+);
+
+CREATE TABLE binaries (
+        id SERIAL PRIMARY KEY,
+        package TEXT NOT NULL,
+        version TEXT NOT NULL,
+        maintainer INT4 NOT NULL, -- REFERENCES maintainer
+        source INT4, -- REFERENCES source,
+        architecture INT4 NOT NULL, -- REFERENCES architecture
+        file INT4 UNIQUE NOT NULL, -- REFERENCES files,
+        type TEXT NOT NULL,
+-- joeyh@ doesn't want .udebs and .debs with the same name, which is why the unique () doesn't mention type
+        unique (package, version, source, architecture)
+);
+
+CREATE TABLE suite (
+        id SERIAL PRIMARY KEY,
+        suite_name TEXT NOT NULL,
+        version TEXT NOT NULL,
+        origin TEXT,
+        label TEXT,
+        policy_engine TEXT,
+        description TEXT
+);
+
+CREATE TABLE suite_architectures (
+        suite INT4 NOT NULL, -- REFERENCES suite
+        architecture INT4 NOT NULL, -- REFERENCES architecture
+        unique (suite, architecture)
+);
+
+CREATE TABLE bin_associations (
+        id SERIAL PRIMARY KEY,
+        suite INT4 NOT NULL, -- REFERENCES suite
+        bin INT4 NOT NULL, -- REFERENCES binaries
+        unique (suite, bin)
+);
+
+CREATE TABLE src_associations (
+        id SERIAL PRIMARY KEY,
+        suite INT4 NOT NULL, -- REFERENCES suite
+        source INT4 NOT NULL, -- REFERENCES source
+        unique (suite, source)
+);
diff --git a/katie b/katie
new file mode 100755
index 00000000..a3c3ea00
--- /dev/null
+++ b/katie
@@ -0,0 +1,1037 @@
+#!/usr/bin/env python
+
+# Installs Debian packages
+# Copyright (C) 2000 James Troup
+# $Id: katie,v 1.1.1.1 2000-11-24 00:20:08 troup Exp $
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+# Based (almost entirely) on dinstall by Guy Maor
+
+#########################################################################################
+
+# Cartman: "I'm trying to make the best of a bad situation, I don't
+#           need to hear crap from a bunch of hippy freaks living in
+#           denial. Screw you guys, I'm going home."
+#
+# Kyle: "But Cartman, we're trying to..."
+#
+# Cartman: "uhh.. screw you guys... home."
+ +######################################################################################### + +import FCNTL, commands, fcntl, getopt, os, pg, pwd, re, shutil, stat, string, sys, tempfile, time +import apt_inst, apt_pkg +import utils, db_access + +############################################################################### + +re_isanum = re.compile (r'^\d+$'); +re_isadeb = re.compile (r'.*\.u?deb$'); +re_issource = re.compile (r'(.+)_(.+?)\.(orig\.tar\.gz|diff\.gz|tar\.gz|dsc)'); +re_dpackage = re.compile (r'^package:\s*(.*)', re.IGNORECASE); +re_darchitecture = re.compile (r'^architecture:\s*(.*)', re.IGNORECASE); +re_dversion = re.compile (r'^version:\s*(.*)', re.IGNORECASE); +re_dsection = re.compile (r'^section:\s*(.*)', re.IGNORECASE); +re_dpriority = re.compile (r'^priority:\s*(.*)', re.IGNORECASE); +re_changes = re.compile (r'changes$'); +re_override_package = re.compile(r'(\S*)\s+.*'); +re_default_answer = re.compile(r"\[(.*)\]"); +re_fdnic = re.compile("\n\n"); + +############################################################################### + +# +reject_footer = """If you don't understand why your files were rejected, or if the +override file requires editing, reply to this email. + +Your rejected files are in incoming/REJECT/. (Some may also be in +incoming/ if your .changes file was unparsable.) If only some of the +files need to repaired, you may move any good files back to incoming/. +Please remove any bad files from incoming/REJECT/.""" +# +new_ack_footer = """Your package contains new components which requires manual editing of +the override file. It is ok otherwise, so please be patient. New +packages are usually added to the override file about once a week. + +You may have gotten the distribution wrong. You'll get warnings above +if files already exist in other distributions.""" +# +installed_footer = """If the override file requires editing, file a bug on ftp.debian.org. + +Thank you for your contribution to Debian GNU.""" + +######################################################################################### + +# Globals +Cnf = None; +reject_message = ""; +changes = {}; +dsc = {}; +dsc_files = {}; +files = {}; +projectB = None; +new_ack_new = {}; +new_ack_old = {}; +overrides = {}; +install_count = 0; +install_bytes = 0.0; +reprocess = 0; +orig_tar_id = None; + +######################################################################################### + +def usage (exit_code): + print """Usage: dinstall [OPTION]... [CHANGES]... + -a, --automatic automatic run + -d, --debug=VALUE debug + -k, --ack-new acknowledge new packages + -m, --manual-reject=MSG manual reject with `msg' + -n, --dry-run don't do anything + -p, --no-lock don't check lockfile !! for cron.daily only !! 
+  -r, --no-version-check    override version check
+  -s, --no-mail             don't send any mail
+  -u, --override-distribution=DIST  override distribution to `dist'
+  -v, --version             display version and exit"""
+    sys.exit(exit_code)
+
+def check_signature (filename):
+    global reject_message
+
+    (result, output) = commands.getstatusoutput("gpg --emulate-md-encode-bug --batch --no-options --no-default-keyring --always-trust --load-extension rsaref --keyring=%s --keyring=%s < %s >/dev/null" % (Cnf["Dinstall::PGPKeyring"], Cnf["Dinstall::GPGKeyring"], filename))
+    if (result != 0):
+        reject_message = "Rejected: GPG signature check failed on `%s'.\n%s\n" % (filename, output)
+        return 0
+    return 1
+
+#####################################################################################################################
+
+def read_override_file (filename, suite, component):
+    global overrides;
+
+    file = utils.open_file(filename, 'r');
+    for line in file.readlines():
+        line = string.strip(utils.re_comments.sub('', line))
+        override_package = re_override_package.sub(r'\1', line)
+        if override_package != "":
+            overrides[suite][component][override_package] = 1
+    file.close()
+
+
+# See if a given package is in the override file. Caches and only loads override files on demand.
+
+def in_override_p (package, component, suite):
+    global overrides;
+
+    # FIXME: nasty non-US specific hack
+    if string.lower(component[:7]) == "non-us/":
+        component = component[7:];
+    if not overrides.has_key(suite) or not overrides[suite].has_key(component):
+        if not overrides.has_key(suite):
+            overrides[suite] = {}
+        if not overrides[suite].has_key(component):
+            overrides[suite][component] = {}
+        if Cnf.has_key("Suite::%s::SingleOverrideFile" % (suite)): # legacy mixed suite (i.e. experimental)
+            override_filename = Cnf["Dir::OverrideDir"] + 'override.' + Cnf["Suite::%s::OverrideCodeName" % (suite)];
+            read_override_file (override_filename, suite, component);
+        else: # all others.
+            for src in ("", ".src"):
+                override_filename = Cnf["Dir::OverrideDir"] + 'override.' + Cnf["Suite::%s::OverrideCodeName" % (suite)] + '.' + component + src;
+                read_override_file (override_filename, suite, component);
+
+    return overrides[suite][component].get(package, None);
+
+#####################################################################################################################
+
+def check_changes(filename):
+    global reject_message, changes, files
+
+    # Parse the .changes file into a dictionary [FIXME - need to trap errors, pass on to reject_message etc.]
+    try:
+        changes = utils.parse_changes(filename)
+    except utils.cant_open_exc:
+        reject_message = "Rejected: can't read changes file '%s'.\n" % (filename)
+        return 0;
+    except utils.changes_parse_error_exc, line:
+        reject_message = "Rejected: error parsing changes file '%s', can't grok: %s.\n" % (filename, line)
+        changes["maintainer822"] = Cnf["Dinstall::MyEmailAddress"];
+        return 0;
+
+    # Parse the Files field from the .changes into another dictionary [FIXME need to trap errors as above]
+    files = utils.build_file_list(changes, "")
+
+    # Check for mandatory fields
+    for i in ("source", "binary", "architecture", "version", "distribution", "maintainer", "files"):
+        if not changes.has_key(i):
+            reject_message = "Rejected: Missing field `%s' in changes file." % (i)
+            return 0 # Avoid errors during later tests
+
+    # Fix the Maintainer: field to be RFC822 compatible
+    (changes["maintainer822"], changes["maintainername"], changes["maintaineremail"]) = utils.fix_maintainer (changes["maintainer"])
+
+    # Override the Distribution: field if appropriate
+    if Cnf["Dinstall::Options::Override-Distribution"] != "":
+        reject_message = reject_message + "Warning: Distribution was overridden from %s to %s.\n" % (changes["distribution"], Cnf["Dinstall::Options::Override-Distribution"])
+        changes["distribution"] = Cnf["Dinstall::Options::Override-Distribution"]
+
+    # Split multi-value fields into a lower-level dictionary
+    for i in ("architecture", "distribution", "binary", "closes"):
+        o = changes.get(i, "")
+        if o != "":
+            del changes[i]
+            changes[i] = {}
+            for j in string.split(o):
+                changes[i][j] = 1
+
+    # Ensure all the values in Closes: are numbers
+    if changes.has_key("closes"):
+        for i in changes["closes"].keys():
+            if re_isanum.match (i) == None:
+                reject_message = reject_message + "Rejected: `%s' from Closes field isn't a number.\n" % (i)
+
+
+    # Map frozen to unstable if frozen doesn't exist
+    if changes["distribution"].has_key("frozen") and not Cnf.has_key("Suite::Frozen"):
+        del changes["distribution"]["frozen"]
+        reject_message = reject_message + "Mapping frozen to unstable.\n"
+
+    # Ensure target distributions exist
+    for i in changes["distribution"].keys():
+        if not Cnf.has_key("Suite::%s" % (i)):
+            reject_message = reject_message + "Rejected: Unknown distribution `%s'.\n" % (i)
+
+    # Map unreleased arches from stable to unstable
+    if changes["distribution"].has_key("stable"):
+        for i in changes["architecture"].keys():
+            if not Cnf.has_key("Suite::Stable::Architectures::%s" % (i)):
+                reject_message = reject_message + "Mapping stable to unstable for unreleased arch `%s'.\n" % (i)
+                del changes["distribution"]["stable"]
+
+    # Map arches not being released from frozen to unstable
+    if changes["distribution"].has_key("frozen"):
+        for i in changes["architecture"].keys():
+            if not Cnf.has_key("Suite::Frozen::Architectures::%s" % (i)):
+                reject_message = reject_message + "Mapping frozen to unstable for non-releasing arch `%s'.\n" % (i)
+                del changes["distribution"]["frozen"]
+
+    # Handle uploads to stable
+    if changes["distribution"].has_key("stable"):
+        # If running from within proposed-updates kill non-stable distributions
+        if string.find(os.getcwd(), 'proposed-updates') != -1:
+            for i in ("frozen", "unstable"):
+                if changes["distribution"].has_key(i):
+                    reject_message = reject_message + "Removing %s from distribution list.\n" % (i)
+                    del changes["distribution"][i]
+        # Otherwise (normal case) map stable to updates
+        else:
+            reject_message = reject_message + "Mapping stable to updates.\n";
+            del changes["distribution"]["stable"];
+            changes["distribution"]["proposed-updates"] = 1;
+
+    # chopversion = no epoch; chopversion2 = no epoch and no revision (e.g. for .orig.tar.gz comparison)
+    changes["chopversion"] = utils.re_no_epoch.sub('', changes["version"])
+    changes["chopversion2"] = utils.re_no_revision.sub('', changes["chopversion"])
+
+    if string.find(reject_message, "Rejected:") != -1:
+        return 0
+    else:
+        return 1
+
+def check_files():
+    global reject_message
+
+    archive = utils.where_am_i();
+
+    for file in files.keys():
+        # Check the file is readable
+        if os.access(file, os.R_OK) == 0:
+            reject_message = reject_message + "Rejected: Can't read `%s'.\n" % (file)
+            files[file]["type"] = "unreadable";
+            continue
+        # If it's byhand skip remaining checks
+        if files[file]["section"] == "byhand":
+            files[file]["byhand"] = 1;
+            files[file]["type"] = "byhand";
+        # Checks for a binary package...
+        elif re_isadeb.match(file) != None:
+            # Extract package information using dpkg-deb
+            control = apt_pkg.ParseSection(apt_inst.debExtractControl(utils.open_file(file,"r")))
+
+            # Check for mandatory fields
+            if control.Find("Package") == None:
+                reject_message = reject_message + "Rejected: %s: No package field in control.\n" % (file)
+            if control.Find("Architecture") == None:
+                reject_message = reject_message + "Rejected: %s: No architecture field in control.\n" % (file)
+            if control.Find("Version") == None:
+                reject_message = reject_message + "Rejected: %s: No version field in control.\n" % (file)
+
+            # Ensure the package name matches the one given in the .changes
+            if not changes["binary"].has_key(control.Find("Package", "")):
+                reject_message = reject_message + "Rejected: %s: control file lists name as `%s', which isn't in changes file.\n" % (file, control.Find("Package", ""))
+
+            # Validate the architecture
+            if not Cnf.has_key("Suite::Unstable::Architectures::%s" % (control.Find("Architecture", ""))):
+                reject_message = reject_message + "Rejected: Unknown architecture '%s'.\n" % (control.Find("Architecture", ""))
+
+            # Check the architecture matches the one given in the .changes
+            if not changes["architecture"].has_key(control.Find("Architecture", "")):
+                reject_message = reject_message + "Rejected: %s: control file lists arch as `%s', which isn't in changes file.\n" % (file, control.Find("Architecture", ""))
+            # Check the section & priority match those given in the .changes (non-fatal)
+            if control.Find("Section") != None and files[file]["section"] != "" and files[file]["section"] != control.Find("Section"):
+                reject_message = reject_message + "Warning: %s control file lists section as `%s', but changes file has `%s'.\n" % (file, control.Find("Section", ""), files[file]["section"])
+            if control.Find("Priority") != None and files[file]["priority"] != "" and files[file]["priority"] != control.Find("Priority"):
+                reject_message = reject_message + "Warning: %s control file lists priority as `%s', but changes file has `%s'.\n" % (file, control.Find("Priority", ""), files[file]["priority"])
+
+            epochless_version = utils.re_no_epoch.sub('', control.Find("Version", ""))
+
+            files[file]["package"] = control.Find("Package");
+            files[file]["architecture"] = control.Find("Architecture");
+            files[file]["version"] = control.Find("Version");
+            files[file]["maintainer"] = control.Find("Maintainer", "");
+            if file[-5:] == ".udeb":
+                files[file]["dbtype"] = "udeb";
+            elif file[-4:] == ".deb":
+                files[file]["dbtype"] = "deb";
+            else:
+                reject_message = reject_message + "Rejected: %s is neither a .deb nor a .udeb.\n" % (file);
+            files[file]["type"] = "deb";
+            files[file]["fullname"] = "%s_%s_%s.deb" % (control.Find("Package", ""), epochless_version, control.Find("Architecture", ""))
control.Find("Architecture", "")) + files[file]["source"] = control.Find("Source", ""); + if files[file]["source"] == "": + files[file]["source"] = files[file]["package"]; + # Checks for a source package... + else: + m = re_issource.match(file) + if m != None: + files[file]["package"] = m.group(1) + files[file]["version"] = m.group(2) + files[file]["type"] = m.group(3) + + # Ensure the source package name matches the Source filed in the .changes + if changes["source"] != files[file]["package"]: + reject_message = reject_message + "Rejected: %s: changes file doesn't say %s for Source\n" % (file, files[file]["package"]) + + # Ensure the source version matches the version in the .changes file + if files[file]["type"] == "orig.tar.gz": + changes_version = changes["chopversion2"] + else: + changes_version = changes["chopversion"] + if changes_version != files[file]["version"]: + reject_message = reject_message + "Rejected: %s: should be %s according to changes file.\n" % (file, changes_version) + + # Ensure the .changes lists source in the Architecture field + if not changes["architecture"].has_key("source"): + reject_message = reject_message + "Rejected: %s: changes file doesn't list `source' in Architecture field.\n" % (file) + + # Check the signature of a .dsc file + if files[file]["type"] == "dsc": + check_signature(file) + + files[file]["fullname"] = file + + # Not a binary or source package? Assume byhand... + else: + files[file]["byhand"] = 1; + files[file]["type"] = "byhand"; + + files[file]["oldfiles"] = {} + for suite in changes["distribution"].keys(): + # Skip byhand + if files[file].has_key("byhand"): + continue + + if Cnf.has_key("Suite:%s::Components" % (suite)) and not Cnf.has_key("Suite::%s::Components::%s" % (suite, files[file]["component"])): + reject_message = reject_message + "Rejected: unknown component `%s' for suite `%s'.\n" % (files[file]["component"], suite) + continue + + # See if the package is NEW + if not in_override_p(files[file]["package"], files[file]["component"], suite): + files[file]["new"] = 1 + + # Find any old binary packages + if files[file]["type"] == "deb": + q = projectB.query("SELECT b.id, b.version, f.filename, l.path, c.name FROM binaries b, bin_associations ba, suite s, location l, component c, architecture a, files f WHERE b.package = '%s' AND s.suite_name = '%s' AND a.arch_string = '%s' AND ba.bin = b.id AND ba.suite = s.id AND b.architecture = a.id AND f.location = l.id AND l.component = c.id AND b.file = f.id" + % (files[file]["package"], suite, files[file]["architecture"])) + oldfiles = q.dictresult() + for oldfile in oldfiles: + files[file]["oldfiles"][suite] = oldfile + # Check versions [NB: per-suite only; no cross-suite checking done (yet)] + if apt_pkg.VersionCompare(files[file]["version"], oldfile["version"]) != 1: + if Cnf["Dinstall::Options::No-Version-Check"]: + reject_message = reject_message + "Overriden rejection" + else: + reject_message = reject_message + "Rejected" + reject_message = reject_message + ": %s Old version `%s' >= new version `%s'.\n" % (file, oldfile["version"], files[file]["version"]) + # Find any old .dsc files + elif files[file]["type"] == "dsc": + q = projectB.query("SELECT s.id, s.version, f.filename, l.path, c.name FROM source s, src_associations sa, suite su, location l, component c, files f WHERE s.source = '%s' AND su.suite_name = '%s' AND sa.source = s.id AND sa.suite = su.id AND f.location = l.id AND l.component = c.id AND f.id = s.file" + % (files[file]["package"], suite)) + oldfiles = q.dictresult() + if 
+                    files[file]["oldfiles"][suite] = oldfiles[0]
+
+            # Validate the component
+            component = files[file]["component"];
+            component_id = db_access.get_component_id(component);
+            if component_id == -1:
+                reject_message = reject_message + "Rejected: file '%s' has unknown component '%s'.\n" % (file, component);
+                continue;
+
+            # Check the md5sum & size against existing files (if any)
+            location = Cnf["Dir::PoolDir"];
+            files[file]["location id"] = db_access.get_location_id (location, component, archive);
+            files_id = db_access.get_files_id(component + '/' + file, files[file]["size"], files[file]["md5sum"], files[file]["location id"]);
+            if files_id == -1:
+                reject_message = reject_message + "Rejected: INTERNAL ERROR, get_files_id() returned multiple matches for %s.\n" % (file)
+            elif files_id == -2:
+                reject_message = reject_message + "Rejected: md5sum and/or size mismatch on existing copy of %s.\n" % (file)
+            files[file]["files id"] = files_id
+
+            # Check for packages that have moved from one component to another
+            if files[file]["oldfiles"].has_key(suite) and files[file]["oldfiles"][suite]["name"] != files[file]["component"]:
+                files[file]["othercomponents"] = files[file]["oldfiles"][suite]["name"];
+
+
+    if string.find(reject_message, "Rejected:") != -1:
+        return 0
+    else:
+        return 1
+
+###############################################################################
+
+def check_dsc ():
+    global dsc, dsc_files, reject_message, reprocess, orig_tar_id;
+
+    for file in files.keys():
+        if files[file]["type"] == "dsc":
+            try:
+                dsc = utils.parse_changes(file)
+            except utils.cant_open_exc:
+                reject_message = reject_message + "Rejected: can't read .dsc file '%s'.\n" % (file)
+                return 0;
+            except utils.changes_parse_error_exc, line:
+                reject_message = reject_message + "Rejected: error parsing .dsc file '%s', can't grok: %s.\n" % (file, line)
+                return 0;
+            try:
+                dsc_files = utils.build_file_list(dsc, 1)
+            except utils.no_files_exc:
+                reject_message = reject_message + "Rejected: no Files: field in .dsc file.\n";
+                continue;
+
+            # Try and find all files mentioned in the .dsc. This has
+            # to work harder to cope with the multiple possible
+            # locations of an .orig.tar.gz.
+            for dsc_file in dsc_files.keys():
+                if files.has_key(dsc_file):
+                    actual_md5 = files[dsc_file]["md5sum"]
+                    found = "%s in incoming" % (dsc_file)
+                elif dsc_file[-12:] == ".orig.tar.gz":
+                    # Check in Incoming
+                    # See comment above process_it() for explanation...
+                    if os.access(dsc_file, os.R_OK) != 0:
+                        files[dsc_file] = {};
+                        files[dsc_file]["size"] = os.stat(dsc_file)[stat.ST_SIZE];
+                        files[dsc_file]["md5sum"] = dsc_files[dsc_file]["md5sum"];
+                        files[dsc_file]["section"] = files[file]["section"];
+                        files[dsc_file]["priority"] = files[file]["priority"];
+                        files[dsc_file]["component"] = files[file]["component"];
+                        reprocess = 1;
+                        return 1;
+                    # Check in the pool
+                    q = projectB.query("SELECT l.path, f.filename, l.type, f.id FROM files f, location l WHERE f.filename ~ '/%s' AND l.id = f.location" % (dsc_file));
+                    ql = q.getresult();
+                    if len(ql) > 0:
+                        old_file = ql[0][0] + ql[0][1];
+                        actual_md5 = apt_pkg.md5sum(utils.open_file(old_file,"r"));
+                        found = old_file;
+                        suite_type = ql[0][2];
+                        # See install()...
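+                        # (ql[0] is (path, filename, type, files id) from the
+                        # SELECT above; install() later uses orig_tar_id to
+                        # poolify a legacy .orig.tar.gz, so for legacy and
+                        # legacy-mixed locations the file's id is remembered.)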
+                        if suite_type == "legacy" or suite_type == "legacy-mixed":
+                            orig_tar_id = ql[0][3];
+                    else:
+                        reject_message = reject_message + "Rejected: %s refers to %s, but I can't find it in Incoming or in the pool.\n" % (file, dsc_file);
+                        continue;
+                else:
+                    reject_message = reject_message + "Rejected: %s refers to %s, but I can't find it in Incoming.\n" % (file, dsc_file);
+                    continue;
+                if actual_md5 != dsc_files[dsc_file]["md5sum"]:
+                    reject_message = reject_message + "Rejected: md5sum for %s doesn't match %s.\n" % (found, file)
+
+    if string.find(reject_message, "Rejected:") != -1:
+        return 0
+    else:
+        return 1
+
+###############################################################################
+
+def check_md5sums ():
+    global reject_message;
+
+    for file in files.keys():
+        try:
+            file_handle = utils.open_file(file,"r");
+        except utils.cant_open_exc:
+            pass;
+        else:
+            if apt_pkg.md5sum(file_handle) != files[file]["md5sum"]:
+                reject_message = reject_message + "Rejected: md5sum check failed for %s.\n" % (file);
+
+#####################################################################################################################
+
+def action (changes_filename):
+    byhand = confirm = suites = summary = new = "";
+
+    # changes["distribution"] may not exist in corner cases
+    # (e.g. unreadable changes files)
+    if not changes.has_key("distribution"):
+        changes["distribution"] = {};
+
+    for suite in changes["distribution"].keys():
+        if Cnf.has_key("Suite::%s::Confirm" % (suite)):
+            confirm = confirm + suite + ", "
+        suites = suites + suite + ", "
+    confirm = confirm[:-2]
+    suites = suites[:-2]
+
+    for file in files.keys():
+        if files[file].has_key("byhand"):
+            byhand = 1
+            summary = summary + file + " byhand\n"
+        elif files[file].has_key("new"):
+            new = 1
+            summary = summary + "(new) %s %s %s\n" % (file, files[file]["priority"], files[file]["section"])
+            if files[file].has_key("othercomponents"):
+                summary = summary + "WARNING: Already present in %s distribution.\n" % (files[file]["othercomponents"])
+            if files[file]["type"] == "deb":
+                summary = summary + apt_pkg.ParseSection(apt_inst.debExtractControl(utils.open_file(file,"r")))["Description"] + '\n';
+        else:
+            files[file]["pool name"] = utils.poolify (changes["source"], files[file]["component"])
+            destination = Cnf["Dir::PoolRoot"] + files[file]["pool name"] + file
+            summary = summary + file + "\n  to " + destination + "\n"
+
+    short_summary = summary;
+
+    # This is for direport's benefit...
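+    # re_fdnic turns the blank lines in the Changes field into " ."
+    # continuation lines, e.g. "stanza one\n\nstanza two" becomes
+    # "stanza one\n .\nstanza two", so the field stays one RFC822-style
+    # block when it is pasted into the mails below.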
+    f = re_fdnic.sub("\n .\n", changes.get("changes",""));
+
+    if confirm or byhand or new:
+        summary = summary + "Changes: " + f;
+
+    summary = summary + announce (short_summary, 0)
+
+    (prompt, answer) = ("", "XXX")
+    if Cnf["Dinstall::Options::No-Action"] or Cnf["Dinstall::Options::Automatic"]:
+        answer = 'S'
+
+    if string.find(reject_message, "Rejected") != -1:
+        if time.time()-os.path.getmtime(changes_filename) < 86400:
+            print "SKIP (too new)\n" + reject_message,;
+            prompt = "[S]kip, Manual reject, Quit ?";
+        else:
+            print "REJECT\n" + reject_message,;
+            prompt = "[R]eject, Manual reject, Skip, Quit ?";
+            if Cnf["Dinstall::Options::Automatic"]:
+                answer = 'R';
+    elif new:
+        print "NEW to %s\n%s%s" % (suites, reject_message, summary),;
+        prompt = "[S]kip, New ack, Manual reject, Quit ?";
+        if Cnf["Dinstall::Options::Automatic"] and Cnf["Dinstall::Options::Ack-New"]:
+            answer = 'N';
+    elif byhand:
+        print "BYHAND\n" + reject_message + summary,;
+        prompt = "[I]nstall, Manual reject, Skip, Quit ?";
+    elif confirm:
+        print "CONFIRM to %s\n%s%s" % (confirm, reject_message, summary),
+        prompt = "[I]nstall, Manual reject, Skip, Quit ?";
+    else:
+        print "INSTALL\n" + reject_message + summary,;
+        prompt = "[I]nstall, Manual reject, Skip, Quit ?";
+        if Cnf["Dinstall::Options::Automatic"]:
+            answer = 'I';
+
+    while string.find(prompt, answer) == -1:
+        print prompt,;
+        answer = utils.our_raw_input()
+        m = re_default_answer.match(prompt)
+        if answer == "":
+            answer = m.group(1)
+        answer = string.upper(answer[:1])
+
+    if answer == 'R':
+        reject (changes_filename, "");
+    elif answer == 'M':
+        manual_reject (changes_filename);
+    elif answer == 'I':
+        install (changes_filename, summary, short_summary);
+    elif answer == 'N':
+        acknowledge_new (changes_filename, summary);
+    elif answer == 'Q':
+        sys.exit(0)
+
+#####################################################################################################################
+
+def install (changes_filename, summary, short_summary):
+    global install_count, install_bytes
+
+    print "Installing."
+
+    archive = utils.where_am_i();
+
+    # Begin a transaction; if we bomb out anywhere between here and the COMMIT WORK below, the DB will not be changed.
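+    # Roughly (illustrative), the bracket is:
+    #     BEGIN WORK;
+    #         INSERT INTO source/src_associations/dsc_files ...;
+    #         INSERT INTO binaries/bin_associations ...;
+    #     COMMIT WORK;
+    # so either every row for this upload lands in the DB, or none do.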
+ projectB.query("BEGIN WORK"); + + # Add the .dsc file to the DB + for file in files.keys(): + if files[file]["type"] == "dsc": + package = dsc["source"] + version = dsc["version"] # NB: not files[file]["version"], that has no epoch + maintainer = dsc["maintainer"] + maintainer = string.replace(maintainer, "'", "\\'") + maintainer_id = db_access.get_or_set_maintainer_id(maintainer); + filename = files[file]["pool name"] + file; + dsc_location_id = files[file]["location id"]; + if not files[file]["files id"]: + files[file]["files id"] = db_access.set_files_id (filename, files[file]["size"], files[file]["md5sum"], dsc_location_id) + dsc_file_id = files[file]["files id"] + projectB.query("INSERT INTO source (source, version, maintainer, file) VALUES ('%s', '%s', %d, %d)" + % (package, version, maintainer_id, files[file]["files id"])) + + for suite in changes["distribution"].keys(): + suite_id = db_access.get_suite_id(suite); + projectB.query("INSERT INTO src_associations (suite, source) VALUES (%d, currval('source_id_seq'))" % (suite_id)) + + + # Add the .diff.gz and {.orig,}.tar.gz files to the DB (files and dsc_files) + for file in files.keys(): + if files[file]["type"] == "diff.gz" or files[file]["type"] == "orig.tar.gz" or files[file]["type"] == "tar.gz": + if not files[file]["files id"]: + filename = files[file]["pool name"] + file; + files[file]["files id"] = db_access.set_files_id (filename, files[file]["size"], files[file]["md5sum"], files[file]["location id"]) + projectB.query("INSERT INTO dsc_files (source, file) VALUES (currval('source_id_seq'), %d)" % (files[file]["files id"])); + + # Add the .deb files to the DB + for file in files.keys(): + if files[file]["type"] == "deb": + package = files[file]["package"] + version = files[file]["version"] + maintainer = files[file]["maintainer"] + maintainer = string.replace(maintainer, "'", "\\'") + maintainer_id = db_access.get_or_set_maintainer_id(maintainer); + architecture = files[file]["architecture"] + architecture_id = db_access.get_architecture_id (architecture); + type = files[file]["dbtype"]; + component = files[file]["component"] + source = files[file]["source"] + source_version = "" + if string.find(source, "(") != -1: + m = utils.re_extract_src_version.match(source) + source = m.group(1) + source_version = m.group(2) + if not source_version: + source_version = version + filename = files[file]["pool name"] + file; + if not files[file]["files id"]: + files[file]["files id"] = db_access.set_files_id (filename, files[file]["size"], files[file]["md5sum"], files[file]["location id"]) + source_id = db_access.get_source_id (source, source_version); + if source_id: + projectB.query("INSERT INTO binaries (package, version, maintainer, source, architecture, file, type) VALUES ('%s', '%s', %d, %d, %d, %d, '%s')" + % (package, version, maintainer_id, source_id, architecture_id, files[file]["files id"], type)); + else: + projectB.query("INSERT INTO binaries (package, version, maintainer, architecture, file, type) VALUES ('%s', '%s', %d, %d, %d, '%s')" + % (package, version, maintainer_id, architecture_id, files[file]["files id"], type)); + for suite in changes["distribution"].keys(): + suite_id = db_access.get_suite_id(suite); + projectB.query("INSERT INTO bin_associations (suite, bin) VALUES (%d, currval('binaries_id_seq'))" % (suite_id)); + + # Install the files into the pool + for file in files.keys(): + if files[file].has_key("byhand"): + continue + destination = Cnf["Dir::PoolDir"] + files[file]["pool name"] + file + destdir = 
+        utils.move (file, destination)
+        install_bytes = install_bytes + float(files[file]["size"])
+
+    # Copy the .changes file across for suites which need it.
+    for suite in changes["distribution"].keys():
+        if Cnf.has_key("Suite::%s::CopyChanges" % (suite)):
+            destination = Cnf["Dir::RootDir"] + Cnf["Suite::%s::CopyChanges" % (suite)] + os.path.basename(changes_filename)
+            shutil.copy (changes_filename, destination)
+
+    # If the .orig.tar.gz is in a legacy directory we need to poolify
+    # it, so that apt-get source (and anything else that goes by the
+    # "Directory:" field in the Sources.gz file) works.
+    if orig_tar_id != None:
+        q = projectB.query("SELECT l.path, f.filename, f.id as files_id, df.source, df.id as dsc_files_id, f.size, f.md5sum FROM files f, dsc_files df, location l WHERE df.source IN (SELECT source FROM dsc_files WHERE file = %s) AND f.id = df.file AND l.id = f.location" % (orig_tar_id));
+        qd = q.dictresult();
+        for qid in qd:
+            # First move the files to the new location
+            legacy_filename = qid["path"]+qid["filename"];
+            pool_location = utils.poolify (files[file]["package"], files[file]["component"]);
+            pool_filename = pool_location + os.path.basename(qid["filename"]);
+            destination = Cnf["Dir::PoolDir"] + pool_location
+            utils.move(legacy_filename, destination);
+            # Update the DB: files table
+            new_files_id = db_access.set_files_id(pool_filename, qid["size"], qid["md5sum"], dsc_location_id);
+            # Update the DB: dsc_files table
+            projectB.query("INSERT INTO dsc_files (source, file) VALUES (%s, %s)" % (qid["source"], new_files_id));
+            # Update the DB: source table
+            if legacy_filename[-4:] == ".dsc":
+                projectB.query("UPDATE source SET file = %s WHERE id = %d" % (new_files_id, qid["source"]));
+
+        for qid in qd:
+            # Remove old data from the DB: dsc_files table
+            projectB.query("DELETE FROM dsc_files WHERE id = %s" % (qid["dsc_files_id"]));
+            # Remove old data from the DB: files table
+            projectB.query("DELETE FROM files WHERE id = %s" % (qid["files_id"]));
+
+    utils.move (changes_filename, Cnf["Dir::IncomingDir"] + 'DONE/' + os.path.basename(changes_filename))
+
+    projectB.query("COMMIT WORK");
+
+    install_count = install_count + 1;
+
+    if not Cnf["Dinstall::Options::No-Mail"]:
+        mail_message = """Return-Path: %s
+From: %s
+To: %s
+Bcc: troup@auric.debian.org
+Subject: %s INSTALLED
+
+%s
+Installing:
+%s
+
+%s""" % (Cnf["Dinstall::MyEmailAddress"], Cnf["Dinstall::MyEmailAddress"], changes["maintainer822"], changes_filename, reject_message, summary, installed_footer)
+        utils.send_mail (mail_message, "")
+        announce (short_summary, 1)
+
+#####################################################################################################################
+
+def reject (changes_filename, manual_reject_mail_filename):
+    print "Rejecting.\n"
+
+    base_changes_filename = os.path.basename(changes_filename);
+    reason_filename = re_changes.sub("reason", base_changes_filename);
+    reject_filename = "%s/REJECT/%s" % (Cnf["Dir::IncomingDir"], reason_filename);
+
+    # Move the .changes file and its contents into REJECT/
+    utils.move (changes_filename, "%s/REJECT/%s" % (Cnf["Dir::IncomingDir"], base_changes_filename));
+    for file in files.keys():
+        if os.access(file,os.R_OK) == 0:
+            utils.move (file, "%s/REJECT/%s" % (Cnf["Dir::IncomingDir"], file));
+
+    # If this is not a manual rejection, generate the .reason file and rejection mail message
+    if manual_reject_mail_filename == "":
+        if os.path.exists(reject_filename):
+            os.unlink(reject_filename);
+        fd = os.open(reject_filename, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0644);
+        os.write(fd, reject_message);
+        os.close(fd);
+        reject_mail_message = """From: %s
+To: %s
+Bcc: troup@auric.debian.org
+Subject: %s REJECTED
+
+%s
+===
+%s""" % (Cnf["Dinstall::MyEmailAddress"], changes["maintainer822"], changes_filename, reject_message, reject_footer);
+    else: # Have a manual rejection file to use
+        reject_mail_message = ""; # keep it defined for the send_mail call below
+
+    # Send the rejection mail if appropriate
+    if not Cnf["Dinstall::Options::No-Mail"]:
+        utils.send_mail (reject_mail_message, manual_reject_mail_filename);
+
+##################################################################
+
+def manual_reject (changes_filename):
+    # Build up the rejection email
+    user_email_address = string.replace(string.split(pwd.getpwuid(os.getuid())[4],',')[0], '.', '')
+    user_email_address = user_email_address + " <%s@%s>" % (pwd.getpwuid(os.getuid())[0], Cnf["Dinstall::MyHost"])
+    manual_reject_message = Cnf.get("Dinstall::Options::Manual-Reject", "")
+
+    reject_mail_message = """From: %s
+Cc: %s
+To: %s
+Bcc: troup@auric.debian.org
+Subject: %s REJECTED
+
+%s
+%s
+===
+%s""" % (user_email_address, Cnf["Dinstall::MyEmailAddress"], changes["maintainer822"], changes_filename, manual_reject_message, reject_message, reject_footer)
+
+    # Write the rejection email out as the .reason file
+    reason_filename = re_changes.sub("reason", os.path.basename(changes_filename));
+    reject_filename = "%s/REJECT/%s" % (Cnf["Dir::IncomingDir"], reason_filename)
+    if os.path.exists(reject_filename):
+        os.unlink(reject_filename);
+    fd = os.open(reject_filename, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0644);
+    os.write(fd, reject_mail_message);
+    os.close(fd);
+
+    # If we weren't given one, spawn an editor so the user can add one in
+    if manual_reject_message == "":
+        result = os.system("vi +6 %s" % (reject_filename))
+        if result != 0:
+            sys.stderr.write ("vi invocation failed for `%s'!" % (reject_filename))
+            sys.exit(result)
+
+    # Then process it as if it were an automatic rejection
+    reject (changes_filename, reject_filename)
+
+#####################################################################################################################
+
+def acknowledge_new (changes_filename, summary):
+    global new_ack_new;
+
+    new_ack_new[changes_filename] = 1;
+
+    if new_ack_old.has_key(changes_filename):
+        print "Ack already sent.";
+        return;
+
+    print "Sending new ack.";
+    if not Cnf["Dinstall::Options::No-Mail"]:
+        new_ack_message = """Return-Path: %s
+From: %s
+To: %s
+Bcc: troup@auric.debian.org
+Subject: %s is NEW
+
+%s
+%s""" % (Cnf["Dinstall::MyEmailAddress"], Cnf["Dinstall::MyEmailAddress"], changes["maintainer822"], changes_filename, summary, new_ack_footer);
+        utils.send_mail(new_ack_message,"");
+
+#####################################################################################################################
+
+def announce (short_summary, action):
+    # Only do announcements for source uploads with a recent dpkg-dev installed
+    if float(changes.get("format", 0)) < 1.6 or not changes["architecture"].has_key("source"):
+        return ""
+
+    lists_done = {}
+    summary = ""
+
+    for dist in changes["distribution"].keys():
+        list = Cnf["Suite::%s::Announce" % (dist)]
+        if lists_done.has_key(list):
+            continue
+        lists_done[list] = 1
+        summary = summary + "Announcing to %s\n" % (list)
+
+        if action:
+            mail_message = """Return-Path: %s
+From: %s
+To: %s
+Bcc: troup@auric.debian.org
+Subject: Installed %s %s (%s)
+
+%s
+
+Installed:
+%s
+""" % (Cnf["Dinstall::MyEmailAddress"], changes["maintainer822"], list, changes["source"], changes["version"], string.join(changes["architecture"].keys(), ' ' ),
+       changes["filecontents"], short_summary)
+            utils.send_mail (mail_message, "")
+
+    (dsc_rfc822, dsc_name, dsc_email) = utils.fix_maintainer (dsc.get("maintainer",Cnf["Dinstall::MyEmailAddress"]));
+    bugs = changes.get("closes", {}).keys()
+    bugs.sort()
+    if dsc_name == changes["maintainername"]:
+        summary = summary + "Closing bugs: "
+        for bug in bugs:
+            summary = summary + "%s " % (bug)
+            if action:
+                mail_message = """Return-Path: %s
+From: %s
+To: %s-close@bugs.debian.org
+Bcc: troup@auric.debian.org
+Subject: Bug#%s: fixed in %s %s
+
+We believe that the bug you reported is fixed in the latest version of
+%s, which has been installed in the Debian FTP archive:
+
+%s""" % (Cnf["Dinstall::MyEmailAddress"], changes["maintainer822"], bug, bug, changes["source"], changes["version"], changes["source"], short_summary)
+
+                if changes["distribution"].has_key("stable"):
+                    mail_message = mail_message + """Note that this package is not part of the released stable Debian
+distribution. It may have dependencies on other unreleased software,
+or other instabilities. Please take care if you wish to install it.
+The update will eventually make its way into the next released Debian
+distribution."""
+
+                mail_message = mail_message + """A summary of the changes between this version and the previous one is
+attached.
+
+Thank you for reporting the bug, which will now be closed. If you
+have further comments please address them to %s@bugs.debian.org,
+and the maintainer will reopen the bug report if appropriate.
+
+Debian distribution maintenance software
+pp.
+%s (supplier of updated %s package)
+
+(This message was generated automatically at their request; if you
+believe that there is a problem with it please contact the archive
+administrators by mailing ftpmaster@debian.org)
+
+
+%s""" % (bug, changes["maintainer"], changes["source"], changes["filecontents"])
+
+                utils.send_mail (mail_message, "")
+    else: # NMU
+        summary = summary + "Setting bugs to severity fixed: "
+        control_message = ""
+        for bug in bugs:
+            summary = summary + "%s " % (bug)
+            control_message = control_message + "severity %s fixed\n" % (bug)
+        if action and control_message != "":
+            mail_message = """Return-Path: %s
+From: %s
+To: control@bugs.debian.org
+Bcc: troup@auric.debian.org, %s
+Subject: Fixed in NMU of %s %s
+
+%s
+quit
+
+This message was generated automatically in response to a
+non-maintainer upload. The .changes file follows.
+
+%s
+""" % (Cnf["Dinstall::MyEmailAddress"], changes["maintainer822"], changes["maintainer822"], changes["source"], changes["version"], control_message, changes["filecontents"])
+            utils.send_mail (mail_message, "")
+    summary = summary + "\n"
+
+    return summary
+
+###############################################################################
+
+# reprocess is necessary for the case of foo_1.2-1 and foo_1.2-2 in
+# Incoming. -1 will reference the .orig.tar.gz, but -2 will not.
+# dsccheckdistrib() can find the .orig.tar.gz but it will not have
+# processed it during its checks of -2. If -1 has been deleted or
+# otherwise not checked by da-install, the .orig.tar.gz will not have
+# been checked at all. To get round this, we force the .orig.tar.gz
+# into the .changes structure and reprocess the .changes file.
+
+def process_it (changes_file):
+    global reprocess, orig_tar_id;
+
+    reprocess = 1;
+    orig_tar_id = None;
+
+    check_signature (changes_file);
+    check_changes (changes_file);
+    while reprocess:
+        reprocess = 0;
+        check_files ();
+        check_md5sums ();
+        check_dsc ();
+
+    action(changes_file);
+
+###############################################################################
+
+def main():
+    global Cnf, projectB, reject_message, install_bytes, new_ack_old
+
+    apt_pkg.init();
+
+    Cnf = apt_pkg.newConfiguration();
+    apt_pkg.ReadConfigFileISC(Cnf,utils.which_conf_file());
+
+    Arguments = [('a',"automatic","Dinstall::Options::Automatic"),
+                 ('d',"debug","Dinstall::Options::Debug", "IntVal"),
+                 ('h',"help","Dinstall::Options::Help"),
+                 ('k',"ack-new","Dinstall::Options::Ack-New"),
+                 ('m',"manual-reject","Dinstall::Options::Manual-Reject", "HasArg"),
+                 ('n',"no-action","Dinstall::Options::No-Action"),
+                 ('p',"no-lock", "Dinstall::Options::No-Lock"),
+                 ('r',"no-version-check", "Dinstall::Options::No-Version-Check"),
+                 ('s',"no-mail", "Dinstall::Options::No-Mail"),
+                 ('u',"override-distribution", "Dinstall::Options::Override-Distribution", "HasArg"),
+                 ('v',"version","Dinstall::Options::Version")];
+
+    changes_files = apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv);
+
+    if Cnf["Dinstall::Options::Help"]:
+        usage(0);
+
+    if Cnf["Dinstall::Options::Version"]:
+        print "katie version 0.0000000000";
+        usage(0);
+
+    postgresql_user = None; # Default == Connect as user running program.
+
+    # -n/--dry-run invalidates some other options which would involve things happening
+    if Cnf["Dinstall::Options::No-Action"]:
+        Cnf["Dinstall::Options::Automatic"] = ""
+        Cnf["Dinstall::Options::Ack-New"] = ""
+        postgresql_user = Cnf["DB::ROUser"];
+
+    projectB = pg.connect('projectb', Cnf["DB::Host"], int(Cnf["DB::Port"]), None, None, postgresql_user);
+
+    db_access.init(Cnf, projectB);
+
+    # Check that we aren't going to clash with the daily cron job
+
+    if os.path.exists("%s/Archive_Maintenance_In_Progress" % (Cnf["Dir::RootDir"])) and not Cnf["Dinstall::Options::No-Lock"]:
+        sys.stderr.write("Archive maintenance in progress. Try again later.\n");
+        sys.exit(2);
+
+    # Obtain lock if not in no-action mode
+
+    if not Cnf["Dinstall::Options::No-Action"]:
+        lock_fd = os.open(Cnf["Dinstall::LockFile"], os.O_RDWR);
+        fcntl.lockf(lock_fd, FCNTL.F_TLOCK);
+
+    # Read in the list of already-acknowledged NEW packages
+    new_ack_list = utils.open_file(Cnf["Dinstall::NewAckList"],'r');
+    new_ack_old = {};
+    for line in new_ack_list.readlines():
+        new_ack_old[line[:-1]] = 1;
+    new_ack_list.close();
+
+    # Process the changes files
+    for changes_file in changes_files:
+        reject_message = ""
+        print "\n" + changes_file;
+        process_it (changes_file);
+
+    install_mag = " b";
+    if install_bytes > 10000:
+        install_bytes = install_bytes / 1000;
+        install_mag = " Kb";
+    if install_bytes > 10000:
+        install_bytes = install_bytes / 1000;
+        install_mag = " Mb";
+    if install_count:
+        sets = "set"
+        if install_count > 1:
+            sets = "sets"
+        sys.stderr.write("Installed %d package %s, %d%s.\n" % (install_count, sets, int(install_bytes), install_mag))
+
+    # Write out the list of already-acknowledged NEW packages
+    if Cnf["Dinstall::Options::Ack-New"]:
+        new_ack_list = utils.open_file(Cnf["Dinstall::NewAckList"],'w')
+        for i in new_ack_new.keys():
+            new_ack_list.write(i+'\n')
+        new_ack_list.close()
+
+
+if __name__ == '__main__':
+    main()
+
diff --git a/neve b/neve
new file mode 100755
index 00000000..bb8b5f8c
--- /dev/null
+++ b/neve
@@ -0,0 +1,421 @@
+#!/usr/bin/env python
+
+# Populate the DB
+# Copyright (C) 2000 James Troup
+# $Id: neve,v 1.1.1.1 2000-11-24 00:20:09 troup Exp $
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+# 04:36| elmo: you're making me waste 5 seconds per architecture!!!!!! YOU BASTARD!!!!!
+
+################################################################################
+
+# This code is a horrible mess for two reasons:
+
+# (o) For Debian's usage, it's doing something like 160k INSERTs,
+# even on auric, that makes the program unusable unless we get
+# involved in all sorts of silly optimization games (local dicts to avoid
+# redundant SELECTs, using COPY FROM rather than INSERTs etc.)
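+#
+# The shape of that game, roughly: instead of issuing
+#     INSERT INTO files (filename, size, md5sum, location) VALUES (...);
+# once per row, each table gets a temporary file of tab-separated rows
+# (the *_query_cache variables below) which is then, presumably, loaded
+# in one shot with something like
+#     COPY files FROM '/path/to/temp-file';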
+
+# (o) It's very site specific, because I don't expect to use this
+# script again in a hurry, and I don't want to spend any more time
+# on it than absolutely necessary.
+
+###############################################################################################################
+
+import commands, os, pg, re, sys, string, tempfile
+import apt_pkg
+import db_access, utils
+
+###############################################################################################################
+
+re_arch_from_filename = re.compile(r"binary-[^/]+")
+
+###############################################################################################################
+
+Cnf = None;
+projectB = None;
+files_id_cache = {};
+source_cache = {};
+arch_all_cache = {};
+binary_cache = {};
+#
+files_id_serial = 0;
+source_id_serial = 0;
+src_associations_id_serial = 0;
+dsc_files_id_serial = 0;
+files_query_cache = None;
+source_query_cache = None;
+src_associations_query_cache = None;
+dsc_files_query_cache = None;
+orig_tar_gz_cache = {};
+#
+binaries_id_serial = 0;
+binaries_query_cache = None;
+bin_associations_id_serial = 0;
+bin_associations_query_cache = None;
+#
+source_cache_for_binaries = {};
+
+###############################################################################################################
+
+# Prepares a filename or directory (s) to be file.filename by stripping any part of the location (sub) from it.
+def poolify (s, sub):
+    for i in xrange(len(sub)):
+        if sub[i:] == s[0:len(sub)-i]:
+            return s[len(sub)-i:];
+    return s;
+
+def update_archives ():
+    projectB.query("DELETE FROM archive")
+    for archive in Cnf.SubTree("Archive").List():
+        SubSec = Cnf.SubTree("Archive::%s" % (archive));
+        projectB.query("INSERT INTO archive (name, origin_server, description) VALUES ('%s', '%s', '%s')"
+                       % (archive, SubSec["OriginServer"], SubSec["Description"]));
+
+def update_components ():
+    projectB.query("DELETE FROM component")
+    for component in Cnf.SubTree("Component").List():
+        SubSec = Cnf.SubTree("Component::%s" % (component));
+        projectB.query("INSERT INTO component (name, description, meets_dfsg) VALUES ('%s', '%s', '%s')" %
+                       (component, SubSec["Description"], SubSec["MeetsDFSG"]));
+
+def update_locations ():
+    projectB.query("DELETE FROM location")
+    for location in Cnf.SubTree("Location").List():
+        SubSec = Cnf.SubTree("Location::%s" % (location));
+        archive_id = db_access.get_archive_id(SubSec["archive"]);
+        type = SubSec.Find("type");
+        if type == "legacy-mixed":
+            projectB.query("INSERT INTO location (path, archive, type) VALUES ('%s', %d, '%s')" % (location, archive_id, SubSec["type"]));
+        else:
+            for component in Cnf.SubTree("Component").List():
+                component_id = db_access.get_component_id(component);
+                projectB.query("INSERT INTO location (path, component, archive, type) VALUES ('%s', %d, %d, '%s')" %
+                               (location, component_id, archive_id, SubSec["type"]));
+
+def update_architectures ():
+    projectB.query("DELETE FROM architecture")
+    for arch in Cnf.SubTree("Architectures").List():
+        projectB.query("INSERT INTO architecture (arch_string, description) VALUES ('%s', '%s')" % (arch, Cnf["Architectures::%s" % (arch)]))
+
+def update_suites ():
+    projectB.query("DELETE FROM suite")
+    for suite in Cnf.SubTree("Suite").List():
+        SubSec = Cnf.SubTree("Suite::%s" %(suite))
+        projectB.query("INSERT INTO suite (suite_name, version, origin, description) VALUES ('%s', '%s', '%s', '%s')"
+                       % (string.lower(suite), SubSec["Version"], SubSec["Origin"], SubSec["Description"]))
+        for architecture in Cnf.SubTree("Suite::%s::Architectures" % (suite)).List():
+        for architecture in Cnf.SubTree("Suite::%s::Architectures" % (suite)).List():
+            architecture_id = db_access.get_architecture_id (architecture);
+            projectB.query("INSERT INTO suite_architectures (suite, architecture) VALUES (currval('suite_id_seq'), %d)" % (architecture_id));
+
+##############################################################################################
+
+def get_or_set_files_id (filename, size, md5sum, location_id):
+    global files_id_cache, files_id_serial, files_query_cache;
+
+    cache_key = string.join((filename, size, md5sum, repr(location_id)), '~')
+    if not files_id_cache.has_key(cache_key):
+        files_id_serial = files_id_serial + 1
+        files_query_cache.write("%d\t%s\t%s\t%s\t%d\n" % (files_id_serial, filename, size, md5sum, location_id));
+        files_id_cache[cache_key] = files_id_serial
+
+    return files_id_cache[cache_key]
+
+##############################################################################################
+
+def process_sources (location, filename, suite, component, archive):
+    global source_cache, source_query_cache, src_associations_query_cache, dsc_files_query_cache, source_id_serial, src_associations_id_serial, dsc_files_id_serial, source_cache_for_binaries, orig_tar_gz_cache;
+
+    suite = string.lower(suite)
+    suite_id = db_access.get_suite_id(suite);
+    if suite == 'stable':
+        testing_id = db_access.get_suite_id("testing");
+    try:
+        file = utils.open_file (filename, "r")
+    except utils.cant_open_exc:
+        print "WARNING: can't open '%s'" % (filename);
+        return;
+    Scanner = apt_pkg.ParseTagFile(file)
+    while Scanner.Step() != 0:
+        package = Scanner.Section["package"]
+        version = Scanner.Section["version"]
+        maintainer = Scanner.Section["maintainer"]
+        maintainer = string.replace(maintainer, "'", "\\'")
+        maintainer_id = db_access.get_or_set_maintainer_id(maintainer);
+        directory = Scanner.Section["directory"]
+        location_id = db_access.get_location_id (location, component, archive)
+        if directory[-1:] != "/":
+            directory = directory + '/';
+        directory = poolify (directory, location);
+        if directory != "" and directory[-1:] != "/":
+            directory = directory + '/';
+        no_epoch_version = utils.re_no_epoch.sub('', version)
+        # Add all files referenced by the .dsc to the files table
+        ids = [];
+        for line in string.split(Scanner.Section["files"],'\n'):
+            id = None;
+            (md5sum, size, filename) = string.split(string.strip(line));
+            # Don't duplicate .orig.tar.gz's
+            if filename[-12:] == ".orig.tar.gz":
+                cache_key = "%s~%s~%s" % (filename, size, md5sum);
+                if orig_tar_gz_cache.has_key(cache_key):
+                    id = orig_tar_gz_cache[cache_key];
+                else:
+                    id = get_or_set_files_id (directory + filename, size, md5sum, location_id);
+                    orig_tar_gz_cache[cache_key] = id;
+            else:
+                id = get_or_set_files_id (directory + filename, size, md5sum, location_id);
+            ids.append(id);
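+            # At this point get_or_set_files_id() has queued one
+            # tab-separated row per file for the eventual COPY into
+            # `files'; a (purely hypothetical) row would look like
+            #
+            #   42\tdists/potato/main/source/foo_1.0-1.dsc\t812\t<md5sum>\t3
+            #
+            # i.e. serial id, filename, size, md5sum and location id.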
+            # If this is the .dsc itself, save the ID for later.
+            if filename[-4:] == ".dsc":
+                files_id = id;
+        filename = directory + package + '_' + no_epoch_version + '.dsc'
+        cache_key = "%s~%s" % (package, version)
+        if not source_cache.has_key(cache_key):
+            nasty_key = "%s~%s" % (package, version)
+            source_id_serial = source_id_serial + 1;
+            if not source_cache_for_binaries.has_key(nasty_key):
+                source_cache_for_binaries[nasty_key] = source_id_serial;
+            tmp_source_id = source_id_serial;
+            source_cache[cache_key] = source_id_serial;
+            source_query_cache.write("%d\t%s\t%s\t%d\t%d\n" % (source_id_serial, package, version, maintainer_id, files_id))
+            for id in ids:
+                dsc_files_id_serial = dsc_files_id_serial + 1;
+                dsc_files_query_cache.write("%d\t%d\t%d\n" % (dsc_files_id_serial, tmp_source_id, id));
+        else:
+            tmp_source_id = source_cache[cache_key];
+
+        src_associations_id_serial = src_associations_id_serial + 1;
+        src_associations_query_cache.write("%d\t%d\t%d\n" % (src_associations_id_serial, suite_id, tmp_source_id))
+        # populate 'testing' with a mirror of 'stable'
+        if suite == "stable":
+            src_associations_id_serial = src_associations_id_serial + 1;
+            src_associations_query_cache.write("%d\t%d\t%d\n" % (src_associations_id_serial, testing_id, tmp_source_id))
+
+    file.close()
+
+##############################################################################################
+
+def process_packages (location, filename, suite, component, archive):
+    global arch_all_cache, binary_cache, binaries_id_serial, binaries_query_cache, bin_associations_id_serial, bin_associations_query_cache;
+
+    count_total = 0;
+    count_bad = 0;
+    suite = string.lower(suite);
+    suite_id = db_access.get_suite_id(suite);
+    if suite == "stable":
+        testing_id = db_access.get_suite_id("testing");
+    try:
+        file = utils.open_file (filename, "r")
+    except utils.cant_open_exc:
+        print "WARNING: can't open '%s'" % (filename);
+        return;
+    Scanner = apt_pkg.ParseTagFile(file);
+    while Scanner.Step() != 0:
+        package = Scanner.Section["package"]
+        version = Scanner.Section["version"]
+        maintainer = Scanner.Section["maintainer"]
+        maintainer = string.replace(maintainer, "'", "\\'")
+        maintainer_id = db_access.get_or_set_maintainer_id(maintainer);
+        architecture = Scanner.Section["architecture"]
+        architecture_id = db_access.get_architecture_id (architecture);
+        if not Scanner.Section.has_key("source"):
+            source = package
+        else:
+            source = Scanner.Section["source"]
+        source_version = ""
+        if string.find(source, "(") != -1:
+            m = utils.re_extract_src_version.match(source)
+            source = m.group(1)
+            source_version = m.group(2)
+        if not source_version:
+            source_version = version
+        filename = Scanner.Section["filename"]
+        location_id = db_access.get_location_id (location, component, archive)
+        filename = poolify (filename, location)
+        if architecture == "all":
+            filename = re_arch_from_filename.sub("binary-all", filename);
+        cache_key = "%s~%s" % (source, source_version);
+        source_id = source_cache_for_binaries.get(cache_key, None);
+        size = Scanner.Section["size"];
+        md5sum = Scanner.Section["md5sum"];
+        files_id = get_or_set_files_id (filename, size, md5sum, location_id);
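+        # A binary whose source isn't known to this run ends up with a
+        # NULL source id; PostgreSQL's COPY text format spells NULL as
+        # `\N', which is why the literal "\N" is written below.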
+        type = "deb"; # FIXME
+        cache_key = "%s~%s~%s~%d~%d~%d" % (package, version, repr(source_id), architecture_id, location_id, files_id);
+        if not arch_all_cache.has_key(cache_key):
+            arch_all_cache[cache_key] = 1;
+            cache_key = "%s~%s~%s~%d" % (package, version, repr(source_id), architecture_id);
+            if not binary_cache.has_key(cache_key):
+                if not source_id:
+                    source_id = "\N";
+                    count_bad = count_bad + 1;
+                else:
+                    source_id = repr(source_id);
+                binaries_id_serial = binaries_id_serial + 1;
+                binaries_query_cache.write("%d\t%s\t%s\t%d\t%s\t%d\t%d\t%s\n" % (binaries_id_serial, package, version, maintainer_id, source_id, architecture_id, files_id, type));
+                binary_cache[cache_key] = binaries_id_serial;
+                tmp_binaries_id = binaries_id_serial;
+            else:
+                tmp_binaries_id = binary_cache[cache_key];
+
+            bin_associations_id_serial = bin_associations_id_serial + 1;
+            bin_associations_query_cache.write("%d\t%d\t%d\n" % (bin_associations_id_serial, suite_id, tmp_binaries_id));
+            if suite == "stable":
+                bin_associations_id_serial = bin_associations_id_serial + 1;
+                bin_associations_query_cache.write("%d\t%d\t%d\n" % (bin_associations_id_serial, testing_id, tmp_binaries_id));
+            count_total = count_total + 1;
+
+    file.close();
+    if count_bad != 0:
+        print "%d binary packages processed; %d (%.2f%%) with no source match." % (count_total, count_bad, (float(count_bad)/count_total)*100);
+    else:
+        print "%d binary packages processed; 0 (0%%) with no source match." % (count_total);
+
+##############################################################################################
+
+def do_sources(location, prefix, suite, component, server):
+    temp_filename = tempfile.mktemp();
+    fd = os.open(temp_filename, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0700);
+    os.close(fd);
+    sources = location + prefix + 'Sources.gz';
+    (result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (sources, temp_filename));
+    if (result != 0):
+        sys.stderr.write("Gunzip invocation failed!\n%s\n" % (output));
+        sys.exit(result);
+    print 'Processing '+sources+'...';
+    process_sources (location, temp_filename, suite, component, server);
+    os.unlink(temp_filename);
+
+##############################################################################################
+
+def main ():
+    global Cnf, projectB, query_cache, files_query_cache, source_query_cache, src_associations_query_cache, dsc_files_query_cache, bin_associations_query_cache, binaries_query_cache;
+
+    apt_pkg.init();
+
+    Cnf = apt_pkg.newConfiguration();
+    apt_pkg.ReadConfigFileISC(Cnf,utils.which_conf_file());
+
+    print "Re-Creating DB..."
+    (result, output) = commands.getstatusoutput("psql -f init_pool.sql")
+    if (result != 0):
+        sys.exit(2)
+    print output
+
+    projectB = pg.connect('projectb', 'localhost', -1, None, None, 'postgres')
+
+    db_access.init (Cnf, projectB);
+
+    print "Adding static tables from conf file..."
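+    # The DELETE-and-reinsert rebuilds below are grouped in a single
+    # transaction so anything else reading projectb never sees the
+    # static tables half-populated; the new rows become visible
+    # atomically at COMMIT.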
+    projectB.query("BEGIN WORK");
+    update_architectures();
+    update_components();
+    update_archives();
+    update_locations();
+    update_suites();
+    projectB.query("COMMIT WORK");
+
+    files_query_cache = utils.open_file(Cnf["Neve::ExportDir"]+"files","w");
+    source_query_cache = utils.open_file(Cnf["Neve::ExportDir"]+"source","w");
+    src_associations_query_cache = utils.open_file(Cnf["Neve::ExportDir"]+"src_associations","w");
+    dsc_files_query_cache = utils.open_file(Cnf["Neve::ExportDir"]+"dsc_files","w");
+    binaries_query_cache = utils.open_file(Cnf["Neve::ExportDir"]+"binaries","w");
+    bin_associations_query_cache = utils.open_file(Cnf["Neve::ExportDir"]+"bin_associations","w");
+
+    projectB.query("BEGIN WORK");
+    # Process Sources files to populate `source' and friends
+    for location in Cnf.SubTree("Location").List():
+        SubSec = Cnf.SubTree("Location::%s" % (location));
+        server = SubSec["Archive"];
+        type = Cnf.Find("Location::%s::Type" % (location));
+        if type == "legacy-mixed":
+            prefix = ''
+            suite = Cnf.Find("Location::%s::Suite" % (location));
+            do_sources(location, prefix, suite, "", server);
+        elif type == "legacy":
+            for suite in Cnf.SubTree("Location::%s::Suites" % (location)).List():
+                for component in Cnf.SubTree("Component").List():
+                    prefix = Cnf.Find("Suite::%s::CodeName" % (suite)) + '/' + component + '/source/'
+                    do_sources(location, prefix, suite, component, server);
+        elif type == "pool":
+            continue;
+#            for component in Cnf.SubTree("Component").List():
+#                prefix = component + '/'
+#                do_sources(location, prefix);
+        else:
+            sys.stderr.write("Unknown location type ('%s').\n" % (type));
+            sys.exit(2);
+
+    # Process Packages files to populate `binaries' and friends
+
+    for location in Cnf.SubTree("Location").List():
+        SubSec = Cnf.SubTree("Location::%s" % (location));
+        server = SubSec["Archive"];
+        type = Cnf.Find("Location::%s::Type" % (location));
+        if type == "legacy-mixed":
+            packages = location + 'Packages';
+            suite = Cnf.Find("Location::%s::Suite" % (location));
+            print 'Processing '+location+'...';
+            process_packages (location, packages, suite, "", server);
+        elif type == "legacy":
+            for suite in Cnf.SubTree("Location::%s::Suites" % (location)).List():
+                for component in Cnf.SubTree("Component").List():
+                    for architecture in Cnf.SubTree("Suite::%s::Architectures" % (suite)).List():
+                        if architecture == "source" or architecture == "all":
+                            continue;
+                        packages = location + Cnf.Find("Suite::%s::CodeName" % (suite)) + '/' + component + '/binary-' + architecture + '/Packages'
+                        print 'Processing '+packages+'...';
+                        process_packages (location, packages, suite, component, server);
+        elif type == "pool":
+            continue;
+
+    files_query_cache.close();
+    source_query_cache.close();
+    src_associations_query_cache.close();
+    dsc_files_query_cache.close();
+    binaries_query_cache.close();
+    bin_associations_query_cache.close();
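+    # Note that COPY table FROM 'file' is performed by the PostgreSQL
+    # backend itself, not by this script, so the export files written
+    # above have to be readable by the database server; their format is
+    # the same tab-separated text, with \N standing for NULL.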
print "Writing data to `bin_associations' table..."; + projectB.query("COPY bin_associations FROM '%s'" % (Cnf["Neve::ExportDir"]+"bin_associations")); + print "Committing..."; + projectB.query("COMMIT WORK"); + + # Add the constraints and otherwise generally clean up the database. + # See add_constraints.sql for more details... + + print "Running add_constraints.sql..."; + (result, output) = commands.getstatusoutput("psql projectb < add_constraints.sql"); + print output + if (result != 0): + sys.stderr.write("psql invocation failed!\n"); + sys.exit(result); + + return; + +if __name__ == '__main__': + main() diff --git a/rhona b/rhona new file mode 100755 index 00000000..be70dddb --- /dev/null +++ b/rhona @@ -0,0 +1,164 @@ +#!/usr/bin/env python + +# rhona, cleans up unassociated binary (and source) packages +# Copyright (C) 2000 James Troup +# $Id: rhona,v 1.1.1.1 2000-11-24 00:20:10 troup Exp $ + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +# 07:05| well.. *shrug*.. no, probably not.. but to fix it, +# | we're going to have to implement reference counting +# | through dependencies.. do we really want to go down +# | that road? +# +# 07:05| elmo: Augh! + +import pg, string, os, sys, time +import apt_pkg +import utils + +projectB = None +Cnf = None + +def check_binaries(): + + # A nicer way to do this would be `SELECT bin FROM + # bin_associations EXCEPT SELECT id from binaries WHERE + # last_update IS NULL', but it seems postgresql can't handle that + # query as it hadn't return after I left it running for 20 minutes + # on auric. + + linked_binaries = {}; + q = projectB.query("SELECT bin FROM bin_associations"); + ql = q.getresult(); + for i in ql: + linked_binaries[i[0]] = ""; + + all_binaries = {}; + q = projectB.query("SELECT b.id, b.file FROM binaries b, files f WHERE f.last_used IS NULL AND f.id = b.file") + ql = q.getresult(); + for i in ql: + all_binaries[i[0]] = i[1]; + + projectB.query("BEGIN WORK"); + for id in all_binaries.keys(): + if not linked_binaries.has_key(id): + date = time.strftime("%Y-%m-%d %H:%M", time.localtime(time.time()-(4*(24*60*60)))) + projectB.query("UPDATE files SET last_used = '%s' WHERE id = %s" % (date, all_binaries[id])) + projectB.query("COMMIT WORK"); + + # Check for any binaries which are marked for eventual deletion but are now used again. + + all_marked_binaries = {}; + q = projectB.query("SELECT b.id, b.file FROM binaries b, files f WHERE f.last_used IS NOT NULL AND f.id = b.file") + ql = q.getresult(); + for i in ql: + all_marked_binaries[i[0]] = i[1]; + projectB.query("BEGIN WORK"); + for id in all_marked_binaries.keys(): + if linked_binaries.has_key(id): + # Can't imagine why this would happen, so warn about it for now. + print "W: %s has released %s from the target list." 
+    projectB.query("BEGIN WORK");
+    for id in all_marked_binaries.keys():
+        if linked_binaries.has_key(id):
+            # Can't imagine why this would happen, so warn about it for now.
+            print "W: %s has released %s from the target list." % (id, all_marked_binaries[id]);
+            projectB.query("UPDATE files SET last_used = NULL WHERE id = %s" % (all_marked_binaries[id]));
+    projectB.query("COMMIT WORK");
+
+def check_sources():
+
+    # A nicer way to do this would be to use `EXCEPT', but see the
+    # comment in check_binaries().
+
+    linked_sources = {};
+    q = projectB.query("SELECT source FROM binaries WHERE source is not null");
+    ql = q.getresult();
+    for i in ql:
+        linked_sources[i[0]] = "";
+
+    all_sources = {};
+    q = projectB.query("SELECT s.id, s.file FROM source s, files f WHERE f.last_used IS NULL AND f.id = s.file")
+    ql = q.getresult();
+    for i in ql:
+        all_sources[i[0]] = i[1];
+
+    projectB.query("BEGIN WORK");
+    for id in all_sources.keys():
+        if not linked_sources.has_key(id):
+            date = time.strftime("%Y-%m-%d %H:%M", time.localtime(time.time()-(4*(24*60*60))))
+            projectB.query("UPDATE files SET last_used = '%s' WHERE id = %s" % (date, all_sources[id]))
+            # Mark all the other files referenced by the .dsc too, if they're not used by anyone else
+            q = projectB.query("SELECT f.id FROM files f, dsc_files d WHERE d.source = %d AND d.file = f.id" % (id));
+            ql = q.getresult();
+            for i in ql:
+                q_others = projectB.query("SELECT id FROM dsc_files d WHERE file = %s" % (i[0]));
+                ql_others = q_others.getresult();
+                if len(ql_others) == 1:
+                    projectB.query("UPDATE files SET last_used = '%s' WHERE id = %s" % (date, i[0]));
+    projectB.query("COMMIT WORK");
+
+    # Check for any sources which are marked for eventual deletion but are now used again.
+    # Need to check versus dsc_files too!
+
+    all_marked_sources = {};
+    q = projectB.query("SELECT s.id, s.file FROM source s, files f WHERE f.last_used IS NOT NULL AND f.id = s.file");
+    ql = q.getresult();
+    for i in ql:
+        all_marked_sources[i[0]] = i[1];
+    projectB.query("BEGIN WORK");
+    for id in all_marked_sources.keys():
+        if linked_sources.has_key(id):
+            # Can't imagine why this would happen, so warn about it for now.
+            print "W: %s has released %s from the target list." % (id, all_marked_sources[id]);
+            projectB.query("UPDATE files SET last_used = NULL WHERE id = %s" % (all_marked_sources[id]));
+            # Unmark all the other files referenced by the .dsc too
+            q = projectB.query("SELECT file FROM dsc_files WHERE source = %d" % (id));
+            ql = q.getresult();
+            for i in ql:
+                projectB.query("UPDATE files SET last_used = NULL WHERE id = %s" % (i[0]));
+    projectB.query("COMMIT WORK");
+
+def clean():
+    date = time.strftime("%Y-%m-%d %H:%M", time.localtime(time.time()-int(Cnf["Rhona::StayOfExecution"])));
+    q = projectB.query("SELECT l.path, f.filename FROM location l, files f WHERE f.last_used < '%s' AND l.id = f.location" % (date))
+    ql = q.getresult();
+    for i in ql:
+        filename = i[0] + i[1];
+        dest = Cnf["Rhona::Morgue"]+os.path.basename(filename);
+        if not os.path.exists(filename):
+            sys.stderr.write("E: cannot find %s.\n" % (filename));
+            continue;
+        print "Cleaning %s to %s..." % (filename, dest)
+        #utils.move(filename, dest);
+        #projectB.query("DELETE FROM binaries WHERE id = %s" % (i[0]));
+        #FIXME: need to remove from "or source" + files + dsc_files.. etc.
+
+def main():
+    global Cnf, projectB;
+
+    projectB = pg.connect('projectb', 'localhost');
+
+    apt_pkg.init();
+
+    Cnf = apt_pkg.newConfiguration();
+    apt_pkg.ReadConfigFileISC(Cnf,utils.which_conf_file());
+
+    print "Checking for orphaned binary packages..."
+    check_binaries();
+    print "Checking for orphaned source packages..."
+    check_sources();
+    print "Cleaning orphaned packages..."
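+    # clean() is driven by two configuration values; an entirely
+    # hypothetical katie.conf stanza might look like:
+    #
+    #   Rhona
+    #   {
+    #      StayOfExecution "1209600";   // seconds, i.e. two weeks
+    #      Morgue "/org/ftp.debian.org/morgue/";
+    #   };
+    #
+    # StayOfExecution is a delay in seconds (it is passed straight to
+    # int() and subtracted from time.time()); Morgue is the directory
+    # cleaned files would be moved to.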
+ clean(); + +if __name__ == '__main__': + main() + diff --git a/utils.py b/utils.py new file mode 100644 index 00000000..29175bbd --- /dev/null +++ b/utils.py @@ -0,0 +1,233 @@ +# Utility functions +# Copyright (C) 2000 James Troup +# $Id: utils.py,v 1.1.1.1 2000-11-24 00:20:09 troup Exp $ + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +import commands, os, re, socket, shutil, stat, string, sys, tempfile + +re_comments = re.compile(r"\#.*") +re_no_epoch = re.compile(r"^\d*\:") +re_no_revision = re.compile(r"\-[^-]*$") +re_arch_from_filename = re.compile(r"/binary-[^/]+/") +re_extract_src_version = re.compile (r"(\S+)\s*\((.*)\)") + +changes_parse_error_exc = "Can't parse line in .changes file"; +nk_format_exc = "Unknown Format: in .changes file"; +no_files_exc = "No Files: field in .dsc file."; +cant_open_exc = "Can't read file."; +unknown_hostname_exc = "Unknown hostname"; + +###################################################################################### + +def open_file(filename, mode): + try: + f = open(filename, mode); + except IOError: + raise cant_open_exc, filename + return f + +###################################################################################### + +# From reportbug +def our_raw_input(): + sys.stdout.flush() + try: + ret = raw_input() + return ret + except EOFError: + sys.stderr.write('\nUser interrupt (^D).\n') + raise SystemExit + +###################################################################################### + +def parse_changes(filename): + changes_in = open_file(filename,'r'); + error = "" + changes = {}; + lines = changes_in.readlines(); + for line in lines: + if re.match('^-----BEGIN PGP SIGNATURE', line): + break; + if re.match(r'^\s*$|^-----BEGIN PGP SIGNED MESSAGE', line): + continue; + slf = re.match(r'^(\S*)\s*:\s*(.*)', line); + if slf: + field = string.lower(slf.groups()[0]); + changes[field] = slf.groups()[1]; + continue; + mld = re.match(r'^ \.$', line); + if mld: + changes[field] = changes[field] + '\n'; + continue; + mlf = re.match(r'^\s(.*)', line); + if mlf: + changes[field] = changes[field] + mlf.groups()[0] + '\n'; + continue; + error = error + line; + changes_in.close(); + changes["filecontents"] = string.join (lines, ""); + if error != "": + raise changes_parse_error_exc, error; + return changes; + +###################################################################################### + +# Dropped support for 1.4 and ``buggy dchanges 3.4'' (?!) compared to di.pl + +def build_file_list(changes, dsc): + files = {} + format = changes.get("format", "") + if format != "": + format = float(format) + if dsc == "" and (format < 1.5 or format > 2.0): + raise nk_format_exc, changes["format"]; + + # No really, this has happened. Think 0 length .dsc file. 
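+    # For reference, each line of a Files: field carries five
+    # space-separated values in a .changes (md5sum, size, section,
+    # priority, filename) but only three in a .dsc (md5sum, size,
+    # filename); a hypothetical .changes entry:
+    #
+    #   <md5sum> 12345 utils optional foo_1.0-1_i386.deb
+    #
+    # which is exactly what the unpacking below expects.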
+ if not changes.has_key("files"): + raise no_files_exc + + for i in string.split(changes["files"], "\n"): + if i == "": + break + s = string.split(i) + section = priority = component = "" + if dsc != "": + (md5, size, name) = s + else: + (md5, size, section, priority, name) = s + + if section == "": section = "-" + if priority == "": priority = "-" + + if string.find(section, '/') != -1: + component = string.split(section, '/')[0] + if string.lower(component) == "non-us": + component = string.split(section, '/')[0]+ '/' + string.split(section, '/')[1] + + if component == "": + component = "main" + + files[name] = { "md5sum" : md5, + "size" : size, + "section": section, + "priority": priority, + "component": component } + + return files + +###################################################################################### + +# Fix the `Maintainer:' field to be an RFC822 compatible address. +# cf. Packaging Manual (4.2.4) +# +# 06:28| 'The standard sucks, but my tool is supposed to +# interoperate with it. I know - I'll fix the suckage +# and make things incompatible!' + +def fix_maintainer (maintainer): + m = re.match(r"^\s*(\S.*\S)\s*\<([^\> \t]+)\>", maintainer) + rfc822 = maintainer + name = "" + email = "" + if m != None and len(m.groups()) == 2: + name = m.group(1) + email = m.group(2) + if re.search(r'[,.]', name) != None: + rfc822 = re.sub(r"^\s*(\S.*\S)\s*\<([^\> \t]+)\>", r"\2 (\1)", maintainer) + return (rfc822, name, email) + +###################################################################################### + +# sendmail wrapper, takes _either_ a message string or a file as arguments +def send_mail (message, filename): + #### FIXME, how do I get this out of Cnf in katie? + sendmail_command = "/usr/sbin/sendmail -oi -t"; + + # Sanity check arguments + if message != "" and filename != "": + sys.stderr.write ("send_mail() can't be called with both arguments as non-null! (`%s' and `%s')\n%s" % (message, filename)) + sys.exit(1) + # If we've been passed a string dump it into a temporary file + if message != "": + filename = tempfile.mktemp() + fd = os.open(filename, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0700) + os.write (fd, message) + os.close (fd) + # Invoke sendmail + (result, output) = commands.getstatusoutput("%s < %s" % (sendmail_command, filename)) + if (result != 0): + sys.stderr.write ("Sendmail invocation (`%s') failed for `%s'!\n%s" % (sendmail_command, filename, output)) + sys.exit(result) + # Clean up any temporary files + if message !="": + os.unlink (filename) + +###################################################################################### + +def poolify (source, component): + if component != "": + component = component + '/'; + if source[:3] == "lib": + return component + source[:4] + '/' + source + '/' + else: + return component + source[:1] + '/' + source + '/' + +###################################################################################### + +def move (src, dest): + if os.path.exists(dest) and stat.S_ISDIR(os.stat(dest)[stat.ST_MODE]): + dest_dir = dest; + else: + dest_dir = os.path.dirname(dest); + if not os.path.exists(dest_dir): + umask = os.umask(00000); + os.makedirs(dest_dir, 02775); + os.umask(umask); + #print "Moving %s to %s..." % (src, dest); + shutil.copy2(src, dest); + os.chmod(dest, 0664); + os.unlink(src); + +###################################################################################### + +# FIXME: this is inherently nasty. Can't put this mapping in a conf +# file because the conf file depends on the archive.. doh. 
Maybe an
+# archive-independent conf file is needed.
+
+def where_am_i ():
+    res = socket.gethostbyaddr(socket.gethostname());
+    if res[0] == 'pandora.debian.org':
+        return 'non-US';
+    elif res[0] == 'auric.debian.org':
+        return 'ftp-master';
+    else:
+        raise unknown_hostname_exc, res;
+
+######################################################################################
+
+# FIXME: this isn't great either.
+
+def which_conf_file ():
+    archive = where_am_i ();
+    if archive == 'non-US':
+        return '/org/non-us.debian.org/katie/katie.conf-non-US';
+    elif archive == 'ftp-master':
+        return '/org/ftp.debian.org/katie/katie.conf';
+    else:
+        raise unknown_hostname_exc, archive
+
+######################################################################################
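+# A worked example of fix_maintainer() above (the name and address are
+# invented for illustration):
+#
+#   >>> fix_maintainer("Doe, John <jdoe@example.com>")
+#   ('jdoe@example.com (Doe, John)', 'Doe, John', 'jdoe@example.com')
+#
+# i.e. because the name part contains a comma, the RFC822 form is
+# rewritten as `email (name)' so broken parsers don't choke on it.
+
+######################################################################################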