From: Mark Hymers Date: Sat, 14 Mar 2009 10:44:58 +0000 (+0000) Subject: Merge commit 'ftpmaster/master' X-Git-Url: https://git.decadent.org.uk/gitweb/?a=commitdiff_plain;h=11dce36cabea5fa916ef6da415b4669898ba9ff7;hp=0df2038a32f77898d2dc85bd5278ded0ced14ee9;p=dak.git Merge commit 'ftpmaster/master' --- diff --git a/config/debian/cron.dinstall b/config/debian/cron.dinstall index 383ba334..cc2db8ad 100755 --- a/config/debian/cron.dinstall +++ b/config/debian/cron.dinstall @@ -43,6 +43,12 @@ function cleanup() { rm -f ${LOCK_ACCEPTED} } +# If we error out this one is called, *FOLLOWED* by cleanup above +function onerror() { + ERRDATE=$(date "+%Y.%m.%d-%H:%M:%S") + cat "$LOGFILE" | mail -s "ATTENTION ATTENTION! dinstall error at ${ERRDATE} (Be quiet, Brain, or I'll stab you with a Q-tip)" cron@ftp-master.debian.org +} + ######################################################################## # the actual dinstall functions follow # ######################################################################## @@ -74,9 +80,20 @@ function pgdump_pre() { function pgdump_post() { log "Creating post-daily-cron-job backup of projectb database..." cd $base/backup - POSTDUMP=$base/backup/dump_$(date +%Y.%m.%d-%H:%M:%S) - pg_dump projectb > $POSTDUMP - ln -sf $POSTDUMP current + POSTDUMP=$(date +%Y.%m.%d-%H:%M:%S) + pg_dump projectb > $base/backup/dump_$POSTDUMP + pg_dumpall --globals-only > $base/backup/dumpall_$POSTDUMP + ln -sf $base/backup/dump_$POSTDUMP current + ln -sf $base/backup/dumpall_$POSTDUMP currentall +} + +# Load the dak-dev projectb +function pgdakdev() { + cd $base/backup + echo "drop database projectb" | psql -p 5433 template1 + cat currentall | psql -p 5433 template1 + createdb -p 5433 -T template0 projectb + fgrep -v '\connect' current | psql -p 5433 projectb } # Updating various files @@ -267,9 +284,9 @@ function bts() { } function merkel2() { - # Push katie@merkel so it syncs the projectb there. Returns immediately, the sync runs detached + # Push dak@merkel so it syncs the projectb there. Returns immediately, the sync runs detached log "Trigger merkels projectb sync" - ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_projectb katie@merkel.debian.org sleep 1 + ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_projectb dak@merkel.debian.org sleep 1 } function runparts() { @@ -329,20 +346,29 @@ function savetimestamp() { echo ${NOW} > "${dbdir}/dinstallstart" } +function maillogfile() { + cat "$LOGFILE" | mail -s "Log for dinstall run of ${NOW}" cron@ftp-master.debian.org +} + function renamelogfile() { - if [ -f "${dbdir}/dinstallstart" ]; then - RENAMETO=$(cat "${dbdir}/dinstallstart") - mv "$LOGFILE" "$logdir/dinstall_${RENAMETO}.log" - logstats "$logdir/dinstall_${RENAMETO}.log" - bzip2 -9 "$logdir/dinstall_${RENAMETO}.log" - else - error "Problem, I don't know when dinstall started, unable to do log statistics." - NOW=`date "+%Y.%m.%d-%H:%M:%S"` - mv "$LOGFILE" "$logdir/dinstall_${NOW}.log" - bzip2 -9 "$logdir/dinstall_${NOW}.log" - fi + if [ -f "${dbdir}/dinstallstart" ]; then + NOW=$(cat "${dbdir}/dinstallstart") + maillogfile + mv "$LOGFILE" "$logdir/dinstall_${NOW}.log" + logstats "$logdir/dinstall_${NOW}.log" + bzip2 -9 "$logdir/dinstall_${NOW}.log" + else + error "Problem, I don't know when dinstall started, unable to do log statistics." 
+        NOW=`date "+%Y.%m.%d-%H:%M:%S"`
+        maillogfile
+        mv "$LOGFILE" "$logdir/dinstall_${NOW}.log"
+        bzip2 -9 "$logdir/dinstall_${NOW}.log"
+    fi
 }

+function testingsourcelist() {
+    dak ls -s testing -f heidi -r . | egrep 'source$' > ${webdir}/testing.list
+}
 ########################################################################
 ########################################################################

@@ -384,10 +410,10 @@ function stage() {
     # it has to cd first!
     cd ${configdir}

-	if [ -f "${LOCK_STOP}" ]; then
-	    log "${LOCK_STOP} exists, exiting immediately"
-	    exit 42
-	fi
+    if [ -f "${LOCK_STOP}" ]; then
+        log "${LOCK_STOP} exists, exiting immediately"
+        exit 42
+    fi

     if [ "${ERR}" = "false" ]; then
         set +e
@@ -406,10 +432,10 @@ function stage() {
         ts "${TIME}"
     fi

-	if [ -f "${LOCK_STOP}" ]; then
-	    log "${LOCK_STOP} exists, exiting immediately"
-	    exit 42
-	fi
+    if [ -f "${LOCK_STOP}" ]; then
+        log "${LOCK_STOP} exists, exiting immediately"
+        exit 42
+    fi
 }

 ########################################################################

@@ -466,7 +492,8 @@ LOCK_BRITNEY="$lockdir/britney.lock"
 LOCK_STOP="$lockdir/archive.stop"

 lockfile -l 3600 "${LOCK_DAILY}"
-trap cleanup EXIT ERR TERM HUP INT QUIT
+trap onerror ERR
+trap cleanup EXIT TERM HUP INT QUIT

 touch "${LOCK_BRITNEY}"

@@ -663,6 +690,14 @@ GO=(
 )
 stage $GO

+GO=(
+    FUNC="pgdakdev"
+    TIME="dak-dev db"
+    ARGS=""
+    ERR="false"
+)
+stage $GO
+
 GO=(
     FUNC="expire"
     TIME="expire_dumps"
@@ -691,7 +726,7 @@ GO=(
     FUNC="bts"
     TIME=""
     ARGS=""
-    ERR=""
+    ERR="false"
 )
 stage $GO

@@ -703,8 +738,6 @@ GO=(
 )
 stage $GO

-ulimit -m 90000 -d 90000 -s 10000 -v 200000
-
 GO=(
     FUNC="runparts"
     TIME="run-parts"
@@ -729,6 +762,14 @@ GO=(
 )
 stage $GO

+GO=(
+    FUNC="testingsourcelist"
+    TIME=""
+    ARGS=""
+    ERR="false"
+)
+stage $GO
+
 rm -f ${LOCK_BRITNEY}

 GO=(
@@ -751,8 +791,6 @@ log "Daily cron scripts successful, all done"

 exec > "$logdir/afterdinstall.log" 2>&1

-cat "$LOGFILE" | mail -s "Log for dinstall run of ${NOW}" cron@ftp-master.debian.org
-
 GO=(
     FUNC="renamelogfile"
     TIME=""
diff --git a/config/debian/dak.conf b/config/debian/dak.conf
index 54987821..c4c49542 100644
--- a/config/debian/dak.conf
+++ b/config/debian/dak.conf
@@ -1,7 +1,7 @@ Dinstall
 {
    GPGKeyring {
-      "/srv/keyring.debian.org/keyrings/debian-keyring.gpg";
+     "/srv/keyring.debian.org/keyrings/debian-keyring.gpg";
      "/srv/keyring.debian.org/keyrings/debian-keyring.pgp";
      "/srv/ftp.debian.org/keyrings/debian-maintainers.gpg";
    };
@@ -214,7 +214,6 @@ Suite
   CodeName "etch";
   OverrideCodeName "etch";
   Priority "5";
-  Untouchable "1";
   ChangeLogBase "dists/oldstable/";
   UdebComponents
   {
@@ -282,7 +281,6 @@ Suite
   CodeName "lenny";
   OverrideCodeName "lenny";
   Priority "5";
-  Untouchable "1";
   ChangeLogBase "dists/stable/";
   UdebComponents
   {
@@ -712,3 +710,9 @@ Urgency
     critical;
   };
 };
+
+Contents
+{
+  Header "contents";
+  Root "/srv/ftp.debian.org/test/";
+};
diff --git a/dak/bts_categorize.py b/dak/bts_categorize.py
index fd9cd090..663690a1 100755
--- a/dak/bts_categorize.py
+++ b/dak/bts_categorize.py
@@ -122,10 +122,14 @@ class BugClassifier(object):
         controls = ""

         bc = BugClassifier()
-        for bug in bc.unclassified_bugs():
-            controls += bc.classify_bug(bug)
-
-        return controls
+        try:
+            for bug in bc.unclassified_bugs():
+                controls += bc.classify_bug(bug)
+
+            return controls
+        except:
+            log.error("couldn't retrieve bugs from soap interface: %s" % sys.exc_info()[0])
+            return None

 def send_email(commands, simulate=False):
     global Cnf
diff --git a/dak/contents.py b/dak/contents.py
new file mode 100755
index 00000000..1efb361f
--- /dev/null
+++ b/dak/contents.py
@@ -0,0 +1,404 @@
+#!/usr/bin/env python
+"""
+Create all the contents files
+
+@contact: Debian FTPMaster <ftpmaster@debian.org>
+@copyright: 2008, 2009 Michael Casadevall
+@copyright: 2009 Mike O'Connor
+@license: GNU General Public License version 2 or later
+"""
+
+################################################################################
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+# There is the idea to slowly replace the current contents files
+# with a new generation of such files
+# carrying more information.
+
+# Of course that won't help for now, where we still need to generate them :)
+
+################################################################################
+
+import sys
+import os
+import logging
+import math
+import gzip
+import traceback
+import apt_pkg
+from daklib import utils
+from daklib.binary import Binary
+from daklib.config import Config
+from daklib.dbconn import DBConn
+################################################################################
+
+def usage (exit_code=0):
+    print """Usage: dak contents [options] command [arguments]
+
+COMMANDS
+    generate
+        generate Contents-$arch.gz files
+
+    bootstrap
+        scan the debs in the existing pool and load contents into the database
+
+    cruft
+        remove files/paths which are no longer referenced by a binary
+
+OPTIONS
+     -h, --help
+        show this help and exit
+
+     -v, --verbose
+        show verbose information messages
+
+     -q, --quiet
+        suppress all output but errors
+
+     -s, --suite={stable,testing,unstable,...}
+        only operate on a single suite
+
+     -a, --arch={i386,amd64}
+        only operate on a single architecture
+"""
+    sys.exit(exit_code)
+
+################################################################################
+
+# where in dak.conf all of our configuration will be stowed
+
+options_prefix = "Contents"
+options_prefix = "%s::Options" % options_prefix
+
+log = logging.getLogger()
+
+################################################################################
+
+# get all the arches delivered for a given suite
+# this should probably exist somewhere common
+arches_q = """PREPARE arches_q(int) as
+              SELECT s.architecture, a.arch_string
+              FROM suite_architectures s
+              JOIN architecture a ON (s.architecture=a.id)
+              WHERE suite = $1"""
+
+# find me the .deb for a given binary id
+debs_q = """PREPARE debs_q(int, int) as
+            SELECT b.id, f.filename FROM bin_assoc_by_arch baa
+            JOIN binaries b ON baa.bin=b.id
+            JOIN files f ON b.file=f.id
+            WHERE suite = $1
+            AND arch = $2"""
+
+# ask if we already have contents associated with this binary
+olddeb_q = """PREPARE olddeb_q(int) as
+              SELECT 1 FROM content_associations
+              WHERE binary_pkg = $1
+              LIMIT 1"""
+
+# find me all of the contents for a given .deb
+contents_q = """PREPARE contents_q(int,int,int,int) as
+                SELECT (p.path||'/'||n.file) AS fn,
+                       comma_separated_list(s.section||'/'||b.package)
+                FROM content_associations c
+                JOIN content_file_paths p ON (c.filepath=p.id)
+                JOIN content_file_names n ON (c.filename=n.id)
+                JOIN binaries b ON (b.id=c.binary_pkg)
+                JOIN bin_associations ba ON (b.id=ba.bin)
+                JOIN override o ON (o.package=b.package)
+                JOIN section s ON (s.id=o.section)
+                WHERE (b.architecture = $1 OR b.architecture = $2)
+                AND ba.suite = $3
+                AND o.suite = $3
+                AND b.type = 'deb'
+                AND o.type = $4
+                GROUP BY fn
+                ORDER BY fn"""

+udeb_contents_q = """PREPARE udeb_contents_q(int,int,int,int,int) as
+                     SELECT (p.path||'/'||n.file) as fn,
+                            comma_separated_list(s.section||'/'||b.package)
+                     FROM content_associations c
+                     JOIN content_file_paths p ON (c.filepath=p.id)
+                     JOIN content_file_names n ON (c.filename=n.id)
+                     JOIN binaries b ON (b.id=c.binary_pkg)
+                     JOIN bin_associations ba ON (b.id=ba.bin)
+                     JOIN override o ON (o.package=b.package)
+                     JOIN section s ON (s.id=o.section)
+                     WHERE (b.architecture = $1 OR b.architecture = $2)
+                     AND s.id = $3
+                     AND ba.suite = $4
+                     AND o.suite = $4
+                     AND b.type = 'udeb'
+                     AND o.type = $5
+                     GROUP BY fn
+                     ORDER BY fn"""
+
+
+# clear out all of the temporarily stored content associations
+# this should be run only after p-a has run.  after a p-a
+# run we should have either accepted or rejected every package
+# so there should no longer be anything in the queue
+remove_pending_contents_cruft_q = """DELETE FROM pending_content_associations"""
+
+# delete any filenames we are storing which have no binary associated with them
+remove_filename_cruft_q = """DELETE FROM content_file_names
+                             WHERE id IN (SELECT cfn.id FROM content_file_names cfn
+                                          LEFT JOIN content_associations ca
+                                          ON ca.filename=cfn.id
+                                          WHERE ca.id IS NULL)"""
+
+# delete any paths we are storing which have no binary associated with them
+remove_filepath_cruft_q = """DELETE FROM content_file_paths
+                             WHERE id IN (SELECT cfn.id FROM content_file_paths cfn
+                                          LEFT JOIN content_associations ca
+                                          ON ca.filepath=cfn.id
+                                          WHERE ca.id IS NULL)"""
+class Contents(object):
+    """
+    Class capable of generating Contents-$arch.gz files
+
+    Usage: Contents().generate()
+    """
+
+    def __init__(self):
+        self.header = None
+
+    def reject(self, message):
+        log.error("E: %s" % message)
+
+    def _getHeader(self):
+        """
+        Internal method to return the header for Contents.gz files
+
+        This is boilerplate which explains the contents of the file and how
+        it can be used.
+        """
+        if self.header is None:
+            if Config().has_key("Contents::Header"):
+                try:
+                    h = open(os.path.join( Config()["Dir::Templates"],
+                                           Config()["Contents::Header"] ), "r")
+                    self.header = h.read()
+                    h.close()
+                except:
+                    log.error( "error opening header file: %s\n%s" % (Config()["Contents::Header"],
+                                                                      traceback.format_exc() ))
+                    self.header = False
+            else:
+                self.header = False
+
+        return self.header
+
+    # goal column for the section name
+    _goal_column = 54
+
+    def _write_content_file(self, cursor, filename):
+        """
+        Internal method for writing all the results to a given file.
+        The cursor should have a result set generated from a query already.
+ """ + filepath = Config()["Contents::Root"] + filename + filedir = os.path.dirname(filepath) + if not os.path.isdir(filedir): + os.makedirs(filedir) + f = gzip.open(filepath, "w") + try: + header = self._getHeader() + + if header: + f.write(header) + + while True: + contents = cursor.fetchone() + if not contents: + return + + num_tabs = max(1, + int(math.ceil((self._goal_column - len(contents[0])-1) / 8))) + f.write(contents[0] + ( '\t' * num_tabs ) + contents[-1] + "\n") + + finally: + f.close() + + def cruft(self): + """ + remove files/paths from the DB which are no longer referenced + by binaries and clean the temporary table + """ + cursor = DBConn().cursor(); + cursor.execute( "BEGIN WORK" ) + cursor.execute( remove_pending_contents_cruft_q ) + cursor.execute( remove_filename_cruft_q ) + cursor.execute( remove_filepath_cruft_q ) + cursor.execute( "COMMIT" ) + + + def bootstrap(self): + """ + scan the existing debs in the pool to populate the contents database tables + """ + pooldir = Config()[ 'Dir::Pool' ] + + cursor = DBConn().cursor(); + DBConn().prepare("debs_q",debs_q) + DBConn().prepare("olddeb_q",olddeb_q) + DBConn().prepare("arches_q",arches_q) + + suites = self._suites() + for suite in [i.lower() for i in suites]: + suite_id = DBConn().get_suite_id(suite) + + arch_list = self._arches(cursor, suite_id) + arch_all_id = DBConn().get_architecture_id("all") + for arch_id in arch_list: + cursor.execute( "EXECUTE debs_q(%d, %d)" % ( suite_id, arch_id[0] ) ) + + count = 0 + while True: + deb = cursor.fetchone() + if not deb: + break + count += 1 + cursor1 = DBConn().cursor(); + cursor1.execute( "EXECUTE olddeb_q(%d)" % (deb[0] ) ) + old = cursor1.fetchone() + if old: + log.debug( "already imported: %s" % (deb[1]) ) + else: + log.debug( "scanning: %s" % (deb[1]) ) + debfile = os.path.join( pooldir, deb[1] ) + if os.path.exists( debfile ): + Binary(debfile, self.reject).scan_package( deb[0] ) + else: + log.error( "missing .deb: %s" % deb[1] ) + + def generate(self): + """ + Generate Contents-$arch.gz files for every available arch in each given suite. + """ + cursor = DBConn().cursor(); + + DBConn().prepare( "arches_q", arches_q ) + DBConn().prepare( "contents_q", contents_q ) + DBConn().prepare( "udeb_contents_q", udeb_contents_q ) + + debtype_id=DBConn().get_override_type_id("deb") + udebtype_id=DBConn().get_override_type_id("udeb") + + suites = self._suites() + + # Get our suites, and the architectures + for suite in [i.lower() for i in suites]: + suite_id = DBConn().get_suite_id(suite) + arch_list = self._arches(cursor, suite_id) + + arch_all_id = DBConn().get_architecture_id("all") + + for arch_id in arch_list: + cursor.execute("EXECUTE contents_q(%d,%d,%d,%d)" % (arch_id[0], arch_all_id, suite_id, debtype_id)) + self._write_content_file(cursor, "dists/%s/Contents-%s.gz" % (suite, arch_id[1])) + + # The MORE fun part. 
Ok, udebs need their own contents files, udeb, and udeb-nf (not-free) + # This is HORRIBLY debian specific :-/ + for section, fn_pattern in [("debian-installer","dists/%s/Contents-udeb-%s.gz"), + ("non-free/debian-installer", "dists/%s/Contents-udeb-nf-%s.gz")]: + + for arch_id in arch_list: + section_id = DBConn().get_section_id(section) # all udebs should be here) + if section_id != -1: + cursor.execute("EXECUTE udeb_contents_q(%d,%d,%d,%d,%d)" % (arch_id[0], arch_all_id, section_id, suite_id, udebtype_id)) + + self._write_content_file(cursor, fn_pattern % (suite, arch_id[1])) + + +################################################################################ + + def _suites(self): + """ + return a list of suites to operate on + """ + if Config().has_key( "%s::%s" %(options_prefix,"Suite")): + suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")]) + else: + suites = Config().SubTree("Suite").List() + + return suites + + def _arches(self, cursor, suite): + """ + return a list of archs to operate on + """ + arch_list = [ ] + if Config().has_key( "%s::%s" %(options_prefix,"Arch")): + archs = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Arch")]) + for arch_name in archs: + arch_list.append((DBConn().get_architecture_id(arch_name), arch_name)) + else: + cursor.execute("EXECUTE arches_q(%d)" % (suite)) + while True: + r = cursor.fetchone() + if not r: + break + + if r[1] != "source" and r[1] != "all": + arch_list.append((r[0], r[1])) + + return arch_list + +################################################################################ + +def main(): + cnf = Config() + + arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")), + ('s',"suite", "%s::%s" % (options_prefix,"Suite"),"HasArg"), + ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")), + ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")), + ('a',"arch", "%s::%s" % (options_prefix,"Arch"),"HasArg"), + ] + + commands = {'generate' : Contents.generate, + 'bootstrap' : Contents.bootstrap, + 'cruft' : Contents.cruft, + } + + args = apt_pkg.ParseCommandLine(cnf.Cnf, arguments,sys.argv) + + if (len(args) < 1) or not commands.has_key(args[0]): + usage() + + if cnf.has_key("%s::%s" % (options_prefix,"Help")): + usage() + + level=logging.INFO + if cnf.has_key("%s::%s" % (options_prefix,"Quiet")): + level=logging.ERROR + + elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")): + level=logging.DEBUG + + + logging.basicConfig( level=level, + format='%(asctime)s %(levelname)s %(message)s', + stream = sys.stderr ) + + commands[args[0]](Contents()) + +if __name__ == '__main__': + main() diff --git a/dak/control_overrides.py b/dak/control_overrides.py index 1add8f5b..5d6ba46c 100755 --- a/dak/control_overrides.py +++ b/dak/control_overrides.py @@ -297,7 +297,7 @@ def main (): if action == "list": list_overrides(suite, component, otype) else: - if Cnf.has_key("Suite::%s::Untouchable" % suite) and Cnf["Suite::%s::Untouchable" % suite] != 0: + if database.get_suite_untouchable(suite): utils.fubar("%s: suite is untouchable" % suite) noaction = 0 diff --git a/dak/dak.py b/dak/dak.py index 981a31a9..61749490 100755 --- a/dak/dak.py +++ b/dak/dak.py @@ -33,8 +33,10 @@ G{importgraph} ################################################################################ -import sys, imp -import daklib.utils, daklib.extensions +import sys +import imp +import daklib.utils +import daklib.extensions ################################################################################ @@ -112,6 +114,8 @@ def init(): "Generate package <-> 
file mapping"),
         ("generate-releases",
          "Generate Release files"),
+        ("contents",
+         "Generate Contents files"),
         ("generate-index-diffs",
          "Generate .diff/Index files"),
         ("clean-suites",
diff --git a/dak/dakdb/update4.py b/dak/dakdb/update4.py
old mode 100755
new mode 100644
index 8c55d09b..1a9d9c3a
--- a/dak/dakdb/update4.py
+++ b/dak/dakdb/update4.py
@@ -1,12 +1,10 @@
 #!/usr/bin/env python
-
 """
 Database Update Script - Get suite_architectures table to use sane values

 @contact: Debian FTP Master <ftpmaster@debian.org>
 @copyright: 2009 Joerg Jaspert
 @license: GNU General Public License version 2 or later
-
 """

 # This program is free software; you can redistribute it and/or modify
diff --git a/dak/dakdb/update6.py b/dak/dakdb/update6.py
new file mode 100644
index 00000000..4537579a
--- /dev/null
+++ b/dak/dakdb/update6.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Debian Archive Kit Database Update Script
+Copyright © 2008 Michael Casadevall
+Copyright © 2008 Roger Leigh
+
+Debian Archive Kit Database Update Script 6
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+# really, if we want to screw ourselves, let's find a better way.
+# rm -rf /srv/ftp.debian.org
+
+################################################################################
+
+import psycopg2
+import time
+from daklib.dak_exceptions import DBUpdateError
+
+################################################################################
+
+def do_update(self):
+    print "Adding content fields to database"
+
+    try:
+        c = self.db.cursor()
+        c.execute("""CREATE TABLE content_file_paths (
+                     id serial primary key not null,
+                     path text unique not null
+                   )""")
+
+        c.execute("""CREATE TABLE content_file_names (
+                     id serial primary key not null,
+                     file text unique not null
+                   )""")
+
+        c.execute("""CREATE TABLE content_associations (
+                     id serial not null,
+                     binary_pkg int4 not null references binaries(id) on delete cascade,
+                     filepath int4 not null references content_file_paths(id) on delete cascade,
+                     filename int4 not null references content_file_names(id) on delete cascade
+                   );""")
+
+        c.execute("""CREATE TABLE pending_content_associations (
+                     id serial not null,
+                     package text not null,
+                     version debversion not null,
+                     filepath int4 not null references content_file_paths(id) on delete cascade,
+                     filename int4 not null references content_file_names(id) on delete cascade
+                   );""")
+
+        c.execute("""CREATE FUNCTION comma_concat(text, text) RETURNS text
+                     AS $_$select case
+                     WHEN $2 is null or $2 = '' THEN $1
+                     WHEN $1 is null or $1 = '' THEN $2
+                     ELSE $1 || ',' || $2
+                     END$_$
+                     LANGUAGE sql""")
+
+        c.execute("""CREATE AGGREGATE comma_separated_list (
+                     BASETYPE = text,
+                     SFUNC = comma_concat,
+                     STYPE = text,
+                     INITCOND = ''
+                   );""")
+
+        c.execute( "CREATE INDEX content_associations_binary ON content_associations(binary_pkg)" )
+
+        c.execute("UPDATE config SET value = '6' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError, msg:
+        self.db.rollback()
+        raise DBUpdateError, "Unable to apply content table updates, rollback issued. Error message : %s" % (str(msg))
diff --git a/dak/dakdb/update7.py b/dak/dakdb/update7.py
new file mode 100755
index 00000000..c8828535
--- /dev/null
+++ b/dak/dakdb/update7.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Debian Archive Kit Database Update Script
+Copyright © 2008 Michael Casadevall
+Copyright © 2009 Joerg Jaspert
+
+Debian Archive Kit Database Update Script 7
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+# * Ganneff ponders how to best write the text to -devel. (need to tell em in
+# case they find more bugs).
"We fixed the fucking idiotic broken implementation +# to be less so" is probably not the nicest, even if perfect valid, way to say so + +################################################################################ + +import psycopg2 +import time +from daklib.dak_exceptions import DBUpdateError +from daklib.utils import get_conf + +################################################################################ + +def do_update(self): + print "Moving some of the suite config into the DB" + Cnf = get_conf() + + try: + c = self.db.cursor() + + c.execute("ALTER TABLE suite ADD COLUMN untouchable BOOLEAN NOT NULL DEFAULT FALSE;") + query = "UPDATE suite SET untouchable = TRUE WHERE suite_name = %s" #: Update query + for suite in Cnf.SubTree("Suite").List(): + untouchable = Cnf.Find("Suite::%s::Untouchable" % (suite)) + if not untouchable: + continue + print "[Untouchable] Processing suite %s" % (suite) + suite = suite.lower() + c.execute(query, [suite]) + + + c.execute("ALTER TABLE suite ADD COLUMN announce text NOT NULL DEFAULT 'debian-devel-changes@lists.debian.org';") + query = "UPDATE suite SET announce = %s WHERE suite_name = %s" #: Update query + for suite in Cnf.SubTree("Suite").List(): + announce_list = Cnf.Find("Suite::%s::Announce" % (suite)) + print "[Announce] Processing suite %s" % (suite) + suite = suite.lower() + c.execute(query, [announce_list, suite]) + + c.execute("ALTER TABLE suite ADD COLUMN codename text;") + query = "UPDATE suite SET codename = %s WHERE suite_name = %s" #: Update query + for suite in Cnf.SubTree("Suite").List(): + codename = Cnf.Find("Suite::%s::CodeName" % (suite)) + print "[Codename] Processing suite %s" % (suite) + suite = suite.lower() + c.execute(query, [codename, suite]) + + c.execute("ALTER TABLE suite ADD COLUMN overridecodename text;") + query = "UPDATE suite SET overridecodename = %s WHERE suite_name = %s" #: Update query + for suite in Cnf.SubTree("Suite").List(): + codename = Cnf.Find("Suite::%s::OverrideCodeName" % (suite)) + print "[OverrideCodeName] Processing suite %s" % (suite) + suite = suite.lower() + c.execute(query, [codename, suite]) + + c.execute("ALTER TABLE suite ADD COLUMN validtime integer NOT NULL DEFAULT 604800;") + query = "UPDATE suite SET validtime = %s WHERE suite_name = %s" #: Update query + for suite in Cnf.SubTree("Suite").List(): + validtime = Cnf.Find("Suite::%s::ValidTime" % (suite)) + print "[ValidTime] Processing suite %s" % (suite) + if not validtime: + validtime = 0 + suite = suite.lower() + c.execute(query, [validtime, suite]) + + c.execute("ALTER TABLE suite ADD COLUMN priority integer NOT NULL DEFAULT 0;") + query = "UPDATE suite SET priority = %s WHERE suite_name = %s" #: Update query + for suite in Cnf.SubTree("Suite").List(): + priority = Cnf.Find("Suite::%s::Priority" % (suite)) + print "[Priority] Processing suite %s" % (suite) + if not priority: + priority = 0 + suite = suite.lower() + c.execute(query, [priority, suite]) + + + c.execute("ALTER TABLE suite ADD COLUMN notautomatic BOOLEAN NOT NULL DEFAULT FALSE;") + query = "UPDATE suite SET notautomatic = TRUE WHERE suite_name = %s" #: Update query + for suite in Cnf.SubTree("Suite").List(): + notautomatic = Cnf.Find("Suite::%s::NotAutomatic" % (suite)) + print "[NotAutomatic] Processing suite %s" % (suite) + if not notautomatic: + continue + suite = suite.lower() + c.execute(query, [suite]) + + c.execute("UPDATE config SET value = '7' WHERE name = 'db_revision'") + self.db.commit() + + except psycopg2.ProgrammingError, msg: + self.db.rollback() + 
raise DBUpdateError, "Unable to appy suite config updates, rollback issued. Error message : %s" % (str(msg)) diff --git a/dak/generate_index_diffs.py b/dak/generate_index_diffs.py index 774e0467..e83dfa3d 100755 --- a/dak/generate_index_diffs.py +++ b/dak/generate_index_diffs.py @@ -321,7 +321,7 @@ def main(): print "Processing: " + suite SuiteBlock = Cnf.SubTree("Suite::" + suite) - if SuiteBlock.has_key("Untouchable"): + if database.get_suite_untouchable(suite): print "Skipping: " + suite + " (untouchable)" continue diff --git a/dak/generate_releases.py b/dak/generate_releases.py index 983c8573..137c8447 100755 --- a/dak/generate_releases.py +++ b/dak/generate_releases.py @@ -159,7 +159,7 @@ def main (): print "Processing: " + suite SuiteBlock = Cnf.SubTree("Suite::" + suite) - if SuiteBlock.has_key("Untouchable") and not Options["Force-Touch"]: + if database.get_suite_untouchable(suite) and not Options["Force-Touch"]: print "Skipping: " + suite + " (untouchable)" continue diff --git a/dak/init_db.py b/dak/init_db.py index e1f1bced..af0a03b2 100755 --- a/dak/init_db.py +++ b/dak/init_db.py @@ -19,15 +19,12 @@ ################################################################################ -import pg, sys +import psycopg2, sys import apt_pkg -from daklib import database -from daklib import utils - -################################################################################ -Cnf = None -projectB = None +from daklib import utils +from daklib.DBConn import DBConn +from daklib.Config import Config ################################################################################ @@ -43,155 +40,182 @@ Initalizes some tables in the projectB database based on the config file. ################################################################################ def sql_get (config, key): - """Return the value of config[key] in quotes or NULL if it doesn't exist.""" + """Return the value of config[key] or None if it doesn't exist.""" - if config.has_key(key): - return "'%s'" % (config[key]) - else: - return "NULL" + try: + return config[key] + except KeyError: + return None ################################################################################ -def do_archive(): - """Initalize the archive table.""" - - projectB.query("BEGIN WORK") - projectB.query("DELETE FROM archive") - for name in Cnf.SubTree("Archive").List(): - archive_config = Cnf.SubTree("Archive::%s" % (name)) - origin_server = sql_get(archive_config, "OriginServer") - description = sql_get(archive_config, "Description") - projectB.query("INSERT INTO archive (name, origin_server, description) " - "VALUES ('%s', %s, %s)" - % (name, origin_server, description)) - projectB.query("COMMIT WORK") - -def do_architecture(): - """Initalize the architecture table.""" - - projectB.query("BEGIN WORK") - projectB.query("DELETE FROM architecture") - for arch in Cnf.SubTree("Architectures").List(): - description = Cnf["Architectures::%s" % (arch)] - projectB.query("INSERT INTO architecture (arch_string, description) " - "VALUES ('%s', '%s')" % (arch, description)) - projectB.query("COMMIT WORK") - -def do_component(): - """Initalize the component table.""" - - projectB.query("BEGIN WORK") - projectB.query("DELETE FROM component") - for name in Cnf.SubTree("Component").List(): - component_config = Cnf.SubTree("Component::%s" % (name)) - description = sql_get(component_config, "Description") - if component_config.get("MeetsDFSG").lower() == "true": - meets_dfsg = "true" - else: - meets_dfsg = "false" - projectB.query("INSERT INTO component (name, 
description, meets_dfsg) " - "VALUES ('%s', %s, %s)" - % (name, description, meets_dfsg)) - projectB.query("COMMIT WORK") - -def do_location(): - """Initalize the location table.""" - - projectB.query("BEGIN WORK") - projectB.query("DELETE FROM location") - for location in Cnf.SubTree("Location").List(): - location_config = Cnf.SubTree("Location::%s" % (location)) - archive_id = database.get_archive_id(location_config["Archive"]) - if archive_id == -1: - utils.fubar("Archive '%s' for location '%s' not found." - % (location_config["Archive"], location)) - location_type = location_config.get("type") - if location_type == "pool": - for component in Cnf.SubTree("Component").List(): - component_id = database.get_component_id(component) - projectB.query("INSERT INTO location (path, component, " - "archive, type) VALUES ('%s', %d, %d, '%s')" - % (location, component_id, archive_id, - location_type)) - else: - utils.fubar("E: type '%s' not recognised in location %s." - % (location_type, location)) - projectB.query("COMMIT WORK") - -def do_suite(): - """Initalize the suite table.""" - - projectB.query("BEGIN WORK") - projectB.query("DELETE FROM suite") - for suite in Cnf.SubTree("Suite").List(): - suite_config = Cnf.SubTree("Suite::%s" %(suite)) - version = sql_get(suite_config, "Version") - origin = sql_get(suite_config, "Origin") - description = sql_get(suite_config, "Description") - projectB.query("INSERT INTO suite (suite_name, version, origin, " - "description) VALUES ('%s', %s, %s, %s)" - % (suite.lower(), version, origin, description)) - for architecture in database.get_suite_architectures(suite): - architecture_id = database.get_architecture_id (architecture) - if architecture_id < 0: - utils.fubar("architecture '%s' not found in architecture" - " table for suite %s." 
- % (architecture, suite)) - projectB.query("INSERT INTO suite_architectures (suite, " - "architecture) VALUES (currval('suite_id_seq'), %d)" - % (architecture_id)) - projectB.query("COMMIT WORK") - -def do_override_type(): - """Initalize the override_type table.""" - - projectB.query("BEGIN WORK") - projectB.query("DELETE FROM override_type") - for override_type in Cnf.ValueList("OverrideType"): - projectB.query("INSERT INTO override_type (type) VALUES ('%s')" - % (override_type)) - projectB.query("COMMIT WORK") - -def do_priority(): - """Initialize the priority table.""" - - projectB.query("BEGIN WORK") - projectB.query("DELETE FROM priority") - for priority in Cnf.SubTree("Priority").List(): - projectB.query("INSERT INTO priority (priority, level) VALUES " - "('%s', %s)" - % (priority, Cnf["Priority::%s" % (priority)])) - projectB.query("COMMIT WORK") - -def do_section(): - """Initalize the section table.""" - projectB.query("BEGIN WORK") - projectB.query("DELETE FROM section") - for component in Cnf.SubTree("Component").List(): - if Cnf["Control-Overrides::ComponentPosition"] == "prefix": - suffix = "" - if component != "main": - prefix = component + '/' - else: - prefix = "" - else: - prefix = "" - if component != "main": - suffix = '/' + component +class InitDB(object): + def __init__(self, Cnf, projectB): + self.Cnf = Cnf + self.projectB = projectB + + def do_archive(self): + """initalize the archive table.""" + + c = self.projectB.cursor() + c.execute("DELETE FROM archive") + archive_add = "INSERT INTO archive (name, origin_server, description) VALUES (%s, %s, %s)" + for name in self.Cnf.SubTree("Archive").List(): + archive_config = self.Cnf.SubTree("Archive::%s" % (name)) + origin_server = sql_get(archive_config, "OriginServer") + description = sql_get(archive_config, "Description") + c.execute(archive_add, [name, origin_server, description]) + self.projectB.commit() + + def do_architecture(self): + """Initalize the architecture table.""" + + c = self.projectB.cursor() + c.execute("DELETE FROM architecture") + arch_add = "INSERT INTO architecture (arch_string, description) VALUES (%s, %s)" + for arch in self.Cnf.SubTree("Architectures").List(): + description = self.Cnf["Architectures::%s" % (arch)] + c.execute(arch_add, [arch, description]) + self.projectB.commit() + + def do_component(self): + """Initalize the component table.""" + + c = self.projectB.cursor() + c.execute("DELETE FROM component") + + comp_add = "INSERT INTO component (name, description, meets_dfsg) " + \ + "VALUES (%s, %s, %s)" + + for name in self.Cnf.SubTree("Component").List(): + component_config = self.Cnf.SubTree("Component::%s" % (name)) + description = sql_get(component_config, "Description") + meets_dfsg = (component_config.get("MeetsDFSG").lower() == "true") + c.execute(comp_add, [name, description, meets_dfsg]) + + self.projectB.commit() + + def do_location(self): + """Initalize the location table.""" + + c = self.projectB.cursor() + c.execute("DELETE FROM location") + + loc_add = "INSERT INTO location (path, component, archive, type) " + \ + "VALUES (%s, %s, %s, %s)" + + for location in self.Cnf.SubTree("Location").List(): + location_config = self.Cnf.SubTree("Location::%s" % (location)) + archive_id = self.projectB.get_archive_id(location_config["Archive"]) + if archive_id == -1: + utils.fubar("Archive '%s' for location '%s' not found." 
+ % (location_config["Archive"], location)) + location_type = location_config.get("type") + if location_type == "pool": + for component in self.Cnf.SubTree("Component").List(): + component_id = self.projectB.get_component_id(component) + c.execute(loc_add, [location, component_id, archive_id, location_type]) else: + utils.fubar("E: type '%s' not recognised in location %s." + % (location_type, location)) + + self.projectB.commit() + + def do_suite(self): + """Initalize the suite table.""" + + c = self.projectB.cursor() + c.execute("DELETE FROM suite") + + suite_add = "INSERT INTO suite (suite_name, version, origin, description) " + \ + "VALUES (%s, %s, %s, %s)" + + sa_add = "INSERT INTO suite_architectures (suite, architecture) " + \ + "VALUES (currval('suite_id_seq'), %s)" + + for suite in self.Cnf.SubTree("Suite").List(): + suite_config = self.Cnf.SubTree("Suite::%s" %(suite)) + version = sql_get(suite_config, "Version") + origin = sql_get(suite_config, "Origin") + description = sql_get(suite_config, "Description") + c.execute(suite_add, [suite.lower(), version, origin, description]) + for architecture in self.Cnf.ValueList("Suite::%s::Architectures" % (suite)): + architecture_id = self.projectB.get_architecture_id (architecture) + if architecture_id < 0: + utils.fubar("architecture '%s' not found in architecture" + " table for suite %s." + % (architecture, suite)) + c.execute(sa_add, [architecture_id]) + + self.projectB.commit() + + def do_override_type(self): + """Initalize the override_type table.""" + + c = self.projectB.cursor() + c.execute("DELETE FROM override_type") + + over_add = "INSERT INTO override_type (type) VALUES (%s)" + + for override_type in self.Cnf.ValueList("OverrideType"): + c.execute(over_add, [override_type]) + + self.projectB.commit() + + def do_priority(self): + """Initialize the priority table.""" + + c = self.projectB.cursor() + c.execute("DELETE FROM priority") + + prio_add = "INSERT INTO priority (priority, level) VALUES (%s, %s)" + + for priority in self.Cnf.SubTree("Priority").List(): + c.execute(prio_add, [priority, self.Cnf["Priority::%s" % (priority)]]) + + self.projectB.commit() + + def do_section(self): + """Initalize the section table.""" + + c = self.projectB.cursor() + c.execute("DELETE FROM section") + + sect_add = "INSERT INTO section (section) VALUES (%s)" + + for component in self.Cnf.SubTree("Component").List(): + if self.Cnf["Control-Overrides::ComponentPosition"] == "prefix": suffix = "" - for section in Cnf.ValueList("Section"): - projectB.query("INSERT INTO section (section) VALUES " - "('%s%s%s')" % (prefix, section, suffix)) - projectB.query("COMMIT WORK") + if component != "main": + prefix = component + '/' + else: + prefix = "" + else: + prefix = "" + if component != "main": + suffix = '/' + component + else: + suffix = "" + for section in self.Cnf.ValueList("Section"): + c.execute(sect_add, [prefix + section + suffix]) + + self.projectB.commit() + + def do_all(self): + self.do_archive() + self.do_architecture() + self.do_component() + self.do_location() + self.do_suite() + self.do_override_type() + self.do_priority() + self.do_section() ################################################################################ def main (): """Sync dak.conf configuartion file and the SQL database""" - global Cnf, projectB - Cnf = utils.get_conf() arguments = [('h', "help", "Init-DB::Options::Help")] for i in [ "help" ]: @@ -207,18 +231,11 @@ def main (): utils.warn("dak init-db takes no arguments.") usage(exit_code=1) - projectB = 
pg.connect(Cnf["DB::Name"], Cnf["DB::Host"], - int(Cnf["DB::Port"])) - database.init(Cnf, projectB) - - do_archive() - do_architecture() - do_component() - do_location() - do_suite() - do_override_type() - do_priority() - do_section() + # Just let connection failures be reported to the user + projectB = DBConn() + Cnf = Config() + + InitDB(Cnf, projectB).do_all() ################################################################################ diff --git a/dak/make_overrides.py b/dak/make_overrides.py index 8fdde8b1..7ac3ec27 100755 --- a/dak/make_overrides.py +++ b/dak/make_overrides.py @@ -118,7 +118,7 @@ def main (): database.init(Cnf, projectB) for suite in Cnf.SubTree("Check-Overrides::OverrideSuites").List(): - if Cnf.has_key("Suite::%s::Untouchable" % suite) and Cnf["Suite::%s::Untouchable" % suite] != 0: + if database.get_suite_untouchable(suite): continue suite = suite.lower() diff --git a/dak/make_suite_file_list.py b/dak/make_suite_file_list.py index fdf2c5e3..8bb9142c 100755 --- a/dak/make_suite_file_list.py +++ b/dak/make_suite_file_list.py @@ -91,7 +91,7 @@ def delete_packages(delete_versions, pkg, dominant_arch, suite, delete_version = version[0] delete_id = packages[delete_unique_id]["sourceid"] delete_arch = packages[delete_unique_id]["arch"] - if not Cnf.Find("Suite::%s::Untouchable" % (suite)) or Options["Force"]: + if not database.get_suite_untouchable(suite) or Options["Force"]: if Options["No-Delete"]: print "Would delete %s_%s_%s in %s in favour of %s_%s" % (pkg, delete_arch, delete_version, suite, dominant_version, dominant_arch) else: diff --git a/dak/process_accepted.py b/dak/process_accepted.py index 71c0312a..97c5d0d5 100755 --- a/dak/process_accepted.py +++ b/dak/process_accepted.py @@ -29,8 +29,13 @@ ############################################################################### -import errno, fcntl, os, sys, time, re -import apt_pkg +import errno +import fcntl +import os +import sys +import time +import re +import apt_pkg, commands from daklib import database from daklib import logging from daklib import queue @@ -97,8 +102,10 @@ class Urgency_Log: else: os.unlink(self.log_filename) + ############################################################################### + def reject (str, prefix="Rejected: "): global reject_message if str: @@ -383,6 +390,12 @@ def install (): suite_id = database.get_suite_id(suite) projectB.query("INSERT INTO bin_associations (suite, bin) VALUES (%d, currval('binaries_id_seq'))" % (suite_id)) + if not database.copy_temporary_contents(package, version, newfile, reject): + print "REJECT\n" + reject_message, + projectB.query("ROLLBACK") + raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (package, newfile ) + + orig_tar_id = Upload.pkg.orig_tar_id orig_tar_location = Upload.pkg.orig_tar_location @@ -426,7 +439,6 @@ def install (): utils.copy(pkg.changes_file, Cnf["Dir::Root"] + dest) for dest in copy_dot_dak.keys(): utils.copy(Upload.pkg.changes_file[:-8]+".dak", dest) - projectB.query("COMMIT WORK") # Move the .changes into the 'done' directory diff --git a/dak/process_unchecked.py b/dak/process_unchecked.py index 8392e7f5..938f839c 100755 --- a/dak/process_unchecked.py +++ b/dak/process_unchecked.py @@ -28,9 +28,22 @@ ################################################################################ -import commands, errno, fcntl, os, re, shutil, stat, sys, time, tempfile, traceback -import apt_inst, apt_pkg -from daklib import database +import commands +import errno +import fcntl 
+import os +import re +import shutil +import stat +import sys +import time +import traceback +import tarfile +import apt_inst +import apt_pkg +from debian_bundle import deb822 +from daklib.dbconn import DBConn +from daklib.binary import Binary from daklib import logging from daklib import queue from daklib import utils @@ -302,7 +315,7 @@ def check_distributions(): (source, dest) = args[1:3] if changes["distribution"].has_key(source): for arch in changes["architecture"].keys(): - if arch not in database.get_suite_architectures(source): + if arch not in DBConn().get_suite_architectures(source): reject("Mapping %s to %s for unreleased architecture %s." % (source, dest, arch),"") del changes["distribution"][source] changes["distribution"][dest] = 1 @@ -335,33 +348,6 @@ def check_distributions(): ################################################################################ -def check_deb_ar(filename): - """ - Sanity check the ar of a .deb, i.e. that there is: - - 1. debian-binary - 2. control.tar.gz - 3. data.tar.gz or data.tar.bz2 - - in that order, and nothing else. - """ - cmd = "ar t %s" % (filename) - (result, output) = commands.getstatusoutput(cmd) - if result != 0: - reject("%s: 'ar t' invocation failed." % (filename)) - reject(utils.prefix_multi_line_string(output, " [ar output:] "), "") - chunks = output.split('\n') - if len(chunks) != 3: - reject("%s: found %d chunks, expected 3." % (filename, len(chunks))) - if chunks[0] != "debian-binary": - reject("%s: first chunk is '%s', expected 'debian-binary'." % (filename, chunks[0])) - if chunks[1] != "control.tar.gz": - reject("%s: second chunk is '%s', expected 'control.tar.gz'." % (filename, chunks[1])) - if chunks[2] not in [ "data.tar.bz2", "data.tar.gz" ]: - reject("%s: third chunk is '%s', expected 'data.tar.gz' or 'data.tar.bz2'." % (filename, chunks[2])) - -################################################################################ - def check_files(): global reprocess @@ -400,6 +386,20 @@ def check_files(): has_binaries = 0 has_source = 0 + cursor = DBConn().cursor() + # Check for packages that have moved from one component to another + # STU: this should probably be changed to not join on architecture, suite tables but instead to used their cached name->id mappings from DBConn + DBConn().prepare("moved_pkg_q", """ + PREPARE moved_pkg_q(text,text,text) AS + SELECT c.name FROM binaries b, bin_associations ba, suite s, location l, + component c, architecture a, files f + WHERE b.package = $1 AND s.suite_name = $2 + AND (a.arch_string = $3 OR a.arch_string = 'all') + AND ba.bin = b.id AND ba.suite = s.id AND b.architecture = a.id + AND f.location = l.id + AND l.component = c.id + AND b.file = f.id""") + for f in file_keys: # Ensure the file does not already exist in one of the accepted directories for d in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]: @@ -464,7 +464,7 @@ def check_files(): default_suite = Cnf.get("Dinstall::DefaultSuite", "Unstable") architecture = control.Find("Architecture") upload_suite = changes["distribution"].keys()[0] - if architecture not in database.get_suite_architectures(default_suite) and architecture not in database.get_suite_architectures(upload_suite): + if architecture not in DBConn().get_suite_architectures(default_suite) and architecture not in DBConn().get_suite_architectures(upload_suite): reject("Unknown architecture '%s'." 
% (architecture)) # Ensure the architecture of the .deb is one of the ones @@ -562,7 +562,7 @@ def check_files(): # Check the version and for file overwrites reject(Upload.check_binary_against_db(f),"") - check_deb_ar(f) + Binary(f, reject).scan_package( ) # Checks for a source package... else: @@ -622,7 +622,7 @@ def check_files(): # Validate the component component = files[f]["component"] - component_id = database.get_component_id(component) + component_id = DBConn().get_component_id(component) if component_id == -1: reject("file '%s' has unknown component '%s'." % (f, component)) continue @@ -637,14 +637,14 @@ def check_files(): # Determine the location location = Cnf["Dir::Pool"] - location_id = database.get_location_id (location, component, archive) + location_id = DBConn().get_location_id(location, component, archive) if location_id == -1: reject("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (component, archive)) files[f]["location id"] = location_id # Check the md5sum & size against existing files (if any) files[f]["pool name"] = utils.poolify (changes["source"], files[f]["component"]) - files_id = database.get_files_id(files[f]["pool name"] + f, files[f]["size"], files[f]["md5sum"], files[f]["location id"]) + files_id = DBConn().get_files_id(files[f]["pool name"] + f, files[f]["size"], files[f]["md5sum"], files[f]["location id"]) if files_id == -1: reject("INTERNAL ERROR, get_files_id() returned multiple matches for %s." % (f)) elif files_id == -2: @@ -652,16 +652,9 @@ def check_files(): files[f]["files id"] = files_id # Check for packages that have moved from one component to another - q = Upload.projectB.query(""" -SELECT c.name FROM binaries b, bin_associations ba, suite s, location l, - component c, architecture a, files f - WHERE b.package = '%s' AND s.suite_name = '%s' - AND (a.arch_string = '%s' OR a.arch_string = 'all') - AND ba.bin = b.id AND ba.suite = s.id AND b.architecture = a.id - AND f.location = l.id AND l.component = c.id AND b.file = f.id""" - % (files[f]["package"], suite, - files[f]["architecture"])) - ql = q.getresult() + files[f]['suite'] = suite + cursor.execute("""EXECUTE moved_pkg_q( %(package)s, %(suite)s, %(architecture)s )""", ( files[f] ) ) + ql = cursor.fetchone() if ql: files[f]["othercomponents"] = ql[0][0] @@ -886,13 +879,7 @@ def check_source(): or pkg.orig_tar_gz == -1: return - # Create a temporary directory to extract the source into - if Options["No-Action"]: - tmpdir = tempfile.mkdtemp() - else: - # We're in queue/holding and can create a random directory. 
- tmpdir = "%s" % (os.getpid()) - os.mkdir(tmpdir) + tmpdir = utils.temp_dirname() # Move into the temporary directory cwd = os.getcwd() @@ -1013,12 +1000,21 @@ def check_timestamps(): ################################################################################ def lookup_uid_from_fingerprint(fpr): - q = Upload.projectB.query("SELECT u.uid, u.name, k.debian_maintainer FROM fingerprint f JOIN keyrings k ON (f.keyring=k.id), uid u WHERE f.uid = u.id AND f.fingerprint = '%s'" % (fpr)) - qs = q.getresult() - if len(qs) == 0: - return (None, None, None) + """ + Return the uid,name,isdm for a given gpg fingerprint + + @ptype fpr: string + @param fpr: a 40 byte GPG fingerprint + + @return (uid, name, isdm) + """ + cursor = DBConn().cursor() + cursor.execute( "SELECT u.uid, u.name, k.debian_maintainer FROM fingerprint f JOIN keyrings k ON (f.keyring=k.id), uid u WHERE f.uid = u.id AND f.fingerprint = '%s'" % (fpr)) + qs = cursor.fetchone() + if qs: + return qs else: - return qs[0] + return (None, None, None) def check_signed_by_key(): """Ensure the .changes is signed by an authorized uploader.""" @@ -1059,12 +1055,16 @@ def check_signed_by_key(): if not sponsored and not may_nmu: source_ids = [] - q = Upload.projectB.query("SELECT s.id, s.version FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = '%s' AND s.dm_upload_allowed = 'yes'" % (changes["source"])) + cursor.execute( "SELECT s.id, s.version FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = %(source)s AND s.dm_upload_allowed = 'yes'", changes ) highest_sid, highest_version = None, None should_reject = True - for si in q.getresult(): + while True: + si = cursor.fetchone() + if not si: + break + if highest_version == None or apt_pkg.VersionCompare(si[1], highest_version) == 1: highest_sid = si[0] highest_version = si[1] @@ -1072,8 +1072,14 @@ def check_signed_by_key(): if highest_sid == None: reject("Source package %s does not have 'DM-Upload-Allowed: yes' in its most recent version" % changes["source"]) else: - q = Upload.projectB.query("SELECT m.name FROM maintainer m WHERE m.id IN (SELECT su.maintainer FROM src_uploaders su JOIN source s ON (s.id = su.source) WHERE su.source = %s)" % (highest_sid)) - for m in q.getresult(): + + cursor.execute("SELECT m.name FROM maintainer m WHERE m.id IN (SELECT su.maintainer FROM src_uploaders su JOIN source s ON (s.id = su.source) WHERE su.source = %s)" % (highest_sid)) + + while True: + m = cursor.fetchone() + if not m: + break + (rfc822, rfc2047, name, email) = utils.fix_maintainer(m[0]) if email == uid_email or name == uid_name: should_reject=False @@ -1084,9 +1090,14 @@ def check_signed_by_key(): for b in changes["binary"].keys(): for suite in changes["distribution"].keys(): - suite_id = database.get_suite_id(suite) - q = Upload.projectB.query("SELECT DISTINCT s.source FROM source s JOIN binaries b ON (s.id = b.source) JOIN bin_associations ba On (b.id = ba.bin) WHERE b.package = '%s' AND ba.suite = %s" % (b, suite_id)) - for s in q.getresult(): + suite_id = DBConn().get_suite_id(suite) + + cursor.execute("SELECT DISTINCT s.source FROM source s JOIN binaries b ON (s.id = b.source) JOIN bin_associations ba On (b.id = ba.bin) WHERE b.package = %(package)s AND ba.suite = %(suite)s" , {'package':b, 'suite':suite_id} ) + while True: + s = cursor.fetchone() + if not s: + break + if s[0] != changes["source"]: reject("%s may not hijack %s from source package %s in suite %s" % (uid, b, s, suite)) @@ -1230,11 +1241,9 @@ def move_to_dir (dest, 
perms=0660, changesperms=0664):

 ################################################################################

 def is_unembargo ():
-    q = Upload.projectB.query(
-      "SELECT package FROM disembargo WHERE package = '%s' AND version = '%s'" %
-      (changes["source"], changes["version"]))
-    ql = q.getresult()
-    if ql:
+    cursor = DBConn().cursor()
+    cursor.execute( "SELECT package FROM disembargo WHERE package = %(source)s AND version = %(version)s", changes )
+    if cursor.fetchone():
         return 1

     oldcwd = os.getcwd()
@@ -1246,9 +1255,9 @@ def is_unembargo ():
     if changes["architecture"].has_key("source"):
         if Options["No-Action"]: return 1

-        Upload.projectB.query(
-          "INSERT INTO disembargo (package, version) VALUES ('%s', '%s')" %
-          (changes["source"], changes["version"]))
+        cursor.execute( "INSERT INTO disembargo (package, version) VALUES (%(source)s, %(version)s)",
+                        changes )
+        cursor.execute( "COMMIT" )
         return 1

     return 0
@@ -1306,12 +1315,18 @@ def is_stableupdate ():
         return 0

     if not changes["architecture"].has_key("source"):
-        pusuite = database.get_suite_id("proposed-updates")
-        q = Upload.projectB.query(
-          "SELECT S.source FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = '%s' AND s.version = '%s' AND sa.suite = %d" %
-          (changes["source"], changes["version"], pusuite))
-        ql = q.getresult()
-        if ql:
+        pusuite = DBConn().get_suite_id("proposed-updates")
+        cursor = DBConn().cursor()
+        cursor.execute( """SELECT 1 FROM source s
+                           JOIN src_associations sa ON (s.id = sa.source)
+                           WHERE s.source = %(source)s
+                           AND s.version = %(version)s
+                           AND sa.suite = %(suite)s""",
+                        {'source' : changes['source'],
+                         'version' : changes['version'],
+                         'suite' : pusuite})
+
+        if cursor.fetchone():
             # source is already in proposed-updates so no need to hold
             return 0

@@ -1335,13 +1350,17 @@ def is_oldstableupdate ():
         return 0

     if not changes["architecture"].has_key("source"):
-        pusuite = database.get_suite_id("oldstable-proposed-updates")
-        q = Upload.projectB.query(
-          "SELECT S.source FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = '%s' AND s.version = '%s' AND sa.suite = %d" %
-          (changes["source"], changes["version"], pusuite))
-        ql = q.getresult()
-        if ql:
-            # source is already in oldstable-proposed-updates so no need to hold
+        pusuite = DBConn().get_suite_id("oldstable-proposed-updates")
+        cursor = DBConn().cursor()
+        cursor.execute( """SELECT 1 FROM source s
+                           JOIN src_associations sa ON (s.id = sa.source)
+                           WHERE s.source = %(source)s
+                           AND s.version = %(version)s
+                           AND sa.suite = %(suite)s""",
+                        {'source' : changes['source'],
+                         'version' : changes['version'],
+                         'suite' : pusuite})
+        if cursor.fetchone():
             return 0

     return 1
diff --git a/dak/rm.py b/dak/rm.py
index 903e138e..6844738f 100755
--- a/dak/rm.py
+++ b/dak/rm.py
@@ -39,8 +39,13 @@

 ################################################################################

-import commands, os, pg, re, sys
-import apt_pkg, apt_inst
+import commands
+import os
+import pg
+import re
+import sys
+import apt_pkg
+import apt_inst
 from daklib import database
 from daklib import utils
 from daklib.dak_exceptions import *
diff --git a/dak/update_db.py b/dak/update_db.py
index 0d4e65db..0df3b946 100755
--- a/dak/update_db.py
+++ b/dak/update_db.py
@@ -45,7 +45,7 @@ from daklib.dak_exceptions import DBUpdateError
 Cnf = None
 projectB = None

-required_database_schema = 5
+required_database_schema = 7

 ################################################################################
diff --git a/daklib/binary.py b/daklib/binary.py
file mode 100755 index 00000000..80805276 --- /dev/null +++ b/daklib/binary.py @@ -0,0 +1,245 @@ +#!/usr/bin/python + +""" +Functions related debian binary packages + +@contact: Debian FTPMaster +@copyright: 2009 Mike O'Connor +@license: GNU General Public License version 2 or later +""" + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + +# are we going the xorg way? +# a dak without a dak.conf? +# automatically detect the wrong settings at runtime? +# yes! +# well, we'll probably always need dak.conf (how do you get the database setting +# but removing most of the config into the database seems sane +# mhy: dont spoil the fun +# mhy: and i know how. we nmap localhost and check all open ports +# maybe one answers to sql +# we will discover projectb via avahi +# you're both sick +# really fucking sick + +################################################################################ + +import os +import sys +import shutil +import tempfile +import tarfile +import commands +import traceback +import atexit +from debian_bundle import deb822 +from dbconn import DBConn +from config import Config +import logging +import utils + +class Binary(object): + def __init__(self, filename, reject=None): + """ + @ptype filename: string + @param filename: path of a .deb + + @ptype reject: function + @param reject: a function to log reject messages to + """ + self.filename = filename + self.tmpdir = None + self.chunks = None + self.wrapped_reject = reject + + def reject(self, message): + """ + if we were given a reject function, send the reject message, + otherwise send it to stderr. + """ + print >> sys.stderr, message + if self.wrapped_reject: + self.wrapped_reject(message) + + def __del__(self): + """ + make sure we cleanup when we are garbage collected. + """ + self._cleanup() + + def _cleanup(self): + """ + we need to remove the temporary directory, if we created one + """ + if self.tmpdir and os.path.exists(self.tmpdir): + shutil.rmtree(self.tmpdir) + self.tmpdir = None + + def __scan_ar(self): + # get a list of the ar contents + if not self.chunks: + + cmd = "ar t %s" % (self.filename) + (result, output) = commands.getstatusoutput(cmd) + if result != 0: + rejected = True + print("%s: 'ar t' invocation failed." % (self.filename)) + self.reject("%s: 'ar t' invocation failed." 
% (self.filename))
+                self.reject(utils.prefix_multi_line_string(output, " [ar output:] "))
+            else:
+                self.chunks = output.split('\n')
+
+
+
+    def __unpack(self):
+        # Internal function which extracts the contents of the .ar to
+        # a temporary directory
+
+        if not self.tmpdir:
+            tmpdir = utils.temp_dirname()
+            cwd = os.getcwd()
+            try:
+                os.chdir( tmpdir )
+                cmd = "ar x %s %s %s" % (os.path.join(cwd,self.filename), self.chunks[1], self.chunks[2])
+                (result, output) = commands.getstatusoutput(cmd)
+                if result != 0:
+                    print("%s: '%s' invocation failed." % (self.filename, cmd))
+                    self.reject("%s: '%s' invocation failed." % (self.filename, cmd))
+                    self.reject(utils.prefix_multi_line_string(output, " [ar output:] "))
+                else:
+                    self.tmpdir = tmpdir
+                    atexit.register( self._cleanup )
+
+            finally:
+                os.chdir( cwd )
+
+    def valid_deb(self):
+        """
+        Check deb contents making sure the .deb contains:
+          1. debian-binary
+          2. control.tar.gz
+          3. data.tar.gz or data.tar.bz2
+        in that order, and nothing else.
+        """
+        self.__scan_ar()
+        rejected = not self.chunks
+        if not rejected and len(self.chunks) != 3:
+            rejected = True
+            self.reject("%s: found %d chunks, expected 3." % (self.filename, len(self.chunks)))
+        if not rejected and self.chunks[0] != "debian-binary":
+            rejected = True
+            self.reject("%s: first chunk is '%s', expected 'debian-binary'." % (self.filename, self.chunks[0]))
+        if not rejected and self.chunks[1] != "control.tar.gz":
+            rejected = True
+            self.reject("%s: second chunk is '%s', expected 'control.tar.gz'." % (self.filename, self.chunks[1]))
+        if not rejected and self.chunks[2] not in [ "data.tar.bz2", "data.tar.gz" ]:
+            rejected = True
+            self.reject("%s: third chunk is '%s', expected 'data.tar.gz' or 'data.tar.bz2'." % (self.filename, self.chunks[2]))
+
+        return not rejected
+
+    def scan_package(self, bootstrap_id=0):
+        """
+        Unpack the .deb, do sanity checking, and gather info from it.
+
+        Currently information gathering consists of getting the contents list. In
+        the hopefully near future, it should also include gathering info from the
+        control file.
+
+        @ptype bootstrap_id: int
+        @param bootstrap_id: the id of the binary these packages
+          should be associated or zero meaning we are not bootstrapping
+          so insert into a temporary table
+
+        @return True if the deb is valid and contents were imported
+        """
+        result = False
+        rejected = not self.valid_deb()
+        if not rejected:
+            self.__unpack()
+
+
+        cwd = os.getcwd()
+        if not rejected and self.tmpdir:
+            try:
+                os.chdir(self.tmpdir)
+                if self.chunks[1] == "control.tar.gz":
+                    control = tarfile.open(os.path.join(self.tmpdir, "control.tar.gz" ), "r:gz")
+                    control.extract('./control', self.tmpdir )
+                if self.chunks[2] == "data.tar.gz":
+                    data = tarfile.open(os.path.join(self.tmpdir, "data.tar.gz"), "r:gz")
+                elif self.chunks[2] == "data.tar.bz2":
+                    data = tarfile.open(os.path.join(self.tmpdir, "data.tar.bz2" ), "r:bz2")
+
+                if bootstrap_id:
+                    result = DBConn().insert_content_paths(bootstrap_id, [tarinfo.name for tarinfo in data if not tarinfo.isdir()])
+                else:
+                    pkgs = deb822.Packages.iter_paragraphs(file(os.path.join(self.tmpdir,'control')))
+                    pkg = pkgs.next()
+                    result = DBConn().insert_pending_content_paths(pkg, [tarinfo.name for tarinfo in data if not tarinfo.isdir()])
+
+            except:
+                traceback.print_exc()
+
+        os.chdir(cwd)
+        return result
+
+    def check_utf8_package(self, package):
+        """
+        Unpack the .deb and check that every filename in its data member
+        decodes as valid UTF-8, reporting any that do not on stderr.
+
+        @ptype package: string
+        @param package: the name of the package whose contents are checked
+        """
+        rejected = not self.valid_deb()
+        self.__unpack()
+
+        if not rejected and self.tmpdir:
+            cwd = os.getcwd()
+            try:
+                os.chdir(self.tmpdir)
+                if self.chunks[1] == "control.tar.gz":
+                    control = tarfile.open(os.path.join(self.tmpdir, "control.tar.gz" ), "r:gz")
+                    control.extract('control', self.tmpdir )
+                if self.chunks[2] == "data.tar.gz":
+                    data = tarfile.open(os.path.join(self.tmpdir, "data.tar.gz"), "r:gz")
+                elif self.chunks[2] == "data.tar.bz2":
+                    data = tarfile.open(os.path.join(self.tmpdir, "data.tar.bz2" ), "r:bz2")
+
+                for tarinfo in data:
+                    try:
+                        unicode( tarinfo.name )
+                    except:
+                        print >> sys.stderr, "E: %s has non-unicode filename: %s" % (package,tarinfo.name)
+
+            except:
+                traceback.print_exc()
+
+            os.chdir(cwd)
+
+if __name__ == "__main__":
+    Binary( "/srv/ftp.debian.org/queue/accepted/halevt_0.1.3-2_amd64.deb" ).scan_package()
+
diff --git a/daklib/config.py b/daklib/config.py
new file mode 100755
index 00000000..997a597d
--- /dev/null
+++ b/daklib/config.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+"""
+Config access class
+
+@contact: Debian FTPMaster
+@copyright: 2008  Mark Hymers
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+ +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + +# mhy, how about "Now with 20% more monty python references" + +################################################################################ + +import apt_pkg +import socket + +from singleton import Singleton + +################################################################################ + +default_config = "/etc/dak/dak.conf" + +def which_conf_file(Cnf): + res = socket.gethostbyaddr(socket.gethostname()) + if Cnf.get("Config::" + res[0] + "::DakConfig"): + return Cnf["Config::" + res[0] + "::DakConfig"] + else: + return default_config + +class Config(Singleton): + """ + A Config object is a singleton containing + information about the DAK configuration + """ + def __init__(self, *args, **kwargs): + super(Config, self).__init__(*args, **kwargs) + + def _readconf(self): + apt_pkg.init() + + self.Cnf = apt_pkg.newConfiguration() + + apt_pkg.ReadConfigFileISC(self.Cnf, default_config) + + # Check whether our dak.conf was the real one or + # just a pointer to our main one + res = socket.gethostbyaddr(socket.gethostname()) + conffile = self.Cnf.get("Config::" + res[0] + "::DakConfig") + if conffile: + apt_pkg.ReadConfigFileISC(self.Cnf, conffile) + + # Rebind some functions + # TODO: Clean this up + self.get = self.Cnf.get + self.SubTree = self.Cnf.SubTree + self.ValueList = self.Cnf.ValueList + + def _startup(self, *args, **kwargs): + self._readconf() + + def has_key(self, name): + return self.Cnf.has_key(name) + + def __getitem__(self, name): + return self.Cnf[name] + diff --git a/daklib/dak_exceptions.py b/daklib/dak_exceptions.py index 33fa5ad3..ccd63e50 100755 --- a/daklib/dak_exceptions.py +++ b/daklib/dak_exceptions.py @@ -58,6 +58,7 @@ dakerrors = { "NoFreeFilenameError": """Exception raised when no alternate filename was found.""", "TransitionsError": """Exception raised when transitions file can't be parsed.""", "NoSourceFieldError": """Exception raised - we cant find the source - wtf?""", + "MissingContents": """Exception raised - we could not determine contents for this deb""", "DBUpdateError": """Exception raised - could not update the database""", "ChangesUnicodeError": """Exception raised - changes file not properly utf-8 encoded""" } #: All dak exceptions diff --git a/daklib/database.py b/daklib/database.py index 3fbd2a50..7ac6f2cc 100755 --- a/daklib/database.py +++ b/daklib/database.py @@ -32,6 +32,8 @@ import sys import time import types +import utils +from binary import Binary ################################################################################ @@ -48,12 +50,15 @@ location_id_cache = {} #: cache for locations maintainer_id_cache = {} #: cache for maintainers keyring_id_cache = {} #: cache for keyrings source_id_cache = {} #: cache for sources + files_id_cache = {} #: cache for files maintainer_cache = {} #: cache for maintainer names fingerprint_id_cache = {} #: cache for fingerprints queue_id_cache = {} #: cache for queues uid_id_cache = {} #: cache for uids suite_version_cache = {} #: cache for suite_versions (packages) +suite_bin_version_cache = {} +cache_preloaded = False ################################################################################ @@ -388,6 +393,7 @@ def get_suite_version(source, suite): @return: the version for I{source} in I{suite} """ + 
 global suite_version_cache
 cache_key = "%s_%s" % (source, suite)
@@ -410,6 +416,50 @@ def get_suite_version(source, suite):

     return version

+def get_latest_binary_version_id(binary, section, suite, arch):
+    global suite_bin_version_cache
+    cache_key = "%s_%s_%s_%s" % (binary, section, suite, arch)
+    cache_key_all = "%s_%s_%s_%s" % (binary, section, suite, get_architecture_id("all"))
+
+    # Check for the cache hit for its arch, then arch all
+    if suite_bin_version_cache.has_key(cache_key):
+        return suite_bin_version_cache[cache_key]
+    if suite_bin_version_cache.has_key(cache_key_all):
+        return suite_bin_version_cache[cache_key_all]
+    if cache_preloaded == True:
+        return # package does not exist
+
+    q = projectB.query("SELECT DISTINCT b.id FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.package = '%s' AND b.architecture = '%d' AND ba.suite = '%d' AND o.section = '%d'" % (binary, int(arch), int(suite), int(section)))
+
+    if not q.getresult():
+        return False
+
+    highest_bid = q.getresult()[0][0]
+
+    suite_bin_version_cache[cache_key] = highest_bid
+    return highest_bid
+
+def preload_binary_id_cache():
+    global suite_bin_version_cache, cache_preloaded
+
+    # Get suite info
+    q = projectB.query("SELECT id FROM suite")
+    suites = q.getresult()
+
+    # Get arch mappings
+    q = projectB.query("SELECT id FROM architecture")
+    arches = q.getresult()
+
+    for suite in suites:
+        for arch in arches:
+            q = projectB.query("SELECT DISTINCT b.id, b.package, o.section FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.architecture = '%d' AND ba.suite = '%d'" % (int(arch[0]), int(suite[0])))
+
+            for bi in q.getresult():
+                cache_key = "%s_%s_%s_%s" % (bi[1], bi[2], suite[0], arch[0])
+                suite_bin_version_cache[cache_key] = int(bi[0])
+
+    cache_preloaded = True
+
 def get_suite_architectures(suite):
     """
     Returns list of architectures for C{suite}.
@@ -436,6 +486,32 @@ def get_suite_architectures(suite):
     q = projectB.query(sql)
     return map(lambda x: x[0], q.getresult())

+def get_suite_untouchable(suite):
+    """
+    Returns true if the C{suite} is untouchable, otherwise false.
+
+    @type suite: string, int
+    @param suite: the suite name or the suite_id
+
+    @rtype: boolean
+    @return: status of suite
+    """
+
+    suite_id = None
+    if type(suite) == str:
+        suite_id = get_suite_id(suite)
+    elif type(suite) == int:
+        suite_id = suite
+    else:
+        return None
+
+    sql = """ SELECT untouchable FROM suite WHERE id='%s' """ % (suite_id)
+
+    q = projectB.query(sql)
+    if q.getresult()[0][0] == "f":
+        return False
+    else:
+        return True

 ################################################################################

@@ -758,3 +834,46 @@ def get_suites(pkgname, src=False):

     q = projectB.query(sql)
     return map(lambda x: x[0], q.getresult())
+
+
+################################################################################
+
+def copy_temporary_contents(package, version, deb, reject):
+    """
+    copy the previously stored contents from the temp table to the permanent one
+
+    during process-unchecked, the deb should have been scanned and the
+    contents stored in pending_content_associations
+    """
+
+    # first see if contents exist:
+
+    exists = projectB.query("""SELECT 1 FROM pending_content_associations
+                               WHERE package='%s' LIMIT 1""" % package ).getresult()
+
+    if not exists:
+        # This should NOT happen.  We should have added contents
+        # during process-unchecked.  If that didn't happen, log an error
+        # and send an email.
+        subst = {
+            "__PACKAGE__": package,
+            "__VERSION__": version,
+            "__TO_ADDRESS__": utils.Cnf["Dinstall::MyAdminAddress"],
+            "__DAK_ADDRESS__": utils.Cnf["Dinstall::MyEmailAddress"] }
+
+        message = utils.TemplateSubst(subst, utils.Cnf["Dir::Templates"]+"/missing-contents")
+        utils.send_mail( message )
+
+        exists = Binary(deb, reject).scan_package()
+
+    if exists:
+        sql = """INSERT INTO content_associations(binary_pkg,filepath,filename)
+                 SELECT currval('binaries_id_seq'), filepath, filename FROM pending_content_associations
+                 WHERE package='%s'
+                 AND version='%s'""" % (package, version)
+        projectB.query(sql)
+        projectB.query("""DELETE from pending_content_associations
+                          WHERE package='%s'
+                          AND version='%s'""" % (package, version))
+
+    return exists
diff --git a/daklib/dbconn.py b/daklib/dbconn.py
new file mode 100755
index 00000000..24cb9044
--- /dev/null
+++ b/daklib/dbconn.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+
+""" DB access class
+
+@contact: Debian FTPMaster
+@copyright: 2000, 2001, 2002, 2003, 2004, 2006  James Troup
+@copyright: 2008-2009  Mark Hymers
+@copyright: 2009  Joerg Jaspert
+@copyright: 2009  Mike O'Connor
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+# < mhy> I need a funny comment
+# < sgran> two peanuts were walking down a dark street
+# < sgran> one was a-salted
+#  * mhy looks up the definition of "funny"
+
+################################################################################
+
+import os
+import psycopg2
+import traceback
+
+from singleton import Singleton
+from config import Config
+
+################################################################################
+
+class Cache(object):
+    def __init__(self, hashfunc=None):
+        if hashfunc:
+            self.hashfunc = hashfunc
+        else:
+            self.hashfunc = lambda x: x['value']
+
+        self.data = {}
+
+    def SetValue(self, keys, value):
+        self.data[self.hashfunc(keys)] = value
+
+    def GetValue(self, keys):
+        return self.data.get(self.hashfunc(keys))
+
+################################################################################
+
+class DBConn(Singleton):
+    """
+    database module init.
+    """
+    def __init__(self, *args, **kwargs):
+        super(DBConn, self).__init__(*args, **kwargs)
+
+    def _startup(self, *args, **kwargs):
+        self.__createconn()
+        self.__init_caches()
+
+    ## Connection functions
+    def __createconn(self):
+        cnf = Config()
+        connstr = "dbname=%s" % cnf["DB::Name"]
+        if cnf["DB::Host"]:
+            connstr += " host=%s" % cnf["DB::Host"]
+        if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+            connstr += " port=%s" % cnf["DB::Port"]
+
+        self.db_con = psycopg2.connect(connstr)
+
+    def reconnect(self):
+        try:
+            self.db_con.close()
+        except psycopg2.InterfaceError:
+            pass
+
+        self.db_con = None
+        self.__createconn()
+
+    ## Cache functions
+    def __init_caches(self):
+        self.caches = {'suite':         Cache(),
+                       'section':       Cache(),
+                       'priority':      Cache(),
+                       'override_type': Cache(),
+                       'architecture':  Cache(),
+                       'archive':       Cache(),
+                       'component':     Cache(),
+                       'content_path_names': Cache(),
+                       'content_file_names': Cache(),
+                       'location':      Cache(lambda x: '%s_%s_%s' % (x['location'], x['component'], x['archive'])),
+                       'maintainer':    {}, # TODO
+                       'keyring':       {}, # TODO
+                       'source':        Cache(lambda x: '%s_%s_' % (x['source'], x['version'])),
+                       'files':         Cache(lambda x: '%s_%s_' % (x['filename'], x['location'])),
+                       'fingerprint':   {}, # TODO
+                       'queue':         {}, # TODO
+                       'uid':           {}, # TODO
+                       'suite_version': Cache(lambda x: '%s_%s' % (x['source'], x['suite'])),
+                      }
+
+        self.prepared_statements = {}
+
+    def prepare(self,name,statement):
+        if not self.prepared_statements.has_key(name):
+            c = self.cursor()
+            c.execute(statement)
+            self.prepared_statements[name] = statement
+
+    def clear_caches(self):
+        self.__init_caches()
+
+    ## Functions to pass through to the database connector
+    def cursor(self):
+        return self.db_con.cursor()
+
+    def commit(self):
+        return self.db_con.commit()
+
+    ## Get functions
+    def __get_single_id(self, query, values, cachename=None):
+        # This is a bit of a hack but it's an internal function only
+        if cachename is not None:
+            res = self.caches[cachename].GetValue(values)
+            if res:
+                return res
+
+        c = self.db_con.cursor()
+        c.execute(query, values)
+
+        if c.rowcount != 1:
+            return None
+
+        res = c.fetchone()[0]
+
+        if cachename is not None:
+            self.caches[cachename].SetValue(values, res)
+
+        return res
+
+    def __get_id(self, retfield, table, qfield, value):
+        query = "SELECT %s FROM %s WHERE %s = %%(value)s" % (retfield, table, qfield)
+        return self.__get_single_id(query, {'value': value}, cachename=table)
+
+    def get_suite_id(self, suite):
+        """
+        Returns database id for given C{suite}.
+        Results are kept in a cache during runtime to minimize database queries.
+
+        @type suite: string
+        @param suite: The name of the suite
+
+        @rtype: int
+        @return: the database id for the given suite
+
+        """
+        return int(self.__get_id('id', 'suite', 'suite_name', suite))
+
+    def get_section_id(self, section):
+        """
+        Returns database id for given C{section}.
+        Results are kept in a cache during runtime to minimize database queries.
+
+        @type section: string
+        @param section: The name of the section
+
+        @rtype: int
+        @return: the database id for the given section
+
+        """
+        return self.__get_id('id', 'section', 'section', section)
+
+    def get_priority_id(self, priority):
+        """
+        Returns database id for given C{priority}.
+        Results are kept in a cache during runtime to minimize database queries.
+ + @type priority: string + @param priority: The name of the priority + + @rtype: int + @return: the database id for the given priority + + """ + return self.__get_id('id', 'priority', 'priority', priority) + + def get_override_type_id(self, override_type): + """ + Returns database id for given override C{type}. + Results are kept in a cache during runtime to minimize database queries. + + @type type: string + @param type: The name of the override type + + @rtype: int + @return: the database id for the given override type + + """ + return self.__get_id('id', 'override_type', 'type', override_type) + + def get_architecture_id(self, architecture): + """ + Returns database id for given C{architecture}. + Results are kept in a cache during runtime to minimize database queries. + + @type architecture: string + @param architecture: The name of the override type + + @rtype: int + @return: the database id for the given architecture + + """ + return self.__get_id('id', 'architecture', 'arch_string', architecture) + + def get_archive_id(self, archive): + """ + returns database id for given c{archive}. + results are kept in a cache during runtime to minimize database queries. + + @type archive: string + @param archive: the name of the override type + + @rtype: int + @return: the database id for the given archive + + """ + return self.__get_id('id', 'archive', 'lower(name)', archive) + + def get_component_id(self, component): + """ + Returns database id for given C{component}. + Results are kept in a cache during runtime to minimize database queries. + + @type component: string + @param component: The name of the override type + + @rtype: int + @return: the database id for the given component + + """ + return self.__get_id('id', 'component', 'lower(name)', component) + + def get_location_id(self, location, component, archive): + """ + Returns database id for the location behind the given combination of + - B{location} - the path of the location, eg. I{/srv/ftp.debian.org/ftp/pool/} + - B{component} - the id of the component as returned by L{get_component_id} + - B{archive} - the id of the archive as returned by L{get_archive_id} + Results are kept in a cache during runtime to minimize database queries. + + @type location: string + @param location: the path of the location + + @type component: int + @param component: the id of the component + + @type archive: int + @param archive: the id of the archive + + @rtype: int + @return: the database id for the location + + """ + + archive_id = self.get_archive_id(archive) + + if not archive_id: + return None + + res = None + + if component: + component_id = self.get_component_id(component) + if component_id: + res = self.__get_single_id("SELECT id FROM location WHERE path=%(location)s AND component=%(component)s AND archive=%(archive)s", + {'location': location, + 'archive': int(archive_id), + 'component': component_id}, cachename='location') + else: + res = self.__get_single_id("SELECT id FROM location WHERE path=%(location)s AND archive=%(archive)d", + {'location': location, 'archive': archive_id, 'component': ''}, cachename='location') + + return res + + def get_source_id(self, source, version): + """ + Returns database id for the combination of C{source} and C{version} + - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc} + - B{version} + Results are kept in a cache during runtime to minimize database queries. 
+
+        @type source: string
+        @param source: source package name
+
+        @type version: string
+        @param version: the source version
+
+        @rtype: int
+        @return: the database id for the source
+
+        """
+        return self.__get_single_id("SELECT id FROM source s WHERE s.source=%(source)s AND s.version=%(version)s",
+                                    {'source': source, 'version': version}, cachename='source')
+
+    def get_suite_version(self, source, suite):
+        """
+        Returns the version of C{source} in C{suite}.
+
+        - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
+        - B{suite} - a suite name, eg. I{unstable}
+
+        Results are kept in a cache during runtime to minimize database queries.
+
+        @type source: string
+        @param source: source package name
+
+        @type suite: string
+        @param suite: the suite name
+
+        @rtype: string
+        @return: the version for I{source} in I{suite}
+
+        """
+        return self.__get_single_id("""
+        SELECT s.version FROM source s, suite su, src_associations sa
+        WHERE sa.source=s.id
+          AND sa.suite=su.id
+          AND su.suite_name=%(suite)s
+          AND s.source=%(source)s""", {'suite': suite, 'source': source}, cachename='suite_version')
+
+
+    def get_files_id (self, filename, size, md5sum, location_id):
+        """
+        Returns -1, -2 or the file_id for filename, if its C{size} and C{md5sum} match an
+        existing copy.
+
+        The database is queried using the C{filename} and C{location_id}. If a file does exist
+        at that location, the existing size and md5sum are checked against the provided
+        parameters. A size or checksum mismatch returns -2. If more than one entry is
+        found within the database, a -1 is returned, no result returns None, otherwise
+        the file id.
+
+        Results are kept in a cache during runtime to minimize database queries.
+
+        @type filename: string
+        @param filename: the filename of the file to check against the DB
+
+        @type size: int
+        @param size: the size of the file to check against the DB
+
+        @type md5sum: string
+        @param md5sum: the md5sum of the file to check against the DB
+
+        @type location_id: int
+        @param location_id: the id of the location as returned by L{get_location_id}
+
+        @rtype: int / None
+        @return: Various return values are possible:
+          - -2: size/checksum error
+          - -1: more than one file found in database
+          - None: no file found in database
+          - int: file id
+
+        """
+        values = {'filename' : filename,
+                  'location' : location_id}
+
+        res = self.caches['files'].GetValue( values )
+
+        if not res:
+            query = """SELECT id, size, md5sum
+                       FROM files
+                       WHERE filename = %(filename)s AND location = %(location)s"""
+
+            cursor = self.db_con.cursor()
+            cursor.execute( query, values )
+
+            if cursor.rowcount == 0:
+                res = None
+
+            elif cursor.rowcount != 1:
+                res = -1
+
+            else:
+                row = cursor.fetchone()
+
+                if row[1] != size or row[2] != md5sum:
+                    res = -2
+
+                else:
+                    self.caches['files'].SetValue(values, row[0])
+                    res = row[0]
+
+        return res
+
+
+    def get_or_set_contents_file_id(self, filename):
+        """
+        Returns database id for given filename.
+
+        Results are kept in a cache during runtime to minimize database queries.
+        If no matching file is found, a row is inserted.
+ + @type filename: string + @param filename: The filename + + @rtype: int + @return: the database id for the given component + """ + try: + values={'value': filename} + query = "SELECT id FROM content_file_names WHERE file = %(value)s" + id = self.__get_single_id(query, values, cachename='content_file_names') + if not id: + c = self.db_con.cursor() + c.execute( "INSERT INTO content_file_names VALUES (DEFAULT, %(value)s) RETURNING id", + values ) + + id = c.fetchone()[0] + self.caches['content_file_names'].SetValue(values, id) + + return id + except: + traceback.print_exc() + raise + + def get_or_set_contents_path_id(self, path): + """ + Returns database id for given path. + + Results are kept in a cache during runtime to minimize database queries. + If no matching file is found, a row is inserted. + + @type path: string + @param path: The filename + + @rtype: int + @return: the database id for the given component + """ + try: + values={'value': path} + query = "SELECT id FROM content_file_paths WHERE path = %(value)s" + id = self.__get_single_id(query, values, cachename='content_path_names') + if not id: + c = self.db_con.cursor() + c.execute( "INSERT INTO content_file_paths VALUES (DEFAULT, %(value)s) RETURNING id", + values ) + + id = c.fetchone()[0] + self.caches['content_path_names'].SetValue(values, id) + + return id + except: + traceback.print_exc() + raise + + def get_suite_architectures(self, suite): + """ + Returns list of architectures for C{suite}. + + @type suite: string, int + @param suite: the suite name or the suite_id + + @rtype: list + @return: the list of architectures for I{suite} + """ + + suite_id = None + if type(suite) == str: + suite_id = self.get_suite_id(suite) + elif type(suite) == int: + suite_id = suite + else: + return None + + c = self.db_con.cursor() + c.execute( """SELECT a.arch_string FROM suite_architectures sa + JOIN architecture a ON (a.id = sa.architecture) + WHERE suite='%s'""" % suite_id ) + + return map(lambda x: x[0], c.fetchall()) + + def insert_content_paths(self, bin_id, fullpaths): + """ + Make sure given path is associated with given binary id + + @type bin_id: int + @param bin_id: the id of the binary + @type fullpath: string + @param fullpath: the path of the file being associated with the binary + + @return True upon success + """ + + c = self.db_con.cursor() + + c.execute("BEGIN WORK") + try: + + for fullpath in fullpaths: + (path, file) = os.path.split(fullpath) + + # Get the necessary IDs ... 
+ file_id = self.get_or_set_contents_file_id(file) + path_id = self.get_or_set_contents_path_id(path) + + c.execute("""INSERT INTO content_associations + (binary_pkg, filepath, filename) + VALUES ( '%d', '%d', '%d')""" % (bin_id, path_id, file_id) ) + + c.execute("COMMIT") + return True + except: + traceback.print_exc() + c.execute("ROLLBACK") + return False + + def insert_pending_content_paths(self, package, fullpaths): + """ + Make sure given paths are temporarily associated with given + package + + @type package: dict + @param package: the package to associate with should have been read in from the binary control file + @type fullpaths: list + @param fullpaths: the list of paths of the file being associated with the binary + + @return True upon success + """ + + c = self.db_con.cursor() + + c.execute("BEGIN WORK") + try: + + # Remove any already existing recorded files for this package + c.execute("""DELETE FROM pending_content_associations + WHERE package=%(Package)s + AND version=%(Version)s""", package ) + + for fullpath in fullpaths: + (path, file) = os.path.split(fullpath) + + if path.startswith( "./" ): + path = path[2:] + # Get the necessary IDs ... + file_id = self.get_or_set_contents_file_id(file) + path_id = self.get_or_set_contents_path_id(path) + + c.execute("""INSERT INTO pending_content_associations + (package, version, filepath, filename) + VALUES (%%(Package)s, %%(Version)s, '%d', '%d')""" % (path_id, file_id), + package ) + c.execute("COMMIT") + return True + except: + traceback.print_exc() + c.execute("ROLLBACK") + return False diff --git a/daklib/singleton.py b/daklib/singleton.py new file mode 100644 index 00000000..535a25a3 --- /dev/null +++ b/daklib/singleton.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# vim:set et ts=4 sw=4: + +""" +Singleton pattern code + +Inspiration for this very simple ABC was taken from various documents / +tutorials / mailing lists. This may not be thread safe but given that +(as I write) large chunks of dak aren't even type-safe, I'll live with +it for now + +@contact: Debian FTPMaster +@copyright: 2008 Mark Hymers +@license: GNU General Public License version 2 or later +""" + +################################################################################ + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + +# < sgran> NCommander: in SQL, it's better to join than to repeat information +# < tomv_w> that makes SQL the opposite to Debian mailing lists! + +################################################################################ + +""" +This class set implements objects that may need to be instantiated multiple +times, but we don't want the overhead of actually creating and init'ing +them more than once. 
+It also saves us using globals all over the place
+"""
+
+class Singleton(object):
+    """This is the ABC for other dak Singleton classes"""
+    __single = None
+    def __new__(cls, *args, **kwargs):
+        # Check to see if a __single exists already for this class
+        # Compare class types instead of just looking for None so
+        # that subclasses will create their own __single objects
+        if cls != type(cls.__single):
+            cls.__single = object.__new__(cls, *args, **kwargs)
+            cls.__single._startup(*args, **kwargs)
+        return cls.__single
+
+    def __init__(self, *args, **kwargs):
+        if type(self) is Singleton:
+            raise NotImplementedError("Singleton is an ABC")
+
+    def _startup(self):
+        """
+        _startup is a private method used instead of __init__ due to the way
+        we instantiate this object
+        """
+        raise NotImplementedError("Singleton is an ABC")
+
diff --git a/daklib/utils.py b/daklib/utils.py
index fd790b59..c1be6b90 100755
--- a/daklib/utils.py
+++ b/daklib/utils.py
@@ -1484,6 +1484,20 @@ def temp_filename(directory=None, prefix="dak", suffix=""):

 ################################################################################

+def temp_dirname(parent=None, prefix="dak", suffix=""):
+    """
+    Return a secure and unique directory by pre-creating it.
+    If 'parent' is non-null, it will be the directory the directory is pre-created in.
+    If 'prefix' is non-null, the filename will be prefixed with it, default is dak.
+    If 'suffix' is non-null, the filename will end with it.
+
+    Returns a pathname to the new directory
+    """
+
+    return tempfile.mkdtemp(suffix, prefix, parent)
+
+################################################################################
+
 def is_email_alias(email):
     """ checks if the user part of the email is listed in the alias file """
     global alias_cache
@@ -1525,4 +1539,4 @@ apt_pkg.ReadConfigFileISC(Cnf,default_config)
 if which_conf_file() != default_config:
     apt_pkg.ReadConfigFileISC(Cnf,which_conf_file())

-################################################################################
+###############################################################################
diff --git a/docs/README.quotes b/docs/README.quotes
index 3568ae7a..a71c89db 100644
--- a/docs/README.quotes
+++ b/docs/README.quotes
@@ -344,3 +344,4 @@ Canadians: This is a lighthouse. Your call.

 elmo: I can't believe people pay you to fix computers
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
diff --git a/docs/meta/lenny/README.Debian b/docs/meta/lenny/README.Debian
new file mode 100644
index 00000000..49659fcb
--- /dev/null
+++ b/docs/meta/lenny/README.Debian
@@ -0,0 +1,11 @@
+ftpmaster meta package for DSA
+
+
+This is a dummy package that makes Debian's package management
+system believe that certain packages needed for ftpmaster have
+to be installed. The intention is that DSA can easily see what
+needs to be there.
+
+If you, for whatever reason, need a package added to this
+meta-package's dependency list, contact ftpmaster@debian.org, NOT
+the Debian admins.
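
As a rough illustration of the Singleton ABC added in daklib/singleton.py above: a subclass only has to implement _startup(), which runs once on first instantiation. The names below (ExampleCache and its data attribute) are hypothetical and not part of this commit.

    # Minimal usage sketch, illustrative only; ExampleCache is a
    # hypothetical subclass, not shipped with dak.
    from daklib.singleton import Singleton

    class ExampleCache(Singleton):
        def __init__(self, *args, **kwargs):
            super(ExampleCache, self).__init__(*args, **kwargs)

        def _startup(self, *args, **kwargs):
            # runs exactly once, on the first instantiation
            self.data = {}

    a = ExampleCache()
    b = ExampleCache()
    assert a is b    # every "instantiation" returns the same object
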
diff --git a/docs/meta/lenny/changelog b/docs/meta/lenny/changelog new file mode 100644 index 00000000..f2449b88 --- /dev/null +++ b/docs/meta/lenny/changelog @@ -0,0 +1,6 @@ +ftpmaster-lenny (1.0) unstable; urgency=low + + * New "package", to help DSA + + -- Joerg Jaspert Mon, 09 Mar 2009 14:09:09 +0100 + diff --git a/docs/meta/lenny/copyright b/docs/meta/lenny/copyright new file mode 100644 index 00000000..f778e680 --- /dev/null +++ b/docs/meta/lenny/copyright @@ -0,0 +1,25 @@ +This package was put together by: + + Joerg Jaspert on Mon, 09 Mar 2009 14:07:44 +0100 + +Copyright: + + + +License: + + This package is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation. + + This package is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this package; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +On Debian systems, the complete text of the GNU General +Public License can be found in `/usr/share/common-licenses/GPL'. diff --git a/docs/meta/lenny/ftpmaster-lenny b/docs/meta/lenny/ftpmaster-lenny new file mode 100644 index 00000000..14488625 --- /dev/null +++ b/docs/meta/lenny/ftpmaster-lenny @@ -0,0 +1,22 @@ +Section: devel +Priority: optional +Standards-Version: 3.8.1 + +Package: ftpmaster-lenny +Version: 1.0 +Maintainer: Debian FTP Master +Depends: apt-utils, bicyclerepair, binutils-multiarch, build-essential, bzip2, cron, curl, cvs, debian-el, debian-bug, dpkg-dev-el, easypg, devscripts, emacs-goodies-el, emacs22-nox, gnupg, gpgv, graphviz, ikiwiki, irb, libapt-pkg-dev, libdbd-pg-ruby, lintian, mc, mutt, postgresql-plperl-8.3, pychecker, pylint, pymacs, python, python-apt, python-btsutils, python-debian, python-epydoc, python-ldap, python-mode, python-numpy, python-psycopg2, python-pygresql, python-pyrss2gen, python-soappy, python-yaml, r-base, rsync, ruby, ruby-elisp, subversion, git-core, symlinks +Architecture: all +Copyright: copyright +Changelog: changelog +Readme: README.Debian +Description: Meta package for DSA listing ftpmaster needs + This is a small meta package for the Debian System Administrators + so DSA easily knows (and can keep installed) all packages + Ftpmaster needs. + . + If, for whatever reason, you need a package added to this ones + Dependencies, ask ftpmaster, not the Debian admins. + . + This is not only "What DAK needs", but a general "FTPMaster needs this + to do the work" diff --git a/templates/contents b/templates/contents new file mode 100644 index 00000000..16b624f0 --- /dev/null +++ b/templates/contents @@ -0,0 +1,33 @@ +This file maps each file available in the Debian GNU/Linux system to +the package from which it originates. It includes packages from the +DIST distribution for the ARCH architecture. + +You can use this list to determine which package contains a specific +file, or whether or not a specific file is available. The list is +updated weekly, each architecture on a different day. + +When a file is contained in more than one package, all packages are +listed. When a directory is contained in more than one package, only +the first is listed. 
+ +As all Contents files are shipped compressed, the best way to search quickly +for a file is with the Unix `zgrep' utility, as in: + `zgrep CONTENTS.gz': + + $ zgrep nose Contents.gz + etc/nosendfile net/sendfile + usr/X11R6/bin/noseguy x11/xscreensaver + usr/X11R6/man/man1/noseguy.1x.gz x11/xscreensaver + usr/doc/examples/ucbmpeg/mpeg_encode/nosearch.param graphics/ucbmpeg + usr/lib/cfengine/bin/noseyparker admin/cfengine + +This list contains files in all packages, even though not all of the +packages are installed on an actual system at once. If you want to +find out which packages on an installed Debian system provide a +particular file, you can use `dpkg --search ': + + $ dpkg --search /usr/bin/dselect + dpkg: /usr/bin/dselect + + +FILE LOCATION diff --git a/templates/missing-contents b/templates/missing-contents new file mode 100644 index 00000000..90f16dc5 --- /dev/null +++ b/templates/missing-contents @@ -0,0 +1,15 @@ +From: __DAK_ADDRESS__ +To: __TO_ADDRESS__ +X-Debian: DAK +X-Debian-Package: __PACKAGE__ +MIME-Version: 1.0 +Content-Type: text/plain; charset="utf-8" +Content-Transfer-Encoding: 8bit +Subject: Missing contents for __PACKAGE__ in accepted queue + +While processing the accepted queue, I didn't have contents in the +database for __PACKAGE__ version __VERSION__. These contents should +have been put into the database by process-unchecked when the package +first arrived. + +This is probably stew's fault. \ No newline at end of file diff --git a/templates/security-install.advisory b/templates/security-install.advisory index eea2e937..9036bd01 100644 --- a/templates/security-install.advisory +++ b/templates/security-install.advisory @@ -28,14 +28,20 @@ Foo discovered that [single issue] -For the stable distribution (etch), this problem has been fixed in version XXX +For the old stable distribution (etch), this problem has been fixed in version XXX +__PACKAGE__ + +For the stable distribution (lenny), this problem has been fixed in version XXX __PACKAGE__ For the unstable distribution (sid), this problem has been fixed in version XXX [multiple issues] -For the stable distribution (etch), these problems have been fixed in version +For the old stable distribution (etch), these problems have been fixed in version +__PACKAGE__ + +For the stable distribution (lenny), these problems have been fixed in version __PACKAGE__ For the unstable distribution (sid), these problems have been fixed in @@ -66,6 +72,9 @@ footer to the proper configuration. Debian GNU/Linux 4.0 alias etch ------------------------------- +Debian GNU/Linux 5.0 alias lenny +-------------------------------- + __ADVISORY_TEXT__ diff --git a/tools/removals.pl b/tools/removals.pl index b07845bf..839fc618 100755 --- a/tools/removals.pl +++ b/tools/removals.pl @@ -71,7 +71,7 @@ for my $removal (@removals ) { $rss->add_item(title => "$reason", link => "http://ftp-master.debian.org/removals.txt?" . $link, - description => qq[<pre>$body</pre>], + description => qq[
+$body
+],
                   dc => { creator => "$ftpmaster",
                   }
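
To tie the pieces together, a rough sketch (not part of the commit) of how the new DBConn singleton from daklib/dbconn.py is meant to be called from dak code. It assumes a reachable projectb configured via dak.conf, and the package name in the query is made up.

    from daklib.dbconn import DBConn

    # DBConn is a Singleton, so every call returns the same connection object
    db = DBConn()

    suite_id = db.get_suite_id("unstable")    # id lookups are cached after first use

    cursor = db.cursor()
    cursor.execute("SELECT version FROM source WHERE source = %(source)s",
                   {'source': 'example-package'})
    row = cursor.fetchone()
    db.commit()
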