+2008-12-30 Joerg Jaspert <joerg@debian.org>
+
+ * config/debian/cron.hourly: Generate the 822 format for accepted,
+ new, byhand and proposed-updates
+
+2008-12-30 Michael Casadevall <sonicmctails@gmail.com>
+
+ * dak/queue_report.py - Added directories option to queue report
+
2008-12-28 Frank Lichtenheld <djpig@debian.org>
* dak/override.py (main): Handle source-only packages better
echo "Using dak v1" >> $ftpdir/project/trace/ftp-master.debian.org
echo "Running on host: $(hostname -f)" >> $ftpdir/project/trace/ftp-master.debian.org
dak import-users-from-passwd
-dak queue-report -n -8 > $webdir/new.html
+dak queue-report -n > $webdir/new.html
+dak queue-report -8 -d accepted,new,byhand,proposedupdates
dak show-deferred > ${webdir}/deferred.html
cd $queuedir/new ; dak show-new *.changes > /dev/null
$base/dak/tools/queue_rss.py -q $queuedir/new -o $webdir/rss/ -d $base/misc
################################################################################
import sys, os, popen2, tempfile, stat, time, pg
-import apt_pkg
+import gzip, apt_pkg
from daklib import database, utils
from daklib.dak_exceptions import *
################################################################################
-def handle_dup_files(file_list):
- # Sort the list, and then handle finding dups in the filenames key
-
- # Walk the list, seeing if the current entry and the next one are the same
- # and if so, join them together
-
-
- return file_list
-
-################################################################################
-
def generate_contents(suites):
global projectB, Cnf
# Ok, the contents information is in the database
# We need to work and get the contents, and print it out on a per
# architectual basis
+ # Read in the contents file header
+ header = False
+ if Cnf.has_key("Generate-Contents::Header"):
+ h = open(Cnf["Generate-Contents::Header"], "r")
+ header = h.read()
+ h.close()
+
# Get our suites, and the architectures
for s in suites:
suite_id = database.get_suite_id(s)
- q = projectB.query("SELECT architecture FROM suite_architectures WHERE suite = '%d'" % suite_id)
+ q = projectB.query("SELECT s.architecture, a.arch_string FROM suite_architectures s JOIN architecture a ON (s.architecture=a.id) WHERE suite = '%d'" % suite_id)
arch_list = [ ]
for r in q.getresult():
- arch_list.append(r[0])
+ if r[1] != "source" and r[1] != "all":
+ arch_list.append((r[0], r[1]))
arch_all_id = database.get_architecture_id("all")
- # Got the arch all packages, now we need to get the arch dependent packages
- # attach the arch all, stick them together, and write out the result
+    # Time for the query from hell. Essentially, we need to get the associations, the filenames, the paths,
+ # and all that fun stuff from the database.
for arch_id in arch_list:
- print "SELECT b.package, c.file, s.section FROM contents c JOIN binaries b ON (b.id=c.binary_pkg) JOIN bin_associations ba ON (b.id=ba.bin) JOIN override o ON (o.package=b.package) JOIN section s ON (s.id=o.section) WHERE (b.architecture = '%d' OR b.architecture = '%d') AND ba.suite = '%d'" % (arch_id, arch_all_id, suite_id)
- q = projectB.query("SELECT b.package, c.file, s.section FROM contents c JOIN binaries b ON (b.id=c.binary_pkg) JOIN bin_associations ba ON (b.id=ba.bin) JOIN override o ON (o.package=b.package) JOIN section s ON (s.id=o.section) WHERE (b.architecture = '%d' OR b.architecture = '%d') AND ba.suite = '%d'" % (arch_id, arch_all_id, suite_id))
- # We need to copy the arch_all packages table into arch packages
+ q = projectB.query("""SELECT p.path||'/'||n.file, comma_separated_list(s.section||'/'||b.package) FROM content_associations c JOIN content_file_paths p ON (c.filepath=p.id) JOIN content_file_names n ON (c.filename=n.id) JOIN binaries b ON (b.id=c.binary_pkg) JOIN bin_associations ba ON (b.id=ba.bin) JOIN override o ON (o.package=b.package) JOIN section s ON (s.id=o.section) WHERE (b.architecture = '%d' OR b.architecture = '%d') AND ba.suite = '%d' AND b.type = 'deb' GROUP BY (p.path||'/'||n.file)""" % (arch_id[0], arch_all_id, suite_id))
+
+ f = gzip.open(Cnf["Dir::Root"] + "dists/%s/Contents-%s.gz" % (s, arch_id[1]), "w")
+
+ if header:
+ f.write(header)
+
+ for contents in q.getresult():
+ f.write(contents[0] + "\t\t\t" + contents[-1] + "\n")
+
+ f.close()
- # This is for the corner case of arch dependent packages colliding
- # with arch all packages only on some architectures.
- # Ugly, I know ...
+ # The MORE fun part. Ok, udebs need their own contents files, udeb, and udeb-nf (not-free)
+ # This is HORRIBLY debian specific :-/
+ # First off, udeb
- arch_packages = []
- for r in q.getresult():
- arch_packages.append((r[1], (r[2] + '/' + r[0])))
+    section_id = database.get_section_id('debian-installer') # all udebs should be here
- arch_packages = handle_dup_files(arch_packages)
+ if section_id != -1:
+ q = projectB.query("""SELECT p.path||'/'||n.file, comma_separated_list(s.section||'/'||b.package) FROM content_associations c JOIN content_file_paths p ON (c.filepath=p.id) JOIN content_file_names n ON (c.filename=n.id) JOIN binaries b ON (b.id=c.binary_pkg) JOIN bin_associations ba ON (b.id=ba.bin) JOIN override o ON (o.package=b.package) JOIN section s ON (s.id=o.section) WHERE s.id = '%d' AND ba.suite = '%d' AND b.type = 'udeb' GROUP BY (p.path||'/'||n.file)""" % (section_id, suite_id))
- #for contents in arch_packages:
- #print contents[0] + '\t\t\t\t' + contents[1]
+ f = gzip.open(Cnf["Dir::Root"] + "dists/%s/Contents-udeb.gz" % (s), "w")
+
+ if header:
+ f.write(header)
+
+ for contents in q.getresult():
+ f.write(contents[0] + "\t\t\t" + contents[-1] + "\n")
+
+ f.close()
+
+ # Once more, with non-free
+    section_id = database.get_section_id('non-free/debian-installer') # all non-free udebs should be here
+
+ if section_id != -1:
+ q = projectB.query("""SELECT p.path||'/'||n.file, comma_separated_list(s.section||'/'||b.package) FROM content_associations c JOIN content_file_paths p ON (c.filepath=p.id) JOIN content_file_names n ON (c.filename=n.id) JOIN binaries b ON (b.id=c.binary_pkg) JOIN bin_associations ba ON (b.id=ba.bin) JOIN override o ON (o.package=b.package) JOIN section s ON (s.id=o.section) WHERE s.id = '%d' AND ba.suite = '%d' AND b.type = 'udeb' GROUP BY (p.path||'/'||n.file)""" % (section_id, suite_id))
+
+ f = gzip.open(Cnf["Dir::Root"] + "dists/%s/Contents-udeb-nf.gz" % (s), "w")
+
+ if header:
+ f.write(header)
+
+ for contents in q.getresult():
+ f.write(contents[0] + "\t\t\t" + contents[-1] + "\n")
+
+ f.close()
################################################################################
Arguments = [('h',"help","Generate-Contents::Options::Help"),
('s',"suite","Generate-Contents::Options::Suite","HasArg"),
]
+
for i in [ "help", "suite" ]:
if not Cnf.has_key("Generate-Contents::Options::%s" % (i)):
Cnf["Generate-Contents::Options::%s" % (i)] = ""
projectB.query("INSERT INTO bin_associations (suite, bin) VALUES (%d, currval('binaries_id_seq'))" % (suite_id))
# insert contents into the database
+ q = projectB.query("SELECT currval('binaries_id_seq')")
+ bin_id = int(q.getresult()[0][0])
for file in contents:
- projectB.query("INSERT INTO contents (binary_pkg, file) VALUES (currval('binaries_id_seq'), '%s')" % file)
+ database.insert_content_path(bin_id, file)
# If the .orig.tar.gz is in a legacy directory we need to poolify
# it, so that apt-get source (and anything else that goes by the
-s, --sort=key sort output according to key, see below.
-a, --age=key if using sort by age, how should time be treated?
If not given a default of hours will be used.
  -d, --directories=key A comma separated list of queues to be scanned
Sorting Keys: ao=age, oldest first. an=age, newest first.
na=name, ascending nd=name, descending
('n',"new","Queue-Report::Options::New"),
('8','822',"Queue-Report::Options::822"),
('s',"sort","Queue-Report::Options::Sort", "HasArg"),
- ('a',"age","Queue-Report::Options::Age", "HasArg")]
+ ('a',"age","Queue-Report::Options::Age", "HasArg"),
+ ('d',"directories","Queue-Report::Options::Directories", "HasArg")]
for i in [ "help" ]:
if not Cnf.has_key("Queue-Report::Options::%s" % (i)):
Cnf["Queue-Report::Options::%s" % (i)] = ""
if Cnf.has_key("Queue-Report::Options::New"):
header()
- directories = Cnf.ValueList("Queue-Report::Directories")
- if not directories:
+ directories = [ ]
+
+ if Cnf.has_key("Queue-Report::Options::Directories"):
+ for i in Cnf["Queue-Report::Options::Directories"].split(","):
+ directories.append(i)
+ elif Cnf.has_key("Queue-Report::Directories"):
+ directories = Cnf.ValueList("Queue-Report::Directories")
+ else:
directories = [ "byhand", "new" ]
f = None
################################################################################
-import sys, time, types
+import os, sys, time, types
################################################################################
queue_id_cache = {}
uid_id_cache = {}
suite_version_cache = {}
+content_path_id_cache = {}
+content_file_id_cache = {}
################################################################################
return version
+def get_latest_binary_version_id(binary, suite):
+ global suite_version_cache
+ cache_key = "%s_%s" % (binary, suite)
+
+
+ if suite_version_cache.has_key(cache_key):
+ return suite_version_cache[cache_key]
+
+ #print "SELECT b.id, b.version FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) WHERE b.package = '%s AND ba.suite = '%d'" % (binary, int(suite))
+ q = projectB.query("SELECT b.id, b.version FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) WHERE b.package = '%s AND ba.suite = '%d'" % (binary, int(suite)))
+
+ highest_bid, highest_version = None, None
+
+ for bi in q.getresult():
+ if highest_version == None or apt_pkg.VersionCompare(bi[1], highest_version) == 1:
+ highest_bid = bi[0]
+ highest_version = bi[1]
+
+ return highest_bid
+
################################################################################
def get_or_set_maintainer_id (maintainer):
sql = "select suite_name from binaries, bin_associations,suite where binaries.id=bin_associations.bin and package='%s' and bin_associations.suite = suite.id"%pkgname
q = projectB.query(sql)
return map(lambda x: x[0], q.getresult())
+
+################################################################################
+
+def get_or_set_contents_file_id(file):
+ global content_file_id_cache
+
+ if not content_file_id_cache.has_key(file):
+ sql_select = "SELECT id FROM content_file_names WHERE file = '%s'" % file
+ q = projectB.query(sql_select)
+ if not q.getresult():
+ # since this can be called within a transaction, we can't use currval
+ q = projectB.query("SELECT nextval('content_file_names_id_seq')")
+ file_id = int(q.getresult()[0][0])
+ projectB.query("INSERT INTO content_file_names VALUES ('%d', '%s')" % (file_id, file))
+ content_file_id_cache[file] = file_id
+ else:
+ content_file_id_cache[file] = int(q.getresult()[0][0])
+ return content_file_id_cache[file]
+
+################################################################################
+
+def get_or_set_contents_path_id(path):
+ global content_path_id_cache
+
+ if not content_path_id_cache.has_key(path):
+ sql_select = "SELECT id FROM content_file_paths WHERE path = '%s'" % path
+ q = projectB.query(sql_select)
+ if not q.getresult():
+ # since this can be called within a transaction, we can't use currval
+ q = projectB.query("SELECT nextval('content_file_names_id_seq')")
+ path_id = int(q.getresult()[0][0])
+ projectB.query("INSERT INTO content_file_paths VALUES ('%d', '%s')" % ( path_id, path))
+ content_path_id_cache[path] = path_id
+ else:
+ content_path_id_cache[path] = int(q.getresult()[0][0])
+
+ return content_path_id_cache[path]
+
+################################################################################
+
+def insert_content_path(bin_id, fullpath):
+ # split the path into basename, and pathname
+ (path, file) = os.path.split(fullpath)
+
+ # Get the necessary IDs ...
+ file_id = get_or_set_contents_file_id(file)
+ path_id = get_or_set_contents_path_id(path)
+
+ # Put them into content_assiocations
+ projectB.query("INSERT INTO content_associations VALUES (DEFAULT, '%d', '%d', '%d')" % (bin_id, path_id, file_id))
+ return