#!/usr/bin/env python
# Populate the DB
-# Copyright (C) 2000, 2001 James Troup <james@nocrew.org>
-# $Id: neve,v 1.7 2001-11-18 19:57:58 rmurray Exp $
+# Copyright (C) 2000, 2001, 2002, 2003, 2004 James Troup <james@nocrew.org>
+# $Id: neve,v 1.19 2004-04-01 17:13:11 troup Exp $
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-################################################################################
+###############################################################################
# 04:36|<aj> elmo: you're making me waste 5 seconds per architecture!!!!!! YOU BASTARD!!!!!
-################################################################################
+###############################################################################
# This code is a horrible mess for two reasons:
# script again in a hurry, and I don't want to spend any more time
# on it than absolutely necessary.
-###############################################################################################################
+###############################################################################
-import commands, os, pg, re, sys, string, tempfile
-import apt_pkg
-import db_access, utils
+import commands, os, pg, re, sys, time;
+import apt_pkg;
+import db_access, utils;
-###############################################################################################################
+###############################################################################
re_arch_from_filename = re.compile(r"binary-[^/]+")
-###############################################################################################################
+###############################################################################
Cnf = None;
projectB = None;
source_cache = {};
arch_all_cache = {};
binary_cache = {};
+location_path_cache = {};
#
files_id_serial = 0;
source_id_serial = 0;
bin_associations_query_cache = None;
#
source_cache_for_binaries = {};
+reject_message = "";
+
+################################################################################
+
def usage(exit_code=0):
    """Print a usage summary for neve and exit with exit_code."""
    # Parenthesised print: identical behaviour for a single argument in
    # Python 2, and keeps the function importable under Python 3.
    print("""Usage: neve
Initializes a projectB database from an existing archive

  -a, --action              actually perform the initialization
  -h, --help                show this help and exit.""")
    sys.exit(exit_code)
+
+###############################################################################
+
def reject (str, prefix="Rejected: "):
    """Append one prefixed line to the global reject_message.

    Empty or None input is silently ignored."""
    global reject_message;
    if not str:
        return;
    reject_message += "%s%s\n" % (prefix, str);
+
+###############################################################################
+
def check_signature (filename):
    """Verify the GPG/PGP signature on filename with gpgv.

    Returns the signing key's fingerprint on success, the string
    "NOSIG" if the file carries no signature, or None on failure.
    Failure details are accumulated in the global reject_message via
    reject(), for the caller to inspect."""
    # Refuse shell-unsafe filenames: filename is interpolated straight
    # into the gpgv command line below.
    if not utils.re_taint_free.match(os.path.basename(filename)):
        reject("!!WARNING!! tainted filename: '%s'." % (filename));
        return None;

    # Run gpgv with its machine-readable status output sent to a pipe.
    status_read, status_write = os.pipe();
    cmd = "gpgv --status-fd %s --keyring %s --keyring %s %s" \
          % (status_write, Cnf["Dinstall::PGPKeyring"], Cnf["Dinstall::GPGKeyring"], filename);
    (output, status, exit_status) = utils.gpgv_get_status_output(cmd, status_read, status_write);

    # Process the status-fd output; each useful line looks like
    # "[GNUPG:] KEYWORD args...".  Collect a keyword -> args mapping.
    keywords = {};
    bad = internal_error = "";
    for line in status.split('\n'):
        line = line.strip();
        if line == "":
            continue;
        split = line.split();
        if len(split) < 2:
            internal_error += "gpgv status line is malformed (< 2 atoms) ['%s'].\n" % (line);
            continue;
        (gnupg, keyword) = split[:2];
        if gnupg != "[GNUPG:]":
            internal_error += "gpgv status line is malformed (incorrect prefix '%s').\n" % (gnupg);
            continue;
        args = split[2:];
        # NODATA and SIGEXPIRED may legitimately appear more than once.
        if keywords.has_key(keyword) and keyword != "NODATA" and keyword != "SIGEXPIRED":
            internal_error += "found duplicate status token ('%s').\n" % (keyword);
            continue;
        else:
            keywords[keyword] = args;

    # If we failed to parse the status-fd output, let's just whine and bail now
    if internal_error:
        reject("internal error while performing signature check on %s." % (filename));
        reject(internal_error, "");
        reject("Please report the above errors to the Archive maintainers by replying to this mail.", "");
        return None;

    # Now check for obviously bad things in the processed output
    if keywords.has_key("SIGEXPIRED"):
        # An expired key is only a warning here, not a rejection.
        utils.warn("%s: signing key has expired." % (filename));
    if keywords.has_key("KEYREVOKED"):
        reject("key used to sign %s has been revoked." % (filename));
        bad = 1;
    if keywords.has_key("BADSIG"):
        reject("bad signature on %s." % (filename));
        bad = 1;
    if keywords.has_key("ERRSIG") and not keywords.has_key("NO_PUBKEY"):
        reject("failed to check signature on %s." % (filename));
        bad = 1;
    if keywords.has_key("NO_PUBKEY"):
        args = keywords["NO_PUBKEY"];
        if len(args) < 1:
            reject("internal error while checking signature on %s." % (filename));
            bad = 1;
        else:
            # NO_PUBKEY's first argument is the (unverifiable) fingerprint.
            fingerprint = args[0];
    if keywords.has_key("BADARMOR"):
        reject("ascii armour of signature was corrupt in %s." % (filename));
        bad = 1;
    if keywords.has_key("NODATA"):
        # Unsigned files are tolerated during bulk import (unlike at
        # normal upload time) -- signal them with a sentinel instead.
        utils.warn("no signature found for %s." % (filename));
        return "NOSIG";
        #reject("no signature found in %s." % (filename));
        #bad = 1;

    if bad:
        return None;

    # Next check gpgv exited with a zero return code
    if exit_status and not keywords.has_key("NO_PUBKEY"):
        reject("gpgv failed while checking %s." % (filename));
        if status.strip():
            reject(utils.prefix_multi_line_string(status, " [GPG status-fd output:] "), "");
        else:
            reject(utils.prefix_multi_line_string(output, " [GPG output:] "), "");
        return None;

    # Sanity check the good stuff we expect
    if not keywords.has_key("VALIDSIG"):
        if not keywords.has_key("NO_PUBKEY"):
            reject("signature on %s does not appear to be valid [No VALIDSIG]." % (filename));
            bad = 1;
    else:
        args = keywords["VALIDSIG"];
        if len(args) < 1:
            reject("internal error while checking signature on %s." % (filename));
            bad = 1;
        else:
            # VALIDSIG's first argument is the signing key's fingerprint.
            fingerprint = args[0];
    if not keywords.has_key("GOODSIG") and not keywords.has_key("NO_PUBKEY"):
        reject("signature on %s does not appear to be valid [No GOODSIG]." % (filename));
        bad = 1;
    if not keywords.has_key("SIG_ID") and not keywords.has_key("NO_PUBKEY"):
        reject("signature on %s does not appear to be valid [No SIG_ID]." % (filename));
        bad = 1;

    # Finally ensure there's not something we don't recognise
    known_keywords = utils.Dict(VALIDSIG="",SIG_ID="",GOODSIG="",BADSIG="",ERRSIG="",
                                SIGEXPIRED="",KEYREVOKED="",NO_PUBKEY="",BADARMOR="",
                                NODATA="");

    for keyword in keywords.keys():
        if not known_keywords.has_key(keyword):
            reject("found unknown status token '%s' from gpgv with args '%r' in %s." % (keyword, keywords[keyword], filename));
            bad = 1;

    if bad:
        return None;
    else:
        return fingerprint;
-###############################################################################################################
+################################################################################
# Prepares a filename or directory (s) to be file.filename by stripping any part of the location (sub) from it.
def poolify (s, sub):
projectB.query("DELETE FROM suite")
for suite in Cnf.SubTree("Suite").List():
SubSec = Cnf.SubTree("Suite::%s" %(suite))
- projectB.query("INSERT INTO suite (suite_name) VALUES ('%s')" % string.lower(suite));
+ projectB.query("INSERT INTO suite (suite_name) VALUES ('%s')" % suite.lower());
for i in ("Version", "Origin", "Description"):
if SubSec.has_key(i):
- projectB.query("UPDATE suite SET %s = '%s' WHERE suite_name = '%s'" % (string.lower(i), SubSec[i], string.lower(suite)))
- for architecture in Cnf.SubTree("Suite::%s::Architectures" % (suite)).List():
+ projectB.query("UPDATE suite SET %s = '%s' WHERE suite_name = '%s'" % (i.lower(), SubSec[i], suite.lower()))
+ for architecture in Cnf.ValueList("Suite::%s::Architectures" % (suite)):
architecture_id = db_access.get_architecture_id (architecture);
projectB.query("INSERT INTO suite_architectures (suite, architecture) VALUES (currval('suite_id_seq'), %d)" % (architecture_id));
-##############################################################################################################
def update_override_type():
    """Re-populate the override_type table from Cnf's OverrideType list."""
    projectB.query("DELETE FROM override_type");
    for override_type in Cnf.ValueList("OverrideType"):
        projectB.query("INSERT INTO override_type (type) VALUES ('%s')" % (override_type));
+
def update_priority():
    """Re-populate the priority table from the Priority config subtree."""
    projectB.query("DELETE FROM priority");
    for name in Cnf.SubTree("Priority").List():
        level = Cnf["Priority::%s" % (name)];
        projectB.query("INSERT INTO priority (priority, level) VALUES ('%s', %s)" % (name, level));
+
def update_section():
    """Re-populate the section table.

    Each configured Section is combined with a per-component prefix
    ("contrib/foo") or suffix ("foo/contrib") depending on whether
    Natalie::ComponentPosition is "prefix"; 'main' gets neither."""
    projectB.query("DELETE FROM section");
    for component in Cnf.SubTree("Component").List():
        prefix = "";
        suffix = "";
        if Cnf["Natalie::ComponentPosition"] == "prefix":
            if component != 'main':
                prefix = component + '/';
        else:
            component = component.replace("non-US/", "");
            if component != 'main':
                suffix = '/' + component;
        for section in Cnf.ValueList("Section"):
            projectB.query("INSERT INTO section (section) VALUES ('%s%s%s')" % (prefix, section, suffix));
+
def get_location_path(directory):
    """Return the archive location path whose location.path matches the
    given directory fragment (memoized in location_path_cache).

    NOTE(review): the '~' operator makes this a POSIX regex match, so
    'directory' is treated as a pattern, not a literal -- presumably
    callers only pass simple path components; verify against callers."""
    global location_path_cache;

    if location_path_cache.has_key(directory):
        return location_path_cache[directory];

    q = projectB.query("SELECT DISTINCT path FROM location WHERE path ~ '%s'" % (directory));
    try:
        path = q.getresult()[0][0];
    except IndexError:
        # Only an empty result set means "no such location"; let any
        # real database error propagate instead of masking it with a
        # bare except.
        utils.fubar("[neve] get_location_path(): Couldn't get path for %s" % (directory));
    location_path_cache[directory] = path;
    return path;
+
+################################################################################
def get_or_set_files_id (filename, size, md5sum, location_id):
    """Return the files-table id for this file, allocating a new serial
    (and queueing a COPY line in files_query_cache) on first sight."""
    global files_id_cache, files_id_serial, files_query_cache;

    key = "~".join((filename, size, md5sum, repr(location_id)));
    if files_id_cache.has_key(key):
        return files_id_cache[key];
    files_id_serial += 1;
    files_query_cache.write("%d\t%s\t%s\t%s\t%d\n" % (files_id_serial, filename, size, md5sum, location_id));
    files_id_cache[key] = files_id_serial;
    return files_id_cache[key];
-##############################################################################################################
+###############################################################################
-def process_sources (location, filename, suite, component, archive):
- global source_cache, source_query_cache, src_associations_query_cache, dsc_files_query_cache, source_id_serial, src_associations_id_serial, dsc_files_id_serial, source_cache_for_binaries, orig_tar_gz_cache;
+def process_sources (filename, suite, component, archive):
+ global source_cache, source_query_cache, src_associations_query_cache, dsc_files_query_cache, source_id_serial, src_associations_id_serial, dsc_files_id_serial, source_cache_for_binaries, orig_tar_gz_cache, reject_message;
- suite = string.lower(suite)
+ suite = suite.lower();
suite_id = db_access.get_suite_id(suite);
- if suite == 'stable':
- testing_id = db_access.get_suite_id("testing");
try:
file = utils.open_file (filename);
except utils.cant_open_exc:
- print "WARNING: can't open '%s'" % (filename);
+ utils.warn("can't open '%s'" % (filename));
return;
- Scanner = apt_pkg.ParseTagFile(file)
+ Scanner = apt_pkg.ParseTagFile(file);
while Scanner.Step() != 0:
- package = Scanner.Section["package"]
- version = Scanner.Section["version"]
+ package = Scanner.Section["package"];
+ version = Scanner.Section["version"];
+ directory = Scanner.Section["directory"];
+ dsc_file = os.path.join(Cnf["Dir::Root"], directory, "%s_%s.dsc" % (package, utils.re_no_epoch.sub('', version)));
+ # Sometimes the Directory path is a lie; check in the pool
+ if not os.path.exists(dsc_file):
+ if directory.split('/')[0] == "dists":
+ directory = Cnf["Dir::PoolRoot"] + utils.poolify(package, component);
+ dsc_file = os.path.join(Cnf["Dir::Root"], directory, "%s_%s.dsc" % (package, utils.re_no_epoch.sub('', version)));
+ if not os.path.exists(dsc_file):
+ utils.fubar("%s not found." % (dsc_file));
+ install_date = time.strftime("%Y-%m-%d", time.localtime(os.path.getmtime(dsc_file)));
+ fingerprint = check_signature(dsc_file);
+ fingerprint_id = db_access.get_or_set_fingerprint_id(fingerprint);
+ if reject_message:
+ utils.fubar("%s: %s" % (dsc_file, reject_message));
maintainer = Scanner.Section["maintainer"]
- maintainer = string.replace(maintainer, "'", "\\'")
+ maintainer = maintainer.replace("'", "\\'");
maintainer_id = db_access.get_or_set_maintainer_id(maintainer);
- directory = Scanner.Section["directory"]
- location_id = db_access.get_location_id (location, component, archive)
- if directory[-1:] != "/":
- directory = directory + '/';
+ location = get_location_path(directory.split('/')[0]);
+ location_id = db_access.get_location_id (location, component, archive);
+ if not directory.endswith("/"):
+ directory += '/';
directory = poolify (directory, location);
- if directory != "" and directory[-1:] != "/":
- directory = directory + '/';
- no_epoch_version = utils.re_no_epoch.sub('', version)
+ if directory != "" and not directory.endswith("/"):
+ directory += '/';
+ no_epoch_version = utils.re_no_epoch.sub('', version);
# Add all files referenced by the .dsc to the files table
ids = [];
- for line in string.split(Scanner.Section["files"],'\n'):
+ for line in Scanner.Section["files"].split('\n'):
id = None;
- (md5sum, size, filename) = string.split(string.strip(line));
+ (md5sum, size, filename) = line.strip().split();
# Don't duplicate .orig.tar.gz's
- if filename[-12:] == ".orig.tar.gz":
+ if filename.endswith(".orig.tar.gz"):
cache_key = "%s~%s~%s" % (filename, size, md5sum);
if orig_tar_gz_cache.has_key(cache_key):
id = orig_tar_gz_cache[cache_key];
id = get_or_set_files_id (directory + filename, size, md5sum, location_id);
ids.append(id);
# If this is the .dsc itself; save the ID for later.
- if filename[-4:] == ".dsc":
+ if filename.endswith(".dsc"):
files_id = id;
filename = directory + package + '_' + no_epoch_version + '.dsc'
- cache_key = "%s~%s" % (package, version)
+ cache_key = "%s~%s" % (package, version);
if not source_cache.has_key(cache_key):
nasty_key = "%s~%s" % (package, version)
- source_id_serial = source_id_serial + 1;
+ source_id_serial += 1;
if not source_cache_for_binaries.has_key(nasty_key):
source_cache_for_binaries[nasty_key] = source_id_serial;
tmp_source_id = source_id_serial;
source_cache[cache_key] = source_id_serial;
- source_query_cache.write("%d\t%s\t%s\t%d\t%d\n" % (source_id_serial, package, version, maintainer_id, files_id))
+ source_query_cache.write("%d\t%s\t%s\t%d\t%d\t%s\t%s\n" % (source_id_serial, package, version, maintainer_id, files_id, install_date, fingerprint_id))
for id in ids:
- dsc_files_id_serial = dsc_files_id_serial + 1;
+ dsc_files_id_serial += 1;
dsc_files_query_cache.write("%d\t%d\t%d\n" % (dsc_files_id_serial, tmp_source_id,id));
else:
tmp_source_id = source_cache[cache_key];
- src_associations_id_serial = src_associations_id_serial + 1;
+ src_associations_id_serial += 1;
src_associations_query_cache.write("%d\t%d\t%d\n" % (src_associations_id_serial, suite_id, tmp_source_id))
- # populate 'testing' with a mirror of 'stable'
- if suite == "stable":
- src_associations_id_serial = src_associations_id_serial + 1;
- src_associations_query_cache.write("%d\t%d\t%d\n" % (src_associations_id_serial, testing_id, tmp_source_id))
- file.close()
+ file.close();
-##############################################################################################################
+###############################################################################
-def process_packages (location, filename, suite, component, archive):
- global arch_all_cache, binary_cache, binaries_id_serial, binaries_query_cache, bin_associations_id_serial, bin_associations_query_cache;
+def process_packages (filename, suite, component, archive):
+ global arch_all_cache, binary_cache, binaries_id_serial, binaries_query_cache, bin_associations_id_serial, bin_associations_query_cache, reject_message;
count_total = 0;
count_bad = 0;
- suite = string.lower(suite);
+ suite = suite.lower();
suite_id = db_access.get_suite_id(suite);
- if suite == "stable":
- testing_id = db_access.get_suite_id("testing");
try:
file = utils.open_file (filename);
except utils.cant_open_exc:
- print "WARNING: can't open '%s'" % (filename);
+ utils.warn("can't open '%s'" % (filename));
return;
Scanner = apt_pkg.ParseTagFile(file);
while Scanner.Step() != 0:
package = Scanner.Section["package"]
version = Scanner.Section["version"]
maintainer = Scanner.Section["maintainer"]
- maintainer = string.replace(maintainer, "'", "\\'")
+ maintainer = maintainer.replace("'", "\\'")
maintainer_id = db_access.get_or_set_maintainer_id(maintainer);
architecture = Scanner.Section["architecture"]
architecture_id = db_access.get_architecture_id (architecture);
+ fingerprint = "NOSIG";
+ fingerprint_id = db_access.get_or_set_fingerprint_id(fingerprint);
if not Scanner.Section.has_key("source"):
source = package
else:
source = Scanner.Section["source"]
source_version = ""
- if string.find(source, "(") != -1:
+ if source.find("(") != -1:
m = utils.re_extract_src_version.match(source)
source = m.group(1)
source_version = m.group(2)
if not source_version:
source_version = version
filename = Scanner.Section["filename"]
+ location = get_location_path(filename.split('/')[0]);
location_id = db_access.get_location_id (location, component, archive)
filename = poolify (filename, location)
if architecture == "all":
md5sum = Scanner.Section["md5sum"];
files_id = get_or_set_files_id (filename, size, md5sum, location_id);
type = "deb"; # FIXME
- cache_key = "%s~%s~%s~%d~%d~%d" % (package, version, repr(source_id), architecture_id, location_id, files_id);
+ cache_key = "%s~%s~%s~%d~%d~%d~%d" % (package, version, repr(source_id), architecture_id, location_id, files_id, suite_id);
if not arch_all_cache.has_key(cache_key):
arch_all_cache[cache_key] = 1;
cache_key = "%s~%s~%s~%d" % (package, version, repr(source_id), architecture_id);
if not binary_cache.has_key(cache_key):
if not source_id:
source_id = "\N";
- count_bad = count_bad + 1;
+ count_bad += 1;
else:
source_id = repr(source_id);
- binaries_id_serial = binaries_id_serial + 1;
- binaries_query_cache.write("%d\t%s\t%s\t%d\t%s\t%d\t%d\t%s\n" % (binaries_id_serial, package, version, maintainer_id, source_id, architecture_id, files_id, type));
+ binaries_id_serial += 1;
+ binaries_query_cache.write("%d\t%s\t%s\t%d\t%s\t%d\t%d\t%s\t%s\n" % (binaries_id_serial, package, version, maintainer_id, source_id, architecture_id, files_id, type, fingerprint_id));
binary_cache[cache_key] = binaries_id_serial;
tmp_binaries_id = binaries_id_serial;
else:
tmp_binaries_id = binary_cache[cache_key];
- bin_associations_id_serial = bin_associations_id_serial + 1;
+ bin_associations_id_serial += 1;
bin_associations_query_cache.write("%d\t%d\t%d\n" % (bin_associations_id_serial, suite_id, tmp_binaries_id));
- if suite == "stable":
- bin_associations_id_serial = bin_associations_id_serial + 1;
- bin_associations_query_cache.write("%d\t%d\t%d\n" % (bin_associations_id_serial, testing_id, tmp_binaries_id));
- count_total = count_total +1;
+ count_total += 1;
file.close();
if count_bad != 0:
else:
print "%d binary packages processed; 0 with no source match which is 0%%" % (count_total);
-##############################################################################################################
+###############################################################################
def do_sources(sources, suite, component, server):
    """Decompress a Sources.gz file into a temp file, run it through
    process_sources(), and clean up afterwards."""
    temp_filename = utils.temp_filename();
    (rc, gunzip_output) = commands.getstatusoutput("gunzip -c %s > %s" % (sources, temp_filename));
    if rc != 0:
        utils.fubar("Gunzip invocation failed!\n%s" % (gunzip_output), rc);
    print('Processing '+sources+'...');
    process_sources(temp_filename, suite, component, server);
    os.unlink(temp_filename);
-##############################################################################################################
+###############################################################################
-def main ():
+def do_da_do_da ():
global Cnf, projectB, query_cache, files_query_cache, source_query_cache, src_associations_query_cache, dsc_files_query_cache, bin_associations_query_cache, binaries_query_cache;
- Cnf = utils.get_conf()
+ Cnf = utils.get_conf();
+ Arguments = [('a', "action", "Neve::Options::Action"),
+ ('h', "help", "Neve::Options::Help")];
+ for i in [ "action", "help" ]:
+ if not Cnf.has_key("Neve::Options::%s" % (i)):
+ Cnf["Neve::Options::%s" % (i)] = "";
+
+ apt_pkg.ParseCommandLine(Cnf, Arguments, sys.argv);
+
+ Options = Cnf.SubTree("Neve::Options")
+ if Options["Help"]:
+ usage();
+
+ if not Options["Action"]:
+ utils.warn("""no -a/--action given; not doing anything.
+Please read the documentation before running this script.
+""");
+ usage(1);
print "Re-Creating DB..."
- (result, output) = commands.getstatusoutput("psql -f init_pool.sql")
+ (result, output) = commands.getstatusoutput("psql -f init_pool.sql template1");
if (result != 0):
utils.fubar("psql invocation failed!\n", result);
- print output
+ print output;
- projectB = pg.connect(Cnf["DB::Name"], Cnf["DB::Host"], int(Cnf["DB::Port"]), None, None, 'postgres')
+ projectB = pg.connect(Cnf["DB::Name"], Cnf["DB::Host"], int(Cnf["DB::Port"]));
db_access.init (Cnf, projectB);
update_archives();
update_locations();
update_suites();
+ update_override_type();
+ update_priority();
+ update_section();
projectB.query("COMMIT WORK");
files_query_cache = utils.open_file(Cnf["Neve::ExportDir"]+"files","w");
server = SubSec["Archive"];
type = Cnf.Find("Location::%s::Type" % (location));
if type == "legacy-mixed":
- prefix = ''
+ sources = location + 'Sources.gz';
suite = Cnf.Find("Location::%s::Suite" % (location));
- do_sources(location, prefix, suite, "", server);
- elif type == "legacy":
- for suite in Cnf.SubTree("Location::%s::Suites" % (location)).List():
+ do_sources(sources, suite, "", server);
+ elif type == "legacy" or type == "pool":
+ for suite in Cnf.ValueList("Location::%s::Suites" % (location)):
for component in Cnf.SubTree("Component").List():
- prefix = Cnf.Find("Suite::%s::CodeName" % (suite)) + '/' + component + '/source/'
- do_sources(location, prefix, suite, component, server);
- elif type == "pool":
- continue;
-# for component in Cnf.SubTree("Component").List():
-# prefix = component + '/'
-# do_sources(location, prefix);
+ sources = Cnf["Dir::Root"] + "dists/" + Cnf["Suite::%s::CodeName" % (suite)] + '/' + component + '/source/' + 'Sources.gz';
+ do_sources(sources, suite, component, server);
else:
utils.fubar("Unknown location type ('%s')." % (type));
packages = location + 'Packages';
suite = Cnf.Find("Location::%s::Suite" % (location));
print 'Processing '+location+'...';
- process_packages (location, packages, suite, "", server);
- elif type == "legacy":
- for suite in Cnf.SubTree("Location::%s::Suites" % (location)).List():
+ process_packages (packages, suite, "", server);
+ elif type == "legacy" or type == "pool":
+ for suite in Cnf.ValueList("Location::%s::Suites" % (location)):
for component in Cnf.SubTree("Component").List():
- for architecture in Cnf.SubTree("Suite::%s::Architectures" % (suite)).List():
- if architecture == "source" or architecture == "all":
- continue;
- packages = location + Cnf.Find("Suite::%s::CodeName" % (suite)) + '/' + component + '/binary-' + architecture + '/Packages'
+ architectures = filter(utils.real_arch,
+ Cnf.ValueList("Suite::%s::Architectures" % (suite)));
+ for architecture in architectures:
+ packages = Cnf["Dir::Root"] + "dists/" + Cnf["Suite::%s::CodeName" % (suite)] + '/' + component + '/binary-' + architecture + '/Packages'
print 'Processing '+packages+'...';
- process_packages (location, packages, suite, component, server);
- elif type == "pool":
- continue;
+ process_packages (packages, suite, component, server);
files_query_cache.close();
source_query_cache.close();
# See add_constraints.sql for more details...
print "Running add_constraints.sql...";
- (result, output) = commands.getstatusoutput("psql projectb < add_constraints.sql");
+ (result, output) = commands.getstatusoutput("psql %s < add_constraints.sql" % (Cnf["DB::Name"]));
print output
if (result != 0):
utils.fubar("psql invocation failed!\n%s" % (output), result);
return;
+################################################################################
+
def main():
    """Script entry point: run do_da_do_da() inside utils' debug wrapper."""
    utils.try_with_debug(do_da_do_da)
+
+################################################################################
+
# Invoke main() only when run as a script, not on import.
if __name__ == '__main__':
    main();