cp -al ${incoming}/buildd/. tree/${STAMP}/
ln -sfT tree/${STAMP} ${incoming}/builddweb
find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
+
+ for dist in experimental
+ do
+ cd ${incoming}/dists/${dist}
+ mkdir -p tree/${STAMP}
+ cp -al ${incoming}/dists/${dist}/buildd/. tree/${STAMP}/
+ ln -sfT tree/${STAMP} ${incoming}/dists/${dist}/current
+ find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
+ done
}
# Do the unchecked processing, in case we have files.
dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$unchecked" >> $report
}
+# Do the newstage processing, in case we have files.
+function do_newstage () {
+ cd $newstage
+
+ changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs)
+ report=$queuedir/REPORT
+ timestamp=$(date "+%Y-%m-%d %H:%M")
+ UNCHECKED_WITHOUT_LOCK=${UNCHECKED_WITHOUT_LOCK:-""}
+
+ echo "$timestamp": ${changes:-"Nothing to do in newstage"} >> $report
+ dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$newstage" >> $report
+}
+
function sync_debbugs () {
# sync with debbugs
echo "--" >> $report
rm -f "$LOCK_NEW"
GO=(
- FUNC="msfl"
- TIME="make-suite-file-list"
+ FUNC="dominate"
+ TIME="dominate"
ARGS=""
ERR=""
)
# We used to have accepted in here, but it doesn't exist in that form any more
dak queue-report -8 -d new,byhand,proposedupdates,oldproposedupdates
dak show-deferred > ${webdir}/deferred.html
-#cd $queuedir/new ; dak show-new *.changes > /dev/null
+dak show-new > /dev/null
$base/dak/tools/queue_rss.py -q $queuedir/new -o $webdir/rss/ -d $base/misc
$base/dak/tools/removals.pl > $webdir/rss/removals.rss
lockfile -r3 $LOCKFILE || exit 0
trap cleanup 0
+do_newstage
do_unchecked
if [ ! -z "$changes" ]; then
Transitions
{
Notifications "debian-devel@lists.debian.org";
- TempPath "/srv/ftp.debian.org/tmp/";
};
Generate-Index-Diffs
Override "/srv/ftp.debian.org/scripts/override/";
QueueBuild "/srv/incoming.debian.org/buildd/";
UrgencyLog "/srv/release.debian.org/britney/input/urgencies/";
+ TempPath "/srv/ftp.debian.org/tmp/";
Queue
{
Byhand "/srv/ftp.debian.org/queue/byhand/";
dak check-overrides
}
-function msfl() {
- log "Generating suite file lists for apt-ftparchive"
- dak make-suite-file-list
+function dominate() {
+ log "Removing obsolete source and binary associations"
+ dak dominate
}
function filelist() {
output += " o %s: %s\n" % (version, ", ".join(packages))
if all_packages:
all_packages.sort()
- cmd_output += " dak rm -m \"[auto-cruft] NBS (was built by %s)\" -s %s -b %s\n\n" % (source, suite.suite_name, " ".join(all_packages))
+ cmd_output += " dak rm -m \"[auto-cruft] NBS (was built by %s)\" -s %s -b %s -R\n\n" % (source, suite.suite_name, " ".join(all_packages))
output += "\n"
################################################################################
-def do_obsolete_source(duplicate_bins, bin2source):
- obsolete = {}
- for key in duplicate_bins.keys():
- (source_a, source_b) = key.split('_')
- for source in [ source_a, source_b ]:
- if not obsolete.has_key(source):
- if not source_binaries.has_key(source):
- # Source has already been removed
- continue
- else:
- obsolete[source] = [ i.strip() for i in source_binaries[source].split(',') ]
- for binary in duplicate_bins[key]:
- if bin2source.has_key(binary) and bin2source[binary]["source"] == source:
- continue
- if binary in obsolete[source]:
- obsolete[source].remove(binary)
-
- to_remove = []
- output = "Obsolete source package\n"
- output += "-----------------------\n\n"
- obsolete_keys = obsolete.keys()
- obsolete_keys.sort()
- for source in obsolete_keys:
- if not obsolete[source]:
- to_remove.append(source)
- output += " * %s (%s)\n" % (source, source_versions[source])
- for binary in [ i.strip() for i in source_binaries[source].split(',') ]:
- if bin2source.has_key(binary):
- output += " o %s (%s) is built by %s.\n" \
- % (binary, bin2source[binary]["version"],
- bin2source[binary]["source"])
- else:
- output += " o %s is not built.\n" % binary
- output += "\n"
-
- if to_remove:
- print output
-
- print "Suggested command:"
- print " dak rm -S -p -m \"[auto-cruft] obsolete source package\" %s" % (" ".join(to_remove))
- print
+def obsolete_source(suite_name, session):
+    """returns obsolete source packages for suite_name that have no
+    binaries in the same suite, sorted by install_date; install_date
+    should help detect source-only (or binary throw-away) uploads;
+    duplicates in the suite are skipped
+
+    subquery 'source_suite_unique' returns source package names that
+    appear only once in the suite; the rationale behind this is that
+    neither cruft-report nor rm can handle duplicates (yet)"""
+
+ query = """
+WITH source_suite_unique AS
+ (SELECT source, suite
+ FROM source_suite GROUP BY source, suite HAVING count(*) = 1)
+SELECT ss.src, ss.source, ss.version,
+ to_char(ss.install_date, 'YYYY-MM-DD') AS install_date
+ FROM source_suite ss
+ JOIN source_suite_unique ssu
+ ON ss.source = ssu.source AND ss.suite = ssu.suite
+ JOIN suite s ON s.id = ss.suite
+ LEFT JOIN bin_associations_binaries bab
+ ON ss.src = bab.source AND ss.suite = bab.suite
+ WHERE s.suite_name = :suite_name AND bab.id IS NULL
+ ORDER BY install_date"""
+ args = { 'suite_name': suite_name }
+ return session.execute(query, args)
+
+def source_bin(source, session):
+    """returns binaries built by source, whether or not they are
+    associated with any suite, grouped and ordered by package name"""
+
+ query = """
+SELECT b.package
+ FROM binaries b
+ JOIN src_associations_src sas ON b.source = sas.src
+ WHERE sas.source = :source
+ GROUP BY b.package
+ ORDER BY b.package"""
+ args = { 'source': source }
+ return session.execute(query, args)
+
+def newest_source_bab(suite_name, package, session):
+    """returns the newest version of each source package that builds
+    binary 'package' in suite, grouped and sorted by source and
+    package name"""
+
+ query = """
+SELECT sas.source, MAX(sas.version) AS srcver
+ FROM src_associations_src sas
+ JOIN bin_associations_binaries bab ON sas.src = bab.source
+ JOIN suite s on s.id = bab.suite
+ WHERE s.suite_name = :suite_name AND bab.package = :package
+ GROUP BY sas.source, bab.package
+ ORDER BY sas.source, bab.package"""
+ args = { 'suite_name': suite_name, 'package': package }
+ return session.execute(query, args)
+
+def report_obsolete_source(suite_name, session):
+ rows = obsolete_source(suite_name, session)
+ if rows.rowcount == 0:
+ return
+ print \
+"""Obsolete source packages in suite %s
+----------------------------------%s\n""" % \
+ (suite_name, '-' * len(suite_name))
+ for os_row in rows.fetchall():
+ (src, old_source, version, install_date) = os_row
+ print " * obsolete source %s version %s installed at %s" % \
+ (old_source, version, install_date)
+ for sb_row in source_bin(old_source, session):
+ (package, ) = sb_row
+ print " - has built binary %s" % package
+ for nsb_row in newest_source_bab(suite_name, package, session):
+ (new_source, srcver) = nsb_row
+ print " currently built by source %s version %s" % \
+ (new_source, srcver)
+ print " - suggested command:"
+ rm_opts = "-S -p -m \"[auto-cruft] obsolete source package\""
+ print " dak rm -s %s %s %s\n" % (suite_name, rm_opts, old_source)
def get_suite_binaries(suite, session):
# Initalize a large hash table of all binary packages
suite_id = suite.suite_id
suite_name = suite.suite_name.lower()
+ if "obsolete source" in checks:
+ report_obsolete_source(suite_name, session)
+
bin_not_built = {}
if "bnb" in checks:
- bins_in_suite = get_suite_binaries(suite_name, session)
+ bins_in_suite = get_suite_binaries(suite, session)
# Checks based on the Sources files
components = cnf.ValueList("Suite::%s::Components" % (suite_name))
# Check for duplicated packages and build indices for checking "no source" later
source_index = component + '/' + source
- if src_pkgs.has_key(source):
- print " %s is a duplicated source package (%s and %s)" % (source, source_index, src_pkgs[source])
+ #if src_pkgs.has_key(source):
+ # print " %s is a duplicated source package (%s and %s)" % (source, source_index, src_pkgs[source])
src_pkgs[source] = source_index
for binary in binaries_list:
if bin_pkgs.has_key(binary):
packages.close()
os.unlink(temp_filename)
- if "obsolete source" in checks:
- do_obsolete_source(duplicate_bins, bin2source)
-
# Distinguish dubious (version numbers match) and 'real' NBS (they don't)
dubious_nbs = {}
real_nbs = {}
"Process NEW and BYHAND packages"),
("process-upload",
"Process packages in queue/unchecked"),
+ ("process-policy",
+ "Process packages in policy queues from COMMENTS files"),
+ ("dominate",
+ "Remove obsolete source and binary associations from suites"),
("make-suite-file-list",
- "Generate lists of packages per suite for apt-ftparchive"),
+ "OBSOLETE: replaced by dominate and generate-filelist"),
("make-pkg-file-mapping",
"Generate package <-> file mapping"),
("generate-filelist",
#!/usr/bin/env python
-# coding=utf8
"""
-Adding a trainee field to the process-new notes
+Add views for new dominate command.
@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2009 Mike O'Connor <stew@debian.org>
+@copyright: 2009 Torsten Werner <twerner@debian.org>
@license: GNU General Public License version 2 or later
"""
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-################################################################################
-
-
-################################################################################
-
import psycopg2
-import time
-from daklib.dak_exceptions import DBUpdateError
-
-################################################################################
-
-def suites():
- """
- return a list of suites to operate on
- """
- if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
- suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
- else:
- suites = [ 'unstable', 'testing' ]
-# suites = Config().SubTree("Suite").List()
-
- return suites
-
-def arches(cursor, suite):
- """
- return a list of archs to operate on
- """
- arch_list = []
- cursor.execute("""SELECT s.architecture, a.arch_string
- FROM suite_architectures s
- JOIN architecture a ON (s.architecture=a.id)
- WHERE suite = :suite""", {'suite' : suite })
-
- while True:
- r = cursor.fetchone()
- if not r:
- break
-
- if r[1] != "source" and r[1] != "all":
- arch_list.append((r[0], r[1]))
-
- return arch_list
def do_update(self):
- """
- Adding contents table as first step to maybe, finally getting rid
- of apt-ftparchive
- """
-
- print __doc__
+ print "Add views for generate_filelist to database."
try:
c = self.db.cursor()
- c.execute("""CREATE TABLE pending_bin_contents (
- id serial NOT NULL,
- package text NOT NULL,
- version debversion NOT NULL,
- arch int NOT NULL,
- filename text NOT NULL,
- type int NOT NULL,
- PRIMARY KEY(id))""" );
-
- c.execute("""CREATE TABLE deb_contents (
- filename text,
- section text,
- package text,
- binary_id integer,
- arch integer,
- suite integer)""" )
-
- c.execute("""CREATE TABLE udeb_contents (
- filename text,
- section text,
- package text,
- binary_id integer,
- suite integer,
- arch integer)""" )
-
- c.execute("""ALTER TABLE ONLY deb_contents
- ADD CONSTRAINT deb_contents_arch_fkey
- FOREIGN KEY (arch) REFERENCES architecture(id)
- ON DELETE CASCADE;""")
-
- c.execute("""ALTER TABLE ONLY udeb_contents
- ADD CONSTRAINT udeb_contents_arch_fkey
- FOREIGN KEY (arch) REFERENCES architecture(id)
- ON DELETE CASCADE;""")
-
- c.execute("""ALTER TABLE ONLY deb_contents
- ADD CONSTRAINT deb_contents_pkey
- PRIMARY KEY (filename,package,arch,suite);""")
-
- c.execute("""ALTER TABLE ONLY udeb_contents
- ADD CONSTRAINT udeb_contents_pkey
- PRIMARY KEY (filename,package,arch,suite);""")
-
- c.execute("""ALTER TABLE ONLY deb_contents
- ADD CONSTRAINT deb_contents_suite_fkey
- FOREIGN KEY (suite) REFERENCES suite(id)
- ON DELETE CASCADE;""")
-
- c.execute("""ALTER TABLE ONLY udeb_contents
- ADD CONSTRAINT udeb_contents_suite_fkey
- FOREIGN KEY (suite) REFERENCES suite(id)
- ON DELETE CASCADE;""")
-
- c.execute("""ALTER TABLE ONLY deb_contents
- ADD CONSTRAINT deb_contents_binary_fkey
- FOREIGN KEY (binary_id) REFERENCES binaries(id)
- ON DELETE CASCADE;""")
-
- c.execute("""ALTER TABLE ONLY udeb_contents
- ADD CONSTRAINT udeb_contents_binary_fkey
- FOREIGN KEY (binary_id) REFERENCES binaries(id)
- ON DELETE CASCADE;""")
-
- c.execute("""CREATE INDEX ind_deb_contents_binary ON deb_contents(binary_id);""" )
-
-
- suites = self.suites()
-
- for suite in [i.lower() for i in suites]:
- suite_id = DBConn().get_suite_id(suite)
- arch_list = arches(c, suite_id)
- arch_list = arches(c, suite_id)
-
- for (arch_id,arch_str) in arch_list:
- c.execute( "CREATE INDEX ind_deb_contents_%s_%s ON deb_contents (arch,suite) WHERE (arch=2 OR arch=%d) AND suite=$d"%(arch_str,suite,arch_id,suite_id) )
-
- for section, sname in [("debian-installer","main"),
- ("non-free/debian-installer", "nonfree")]:
- c.execute( "CREATE INDEX ind_udeb_contents_%s_%s ON udeb_contents (section,suite) WHERE section=%s AND suite=$d"%(sname,suite,section,suite_id) )
-
-
- c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_bin_a() RETURNS trigger AS $$
- event = TD["event"]
- if event == "DELETE" or event == "UPDATE":
-
- plpy.execute(plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 and suite=$2",
- ["int","int"]),
- [TD["old"]["bin"], TD["old"]["suite"]])
-
- if event == "INSERT" or event == "UPDATE":
-
- content_data = plpy.execute(plpy.prepare(
- """SELECT s.section, b.package, b.architecture, ot.type
- FROM override o
- JOIN override_type ot on o.type=ot.id
- JOIN binaries b on b.package=o.package
- JOIN files f on b.file=f.id
- JOIN location l on l.id=f.location
- JOIN section s on s.id=o.section
- WHERE b.id=$1
- AND o.suite=$2
- """,
- ["int", "int"]),
- [TD["new"]["bin"], TD["new"]["suite"]])[0]
-
- tablename="%s_contents" % content_data['type']
-
- plpy.execute(plpy.prepare("""DELETE FROM %s
- WHERE package=$1 and arch=$2 and suite=$3""" % tablename,
- ['text','int','int']),
- [content_data['package'],
- content_data['architecture'],
- TD["new"]["suite"]])
-
- filenames = plpy.execute(plpy.prepare(
- "SELECT bc.file FROM bin_contents bc where bc.binary_id=$1",
- ["int"]),
- [TD["new"]["bin"]])
-
- for filename in filenames:
- plpy.execute(plpy.prepare(
- """INSERT INTO %s
- (filename,section,package,binary_id,arch,suite)
- VALUES($1,$2,$3,$4,$5,$6)""" % tablename,
- ["text","text","text","int","int","int"]),
- [filename["file"],
- content_data["section"],
- content_data["package"],
- TD["new"]["bin"],
- content_data["architecture"],
- TD["new"]["suite"]] )
-$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
-""")
-
-
- c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_override() RETURNS trigger AS $$
- event = TD["event"]
- if event == "UPDATE":
-
- otype = plpy.execute(plpy.prepare("SELECT type from override_type where id=$1",["int"]),[TD["new"]["type"]] )[0];
- if otype["type"].endswith("deb"):
- section = plpy.execute(plpy.prepare("SELECT section from section where id=$1",["int"]),[TD["new"]["section"]] )[0];
-
- table_name = "%s_contents" % otype["type"]
- plpy.execute(plpy.prepare("UPDATE %s set section=$1 where package=$2 and suite=$3" % table_name,
- ["text","text","int"]),
- [section["section"],
- TD["new"]["package"],
- TD["new"]["suite"]])
-
-$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
-""")
-
- c.execute("""CREATE OR REPLACE FUNCTION update_contents_for_override()
- RETURNS trigger AS $$
- event = TD["event"]
- if event == "UPDATE" or event == "INSERT":
- row = TD["new"]
- r = plpy.execute(plpy.prepare( """SELECT 1 from suite_architectures sa
- JOIN binaries b ON b.architecture = sa.architecture
- WHERE b.id = $1 and sa.suite = $2""",
- ["int", "int"]),
- [row["bin"], row["suite"]])
- if not len(r):
- plpy.error("Illegal architecture for this suite")
-
-$$ LANGUAGE plpythonu VOLATILE;""")
-
- c.execute( """CREATE TRIGGER illegal_suite_arch_bin_associations_trigger
- BEFORE INSERT OR UPDATE ON bin_associations
- FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
-
- c.execute( """CREATE TRIGGER bin_associations_contents_trigger
- AFTER INSERT OR UPDATE OR DELETE ON bin_associations
- FOR EACH ROW EXECUTE PROCEDURE update_contents_for_bin_a();""")
- c.execute("""CREATE TRIGGER override_contents_trigger
- AFTER UPDATE ON override
- FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
-
-
- c.execute( "CREATE INDEX ind_deb_contents_name ON deb_contents(package);");
- c.execute( "CREATE INDEX ind_udeb_contents_name ON udeb_contents(package);");
-
+ print "Drop old views."
+ c.execute("DROP VIEW IF EXISTS binaries_suite_arch CASCADE")
+ c.execute("DROP VIEW IF EXISTS newest_all_associations CASCADE")
+ c.execute("DROP VIEW IF EXISTS obsolete_any_by_all_associations CASCADE")
+ c.execute("DROP VIEW IF EXISTS newest_any_associations CASCADE")
+ c.execute("DROP VIEW IF EXISTS obsolete_any_associations CASCADE")
+ c.execute("DROP VIEW IF EXISTS source_suite CASCADE")
+ c.execute("DROP VIEW IF EXISTS newest_source CASCADE")
+ c.execute("DROP VIEW IF EXISTS newest_src_association CASCADE")
+ c.execute("DROP VIEW IF EXISTS any_associations_source CASCADE")
+ c.execute("DROP VIEW IF EXISTS src_associations_src CASCADE")
+ c.execute("DROP VIEW IF EXISTS almost_obsolete_src_associations CASCADE")
+ c.execute("DROP VIEW IF EXISTS obsolete_src_associations CASCADE")
+ c.execute("DROP VIEW IF EXISTS bin_associations_binaries CASCADE")
+ c.execute("DROP VIEW IF EXISTS src_associations_bin CASCADE")
+ c.execute("DROP VIEW IF EXISTS almost_obsolete_all_associations CASCADE")
+ c.execute("DROP VIEW IF EXISTS obsolete_all_associations CASCADE")
+
+ print "Create new views."
+ c.execute("""
+CREATE VIEW binaries_suite_arch AS
+ SELECT bin_associations.id, binaries.id AS bin, binaries.package,
+ binaries.version, binaries.source, bin_associations.suite,
+ suite.suite_name, binaries.architecture, architecture.arch_string
+ FROM binaries JOIN bin_associations ON binaries.id = bin_associations.bin
+ JOIN suite ON suite.id = bin_associations.suite
+ JOIN architecture ON binaries.architecture = architecture.id;
+ """)
+ c.execute("""
+CREATE VIEW newest_all_associations AS
+ SELECT package, max(version) AS version, suite, architecture
+ FROM binaries_suite_arch
+ WHERE architecture = 2 GROUP BY package, suite, architecture;
+ """)
+ c.execute("""
+CREATE VIEW obsolete_any_by_all_associations AS
+ SELECT binaries_suite_arch.id, binaries_suite_arch.package,
+ binaries_suite_arch.version, binaries_suite_arch.suite,
+ binaries_suite_arch.architecture
+ FROM binaries_suite_arch
+ JOIN newest_all_associations
+ ON (binaries_suite_arch.package = newest_all_associations.package AND
+ binaries_suite_arch.version < newest_all_associations.version AND
+ binaries_suite_arch.suite = newest_all_associations.suite AND
+ binaries_suite_arch.architecture > 2);
+ """)
+ c.execute("""
+CREATE VIEW newest_any_associations AS
+ SELECT package, max(version) AS version, suite, architecture
+ FROM binaries_suite_arch
+ WHERE architecture > 2 GROUP BY package, suite, architecture;
+ """)
+ c.execute("""
+CREATE VIEW obsolete_any_associations AS
+ SELECT id, binaries_suite_arch.architecture, binaries_suite_arch.version,
+ binaries_suite_arch.package, binaries_suite_arch.suite
+ FROM binaries_suite_arch
+ JOIN newest_any_associations
+ ON binaries_suite_arch.architecture = newest_any_associations.architecture AND
+ binaries_suite_arch.package = newest_any_associations.package AND
+ binaries_suite_arch.suite = newest_any_associations.suite AND
+ binaries_suite_arch.version != newest_any_associations.version;
+ """)
+ c.execute("""
+CREATE VIEW source_suite AS
+ SELECT src_associations.id, source.id AS src , source.source, source.version,
+ src_associations.suite, suite.suite_name
+ FROM source
+ JOIN src_associations ON source.id = src_associations.source
+ JOIN suite ON suite.id = src_associations.suite;
+ """)
+ c.execute("""
+CREATE VIEW newest_source AS
+ SELECT source, max(version) AS version, suite
+ FROM source_suite
+ GROUP BY source, suite;
+ """)
+ c.execute("""
+CREATE VIEW newest_src_association AS
+ SELECT id, src, source, version, suite
+ FROM source_suite
+ JOIN newest_source USING (source, version, suite);
+ """)
+ c.execute("""
+CREATE VIEW any_associations_source AS
+ SELECT bin_associations.id, bin_associations.suite, binaries.id AS bin,
+ binaries.package, binaries.version AS binver, binaries.architecture,
+ source.id AS src, source.source, source.version AS srcver
+ FROM bin_associations
+ JOIN binaries ON bin_associations.bin = binaries.id AND architecture != 2
+ JOIN source ON binaries.source = source.id;
+ """)
+ c.execute("""
+CREATE VIEW src_associations_src AS
+ SELECT src_associations.id, src_associations.suite, source.id AS src,
+ source.source, source.version
+ FROM src_associations
+ JOIN source ON src_associations.source = source.id;
+ """)
+ c.execute("""
+CREATE VIEW almost_obsolete_src_associations AS
+ SELECT src_associations_src.id, src_associations_src.src,
+ src_associations_src.source, src_associations_src.version, suite
+ FROM src_associations_src
+ LEFT JOIN any_associations_source USING (src, suite)
+ WHERE bin IS NULL;
+ """)
+ c.execute("""
+CREATE VIEW obsolete_src_associations AS
+ SELECT almost.id, almost.src, almost.source, almost.version, almost.suite
+ FROM almost_obsolete_src_associations as almost
+ JOIN newest_src_association AS newest
+ ON almost.source = newest.source AND
+ almost.version < newest.version AND
+ almost.suite = newest.suite;
+ """)
+ c.execute("""
+CREATE VIEW bin_associations_binaries AS
+ SELECT bin_associations.id, bin_associations.bin, binaries.package,
+ binaries.version, bin_associations.suite, binaries.architecture
+ FROM bin_associations
+ JOIN binaries ON bin_associations.bin = binaries.id;
+ """)
+ c.execute("""
+CREATE VIEW src_associations_bin AS
+ SELECT src_associations.id, src_associations.source, src_associations.suite,
+ binaries.id AS bin, binaries.architecture
+ FROM src_associations
+ JOIN source ON src_associations.source = source.id
+ JOIN binaries ON source.id = binaries.source;
+ """)
+ c.execute("""
+CREATE VIEW almost_obsolete_all_associations AS
+ SELECT bin_associations_binaries.id AS id, bin, bin_associations_binaries.package,
+ bin_associations_binaries.version, suite
+ FROM bin_associations_binaries
+ LEFT JOIN src_associations_bin USING (bin, suite, architecture)
+ WHERE source IS NULL AND architecture = 2;
+ """)
+ c.execute("""
+CREATE VIEW obsolete_all_associations AS
+ SELECT almost.id, almost.bin, almost.package, almost.version, almost.suite
+ FROM almost_obsolete_all_associations AS almost
+ JOIN newest_all_associations AS newest
+ ON almost.package = newest.package AND
+ almost.version < newest.version AND
+ almost.suite = newest.suite;
+ """)
+
+ print "Committing"
+ c.execute("UPDATE config SET value = '25' WHERE name = 'db_revision'")
self.db.commit()
- except psycopg2.ProgrammingError, msg:
+ except psycopg2.InternalError, msg:
self.db.rollback()
- raise DBUpdateError, "Unable to apply process-new update 14, rollback issued. Error message : %s" % (str(msg))
+ raise DBUpdateError, "Database error, rollback issued. Error message : %s" % (str(msg))
--- /dev/null
+#!/usr/bin/env python
+
+"""
+Add created,modified columns for all tables.
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Barry deFreese <bdefreese@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import psycopg2
+
+def do_update(self):
+ print "Add created, modified fields for all tables."
+
+ updatetables = ['architecture', 'archive', 'bin_associations', 'bin_contents',
+ 'binaries', 'binary_acl', 'binary_acl_map', 'build_queue', 'build_queue_files',
+ 'changes', 'changes_pending_binaries', 'changes_pending_files',
+ 'changes_pending_files_map', 'changes_pending_source', 'changes_pending_source_files',
+ 'changes_pool_files', 'component', 'config', 'dsc_files', 'files', 'fingerprint',
+ 'keyring_acl_map', 'keyrings', 'location', 'maintainer', 'new_comments', 'override',
+ 'override_type', 'policy_queue', 'priority', 'section', 'source', 'source_acl',
+ 'src_associations', 'src_format', 'src_uploaders', 'suite', 'suite_architectures',
+ 'suite_build_queue_copy', 'suite_src_formats', 'uid', 'upload_blocks']
+
+ c = self.db.cursor()
+
+ print "Create trigger function."
+ c.execute("""CREATE OR REPLACE FUNCTION tfunc_set_modified() RETURNS trigger AS $$
+ BEGIN NEW.modified = now(); return NEW; END;
+ $$ LANGUAGE 'plpgsql'""")
+
+ try:
+ for updatetable in updatetables:
+
+ print "Add created field to %s." % updatetable
+ c.execute("ALTER TABLE %s ADD COLUMN created TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now()" % updatetable)
+
+ print "Add modified field to %s." % updatetable
+ c.execute("ALTER TABLE %s ADD COLUMN modified TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now()" % updatetable)
+
+ print "Create modified trigger."
+ c.execute("""CREATE TRIGGER modified_%s BEFORE UPDATE ON %s
+ FOR EACH ROW EXECUTE PROCEDURE tfunc_set_modified()""" % (updatetable, updatetable))
+
+ print "Committing"
+ c.execute("UPDATE config SET value = '26' WHERE name = 'db_revision'")
+ self.db.commit()
+
+ except psycopg2.InternalError, msg:
+ self.db.rollback()
+ raise DBUpdateError, "Database error, rollback issued. Error message : %s" % (str(msg))
+
--- /dev/null
+#!/usr/bin/env python
+
+"""
+Add views for new obsolete source detection.
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Torsten Werner <twerner@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import psycopg2
+
+def do_update(self):
+ print "Add/modify views for obsolete source detection."
+
+ try:
+ c = self.db.cursor()
+
+ print "Replace old views."
+ # joins src_associations and source
+ c.execute("""
+CREATE OR REPLACE VIEW source_suite AS
+ SELECT src_associations.id, source.id AS src, source.source, source.version,
+ src_associations.suite, suite.suite_name, source.install_date
+ FROM source
+ JOIN src_associations ON source.id = src_associations.source
+ JOIN suite ON suite.id = src_associations.suite;
+ """)
+ # joins bin_associations and binaries
+ c.execute("""
+CREATE OR REPLACE VIEW bin_associations_binaries AS
+ SELECT bin_associations.id, bin_associations.bin, binaries.package,
+ binaries.version, bin_associations.suite, binaries.architecture,
+ binaries.source
+ FROM bin_associations
+ JOIN binaries ON bin_associations.bin = binaries.id;
+ """)
+
+ print "Grant permissions to views."
+ c.execute("GRANT SELECT ON binfiles_suite_component_arch TO PUBLIC;");
+ c.execute("GRANT SELECT ON srcfiles_suite_component TO PUBLIC;");
+ c.execute("GRANT SELECT ON binaries_suite_arch TO PUBLIC;");
+ c.execute("GRANT SELECT ON newest_all_associations TO PUBLIC;");
+ c.execute("GRANT SELECT ON obsolete_any_by_all_associations TO PUBLIC;");
+ c.execute("GRANT SELECT ON newest_any_associations TO PUBLIC;");
+ c.execute("GRANT SELECT ON obsolete_any_associations TO PUBLIC;");
+ c.execute("GRANT SELECT ON source_suite TO PUBLIC;");
+ c.execute("GRANT SELECT ON newest_source TO PUBLIC;");
+ c.execute("GRANT SELECT ON newest_src_association TO PUBLIC;");
+ c.execute("GRANT SELECT ON any_associations_source TO PUBLIC;");
+ c.execute("GRANT SELECT ON src_associations_src TO PUBLIC;");
+ c.execute("GRANT SELECT ON almost_obsolete_src_associations TO PUBLIC;");
+ c.execute("GRANT SELECT ON obsolete_src_associations TO PUBLIC;");
+ c.execute("GRANT SELECT ON bin_associations_binaries TO PUBLIC;");
+ c.execute("GRANT SELECT ON src_associations_bin TO PUBLIC;");
+ c.execute("GRANT SELECT ON almost_obsolete_all_associations TO PUBLIC;");
+ c.execute("GRANT SELECT ON obsolete_all_associations TO PUBLIC;");
+
+ print "Committing"
+ c.execute("UPDATE config SET value = '27' WHERE name = 'db_revision'")
+ self.db.commit()
+
+ except psycopg2.InternalError, msg:
+ self.db.rollback()
+ raise DBUpdateError, "Database error, rollback issued. Error message : %s" % (str(msg))
+
--- /dev/null
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Adding a trainee field to the process-new notes
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Mike O'Connor <stew@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+
+################################################################################
+
+import psycopg2
+import time
+from daklib.dak_exceptions import DBUpdateError
+
+################################################################################
+
+def suites():
+ """
+ return a list of suites to operate on
+ """
+ if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
+ suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
+ else:
+ suites = [ 'unstable', 'testing' ]
+# suites = Config().SubTree("Suite").List()
+
+ return suites
+
+def arches(cursor, suite):
+ """
+ return a list of archs to operate on
+ """
+ arch_list = []
+ cursor.execute("""SELECT s.architecture, a.arch_string
+ FROM suite_architectures s
+ JOIN architecture a ON (s.architecture=a.id)
+ WHERE suite = :suite""", {'suite' : suite })
+
+ while True:
+ r = cursor.fetchone()
+ if not r:
+ break
+
+ if r[1] != "source" and r[1] != "all":
+ arch_list.append((r[0], r[1]))
+
+ return arch_list
+
+def do_update(self):
+ """
+ Adding contents table as first step to maybe, finally getting rid
+ of apt-ftparchive
+ """
+
+ print __doc__
+
+ try:
+ c = self.db.cursor()
+
+ c.execute("""CREATE TABLE pending_bin_contents (
+ id serial NOT NULL,
+ package text NOT NULL,
+ version debversion NOT NULL,
+ arch int NOT NULL,
+ filename text NOT NULL,
+ type int NOT NULL,
+ PRIMARY KEY(id))""" );
+
+ c.execute("""CREATE TABLE deb_contents (
+ filename text,
+ section text,
+ package text,
+ binary_id integer,
+ arch integer,
+ suite integer)""" )
+
+ c.execute("""CREATE TABLE udeb_contents (
+ filename text,
+ section text,
+ package text,
+ binary_id integer,
+ suite integer,
+ arch integer)""" )
+
+ c.execute("""ALTER TABLE ONLY deb_contents
+ ADD CONSTRAINT deb_contents_arch_fkey
+ FOREIGN KEY (arch) REFERENCES architecture(id)
+ ON DELETE CASCADE;""")
+
+ c.execute("""ALTER TABLE ONLY udeb_contents
+ ADD CONSTRAINT udeb_contents_arch_fkey
+ FOREIGN KEY (arch) REFERENCES architecture(id)
+ ON DELETE CASCADE;""")
+
+ c.execute("""ALTER TABLE ONLY deb_contents
+ ADD CONSTRAINT deb_contents_pkey
+ PRIMARY KEY (filename,package,arch,suite);""")
+
+ c.execute("""ALTER TABLE ONLY udeb_contents
+ ADD CONSTRAINT udeb_contents_pkey
+ PRIMARY KEY (filename,package,arch,suite);""")
+
+ c.execute("""ALTER TABLE ONLY deb_contents
+ ADD CONSTRAINT deb_contents_suite_fkey
+ FOREIGN KEY (suite) REFERENCES suite(id)
+ ON DELETE CASCADE;""")
+
+ c.execute("""ALTER TABLE ONLY udeb_contents
+ ADD CONSTRAINT udeb_contents_suite_fkey
+ FOREIGN KEY (suite) REFERENCES suite(id)
+ ON DELETE CASCADE;""")
+
+ c.execute("""ALTER TABLE ONLY deb_contents
+ ADD CONSTRAINT deb_contents_binary_fkey
+ FOREIGN KEY (binary_id) REFERENCES binaries(id)
+ ON DELETE CASCADE;""")
+
+ c.execute("""ALTER TABLE ONLY udeb_contents
+ ADD CONSTRAINT udeb_contents_binary_fkey
+ FOREIGN KEY (binary_id) REFERENCES binaries(id)
+ ON DELETE CASCADE;""")
+
+ c.execute("""CREATE INDEX ind_deb_contents_binary ON deb_contents(binary_id);""" )
+
+
+ suites = self.suites()
+
+ for suite in [i.lower() for i in suites]:
+ suite_id = DBConn().get_suite_id(suite)
+ arch_list = arches(c, suite_id)
+ arch_list = arches(c, suite_id)
+
+ for (arch_id,arch_str) in arch_list:
+ c.execute( "CREATE INDEX ind_deb_contents_%s_%s ON deb_contents (arch,suite) WHERE (arch=2 OR arch=%d) AND suite=$d"%(arch_str,suite,arch_id,suite_id) )
+
+ for section, sname in [("debian-installer","main"),
+ ("non-free/debian-installer", "nonfree")]:
+ c.execute( "CREATE INDEX ind_udeb_contents_%s_%s ON udeb_contents (section,suite) WHERE section=%s AND suite=$d"%(sname,suite,section,suite_id) )
+
+
+ c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_bin_a() RETURNS trigger AS $$
+ event = TD["event"]
+ if event == "DELETE" or event == "UPDATE":
+
+ plpy.execute(plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 and suite=$2",
+ ["int","int"]),
+ [TD["old"]["bin"], TD["old"]["suite"]])
+
+ if event == "INSERT" or event == "UPDATE":
+
+ content_data = plpy.execute(plpy.prepare(
+ """SELECT s.section, b.package, b.architecture, ot.type
+ FROM override o
+ JOIN override_type ot on o.type=ot.id
+ JOIN binaries b on b.package=o.package
+ JOIN files f on b.file=f.id
+ JOIN location l on l.id=f.location
+ JOIN section s on s.id=o.section
+ WHERE b.id=$1
+ AND o.suite=$2
+ """,
+ ["int", "int"]),
+ [TD["new"]["bin"], TD["new"]["suite"]])[0]
+
+ tablename="%s_contents" % content_data['type']
+
+ plpy.execute(plpy.prepare("""DELETE FROM %s
+ WHERE package=$1 and arch=$2 and suite=$3""" % tablename,
+ ['text','int','int']),
+ [content_data['package'],
+ content_data['architecture'],
+ TD["new"]["suite"]])
+
+ filenames = plpy.execute(plpy.prepare(
+ "SELECT bc.file FROM bin_contents bc where bc.binary_id=$1",
+ ["int"]),
+ [TD["new"]["bin"]])
+
+ for filename in filenames:
+ plpy.execute(plpy.prepare(
+ """INSERT INTO %s
+ (filename,section,package,binary_id,arch,suite)
+ VALUES($1,$2,$3,$4,$5,$6)""" % tablename,
+ ["text","text","text","int","int","int"]),
+ [filename["file"],
+ content_data["section"],
+ content_data["package"],
+ TD["new"]["bin"],
+ content_data["architecture"],
+ TD["new"]["suite"]] )
+$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
+""")
+
+
+ c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_override() RETURNS trigger AS $$
+ event = TD["event"]
+ if event == "UPDATE":
+
+ otype = plpy.execute(plpy.prepare("SELECT type from override_type where id=$1",["int"]),[TD["new"]["type"]] )[0];
+ if otype["type"].endswith("deb"):
+ section = plpy.execute(plpy.prepare("SELECT section from section where id=$1",["int"]),[TD["new"]["section"]] )[0];
+
+ table_name = "%s_contents" % otype["type"]
+ plpy.execute(plpy.prepare("UPDATE %s set section=$1 where package=$2 and suite=$3" % table_name,
+ ["text","text","int"]),
+ [section["section"],
+ TD["new"]["package"],
+ TD["new"]["suite"]])
+
+$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
+""")
+
+ c.execute("""CREATE OR REPLACE FUNCTION update_contents_for_override()
+ RETURNS trigger AS $$
+ event = TD["event"]
+ if event == "UPDATE" or event == "INSERT":
+ row = TD["new"]
+ r = plpy.execute(plpy.prepare( """SELECT 1 from suite_architectures sa
+ JOIN binaries b ON b.architecture = sa.architecture
+ WHERE b.id = $1 and sa.suite = $2""",
+ ["int", "int"]),
+ [row["bin"], row["suite"]])
+ if not len(r):
+ plpy.error("Illegal architecture for this suite")
+
+$$ LANGUAGE plpythonu VOLATILE;""")
+
+ c.execute( """CREATE TRIGGER illegal_suite_arch_bin_associations_trigger
+ BEFORE INSERT OR UPDATE ON bin_associations
+ FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
+
+ c.execute( """CREATE TRIGGER bin_associations_contents_trigger
+ AFTER INSERT OR UPDATE OR DELETE ON bin_associations
+ FOR EACH ROW EXECUTE PROCEDURE update_contents_for_bin_a();""")
+ c.execute("""CREATE TRIGGER override_contents_trigger
+ AFTER UPDATE ON override
+ FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
+
+
+ c.execute( "CREATE INDEX ind_deb_contents_name ON deb_contents(package);");
+ c.execute( "CREATE INDEX ind_udeb_contents_name ON udeb_contents(package);");
+
+ c.execute("UPDATE config SET value = '28' WHERE name = 'db_revision'")
+
+ self.db.commit()
+
+ except psycopg2.ProgrammingError, msg:
+ self.db.rollback()
+ raise DBUpdateError, "Unable to apply process-new update 28, rollback issued. Error message : %s" % (str(msg))
+
--- /dev/null
+#!/usr/bin/python
+
+"""
+Remove obsolete source and binary associations from suites.
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Torsten Werner <twerner@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+from daklib.dbconn import *
+from daklib.config import Config
+from daklib import daklog, utils
+import apt_pkg, sys
+
+Options = None
+Logger = None
+
+def fetch(reason, query, args, session):
+    """
+    Run `query` with `args` and return the list of ids from the first
+    result column.
+
+    Each row is expected to be (id, package, version, suite_name,
+    architecture).  In No-Action mode the planned removal is printed;
+    otherwise it is recorded via the module-global Logger, tagged with
+    `reason`.
+    """
+    idList = []
+    for row in session.execute(query, args).fetchall():
+        (id, package, version, suite_name, architecture) = row
+        if Options['No-Action']:
+            print "Delete %s %s from %s architecture %s (%s, %d)" % \
+                (package, version, suite_name, architecture, reason, id)
+        else:
+            Logger.log([reason, package, version, suite_name, \
+                architecture, id])
+        idList.append(id)
+    return idList
+
+def obsoleteAnyByAllAssociations(suite, session):
+    """
+    Return the ids of arch-any binary associations superseded by a
+    newer arch-all package, as listed by the database view
+    obsolete_any_by_all_associations.  `suite` is the numeric suite id.
+    """
+    query = """
+        SELECT obsolete.id, package, obsolete.version, suite_name, arch_string
+            FROM obsolete_any_by_all_associations AS obsolete
+            JOIN architecture ON obsolete.architecture = architecture.id
+            JOIN suite ON obsolete.suite = suite.id
+            WHERE suite = :suite
+    """
+    return fetch('newer_all', query, { 'suite': suite }, session)
+
+def obsoleteAnyAssociations(suite, session):
+    """
+    Return the ids of obsolete arch-any binary associations, as listed
+    by the database view obsolete_any_associations.  `suite` is the
+    numeric suite id.
+    """
+    query = """
+        SELECT obsolete.id, package, obsolete.version, suite_name, arch_string
+            FROM obsolete_any_associations AS obsolete
+            JOIN architecture ON obsolete.architecture = architecture.id
+            JOIN suite ON obsolete.suite = suite.id
+            WHERE suite = :suite
+    """
+    return fetch('newer_any', query, { 'suite': suite }, session)
+
+def obsoleteSrcAssociations(suite, session):
+    """
+    Return the ids of old, unreferenced source associations, as listed
+    by the database view obsolete_src_associations.  `suite` is the
+    numeric suite id.
+    """
+    query = """
+        SELECT obsolete.id, source, obsolete.version, suite_name,
+               'source' AS arch_string
+            FROM obsolete_src_associations AS obsolete
+            JOIN suite ON obsolete.suite = suite.id
+            WHERE suite = :suite
+    """
+    return fetch('old_and_unreferenced', query, { 'suite': suite }, session)
+
+def obsoleteAllAssociations(suite, session):
+    """
+    Return the ids of old, unreferenced arch-all binary associations,
+    as listed by the database view obsolete_all_associations.  `suite`
+    is the numeric suite id.
+    """
+    query = """
+        SELECT obsolete.id, package, obsolete.version, suite_name,
+               'all' AS arch_string
+            FROM obsolete_all_associations AS obsolete
+            JOIN suite ON obsolete.suite = suite.id
+            WHERE suite = :suite
+    """
+    return fetch('old_and_unreferenced', query, { 'suite': suite }, session)
+
+def deleteAssociations(table, idList, session):
+ query = """
+ DELETE
+ FROM %s
+ WHERE id = :id
+ """ % table
+ session.execute(query, [{'id': id} for id in idList])
+
+def doDaDoDa(suite, session):
+    """
+    Remove all obsolete binary and source associations from the given
+    suite (numeric id).  Deletions happen in the passed session; the
+    caller decides whether to commit or roll back.
+    """
+    # keep this part disabled because it is too dangerous
+    #idList = obsoleteAnyByAllAssociations(suite, session)
+    #deleteAssociations('bin_associations', idList, session)
+
+    idList = obsoleteAnyAssociations(suite, session)
+    deleteAssociations('bin_associations', idList, session)
+
+    idList = obsoleteSrcAssociations(suite, session)
+    deleteAssociations('src_associations', idList, session)
+
+    idList = obsoleteAllAssociations(suite, session)
+    deleteAssociations('bin_associations', idList, session)
+
+def usage():
+    # Print the command line help and exit.  sys.exit() without an
+    # argument exits with status 0.
+    print """Usage: dak dominate [OPTIONS]
+Remove obsolete source and binary associations from suites.
+
+    -s, --suite=SUITE          act on this suite
+    -h, --help                 show this help and exit
+    -n, --no-action            don't commit changes
+    -f, --force                also clean up untouchable suites
+
+SUITE can be comma (or space) separated list, e.g.
+    --suite=testing,unstable"""
+    sys.exit()
+
+def main():
+    """
+    Entry point for 'dak dominate': parse options, then remove obsolete
+    associations from each requested suite.  Untouchable suites are
+    skipped unless --force is given; with --no-action all changes are
+    rolled back instead of committed.
+    """
+    global Options, Logger
+    cnf = Config()
+    Arguments = [('h', "help",      "Obsolete::Options::Help"),
+                 ('s', "suite",     "Obsolete::Options::Suite", "HasArg"),
+                 ('n', "no-action", "Obsolete::Options::No-Action"),
+                 ('f', "force",     "Obsolete::Options::Force")]
+    # Default to every known suite when -s/--suite was not given.
+    # NOTE(review): this suite list is read through a separate session
+    # from the one used for the deletions below -- confirm intentional.
+    query_suites = DBConn().session().query(Suite)
+    suites = [suite.suite_name for suite in query_suites.all()]
+    if not cnf.has_key('Obsolete::Options::Suite'):
+        cnf['Obsolete::Options::Suite'] = ','.join(suites)
+    cnf['Obsolete::Options::Help'] = ''
+    cnf['Obsolete::Options::No-Action'] = ''
+    cnf['Obsolete::Options::Force'] = ''
+    apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
+    Options = cnf.SubTree("Obsolete::Options")
+    if Options['Help']:
+        usage()
+    Logger = daklog.Logger(cnf.Cnf, "dominate")
+    session = DBConn().session()
+    for suite_name in utils.split_args(Options['Suite']):
+        # .one() raises if the suite name is unknown; that aborts the run.
+        suite = session.query(Suite).filter_by(suite_name = suite_name).one()
+        if not suite.untouchable or Options['Force']:
+            doDaDoDa(suite.suite_id, session)
+    if Options['No-Action']:
+        session.rollback()
+    else:
+        session.commit()
+
+if __name__ == '__main__':
+ main()
+
def create_depends_string (suite, depends_tree):
result = ""
if suite == 'experimental':
- suite_where = " in ('experimental','unstable')"
+ suite_where = "in ('experimental','unstable')"
else:
- suite_where = " ='%s'" % suite
+ suite_where = "= '%s'" % suite
comma_count = 1
session = DBConn().session()
--- /dev/null
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Import known_changes files
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Mike O'Connor <stew@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+
+################################################################################
+
+import sys
+import os
+import logging
+import threading
+import glob
+import apt_pkg
+from daklib.dbconn import DBConn, get_dbchange, get_policy_queue, session_wrapper, ChangePendingFile, get_location, check_poolfile
+from daklib.config import Config
+from daklib.queue import Upload
+from daklib.utils import poolify
+
+# where in dak.conf all of our configuration will be stowed
+options_prefix = "NewFiles"
+options_prefix = "%s::Options" % options_prefix
+
+log = logging.getLogger()
+
+################################################################################
+
+
+def usage (exit_code=0):
+    # Print the command line help and exit with the given code.
+    # NOTE(review): "supress" in the help text below is a user-visible
+    # typo ("suppress"); fixing it changes program output, so it is
+    # only flagged here.
+    print """Usage: dak import-new-files [options]
+
+OPTIONS
+     -v, --verbose
+        show verbose information messages
+
+     -q, --quiet
+        supress all output but errors
+
+"""
+    sys.exit(exit_code)
+
+class ImportNewFiles(object):
+    """
+    Import the .changes files sitting in the 'new' policy queue into
+    the database: create DBChange rows where missing, and record each
+    referenced file as a ChangePendingFile (generating missing sha1/
+    sha256 sums), falling back to a pool lookup for files that are no
+    longer present in the queue directory.
+    """
+    @session_wrapper
+    def __init__(self, session=None):
+        cnf = Config()
+        try:
+            newq = get_policy_queue('new', session)
+            for changes_fn in glob.glob(newq.path + "/*.changes"):
+                changes_bn = os.path.basename(changes_fn)
+                chg = get_dbchange(changes_bn, session)
+
+                u = Upload()
+                success = u.load_changes(changes_fn)
+                u.pkg.changes_file = changes_bn
+                u.check_hashes()
+
+                if not chg:
+                    chg = u.pkg.add_known_changes(newq.path, newq.policy_queue_id, session)
+                    session.add(chg)
+
+                if not success:
+                    log.critical("failed to load %s" % changes_fn)
+                    sys.exit(1)
+                else:
+                    # NOTE(review): success is logged at CRITICAL level;
+                    # presumably log.info was intended -- confirm.
+                    log.critical("ACCLAIM: %s" % changes_fn)
+
+                files=[]
+                for chg_fn in u.pkg.files.keys():
+                    try:
+                        f = open(os.path.join(newq.path, chg_fn))
+                        cpf = ChangePendingFile()
+                        cpf.filename = chg_fn
+                        cpf.size = u.pkg.files[chg_fn]['size']
+                        cpf.md5sum = u.pkg.files[chg_fn]['md5sum']
+
+                        if u.pkg.files[chg_fn].has_key('sha1sum'):
+                            cpf.sha1sum = u.pkg.files[chg_fn]['sha1sum']
+                        else:
+                            log.warning("Having to generate sha1sum for %s" % chg_fn)
+                            f.seek(0)
+                            cpf.sha1sum = apt_pkg.sha1sum(f)
+
+                        if u.pkg.files[chg_fn].has_key('sha256sum'):
+                            cpf.sha256sum = u.pkg.files[chg_fn]['sha256sum']
+                        else:
+                            log.warning("Having to generate sha256sum for %s" % chg_fn)
+                            f.seek(0)
+                            cpf.sha256sum = apt_pkg.sha256sum(f)
+
+                        session.add(cpf)
+                        files.append(cpf)
+                        f.close()
+                    except IOError:
+                        # Can't find the file, try to look it up in the pool
+                        poolname = poolify(u.pkg.changes["source"], u.pkg.files[chg_fn]["component"])
+                        l = get_location(cnf["Dir::Pool"], u.pkg.files[chg_fn]["component"], session=session)
+                        if not l:
+                            # NOTE(review): execution continues after this
+                            # and dereferences l.location_id below, which
+                            # would raise AttributeError; an exit or
+                            # continue is probably missing here.
+                            log.critical("ERROR: Can't find location for %s (component %s)" % (chg_fn, u.pkg.files[chg_fn]["component"]))
+
+                        found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
+                                                         u.pkg.files[chg_fn]['size'],
+                                                         u.pkg.files[chg_fn]["md5sum"],
+                                                         l.location_id,
+                                                         session=session)
+
+                        if found is None:
+                            log.critical("ERROR: Found multiple files for %s in pool" % chg_fn)
+                            sys.exit(1)
+                        elif found is False and poolfile is not None:
+                            log.critical("ERROR: md5sum / size mismatch for %s in pool" % chg_fn)
+                            sys.exit(1)
+                        else:
+                            if poolfile is None:
+                                log.critical("ERROR: Could not find %s in pool" % chg_fn)
+                                sys.exit(1)
+                            else:
+                                chg.poolfiles.append(poolfile)
+
+
+                chg.files = files
+
+
+            session.commit()
+
+        except KeyboardInterrupt:
+            print("Caught C-c; terminating.")
+            # NOTE(review): neither utils (as a module) nor plsDie are
+            # defined in this file's visible scope -- both lines below
+            # would raise NameError/AttributeError; confirm and fix.
+            utils.warn("Caught C-c; terminating.")
+            self.plsDie()
+
+
+def main():
+    """
+    Entry point for 'dak import-new-files': parse options, configure
+    logging verbosity, then run the import.
+    """
+    cnf = Config()
+
+    arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")),
+                 ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")),
+                 ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")),
+                ]
+
+    args = apt_pkg.ParseCommandLine(cnf.Cnf, arguments,sys.argv)
+
+    # NOTE(review): num_threads is never used below.
+    num_threads = 1
+
+    # This command takes no positional arguments.
+    if len(args) > 0:
+        usage(1)
+
+    if cnf.has_key("%s::%s" % (options_prefix,"Help")):
+        usage(0)
+
+    # Default INFO; --quiet raises the threshold, --verbose lowers it.
+    level=logging.INFO
+    if cnf.has_key("%s::%s" % (options_prefix,"Quiet")):
+        level=logging.ERROR
+
+    elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")):
+        level=logging.DEBUG
+
+
+    logging.basicConfig( level=level,
+                         format='%(asctime)s %(levelname)s %(message)s',
+                         stream = sys.stderr )
+
+    ImportNewFiles()
+
+
+if __name__ == '__main__':
+ main()
keys = postgres_unames.keys()
keys.sort()
for uname in keys:
- if not passwd_unames.has_key(uname)and not known_postgres_unames.has_key(uname):
- print "W: %s is in Postgres but not the passwd file or list of known Postgres users." % (uname)
+ if not passwd_unames.has_key(uname) and not known_postgres_unames.has_key(uname):
+ print "I: Deleting %s from Postgres, no longer in passwd or list of known Postgres users" % (uname)
+ q = session.execute('DROP USER "%s"' % (uname))
keys = passwd_unames.keys()
keys.sort()
Subst["__BCC__"] = "Bcc: " + ", ".join(bcc)
else:
Subst["__BCC__"] = "X-Filler: 42"
- Subst["__CC__"] = "Cc: " + package + "@" + Cnf["Dinstall::PackagesServer"] + "\nX-DAK: dak override"
- Subst["__ADMIN_ADDRESS__"] = Cnf["Dinstall::MyAdminAddress"]
- Subst["__DISTRO__"] = Cnf["Dinstall::MyDistribution"]
+ Subst["__CC__"] = "Cc: " + package + "@" + cnf["Dinstall::PackagesServer"] + "\nX-DAK: dak override"
+ Subst["__ADMIN_ADDRESS__"] = cnf["Dinstall::MyAdminAddress"]
+ Subst["__DISTRO__"] = cnf["Dinstall::MyDistribution"]
Subst["__WHOAMI__"] = utils.whoami()
Subst["__SOURCE__"] = package
from daklib.queue import *
from daklib import daklog
from daklib import utils
-from daklib.regexes import re_no_epoch, re_default_answer, re_isanum
+from daklib.regexes import re_no_epoch, re_default_answer, re_isanum, re_package
from daklib.dak_exceptions import CantOpenError, AlreadyLockedError, CantGetLockError
from daklib.summarystats import SummaryStats
from daklib.config import Config
+from daklib.changesutils import *
# Globals
Options = None
if answer == 'R':
upload.do_reject(manual=0, reject_message='\n'.join(upload.rejects))
+ upload.pkg.remove_known_changes(session=session)
+ session.commit()
return 0
elif answer == 'S':
return 0
################################################################################
-def indiv_sg_compare (a, b):
- """Sort by source name, source, version, 'have source', and
- finally by filename."""
- # Sort by source version
- q = apt_pkg.VersionCompare(a["version"], b["version"])
- if q:
- return -q
-
- # Sort by 'have source'
- a_has_source = a["architecture"].get("source")
- b_has_source = b["architecture"].get("source")
- if a_has_source and not b_has_source:
- return -1
- elif b_has_source and not a_has_source:
- return 1
-
- return cmp(a["filename"], b["filename"])
-
-############################################################
-
-def sg_compare (a, b):
- a = a[1]
- b = b[1]
- """Sort by have note, source already in database and time of oldest upload."""
- # Sort by have note
- a_note_state = a["note_state"]
- b_note_state = b["note_state"]
- if a_note_state < b_note_state:
- return -1
- elif a_note_state > b_note_state:
- return 1
- # Sort by source already in database (descending)
- source_in_database = cmp(a["source_in_database"], b["source_in_database"])
- if source_in_database:
- return -source_in_database
-
- # Sort by time of oldest upload
- return cmp(a["oldest"], b["oldest"])
-
-def sort_changes(changes_files, session):
- """Sort into source groups, then sort each source group by version,
- have source, filename. Finally, sort the source groups by have
- note, time of oldest upload of each source upload."""
- if len(changes_files) == 1:
- return changes_files
-
- sorted_list = []
- cache = {}
- # Read in all the .changes files
- for filename in changes_files:
- u = Upload()
- try:
- u.pkg.changes_file = filename
- u.load_changes(filename)
- u.update_subst()
- cache[filename] = copy.copy(u.pkg.changes)
- cache[filename]["filename"] = filename
- except:
- sorted_list.append(filename)
- break
- # Divide the .changes into per-source groups
- per_source = {}
- for filename in cache.keys():
- source = cache[filename]["source"]
- if not per_source.has_key(source):
- per_source[source] = {}
- per_source[source]["list"] = []
- per_source[source]["list"].append(cache[filename])
- # Determine oldest time and have note status for each source group
- for source in per_source.keys():
- q = session.query(DBSource).filter_by(source = source).all()
- per_source[source]["source_in_database"] = len(q)>0
- source_list = per_source[source]["list"]
- first = source_list[0]
- oldest = os.stat(first["filename"])[stat.ST_MTIME]
- have_note = 0
- for d in per_source[source]["list"]:
- mtime = os.stat(d["filename"])[stat.ST_MTIME]
- if mtime < oldest:
- oldest = mtime
- have_note += has_new_comment(d["source"], d["version"], session)
- per_source[source]["oldest"] = oldest
- if not have_note:
- per_source[source]["note_state"] = 0; # none
- elif have_note < len(source_list):
- per_source[source]["note_state"] = 1; # some
- else:
- per_source[source]["note_state"] = 2; # all
- per_source[source]["list"].sort(indiv_sg_compare)
- per_source_items = per_source.items()
- per_source_items.sort(sg_compare)
- for i in per_source_items:
- for j in i[1]["list"]:
- sorted_list.append(j["filename"])
- return sorted_list
-
-################################################################################
-
class Section_Completer:
def __init__ (self, session):
self.sections = []
prod_mail_message = utils.TemplateSubst(
Subst,cnf["Dir::Templates"]+"/process-new.prod")
- # Send the prod mail if appropriate
- if not cnf["Dinstall::Options::No-Mail"]:
- utils.send_mail(prod_mail_message)
+ # Send the prod mail
+ utils.send_mail(prod_mail_message)
print "Sent proding message"
changes = upload.pkg.changes
cnf = Config()
+ # Check for a valid distribution
+ upload.check_distributions()
+
# Make a copy of distribution we can happily trample on
changes["suite"] = copy.copy(changes["distribution"])
- # Fix up the list of target suites
- for suite in changes["suite"].keys():
- override = cnf.Find("Suite::%s::OverrideSuite" % (suite))
- if override:
- (olderr, newerr) = (get_suite(suite, session) == None,
- get_suite(override, session) == None)
- if olderr or newerr:
- (oinv, newinv) = ("", "")
- if olderr: oinv = "invalid "
- if newerr: ninv = "invalid "
- print "warning: overriding %ssuite %s to %ssuite %s" % (
- oinv, suite, ninv, override)
- del changes["suite"][suite]
- changes["suite"][override] = 1
- # Validate suites
- for suite in changes["suite"].keys():
- if get_suite(suite, session) is None:
- utils.fubar("%s has invalid suite '%s' (possibly overriden). say wha?" % (changes, suite))
-
# The main NEW processing loop
done = 0
while not done:
try:
check_daily_lock()
done = add_overrides (new, upload, session)
+ new_accept(upload, Options["No-Action"], session)
Logger.log(["NEW ACCEPT: %s" % (upload.pkg.changes_file)])
except CantGetLockError:
print "Hello? Operator! Give me the number for 911!"
elif answer == 'E' and not Options["Trainee"]:
new = edit_overrides (new, upload, session)
elif answer == 'M' and not Options["Trainee"]:
- upload.pkg.remove_known_changes()
aborted = upload.do_reject(manual=1,
reject_message=Options["Manual-Reject"],
- note=get_new_comments(changes.get("source", ""), session=session))
+ notes=get_new_comments(changes.get("source", ""), session=session))
if not aborted:
+ upload.pkg.remove_known_changes(session=session)
+ session.commit()
Logger.log(["NEW REJECT: %s" % (upload.pkg.changes_file)])
done = 1
elif answer == 'N':
elif answer == 'M':
Logger.log(["BYHAND REJECT: %s" % (upload.pkg.changes_file)])
upload.do_reject(manual=1, reject_message=Options["Manual-Reject"])
+ upload.pkg.remove_known_changes(session=session)
+ session.commit()
done = 1
elif answer == 'S':
done = 1
finally:
os.unlink(path)
-def move_file_to_queue(to_q, f, session):
- """mark a file as being in the unchecked queue"""
- # update the queue_file entry for the existing queue
- qf = session.query(QueueFile).filter_by(queueid=to_q.queueid,
- filename=f.filename)
- qf.queue = to_q
-
- # update the changes_pending_files row
- f.queue = to_q
+class clean_holding(object):
+ def __init__(self,pkg):
+ self.pkg = pkg
-def changes_to_unchecked(changes, session):
- """move a changes file to unchecked"""
- unchecked = get_policy_queue('unchecked', session );
- changes.in_queue = unchecked
+ def __enter__(self):
+ pass
- for f in changes.pkg.files:
- move_file_to_queue(unchecked, f)
+ def __exit__(self, type, value, traceback):
+ h = Holding()
- # actually move files
- changes.move_to_queue(unchecked)
+ for f in self.pkg.files.keys():
+ if os.path.exists(os.path.join(h.holding_dir, f)):
+ os.unlink(os.path.join(h.holding_dir, f))
-def _accept(upload):
- if Options["No-Action"]:
- return
- (summary, short_summary) = upload.build_summaries()
-# upload.accept(summary, short_summary, targetqueue)
-# os.unlink(upload.pkg.changes_file[:-8]+".dak")
- changes_to_unchecked(upload)
-
-def do_accept(upload):
- print "ACCEPT"
- cnf = Config()
- if not Options["No-Action"]:
- (summary, short_summary) = upload.build_summaries()
-
- if cnf.FindB("Dinstall::SecurityQueueHandling"):
- upload.dump_vars(cnf["Dir::Queue::Embargoed"])
- upload.move_to_queue(get_policy_queue('embargoed'))
- upload.queue_build("embargoed", cnf["Dir::Queue::Embargoed"])
- # Check for override disparities
- upload.Subst["__SUMMARY__"] = summary
- else:
- # Just a normal upload, accept it...
- _accept(upload)
def do_pkg(changes_file, session):
new_queue = get_policy_queue('new', session );
u = Upload()
u.pkg.changes_file = changes_file
+ (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changes_file)
u.load_changes(changes_file)
u.pkg.directory = new_queue.path
u.update_subst()
u.Subst["__BCC__"] = bcc
files = u.pkg.files
+ for deb_filename, f in files.items():
+ if deb_filename.endswith(".udeb") or deb_filename.endswith(".deb"):
+ u.binary_file_checks(deb_filename, session)
+ u.check_binary_against_db(deb_filename, session)
+ else:
+ u.source_file_checks(deb_filename, session)
+ u.check_source_against_db(deb_filename, session)
+
+ u.pkg.changes["suite"] = copy.copy(u.pkg.changes["distribution"])
try:
with lock_package(u.pkg.changes["source"]):
- if not recheck(u, session):
- return
-
- do_new(u,session)
-
+ with clean_holding(u.pkg):
+ if not recheck(u, session):
+ return
+
+ # FIXME: This does need byhand checks added!
+ new = determine_new(u.pkg.changes, files)
+ if new:
+ do_new(u, session)
+ else:
+ try:
+ check_daily_lock()
+ new_accept(u, Options["No-Action"], session)
+ except CantGetLockError:
+ print "Hello? Operator! Give me the number for 911!"
+ print "Dinstall in the locked area, cant process packages, come back later"
# (new, byhand) = check_status(files)
# if new or byhand:
# if new:
sys.stderr.write("Sorting changes...\n")
changes_files = sort_changes(changes_files, session)
- # Kill me now? **FIXME**
- cnf["Dinstall::Options::No-Mail"] = ""
-
for changes_file in changes_files:
changes_file = utils.validate_changes_file_arg(changes_file, 0)
if not changes_file:
--- /dev/null
+#!/usr/bin/env python
+# vim:set et ts=4 sw=4:
+
+""" Handles packages from policy queues
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2001, 2002, 2003, 2004, 2005, 2006 James Troup <james@nocrew.org>
+@copyright: 2009 Joerg Jaspert <joerg@debian.org>
+@copyright: 2009 Frank Lichtenheld <djpig@debian.org>
+@copyright: 2009 Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+# <mhy> So how do we handle that at the moment?
+# <stew> Probably incorrectly.
+
+################################################################################
+
+import os
+import copy
+import sys
+import apt_pkg
+
+from daklib.dbconn import *
+from daklib.queue import *
+from daklib import daklog
+from daklib import utils
+from daklib.dak_exceptions import CantOpenError, AlreadyLockedError, CantGetLockError
+from daklib.config import Config
+from daklib.changesutils import *
+
+# Globals
+Options = None
+Logger = None
+
+################################################################################
+
+def do_comments(dir, srcqueue, opref, npref, line, fn, session):
+ for comm in [ x for x in os.listdir(dir) if x.startswith(opref) ]:
+ lines = open("%s/%s" % (dir, comm)).readlines()
+ if len(lines) == 0 or lines[0] != line + "\n": continue
+ changes_files = [ x for x in os.listdir(".") if x.startswith(comm[7:]+"_")
+ and x.endswith(".changes") ]
+ changes_files = sort_changes(changes_files, session)
+ for f in changes_files:
+ print "Processing changes file: %s" % f
+ f = utils.validate_changes_file_arg(f, 0)
+ if not f:
+ print "Couldn't validate changes file %s" % f
+ continue
+ fn(f, srcqueue, "".join(lines[1:]), session)
+
+ if opref != npref and not Options["No-Action"]:
+ newcomm = npref + comm[len(opref):]
+ os.rename("%s/%s" % (dir, comm), "%s/%s" % (dir, newcomm))
+
+################################################################################
+
+def comment_accept(changes_file, srcqueue, comments, session):
+    """
+    ACCEPT handler for do_comments(): migrate `changes_file` from the
+    source policy queue `srcqueue` to the 'newstage' queue.  The
+    `comments` text is unused here.  No-op under --no-action.
+    """
+    u = Upload()
+    u.pkg.changes_file = changes_file
+    u.load_changes(changes_file)
+    u.update_subst()
+
+    if not Options["No-Action"]:
+        destqueue = get_policy_queue('newstage', session)
+        if changes_to_queue(u, srcqueue, destqueue, session):
+            Logger.log(["Policy Queue ACCEPT: %s:  %s" % (srcqueue.queue_name, u.pkg.changes_file)])
+        else:
+            print "E: Failed to migrate %s" % u.pkg.changes_file
+
+################################################################################
+
+def comment_reject(changes_file, srcqueue, comments, session):
+    """
+    REJECT handler for do_comments(): reject `changes_file` from policy
+    queue `srcqueue` with `comments` as the rejection reason, remove it
+    from the known-changes table and commit.  No-op under --no-action.
+    """
+    u = Upload()
+    u.pkg.changes_file = changes_file
+    u.load_changes(changes_file)
+    u.update_subst()
+
+    u.rejects.append(comments)
+
+    # Honour the archive-wide Bcc setting in the rejection mail.
+    cnf = Config()
+    bcc = "X-DAK: dak process-policy"
+    if cnf.has_key("Dinstall::Bcc"):
+        u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
+    else:
+        u.Subst["__BCC__"] = bcc
+
+    if not Options["No-Action"]:
+        u.do_reject(manual=0, reject_message='\n'.join(u.rejects))
+        u.pkg.remove_known_changes(session=session)
+        session.commit()
+
+        Logger.log(["Policy Queue REJECT: %s:  %s" % (srcqueue.queue_name, u.pkg.changes_file)])
+
+
+################################################################################
+
+def main():
+    """
+    Entry point for 'dak process-policy QUEUE': process the ACCEPT./
+    REJECT. comment files of the named policy queue.
+    """
+    global Options, Logger
+
+    cnf = Config()
+    session = DBConn().session()
+
+    Arguments = [('h',"help","Process-Policy::Options::Help"),
+                 ('n',"no-action","Process-Policy::Options::No-Action")]
+
+    for i in ["help", "no-action"]:
+        if not cnf.has_key("Process-Policy::Options::%s" % (i)):
+            cnf["Process-Policy::Options::%s" % (i)] = ""
+
+    # Positional arguments: exactly one policy queue name.
+    queue_name = apt_pkg.ParseCommandLine(cnf.Cnf,Arguments,sys.argv)
+
+    if len(queue_name) != 1:
+        print "E: Specify exactly one policy queue"
+        sys.exit(1)
+
+    queue_name = queue_name[0]
+
+    Options = cnf.SubTree("Process-Policy::Options")
+
+    if Options["Help"]:
+        # NOTE(review): no usage() is defined or imported in this file's
+        # visible code -- confirm it comes in via the wildcard imports.
+        usage()
+
+    if not Options["No-Action"]:
+        try:
+            Logger = daklog.Logger(cnf, "process-policy")
+        except CantOpenError, e:
+            Logger = None
+
+    # Find policy queue
+    # NOTE(review): the next statement builds a query and discards it;
+    # it has no effect and can likely be removed.
+    session.query(PolicyQueue)
+
+    try:
+        # NOTE(review): NoResultFound (sqlalchemy.orm.exc) is not
+        # explicitly imported here -- presumably re-exported by
+        # daklib.dbconn; verify.
+        pq = session.query(PolicyQueue).filter_by(queue_name=queue_name).one()
+    except NoResultFound:
+        print "E: Cannot find policy queue %s" % queue_name
+        sys.exit(1)
+
+    commentsdir = os.path.join(pq.path, 'COMMENTS')
+    # The comments stuff relies on being in the right directory
+    os.chdir(pq.path)
+    do_comments(commentsdir, pq, "ACCEPT.", "ACCEPTED.", "OK", comment_accept, session)
+    do_comments(commentsdir, pq, "REJECT.", "REJECTED.", "NOTOK", comment_reject, session)
###############################################################################
+def byebye():
+    """
+    Exit-time cleanup: delete changes_pending_files rows that are no
+    longer referenced from changes_pending_files_map.  Skipped when
+    running with --no-action (relies on the module-global Options).
+    """
+    if not Options["No-Action"]:
+        # Clean out the queue files
+        session = DBConn().session()
+        session.execute("DELETE FROM changes_pending_files WHERE id NOT IN (SELECT file_id FROM changes_pending_files_map )")
+        session.commit()
+
+
+
def action(u, session):
cnf = Config()
holding = Holding()
for s in u.pkg.changes["distribution"].keys():
suite = get_suite(s, session)
if suite.policy_queue:
- if not chg or chg.approved_for_id != su.policy_queue.policy_queue_id:
+ if not chg or chg.approved_for_id != suite.policy_queue.policy_queue_id:
# This routine will check whether the upload is a binary
# upload when the source is already in the target suite. If
# so, we skip the policy queue, otherwise we go there.
elif answer == 'A':
if not chg:
chg = u.pkg.add_known_changes(holding.holding_dir, session=session)
+ session.commit()
u.accept(summary, short_summary, session)
u.check_override()
+ chg.clean_from_queue()
session.commit()
u.remove()
elif answer == 'P':
session.commit()
u.remove()
elif answer == 'Q':
+ byebye()
sys.exit(0)
session.commit()
utils.size_type(int(summarystats.accept_bytes)))
Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
+ byebye()
+
if not Options["No-Action"]:
if log_urgency:
UrgencyLog().close()
+
Logger.close()
###############################################################################
from daklib import utils
from daklib.queue import Upload
-from daklib.dbconn import DBConn, has_new_comment, DBChange
+from daklib.dbconn import DBConn, has_new_comment, DBChange, get_uid_from_fingerprint
from daklib.textutils import fix_maintainer
from daklib.dak_exceptions import *
source = i[1]["list"][0]["source"]
if len(source) > max_source_len:
max_source_len = len(source)
+ binary_list = i[1]["list"][0]["binary"].keys()
+ binary = ', '.join(binary_list)
arches = {}
versions = {}
for j in i[1]["list"]:
closes=j["closes"].keys()
if dbc:
fingerprint = dbc.fingerprint
-
- # TODO: This won't work now as it never gets set
- # Fix so that we compare the changed-by/maintainer and the signing key
- # Should probably be done somewhere more central
- #if j.has_key("sponsoremail"):
- # sponsor=j["sponsoremail"]
+ sponsor_name = get_uid_from_fingerprint(fingerprint).name
+ sponsor_email = get_uid_from_fingerprint(fingerprint).uid + "@debian.org"
+                if sponsor_name != maintainer["maintainername"] and sponsor_name != changeby["changedbyname"] and \
+                   sponsor_email != maintainer["maintaineremail"] and sponsor_email != changeby["changedbyemail"]:
+ sponsor = sponsor_email
for arch in j["architecture"].keys():
arches[arch] = ""
note = " | [N]"
else:
note = ""
- entries.append([source, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, filename])
+ entries.append([source, binary, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, filename])
# direction entry consists of "Which field, which direction, time-consider" where
# time-consider says how we should treat last_modified. Thats all.
if Cnf.has_key("Queue-Report::Options::822"):
# print stuff out in 822 format
for entry in entries:
- (source, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, changes_file) = entry
+ (source, binary, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, changes_file) = entry
# We'll always have Source, Version, Arch, Mantainer, and Dist
# For the rest, check to see if we have them, then print them out
log.write("Source: " + source + "\n")
+ log.write("Binary: " + binary + "\n")
log.write("Version: " + version_list + "\n")
log.write("Architectures: ")
log.write( (", ".join(arch_list.split(" "))) + "\n")
source_count = len(per_source_items)
table_header(type.upper(), source_count, total_count)
for entry in entries:
- (source, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, undef) = entry
+ (source, binary, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, undef) = entry
table_row(source, version_list, arch_list, time_pp(last_modified), maint, distribution, closes, fingerprint, sponsor, changedby)
table_footer(type.upper())
elif not Cnf.has_key("Queue-Report::Options::822"):
msg = ""
for entry in entries:
- (source, version_list, arch_list, note, last_modified, undef, undef, undef, undef, undef, undef, undef) = entry
+ (source, binary, version_list, arch_list, note, last_modified, undef, undef, undef, undef, undef, undef, undef) = entry
msg += format % (source, version_list, arch_list, note, time_pp(last_modified))
if msg:
#!/usr/bin/env python
""" Output html for packages in NEW """
-# Copyright (C) 2007 Joerg Jaspert <joerg@debian.org>
+# Copyright (C) 2007, 2009 Joerg Jaspert <joerg@debian.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
import apt_pkg
import examine_package
-from daklib.queue import determine_new, check_valid
+from daklib.dbconn import *
+from daklib.queue import determine_new, check_valid, Upload, get_policy_queue
from daklib import utils
from daklib.regexes import re_source_ext
+from daklib.config import Config
+from daklib import daklog
+from daklib.changesutils import *
# Globals
Cnf = None
</body>
</html>
"""
-
+#"""
################################################################################
-def do_pkg(changes_file):
- c = Changes()
- c.load_dot_dak(changes_file)
- files = c.files
- changes = c.changes
-
- c.changes["suite"] = copy(c.changes["distribution"])
- distribution = c.changes["distribution"].keys()[0]
- # Find out what's new
- new = determine_new(c.changes, c.files, 0)
+def do_pkg(changes_file, session):
+ u = Upload()
+ u.pkg.changes_file = changes_file
+ (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changes_file)
+ u.load_changes(changes_file)
+ new_queue = get_policy_queue('new', session );
+ u.pkg.directory = new_queue.path
+ u.update_subst()
+ origchanges = os.path.abspath(u.pkg.changes_file)
+ files = u.pkg.files
+ changes = u.pkg.changes
+
+ for deb_filename, f in files.items():
+ if deb_filename.endswith(".udeb") or deb_filename.endswith(".deb"):
+ u.binary_file_checks(deb_filename, session)
+ u.check_binary_against_db(deb_filename, session)
+ else:
+ u.source_file_checks(deb_filename, session)
+ u.check_source_against_db(deb_filename, session)
+ u.pkg.changes["suite"] = u.pkg.changes["distribution"]
+
+ new = determine_new(u.pkg.changes, files, 0)
stdout_fd = sys.stdout
- htmlname = c.changes["source"] + "_" + c.changes["version"] + ".html"
+ htmlname = changes["source"] + "_" + changes["version"] + ".html"
sources.add(htmlname)
# do not generate html output if that source/version already has one.
- if not os.path.exists(os.path.join(Cnf["Show-New::HTMLPath"],htmlname)):
- sys.stdout = open(os.path.join(Cnf["Show-New::HTMLPath"],htmlname),"w")
+ if not os.path.exists(os.path.join(cnf["Show-New::HTMLPath"],htmlname)):
+ sys.stdout = open(os.path.join(cnf["Show-New::HTMLPath"],htmlname),"w")
filestoexamine = []
for pkg in new.keys():
for fn in new[pkg]["files"]:
- if (c.files[fn].has_key("new") and
- (c.files[fn]["type"] == "dsc" or
- not re_source_ext.match(c.files[fn]["type"]))):
+ if (files[fn].has_key("new") and
+ (files[fn]["type"] == "dsc" or
+ not re_source_ext.match(files[fn]["type"]))):
filestoexamine.append(fn)
- html_header(c.changes["source"], filestoexamine)
+ html_header(changes["source"], filestoexamine)
check_valid(new)
- examine_package.display_changes( distribution, changes_file)
+ examine_package.display_changes( u.pkg.changes["distribution"], changes_file)
for fn in filter(lambda fn: fn.endswith(".dsc"), filestoexamine):
-    examine_package.check_dsc(distribution, fn)
+    examine_package.check_dsc(u.pkg.changes["distribution"], fn)
################################################################################
-def init():
- global Cnf, Options
+def init(session):
+ global cnf, Options
- Cnf = utils.get_conf()
+ cnf = Config()
Arguments = [('h',"help","Show-New::Options::Help"),
("p","html-path","Show-New::HTMLPath","HasArg")]
for i in ["help"]:
- if not Cnf.has_key("Show-New::Options::%s" % (i)):
- Cnf["Show-New::Options::%s" % (i)] = ""
+ if not cnf.has_key("Show-New::Options::%s" % (i)):
+ cnf["Show-New::Options::%s" % (i)] = ""
- changes_files = apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv)
- Options = Cnf.SubTree("Show-New::Options")
+ changes_files = apt_pkg.ParseCommandLine(cnf.Cnf,Arguments,sys.argv)
+ if len(changes_files) == 0:
+ new_queue = get_policy_queue('new', session );
+ changes_files = utils.get_changes_files(new_queue.path)
+
+ Options = cnf.SubTree("Show-New::Options")
if Options["help"]:
usage()
################################################################################
def main():
- changes_files = init()
+ session = DBConn().session()
+ changes_files = init(session)
examine_package.use_html=1
if not changes_file:
continue
print "\n" + changes_file
- do_pkg (changes_file)
- files = set(os.listdir(Cnf["Show-New::HTMLPath"]))
+ do_pkg (changes_file, session)
+
+ files = set(os.listdir(cnf["Show-New::HTMLPath"]))
to_delete = filter(lambda x: x.endswith(".html"), files.difference(sources))
for f in to_delete:
- os.remove(os.path.join(Cnf["Show-New::HTMLPath"],f))
+ os.remove(os.path.join(cnf["Show-New::HTMLPath"],f))
################################################################################
"""
# Lets check if from_file is in the directory we expect it to be in
- if not os.path.abspath(from_file).startswith(Cnf["Transitions::TempPath"]):
- print "Will not accept transitions file outside of %s" % (Cnf["Transitions::TempPath"])
+ if not os.path.abspath(from_file).startswith(Cnf["Dir::TempPath"]):
+ print "Will not accept transitions file outside of %s" % (Cnf["Dir::TempPath"])
sys.exit(3)
if Options["sudo"]:
sudo-ed script and would be unreadable if it has default mkstemp mode
"""
- (fd, path) = tempfile.mkstemp("", "transitions", Cnf["Transitions::TempPath"])
+ (fd, path) = tempfile.mkstemp("", "transitions", Cnf["Dir::TempPath"])
os.chmod(path, 0644)
f = open(path, "w")
yaml.dump(transitions, f, default_flow_style=False)
(Cnf["Dinstall::Reject::ReleaseTransitions"]))
sys.exit(1)
# Also check if our temp directory is defined and existant
- temppath = Cnf.get("Transitions::TempPath", "")
+ temppath = Cnf.get("Dir::TempPath", "")
if temppath == "":
- utils.warn("Transitions::TempPath not defined")
+ utils.warn("Dir::TempPath not defined")
sys.exit(1)
if not os.path.exists(temppath):
utils.warn("Temporary path %s not found." %
- (Cnf["Transitions::TempPath"]))
+ (Cnf["Dir::TempPath"]))
sys.exit(1)
if Options["import"]:
################################################################################
Cnf = None
-required_database_schema = 24
+required_database_schema = 28
################################################################################
else:
multivalues[key] = self.changes[key]
- # TODO: Use ORM
- session.execute(
- """INSERT INTO changes
- (changesname, in_queue, seen, source, binaries, architecture, version,
- distribution, urgency, maintainer, fingerprint, changedby, date)
- VALUES (:changesfile,:in_queue,:filetime,:source,:binary, :architecture,
- :version,:distribution,:urgency,:maintainer,:fingerprint,:changedby,:date)""",
- { 'changesfile': self.changes_file,
- 'filetime': filetime,
- 'in_queue': in_queue,
- 'source': self.changes["source"],
- 'binary': multivalues["binary"],
- 'architecture': multivalues["architecture"],
- 'version': self.changes["version"],
- 'distribution': multivalues["distribution"],
- 'urgency': self.changes["urgency"],
- 'maintainer': self.changes["maintainer"],
- 'fingerprint': self.changes["fingerprint"],
- 'changedby': self.changes["changed-by"],
- 'date': self.changes["date"]} )
+ chg = DBChange()
+ chg.changesname = self.changes_file
+ chg.seen = filetime
+ chg.in_queue_id = in_queue
+ chg.source = self.changes["source"]
+ chg.binaries = multivalues["binary"]
+ chg.architecture = multivalues["architecture"]
+ chg.version = self.changes["version"]
+ chg.distribution = multivalues["distribution"]
+ chg.urgency = self.changes["urgency"]
+ chg.maintainer = self.changes["maintainer"]
+ chg.fingerprint = self.changes["fingerprint"]
+ chg.changedby = self.changes["changed-by"]
+ chg.date = self.changes["date"]
+
+ session.add(chg)
+
+ files = []
+ for chg_fn, entry in self.files.items():
+ try:
+ f = open(os.path.join(dirpath, chg_fn))
+ cpf = ChangePendingFile()
+ cpf.filename = chg_fn
+ cpf.size = entry['size']
+ cpf.md5sum = entry['md5sum']
+
+ if entry.has_key('sha1sum'):
+ cpf.sha1sum = entry['sha1sum']
+ else:
+ f.seek(0)
+ cpf.sha1sum = apt_pkg.sha1sum(f)
+
+ if entry.has_key('sha256sum'):
+ cpf.sha256sum = entry['sha256sum']
+ else:
+ f.seek(0)
+ cpf.sha256sum = apt_pkg.sha256sum(f)
+
+ session.add(cpf)
+ files.append(cpf)
+ f.close()
+
+ except IOError:
+ # Can't find the file, try to look it up in the pool
+ poolname = poolify(entry["source"], entry["component"])
+ l = get_location(cnf["Dir::Pool"], entry["component"], session=session)
+
+ found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
+ entry['size'],
+ entry["md5sum"],
+ l.location_id,
+ session=session)
+
+ if found is None:
+ Logger.log(["E: Found multiple files for pool (%s) for %s" % (chg_fn, entry["component"])])
+ elif found is False and poolfile is not None:
+ Logger.log(["E: md5sum/size mismatch for %s in pool" % (chg_fn)])
+ else:
+ if poolfile is None:
+ Logger.log(["E: Could not find %s in pool" % (chg_fn)])
+ else:
+ chg.poolfiles.append(poolfile)
+
+ chg.files = files
session.commit()
+ chg = session.query(DBChange).filter_by(changesname = self.changes_file).one();
- return session.query(DBChange).filter_by(changesname = self.changes_file).one()
+ return chg
def unknown_files_fields(self, name):
return sorted(list( set(self.files[name].keys()) -
--- /dev/null
+#!/usr/bin/env python
+# vim:set et ts=4 sw=4:
+
+"""Utilities for handling changes files
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2001, 2002, 2003, 2004, 2005, 2006 James Troup <james@nocrew.org>
+@copyright: 2009 Joerg Jaspert <joerg@debian.org>
+@copyright: 2009 Frank Lichtenheld <djpig@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import copy
+import os
+import stat
+import apt_pkg
+
+from daklib.dbconn import *
+from daklib.queue import *
+from daklib import utils
+from daklib.config import Config
+
+################################################################################
+
+__all__ = []
+
+################################################################################
+
+def indiv_sg_compare (a, b):
+ """Sort by source name, source, version, 'have source', and
+ finally by filename."""
+ # Sort by source version
+ q = apt_pkg.VersionCompare(a["version"], b["version"])
+ if q:
+ return -q
+
+ # Sort by 'have source'
+ a_has_source = a["architecture"].get("source")
+ b_has_source = b["architecture"].get("source")
+ if a_has_source and not b_has_source:
+ return -1
+ elif b_has_source and not a_has_source:
+ return 1
+
+ return cmp(a["filename"], b["filename"])
+
+__all__.append('indiv_sg_compare')
+
+############################################################
+
+def sg_compare (a, b):
+ a = a[1]
+ b = b[1]
+ """Sort by have note, source already in database and time of oldest upload."""
+ # Sort by have note
+ a_note_state = a["note_state"]
+ b_note_state = b["note_state"]
+ if a_note_state < b_note_state:
+ return -1
+ elif a_note_state > b_note_state:
+ return 1
+ # Sort by source already in database (descending)
+ source_in_database = cmp(a["source_in_database"], b["source_in_database"])
+ if source_in_database:
+ return -source_in_database
+
+ # Sort by time of oldest upload
+ return cmp(a["oldest"], b["oldest"])
+
+__all__.append('sg_compare')
+
+def sort_changes(changes_files, session):
+ """Sort into source groups, then sort each source group by version,
+ have source, filename. Finally, sort the source groups by have
+ note, time of oldest upload of each source upload."""
+ if len(changes_files) == 1:
+ return changes_files
+
+ sorted_list = []
+ cache = {}
+ # Read in all the .changes files
+ for filename in changes_files:
+ u = Upload()
+ try:
+ u.pkg.changes_file = filename
+ u.load_changes(filename)
+ u.update_subst()
+ cache[filename] = copy.copy(u.pkg.changes)
+ cache[filename]["filename"] = filename
+ except:
+ sorted_list.append(filename)
+ break
+ # Divide the .changes into per-source groups
+ per_source = {}
+ for filename in cache.keys():
+ source = cache[filename]["source"]
+ if not per_source.has_key(source):
+ per_source[source] = {}
+ per_source[source]["list"] = []
+ per_source[source]["list"].append(cache[filename])
+ # Determine oldest time and have note status for each source group
+ for source in per_source.keys():
+ q = session.query(DBSource).filter_by(source = source).all()
+ per_source[source]["source_in_database"] = len(q)>0
+ source_list = per_source[source]["list"]
+ first = source_list[0]
+ oldest = os.stat(first["filename"])[stat.ST_MTIME]
+ have_note = 0
+ for d in per_source[source]["list"]:
+ mtime = os.stat(d["filename"])[stat.ST_MTIME]
+ if mtime < oldest:
+ oldest = mtime
+ have_note += has_new_comment(d["source"], d["version"], session)
+ per_source[source]["oldest"] = oldest
+ if not have_note:
+ per_source[source]["note_state"] = 0; # none
+ elif have_note < len(source_list):
+ per_source[source]["note_state"] = 1; # some
+ else:
+ per_source[source]["note_state"] = 2; # all
+ per_source[source]["list"].sort(indiv_sg_compare)
+ per_source_items = per_source.items()
+ per_source_items.sort(sg_compare)
+ for i in per_source_items:
+ for j in i[1]["list"]:
+ sorted_list.append(j["filename"])
+ return sorted_list
+
+__all__.append('sort_changes')
+
+################################################################################
+
+def changes_to_queue(upload, srcqueue, destqueue, session):
+ """Move a changes file to a different queue and mark as approved for the
+ source queue"""
+
+ try:
+ chg = session.query(DBChange).filter_by(changesname=os.path.basename(upload.pkg.changes_file)).one()
+ except NoResultFound:
+ return False
+
+ chg.approved_for_id = srcqueue.policy_queue_id
+
+ for f in chg.files:
+ # update the changes_pending_files row
+ f.queue = destqueue
+ utils.move(os.path.join(srcqueue.path, f.filename), destqueue.path, perms=int(destqueue.perms, 8))
+
+ utils.move(os.path.join(srcqueue.path, upload.pkg.changes_file), destqueue.path, perms=int(destqueue.perms, 8))
+ chg.in_queue = destqueue
+ session.commit()
+
+ return True
+
+__all__.append('changes_to_queue')
+
+def new_accept(upload, dry_run, session):
+ print "ACCEPT"
+
+ if not dry_run:
+ cnf = Config()
+
+ (summary, short_summary) = upload.build_summaries()
+
+ # XXX: mhy: I think this is wrong as these are all attributes on the
+ # build and policy queues now
+ if cnf.FindB("Dinstall::SecurityQueueHandling"):
+ upload.dump_vars(cnf["Dir::Queue::Embargoed"])
+ upload.move_to_queue(get_policy_queue('embargoed'))
+ upload.queue_build("embargoed", cnf["Dir::Queue::Embargoed"])
+ # Check for override disparities
+ upload.Subst["__SUMMARY__"] = summary
+ else:
+ # Just a normal upload, accept it...
+ (summary, short_summary) = upload.build_summaries()
+ srcqueue = get_policy_queue('new', session)
+ destqueue = get_policy_queue('newstage', session)
+
+ changes_to_queue(upload, srcqueue, destqueue, session)
+
+__all__.append('new_accept')
sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name
FROM binaries b, files fi, location l, component c, bin_associations ba, suite su
- WHERE b.package=:package
+ WHERE b.package='%(package)s'
AND b.file = fi.id
AND fi.location = l.id
AND l.component = c.id
AND ba.bin=b.id
AND ba.suite = su.id
- AND su.suite_name=:suitename
+ AND su.suite_name %(suitename)s
ORDER BY b.version DESC"""
- return session.execute(sql, {'package': package, 'suitename': suitename})
+ return session.execute(sql % {'package': package, 'suitename': suitename})
__all__.append('get_binary_from_name_suite')
def __repr__(self):
return '<DBChange %s>' % self.changesname
+ def clean_from_queue(self):
+ session = DBConn().session().object_session(self)
+
+ # Remove changes_pool_files entries
+ self.poolfiles = []
+
+ # Remove changes_pending_files references
+ self.files = []
+
+ # Clear out of queue
+ self.in_queue = None
+ self.approved_for_id = None
+
__all__.append('DBChange')
@session_wrapper
session.flush()
- return dsc_component, dsc_location_id, pfs
+ return source, dsc_component, dsc_location_id, pfs
__all__.append('add_dsc_to_db')
poolfiles = relation(PoolFile,
secondary=self.tbl_changes_pool_files,
backref="changeslinks"),
+ seen = self.tbl_changes.c.seen,
+ source = self.tbl_changes.c.source,
+ binaries = self.tbl_changes.c.binaries,
+ architecture = self.tbl_changes.c.architecture,
+ distribution = self.tbl_changes.c.distribution,
+ urgency = self.tbl_changes.c.urgency,
+ maintainer = self.tbl_changes.c.maintainer,
+ changedby = self.tbl_changes.c.changedby,
+ date = self.tbl_changes.c.date,
+ version = self.tbl_changes.c.version,
files = relation(ChangePendingFile,
secondary=self.tbl_changes_pending_files_map,
backref="changesfile"),
properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
mapper(ChangePendingFile, self.tbl_changes_pending_files,
- properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+ properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
+ filename = self.tbl_changes_pending_files.c.filename,
+ size = self.tbl_changes_pending_files.c.size,
+ md5sum = self.tbl_changes_pending_files.c.md5sum,
+ sha1sum = self.tbl_changes_pending_files.c.sha1sum,
+ sha256sum = self.tbl_changes_pending_files.c.sha256sum))
mapper(ChangePendingSource, self.tbl_changes_pending_source,
properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
self.in_holding = {}
self.holding_dir = Config()["Dir::Queue::Holding"]
+ # ftptrainees haven't access to holding, use a temp directory instead
+ if not os.access(self.holding_dir, os.W_OK):
+ self.holding_dir = Config()["Dir::TempPath"]
def copy_to_holding(self, filename):
base_filename = os.path.basename(filename)
if f.has_key("othercomponents"):
new[pkg]["othercomponents"] = f["othercomponents"]
+ # Fix up the list of target suites
+ cnf = Config()
+ for suite in changes["suite"].keys():
+ override = cnf.Find("Suite::%s::OverrideSuite" % (suite))
+ if override:
+ (olderr, newerr) = (get_suite(suite, session) == None,
+ get_suite(override, session) == None)
+ if olderr or newerr:
+                (oinv, ninv) = ("", "")
+                if olderr: oinv = "invalid "
+                if newerr: ninv = "invalid "
+ print "warning: overriding %ssuite %s to %ssuite %s" % (
+ oinv, suite, ninv, override)
+ del changes["suite"][suite]
+ changes["suite"][override] = 1
+
for suite in changes["suite"].keys():
for pkg in new.keys():
ql = get_override(pkg, suite, new[pkg]["component"], new[pkg]["type"], session)
self.warnings = []
self.notes = []
+ self.later_check_files = []
+
self.pkg.reset()
def package_info(self):
for title, messages in msgs:
if messages:
msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
- msg += '\n'
+ msg += '\n\n'
return msg
self.pkg.changes["changedbyemail"] = ""
self.rejects.append("%s: Changed-By field ('%s') failed to parse: %s" \
- % (filename, changes["changed-by"], msg))
+ % (filename, self.pkg.changes["changed-by"], msg))
# Ensure all the values in Closes: are numbers
if self.pkg.changes.has_key("closes"):
for f in file_keys:
ret = holding.copy_to_holding(f)
if ret is not None:
- # XXX: Should we bail out here or try and continue?
- self.rejects.append(ret)
+ self.warnings.append('Could not copy %s to holding; will attempt to find in DB later' % f)
os.chdir(cwd)
# if in the pool or in a queue other than unchecked, reject
if (dbc.in_queue is None) \
or (dbc.in_queue is not None
- and dbc.in_queue.queue_name != 'unchecked'):
+ and dbc.in_queue.queue_name not in ["unchecked", "newstage"]):
self.rejects.append("%s file already known to dak" % base_filename)
except NoResultFound, e:
# not known, good
if os.path.exists(f):
self.rejects.append("Can't read `%s'. [permission denied]" % (f))
else:
- self.rejects.append("Can't read `%s'. [file not found]" % (f))
+ # Don't directly reject, mark to check later to deal with orig's
+ # we can find in the pool
+ self.later_check_files.append(f)
entry["type"] = "unreadable"
continue
self.check_dsc_against_db(dsc_filename, session)
session.close()
+ # Finally, check if we're missing any files
+ for f in self.later_check_files:
+            self.rejects.append("Could not find file %s referenced in changes" % f)
+
return True
###########################################################################
self.check_dm_upload(fpr, session)
else:
# Check source-based permissions for other types
- if self.pkg.changes["architecture"].has_key("source"):
- if fpr.source_acl.access_level is None:
- rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
- rej += '\nPlease contact ftpmaster if you think this is incorrect'
- self.rejects.append(rej)
- return
- else:
- # If not a DM, we allow full upload rights
- uid_email = "%s@debian.org" % (fpr.uid.uid)
- self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
+ if self.pkg.changes["architecture"].has_key("source") and \
+ fpr.source_acl.access_level is None:
+ rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
+ rej += '\nPlease contact ftpmaster if you think this is incorrect'
+ self.rejects.append(rej)
+ return
+ # If not a DM, we allow full upload rights
+ uid_email = "%s@debian.org" % (fpr.uid.uid)
+ self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
# Check binary upload permissions
# Add the .dsc file to the DB first
for newfile, entry in self.pkg.files.items():
if entry["type"] == "dsc":
- dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
+ source, dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
for j in pfs:
poolfiles.append(j)
# If this is a sourceful diff only upload that is moving
# cross-component we need to copy the .orig files into the new
# component too for the same reasons as above.
+ # XXX: mhy: I think this should be in add_dsc_to_db
if self.pkg.changes["architecture"].has_key("source"):
for orig_file in self.pkg.orig_files.keys():
if not self.pkg.orig_files[orig_file].has_key("id"):
new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
# TODO: Care about size/md5sum collisions etc
- (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session)
+ (found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
+ # TODO: Uhm, what happens if newf isn't None - something has gone badly and we should cope
if newf is None:
utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
- # TODO: Check that there's only 1 here
- source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0]
- dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
- dscf.poolfile_id = newf.file_id
- session.add(dscf)
session.flush()
+ # Don't reference the old file from this changes
+ for p in poolfiles:
+ if p.file_id == oldf.file_id:
+ poolfiles.remove(p)
+
poolfiles.append(newf)
+ # Fix up the DSC references
+ toremove = []
+
+ for df in source.srcfiles:
+ if df.poolfile.file_id == oldf.file_id:
+ # Add a new DSC entry and mark the old one for deletion
+ # Don't do it in the loop so we don't change the thing we're iterating over
+ newdscf = DSCFile()
+ newdscf.source_id = source.source_id
+ newdscf.poolfile_id = newf.file_id
+ session.add(newdscf)
+
+ toremove.append(df)
+
+ for df in toremove:
+ session.delete(df)
+
+ # Flush our changes
+ session.flush()
+
+ # Make sure that our source object is up-to-date
+ session.expire(source)
+
# Install the files into the pool
for newfile, entry in self.pkg.files.items():
destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
- # Send accept mail, announce to lists, close bugs and check for
- # override disparities
- if not cnf["Dinstall::Options::No-Mail"]:
- self.update_subst()
- self.Subst["__SUITE__"] = ""
- self.Subst["__SUMMARY__"] = summary
- mail_message = utils.TemplateSubst(self.Subst,
- os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
- utils.send_mail(mail_message)
- self.announce(short_summary, 1)
+ self.update_subst()
+ self.Subst["__SUITE__"] = ""
+ self.Subst["__SUMMARY__"] = summary
+ mail_message = utils.TemplateSubst(self.Subst,
+ os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
+ utils.send_mail(mail_message)
+ self.announce(short_summary, 1)
## Helper stuff for DebBugs Version Tracking
if cnf.Find("Dir::Queue::BTSVersionTrack"):
cnf = Config()
- # Abandon the check if:
- # a) override disparity checks have been disabled
- # b) we're not sending mail
- if not cnf.FindB("Dinstall::OverrideDisparityCheck") or \
- cnf["Dinstall::Options::No-Mail"]:
+ # Abandon the check if override disparity checks have been disabled
+ if not cnf.FindB("Dinstall::OverrideDisparityCheck"):
return
summary = self.pkg.check_override()
os.close(dest_fd)
###########################################################################
- def do_reject (self, manual=0, reject_message="", note=""):
+ def do_reject (self, manual=0, reject_message="", notes=""):
"""
Reject an upload. If called without a reject message or C{manual} is
true, spawn an editor so the user can write one.
if manual and not reject_message:
(fd, temp_filename) = utils.temp_filename()
temp_file = os.fdopen(fd, 'w')
- if len(note) > 0:
- for line in note:
- temp_file.write(line)
+ if len(notes) > 0:
+ for note in notes:
+ temp_file.write("\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s" \
+ % (note.author, note.version, note.notedate, note.comment))
temp_file.close()
editor = os.environ.get("EDITOR","vi")
answer = 'E'
user_email_address = utils.whoami() + " <%s>" % (cnf["Dinstall::MyAdminAddress"])
self.Subst["__REJECTOR_ADDRESS__"] = user_email_address
self.Subst["__MANUAL_REJECT_MESSAGE__"] = reject_message
+ self.Subst["__REJECT_MESSAGE__"] = ""
self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]
reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
# Write the rejection email out as the <foo>.reason file
os.close(reason_fd)
- # Send the rejection mail if appropriate
- if not cnf["Dinstall::Options::No-Mail"]:
- utils.send_mail(reject_mail_message)
+ # Send the rejection mail
+ utils.send_mail(reject_mail_message)
if self.logger:
self.logger.log(["rejected", self.pkg.changes_file])
cansave = 1
if not cansave:
- self.reject.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
+ self.rejects.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
################################################################################
def check_binary_against_db(self, filename, session):
orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
match = 1
+ # Don't bitch that we couldn't find this file later
+ try:
+ self.later_check_files.remove(dsc_name)
+ except ValueError:
+ pass
+
+
if not match:
self.rejects.append("can not overwrite existing copy of '%s' already in the archive." % (dsc_name))
u.check_override()
# Send accept mail, announce to lists and close bugs
- if announce and not cnf["Dinstall::Options::No-Mail"]:
+ if announce:
template = os.path.join(cnf["Dir::Templates"], announce)
u.update_subst()
u.Subst["__SUITE__"] = ""
def do_autobyhand(u, summary, short_summary, chg, session):
print "Attempting AUTOBYHAND."
- byhandleft = True
+ byhandleft = False
for f, entry in u.pkg.files.items():
byhandfile = f
if result == 0:
os.unlink(byhandfile)
- del entry
+ del u.pkg.files[f]
else:
print "Error processing %s, left as byhand." % (f)
byhandleft = True
session.add(chg)
session.commit()
- if not cnf["Dinstall::Options::No-Mail"]:
- print "Sending new ack."
- template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
- u.update_subst()
- u.Subst["__SUMMARY__"] = summary
- new_ack_message = utils.TemplateSubst(u.Subst, template)
- utils.send_mail(new_ack_message)
+ print "Sending new ack."
+ template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
+ u.update_subst()
+ u.Subst["__SUMMARY__"] = summary
+ new_ack_message = utils.TemplateSubst(u.Subst, template)
+ utils.send_mail(new_ack_message)
################################################################################
def our_raw_input(prompt=""):
if prompt:
- sys.stdout.write(prompt)
+ while 1:
+ try:
+ sys.stdout.write(prompt)
+ break
+ except IOError:
+ pass
sys.stdout.flush()
try:
ret = raw_input()
def send_mail (message, filename=""):
"""sendmail wrapper, takes _either_ a message string or a file as arguments"""
+ # Check whether we're supposed to be sending mail
+ if Cnf.has_key("Dinstall::Options::No-Mail") and Cnf["Dinstall::Options::No-Mail"]:
+ return
+
# If we've been passed a string dump it into a temporary file
if message:
(fd, filename) = tempfile.mkstemp()
if exit_status:
rejects.append("gpgv failed while checking %s." % (sig_filename))
if status.strip():
- rejects.append(prefix_multi_line_string(status, " [GPG status-fd output:] "), "")
+ rejects.append(prefix_multi_line_string(status, " [GPG status-fd output:] "))
else:
- rejects.append(prefix_multi_line_string(output, " [GPG output:] "), "")
+ rejects.append(prefix_multi_line_string(output, " [GPG output:] "))
return (None, rejects)
# Sanity check the good stuff we expect
$keep_files = '(status|\.message|README)$';
# file patterns that aren't deleted right away
-$valid_files = '(\.changes|\.tar\.gz|\.dsc|\.u?deb|diff\.gz|\.sh)$';
+$valid_files = '(\.changes|\.tar\.(?:gz|bz2)|\.dsc|\.u?deb|diff\.gz|\.sh)$';
# Change files to mode 644 locally (after md5 check) or only on master?
$chmod_on_target = 0;
$keep_files = '(status|\.message|README)$';
# file patterns that aren't deleted right away
-$valid_files = '(\.changes|\.tar\.gz|\.dsc|\.u?deb|diff\.gz|\.sh)$';
+$valid_files = '(\.changes|\.tar\.(?:gz|bz2)|\.dsc|\.u?deb|diff\.gz|\.sh)$';
# Change files to mode 644 locally (after md5 check) or only on master?
$chmod_on_target = 0;
$keep_files = '(status|\.message|README)$';
# file patterns that aren't deleted right away
-$valid_files = '(\.changes|\.tar\.gz|\.dsc|\.u?deb|diff\.gz|\.sh)$';
+$valid_files = '(\.changes|\.tar\.(?:gz|bz2)|\.dsc|\.u?deb|diff\.gz|\.sh)$';
# Change files to mode 644 locally (after md5 check) or only on master?
$chmod_on_target = 0;
r'Archive maintenance timestamp \(([^\)]*)\): (\d{2}):(\d{2}):(\d{2})$')
UNSAFE = re.compile(r'[^a-zA-Z/\._:0-9\- ]')
-graphs = {"dinstall1": {"keystolist":["pg_dump1", "i18n 1", "accepted", "make-suite-file-list", "apt-ftparchive",
+graphs = {"dinstall1": {"keystolist":["pg_dump1", "i18n 1", "accepted", "dominate", "generate-filelist", "apt-ftparchive",
"pdiff", "release files", "w-b", "i18n 2", "apt-ftparchive cleanup"],
"showothers":True},
"dinstall2": {"keystolist":['External Updates', 'p-u-new', 'o-p-u-new', 'cruft', 'import-keyring', 'overrides', 'cleanup', 'scripts', 'mirror hardlinks', 'stats', 'compress', "pkg-file-mapping" ],
raise Exception("I don't like command line arguments including char '%s'"%m.group(0))
if args:
- for l in os.popen('bzgrep -H "Archive maintenance timestamp" "'+'" "'.join(args)+'"'):
+ for l in os.popen('bzgrep -H "^Archive maintenance timestamp" "'+'" "'.join(args)+'"'):
m = LINE.match(l)
if not m:
raise Exception("woops '%s'"%l)
<li><a href="#new">NEW</a></li>
<li><a href="#pending">Pending removals</a></li>
<li><a href="#removed">Removed packages</a></li>
+ <li><a href="#cruft">Cruft Report</a></li>
<li><a href="#testing">Testing</a></li>
<li><a href="#stable">Stable</a></li>
<li><a href="#rejections">Rejections</a></li>
</p>
</div>
+ <div id="cruft">
+ <h1>Cruft Report</h1>
+ <p>Some packages which need to be removed manually can be found in
+ <a href="cruft-report-daily.txt">the cruft-report</a>.</p>
+ </div>
+
<div id="testing">
<h1>Testing</h1>
<p>Squeeze is testing, sid is unstable. For more details please look