dak queue-report | mail -e -s "NEW and BYHAND on $(date +%D)" team@backports.debian.org
# and one on crufty packages
log "Sending information about crufty packages"
- dak cruft-report > $webdir/cruft-report-daily.txt
-# dak cruft-report -s experimental >> $webdir/cruft-report-daily.txt
+ dak cruft-report -R > $webdir/cruft-report-daily.txt
+# dak cruft-report -R -s experimental >> $webdir/cruft-report-daily.txt
cat $webdir/cruft-report-daily.txt | mail -e -s "Debian archive cruft report for $(date +%D)" team@backports.debian.org
}
dak queue-report -d new,proposedupdates | mail -e -s "NEW and BYHAND on $(date +%D)" team@backports.debian.org
# and one on crufty packages
-dak cruft-report -m bdo -s squeeze-backports > $webdir/cruft-report-daily.txt
+dak cruft-report -R -m bdo -s squeeze-backports > $webdir/cruft-report-daily.txt
cat $webdir/cruft-report-daily.txt | mail -e -s "Debian backports archive cruft report for $(date +%D)" team@backports.debian.org
echo Daily cron scripts successful.
dak queue-report | mail -a "X-Debian: DAK" -e -s "NEW and BYHAND on $(date +%D)" ftpmaster@ftp-master.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
# and one on crufty packages
log "Sending information about crufty packages"
- dak cruft-report > $webdir/cruft-report-daily.txt
- dak cruft-report -s experimental >> $webdir/cruft-report-daily.txt
+ dak cruft-report -R > $webdir/cruft-report-daily.txt
+ dak cruft-report -R -s experimental >> $webdir/cruft-report-daily.txt
cat $webdir/cruft-report-daily.txt | mail -a "X-Debian: DAK" -e -s "Debian archive cruft report for $(date +%D)" ftpmaster@ftp-master.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
}
-h, --help show this help and exit.
-m, --mode=MODE chose the MODE to run in (full, daily, bdo).
-s, --suite=SUITE check suite SUITE.
+ -R, --rdep-check check reverse dependencies
-w, --wanna-build-dump where to find the copies of http://buildd.debian.org/stats/*.txt"""
sys.exit(exit_code)
order by ub.package"""
return session.execute(query, { 'suite_id': suite_id })
-def reportWithoutSource(suite_name, suite_id, session):
+def reportWithoutSource(suite_name, suite_id, session, rdeps=False):
rows = queryWithoutSource(suite_id, session)
title = 'packages without source in suite %s' % suite_name
if rows.rowcount > 0:
print "* package %s in version %s is no longer built from source" % \
(package, version)
print " - suggested command:"
- print " dak rm -m %s -s %s -a all -p -R -b %s\n" % \
+ print " dak rm -m %s -s %s -a all -p -R -b %s" % \
(message, suite_name, package)
+ if rdeps:
+ if utils.check_reverse_depends([package], suite_name, ["all"], session, True):
+ print
+ else:
+ print " - No dependency problem found\n"
+ else:
+ print
def queryNewerAll(suite_name, session):
"""searches for arch != all packages that have an arch == all
select * from outdated_packages order by source"""
return session.execute(query, { 'suite_id': suite_id })
-def reportNBS(suite_name, suite_id):
+def reportNBS(suite_name, suite_id, rdeps=False):
session = DBConn().session()
nbsRows = queryNBS(suite_id, session)
title = 'NBS packages in suite %s' % suite_name
print " on %s" % arch_string
print " - suggested command:"
message = '"[auto-cruft] NBS (no longer built by %s)"' % source
- print " dak rm -m %s -s %s -a %s -p -R -b %s\n" % \
+ print " dak rm -m %s -s %s -a %s -p -R -b %s" % \
(message, suite_name, arch_string, pkg_string)
+ if rdeps:
+ if utils.check_reverse_depends(pkg_list, suite_name, arch_list, session, True):
+ print
+ else:
+ print " - No dependency problem found\n"
+ else:
+ print
session.close()
-def reportAllNBS(suite_name, suite_id, session):
-    reportWithoutSource(suite_name, suite_id, session)
+def reportAllNBS(suite_name, suite_id, session, rdeps=False):
+    # Driver for the three NBS sub-reports. 'rdeps' propagates the new
+    # -R/--rdep-check option (reverse-dependency checking) to the reports
+    # that can make use of it; reportNewerAll does not take it.
+    reportWithoutSource(suite_name, suite_id, session, rdeps)
     reportNewerAll(suite_name, session)
-    reportNBS(suite_name, suite_id)
+    reportNBS(suite_name, suite_id, rdeps)
################################################################################
################################################################################
-def report_outdated_nonfree(suite, session):
+def report_outdated_nonfree(suite, session, rdeps=False):
packages = {}
query = """WITH outdated_sources AS (
for binary in sorted(packages[source]):
binaries.add(binary)
archs = archs.union(packages[source][binary])
- print ' dak rm -m %s -s %s -a %s -p -R -b %s\n' % \
+ print ' dak rm -m %s -s %s -a %s -p -R -b %s' % \
(message, suite, ','.join(archs), ' '.join(binaries))
+ if rdeps:
+ if utils.check_reverse_depends(list(binaries), suite, archs, session, True):
+ print
+ else:
+ print " - No dependency problem found\n"
+ else:
+ print
################################################################################
Arguments = [('h',"help","Cruft-Report::Options::Help"),
('m',"mode","Cruft-Report::Options::Mode", "HasArg"),
+ ('R',"rdep-check", "Cruft-Report::Options::Rdep-Check"),
('s',"suite","Cruft-Report::Options::Suite","HasArg"),
('w',"wanna-build-dump","Cruft-Report::Options::Wanna-Build-Dump","HasArg")]
- for i in [ "help" ]:
+ for i in [ "help", "Rdep-Check" ]:
if not cnf.has_key("Cruft-Report::Options::%s" % (i)):
cnf["Cruft-Report::Options::%s" % (i)] = ""
if Options["Help"]:
usage()
+ if Options["Rdep-Check"]:
+ rdeps = True
+ else:
+ rdeps = False
+
# Set up checks based on mode
if Options["Mode"] == "daily":
checks = [ "nbs", "nviu", "nvit", "obsolete source", "outdated non-free", "nfu" ]
report_obsolete_source(suite_name, session)
if "nbs" in checks:
- reportAllNBS(suite_name, suite_id, session)
+ reportAllNBS(suite_name, suite_id, session, rdeps)
if "outdated non-free" in checks:
- report_outdated_nonfree(suite_name, session)
+ report_outdated_nonfree(suite_name, session, rdeps)
bin_not_built = {}
from daklib.dbconn import *
from daklib import utils
from daklib.dak_exceptions import *
-from daklib.regexes import re_strip_source_version, re_build_dep_arch, re_bin_only_nmu
+from daklib.regexes import re_strip_source_version, re_bin_only_nmu
import debianbts as bts
################################################################################
################################################################################
def reverse_depends_check(removals, suite, arches=None, session=None):
- dbsuite = get_suite(suite, session)
- cnf = Config()
-
print "Checking reverse dependencies..."
- dep_problem = 0
- p2c = {}
- all_broken = {}
- if arches:
- all_arches = set(arches)
- else:
- all_arches = set([x.arch_string for x in get_suite_architectures(suite)])
- all_arches -= set(["source", "all"])
- metakey_d = get_or_set_metadatakey("Depends", session)
- metakey_p = get_or_set_metadatakey("Provides", session)
- params = {
- 'suite_id': dbsuite.suite_id,
- 'metakey_d_id': metakey_d.key_id,
- 'metakey_p_id': metakey_p.key_id,
- }
- for architecture in all_arches | set(['all']):
- deps = {}
- sources = {}
- virtual_packages = {}
- params['arch_id'] = get_architecture(architecture, session).arch_id
-
- statement = '''
- SELECT b.id, b.package, s.source, c.name as component,
- (SELECT bmd.value FROM binaries_metadata bmd WHERE bmd.bin_id = b.id AND bmd.key_id = :metakey_d_id) AS depends,
- (SELECT bmp.value FROM binaries_metadata bmp WHERE bmp.bin_id = b.id AND bmp.key_id = :metakey_p_id) AS provides
- FROM binaries b
- JOIN bin_associations ba ON b.id = ba.bin AND ba.suite = :suite_id
- JOIN source s ON b.source = s.id
- JOIN files f ON b.file = f.id
- JOIN location l ON f.location = l.id
- JOIN component c ON l.component = c.id
- WHERE b.architecture = :arch_id'''
- query = session.query('id', 'package', 'source', 'component', 'depends', 'provides'). \
- from_statement(statement).params(params)
- for binary_id, package, source, component, depends, provides in query:
- sources[package] = source
- p2c[package] = component
- if depends is not None:
- deps[package] = depends
- # Maintain a counter for each virtual package. If a
- # Provides: exists, set the counter to 0 and count all
- # provides by a package not in the list for removal.
- # If the counter stays 0 at the end, we know that only
- # the to-be-removed packages provided this virtual
- # package.
- if provides is not None:
- for virtual_pkg in provides.split(","):
- virtual_pkg = virtual_pkg.strip()
- if virtual_pkg == package: continue
- if not virtual_packages.has_key(virtual_pkg):
- virtual_packages[virtual_pkg] = 0
- if package not in removals:
- virtual_packages[virtual_pkg] += 1
-
- # If a virtual package is only provided by the to-be-removed
- # packages, treat the virtual package as to-be-removed too.
- for virtual_pkg in virtual_packages.keys():
- if virtual_packages[virtual_pkg] == 0:
- removals.append(virtual_pkg)
-
- # Check binary dependencies (Depends)
- for package in deps.keys():
- if package in removals: continue
- parsed_dep = []
- try:
- parsed_dep += apt_pkg.ParseDepends(deps[package])
- except ValueError as e:
- print "Error for package %s: %s" % (package, e)
- for dep in parsed_dep:
- # Check for partial breakage. If a package has a ORed
- # dependency, there is only a dependency problem if all
- # packages in the ORed depends will be removed.
- unsat = 0
- for dep_package, _, _ in dep:
- if dep_package in removals:
- unsat += 1
- if unsat == len(dep):
- component = p2c[package]
- source = sources[package]
- if component != "main":
- source = "%s/%s" % (source, component)
- all_broken.setdefault(source, {}).setdefault(package, set()).add(architecture)
- dep_problem = 1
-
- if all_broken:
- print "# Broken Depends:"
- for source, bindict in sorted(all_broken.items()):
- lines = []
- for binary, arches in sorted(bindict.items()):
- if arches == all_arches or 'all' in arches:
- lines.append(binary)
- else:
- lines.append('%s [%s]' % (binary, ' '.join(sorted(arches))))
- print '%s: %s' % (source, lines[0])
- for line in lines[1:]:
- print ' ' * (len(source) + 2) + line
- print
-
- # Check source dependencies (Build-Depends and Build-Depends-Indep)
- all_broken.clear()
- metakey_bd = get_or_set_metadatakey("Build-Depends", session)
- metakey_bdi = get_or_set_metadatakey("Build-Depends-Indep", session)
- params = {
- 'suite_id': dbsuite.suite_id,
- 'metakey_ids': (metakey_bd.key_id, metakey_bdi.key_id),
- }
- statement = '''
- SELECT s.id, s.source, string_agg(sm.value, ', ') as build_dep
- FROM source s
- JOIN source_metadata sm ON s.id = sm.src_id
- WHERE s.id in
- (SELECT source FROM src_associations
- WHERE suite = :suite_id)
- AND sm.key_id in :metakey_ids
- GROUP BY s.id, s.source'''
- query = session.query('id', 'source', 'build_dep').from_statement(statement). \
- params(params)
- for source_id, source, build_dep in query:
- if source in removals: continue
- parsed_dep = []
- if build_dep is not None:
- # Remove [arch] information since we want to see breakage on all arches
- build_dep = re_build_dep_arch.sub("", build_dep)
- try:
- parsed_dep += apt_pkg.ParseDepends(build_dep)
- except ValueError as e:
- print "Error for source %s: %s" % (source, e)
- for dep in parsed_dep:
- unsat = 0
- for dep_package, _, _ in dep:
- if dep_package in removals:
- unsat += 1
- if unsat == len(dep):
- component = DBSource.get(source_id, session).get_component_name()
- if component != "main":
- source = "%s/%s" % (source, component)
- all_broken.setdefault(source, set()).add(utils.pp_deps(dep))
- dep_problem = 1
-
- if all_broken:
- print "# Broken Build-Depends:"
- for source, bdeps in sorted(all_broken.items()):
- bdeps = sorted(bdeps)
- print '%s: %s' % (source, bdeps[0])
- for bdep in bdeps[1:]:
- print ' ' * (len(source) + 2) + bdep
- print
-
- if dep_problem:
+ if utils.check_reverse_depends(removals, suite, arches, session):
print "Dependency problem found."
if not Options["No-Action"]:
game_over()
from dbconn import DBConn, get_architecture, get_component, get_suite, \
get_override_type, Keyring, session_wrapper, \
- get_active_keyring_paths, get_primary_keyring_path
+ get_active_keyring_paths, get_primary_keyring_path, \
+ get_suite_architectures, get_or_set_metadatakey, DBSource
from sqlalchemy import desc
from dak_exceptions import *
from gpg import SignedFile
from regexes import re_html_escaping, html_escaping, re_single_line_field, \
re_multi_line_field, re_srchasver, re_taint_free, \
re_gpg_uid, re_re_mark, re_whitespace_comment, re_issource, \
- re_is_orig_source
+ re_is_orig_source, re_build_dep_arch
from formats import parse_format, validate_changes_format
from srcformats import get_format_from_string
return open(tmp.name, 'r').read()
finally:
os.unlink(tmp.name)
+
+################################################################################
+
+def check_reverse_depends(removals, suite, arches=None, session=None, cruft=False):
+    """
+    Check whether removing the given binary packages from a suite would
+    break reverse (binary or build) dependencies, printing a report of any
+    breakage found.
+
+    NOTE(review): 'removals' is extended in place with virtual packages
+    that are provided only by the packages being removed.
+
+    @type removals: list
+    @param removals: names of the packages scheduled for removal
+
+    @type suite: string
+    @param suite: name of the suite to check
+
+    @type arches: list
+    @param arches: architecture names to restrict the check to; defaults to
+       all architectures of the suite except "source" and "all"
+
+    @type session: SQLAlchemy session
+    @param session: database session to use
+
+    @type cruft: bool
+    @param cruft: if True, format the output indented for embedding in a
+       cruft-report entry; otherwise use the standalone report format
+
+    @rtype: int
+    @return: non-zero if at least one dependency problem was found
+    """
+    dbsuite = get_suite(suite, session)
+    dep_problem = 0
+    p2c = {}
+    all_broken = {}
+    if arches:
+        all_arches = set(arches)
+    else:
+        all_arches = set([x.arch_string for x in get_suite_architectures(suite)])
+    all_arches -= set(["source", "all"])
+    metakey_d = get_or_set_metadatakey("Depends", session)
+    metakey_p = get_or_set_metadatakey("Provides", session)
+    params = {
+        'suite_id': dbsuite.suite_id,
+        'metakey_d_id': metakey_d.key_id,
+        'metakey_p_id': metakey_p.key_id,
+    }
+    # First pass: binary Depends, checked once per architecture (plus "all").
+    for architecture in all_arches | set(['all']):
+        deps = {}
+        sources = {}
+        virtual_packages = {}
+        params['arch_id'] = get_architecture(architecture, session).arch_id
+
+        statement = '''
+            SELECT b.id, b.package, s.source, c.name as component,
+                (SELECT bmd.value FROM binaries_metadata bmd WHERE bmd.bin_id = b.id AND bmd.key_id = :metakey_d_id) AS depends,
+                (SELECT bmp.value FROM binaries_metadata bmp WHERE bmp.bin_id = b.id AND bmp.key_id = :metakey_p_id) AS provides
+                FROM binaries b
+                JOIN bin_associations ba ON b.id = ba.bin AND ba.suite = :suite_id
+                JOIN source s ON b.source = s.id
+                JOIN files f ON b.file = f.id
+                JOIN location l ON f.location = l.id
+                JOIN component c ON l.component = c.id
+                WHERE b.architecture = :arch_id'''
+        query = session.query('id', 'package', 'source', 'component', 'depends', 'provides'). \
+            from_statement(statement).params(params)
+        for binary_id, package, source, component, depends, provides in query:
+            sources[package] = source
+            p2c[package] = component
+            if depends is not None:
+                deps[package] = depends
+            # Maintain a counter for each virtual package. If a
+            # Provides: exists, set the counter to 0 and count all
+            # provides by a package not in the list for removal.
+            # If the counter stays 0 at the end, we know that only
+            # the to-be-removed packages provided this virtual
+            # package.
+            if provides is not None:
+                for virtual_pkg in provides.split(","):
+                    virtual_pkg = virtual_pkg.strip()
+                    if virtual_pkg == package: continue
+                    if not virtual_packages.has_key(virtual_pkg):
+                        virtual_packages[virtual_pkg] = 0
+                    if package not in removals:
+                        virtual_packages[virtual_pkg] += 1
+
+        # If a virtual package is only provided by the to-be-removed
+        # packages, treat the virtual package as to-be-removed too.
+        for virtual_pkg in virtual_packages.keys():
+            if virtual_packages[virtual_pkg] == 0:
+                removals.append(virtual_pkg)
+
+        # Check binary dependencies (Depends)
+        for package in deps.keys():
+            if package in removals: continue
+            parsed_dep = []
+            try:
+                parsed_dep += apt_pkg.ParseDepends(deps[package])
+            except ValueError as e:
+                print "Error for package %s: %s" % (package, e)
+            for dep in parsed_dep:
+                # Check for partial breakage. If a package has a ORed
+                # dependency, there is only a dependency problem if all
+                # packages in the ORed depends will be removed.
+                unsat = 0
+                for dep_package, _, _ in dep:
+                    if dep_package in removals:
+                        unsat += 1
+                if unsat == len(dep):
+                    component = p2c[package]
+                    source = sources[package]
+                    if component != "main":
+                        source = "%s/%s" % (source, component)
+                    all_broken.setdefault(source, {}).setdefault(package, set()).add(architecture)
+                    dep_problem = 1
+
+    if all_broken:
+        if cruft:
+            print " - broken Depends:"
+        else:
+            print "# Broken Depends:"
+        for source, bindict in sorted(all_broken.items()):
+            lines = []
+            for binary, arches in sorted(bindict.items()):
+                if arches == all_arches or 'all' in arches:
+                    lines.append(binary)
+                else:
+                    lines.append('%s [%s]' % (binary, ' '.join(sorted(arches))))
+            if cruft:
+                print ' %s: %s' % (source, lines[0])
+            else:
+                print '%s: %s' % (source, lines[0])
+            for line in lines[1:]:
+                if cruft:
+                    print ' ' + ' ' * (len(source) + 2) + line
+                else:
+                    print ' ' * (len(source) + 2) + line
+        if not cruft:
+            print
+
+    # Second pass: source dependencies (Build-Depends and Build-Depends-Indep)
+    all_broken.clear()
+    metakey_bd = get_or_set_metadatakey("Build-Depends", session)
+    metakey_bdi = get_or_set_metadatakey("Build-Depends-Indep", session)
+    params = {
+        'suite_id': dbsuite.suite_id,
+        'metakey_ids': (metakey_bd.key_id, metakey_bdi.key_id),
+    }
+    statement = '''
+        SELECT s.id, s.source, string_agg(sm.value, ', ') as build_dep
+           FROM source s
+           JOIN source_metadata sm ON s.id = sm.src_id
+           WHERE s.id in
+               (SELECT source FROM src_associations
+                   WHERE suite = :suite_id)
+               AND sm.key_id in :metakey_ids
+           GROUP BY s.id, s.source'''
+    query = session.query('id', 'source', 'build_dep').from_statement(statement). \
+        params(params)
+    for source_id, source, build_dep in query:
+        if source in removals: continue
+        parsed_dep = []
+        if build_dep is not None:
+            # Remove [arch] information since we want to see breakage on all arches
+            build_dep = re_build_dep_arch.sub("", build_dep)
+            try:
+                parsed_dep += apt_pkg.ParseDepends(build_dep)
+            except ValueError as e:
+                print "Error for source %s: %s" % (source, e)
+        for dep in parsed_dep:
+            unsat = 0
+            for dep_package, _, _ in dep:
+                if dep_package in removals:
+                    unsat += 1
+            if unsat == len(dep):
+                component = DBSource.get(source_id, session).get_component_name()
+                if component != "main":
+                    source = "%s/%s" % (source, component)
+                all_broken.setdefault(source, set()).add(pp_deps(dep))
+                dep_problem = 1
+
+    if all_broken:
+        if cruft:
+            print " - broken Build-Depends:"
+        else:
+            print "# Broken Build-Depends:"
+        for source, bdeps in sorted(all_broken.items()):
+            bdeps = sorted(bdeps)
+            if cruft:
+                print ' %s: %s' % (source, bdeps[0])
+            else:
+                print '%s: %s' % (source, bdeps[0])
+            for bdep in bdeps[1:]:
+                if cruft:
+                    print ' ' + ' ' * (len(source) + 2) + bdep
+                else:
+                    print ' ' * (len(source) + 2) + bdep
+        if not cruft:
+            print
+
+    return dep_problem