import apt_inst
import apt_pkg
import utils
+import commands
+import shutil
+import textwrap
from types import *
+import yaml
+
from dak_exceptions import *
from changes import *
from regexes import *
from summarystats import SummaryStats
from utils import parse_changes
from textutils import fix_maintainer
+from binary import Binary
###############################################################################
-def get_type(f, session=None):
+def get_type(f, session):
"""
Get the file type of C{f}
@type f: dict
@param f: file entry from Changes object
+ @type session: SQLA Session
+ @param session: SQL Alchemy session object
+
@rtype: string
@return: filetype
"""
- if session is None:
- session = DBConn().session()
-
# Determine the type
if f.has_key("dbtype"):
- file_type = file["dbtype"]
+ file_type = f["dbtype"]
elif f["type"] in [ "orig.tar.gz", "orig.tar.bz2", "tar.gz", "tar.bz2", "diff.gz", "diff.bz2", "dsc" ]:
file_type = "dsc"
else:
pkg = f["package"]
priority = f["priority"]
section = f["section"]
- file_type = get_type(f)
+ file_type = get_type(f, session)
component = f["component"]
if file_type == "dsc":
if new[pkg].has_key("othercomponents"):
print "WARNING: %s already present in %s distribution." % (pkg, new[pkg]["othercomponents"])
+ session.close()
+
return new
################################################################################
# This is a stupid default, but see the comments below
is_dm = False
- user = get_uid_from_fingerprint(changes["fingerprint"], session)
+ user = get_uid_from_fingerprint(fpr, session)
if user is not None:
uid = user.uid
uid_name = user.name
# Check the relevant fingerprint (which we have to have)
- for f in uid.fingerprint:
- if f.fingerprint == changes['fingerprint']:
+ for f in user.fingerprint:
+ if f.fingerprint == fpr:
is_dm = f.keyring.debian_maintainer
break
"""
def __init__(self):
+ self.logger = None
self.pkg = Changes()
self.reset()
# If 'dak process-unchecked' crashed out in the right place, architecture may still be a string.
if not self.pkg.changes.has_key("architecture") or not \
- isinstance(changes["architecture"], DictType):
+ isinstance(self.pkg.changes["architecture"], DictType):
self.pkg.changes["architecture"] = { "Unknown" : "" }
# and maintainer2047 may not exist.
(self.pkg.changes["changedby822"] != self.pkg.changes["maintainer822"]):
self.Subst["__MAINTAINER_FROM__"] = self.pkg.changes["changedby2047"]
- self.Subst["__MAINTAINER_TO__"] = "%s, %s" % (self.pkg.changes["changedby2047"], changes["maintainer2047"])
+ self.Subst["__MAINTAINER_TO__"] = "%s, %s" % (self.pkg.changes["changedby2047"], self.pkg.changes["maintainer2047"])
self.Subst["__MAINTAINER__"] = self.pkg.changes.get("changed-by", "Unknown")
else:
self.Subst["__MAINTAINER_FROM__"] = self.pkg.changes["maintainer2047"]
###########################################################################
def load_changes(self, filename):
"""
- @rtype boolean
+ @rtype: boolean
@rvalue: whether the changes file was valid or not. We may want to
reject even if this is True (see what gets put in self.rejects).
This is simply to prevent us even trying things later which will
fail because we couldn't properly parse the file.
"""
+ Cnf = Config()
self.pkg.changes_file = filename
# Parse the .changes field into a dictionary
# Parse the Files field from the .changes into another dictionary
try:
- self.pkg.files.update(build_file_list(self.pkg.changes))
+ self.pkg.files.update(utils.build_file_list(self.pkg.changes))
except ParseChangesError, line:
self.rejects.append("%s: parse error, can't grok: %s." % (filename, line))
return False
(source, dest) = args[1:3]
if self.pkg.changes["distribution"].has_key(source):
for arch in self.pkg.changes["architecture"].keys():
- if arch not in [ arch_string for a in get_suite_architectures(source) ]:
+ if arch not in [ a.arch_string for a in get_suite_architectures(source) ]:
self.notes.append("Mapping %s to %s for unreleased architecture %s." % (source, dest, arch))
del self.pkg.changes["distribution"][source]
self.pkg.changes["distribution"][dest] = 1
entry["maintainer"] = control.Find("Maintainer", "")
if f.endswith(".udeb"):
- files[f]["dbtype"] = "udeb"
+ self.pkg.files[f]["dbtype"] = "udeb"
elif f.endswith(".deb"):
- files[f]["dbtype"] = "deb"
+ self.pkg.files[f]["dbtype"] = "deb"
else:
self.rejects.append("%s is neither a .deb or a .udeb." % (f))
source_version = m.group(2)
if not source_version:
- source_version = files[f]["version"]
+ source_version = self.pkg.files[f]["version"]
entry["source package"] = source
entry["source version"] = source_version
# Check the version and for file overwrites
self.check_binary_against_db(f, session)
- b = Binary(f).scan_package()
- if len(b.rejects) > 0:
- for j in b.rejects:
- self.rejects.append(j)
+ # Temporarily disable contents generation until we change the table storage layout
+ #b = Binary(f)
+ #b.scan_package()
+ #if len(b.rejects) > 0:
+ # for j in b.rejects:
+ # self.rejects.append(j)
def source_file_checks(self, f, session):
entry = self.pkg.files[f]
def per_suite_file_checks(self, f, suite, session):
cnf = Config()
entry = self.pkg.files[f]
+ archive = utils.where_am_i()
# Skip byhand
if entry.has_key("byhand"):
return
+ # Check we have fields we need to do these checks
+ oktogo = True
+ for m in ['component', 'package', 'priority', 'size', 'md5sum']:
+ if not entry.has_key(m):
+ self.rejects.append("file '%s' does not have field %s set" % (f, m))
+ oktogo = False
+
+ if not oktogo:
+ return
+
# Handle component mappings
for m in cnf.ValueList("ComponentMappings"):
(source, dest) = m.split()
return
# Validate the component
- component = entry["component"]
- if not get_component(component, session):
+ if not get_component(entry["component"], session):
self.rejects.append("file '%s' has unknown component '%s'." % (f, component))
return
# Determine the location
location = cnf["Dir::Pool"]
- l = get_location(location, component, archive, session)
+ l = get_location(location, entry["component"], archive, session)
if l is None:
self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (component, archive))
entry["location id"] = -1
# Check for packages that have moved from one component to another
entry['suite'] = suite
- res = get_binary_components(files[f]['package'], suite, entry["architecture"], session)
+ res = get_binary_components(self.pkg.files[f]['package'], suite, entry["architecture"], session)
if res.rowcount > 0:
entry["othercomponents"] = res.fetchone()[0]
has_binaries = False
has_source = False
- s = DBConn().session()
+ session = DBConn().session()
for f, entry in self.pkg.files.items():
# Ensure the file does not already exist in one of the accepted directories
for suite in self.pkg.changes["distribution"].keys():
self.per_suite_file_checks(f, suite, session)
+ session.close()
+
# If the .changes file says it has source, it must have source.
if self.pkg.changes["architecture"].has_key("source"):
if not has_source:
# Build up the file list of files mentioned by the .dsc
try:
- self.pkg.dsc_files.update(utils.build_file_list(dsc, is_a_dsc=1))
+ self.pkg.dsc_files.update(utils.build_file_list(self.pkg.dsc, is_a_dsc=1))
except NoFilesFieldError:
self.rejects.append("%s: no Files: field." % (dsc_filename))
return False
# Validate the source and version fields
if not re_valid_pkg_name.match(self.pkg.dsc["source"]):
self.rejects.append("%s: invalid source name '%s'." % (dsc_filename, self.pkg.dsc["source"]))
- if not re_valid_version.match(dsc["version"]):
+ if not re_valid_version.match(self.pkg.dsc["version"]):
self.rejects.append("%s: invalid version number '%s'." % (dsc_filename, self.pkg.dsc["version"]))
# Bumping the version number of the .dsc breaks extraction by stable's
# Ensure there is a .tar.gz in the .dsc file
has_tar = False
- for f in dsc_files.keys():
+ for f in self.pkg.dsc_files.keys():
m = re_issource.match(f)
if not m:
self.rejects.append("%s: %s in Files field not recognised as source." % (dsc_filename, f))
self.rejects.append("%s: no .tar.gz or .orig.tar.gz in 'Files' field." % (dsc_filename))
# Ensure source is newer than existing source in target suites
+ session = DBConn().session()
self.check_source_against_db(dsc_filename, session)
-
- self.check_dsc_against_db(dsc_filename)
+ self.check_dsc_against_db(dsc_filename, session)
+ session.close()
return True
# Find the .dsc (again)
dsc_filename = None
- for f in self.files.keys():
- if files[f]["type"] == "dsc":
+ for f in self.pkg.files.keys():
+ if self.pkg.files[f]["type"] == "dsc":
dsc_filename = f
# If there isn't one, we have nothing to do. (We have reject()ed the upload already)
return
# Create a symlink mirror of the source files in our temporary directory
- for f in self.files.keys():
+ for f in self.pkg.files.keys():
m = re_issource.match(f)
if m:
src = os.path.join(source_dir, f)
return
# Get the upstream version
- upstr_version = re_no_epoch.sub('', dsc["version"])
+ upstr_version = re_no_epoch.sub('', self.pkg.dsc["version"])
if re_strip_revision.search(upstr_version):
upstr_version = re_strip_revision.sub('', upstr_version)
shutil.rmtree(tmpdir)
except OSError, e:
if e.errno != errno.EACCES:
+ print "foobar"
utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
self.rejects.append("%s: source tree could not be cleanly removed." % (self.pkg.dsc["source"]))
if result != 0:
utils.fubar("'%s' failed with result %s." % (cmd, result))
shutil.rmtree(tmpdir)
- except:
+ except Exception, e:
+ print "foobar2 (%s)" % e
utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
###########################################################################
# We need to deal with the original changes blob, as the fields we need
# might not be in the changes dict serialised into the .dak anymore.
- orig_changes = parse_deb822(self.pkg.changes['filecontents'])
+ orig_changes = utils.parse_deb822(self.pkg.changes['filecontents'])
# Copy the checksums over to the current changes dict. This will keep
# the existing modifications to it intact.
for j in utils._ensure_dsc_hash(self.pkg.dsc, self.pkg.dsc_files, hashname, hashfunc):
self.rejects.append(j)
- def check_hashes():
+ def check_hashes(self):
for m in utils.check_hash(".changes", self.pkg.files, "md5", apt_pkg.md5sum):
self.rejects.append(m)
for m in utils.check_size(".dsc", self.pkg.dsc_files):
self.rejects.append(m)
- for m in utils.ensure_hashes(self.pkg.changes, dsc, files, dsc_files):
- self.rejects.append(m)
+ self.ensure_hashes()
###########################################################################
def check_urgency(self):
# travel can cause errors on extraction]
def check_timestamps(self):
+ Cnf = Config()
+
future_cutoff = time.time() + int(Cnf["Dinstall::FutureTimeTravelGrace"])
past_cutoff = time.mktime(time.strptime(Cnf["Dinstall::PastCutoffYear"],"%Y"))
tar = TarTime(future_cutoff, past_cutoff)
- for filename, entry in self.pkg.files.keys():
+ for filename, entry in self.pkg.files.items():
if entry["type"] == "deb":
tar.reset()
try:
except:
self.rejects.append("%s: deb contents timestamp check failed [%s: %s]" % (filename, sys.exc_type, sys.exc_value))
+    ###########################################################################
+    def check_transition(self, session):
+        """
+        Reject a sourceful upload to unstable while its source package is
+        part of an ongoing release-team transition that has not completed,
+        i.e. the version the transition waits for has not reached testing.
+        Appends the rejection message to self.rejects; returns nothing.
+
+        @type session: SQLA Session
+        @param session: SQL Alchemy session object, used to look up the
+            current version of the transition source in testing
+        """
+        cnf = Config()
+
+        sourcepkg = self.pkg.changes["source"]
+
+        # No sourceful upload -> no need to do anything else, direct return
+        # We also work with unstable uploads, not experimental or those going to some
+        # proposed-updates queue
+        if "source" not in self.pkg.changes["architecture"] or \
+           "unstable" not in self.pkg.changes["distribution"]:
+            return
+
+        # Only check if a transitions file is configured and actually exists;
+        # otherwise there is nothing to check against.
+        transpath = cnf.get("Dinstall::Reject::ReleaseTransitions", "")
+        if transpath == "" or not os.path.exists(transpath):
+            return
+
+        # Parse the yaml file
+        # TODO(review): sourcefile is never closed.
+        # NOTE(review): yaml.load can construct arbitrary objects; the file is
+        # admin-maintained, but safe_load would be stricter — confirm the
+        # installed PyYAML supports it.
+        sourcefile = file(transpath, 'r')
+        sourcecontent = sourcefile.read()
+        try:
+            transitions = yaml.load(sourcecontent)
+        except yaml.YAMLError, msg:
+            # This shouldn't happen, there is a wrapper to edit the file which
+            # checks it, but we prefer to be safe than ending up rejecting
+            # everything.
+            utils.warn("Not checking transitions, the transitions file is broken: %s." % (msg))
+            return
+
+        # Now look through all defined transitions
+        for trans in transitions:
+            t = transitions[trans]
+            source = t["source"]
+            expected = t["new"]
+
+            # Will be None if nothing is in testing.
+            current = get_source_in_suite(source, "testing", session)
+            if current is not None:
+                compare = apt_pkg.VersionCompare(current.version, expected)
+
+            # NB: the short-circuit "or" guards "compare", which is only
+            # bound when current is not None.
+            if current is None or compare < 0:
+                # This is still valid, the current version in testing is older than
+                # the new version we wait for, or there is none in testing yet
+
+                # Check if the source we look at is affected by this.
+                if sourcepkg in t['packages']:
+                    # The source is affected, lets reject it.
+
+                    rejectmsg = "%s: part of the %s transition.\n\n" % (
+                        sourcepkg, trans)
+
+                    if current is not None:
+                        currentlymsg = "at version %s" % (current.version)
+                    else:
+                        currentlymsg = "not present in testing"
+
+                    rejectmsg += "Transition description: %s\n\n" % (t["reason"])
+
+                    rejectmsg += "\n".join(textwrap.wrap("""Your package
+is part of a testing transition designed to get %s migrated (it is
+currently %s, we need version %s). This transition is managed by the
+Release Team, and %s is the Release-Team member responsible for it.
+Please mail debian-release@lists.debian.org or contact %s directly if you
+need further assistance. You might want to upload to experimental until this
+transition is done."""
+                        % (source, currentlymsg, expected,t["rm"], t["rm"])))
+
+                    self.rejects.append(rejectmsg)
+                    return
+
###########################################################################
def check_signed_by_key(self):
"""Ensure the .changes is signed by an authorized uploader."""
session = DBConn().session()
+ self.check_transition(session)
+
(uid, uid_name, is_dm) = lookup_uid_from_fingerprint(self.pkg.changes["fingerprint"], session=session)
# match claimed name with actual name:
for suite in self.pkg.changes["distribution"].keys():
q = session.query(DBSource)
q = q.join(DBBinary).filter_by(package=b)
- q = q.join(BinAssociation).join(Suite).filter_by(suite)
+ q = q.join(BinAssociation).join(Suite).filter_by(suite_name=suite)
for s in q.all():
if s.source != self.pkg.changes["source"]:
if self.pkg.files[f].has_key("new"):
self.rejects.append("%s may not upload NEW file %s" % (uid, f))
+ session.close()
+
###########################################################################
def build_summaries(self):
""" Build a summary of changes the upload introduces. """
for bug in bugs:
summary += "%s " % (bug)
if action:
+ self.update_subst()
self.Subst["__BUG_NUMBER__"] = bug
if self.pkg.changes["distribution"].has_key("stable"):
self.Subst["__STABLE_WARNING__"] = """
distribution."""
else:
self.Subst["__STABLE_WARNING__"] = ""
- mail_message = utils.TemplateSubst(self.Subst, template)
- utils.send_mail(mail_message)
+ mail_message = utils.TemplateSubst(self.Subst, template)
+ utils.send_mail(mail_message)
# Clear up after ourselves
del self.Subst["__BUG_NUMBER__"]
del self.Subst["__STABLE_WARNING__"]
- if action:
- self.Logger.log(["closing bugs"] + bugs)
+ if action and self.logger:
+ self.logger.log(["closing bugs"] + bugs)
summary += "\n"
self.Subst["__SHORT_SUMMARY__"] = short_summary
for dist in self.pkg.changes["distribution"].keys():
- announce_list = Cnf.Find("Suite::%s::Announce" % (dist))
+ announce_list = cnf.Find("Suite::%s::Announce" % (dist))
if announce_list == "" or lists_done.has_key(announce_list):
continue
summary += "Announcing to %s\n" % (announce_list)
if action:
+ self.update_subst()
self.Subst["__ANNOUNCE_LIST_ADDRESS__"] = announce_list
if cnf.get("Dinstall::TrackingServer") and \
self.pkg.changes["architecture"].has_key("source"):
targetdir = cnf["Dir::Queue::Accepted"]
print "Accepting."
- self.Logger.log(["Accepting changes", self.pkg.changes_file])
+ if self.logger:
+ self.logger.log(["Accepting changes", self.pkg.changes_file])
- self.write_dot_dak(targetdir)
+ self.pkg.write_dot_dak(targetdir)
# Move all the files into the accepted directory
utils.move(self.pkg.changes_file, targetdir)
# Send accept mail, announce to lists, close bugs and check for
# override disparities
if not cnf["Dinstall::Options::No-Mail"]:
+ self.update_subst()
self.Subst["__SUITE__"] = ""
self.Subst["__SUMMARY__"] = summary
mail_message = utils.TemplateSubst(self.Subst, accepttemplate)
overridetemplate = os.path.join(cnf["Dir::Templates"], 'process-unchecked.override-disparity')
+ self.update_subst()
self.Subst["__SUMMARY__"] = summary
mail_message = utils.TemplateSubst(self.Subst, overridetemplate)
utils.send_mail(mail_message)
rej_template = os.path.join(cnf["Dir::Templates"], "queue.rejected")
+ self.update_subst()
if not manual:
self.Subst["__REJECTOR_ADDRESS__"] = cnf["Dinstall::MyEmailAddress"]
self.Subst["__MANUAL_REJECT_MESSAGE__"] = ""
user_email_address = utils.whoami() + " <%s>" % (cnf["Dinstall::MyAdminAddress"])
self.Subst["__REJECTOR_ADDRESS__"] = user_email_address
self.Subst["__MANUAL_REJECT_MESSAGE__"] = reject_message
- self.Subst["__CC__"] = "Cc: " + Cnf["Dinstall::MyEmailAddress"]
+ self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]
reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
# Write the rejection email out as the <foo>.reason file
os.write(reason_fd, reject_mail_message)
if not cnf["Dinstall::Options::No-Mail"]:
utils.send_mail(reject_mail_message)
- self.Logger.log(["rejected", pkg.changes_file])
+ if self.logger:
+ self.logger.log(["rejected", self.pkg.changes_file])
return 0
################################################################################
- def in_override_p(self, package, component, suite, binary_type, file, session=None):
+ def in_override_p(self, package, component, suite, binary_type, file, session):
"""
Check if a package already has override entries in the DB
cnf = Config()
- if session is None:
- session = DBConn().session()
-
if binary_type == "": # must be source
file_type = "dsc"
else:
Description: TODO
"""
+ Cnf = Config()
anyversion = None
- anysuite = [suite] + self.Cnf.ValueList("Suite::%s::VersionChecks::Enhances" % (suite))
+ anysuite = [suite] + Cnf.ValueList("Suite::%s::VersionChecks::Enhances" % (suite))
for (s, v) in sv_list:
if s in [ x.lower() for x in anysuite ]:
if not anyversion or apt_pkg.VersionCompare(anyversion, v) <= 0:
self.reject.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (file, existent_version, suite, new_version, target_suite))
################################################################################
- def check_binary_against_db(self, file, session=None):
- if session is None:
- session = DBConn().session()
-
+ def check_binary_against_db(self, file, session):
# Ensure version is sane
q = session.query(BinAssociation)
q = q.join(DBBinary).filter(DBBinary.package==self.pkg.files[file]["package"])
q = q.join(Architecture).filter(Architecture.arch_string.in_([self.pkg.files[file]["architecture"], 'all']))
self.cross_suite_version_check([ (x.suite.suite_name, x.binary.version) for x in q.all() ],
- file, files[file]["version"], sourceful=False)
+ file, self.pkg.files[file]["version"], sourceful=False)
# Check for any existing copies of the file
- q = session.query(DBBinary).filter_by(files[file]["package"])
- q = q.filter_by(version=files[file]["version"])
- q = q.join(Architecture).filter_by(arch_string=files[file]["architecture"])
+ q = session.query(DBBinary).filter_by(package=self.pkg.files[file]["package"])
+ q = q.filter_by(version=self.pkg.files[file]["version"])
+ q = q.join(Architecture).filter_by(arch_string=self.pkg.files[file]["architecture"])
if q.count() > 0:
self.rejects.append("%s: can not overwrite existing copy already in the archive." % (file))
################################################################################
- def check_source_against_db(self, file, session=None):
+ def check_source_against_db(self, file, session):
"""
"""
- if session is None:
- session = DBConn().session()
-
source = self.pkg.dsc.get("source")
version = self.pkg.dsc.get("version")
file, version, sourceful=True)
################################################################################
- def check_dsc_against_db(self, file, session=None):
+ def check_dsc_against_db(self, file, session):
"""
@warning: NB: this function can remove entries from the 'files' index [if
"""
- if session is None:
- session = DBConn().session()
-
+ Cnf = Config()
self.pkg.orig_tar_gz = None
# Try and find all files mentioned in the .dsc. This has
found = "%s in incoming" % (dsc_name)
# Check the file does not already exist in the archive
- ql = get_poolfile_like_name(dsc_name)
+ ql = get_poolfile_like_name(dsc_name, session)
# Strip out anything that isn't '%s' or '/%s$'
for i in ql:
# TODO: Don't delete the entry, just mark it as not needed
# This would fix the stupidity of changing something we often iterate over
# whilst we're doing it
- del files[dsc_name]
+ del self.pkg.files[dsc_name]
self.pkg.orig_tar_gz = os.path.join(i.location.path, i.filename)
match = 1
old_file_fh.close()
actual_size = os.stat(old_file)[stat.ST_SIZE]
found = old_file
- suite_type = f.location.archive_type
+ suite_type = x.location.archive_type
# need this for updating dsc_files in install()
- dsc_entry["files id"] = f.file_id
+ dsc_entry["files id"] = x.file_id
# See install() in process-accepted...
- self.pkg.orig_tar_id = f.file_id
+ self.pkg.orig_tar_id = x.file_id
self.pkg.orig_tar_gz = old_file
- self.pkg.orig_tar_location = f.location.location_id
+ self.pkg.orig_tar_location = x.location.location_id
else:
# TODO: Record the queues and info in the DB so we don't hardcode all this crap
# Not there? Check the queue directories...
for directory in [ "Accepted", "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
- in_otherdir = os.path.join(self.Cnf["Dir::Queue::%s" % (directory)], dsc_name)
+ if not Cnf.has_key("Dir::Queue::%s" % (directory)):
+ continue
+ in_otherdir = os.path.join(Cnf["Dir::Queue::%s" % (directory)], dsc_name)
if os.path.exists(in_otherdir):
in_otherdir_fh = utils.open_file(in_otherdir)
actual_md5 = apt_pkg.md5sum(in_otherdir_fh)
self.rejects.append("size for %s doesn't match %s." % (found, file))
################################################################################
- def accepted_checks(self, overwrite_checks=True, session=None):
+ def accepted_checks(self, overwrite_checks, session):
# Recheck anything that relies on the database; since that's not
# frozen between accept and our run time when called from p-a.
# overwrite_checks is set to False when installing to stable/oldstable
- if session is None:
- session = DBConn().session()
-
propogate={}
nopropogate={}
+ # Find the .dsc (again)
+ dsc_filename = None
+ for f in self.pkg.files.keys():
+ if self.pkg.files[f]["type"] == "dsc":
+ dsc_filename = f
+
for checkfile in self.pkg.files.keys():
# The .orig.tar.gz can disappear out from under us is it's a
# duplicate of one in the archive.
# propogate in the case it is in the override tables:
for suite in self.pkg.changes.get("propdistribution", {}).keys():
- if self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile):
+ if self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile, session):
propogate[suite] = 1
else:
nopropogate[suite] = 1
for checkfile in self.pkg.files.keys():
# Check the package is still in the override tables
for suite in self.pkg.changes["distribution"].keys():
- if not self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile):
+ if not self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile, session):
self.rejects.append("%s is NEW for %s." % (checkfile, suite))
################################################################################
def do_unaccept(self):
cnf = Config()
+ self.update_subst()
self.Subst["__REJECTOR_ADDRESS__"] = cnf["Dinstall::MyEmailAddress"]
self.Subst["__REJECT_MESSAGE__"] = self.package_info()
self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]