import textwrap
from types import *
from sqlalchemy.sql.expression import desc
+from sqlalchemy.orm.exc import NoResultFound
import yaml
from regexes import *
from config import Config
from holding import Holding
+from urgencylog import UrgencyLog
from dbconn import *
from summarystats import SummaryStats
from utils import parse_changes, check_dsc_files
from textutils import fix_maintainer
from binary import Binary
+from lintian import parse_lintian_output, generate_reject_messages
###############################################################################
# Build up a list of potentially new things
for name, f in files.items():
# Skip byhand elements
- if f["type"] == "byhand":
- continue
+# if f["type"] == "byhand":
+# continue
pkg = f["package"]
priority = f["priority"]
section = f["section"]
def check_status(files):
new = byhand = 0
for f in files.keys():
- if files[f]["type"] == "byhand":
+ if files[f].has_key("byhand"):
byhand = 1
elif files[f].has_key("new"):
new = 1
for title, messages in msgs:
if messages:
msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
+ msg += '\n'
return msg
self.pkg.changes["chopversion"] = re_no_epoch.sub('', self.pkg.changes["version"])
self.pkg.changes["chopversion2"] = re_no_revision.sub('', self.pkg.changes["chopversion"])
- # Check there isn't already a changes file of the same name in one
- # of the queue directories.
base_filename = os.path.basename(filename)
- if get_knownchange(base_filename):
- self.rejects.append("%s: a file with this name already exists." % (base_filename))
-
# Check the .changes is non-empty
if not self.pkg.files:
self.rejects.append("%s: nothing to do (Files field is empty)." % (base_filename))
entry["new"] = 1
else:
dsc_file_exists = False
- for myq in ["Accepted", "Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
+ for myq in ["Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
if cnf.has_key("Dir::Queue::%s" % (myq)):
if os.path.exists(os.path.join(cnf["Dir::Queue::" + myq], dsc_filename)):
dsc_file_exists = True
def per_suite_file_checks(self, f, suite, session):
cnf = Config()
entry = self.pkg.files[f]
- archive = utils.where_am_i()
# Skip byhand
if entry.has_key("byhand"):
# Determine the location
location = cnf["Dir::Pool"]
- l = get_location(location, entry["component"], archive, session)
+ l = get_location(location, entry["component"], session=session)
if l is None:
- self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (entry["component"], archive))
+ self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %)" % entry["component"])
entry["location id"] = -1
else:
entry["location id"] = l.location_id
os.chdir(cwd)
- # Check there isn't already a .changes file of the same name in
- # the proposed-updates "CopyChanges" storage directories.
+ # Check whether this changes file is already known to dak
# [NB: this check must be done post-suite mapping]
base_filename = os.path.basename(self.pkg.changes_file)
- for suite in self.pkg.changes["distribution"].keys():
- copychanges = "Suite::%s::CopyChanges" % (suite)
- if cnf.has_key(copychanges) and \
- os.path.exists(os.path.join(cnf[copychanges], base_filename)):
- self.rejects.append("%s: a file with this name already exists in %s" \
- % (base_filename, cnf[copychanges]))
+ session = DBConn().session()
+
+ try:
+ dbc = session.query(DBChange).filter_by(changesname=base_filename).one()
+ # if in the pool or in a queue other than unchecked, reject
+ if (dbc.in_queue is None) \
+ or (dbc.in_queue is not None
+ and dbc.in_queue.queue_name not in ["unchecked", "newstage"]):
+ self.rejects.append("%s file already known to dak" % base_filename)
+ except NoResultFound, e:
+ # not known, good
+ pass
has_binaries = False
has_source = False
- session = DBConn().session()
-
for f, entry in self.pkg.files.items():
# Ensure the file does not already exist in one of the queue directories
- for d in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+ for d in [ "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
if not cnf.has_key("Dir::Queue::%s" % (d)): continue
- if os.path.exists(cnf["Dir::Queue::%s" % (d) ] + '/' + f):
+ if os.path.exists(os.path.join(cnf["Dir::Queue::%s" % (d) ], f)):
self.rejects.append("%s file already exists in the %s directory." % (f, d))
if not re_taint_free.match(f):
continue
# Look in some other queues for the file
- queues = ('Accepted', 'New', 'Byhand', 'ProposedUpdates',
+ queues = ('New', 'Byhand', 'ProposedUpdates',
'OldProposedUpdates', 'Embargoed', 'Unembargoed')
for queue in queues:
###########################################################################
def check_lintian(self):
+ """
+ Extends self.rejects by checking the output of lintian against tags
+ specified in Dinstall::LintianTags.
+ """
+
cnf = Config()
# Don't reject binary uploads
return
# Only check some distributions
- valid_dist = False
for dist in ('unstable', 'experimental'):
if dist in self.pkg.changes['distribution']:
- valid_dist = True
break
-
- if not valid_dist:
+ else:
return
+ # If we do not have a tagfile, don't do anything
tagfile = cnf.get("Dinstall::LintianTags")
if tagfile is None:
- # We don't have a tagfile, so just don't do anything.
return
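+ # For reference, the tag file is YAML with a top-level "lintian" key mapping
+ # tag categories to lists of tag names. Illustrative layout only (these tag
+ # names are placeholders, not real configuration):
+ #
+ #   lintian:
+ #     error:
+ #       - some-always-fatal-tag
+ #     warning:
+ #       - some-overridable-tag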
# Parse the yaml file
sourcefile = file(tagfile, 'r')
sourcecontent = sourcefile.read()
sourcefile.close()
+
try:
lintiantags = yaml.load(sourcecontent)['lintian']
except yaml.YAMLError, msg:
# Try and find all orig mentioned in the .dsc
symlinked = self.ensure_orig()
- # Now setup the input file for lintian. lintian wants "one tag per line" only,
- # so put it together like it. We put all types of tags in one file and then sort
- # through lintians output later to see if its a fatal tag we detected, or not.
- # So we only run lintian once on all tags, even if we might reject on some, but not
- # reject on others.
- # Additionally build up a set of tags
- tags = set()
- (fd, temp_filename) = utils.temp_filename()
+ # Set up the input file for lintian
+ fd, temp_filename = utils.temp_filename()
temptagfile = os.fdopen(fd, 'w')
- for tagtype in lintiantags:
- for tag in lintiantags[tagtype]:
- temptagfile.write("%s\n" % tag)
- tags.add(tag)
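+ # lintian expects one tag per line, so every category goes into a single
+ # file; which hits are fatal is sorted out later by generate_reject_messages.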
+ for tags in lintiantags.values():
+ temptagfile.writelines(['%s\n' % x for x in tags])
temptagfile.close()
- # So now we should look at running lintian at the .changes file, capturing output
- # to then parse it.
- command = "lintian --show-overrides --tags-from-file %s %s" % (temp_filename, self.pkg.changes_file)
- (result, output) = commands.getstatusoutput(command)
+ try:
+ cmd = "lintian --show-overrides --tags-from-file %s %s" % \
+ (temp_filename, self.pkg.changes_file)
- # We are done with lintian, remove our tempfile and any symlinks we created
- os.unlink(temp_filename)
- for symlink in symlinked:
- os.unlink(symlink)
+ result, output = commands.getstatusoutput(cmd)
+ finally:
+ # Remove our tempfile and any symlinks we created
+ os.unlink(temp_filename)
- if (result == 2):
- utils.warn("lintian failed for %s [return code: %s]." % (self.pkg.changes_file, result))
- utils.warn(utils.prefix_multi_line_string(output, " [possible output:] "))
+ for symlink in symlinked:
+ os.unlink(symlink)
- if len(output) == 0:
- return
+ if result == 2:
+ utils.warn("lintian failed for %s [return code: %s]." % \
+ (self.pkg.changes_file, result))
+ utils.warn(utils.prefix_multi_line_string(output, \
+ " [possible output:] "))
def log(*txt):
if self.logger:
- self.logger.log([self.pkg.changes_file, "check_lintian"] + list(txt))
-
- # We have output of lintian, this package isn't clean. Lets parse it and see if we
- # are having a victim for a reject.
- # W: tzdata: binary-without-manpage usr/sbin/tzconfig
- for line in output.split('\n'):
- m = re_parse_lintian.match(line)
- if m is None:
- continue
-
- etype = m.group(1)
- epackage = m.group(2)
- etag = m.group(3)
- etext = m.group(4)
-
- # So lets check if we know the tag at all.
- if etag not in tags:
- continue
+ self.logger.log(
+ [self.pkg.changes_file, "check_lintian"] + list(txt)
+ )
- if etype == 'O':
- # We know it and it is overriden. Check that override is allowed.
- if etag in lintiantags['warning']:
- # The tag is overriden, and it is allowed to be overriden.
- # Don't add a reject message.
- pass
- elif etag in lintiantags['error']:
- # The tag is overriden - but is not allowed to be
- self.rejects.append("%s: Overriden tag %s found, but this tag may not be overwritten." % (epackage, etag))
- log("ftpmaster does not allow tag to be overridable", etag)
- else:
- # Tag is known, it is not overriden, direct reject.
- self.rejects.append("%s: Found lintian output: '%s %s', automatically rejected package." % (epackage, etag, etext))
- # Now tell if they *might* override it.
- if etag in lintiantags['warning']:
- log("auto rejecting", "overridable", etag)
- self.rejects.append("%s: If you have a good reason, you may override this lintian tag." % (epackage))
- else:
- log("auto rejecting", "not overridable", etag)
+ # Generate messages
+ parsed_tags = parse_lintian_output(output)
+ self.rejects.extend(
+ generate_reject_messages(parsed_tags, lintiantags, log=log)
+ )
###########################################################################
def check_urgency(self):
self.check_dm_upload(fpr, session)
else:
# Check source-based permissions for other types
- if self.pkg.changes["architecture"].has_key("source"):
- if fpr.source_acl.access_level is None:
- rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
- rej += '\nPlease contact ftpmaster if you think this is incorrect'
- self.rejects.append(rej)
- return
- else:
- # If not a DM, we allow full upload rights
- uid_email = "%s@debian.org" % (fpr.uid.uid)
- self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
+ if self.pkg.changes["architecture"].has_key("source") and \
+ fpr.source_acl.access_level is None:
+ rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
+ rej += '\nPlease contact ftpmaster if you think this is incorrect'
+ self.rejects.append(rej)
+ return
+ # If not a DM, we allow full upload rights
+ uid_email = "%s@debian.org" % (fpr.uid.uid)
+ self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
# Check binary upload permissions
return summary
###########################################################################
-
- def accept (self, summary, short_summary, session):
+ @session_wrapper
+ def accept (self, summary, short_summary, session=None):
"""
Accept an upload.
stats = SummaryStats()
print "Installing."
- Logger.log(["installing changes", u.pkg.changes_file])
+ self.logger.log(["installing changes", self.pkg.changes_file])
+
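+ # Collect the pool files created below so they can be added to the
+ # per-suite copy queues (e.g. buildd queues) further down.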
+ poolfiles = []
# Add the .dsc file to the DB first
- for newfile, entry in u.pkg.files.items():
+ for newfile, entry in self.pkg.files.items():
if entry["type"] == "dsc":
- dsc_component, dsc_location_id = add_dsc_to_db(u, newfile, session)
+ dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
+ for j in pfs:
+ poolfiles.append(j)
# Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
- for newfile, entry in u.pkg.files.items():
+ for newfile, entry in self.pkg.files.items():
if entry["type"] == "deb":
- add_deb_to_db(u, newfile, session)
+ poolfiles.append(add_deb_to_db(self, newfile, session))
# If this is a sourceful, diff-only upload that is moving cross-component,
# we need to copy the .orig files into the new component too, for the same
# reasons as above.
- if u.pkg.changes["architecture"].has_key("source"):
- for orig_file in u.pkg.orig_files.keys():
- if not u.pkg.orig_files[orig_file].has_key("id"):
+ if self.pkg.changes["architecture"].has_key("source"):
+ for orig_file in self.pkg.orig_files.keys():
+ if not self.pkg.orig_files[orig_file].has_key("id"):
continue # Skip if it's not in the pool
- orig_file_id = u.pkg.orig_files[orig_file]["id"]
- if u.pkg.orig_files[orig_file]["location"] == dsc_location_id:
+ orig_file_id = self.pkg.orig_files[orig_file]["id"]
+ if self.pkg.orig_files[orig_file]["location"] == dsc_location_id:
continue # Skip if the location didn't change
# Do the move
old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum,
'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
- new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
+ new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
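+ # (utils.poolify maps the source package and component to its pool
+ # subdirectory, roughly <component>/h/hello/ for a package "hello".)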
# TODO: Care about size/md5sum collisions etc
(found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session)
newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
# TODO: Check that there's only 1 here
- source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0]
+ source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0]
dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
dscf.poolfile_id = newf.file_id
session.add(dscf)
session.flush()
+ poolfiles.append(newf)
+
# Install the files into the pool
- for newfile, entry in u.pkg.files.items():
+ for newfile, entry in self.pkg.files.items():
destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
utils.move(newfile, destination)
- Logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
- summarystats.accept_bytes += float(entry["size"])
+ self.logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
+ stats.accept_bytes += float(entry["size"])
# Copy the .changes file across for suites which need it.
copy_changes = {}
- for suite_name in u.pkg.changes["distribution"].keys():
+ for suite_name in self.pkg.changes["distribution"].keys():
if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)):
copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = ""
for dest in copy_changes.keys():
- utils.copy(u.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
+ utils.copy(self.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
# We're done - commit the database changes
session.commit()
# the last commit
# Move the .changes into the 'done' directory
- utils.move(u.pkg.changes_file,
- os.path.join(cnf["Dir::Queue::Done"], os.path.basename(u.pkg.changes_file)))
+ utils.move(self.pkg.changes_file,
+ os.path.join(cnf["Dir::Queue::Done"], os.path.basename(self.pkg.changes_file)))
- if u.pkg.changes["architecture"].has_key("source") and log_urgency:
- UrgencyLog().log(u.pkg.dsc["source"], u.pkg.dsc["version"], u.pkg.changes["urgency"])
+ if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
+ UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
# Send accept mail, announce to lists, close bugs and check for
# override disparities
self.update_subst()
self.Subst["__SUITE__"] = ""
self.Subst["__SUMMARY__"] = summary
- mail_message = utils.TemplateSubst(self.Subst, accepttemplate)
+ mail_message = utils.TemplateSubst(self.Subst,
+ os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
utils.send_mail(mail_message)
self.announce(short_summary, 1)
os.rename(temp_filename, filename)
os.chmod(filename, 0644)
- # auto-build queue
-# res = get_or_set_queue('buildd', session).autobuild_upload(self.pkg, session)
-# if res:
-# utils.fubar(res)
-# now_date = datetime.now()
+ session.commit()
+
+ # Set up our copy queues (e.g. buildd queues)
+ for suite_name in self.pkg.changes["distribution"].keys():
+ suite = get_suite(suite_name, session)
+ for q in suite.copy_queues:
+ for f in poolfiles:
+ q.add_file_from_pool(f)
session.commit()
# Finally...
- summarystats.accept_count += 1
+ stats.accept_count += 1
def check_override(self):
"""
def remove(self, from_dir=None):
"""
Used (for instance) in p-u to remove the package from unchecked
+
+ Also removes the package from the holding area.
"""
if from_dir is None:
- os.chdir(self.pkg.directory)
- else:
- os.chdir(from_dir)
+ from_dir = self.pkg.directory
+ h = Holding()
for f in self.pkg.files.keys():
- os.unlink(f)
- os.unlink(self.pkg.changes_file)
+ os.unlink(os.path.join(from_dir, f))
+ if os.path.exists(os.path.join(h.holding_dir, f)):
+ os.unlink(os.path.join(h.holding_dir, f))
+
+ os.unlink(os.path.join(from_dir, self.pkg.changes_file))
+ if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
+ os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
###########################################################################
- def move_to_dir (self, dest, perms=0660, changesperms=0664):
+ def move_to_queue (self, queue):
"""
- Move files to dest with certain perms/changesperms
+ Move files to a destination queue using the permissions in the table
"""
- utils.move(self.pkg.changes_file, dest, perms=changesperms)
+ h = Holding()
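+ # The queue table stores permissions as octal strings (e.g. "0664"),
+ # hence the int(x, 8) conversions below.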
+ utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
+ queue.path, perms=int(queue.change_perms, 8))
for f in self.pkg.files.keys():
- utils.move(f, dest, perms=perms)
+ utils.move(os.path.join(h.holding_dir, f), queue.path, perms=int(queue.perms, 8))
###########################################################################
user_email_address = utils.whoami() + " <%s>" % (cnf["Dinstall::MyAdminAddress"])
self.Subst["__REJECTOR_ADDRESS__"] = user_email_address
self.Subst["__MANUAL_REJECT_MESSAGE__"] = reject_message
+ self.Subst["__REJECT_MESSAGE__"] = ""
self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]
reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
# Write the rejection email out as the <foo>.reason file
################################################################################
def check_source_against_db(self, filename, session):
- """
- """
source = self.pkg.dsc.get("source")
version = self.pkg.dsc.get("version")
# This would fix the stupidity of changing something we often iterate over
# whilst we're doing it
del self.pkg.files[dsc_name]
+ dsc_entry["files id"] = i.file_id
if not orig_files.has_key(dsc_name):
orig_files[dsc_name] = {}
orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
else:
# TODO: Record the queues and info in the DB so we don't hardcode all this crap
# Not there? Check the queue directories...
- for directory in [ "Accepted", "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+ for directory in [ "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
if not Cnf.has_key("Dir::Queue::%s" % (directory)):
continue
in_otherdir = os.path.join(Cnf["Dir::Queue::%s" % (directory)], dsc_name)
source_epochless_version = re_no_epoch.sub('', source_version)
dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
found = False
- for q in ["Accepted", "Embargoed", "Unembargoed", "Newstage"]:
+ for q in ["Embargoed", "Unembargoed", "Newstage"]:
if cnf.has_key("Dir::Queue::%s" % (q)):
if os.path.exists(cnf["Dir::Queue::%s" % (q)] + '/' + dsc_filename):
found = True