from utils import parse_changes, check_dsc_files
from textutils import fix_maintainer
from binary import Binary
+from lintian import parse_lintian_output, generate_reject_messages
###############################################################################
# Build up a list of potentially new things
for name, f in files.items():
# Skip byhand elements
- if f["type"] == "byhand":
- continue
+# if f["type"] == "byhand":
+# continue
pkg = f["package"]
priority = f["priority"]
section = f["section"]
if f.has_key("othercomponents"):
new[pkg]["othercomponents"] = f["othercomponents"]
+    # Fix up the list of target suites
+    cnf = Config()
+    for suite in changes["suite"].keys():
+        override = cnf.Find("Suite::%s::OverrideSuite" % (suite))
+        if override:
+            (olderr, newerr) = (get_suite(suite, session) == None,
+                                get_suite(override, session) == None)
+            if olderr or newerr:
+                # Initialise both prefixes to "" so the warning below is
+                # well-defined even when only one of the suites is invalid.
+                (oinv, ninv) = ("", "")
+                if olderr: oinv = "invalid "
+                if newerr: ninv = "invalid "
+                print "warning: overriding %ssuite %s to %ssuite %s" % (
+                    oinv, suite, ninv, override)
+            del changes["suite"][suite]
+            changes["suite"][override] = 1
+
for suite in changes["suite"].keys():
for pkg in new.keys():
ql = get_override(pkg, suite, new[pkg]["component"], new[pkg]["type"], session)
def check_status(files):
new = byhand = 0
for f in files.keys():
- if files[f]["type"] == "byhand":
+ if files[f].has_key("byhand"):
byhand = 1
elif files[f].has_key("new"):
new = 1
self.warnings = []
self.notes = []
+ self.later_check_files = []
+
self.pkg.reset()
def package_info(self):
for title, messages in msgs:
if messages:
msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
- msg += '\n'
+ msg += '\n\n'
return msg
self.pkg.changes["changedbyemail"] = ""
self.rejects.append("%s: Changed-By field ('%s') failed to parse: %s" \
- % (filename, changes["changed-by"], msg))
+ % (filename, self.pkg.changes["changed-by"], msg))
# Ensure all the values in Closes: are numbers
if self.pkg.changes.has_key("closes"):
entry["new"] = 1
else:
dsc_file_exists = False
- for myq in ["Accepted", "Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
+ for myq in ["Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
if cnf.has_key("Dir::Queue::%s" % (myq)):
if os.path.exists(os.path.join(cnf["Dir::Queue::" + myq], dsc_filename)):
dsc_file_exists = True
for f in file_keys:
ret = holding.copy_to_holding(f)
if ret is not None:
- # XXX: Should we bail out here or try and continue?
- self.rejects.append(ret)
+ self.warnings.append('Could not copy %s to holding; will attempt to find in DB later' % f)
os.chdir(cwd)
# if in the pool or in a queue other than unchecked, reject
if (dbc.in_queue is None) \
or (dbc.in_queue is not None
- and dbc.in_queue.queue_name != 'unchecked'):
+ and dbc.in_queue.queue_name not in ["unchecked", "newstage"]):
self.rejects.append("%s file already known to dak" % base_filename)
except NoResultFound, e:
# not known, good
if os.path.exists(f):
self.rejects.append("Can't read `%s'. [permission denied]" % (f))
else:
- self.rejects.append("Can't read `%s'. [file not found]" % (f))
+ # Don't directly reject, mark to check later to deal with orig's
+ # we can find in the pool
+ self.later_check_files.append(f)
entry["type"] = "unreadable"
continue
self.check_dsc_against_db(dsc_filename, session)
session.close()
+    # Finally, reject anything still on the missing-files list: these were
+    # referenced by the .dsc/.changes but never found on disk or in the pool.
+    for f in self.later_check_files:
+        self.rejects.append("Could not find file %s referenced in changes" % f)
+
return True
###########################################################################
continue
# Look in some other queues for the file
- queues = ('Accepted', 'New', 'Byhand', 'ProposedUpdates',
+ queues = ('New', 'Byhand', 'ProposedUpdates',
'OldProposedUpdates', 'Embargoed', 'Unembargoed')
for queue in queues:
###########################################################################
def check_lintian(self):
+ """
+ Extends self.rejects by checking the output of lintian against tags
+ specified in Dinstall::LintianTags.
+ """
+
cnf = Config()
# Don't reject binary uploads
return
# Only check some distributions
- valid_dist = False
for dist in ('unstable', 'experimental'):
if dist in self.pkg.changes['distribution']:
- valid_dist = True
break
-
- if not valid_dist:
+ else:
return
+ # If we do not have a tagfile, don't do anything
tagfile = cnf.get("Dinstall::LintianTags")
if tagfile is None:
- # We don't have a tagfile, so just don't do anything.
return
# Parse the yaml file
sourcefile = file(tagfile, 'r')
sourcecontent = sourcefile.read()
sourcefile.close()
+
try:
lintiantags = yaml.load(sourcecontent)['lintian']
except yaml.YAMLError, msg:
# Try and find all orig mentioned in the .dsc
symlinked = self.ensure_orig()
- # Now setup the input file for lintian. lintian wants "one tag per line" only,
- # so put it together like it. We put all types of tags in one file and then sort
- # through lintians output later to see if its a fatal tag we detected, or not.
- # So we only run lintian once on all tags, even if we might reject on some, but not
- # reject on others.
- # Additionally build up a set of tags
- tags = set()
- (fd, temp_filename) = utils.temp_filename()
+ # Setup the input file for lintian
+ fd, temp_filename = utils.temp_filename()
temptagfile = os.fdopen(fd, 'w')
- for tagtype in lintiantags:
- for tag in lintiantags[tagtype]:
- temptagfile.write("%s\n" % tag)
- tags.add(tag)
+ for tags in lintiantags.values():
+ temptagfile.writelines(['%s\n' % x for x in tags])
temptagfile.close()
- # So now we should look at running lintian at the .changes file, capturing output
- # to then parse it.
- command = "lintian --show-overrides --tags-from-file %s %s" % (temp_filename, self.pkg.changes_file)
- (result, output) = commands.getstatusoutput(command)
+ try:
+ cmd = "lintian --show-overrides --tags-from-file %s %s" % \
+ (temp_filename, self.pkg.changes_file)
- # We are done with lintian, remove our tempfile and any symlinks we created
- os.unlink(temp_filename)
- for symlink in symlinked:
- os.unlink(symlink)
+ result, output = commands.getstatusoutput(cmd)
+ finally:
+ # Remove our tempfile and any symlinks we created
+ os.unlink(temp_filename)
- if (result == 2):
- utils.warn("lintian failed for %s [return code: %s]." % (self.pkg.changes_file, result))
- utils.warn(utils.prefix_multi_line_string(output, " [possible output:] "))
+ for symlink in symlinked:
+ os.unlink(symlink)
- if len(output) == 0:
- return
+ if result == 2:
+ utils.warn("lintian failed for %s [return code: %s]." % \
+ (self.pkg.changes_file, result))
+ utils.warn(utils.prefix_multi_line_string(output, \
+ " [possible output:] "))
def log(*txt):
if self.logger:
- self.logger.log([self.pkg.changes_file, "check_lintian"] + list(txt))
-
- # We have output of lintian, this package isn't clean. Lets parse it and see if we
- # are having a victim for a reject.
- # W: tzdata: binary-without-manpage usr/sbin/tzconfig
- for line in output.split('\n'):
- m = re_parse_lintian.match(line)
- if m is None:
- continue
-
- etype = m.group(1)
- epackage = m.group(2)
- etag = m.group(3)
- etext = m.group(4)
-
- # So lets check if we know the tag at all.
- if etag not in tags:
- continue
+ self.logger.log(
+ [self.pkg.changes_file, "check_lintian"] + list(txt)
+ )
- if etype == 'O':
- # We know it and it is overriden. Check that override is allowed.
- if etag in lintiantags['warning']:
- # The tag is overriden, and it is allowed to be overriden.
- # Don't add a reject message.
- pass
- elif etag in lintiantags['error']:
- # The tag is overriden - but is not allowed to be
- self.rejects.append("%s: Overriden tag %s found, but this tag may not be overwritten." % (epackage, etag))
- log("ftpmaster does not allow tag to be overridable", etag)
- else:
- # Tag is known, it is not overriden, direct reject.
- self.rejects.append("%s: Found lintian output: '%s %s', automatically rejected package." % (epackage, etag, etext))
- # Now tell if they *might* override it.
- if etag in lintiantags['warning']:
- log("auto rejecting", "overridable", etag)
- self.rejects.append("%s: If you have a good reason, you may override this lintian tag." % (epackage))
- else:
- log("auto rejecting", "not overridable", etag)
+ # Generate messages
+ parsed_tags = parse_lintian_output(output)
+ self.rejects.extend(
+ generate_reject_messages(parsed_tags, lintiantags, log=log)
+ )
###########################################################################
def check_urgency(self):
self.check_dm_upload(fpr, session)
else:
# Check source-based permissions for other types
- if self.pkg.changes["architecture"].has_key("source"):
- if fpr.source_acl.access_level is None:
- rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
- rej += '\nPlease contact ftpmaster if you think this is incorrect'
- self.rejects.append(rej)
- return
- else:
- # If not a DM, we allow full upload rights
- uid_email = "%s@debian.org" % (fpr.uid.uid)
- self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
+ if self.pkg.changes["architecture"].has_key("source") and \
+ fpr.source_acl.access_level is None:
+ rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
+ rej += '\nPlease contact ftpmaster if you think this is incorrect'
+ self.rejects.append(rej)
+ return
+ # If not a DM, we allow full upload rights
+ uid_email = "%s@debian.org" % (fpr.uid.uid)
+ self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
# Check binary upload permissions
print "Installing."
self.logger.log(["installing changes", self.pkg.changes_file])
+ poolfiles = []
+
# Add the .dsc file to the DB first
for newfile, entry in self.pkg.files.items():
if entry["type"] == "dsc":
- dsc_component, dsc_location_id = add_dsc_to_db(self, newfile, session)
+ source, dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
+ for j in pfs:
+ poolfiles.append(j)
# Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
for newfile, entry in self.pkg.files.items():
if entry["type"] == "deb":
- add_deb_to_db(self, newfile, session)
+ poolfiles.append(add_deb_to_db(self, newfile, session))
# If this is a sourceful diff only upload that is moving
# cross-component we need to copy the .orig files into the new
# component too for the same reasons as above.
+ # XXX: mhy: I think this should be in add_dsc_to_db
if self.pkg.changes["architecture"].has_key("source"):
for orig_file in self.pkg.orig_files.keys():
if not self.pkg.orig_files[orig_file].has_key("id"):
new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
# TODO: Care about size/md5sum collisions etc
- (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session)
+ (found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
+ # TODO: Uhm, what happens if newf isn't None - something has gone badly and we should cope
if newf is None:
utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
- # TODO: Check that there's only 1 here
- source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0]
- dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
- dscf.poolfile_id = newf.file_id
- session.add(dscf)
session.flush()
+ # Don't reference the old file from this changes
+ for p in poolfiles:
+ if p.file_id == oldf.file_id:
+ poolfiles.remove(p)
+
+ poolfiles.append(newf)
+
+ # Fix up the DSC references
+ toremove = []
+
+ for df in source.srcfiles:
+ if df.poolfile.file_id == oldf.file_id:
+ # Add a new DSC entry and mark the old one for deletion
+ # Don't do it in the loop so we don't change the thing we're iterating over
+ newdscf = DSCFile()
+ newdscf.source_id = source.source_id
+ newdscf.poolfile_id = newf.file_id
+ session.add(newdscf)
+
+ toremove.append(df)
+
+ for df in toremove:
+ session.delete(df)
+
+ # Flush our changes
+ session.flush()
+
+ # Make sure that our source object is up-to-date
+ session.expire(source)
+
# Install the files into the pool
for newfile, entry in self.pkg.files.items():
destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
- # Send accept mail, announce to lists, close bugs and check for
- # override disparities
- if not cnf["Dinstall::Options::No-Mail"]:
- self.update_subst()
- self.Subst["__SUITE__"] = ""
- self.Subst["__SUMMARY__"] = summary
- mail_message = utils.TemplateSubst(self.Subst,
- os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
- utils.send_mail(mail_message)
- self.announce(short_summary, 1)
+ self.update_subst()
+ self.Subst["__SUITE__"] = ""
+ self.Subst["__SUMMARY__"] = summary
+ mail_message = utils.TemplateSubst(self.Subst,
+ os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
+ utils.send_mail(mail_message)
+ self.announce(short_summary, 1)
## Helper stuff for DebBugs Version Tracking
if cnf.Find("Dir::Queue::BTSVersionTrack"):
os.rename(temp_filename, filename)
os.chmod(filename, 0644)
- # This routine returns None on success or an error on failure
- # TODO: Replace queue copying using the new queue.add_file_from_pool routine
- # and by looking up which queues in suite.copy_queues
- #res = get_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"])
- #if res:
- # utils.fubar(res)
+ session.commit()
+
+ # Set up our copy queues (e.g. buildd queues)
+ for suite_name in self.pkg.changes["distribution"].keys():
+ suite = get_suite(suite_name, session)
+ for q in suite.copy_queues:
+ for f in poolfiles:
+ q.add_file_from_pool(f)
session.commit()
cnf = Config()
- # Abandon the check if:
- # a) override disparity checks have been disabled
- # b) we're not sending mail
- if not cnf.FindB("Dinstall::OverrideDisparityCheck") or \
- cnf["Dinstall::Options::No-Mail"]:
+ # Abandon the check if override disparity checks have been disabled
+ if not cnf.FindB("Dinstall::OverrideDisparityCheck"):
return
summary = self.pkg.check_override()
###########################################################################
- def move_to_dir (self, dest, perms=0660, changesperms=0664):
+ def move_to_queue (self, queue):
"""
- Move files to dest with certain perms/changesperms
+ Move files to a destination queue using the permissions in the table
"""
h = Holding()
utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
- dest, perms=changesperms)
+ queue.path, perms=int(queue.change_perms, 8))
for f in self.pkg.files.keys():
- utils.move(os.path.join(h.holding_dir, f), dest, perms=perms)
+ utils.move(os.path.join(h.holding_dir, f), queue.path, perms=int(queue.perms, 8))
###########################################################################
try:
dest_fd = os.open(dest_file, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0644)
except OSError, e:
- # File exists? Let's try and move it to the morgue
+ # File exists? Let's find a new name by adding a number
if e.errno == errno.EEXIST:
- morgue_file = os.path.join(cnf["Dir::Morgue"], cnf["Dir::MorgueReject"], file_entry)
try:
- morgue_file = utils.find_next_free(morgue_file)
+ dest_file = utils.find_next_free(dest_file, 255)
except NoFreeFilenameError:
# Something's either gone badly Pete Tong, or
# someone is trying to exploit us.
- utils.warn("**WARNING** failed to move %s from the reject directory to the morgue." % (file_entry))
+ utils.warn("**WARNING** failed to find a free filename for %s in %s." % (file_entry, cnf["Dir::Queue::Reject"]))
return
- utils.move(dest_file, morgue_file, perms=0660)
+
+ # Make sure we really got it
try:
dest_fd = os.open(dest_file, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0644)
except OSError, e:
os.close(dest_fd)
###########################################################################
- def do_reject (self, manual=0, reject_message="", note=""):
+ def do_reject (self, manual=0, reject_message="", notes=""):
"""
Reject an upload. If called without a reject message or C{manual} is
true, spawn an editor so the user can write one.
if manual and not reject_message:
(fd, temp_filename) = utils.temp_filename()
temp_file = os.fdopen(fd, 'w')
- if len(note) > 0:
- for line in note:
- temp_file.write(line)
+ if len(notes) > 0:
+ for note in notes:
+ temp_file.write("\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s" \
+ % (note.author, note.version, note.notedate, note.comment))
temp_file.close()
editor = os.environ.get("EDITOR","vi")
answer = 'E'
user_email_address = utils.whoami() + " <%s>" % (cnf["Dinstall::MyAdminAddress"])
self.Subst["__REJECTOR_ADDRESS__"] = user_email_address
self.Subst["__MANUAL_REJECT_MESSAGE__"] = reject_message
+ self.Subst["__REJECT_MESSAGE__"] = ""
self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]
reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
# Write the rejection email out as the <foo>.reason file
os.close(reason_fd)
- # Send the rejection mail if appropriate
- if not cnf["Dinstall::Options::No-Mail"]:
- utils.send_mail(reject_mail_message)
+ # Send the rejection mail
+ utils.send_mail(reject_mail_message)
if self.logger:
self.logger.log(["rejected", self.pkg.changes_file])
cansave = 1
if not cansave:
- self.reject.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
+ self.rejects.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
################################################################################
def check_binary_against_db(self, filename, session):
################################################################################
def check_source_against_db(self, filename, session):
- """
- """
source = self.pkg.dsc.get("source")
version = self.pkg.dsc.get("version")
orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
match = 1
+ # Don't bitch that we couldn't find this file later
+ try:
+ self.later_check_files.remove(dsc_name)
+ except ValueError:
+ pass
+
+
if not match:
self.rejects.append("can not overwrite existing copy of '%s' already in the archive." % (dsc_name))
else:
# TODO: Record the queues and info in the DB so we don't hardcode all this crap
# Not there? Check the queue directories...
- for directory in [ "Accepted", "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+ for directory in [ "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
if not Cnf.has_key("Dir::Queue::%s" % (directory)):
continue
in_otherdir = os.path.join(Cnf["Dir::Queue::%s" % (directory)], dsc_name)
source_epochless_version = re_no_epoch.sub('', source_version)
dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
found = False
- for q in ["Accepted", "Embargoed", "Unembargoed", "Newstage"]:
+ for q in ["Embargoed", "Unembargoed", "Newstage"]:
if cnf.has_key("Dir::Queue::%s" % (q)):
if os.path.exists(cnf["Dir::Queue::%s" % (q)] + '/' + dsc_filename):
found = True