@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2001 - 2006 James Troup <james@nocrew.org>
-@copyright: 2009 Joerg Jaspert <joerg@debian.org>
+@copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
@license: GNU General Public License version 2 or later
"""
elif re_source_ext.match(f["type"]):
file_type = "dsc"
else:
+ file_type = f["type"]
utils.fubar("invalid type (%s) for new. Dazed, confused and sure as heck not continuing." % (file_type))
# Validate the override type
# Determine what parts in a .changes are NEW
-def determine_new(changes, files, warn=1):
+def determine_new(changes, files, warn=1, session = None):
"""
Determine what parts in a C{changes} file are NEW.
"""
new = {}
- session = DBConn().session()
-
# Build up a list of potentially new things
for name, f in files.items():
# Skip byhand elements
if f.has_key("othercomponents"):
new[pkg]["othercomponents"] = f["othercomponents"]
+    # Fix up the list of target suites
+    cnf = Config()
+    for suite in changes["suite"].keys():
+        override = cnf.Find("Suite::%s::OverrideSuite" % (suite))
+        if override:
+            (olderr, newerr) = (get_suite(suite, session) == None,
+                                get_suite(override, session) == None)
+            if olderr or newerr:
+                # NB: use one consistent name pair here. The first cut
+                # initialised "newinv" but assigned/printed "ninv", which
+                # raised a NameError whenever only the old suite was invalid.
+                (oinv, ninv) = ("", "")
+                if olderr: oinv = "invalid "
+                if newerr: ninv = "invalid "
+                print "warning: overriding %ssuite %s to %ssuite %s" % (
+                    oinv, suite, ninv, override)
+            del changes["suite"][suite]
+            changes["suite"][override] = 1
+
for suite in changes["suite"].keys():
for pkg in new.keys():
ql = get_override(pkg, suite, new[pkg]["component"], new[pkg]["type"], session)
if new[pkg].has_key("othercomponents"):
print "WARNING: %s already present in %s distribution." % (pkg, new[pkg]["othercomponents"])
- session.close()
-
return new
################################################################################
-def check_valid(new):
+def check_valid(new, session = None):
"""
Check if section and priority for NEW packages exist in database.
Additionally does sanity checks:
priority_name = new[pkg]["priority"]
file_type = new[pkg]["type"]
- section = get_section(section_name)
+ section = get_section(section_name, session)
if section is None:
new[pkg]["section id"] = -1
else:
new[pkg]["section id"] = section.section_id
- priority = get_priority(priority_name)
+ priority = get_priority(priority_name, session)
if priority is None:
new[pkg]["priority id"] = -1
else:
###############################################################################
-def check_status(files):
- new = byhand = 0
- for f in files.keys():
- if files[f].has_key("byhand"):
- byhand = 1
- elif files[f].has_key("new"):
- new = 1
- return (new, byhand)
-
-###############################################################################
-
# Used by Upload.check_timestamps
class TarTime(object):
def __init__(self, future_cutoff, past_cutoff):
self.warnings = []
self.notes = []
+ self.later_check_files = []
+
self.pkg.reset()
def package_info(self):
for title, messages in msgs:
if messages:
msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
- msg += '\n'
+ msg += '\n\n'
return msg
self.Subst["__MAINTAINER_TO__"] = self.pkg.changes["maintainer2047"]
self.Subst["__MAINTAINER__"] = self.pkg.changes.get("maintainer", "Unknown")
- if "sponsoremail" in self.pkg.changes:
- self.Subst["__MAINTAINER_TO__"] += ", %s" % self.pkg.changes["sponsoremail"]
+ # Process policy doesn't set the fingerprint field and I don't want to make it
+ # do it for now as I don't want to have to deal with the case where we accepted
+ # the package into PU-NEW, but the fingerprint has gone away from the keyring in
+ # the meantime so the package will be remarked as rejectable. Urgh.
+ # TODO: Fix this properly
+ if self.pkg.changes.has_key('fingerprint'):
+ session = DBConn().session()
+ fpr = get_fingerprint(self.pkg.changes['fingerprint'], session)
+ if fpr and self.check_if_upload_is_sponsored("%s@debian.org" % fpr.uid.uid, fpr.uid.name):
+ if self.pkg.changes.has_key("sponsoremail"):
+ self.Subst["__MAINTAINER_TO__"] += ", %s" % self.pkg.changes["sponsoremail"]
+ session.close()
if cnf.has_key("Dinstall::TrackingServer") and self.pkg.changes.has_key("source"):
self.Subst["__MAINTAINER_TO__"] += "\nBcc: %s@%s" % (self.pkg.changes["source"], cnf["Dinstall::TrackingServer"])
self.Subst["__REJECT_MESSAGE__"] = self.package_info()
self.Subst["__SOURCE__"] = self.pkg.changes.get("source", "Unknown")
self.Subst["__VERSION__"] = self.pkg.changes.get("version", "Unknown")
+ self.Subst["__SUITE__"] = ", ".join(self.pkg.changes["distribution"])
###########################################################################
def load_changes(self, filename):
"""
+ Load a changes file and setup a dictionary around it. Also checks for mandantory
+ fields within.
+
+ @type filename: string
+ @param filename: Changes filename, full path.
+
@rtype: boolean
- @rvalue: whether the changes file was valid or not. We may want to
+ @return: whether the changes file was valid or not. We may want to
reject even if this is True (see what gets put in self.rejects).
This is simply to prevent us even trying things later which will
fail because we couldn't properly parse the file.
self.pkg.changes["changedbyemail"] = ""
self.rejects.append("%s: Changed-By field ('%s') failed to parse: %s" \
- % (filename, changes["changed-by"], msg))
+ % (filename, self.pkg.changes["changed-by"], msg))
# Ensure all the values in Closes: are numbers
if self.pkg.changes.has_key("closes"):
# Check the .changes is non-empty
if not self.pkg.files:
- self.rejects.append("%s: nothing to do (Files field is empty)." % (base_filename))
+ self.rejects.append("%s: nothing to do (Files field is empty)." % (os.path.basename(self.pkg.changes_file)))
return False
# Changes was syntactically valid even if we'll reject
architecture = control.Find("Architecture")
upload_suite = self.pkg.changes["distribution"].keys()[0]
- if architecture not in [a.arch_string for a in get_suite_architectures(default_suite, session)] \
- and architecture not in [a.arch_string for a in get_suite_architectures(upload_suite, session)]:
+ if architecture not in [a.arch_string for a in get_suite_architectures(default_suite, session = session)] \
+ and architecture not in [a.arch_string for a in get_suite_architectures(upload_suite, session = session)]:
self.rejects.append("Unknown architecture '%s'." % (architecture))
# Ensure the architecture of the .deb is one of the ones
entry["new"] = 1
else:
dsc_file_exists = False
- for myq in ["Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
+ for myq in ["Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates", "Lenny-Volatile-Proposed-Updates"]:
if cnf.has_key("Dir::Queue::%s" % (myq)):
if os.path.exists(os.path.join(cnf["Dir::Queue::" + myq], dsc_filename)):
dsc_file_exists = True
location = cnf["Dir::Pool"]
l = get_location(location, entry["component"], session=session)
if l is None:
- self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %)" % entry["component"])
+ self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s)" % entry["component"])
entry["location id"] = -1
else:
entry["location id"] = l.location_id
for f in file_keys:
ret = holding.copy_to_holding(f)
if ret is not None:
- # XXX: Should we bail out here or try and continue?
- self.rejects.append(ret)
+ self.warnings.append('Could not copy %s to holding; will attempt to find in DB later' % f)
os.chdir(cwd)
if os.path.exists(f):
self.rejects.append("Can't read `%s'. [permission denied]" % (f))
else:
- self.rejects.append("Can't read `%s'. [file not found]" % (f))
+ # Don't directly reject, mark to check later to deal with orig's
+ # we can find in the pool
+ self.later_check_files.append(f)
entry["type"] = "unreadable"
continue
# Parse the .dsc file
try:
- self.pkg.dsc.update(utils.parse_changes(dsc_filename, signing_rules=1))
+ self.pkg.dsc.update(utils.parse_changes(dsc_filename, signing_rules=1, dsc_file=1))
except CantOpenError:
# if not -n copy_to_holding() will have done this for us...
if not action:
self.check_dsc_against_db(dsc_filename, session)
session.close()
+ # Finally, check if we're missing any files
+ for f in self.later_check_files:
+ self.rejects.append("Could not find file %s references in changes" % f)
+
return True
###########################################################################
def check_source(self):
# Bail out if:
# a) there's no source
- # or c) the orig files are MIA
- if not self.pkg.changes["architecture"].has_key("source") \
- or len(self.pkg.orig_files) == 0:
+ if not self.pkg.changes["architecture"].has_key("source"):
return
tmpdir = utils.temp_dirname()
found = False
# Look in the pool
- for poolfile in get_poolfile_like_name('/%s' % filename, session_):
+ for poolfile in get_poolfile_like_name('%s' % filename, session_):
poolfile_path = os.path.join(
poolfile.location.path, poolfile.filename
)
# Add the .dsc file to the DB first
for newfile, entry in self.pkg.files.items():
if entry["type"] == "dsc":
- dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
+ source, dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
for j in pfs:
poolfiles.append(j)
# If this is a sourceful diff only upload that is moving
# cross-component we need to copy the .orig files into the new
# component too for the same reasons as above.
+ # XXX: mhy: I think this should be in add_dsc_to_db
if self.pkg.changes["architecture"].has_key("source"):
for orig_file in self.pkg.orig_files.keys():
if not self.pkg.orig_files[orig_file].has_key("id"):
new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
# TODO: Care about size/md5sum collisions etc
- (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session)
+ (found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
+ # TODO: Uhm, what happens if newf isn't None - something has gone badly and we should cope
if newf is None:
utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
- # TODO: Check that there's only 1 here
- source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0]
- dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
- dscf.poolfile_id = newf.file_id
- session.add(dscf)
session.flush()
+ # Don't reference the old file from this changes
+ for p in poolfiles:
+ if p.file_id == oldf.file_id:
+ poolfiles.remove(p)
+
poolfiles.append(newf)
+ # Fix up the DSC references
+ toremove = []
+
+ for df in source.srcfiles:
+ if df.poolfile.file_id == oldf.file_id:
+ # Add a new DSC entry and mark the old one for deletion
+ # Don't do it in the loop so we don't change the thing we're iterating over
+ newdscf = DSCFile()
+ newdscf.source_id = source.source_id
+ newdscf.poolfile_id = newf.file_id
+ session.add(newdscf)
+
+ toremove.append(df)
+
+ for df in toremove:
+ session.delete(df)
+
+ # Flush our changes
+ session.flush()
+
+ # Make sure that our source object is up-to-date
+ session.expire(source)
+
+ # Add changelog information to the database
+ self.store_changelog()
+
# Install the files into the pool
for newfile, entry in self.pkg.files.items():
destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
- # Send accept mail, announce to lists, close bugs and check for
- # override disparities
- if not cnf["Dinstall::Options::No-Mail"]:
- self.update_subst()
- self.Subst["__SUITE__"] = ""
- self.Subst["__SUMMARY__"] = summary
- mail_message = utils.TemplateSubst(self.Subst,
- os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
- utils.send_mail(mail_message)
- self.announce(short_summary, 1)
+ self.update_subst()
+ self.Subst["__SUMMARY__"] = summary
+ mail_message = utils.TemplateSubst(self.Subst,
+ os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
+ utils.send_mail(mail_message)
+ self.announce(short_summary, 1)
## Helper stuff for DebBugs Version Tracking
if cnf.Find("Dir::Queue::BTSVersionTrack"):
- # ??? once queue/* is cleared on *.d.o and/or reprocessed
- # the conditionalization on dsc["bts changelog"] should be
- # dropped.
-
- # Write out the version history from the changelog
- if self.pkg.changes["architecture"].has_key("source") and \
- self.pkg.dsc.has_key("bts changelog"):
-
+ if self.pkg.changes["architecture"].has_key("source"):
(fd, temp_filename) = utils.temp_filename(cnf["Dir::Queue::BTSVersionTrack"], prefix=".")
version_history = os.fdopen(fd, 'w')
version_history.write(self.pkg.dsc["bts changelog"])
cnf = Config()
- # Abandon the check if:
- # a) override disparity checks have been disabled
- # b) we're not sending mail
- if not cnf.FindB("Dinstall::OverrideDisparityCheck") or \
- cnf["Dinstall::Options::No-Mail"]:
+ # Abandon the check if override disparity checks have been disabled
+ if not cnf.FindB("Dinstall::OverrideDisparityCheck"):
return
summary = self.pkg.check_override()
directory it will be moved to the morgue to make way for
the new file.
- @type files: dict
- @param files: file dictionary
+ @type reject_files: dict
+ @param reject_files: file dictionary
"""
try:
dest_fd = os.open(dest_file, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0644)
except OSError, e:
- # File exists? Let's try and move it to the morgue
+ # File exists? Let's find a new name by adding a number
if e.errno == errno.EEXIST:
- morgue_file = os.path.join(cnf["Dir::Morgue"], cnf["Dir::MorgueReject"], file_entry)
try:
- morgue_file = utils.find_next_free(morgue_file)
+ dest_file = utils.find_next_free(dest_file, 255)
except NoFreeFilenameError:
# Something's either gone badly Pete Tong, or
# someone is trying to exploit us.
- utils.warn("**WARNING** failed to move %s from the reject directory to the morgue." % (file_entry))
+ utils.warn("**WARNING** failed to find a free filename for %s in %s." % (file_entry, cnf["Dir::Queue::Reject"]))
return
- utils.move(dest_file, morgue_file, perms=0660)
+
+ # Make sure we really got it
try:
dest_fd = os.open(dest_file, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0644)
except OSError, e:
os.close(dest_fd)
###########################################################################
- def do_reject (self, manual=0, reject_message="", note=""):
+ def do_reject (self, manual=0, reject_message="", notes=""):
"""
Reject an upload. If called without a reject message or C{manual} is
true, spawn an editor so the user can write one.
if manual and not reject_message:
(fd, temp_filename) = utils.temp_filename()
temp_file = os.fdopen(fd, 'w')
- if len(note) > 0:
- for line in note:
- temp_file.write(line)
+ if len(notes) > 0:
+ for note in notes:
+ temp_file.write("\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s" \
+ % (note.author, note.version, note.notedate, note.comment))
temp_file.close()
editor = os.environ.get("EDITOR","vi")
answer = 'E'
os.close(reason_fd)
- # Send the rejection mail if appropriate
- if not cnf["Dinstall::Options::No-Mail"]:
- utils.send_mail(reject_mail_message)
+ # Send the rejection mail
+ utils.send_mail(reject_mail_message)
if self.logger:
self.logger.log(["rejected", self.pkg.changes_file])
cansave = 1
if not cansave:
- self.reject.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
+ self.rejects.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
################################################################################
def check_binary_against_db(self, filename, session):
orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
match = 1
+ # Don't bitch that we couldn't find this file later
+ try:
+ self.later_check_files.remove(dsc_name)
+ except ValueError:
+ pass
+
+
if not match:
self.rejects.append("can not overwrite existing copy of '%s' already in the archive." % (dsc_name))
if not self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile, session):
self.rejects.append("%s is NEW for %s." % (checkfile, suite))
- ################################################################################
- # This is not really a reject, but an unaccept, but since a) the code for
- # that is non-trivial (reopen bugs, unannounce etc.), b) this should be
- # extremely rare, for now we'll go with whining at our admin folks...
-
- def do_unaccept(self):
- cnf = Config()
-
- self.update_subst()
- self.Subst["__REJECTOR_ADDRESS__"] = cnf["Dinstall::MyEmailAddress"]
- self.Subst["__REJECT_MESSAGE__"] = self.package_info()
- self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]
- self.Subst["__BCC__"] = "X-DAK: dak process-accepted"
- if cnf.has_key("Dinstall::Bcc"):
- self.Subst["__BCC__"] += "\nBcc: %s" % (cnf["Dinstall::Bcc"])
-
- template = os.path.join(cnf["Dir::Templates"], "process-accepted.unaccept")
-
- reject_mail_message = utils.TemplateSubst(self.Subst, template)
-
- # Write the rejection email out as the <foo>.reason file
- reason_filename = os.path.basename(self.pkg.changes_file[:-8]) + ".reason"
- reject_filename = os.path.join(cnf["Dir::Queue::Reject"], reason_filename)
-
- # If we fail here someone is probably trying to exploit the race
- # so let's just raise an exception ...
- if os.path.exists(reject_filename):
- os.unlink(reject_filename)
-
- fd = os.open(reject_filename, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0644)
- os.write(fd, reject_mail_message)
- os.close(fd)
-
- utils.send_mail(reject_mail_message)
-
- del self.Subst["__REJECTOR_ADDRESS__"]
- del self.Subst["__REJECT_MESSAGE__"]
- del self.Subst["__CC__"]
-
################################################################################
# If any file of an upload has a recent mtime then chances are good
# the file is still being uploaded.
os.chdir(cwd)
return too_new
+
+    def store_changelog(self):
+        """
+        Store the changelog text of this upload in the database and link it
+        to the matching row in the changes table.  Binary-only uploads are
+        skipped unless they are bin-NMUs; if a changelog entry already
+        exists for this source/version/architecture this is a no-op.
+        """
+
+        # Skip binary-only upload if it is not a bin-NMU
+        if not self.pkg.changes['architecture'].has_key('source'):
+            from daklib.regexes import re_bin_only_nmu
+            if not re_bin_only_nmu.search(self.pkg.changes['version']):
+                return
+
+        session = DBConn().session()
+
+        # Make sure the session is always closed, even on error, so we do
+        # not leak database connections from every upload.
+        try:
+            # Check if upload already has a changelog entry
+            query = """SELECT changelog_id FROM changes WHERE source = :source
+                       AND version = :version AND architecture = :architecture AND changelog_id != 0"""
+            if session.execute(query, {'source': self.pkg.changes['source'], \
+                                       'version': self.pkg.changes['version'], \
+                                       'architecture': " ".join(self.pkg.changes['architecture'].keys())}).rowcount:
+                return
+
+            # Add current changelog text into changelogs_text table, return created ID
+            query = "INSERT INTO changelogs_text (changelog) VALUES (:changelog) RETURNING id"
+            ID = session.execute(query, {'changelog': self.pkg.changes['changes']}).fetchone()[0]
+
+            # Link ID to the upload available in changes table
+            query = """UPDATE changes SET changelog_id = :id WHERE source = :source
+                       AND version = :version AND architecture = :architecture"""
+            session.execute(query, {'id': ID, 'source': self.pkg.changes['source'], \
+                                    'version': self.pkg.changes['version'], \
+                                    'architecture': " ".join(self.pkg.changes['architecture'].keys())})
+
+            session.commit()
+        finally:
+            session.close()