-
- ###########################################################################
-
- def remove(self, from_dir=None):
- """
- Used (for instance) in p-u to remove the package from unchecked.
-
- Also removes the package from the holding area.
- """
- if from_dir is None:
- from_dir = self.pkg.directory
- h = Holding()
-
- for f in self.pkg.files.keys():
- os.unlink(os.path.join(from_dir, f))
- if os.path.exists(os.path.join(h.holding_dir, f)):
- os.unlink(os.path.join(h.holding_dir, f))
-
- os.unlink(os.path.join(from_dir, self.pkg.changes_file))
- if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
- os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
-
- ###########################################################################
-
- def move_to_queue(self, queue):
- """
- Move files to a destination queue using the permissions in the table.
- """
- h = Holding()
- utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
- queue.path, perms=int(queue.change_perms, 8))
- for f in self.pkg.files.keys():
- utils.move(os.path.join(h.holding_dir, f), queue.path, perms=int(queue.perms, 8))
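-
- # The queue table stores its permission columns as octal strings
- # (e.g. "0660"), hence the int(x, 8) conversions above. A quick
- # doctest-style sanity check (hypothetical values, not read from
- # the table):
- #
- # >>> int("0660", 8)
- # 432
- # >>> oct(int("0660", 8))
- # '0660'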
-
- ###########################################################################
-
- def force_reject(self, reject_files):
- """
- Forcefully move files from the current directory to the
- reject directory. If any file already exists in the reject
- directory it will be moved to the morgue to make way for
- the new file.
-
- @type reject_files: list
- @param reject_files: names of the files to move into the reject directory
-
- """
-
- cnf = Config()
-
- for file_entry in reject_files:
- # Skip any files which don't exist or which we don't have permission to copy.
- if not os.access(file_entry, os.R_OK):
- continue
-
- dest_file = os.path.join(cnf["Dir::Queue::Reject"], file_entry)
-
- try:
- dest_fd = os.open(dest_file, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0644)
- except OSError, e:
- # File exists? Let's find a new name by adding a number
- if e.errno == errno.EEXIST:
- try:
- dest_file = utils.find_next_free(dest_file, 255)
- except NoFreeFilenameError:
- # Something's either gone badly Pete Tong, or
- # someone is trying to exploit us.
- utils.warn("**WARNING** failed to find a free filename for %s in %s." % (file_entry, cnf["Dir::Queue::Reject"]))
- return
-
- # Make sure we really got it
- try:
- dest_fd = os.open(dest_file, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0644)
- except OSError, e:
- # Likewise
- utils.warn("**WARNING** failed to claim %s in the reject directory." % (file_entry))
- return
- else:
- raise
- # If we got here, we own the destination file, so we can
- # safely overwrite it.
- utils.move(file_entry, dest_file, 1, perms=0660)
- os.close(dest_fd)
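-
- # A minimal sketch of the claim-then-overwrite idiom used above
- # (hypothetical path, not dak configuration). O_EXCL makes the
- # second open() fail with EEXIST rather than silently reusing the
- # file, so a successful open() means we own the name:
- #
- # >>> import os, errno
- # >>> fd = os.open("/tmp/example.reason", os.O_RDWR | os.O_CREAT | os.O_EXCL, 0644)
- # >>> try:
- # ...     os.open("/tmp/example.reason", os.O_RDWR | os.O_CREAT | os.O_EXCL, 0644)
- # ... except OSError, e:
- # ...     print e.errno == errno.EEXIST
- # True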
-
- ###########################################################################
- def do_reject(self, manual=0, reject_message="", notes=""):
- """
- Reject an upload. If C{manual} is true and no reject message is
- given, spawn an editor so the user can write one.
-
- @type manual: bool
- @param manual: manual or automated rejection
-
- @type reject_message: string
- @param reject_message: A reject message
-
- @type notes: list
- @param notes: note objects (author/version/notedate/comment) to preload into the editor
-
- @return: 0 on rejection, 1 if the rejection was abandoned
-
- """
- # If we weren't given a manual rejection message, spawn an
- # editor so the user can add one in...
- if manual and not reject_message:
- (fd, temp_filename) = utils.temp_filename()
- temp_file = os.fdopen(fd, 'w')
- if len(notes) > 0:
- for note in notes:
- temp_file.write("\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s" \
- % (note.author, note.version, note.notedate, note.comment))
- temp_file.close()
- editor = os.environ.get("EDITOR","vi")
- answer = 'E'
- while answer == 'E':
- os.system("%s %s" % (editor, temp_filename))
- temp_fh = utils.open_file(temp_filename)
- reject_message = "".join(temp_fh.readlines())
- temp_fh.close()
- print "Reject message:"
- print utils.prefix_multi_line_string(reject_message," ",include_blank_lines=1)
- prompt = "[R]eject, Edit, Abandon, Quit ?"
- answer = "XXX"
- while prompt.find(answer) == -1:
- answer = utils.our_raw_input(prompt)
- m = re_default_answer.search(prompt)
- if answer == "":
- answer = m.group(1)
- answer = answer[:1].upper()
- os.unlink(temp_filename)
- if answer == 'A':
- return 1
- elif answer == 'Q':
- sys.exit(0)
-
- print "Rejecting.\n"
-
- cnf = Config()
-
- reason_filename = self.pkg.changes_file[:-8] + ".reason"
- reason_filename = os.path.join(cnf["Dir::Queue::Reject"], reason_filename)
-
- # Move all the files into the reject directory
- reject_files = self.pkg.files.keys() + [self.pkg.changes_file]
- self.force_reject(reject_files)
-
- # If we fail here someone is probably trying to exploit the race
- # so let's just raise an exception ...
- if os.path.exists(reason_filename):
- os.unlink(reason_filename)
- reason_fd = os.open(reason_filename, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0644)
-
- rej_template = os.path.join(cnf["Dir::Templates"], "queue.rejected")
-
- self.update_subst()
- if not manual:
- self.Subst["__REJECTOR_ADDRESS__"] = cnf["Dinstall::MyEmailAddress"]
- self.Subst["__MANUAL_REJECT_MESSAGE__"] = ""
- self.Subst["__CC__"] = "X-DAK-Rejection: automatic (moo)"
- os.write(reason_fd, reject_message)
- reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
- else:
- # Build up the rejection email
- user_email_address = utils.whoami() + " <%s>" % (cnf["Dinstall::MyAdminAddress"])
- self.Subst["__REJECTOR_ADDRESS__"] = user_email_address
- self.Subst["__MANUAL_REJECT_MESSAGE__"] = reject_message
- self.Subst["__REJECT_MESSAGE__"] = ""
- self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]
- reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
- # Write the rejection email out as the <foo>.reason file
- os.write(reason_fd, reject_mail_message)
-
- del self.Subst["__REJECTOR_ADDRESS__"]
- del self.Subst["__MANUAL_REJECT_MESSAGE__"]
- del self.Subst["__CC__"]
-
- os.close(reason_fd)
-
- # Send the rejection mail
- utils.send_mail(reject_mail_message)
-
- if self.logger:
- self.logger.log(["rejected", self.pkg.changes_file])
-
- stats = SummaryStats()
- stats.reject_count += 1
- return 0
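-
- # The prompt loop above encodes its default answer in brackets and
- # falls back to it on a bare return. A sketch of the same idea with
- # a plain regex (daklib's actual re_default_answer may differ):
- #
- # >>> import re
- # >>> prompt = "[R]eject, Edit, Abandon, Quit ?"
- # >>> re.search(r"\[(.)\]", prompt).group(1)
- # 'R'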
-
- ################################################################################
- def in_override_p(self, package, component, suite, binary_type, filename, session):
- """
- Check if a package already has override entries in the DB
-
- @type package: string
- @param package: package name
-
- @type component: string
- @param component: component name
-
- @type suite: string
- @param suite: suite name
-
- @type binary_type: string
- @param binary_type: type of the package
-
- @type filename: string
- @param filename: filename we check
-
- @return: the override entry, or C{None} if there is none. (Most callers ignore it anyway.)
-
- """
-
- cnf = Config()
-
- if binary_type == "": # must be source
- file_type = "dsc"
- else:
- file_type = binary_type
-
- # Override suite name; used for example with proposed-updates
- oldsuite = get_suite(suite, session)
- if oldsuite is not None and oldsuite.overridesuite:
- suite = oldsuite.overridesuite
-
- result = get_override(package, suite, component, file_type, session)
-
- # If checking for a source package, fall back on the binary override type
- if file_type == "dsc" and len(result) < 1:
- result = get_override(package, suite, component, ['deb', 'udeb'], session)
-
- # Remember the section and priority so we can check them later if appropriate
- if len(result) > 0:
- result = result[0]
- self.pkg.files[filename]["override section"] = result.section.section
- self.pkg.files[filename]["override priority"] = result.priority.priority
- return result
-
- return None
-
- ################################################################################
- def get_anyversion(self, sv_list, suite):
- """
- @type sv_list: list
- @param sv_list: list of (suite, version) tuples to check
-
- @type suite: string
- @param suite: suite name
-
- Description: TODO
- """
- Cnf = Config()
- anyversion = None
- anysuite = [suite] + [ vc.reference.suite_name for vc in get_version_checks(suite, "Enhances") ]
- for (s, v) in sv_list:
- if s in [ x.lower() for x in anysuite ]:
- if not anyversion or apt_pkg.VersionCompare(anyversion, v) <= 0:
- anyversion = v
-
- return anyversion
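-
- # apt_pkg.VersionCompare(a, b) follows cmp() conventions: negative
- # when a is older than b, zero when equal, positive when newer, with
- # full Debian epoch/revision handling. Doctest-style illustration
- # (assumes python-apt is installed and initialised):
- #
- # >>> import apt_pkg; apt_pkg.init()
- # >>> apt_pkg.VersionCompare("1:1.0-1", "2.0-1") > 0
- # True
- # >>> apt_pkg.VersionCompare("1.0-1", "1.0-2") < 0
- # True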
-
- ################################################################################
-
- def cross_suite_version_check(self, sv_list, filename, new_version, sourceful=False):
- """
- @type sv_list: list
- @param sv_list: list of (suite, version) tuples to check
-
- @type filename: string
- @param filename: name of the file being checked; only used in reject messages
-
- @type new_version: string
- @param new_version: version of the uploaded package
-
- @type sourceful: bool
- @param sourceful: whether the upload includes source
-
- Ensure versions are newer than existing packages in target
- suites and that cross-suite version checking rules as
- set out in the conf file are satisfied.
- """
-
- cnf = Config()
-
- # Check versions for each target suite
- for target_suite in self.pkg.changes["distribution"].keys():
- # Check we can find the target suite
- ts = get_suite(target_suite)
- if ts is None:
- self.rejects.append("Cannot find target suite %s to perform version checks" % target_suite)
- continue
-
- must_be_newer_than = [ vc.reference.suite_name for vc in get_version_checks(target_suite, "MustBeNewerThan") ]
- must_be_older_than = [ vc.reference.suite_name for vc in get_version_checks(target_suite, "MustBeOlderThan") ]
-
- # Enforce "must be newer than target suite" even if conffile omits it
- if target_suite not in must_be_newer_than:
- must_be_newer_than.append(target_suite)
-
- for (suite, existent_version) in sv_list:
- vercmp = apt_pkg.VersionCompare(new_version, existent_version)
-
- if suite in must_be_newer_than and sourceful and vercmp < 1:
- self.rejects.append("%s: old version (%s) in %s >= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
-
- if suite in must_be_older_than and vercmp > -1:
- cansave = 0
-
- if self.pkg.changes.get('distribution-version', {}).has_key(suite):
- # we really use the other suite, ignoring the conflicting one ...
- addsuite = self.pkg.changes["distribution-version"][suite]
-
- add_version = self.get_anyversion(sv_list, addsuite)
- target_version = self.get_anyversion(sv_list, target_suite)
-
- if not add_version:
- # not add_version can only happen if we map to a suite
- # that doesn't enhance the suite we're propup'ing from.
- # So "propup-ver x a b c; map a d" is a problem only if
- # d doesn't enhance a.
- #
- # I think we could always propagate in this case, rather
- # than complaining. Either way, this isn't a REJECT issue.
- #
- # And - we really should complain to the dorks who configured dak
- self.warnings.append("%s is mapped to, but not enhanced by %s - adding anyway" % (suite, addsuite))
- self.pkg.changes.setdefault("propdistribution", {})
- self.pkg.changes["propdistribution"][addsuite] = 1
- cansave = 1
- elif not target_version:
- # not target_version is true when the package is NEW.
- # We could just stick with the "...old version..." REJECT
- # for this, I think.
- self.rejects.append("Won't propagate NEW packages.")
- elif apt_pkg.VersionCompare(new_version, add_version) < 0:
- # Propagation would be redundant. No need to reject though.
- self.warnings.append("ignoring version conflict: %s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
- cansave = 1
- elif apt_pkg.VersionCompare(new_version, add_version) > 0 and \
- apt_pkg.VersionCompare(add_version, target_version) >= 0:
- # propagate!!
- self.warnings.append("Propagating upload to %s" % (addsuite))
- self.pkg.changes.setdefault("propdistribution", {})
- self.pkg.changes["propdistribution"][addsuite] = 1
- cansave = 1
-
- if not cansave:
- self.rejects.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
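-
- # Worked example (hypothetical suites and versions): an upload of
- # 1.3-1 targeted at "stable" while "unstable" already carries 1.2-1
- # trips the MustBeOlderThan check, since
- # apt_pkg.VersionCompare("1.3-1", "1.2-1") > -1. If the changes'
- # distribution-version mapping points "stable" at another suite, the
- # branches above then decide between propagating there and rejecting.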
-
- ################################################################################
- def check_binary_against_db(self, filename, session):
- # Ensure version is sane
- self.cross_suite_version_check( \
- get_suite_version_by_package(self.pkg.files[filename]["package"], \
- self.pkg.files[filename]["architecture"], session),
- filename, self.pkg.files[filename]["version"], sourceful=False)
-
- # Check for any existing copies of the file
- q = session.query(DBBinary).filter_by(package=self.pkg.files[filename]["package"])
- q = q.filter_by(version=self.pkg.files[filename]["version"])
- q = q.join(Architecture).filter_by(arch_string=self.pkg.files[filename]["architecture"])
-
- if q.count() > 0:
- self.rejects.append("%s: can not overwrite existing copy already in the archive." % filename)
-
- ################################################################################
-
- def check_source_against_db(self, filename, session):
- source = self.pkg.dsc.get("source")
- version = self.pkg.dsc.get("version")
-
- # Ensure version is sane
- self.cross_suite_version_check( \
- get_suite_version_by_source(source, session), filename, version,
- sourceful=True)
-
- ################################################################################
- def check_dsc_against_db(self, filename, session):
- """
-
- @warning: this function can remove entries from the 'files' index [if
- the orig tarball is a duplicate of the one in the archive]; if
- you're iterating over 'files' and call this function as part of
- the loop, be sure to add a check to the top of the loop to
- ensure you haven't just tried to dereference the deleted entry.
-
- """
-
- Cnf = Config()
- self.pkg.orig_files = {} # XXX: do we need to clear it?
- orig_files = self.pkg.orig_files
-
- # Try and find all files mentioned in the .dsc. This has
- # to work harder to cope with the multiple possible
- # locations of an .orig.tar.gz.
- # The ordering on the select is needed to pick the newest orig
- # when it exists in multiple places.
- for dsc_name, dsc_entry in self.pkg.dsc_files.items():
- found = None
- if self.pkg.files.has_key(dsc_name):
- actual_md5 = self.pkg.files[dsc_name]["md5sum"]
- actual_size = int(self.pkg.files[dsc_name]["size"])
- found = "%s in incoming" % (dsc_name)
-
- # Check the file does not already exist in the archive
- ql = get_poolfile_like_name(dsc_name, session)
-
- # Keep only entries whose filename actually ends in dsc_name;
- # filter into a new list instead of calling remove() while
- # iterating, which would skip elements.
- ql = [i for i in ql if i.filename.endswith(dsc_name)]
-
- # "[dak] has not broken them. [dak] has fixed a
- # brokenness. Your crappy hack exploited a bug in
- # the old dinstall.
- #
- # "(Come on! I thought it was always obvious that
- # one just doesn't release different files with
- # the same name and version.)"
- # -- ajk@ on d-devel@l.d.o
-
- if len(ql) > 0:
- # Ignore exact matches for .orig.tar.gz
- match = 0
- if re_is_orig_source.match(dsc_name):
- for i in ql:
- if self.pkg.files.has_key(dsc_name) and \
- int(self.pkg.files[dsc_name]["size"]) == int(i.filesize) and \
- self.pkg.files[dsc_name]["md5sum"] == i.md5sum:
- self.warnings.append("ignoring %s, since it's already in the archive." % (dsc_name))
- # TODO: Don't delete the entry, just mark it as not needed
- # This would fix the stupidity of changing something we often iterate over
- # whilst we're doing it
- del self.pkg.files[dsc_name]
- dsc_entry["files id"] = i.file_id
- if not orig_files.has_key(dsc_name):
- orig_files[dsc_name] = {}
- orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
- match = 1
-
- # Don't bitch that we couldn't find this file later
- try:
- self.later_check_files.remove(dsc_name)
- except ValueError:
- pass
-
-
- if not match:
- self.rejects.append("can not overwrite existing copy of '%s' already in the archive." % (dsc_name))
-
- elif re_is_orig_source.match(dsc_name):
- # Check in the pool
- ql = get_poolfile_like_name(dsc_name, session)
-
- # Keep only entries whose filename actually ends in dsc_name;
- # again, filter rather than calling remove() while iterating.
- # TODO: Shouldn't we just search for things which end with our string explicitly in the SQL?
- ql = [i for i in ql if i.filename.endswith(dsc_name)]
-
- if len(ql) > 0:
- # Unfortunately, we may get more than one match here if,
- # for example, the package was in potato but had an -sa
- # upload in woody. So we need to choose the right one.
-
- # default to something sane in case we don't match any or have only one
- x = ql[0]
-
- if len(ql) > 1:
- for i in ql:
- old_file = os.path.join(i.location.path, i.filename)
- old_file_fh = utils.open_file(old_file)
- actual_md5 = apt_pkg.md5sum(old_file_fh)
- old_file_fh.close()
- actual_size = os.stat(old_file)[stat.ST_SIZE]
- if actual_md5 == dsc_entry["md5sum"] and actual_size == int(dsc_entry["size"]):
- x = i
-
- old_file = os.path.join(x.location.path, x.filename)
- old_file_fh = utils.open_file(old_file)
- actual_md5 = apt_pkg.md5sum(old_file_fh)
- old_file_fh.close()
- actual_size = os.stat(old_file)[stat.ST_SIZE]
- found = old_file
- suite_type = x.location.archive_type
- # need this for updating dsc_files in install()
- dsc_entry["files id"] = x.file_id
- # See install() in process-accepted...
- if not orig_files.has_key(dsc_name):
- orig_files[dsc_name] = {}
- orig_files[dsc_name]["id"] = x.file_id
- orig_files[dsc_name]["path"] = old_file
- orig_files[dsc_name]["location"] = x.location.location_id
- else:
- # TODO: Record the queues and info in the DB so we don't hardcode all this crap
- # Not there? Check the queue directories...
- for directory in [ "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
- if not Cnf.has_key("Dir::Queue::%s" % (directory)):
- continue
- in_otherdir = os.path.join(Cnf["Dir::Queue::%s" % (directory)], dsc_name)
- if os.path.exists(in_otherdir):
- in_otherdir_fh = utils.open_file(in_otherdir)
- actual_md5 = apt_pkg.md5sum(in_otherdir_fh)
- in_otherdir_fh.close()
- actual_size = os.stat(in_otherdir)[stat.ST_SIZE]
- found = in_otherdir
- if not orig_files.has_key(dsc_name):
- orig_files[dsc_name] = {}
- orig_files[dsc_name]["path"] = in_otherdir
-
- if not found:
- self.rejects.append("%s refers to %s, but I can't find it in the queue or in the pool." % (filename, dsc_name))
- continue
- else:
- self.rejects.append("%s refers to %s, but I can't find it in the queue." % (filename, dsc_name))
- continue
- if actual_md5 != dsc_entry["md5sum"]:
- self.rejects.append("md5sum for %s doesn't match %s." % (found, filename))
- if actual_size != int(dsc_entry["size"]):
- self.rejects.append("size for %s doesn't match %s." % (found, filename))
-
- ################################################################################
- # This is used by process-new and process-holding to recheck a changes file
- # at the time we're running. It mainly wraps various other internal functions
- # and is similar to accepted_checks - these should probably be tidied up
- # and combined
- def recheck(self, session):
- cnf = Config()
- for f in self.pkg.files.keys():
- # The .orig.tar.gz can disappear out from under us if it's a
- # duplicate of one in the archive.
- if not self.pkg.files.has_key(f):
- continue
-
- entry = self.pkg.files[f]
-
- # Check that the source still exists
- if entry["type"] == "deb":
- source_version = entry["source version"]
- source_package = entry["source package"]
- if not self.pkg.changes["architecture"].has_key("source") \
- and not source_exists(source_package, source_version, \
- suites = self.pkg.changes["distribution"].keys(), session = session):
- source_epochless_version = re_no_epoch.sub('', source_version)
- dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
- found = False
- for q in ["Embargoed", "Unembargoed", "Newstage"]:
- if cnf.has_key("Dir::Queue::%s" % (q)):
- if os.path.exists(cnf["Dir::Queue::%s" % (q)] + '/' + dsc_filename):
- found = True
- if not found:
- self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, f))
-
- # Version and file overwrite checks
- if entry["type"] == "deb":
- self.check_binary_against_db(f, session)
- elif entry["type"] == "dsc":
- self.check_source_against_db(f, session)
- self.check_dsc_against_db(f, session)
-
- ################################################################################
- def accepted_checks(self, overwrite_checks, session):
- # Recheck anything that relies on the database, since the DB isn't
- # frozen between accept and our run time when called from p-a.
-
- # overwrite_checks is set to False when installing to stable/oldstable
-
- propagate = {}
- nopropagate = {}
-
- # Find the .dsc (again)
- dsc_filename = None
- for f in self.pkg.files.keys():
- if self.pkg.files[f]["type"] == "dsc":
- dsc_filename = f
-
- for checkfile in self.pkg.files.keys():
- # The .orig.tar.gz can disappear out from under us if it's a
- # duplicate of one in the archive.
- if not self.pkg.files.has_key(checkfile):
- continue
-
- entry = self.pkg.files[checkfile]
-
- # Check that the source still exists
- if entry["type"] == "deb":
- source_version = entry["source version"]
- source_package = entry["source package"]
- if not self.pkg.changes["architecture"].has_key("source") \
- and not source_exists(source_package, source_version, \
- suites = self.pkg.changes["distribution"].keys(), \
- session = session):
- self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, checkfile))
-
- # Version and file overwrite checks
- if overwrite_checks:
- if entry["type"] == "deb":
- self.check_binary_against_db(checkfile, session)
- elif entry["type"] == "dsc":
- self.check_source_against_db(checkfile, session)
- self.check_dsc_against_db(dsc_filename, session)
-
- # Propagate in the case it is in the override tables:
- for suite in self.pkg.changes.get("propdistribution", {}).keys():
- if self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile, session):
- propagate[suite] = 1
- else:
- nopropagate[suite] = 1
-
- for suite in propagate.keys():
- if suite in nopropagate:
- continue
- self.pkg.changes["distribution"][suite] = 1
-
- for checkfile in self.pkg.files.keys():
- # Re-fetch the entry for this file; the loop variable from the
- # loop above would otherwise leak in here.
- entry = self.pkg.files[checkfile]
- # Check the package is still in the override tables
- for suite in self.pkg.changes["distribution"].keys():
- if not self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile, session):
- self.rejects.append("%s is NEW for %s." % (checkfile, suite))
-
- ################################################################################
- # If any file of an upload has a recent mtime then chances are good
- # the file is still being uploaded.
-
- def upload_too_new(self):
- cnf = Config()
- too_new = False
- # Move back to the original directory to get accurate time stamps
- cwd = os.getcwd()
- os.chdir(self.pkg.directory)
- file_list = self.pkg.files.keys()
- file_list.extend(self.pkg.dsc_files.keys())
- file_list.append(self.pkg.changes_file)
- for f in file_list:
- try:
- last_modified = time.time()-os.path.getmtime(f)
- if last_modified < int(cnf["Dinstall::SkipTime"]):
- too_new = True
- break
- except OSError:
- # The file vanished while we were looking; ignore it.
- pass
-
- os.chdir(cwd)
- return too_new
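-
- # A standalone sketch of the same heuristic, assuming a hypothetical
- # 300-second window in place of Dinstall::SkipTime:
- #
- # >>> import os, time
- # >>> def looks_too_new(path, window=300):
- # ...     try:
- # ...         return time.time() - os.path.getmtime(path) < window
- # ...     except OSError:
- # ...         return False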
-
- def store_changelog(self):
-
- # Skip binary-only upload if it is not a bin-NMU
- if not self.pkg.changes['architecture'].has_key('source'):
- from daklib.regexes import re_bin_only_nmu
- if not re_bin_only_nmu.search(self.pkg.changes['version']):
- return
-
- session = DBConn().session()
-
- # Check if upload already has a changelog entry
- query = """SELECT changelog_id FROM changes WHERE source = :source
- AND version = :version AND architecture = :architecture AND changelog_id != 0"""
- if session.execute(query, {'source': self.pkg.changes['source'], \
- 'version': self.pkg.changes['version'], \
- 'architecture': " ".join(self.pkg.changes['architecture'].keys())}).rowcount:
- session.commit()
- return
-
- # Add current changelog text into changelogs_text table, return created ID
- query = "INSERT INTO changelogs_text (changelog) VALUES (:changelog) RETURNING id"
- ID = session.execute(query, {'changelog': self.pkg.changes['changes']}).fetchone()[0]
-
- # Link ID to the upload available in changes table
- query = """UPDATE changes SET changelog_id = :id WHERE source = :source
- AND version = :version AND architecture = :architecture"""
- session.execute(query, {'id': ID, 'source': self.pkg.changes['source'], \
- 'version': self.pkg.changes['version'], \
- 'architecture': " ".join(self.pkg.changes['architecture'].keys())})
-
- session.commit()
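-
- # Note on the INSERT above: "RETURNING id" is PostgreSQL syntax (dak
- # targets postgres); it hands back the generated primary key in the
- # same round trip, so fetchone()[0] is all that's needed to link the
- # new changelogs_text row to the changes table.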