- # Ensure the component is valid for the target suite
- if cnf.has_key("Suite:%s::Components" % (suite)) and \
- entry["component"] not in cnf.ValueList("Suite::%s::Components" % (suite)):
- self.rejects.append("unknown component `%s' for suite `%s'." % (entry["component"], suite))
- return
-
- # Validate the component
- if not get_component(entry["component"], session):
- self.rejects.append("file '%s' has unknown component '%s'." % (f, entry["component"]))
- return
-
- # See if the package is NEW
- if not self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), f, session):
- entry["new"] = 1
-
- # Validate the priority
- if entry["priority"].find('/') != -1:
- self.rejects.append("file '%s' has invalid priority '%s' [contains '/']." % (f, entry["priority"]))
-
- # Determine the location
- location = cnf["Dir::Pool"]
- l = get_location(location, entry["component"], session=session)
- if l is None:
- self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %)" % entry["component"])
- entry["location id"] = -1
- else:
- entry["location id"] = l.location_id
-
- # Check the md5sum & size against existing files (if any)
- entry["pool name"] = utils.poolify(self.pkg.changes["source"], entry["component"])
-
- found, poolfile = check_poolfile(os.path.join(entry["pool name"], f),
- entry["size"], entry["md5sum"], entry["location id"])
-
- if found is None:
- self.rejects.append("INTERNAL ERROR, get_files_id() returned multiple matches for %s." % (f))
- elif found is False and poolfile is not None:
- self.rejects.append("md5sum and/or size mismatch on existing copy of %s." % (f))
- else:
- if poolfile is None:
- entry["files id"] = None
- else:
- entry["files id"] = poolfile.file_id
-
- # Check for packages that have moved from one component to another
- entry['suite'] = suite
- res = get_binary_components(self.pkg.files[f]['package'], suite, entry["architecture"], session)
- if res.rowcount > 0:
- entry["othercomponents"] = res.fetchone()[0]
-
- def check_files(self, action=True):
- file_keys = self.pkg.files.keys()
- holding = Holding()
- cnf = Config()
-
- if action:
- cwd = os.getcwd()
- os.chdir(self.pkg.directory)
- for f in file_keys:
- ret = holding.copy_to_holding(f)
- if ret is not None:
- # XXX: Should we bail out here or try and continue?
- self.rejects.append(ret)
-
- os.chdir(cwd)
-
- # check we already know the changes file
- # [NB: this check must be done post-suite mapping]
- base_filename = os.path.basename(self.pkg.changes_file)
-
- session = DBConn().session()
-
- try:
- dbc = session.query(DBChange).filter_by(changesname=base_filename).one()
- # if in the pool or in a queue other than unchecked, reject
- if (dbc.in_queue is None) \
- or (dbc.in_queue is not None
- and dbc.in_queue.queue_name != 'unchecked'):
- self.rejects.append("%s file already known to dak" % base_filename)
- except NoResultFound, e:
- # not known, good
- pass
-
- has_binaries = False
- has_source = False
-
- for f, entry in self.pkg.files.items():
- # Ensure the file does not already exist in one of the accepted directories
- for d in [ "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
- if not cnf.has_key("Dir::Queue::%s" % (d)): continue
- if os.path.exists(os.path.join(cnf["Dir::Queue::%s" % (d) ], f)):
- self.rejects.append("%s file already exists in the %s directory." % (f, d))
-
- if not re_taint_free.match(f):
- self.rejects.append("!!WARNING!! tainted filename: '%s'." % (f))
-
- # Check the file is readable
- if os.access(f, os.R_OK) == 0:
- # When running in -n, copy_to_holding() won't have
- # generated the reject_message, so we need to.
- if action:
- if os.path.exists(f):
- self.rejects.append("Can't read `%s'. [permission denied]" % (f))
- else:
- self.rejects.append("Can't read `%s'. [file not found]" % (f))
- entry["type"] = "unreadable"
- continue
-
- # If it's byhand skip remaining checks
- if entry["section"] == "byhand" or entry["section"][:4] == "raw-":
- entry["byhand"] = 1
- entry["type"] = "byhand"
-
- # Checks for a binary package...
- elif re_isadeb.match(f):
- has_binaries = True
- entry["type"] = "deb"
-
- # This routine appends to self.rejects/warnings as appropriate
- self.binary_file_checks(f, session)
-
- # Checks for a source package...
- elif re_issource.match(f):
- has_source = True
-
- # This routine appends to self.rejects/warnings as appropriate
- self.source_file_checks(f, session)
-
- # Not a binary or source package? Assume byhand...
- else:
- entry["byhand"] = 1
- entry["type"] = "byhand"
-
- # Per-suite file checks
- entry["oldfiles"] = {}
- for suite in self.pkg.changes["distribution"].keys():
- self.per_suite_file_checks(f, suite, session)
-
- session.close()
-
- # If the .changes file says it has source, it must have source.
- if self.pkg.changes["architecture"].has_key("source"):
- if not has_source:
- self.rejects.append("no source found and Architecture line in changes mention source.")
-
- if not has_binaries and cnf.FindB("Dinstall::Reject::NoSourceOnly"):
- self.rejects.append("source only uploads are not supported.")
-
- ###########################################################################
- def check_dsc(self, action=True, session=None):
- """Returns bool indicating whether or not the source changes are valid"""
- # Ensure there is source to check
- if not self.pkg.changes["architecture"].has_key("source"):
- return True
-
- # Find the .dsc
- dsc_filename = None
- for f, entry in self.pkg.files.items():
- if entry["type"] == "dsc":
- if dsc_filename:
- self.rejects.append("can not process a .changes file with multiple .dsc's.")
- return False
- else:
- dsc_filename = f
-
- # If there isn't one, we have nothing to do. (We have reject()ed the upload already)
- if not dsc_filename:
- self.rejects.append("source uploads must contain a dsc file")
- return False
-
- # Parse the .dsc file
- try:
- self.pkg.dsc.update(utils.parse_changes(dsc_filename, signing_rules=1))
- except CantOpenError:
- # if not -n copy_to_holding() will have done this for us...
- if not action:
- self.rejects.append("%s: can't read file." % (dsc_filename))
- except ParseChangesError, line:
- self.rejects.append("%s: parse error, can't grok: %s." % (dsc_filename, line))
- except InvalidDscError, line:
- self.rejects.append("%s: syntax error on line %s." % (dsc_filename, line))
- except ChangesUnicodeError:
- self.rejects.append("%s: dsc file not proper utf-8." % (dsc_filename))
-
- # Build up the file list of files mentioned by the .dsc
- try:
- self.pkg.dsc_files.update(utils.build_file_list(self.pkg.dsc, is_a_dsc=1))
- except NoFilesFieldError:
- self.rejects.append("%s: no Files: field." % (dsc_filename))
- return False
- except UnknownFormatError, format:
- self.rejects.append("%s: unknown format '%s'." % (dsc_filename, format))
- return False
- except ParseChangesError, line:
- self.rejects.append("%s: parse error, can't grok: %s." % (dsc_filename, line))
- return False
-
- # Enforce mandatory fields
- for i in ("format", "source", "version", "binary", "maintainer", "architecture", "files"):
- if not self.pkg.dsc.has_key(i):
- self.rejects.append("%s: missing mandatory field `%s'." % (dsc_filename, i))
- return False
-
- # Validate the source and version fields
- if not re_valid_pkg_name.match(self.pkg.dsc["source"]):
- self.rejects.append("%s: invalid source name '%s'." % (dsc_filename, self.pkg.dsc["source"]))
- if not re_valid_version.match(self.pkg.dsc["version"]):
- self.rejects.append("%s: invalid version number '%s'." % (dsc_filename, self.pkg.dsc["version"]))
-
- # Only a limited list of source formats are allowed in each suite
- for dist in self.pkg.changes["distribution"].keys():
- allowed = [ x.format_name for x in get_suite_src_formats(dist, session) ]
- if self.pkg.dsc["format"] not in allowed:
- self.rejects.append("%s: source format '%s' not allowed in %s (accepted: %s) " % (dsc_filename, self.pkg.dsc["format"], dist, ", ".join(allowed)))
-
- # Validate the Maintainer field
- try:
- # We ignore the return value
- fix_maintainer(self.pkg.dsc["maintainer"])
- except ParseMaintError, msg:
- self.rejects.append("%s: Maintainer field ('%s') failed to parse: %s" \
- % (dsc_filename, self.pkg.dsc["maintainer"], msg))
-
- # Validate the build-depends field(s)
- for field_name in [ "build-depends", "build-depends-indep" ]:
- field = self.pkg.dsc.get(field_name)
- if field:
- # Have apt try to parse them...
- try:
- apt_pkg.ParseSrcDepends(field)
- except:
- self.rejects.append("%s: invalid %s field (can not be parsed by apt)." % (dsc_filename, field_name.title()))
-
- # Ensure the version number in the .dsc matches the version number in the .changes
- epochless_dsc_version = re_no_epoch.sub('', self.pkg.dsc["version"])
- changes_version = self.pkg.files[dsc_filename]["version"]
-
- if epochless_dsc_version != self.pkg.files[dsc_filename]["version"]:
- self.rejects.append("version ('%s') in .dsc does not match version ('%s') in .changes." % (epochless_dsc_version, changes_version))
-
- # Ensure the Files field contain only what's expected
- self.rejects.extend(check_dsc_files(dsc_filename, self.pkg.dsc, self.pkg.dsc_files))
-
- # Ensure source is newer than existing source in target suites
- session = DBConn().session()
- self.check_source_against_db(dsc_filename, session)
- self.check_dsc_against_db(dsc_filename, session)
- session.close()
-
- return True
-
- ###########################################################################
-
def get_changelog_versions(self, source_dir):
    """Extract the source package and (optionally) grab the
    version history out of debian/changelog for the BTS.

    Expects to be called with the current working directory set to a
    scratch directory (see check_source); symlinks the source files in
    from source_dir, runs dpkg-source -x, and stores the recognised
    changelog entries in self.pkg.dsc["bts changelog"]."""

    cnf = Config()

    # Find the .dsc (again)
    dsc_filename = None
    for f in self.pkg.files.keys():
        if self.pkg.files[f]["type"] == "dsc":
            dsc_filename = f

    # If there isn't one, we have nothing to do. (We have reject()ed the upload already)
    if not dsc_filename:
        return

    # Create a symlink mirror of the source files in our temporary directory
    for f in self.pkg.files.keys():
        m = re_issource.match(f)
        if m:
            src = os.path.join(source_dir, f)
            # If a file is missing for whatever reason, give up.
            if not os.path.exists(src):
                return
            # Orig tarballs already known elsewhere in the archive are
            # symlinked from their recorded path in the loop below.
            if re_is_orig_source.match(f) and self.pkg.orig_files.has_key(f) and \
               self.pkg.orig_files[f].has_key("path"):
                continue
            dest = os.path.join(os.getcwd(), f)
            os.symlink(src, dest)

    # If the orig files are not a part of the upload, create symlinks to the
    # existing copies.
    for orig_file in self.pkg.orig_files.keys():
        if not self.pkg.orig_files[orig_file].has_key("path"):
            continue
        dest = os.path.join(os.getcwd(), os.path.basename(orig_file))
        os.symlink(self.pkg.orig_files[orig_file]["path"], dest)

    # Extract the source
    cmd = "dpkg-source -sn -x %s" % (dsc_filename)
    (result, output) = commands.getstatusoutput(cmd)
    if (result != 0):
        self.rejects.append("'dpkg-source -x' failed for %s [return code: %s]." % (dsc_filename, result))
        self.rejects.append(utils.prefix_multi_line_string(output, " [dpkg-source output:] "))
        return

    if not cnf.Find("Dir::Queue::BTSVersionTrack"):
        return

    # Get the upstream version
    upstr_version = re_no_epoch.sub('', self.pkg.dsc["version"])
    if re_strip_revision.search(upstr_version):
        upstr_version = re_strip_revision.sub('', upstr_version)

    # Ensure the changelog file exists
    changelog_filename = "%s-%s/debian/changelog" % (self.pkg.dsc["source"], upstr_version)
    if not os.path.exists(changelog_filename):
        self.rejects.append("%s: debian/changelog not found in extracted source." % (dsc_filename))
        return

    # Parse the changelog
    self.pkg.dsc["bts changelog"] = ""
    changelog_file = utils.open_file(changelog_filename)
    for line in changelog_file.readlines():
        m = re_changelog_versions.match(line)
        if m:
            self.pkg.dsc["bts changelog"] += line
    changelog_file.close()

    # Check we found at least one revision in the changelog
    if not self.pkg.dsc["bts changelog"]:
        self.rejects.append("%s: changelog format not recognised (empty version tree)." % (dsc_filename))
-
- def check_source(self):
- # Bail out if:
- # a) there's no source
- # or c) the orig files are MIA
- if not self.pkg.changes["architecture"].has_key("source") \
- or len(self.pkg.orig_files) == 0:
- return
-
- tmpdir = utils.temp_dirname()
-
- # Move into the temporary directory
- cwd = os.getcwd()
- os.chdir(tmpdir)
-
- # Get the changelog version history
- self.get_changelog_versions(cwd)
-
- # Move back and cleanup the temporary tree
- os.chdir(cwd)
-
- try:
- shutil.rmtree(tmpdir)
- except OSError, e:
- if e.errno != errno.EACCES:
- print "foobar"
- utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
-
- self.rejects.append("%s: source tree could not be cleanly removed." % (self.pkg.dsc["source"]))
- # We probably have u-r or u-w directories so chmod everything
- # and try again.
- cmd = "chmod -R u+rwx %s" % (tmpdir)
- result = os.system(cmd)
- if result != 0:
- utils.fubar("'%s' failed with result %s." % (cmd, result))
- shutil.rmtree(tmpdir)
- except Exception, e:
- print "foobar2 (%s)" % e
- utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
-
- ###########################################################################
def ensure_hashes(self):
    """Ensure every checksum field required by the .changes Format
    version is present and populated, computing hashes the upload's
    format predates; problems are appended to self.rejects."""
    # Make sure we recognise the format of the Files: field in the .changes
    format = self.pkg.changes.get("format", "0.0").split(".", 1)
    if len(format) == 2:
        format = int(format[0]), int(format[1])
    else:
        format = int(float(format[0])), 0

    # We need to deal with the original changes blob, as the fields we need
    # might not be in the changes dict serialised into the .dak anymore.
    orig_changes = utils.parse_deb822(self.pkg.changes['filecontents'])

    # Copy the checksums over to the current changes dict. This will keep
    # the existing modifications to it intact.
    for field in orig_changes:
        if field.startswith('checksums-'):
            self.pkg.changes[field] = orig_changes[field]

    # Check for unsupported hashes
    for j in utils.check_hash_fields(".changes", self.pkg.changes):
        self.rejects.append(j)

    for j in utils.check_hash_fields(".dsc", self.pkg.dsc):
        self.rejects.append(j)

    # We have to calculate the hash if we have an earlier changes version than
    # the hash appears in rather than require it exist in the changes file
    for hashname, hashfunc, version in utils.known_hashes:
        # TODO: Move _ensure_changes_hash into this class
        for j in utils._ensure_changes_hash(self.pkg.changes, format, version, self.pkg.files, hashname, hashfunc):
            self.rejects.append(j)
        if "source" in self.pkg.changes["architecture"]:
            # TODO: Move _ensure_dsc_hash into this class
            for j in utils._ensure_dsc_hash(self.pkg.dsc, self.pkg.dsc_files, hashname, hashfunc):
                self.rejects.append(j)
-
def check_hashes(self):
    """Verify md5sums and sizes listed in the .changes and .dsc against
    the files themselves, then delegate to ensure_hashes for the newer
    checksum fields.  Failures are appended to self.rejects."""
    self.rejects.extend(utils.check_hash(".changes", self.pkg.files, "md5", apt_pkg.md5sum))
    self.rejects.extend(utils.check_size(".changes", self.pkg.files))
    self.rejects.extend(utils.check_hash(".dsc", self.pkg.dsc_files, "md5", apt_pkg.md5sum))
    self.rejects.extend(utils.check_size(".dsc", self.pkg.dsc_files))

    self.ensure_hashes()
-
- ###########################################################################
-
def ensure_orig(self, target_dir='.', session=None):
    """
    Ensures that all orig files mentioned in the changes file are present
    in target_dir. If they do not exist, they are symlinked into place.

    A list containing the symlinks that were created is returned (so they
    can be removed).
    """

    symlinked = []
    cnf = Config()

    for filename, entry in self.pkg.dsc_files.iteritems():
        if not re_is_orig_source.match(filename):
            # File is not an orig; ignore
            continue

        if os.path.exists(filename):
            # File exists, no need to continue
            continue

        # NB: closes over the current iteration's 'filename'/'entry';
        # only called within this iteration, so the late binding is safe.
        def symlink_if_valid(path):
            f = utils.open_file(path)
            md5sum = apt_pkg.md5sum(f)
            f.close()

            fingerprint = (os.stat(path)[stat.ST_SIZE], md5sum)
            expected = (int(entry['size']), entry['md5sum'])

            if fingerprint != expected:
                return False

            dest = os.path.join(target_dir, filename)

            os.symlink(path, dest)
            symlinked.append(dest)

            return True

        # Use the caller's session if given; otherwise open (and later
        # close) a private one.
        session_ = session
        if session is None:
            session_ = DBConn().session()

        found = False

        # Look in the pool
        for poolfile in get_poolfile_like_name('/%s' % filename, session_):
            poolfile_path = os.path.join(
                poolfile.location.path, poolfile.filename
            )

            if symlink_if_valid(poolfile_path):
                found = True
                break

        if session is None:
            session_.close()

        if found:
            continue

        # Look in some other queues for the file
        queues = ('New', 'Byhand', 'ProposedUpdates',
                  'OldProposedUpdates', 'Embargoed', 'Unembargoed')

        for queue in queues:
            if not cnf.get('Dir::Queue::%s' % queue):
                continue

            queuefile_path = os.path.join(
                cnf['Dir::Queue::%s' % queue], filename
            )

            if not os.path.exists(queuefile_path):
                # Does not exist in this queue
                continue

            if symlink_if_valid(queuefile_path):
                break

    return symlinked
-
- ###########################################################################
-
- def check_lintian(self):
- """
- Extends self.rejects by checking the output of lintian against tags
- specified in Dinstall::LintianTags.
- """
-
- cnf = Config()
-
- # Don't reject binary uploads
- if not self.pkg.changes['architecture'].has_key('source'):
- return
-
- # Only check some distributions
- for dist in ('unstable', 'experimental'):
- if dist in self.pkg.changes['distribution']:
- break
- else:
- return
-
- # If we do not have a tagfile, don't do anything
- tagfile = cnf.get("Dinstall::LintianTags")
- if tagfile is None:
- return
-
- # Parse the yaml file
- sourcefile = file(tagfile, 'r')
- sourcecontent = sourcefile.read()
- sourcefile.close()
-
- try:
- lintiantags = yaml.load(sourcecontent)['lintian']
- except yaml.YAMLError, msg:
- utils.fubar("Can not read the lintian tags file %s, YAML error: %s." % (tagfile, msg))
- return
-
- # Try and find all orig mentioned in the .dsc
- symlinked = self.ensure_orig()
-
- # Setup the input file for lintian
- fd, temp_filename = utils.temp_filename()
- temptagfile = os.fdopen(fd, 'w')
- for tags in lintiantags.values():
- temptagfile.writelines(['%s\n' % x for x in tags])
- temptagfile.close()
-
- try:
- cmd = "lintian --show-overrides --tags-from-file %s %s" % \
- (temp_filename, self.pkg.changes_file)
-
- result, output = commands.getstatusoutput(cmd)
- finally:
- # Remove our tempfile and any symlinks we created
- os.unlink(temp_filename)
-
- for symlink in symlinked:
- os.unlink(symlink)
-
- if result == 2:
- utils.warn("lintian failed for %s [return code: %s]." % \
- (self.pkg.changes_file, result))
- utils.warn(utils.prefix_multi_line_string(output, \
- " [possible output:] "))
-
- def log(*txt):
- if self.logger:
- self.logger.log(
- [self.pkg.changes_file, "check_lintian"] + list(txt)
- )
-
- # Generate messages
- parsed_tags = parse_lintian_output(output)
- self.rejects.extend(
- generate_reject_messages(parsed_tags, lintiantags, log=log)
- )
-
- ###########################################################################
def check_urgency(self):
    """Normalise the Urgency field for source uploads: default it when
    absent, lower-case it, and fall back to Urgency::Default (with a
    warning) when the value is not in Urgency::Valid."""
    cnf = Config()
    # Urgency only matters when source is being uploaded.
    if self.pkg.changes["architecture"].has_key("source"):
        if not self.pkg.changes.has_key("urgency"):
            self.pkg.changes["urgency"] = cnf["Urgency::Default"]
        self.pkg.changes["urgency"] = self.pkg.changes["urgency"].lower()
        if self.pkg.changes["urgency"] not in cnf.ValueList("Urgency::Valid"):
            self.warnings.append("%s is not a valid urgency; it will be treated as %s by testing." % \
                                 (self.pkg.changes["urgency"], cnf["Urgency::Default"]))
            self.pkg.changes["urgency"] = cnf["Urgency::Default"]