import errno
import os
-import pg
import stat
import sys
import time
import commands
import shutil
import textwrap
-import tempfile
from types import *
import yaml
self.pkg.reset()
def package_info(self):
    """
    Format various messages from this Upload to send to the maintainer.

    Collects the reject reasons, warnings and notes accumulated on this
    upload and renders each non-empty category under its own heading.
    Returns '' when nothing has been recorded.
    """
    sections = (
        ('Reject Reasons', self.rejects),
        ('Warnings', self.warnings),
        ('Notes', self.notes),
    )

    # Build with join rather than repeated += so message assembly stays
    # linear in the number of entries.
    return ''.join(
        '\n\n%s:\n%s' % (title, '\n'.join(entries))
        for title, entries in sections
        if entries
    )
# If 'dak process-unchecked' crashed out in the right place, architecture may still be a string.
if not self.pkg.changes.has_key("architecture") or not \
- isinstance(self.pkg.changes["architecture"], DictType):
+ isinstance(self.pkg.changes["architecture"], dict):
self.pkg.changes["architecture"] = { "Unknown" : "" }
# and maintainer2047 may not exist.
fix_maintainer (self.pkg.changes["maintainer"])
except ParseMaintError, msg:
self.rejects.append("%s: Maintainer field ('%s') failed to parse: %s" \
- % (filename, changes["maintainer"], msg))
+ % (filename, self.pkg.changes["maintainer"], msg))
# ...likewise for the Changed-By: field if it exists.
try:
# Check there isn't already a changes file of the same name in one
# of the queue directories.
base_filename = os.path.basename(filename)
- for d in [ "Accepted", "Byhand", "Done", "New", "ProposedUpdates", "OldProposedUpdates" ]:
- if os.path.exists(os.path.join(Cnf["Dir::Queue::%s" % (d) ], base_filename)):
- self.rejects.append("%s: a file with this name already exists in the %s directory." % (base_filename, d))
+ if get_knownchange(base_filename):
+ self.rejects.append("%s: a file with this name already exists." % (base_filename))
# Check the .changes is non-empty
if not self.pkg.files:
# Validate the component
if not get_component(entry["component"], session):
- self.rejects.append("file '%s' has unknown component '%s'." % (f, component))
+ self.rejects.append("file '%s' has unknown component '%s'." % (f, entry["component"]))
return
# See if the package is NEW
location = cnf["Dir::Pool"]
l = get_location(location, entry["component"], archive, session)
if l is None:
- self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (component, archive))
+ self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (entry["component"], archive))
entry["location id"] = -1
else:
entry["location id"] = l.location_id
self.ensure_hashes()
###########################################################################
- def check_lintian(self):
- cnf = Config()
- # Only check some distributions
- valid_dist = False
- for dist in ('unstable', 'experimental'):
- if dist in self.pkg.changes['distribution']:
- valid_dist = True
- break
-
- if not valid_dist:
- return
-
- tagfile = cnf.get("Dinstall::LintianTags")
- if tagfile is None:
- # We don't have a tagfile, so just don't do anything.
- return
+ def ensure_orig(self, target_dir='.', session=None):
+ """
+ Ensures that all orig files mentioned in the changes file are present
+ in target_dir. If they do not exist, they are symlinked into place.
- # Parse the yaml file
- sourcefile = file(tagfile, 'r')
- sourcecontent = sourcefile.read()
- sourcefile.close()
- try:
- lintiantags = yaml.load(sourcecontent)['lintian']
- except yaml.YAMLError, msg:
- utils.fubar("Can not read the lintian tags file %s, YAML error: %s." % (tagfile, msg))
- return
+ An list containing the symlinks that were created are returned (so they
+ can be removed).
+ """
- # Try and find all orig mentioned in the .dsc
- target_dir = '.'
symlinked = []
+ cnf = Config()
+
for filename, entry in self.pkg.dsc_files.iteritems():
if not re_is_orig_source.match(filename):
# File is not an orig; ignore
return True
- session = DBConn().session()
+ session_ = session
+ if session is None:
+ session_ = DBConn().session()
+
found = False
# Look in the pool
- for poolfile in get_poolfile_like_name('/%s' % filename, session):
+ for poolfile in get_poolfile_like_name('/%s' % filename, session_):
poolfile_path = os.path.join(
poolfile.location.path, poolfile.filename
)
found = True
break
- session.close()
+ if session is None:
+ session_.close()
if found:
continue
'OldProposedUpdates', 'Embargoed', 'Unembargoed')
for queue in queues:
- if 'Dir::Queue::%s' % directory not in cnf:
+ if not cnf.get('Dir::Queue::%s' % queue):
continue
queuefile_path = os.path.join(
- cnf['Dir::Queue::%s' % directory], filename
+ cnf['Dir::Queue::%s' % queue], filename
)
if not os.path.exists(queuefile_path):
if symlink_if_valid(queuefile_path):
break
+ return symlinked
+
+ ###########################################################################
+
+ def check_lintian(self):
+ cnf = Config()
+
+ # Don't reject binary uploads
+ if not self.pkg.changes['architecture'].has_key('source'):
+ return
+
+ # Only check some distributions
+ valid_dist = False
+ for dist in ('unstable', 'experimental'):
+ if dist in self.pkg.changes['distribution']:
+ valid_dist = True
+ break
+
+ if not valid_dist:
+ return
+
+ tagfile = cnf.get("Dinstall::LintianTags")
+ if tagfile is None:
+ # We don't have a tagfile, so just don't do anything.
+ return
+
+ # Parse the yaml file
+ sourcefile = file(tagfile, 'r')
+ sourcecontent = sourcefile.read()
+ sourcefile.close()
+ try:
+ lintiantags = yaml.load(sourcecontent)['lintian']
+ except yaml.YAMLError, msg:
+ utils.fubar("Can not read the lintian tags file %s, YAML error: %s." % (tagfile, msg))
+ return
+
+ # Try and find all orig mentioned in the .dsc
+ symlinked = self.ensure_orig()
+
# Now setup the input file for lintian. lintian wants "one tag per line" only,
# so put it together like it. We put all types of tags in one file and then sort
# through lintians output later to see if its a fatal tag we detected, or not.
elif etag in lintiantags['error']:
# The tag is overriden - but is not allowed to be
self.rejects.append("%s: Overriden tag %s found, but this tag may not be overwritten." % (epackage, etag))
- log("overidden tag is overridden", etag)
+ log("ftpmaster does not allow tag to be overridable", etag)
else:
# Tag is known, it is not overriden, direct reject.
self.rejects.append("%s: Found lintian output: '%s %s', automatically rejected package." % (epackage, etag, etext))
- log("auto rejecting", etag)
# Now tell if they *might* override it.
if etag in lintiantags['warning']:
+ log("auto rejecting", "overridable", etag)
self.rejects.append("%s: If you have a good reason, you may override this lintian tag." % (epackage))
+ else:
+ log("auto rejecting", "not overridable", etag)
###########################################################################
def check_urgency(self):
rej = False
for f in self.pkg.files.keys():
if self.pkg.files[f].has_key("byhand"):
- self.rejects.append("%s may not upload BYHAND file %s" % (uid, f))
+ self.rejects.append("%s may not upload BYHAND file %s" % (fpr.uid.uid, f))
rej = True
if self.pkg.files[f].has_key("new"):
- self.rejects.append("%s may not upload NEW file %s" % (uid, f))
+ self.rejects.append("%s may not upload NEW file %s" % (fpr.uid.uid, f))
rej = True
if rej:
###########################################################################
def remove(self, from_dir=None):
    """
    Used (for instance) in p-u to remove the package from unchecked.

    Changes the working directory to from_dir — or to the upload's own
    directory when from_dir is None — and unlinks every file listed in
    self.pkg.files.  Always returns 0.
    """
    # Resolve the target directory once instead of duplicating the
    # chdir call in both branches.
    if from_dir is None:
        from_dir = self.pkg.directory
    os.chdir(from_dir)

    for f in self.pkg.files.keys():
        os.unlink(f)

    return 0
################################################################################
- def in_override_p(self, package, component, suite, binary_type, file, session):
+ def in_override_p(self, package, component, suite, binary_type, filename, session):
"""
Check if a package already has override entries in the DB
@type binary_type: string
@param binary_type: type of the package
- @type file: string
- @param file: filename we check
+ @type filename: string
+ @param filename: filename we check
@return: the database result. But noone cares anyway.
# Remember the section and priority so we can check them later if appropriate
if len(result) > 0:
result = result[0]
- self.pkg.files[file]["override section"] = result.section.section
- self.pkg.files[file]["override priority"] = result.priority.priority
+ self.pkg.files[filename]["override section"] = result.section.section
+ self.pkg.files[filename]["override priority"] = result.priority.priority
return result
return None
self.reject.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (file, existent_version, suite, new_version, target_suite))
################################################################################
def check_binary_against_db(self, filename, session):
    """
    Cross-check an uploaded binary against the database.

    Appends to self.rejects when the cross-suite version check fails,
    or when a binary with the same package/version/architecture already
    exists in the archive (overwrites are not allowed).

    NOTE(review): relies on the ORM models BinAssociation, DBBinary and
    Architecture defined elsewhere in this project.
    """
    # Hoist the repeated self.pkg.files[filename] lookups into a local.
    entry = self.pkg.files[filename]

    # Ensure version is sane
    q = session.query(BinAssociation)
    q = q.join(DBBinary).filter(DBBinary.package==entry["package"])
    q = q.join(Architecture).filter(Architecture.arch_string.in_([entry["architecture"], 'all']))

    self.cross_suite_version_check([ (x.suite.suite_name, x.binary.version) for x in q.all() ],
                                   filename, entry["version"], sourceful=False)

    # Check for any existing copies of the file
    q = session.query(DBBinary).filter_by(package=entry["package"])
    q = q.filter_by(version=entry["version"])
    q = q.join(Architecture).filter_by(arch_string=entry["architecture"])

    if q.count() > 0:
        self.rejects.append("%s: can not overwrite existing copy already in the archive." % filename)
################################################################################
- def check_source_against_db(self, file, session):
+ def check_source_against_db(self, filename, session):
"""
"""
source = self.pkg.dsc.get("source")
q = q.join(DBSource).filter(DBSource.source==source)
self.cross_suite_version_check([ (x.suite.suite_name, x.source.version) for x in q.all() ],
- file, version, sourceful=True)
+ filename, version, sourceful=True)
################################################################################
- def check_dsc_against_db(self, file, session):
+ def check_dsc_against_db(self, filename, session):
"""
@warning: NB: this function can remove entries from the 'files' index [if
orig_files[dsc_name]["path"] = in_otherdir
if not found:
- self.rejects.append("%s refers to %s, but I can't find it in the queue or in the pool." % (file, dsc_name))
+ self.rejects.append("%s refers to %s, but I can't find it in the queue or in the pool." % (filename, dsc_name))
continue
else:
- self.rejects.append("%s refers to %s, but I can't find it in the queue." % (file, dsc_name))
+ self.rejects.append("%s refers to %s, but I can't find it in the queue." % (filename, dsc_name))
continue
if actual_md5 != dsc_entry["md5sum"]:
- self.rejects.append("md5sum for %s doesn't match %s." % (found, file))
+ self.rejects.append("md5sum for %s doesn't match %s." % (found, filename))
if actual_size != int(dsc_entry["size"]):
- self.rejects.append("size for %s doesn't match %s." % (found, file))
+ self.rejects.append("size for %s doesn't match %s." % (found, filename))
################################################################################
# This is used by process-new and process-holding to recheck a changes file