session.add(cf)
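+ # Only commit (and close) if we set up the session ourself; otherwise just flush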
if privatetrans:
session.commit()
+ session.close()
+ else:
+ session.flush()
return cf.cafilename_id
else:
return q.one().cafilename_id
__all__.append('ContentFilepath')
-def get_or_set_contents_path_id(filepath, session):
+def get_or_set_contents_path_id(filepath, session=None):
"""
Returns database id for given path.
session.add(cf)
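+ # Only commit (and close) if we set up the session ourself; otherwise just flush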
if privatetrans:
session.commit()
+ session.close()
+ else:
+ session.flush()
return cf.cafilepath_id
else:
return q.one().cafilepath_id
privatetrans = True
try:
+ # Insert paths
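+ # First pass: get (or create) the filepath and filename IDs for every path and cache them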
+ pathcache = {}
for fullpath in fullpaths:
+ # Get the necessary IDs ...
(path, file) = os.path.split(fullpath)
- # Get the necessary IDs ...
+ filepath_id = get_or_set_contents_path_id(path, session)
+ filename_id = get_or_set_contents_file_id(file, session)
+
+ pathcache[fullpath] = (filepath_id, filename_id)
+
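+ # Second pass: add a ContentAssociation row for each cached (filepath_id, filename_id) pair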
+ for fullpath, dat in pathcache.items():
ca = ContentAssociation()
ca.binary_id = binary_id
- ca.filename_id = get_or_set_contents_file_id(file)
- ca.filepath_id = get_or_set_contents_path_id(path)
+ ca.filepath_id = dat[0]
+ ca.filename_id = dat[1]
session.add(ca)
# Only commit if we set up the session ourself
if privatetrans:
session.commit()
+ session.close()
+ else:
+ session.flush()
return True
+
except:
traceback.print_exc()
# Only rollback if we set up the session ourself
if privatetrans:
session.rollback()
+ session.close()
return False
q.delete()
# Insert paths
+ pathcache = {}
for fullpath in fullpaths:
(path, file) = os.path.split(fullpath)
if path.startswith( "./" ):
path = path[2:]
+ filepath_id = get_or_set_contents_path_id(path, session)
+ filename_id = get_or_set_contents_file_id(file, session)
+
+ pathcache[fullpath] = (filepath_id, filename_id)
+
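+ # Second pass: add a PendingContentAssociation row for each cached (filepath_id, filename_id) pair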
+ for fullpath, dat in pathcache.items():
pca = PendingContentAssociation()
pca.package = package['Package']
pca.version = package['Version']
- pca.filename_id = get_or_set_contents_file_id(file, session)
- pca.filepath_id = get_or_set_contents_path_id(path, session)
+ pca.filepath_id = dat[0]
+ pca.filename_id = dat[1]
pca.architecture = arch_id
session.add(pca)
# Only commit if we set up the session ourself
if privatetrans:
session.commit()
+ else:
+ session.flush()
return True
except:
dest = os.path.join(dest_dir, file_entry)
# TODO: Move into database as above
- if Cnf.FindB("Dinstall::SecurityQueueBuild"):
+ if conf.FindB("Dinstall::SecurityQueueBuild"):
# Copy it since the original won't be readable by www-data
utils.copy(src, dest)
else:
import apt_inst
import apt_pkg
import utils
+import commands
+import shutil
from types import *
from dak_exceptions import *
from summarystats import SummaryStats
from utils import parse_changes
from textutils import fix_maintainer
+from binary import Binary
###############################################################################
# This is a stupid default, but see the comments below
is_dm = False
- user = get_uid_from_fingerprint(changes["fingerprint"], session)
+ user = get_uid_from_fingerprint(fpr, session)
if user is not None:
uid = user.uid
uid_name = user.name
# Check the relevant fingerprint (which we have to have)
- for f in uid.fingerprint:
- if f.fingerprint == changes['fingerprint']:
+ for f in user.fingerprint:
+ if f.fingerprint == fpr:
is_dm = f.keyring.debian_maintainer
break
"""
def __init__(self):
+ self.logger = None
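+ # May be left as None; the logging calls below guard with "if self.logger:"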
self.pkg = Changes()
self.reset()
This is simply to prevent us from even trying things later which will
fail because we couldn't properly parse the file.
"""
+ Cnf = Config()
self.pkg.changes_file = filename
# Parse the .changes file into a dictionary
# Parse the Files field from the .changes into another dictionary
try:
- self.pkg.files.update(build_file_list(self.pkg.changes))
+ self.pkg.files.update(utils.build_file_list(self.pkg.changes))
except ParseChangesError, line:
self.rejects.append("%s: parse error, can't grok: %s." % (filename, line))
return False
entry["maintainer"] = control.Find("Maintainer", "")
if f.endswith(".udeb"):
- files[f]["dbtype"] = "udeb"
+ self.pkg.files[f]["dbtype"] = "udeb"
elif f.endswith(".deb"):
- files[f]["dbtype"] = "deb"
+ self.pkg.files[f]["dbtype"] = "deb"
else:
self.rejects.append("%s is neither a .deb or a .udeb." % (f))
source_version = m.group(2)
if not source_version:
- source_version = files[f]["version"]
+ source_version = self.pkg.files[f]["version"]
entry["source package"] = source
entry["source version"] = source_version
# Check the version and for file overwrites
self.check_binary_against_db(f, session)
- b = Binary(f).scan_package()
+ b = Binary(f)
+ b.scan_package()
if len(b.rejects) > 0:
for j in b.rejects:
self.rejects.append(j)
def per_suite_file_checks(self, f, suite, session):
cnf = Config()
entry = self.pkg.files[f]
+ archive = utils.where_am_i()
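+ # Needed for the pool location lookup (get_location) below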
# Skip byhand
if entry.has_key("byhand"):
# Determine the location
location = cnf["Dir::Pool"]
- l = get_location(location, component, archive, session)
+ l = get_location(location, entry["component"], archive, session)
if l is None:
self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (component, archive))
entry["location id"] = -1
# Check for packages that have moved from one component to another
entry['suite'] = suite
- res = get_binary_components(files[f]['package'], suite, entry["architecture"], session)
+ res = get_binary_components(self.pkg.files[f]['package'], suite, entry["architecture"], session)
if res.rowcount > 0:
entry["othercomponents"] = res.fetchone()[0]
has_binaries = False
has_source = False
- s = DBConn().session()
+ session = DBConn().session()
for f, entry in self.pkg.files.items():
# Ensure the file does not already exist in one of the accepted directories
# Build up the file list of files mentioned by the .dsc
try:
- self.pkg.dsc_files.update(utils.build_file_list(dsc, is_a_dsc=1))
+ self.pkg.dsc_files.update(utils.build_file_list(self.pkg.dsc, is_a_dsc=1))
except NoFilesFieldError:
self.rejects.append("%s: no Files: field." % (dsc_filename))
return False
# Validate the source and version fields
if not re_valid_pkg_name.match(self.pkg.dsc["source"]):
self.rejects.append("%s: invalid source name '%s'." % (dsc_filename, self.pkg.dsc["source"]))
- if not re_valid_version.match(dsc["version"]):
+ if not re_valid_version.match(self.pkg.dsc["version"]):
self.rejects.append("%s: invalid version number '%s'." % (dsc_filename, self.pkg.dsc["version"]))
# Bumping the version number of the .dsc breaks extraction by stable's
# Ensure there is a .tar.gz in the .dsc file
has_tar = False
- for f in dsc_files.keys():
+ for f in self.pkg.dsc_files.keys():
m = re_issource.match(f)
if not m:
self.rejects.append("%s: %s in Files field not recognised as source." % (dsc_filename, f))
self.rejects.append("%s: no .tar.gz or .orig.tar.gz in 'Files' field." % (dsc_filename))
# Ensure source is newer than existing source in target suites
- self.check_source_against_db(dsc_filename, session)
+ self.check_source_against_db(dsc_filename)
self.check_dsc_against_db(dsc_filename)
# Find the .dsc (again)
dsc_filename = None
- for f in self.files.keys():
- if files[f]["type"] == "dsc":
+ for f in self.pkg.files.keys():
+ if self.pkg.files[f]["type"] == "dsc":
dsc_filename = f
# If there isn't one, we have nothing to do. (We have reject()ed the upload already)
return
# Create a symlink mirror of the source files in our temporary directory
- for f in self.files.keys():
+ for f in self.pkg.files.keys():
m = re_issource.match(f)
if m:
src = os.path.join(source_dir, f)
return
# Get the upstream version
- upstr_version = re_no_epoch.sub('', dsc["version"])
+ upstr_version = re_no_epoch.sub('', self.pkg.dsc["version"])
if re_strip_revision.search(upstr_version):
upstr_version = re_strip_revision.sub('', upstr_version)
shutil.rmtree(tmpdir)
except OSError, e:
if e.errno != errno.EACCES:
+ print "foobar"
utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
self.rejects.append("%s: source tree could not be cleanly removed." % (self.pkg.dsc["source"]))
if result != 0:
utils.fubar("'%s' failed with result %s." % (cmd, result))
shutil.rmtree(tmpdir)
- except:
+ except Exception, e:
+ print "foobar2 (%s)" % e
utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
###########################################################################
# We need to deal with the original changes blob, as the fields we need
# might not be in the changes dict serialised into the .dak anymore.
- orig_changes = parse_deb822(self.pkg.changes['filecontents'])
+ orig_changes = utils.parse_deb822(self.pkg.changes['filecontents'])
# Copy the checksums over to the current changes dict. This will keep
# the existing modifications to it intact.
for j in utils._ensure_dsc_hash(self.pkg.dsc, self.pkg.dsc_files, hashname, hashfunc):
self.rejects.append(j)
- def check_hashes():
+ def check_hashes(self):
for m in utils.check_hash(".changes", self.pkg.files, "md5", apt_pkg.md5sum):
self.rejects.append(m)
for m in utils.check_size(".dsc", self.pkg.dsc_files):
self.rejects.append(m)
- for m in utils.ensure_hashes(self.pkg.changes, dsc, files, dsc_files):
- self.rejects.append(m)
+ self.ensure_hashes()
###########################################################################
def check_urgency(self):
# travel can cause errors on extraction]
def check_timestamps(self):
+ Cnf = Config()
+
future_cutoff = time.time() + int(Cnf["Dinstall::FutureTimeTravelGrace"])
past_cutoff = time.mktime(time.strptime(Cnf["Dinstall::PastCutoffYear"],"%Y"))
tar = TarTime(future_cutoff, past_cutoff)
- for filename, entry in self.pkg.files.keys():
+ for filename, entry in self.pkg.files.items():
if entry["type"] == "deb":
tar.reset()
try:
del self.Subst["__BUG_NUMBER__"]
del self.Subst["__STABLE_WARNING__"]
- if action:
- self.Logger.log(["closing bugs"] + bugs)
+ if action and self.logger:
+ self.logger.log(["closing bugs"] + bugs)
summary += "\n"
self.Subst["__SHORT_SUMMARY__"] = short_summary
for dist in self.pkg.changes["distribution"].keys():
- announce_list = Cnf.Find("Suite::%s::Announce" % (dist))
+ announce_list = cnf.Find("Suite::%s::Announce" % (dist))
if announce_list == "" or lists_done.has_key(announce_list):
continue
targetdir = cnf["Dir::Queue::Accepted"]
print "Accepting."
- self.Logger.log(["Accepting changes", self.pkg.changes_file])
+ if self.logger:
+ self.logger.log(["Accepting changes", self.pkg.changes_file])
- self.write_dot_dak(targetdir)
+ self.pkg.write_dot_dak(targetdir)
# Move all the files into the accepted directory
utils.move(self.pkg.changes_file, targetdir)
if not cnf["Dinstall::Options::No-Mail"]:
utils.send_mail(reject_mail_message)
- self.Logger.log(["rejected", pkg.changes_file])
+ if self.logger:
+ self.logger.log(["rejected", self.pkg.changes_file])
return 0
q = q.join(Architecture).filter(Architecture.arch_string.in_([self.pkg.files[file]["architecture"], 'all']))
self.cross_suite_version_check([ (x.suite.suite_name, x.binary.version) for x in q.all() ],
- file, files[file]["version"], sourceful=False)
+ file, self.pkg.files[file]["version"], sourceful=False)
# Check for any existing copies of the file
- q = session.query(DBBinary).filter_by(files[file]["package"])
- q = q.filter_by(version=files[file]["version"])
- q = q.join(Architecture).filter_by(arch_string=files[file]["architecture"])
+ q = session.query(DBBinary).filter_by(package=self.pkg.files[file]["package"])
+ q = q.filter_by(version=self.pkg.files[file]["version"])
+ q = q.join(Architecture).filter_by(arch_string=self.pkg.files[file]["architecture"])
if q.count() > 0:
self.rejects.append("%s: can not overwrite existing copy already in the archive." % (file))
# TODO: Don't delete the entry, just mark it as not needed
# This would fix the stupidity of changing something we often iterate over
# whilst we're doing it
- del files[dsc_name]
+ del self.pkg.files[dsc_name]
self.pkg.orig_tar_gz = os.path.join(i.location.path, i.filename)
match = 1