Signed-off-by: Mark Hymers <mhy@debian.org>
from daklib.dbconn import *
from daklib.binary import Binary
from daklib import daklog
-from daklib import queue
+from daklib.queue import *
from daklib import utils
from daklib.textutils import fix_maintainer
from daklib.dak_exceptions import *
# the .changes file.
def process_it(changes_file):
u = Upload()
u.pkg.changes_file = changes_file
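+    # Remember the upload's working directory and attach the global Logger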
+ u.pkg.directory = os.getcwd()
+ u.logger = Logger
# Some defaults in case we can't fully process the .changes file
u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"]
# Absolutize the filename to avoid the requirement of being in the
# same directory as the .changes file.
- copy_to_holding(os.path.abspath(changes_file))
+ holding.copy_to_holding(os.path.abspath(changes_file))
# Relativize the filename so we use the copy in holding
# rather than the original...
changespath = os.path.basename(u.pkg.changes_file)
- changes["fingerprint"] = utils.check_signature(changespath, reject)
+ (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changespath)
- if changes["fingerprint"]:
+ if u.pkg.changes["fingerprint"]:
valid_changes_p = u.load_changes(changespath)
else:
valid_changes_p = False
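+    # Fold any signature-check reject messages into the upload's own list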
+ u.rejects.extend(rejects)
if valid_changes_p:
while u.reprocess:
or the text of a warning if there are
"""
summary = ""
# Abandon the check if it's a non-sourceful upload
summary = ""
# Abandon the check if it's a non-sourceful upload
entry["override section"])
if entry["priority"] != "-":
entry["override section"])
if entry["priority"] != "-":
- if entry["priority"] != entry["override_priority"]:
+ if entry["priority"] != entry["override priority"]:
summary += "%s: package says priority is %s, override says %s.\n" % (name,
entry["priority"],
entry["override priority"])
summary += "%s: package says priority is %s, override says %s.\n" % (name,
entry["priority"],
entry["override priority"])
session.add(cf)
if privatetrans:
session.commit()
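+        # We opened this session ourselves, so close it; otherwise flush
+        # so the caller's transaction sees the new row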
+ session.close()
+ else:
+ session.flush()
return cf.cafilename_id
else:
return q.one().cafilename_id
__all__.append('ContentFilepath')
-def get_or_set_contents_path_id(filepath, session):
+def get_or_set_contents_path_id(filepath, session=None):
"""
Returns database id for given path.
"""
Returns database id for given path.
session.add(cf)
if privatetrans:
session.commit()
+ session.close()
+ else:
+ session.flush()
return cf.cafilepath_id
else:
return q.one().cafilepath_id
+ # Insert paths
+ pathcache = {}
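+        # First pass: resolve each path and filename to its database id,
+        # caching the pair so each lookup happens only once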
for fullpath in fullpaths:
+ # Get the necessary IDs ...
(path, file) = os.path.split(fullpath)
- # Get the necessary IDs ...
+ filepath_id = get_or_set_contents_path_id(path, session)
+ filename_id = get_or_set_contents_file_id(file, session)
+
+ pathcache[fullpath] = (filepath_id, filename_id)
+
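+        # Second pass: build the associations from the cached ids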
+ for fullpath, dat in pathcache.items():
ca = ContentAssociation()
ca.binary_id = binary_id
- ca.filename_id = get_or_set_contents_file_id(file)
- ca.filepath_id = get_or_set_contents_path_id(path)
+ ca.filepath_id = dat[0]
+ ca.filename_id = dat[1]
session.add(ca)
# Only commit if we set up the session ourself
if privatetrans:
session.commit()
+ session.close()
+ else:
+ session.flush()
except:
traceback.print_exc()
# Only rollback if we set up the session ourself
if privatetrans:
session.rollback()
q.delete()
# Insert paths
for fullpath in fullpaths:
(path, file) = os.path.split(fullpath)
if path.startswith( "./" ):
path = path[2:]
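+        # First pass: resolve and cache the path/file ids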
+ filepath_id = get_or_set_contents_path_id(path, session)
+ filename_id = get_or_set_contents_file_id(file, session)
+
+ pathcache[fullpath] = (filepath_id, filename_id)
+
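+        # Second pass: build the pending associations from the cached ids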
+ for fullpath, dat in pathcache.items():
pca = PendingContentAssociation()
pca.package = package['Package']
pca.version = package['Version']
- pca.filename_id = get_or_set_contents_file_id(file, session)
- pca.filepath_id = get_or_set_contents_path_id(path, session)
+ pca.filepath_id = dat[0]
+ pca.filename_id = dat[1]
pca.architecture = arch_id
session.add(pca)
# Only commit if we set up the session ourself
if privatetrans:
session.commit()
+ else:
+ session.flush()
dest = os.path.join(dest_dir, file_entry)
# TODO: Move into database as above
- if Cnf.FindB("Dinstall::SecurityQueueBuild"):
+ if conf.FindB("Dinstall::SecurityQueueBuild"):
# Copy it since the original won't be readable by www-data
utils.copy(src, dest)
else:
import os
from errno import ENOENT, EEXIST, EACCES
from singleton import Singleton
from config import Config
-    def clean(self, filename):
+    def clean(self):
cwd = os.getcwd()
os.chdir(self.holding_dir)
for f in self.in_holding.keys():
import apt_inst
import apt_pkg
import utils
+import commands
+import shutil
from types import *
from dak_exceptions import *
from summarystats import SummaryStats
from utils import parse_changes
from textutils import fix_maintainer
+from binary import Binary
###############################################################################
# This is a stupid default, but see the comments below
is_dm = False
- user = get_uid_from_fingerprint(changes["fingerprint"], session)
+ user = get_uid_from_fingerprint(fpr, session)
if user is not None:
uid = user.uid
uid_name = user.name
# Check the relevant fingerprint (which we have to have)
- for f in uid.fingerprint:
- if f.fingerprint == changes['fingerprint']:
+ for f in user.fingerprint:
+ if f.fingerprint == fpr:
is_dm = f.keyring.debian_maintainer
break
self.pkg = Changes()
self.reset()
This is simply to prevent us even trying things later which will
fail because we couldn't properly parse the file.
"""
self.pkg.changes_file = filename
# Parse the .changes field into a dictionary
# Parse the Files field from the .changes into another dictionary
try:
- self.pkg.files.update(build_file_list(self.pkg.changes))
+ self.pkg.files.update(utils.build_file_list(self.pkg.changes))
except ParseChangesError, line:
self.rejects.append("%s: parse error, can't grok: %s." % (filename, line))
return False
entry["maintainer"] = control.Find("Maintainer", "")
if f.endswith(".udeb"):
entry["maintainer"] = control.Find("Maintainer", "")
if f.endswith(".udeb"):
- files[f]["dbtype"] = "udeb"
+ self.pkg.files[f]["dbtype"] = "udeb"
- files[f]["dbtype"] = "deb"
+ self.pkg.files[f]["dbtype"] = "deb"
else:
self.rejects.append("%s is neither a .deb or a .udeb." % (f))
source_version = m.group(2)
if not source_version:
- source_version = files[f]["version"]
+ source_version = self.pkg.files[f]["version"]
entry["source package"] = source
entry["source version"] = source_version
entry["source package"] = source
entry["source version"] = source_version
# Check the version and for file overwrites
self.check_binary_against_db(f, session)
- b = Binary(f).scan_package()
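+        # Don't chain off the constructor; keep the Binary object so its
+        # rejects are still reachable after scan_package() runs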
+ b = Binary(f)
+ b.scan_package()
if len(b.rejects) > 0:
for j in b.rejects:
self.rejects.append(j)
def per_suite_file_checks(self, f, suite, session):
cnf = Config()
entry = self.pkg.files[f]
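+        # Work out which archive we're running on for the location lookup below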
+ archive = utils.where_am_i()
# Skip byhand
if entry.has_key("byhand"):
# Determine the location
location = cnf["Dir::Pool"]
- l = get_location(location, component, archive, session)
+ l = get_location(location, entry["component"], archive, session)
if l is None:
self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (component, archive))
entry["location id"] = -1
# Check for packages that have moved from one component to another
entry['suite'] = suite
- res = get_binary_components(files[f]['package'], suite, entry["architecture"], session)
+ res = get_binary_components(self.pkg.files[f]['package'], suite, entry["architecture"], session)
if res.rowcount > 0:
entry["othercomponents"] = res.fetchone()[0]
has_binaries = False
has_source = False
+ session = DBConn().session()
for f, entry in self.pkg.files.items():
# Ensure the file does not already exist in one of the accepted directories
# Build up the file list of files mentioned by the .dsc
try:
- self.pkg.dsc_files.update(utils.build_file_list(dsc, is_a_dsc=1))
+ self.pkg.dsc_files.update(utils.build_file_list(self.pkg.dsc, is_a_dsc=1))
except NoFilesFieldError:
self.rejects.append("%s: no Files: field." % (dsc_filename))
return False
# Validate the source and version fields
if not re_valid_pkg_name.match(self.pkg.dsc["source"]):
self.rejects.append("%s: invalid source name '%s'." % (dsc_filename, self.pkg.dsc["source"]))
- if not re_valid_version.match(dsc["version"]):
+ if not re_valid_version.match(self.pkg.dsc["version"]):
self.rejects.append("%s: invalid version number '%s'." % (dsc_filename, self.pkg.dsc["version"]))
# Bumping the version number of the .dsc breaks extraction by stable's
self.rejects.append("%s: invalid version number '%s'." % (dsc_filename, self.pkg.dsc["version"]))
# Bumping the version number of the .dsc breaks extraction by stable's
# Ensure there is a .tar.gz in the .dsc file
has_tar = False
- for f in dsc_files.keys():
+ for f in self.pkg.dsc_files.keys():
m = re_issource.match(f)
if not m:
self.rejects.append("%s: %s in Files field not recognised as source." % (dsc_filename, f))
self.rejects.append("%s: no .tar.gz or .orig.tar.gz in 'Files' field." % (dsc_filename))
# Ensure source is newer than existing source in target suites
self.rejects.append("%s: no .tar.gz or .orig.tar.gz in 'Files' field." % (dsc_filename))
# Ensure source is newer than existing source in target suites
- self.check_source_against_db(dsc_filename, session)
+ self.check_source_against_db(dsc_filename)
self.check_dsc_against_db(dsc_filename)
# Find the .dsc (again)
dsc_filename = None
- for f in self.files.keys():
- if files[f]["type"] == "dsc":
+ for f in self.pkg.files.keys():
+ if self.pkg.files[f]["type"] == "dsc":
dsc_filename = f
# If there isn't one, we have nothing to do. (We have reject()ed the upload already)
return
# Create a symlink mirror of the source files in our temporary directory
- for f in self.files.keys():
+ for f in self.pkg.files.keys():
m = re_issource.match(f)
if m:
src = os.path.join(source_dir, f)
return
# Get the upstream version
- upstr_version = re_no_epoch.sub('', dsc["version"])
+ upstr_version = re_no_epoch.sub('', self.pkg.dsc["version"])
if re_strip_revision.search(upstr_version):
upstr_version = re_strip_revision.sub('', upstr_version)
shutil.rmtree(tmpdir)
except OSError, e:
if e.errno != errno.EACCES:
utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
self.rejects.append("%s: source tree could not be cleanly removed." % (self.pkg.dsc["source"]))
if result != 0:
utils.fubar("'%s' failed with result %s." % (cmd, result))
shutil.rmtree(tmpdir)
+ except Exception, e:
+ print "foobar2 (%s)" % e
utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
###########################################################################
# We need to deal with the original changes blob, as the fields we need
# might not be in the changes dict serialised into the .dak anymore.
- orig_changes = parse_deb822(self.pkg.changes['filecontents'])
+ orig_changes = utils.parse_deb822(self.pkg.changes['filecontents'])
# Copy the checksums over to the current changes dict. This will keep
# the existing modifications to it intact.
for j in utils._ensure_dsc_hash(self.pkg.dsc, self.pkg.dsc_files, hashname, hashfunc):
self.rejects.append(j)
+ def check_hashes(self):
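+        # Verify hashes and sizes for both the .changes and .dsc file lists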
for m in utils.check_hash(".changes", self.pkg.files, "md5", apt_pkg.md5sum):
self.rejects.append(m)
for m in utils.check_size(".dsc", self.pkg.dsc_files):
self.rejects.append(m)
- for m in utils.ensure_hashes(self.pkg.changes, dsc, files, dsc_files):
- self.rejects.append(m)
###########################################################################
def check_urgency(self):
# travel can cause errors on extraction]
def check_timestamps(self):
future_cutoff = time.time() + int(Cnf["Dinstall::FutureTimeTravelGrace"])
past_cutoff = time.mktime(time.strptime(Cnf["Dinstall::PastCutoffYear"],"%Y"))
tar = TarTime(future_cutoff, past_cutoff)
- for filename, entry in self.pkg.files.keys():
+ for filename, entry in self.pkg.files.items():
if entry["type"] == "deb":
tar.reset()
try:
if entry["type"] == "deb":
tar.reset()
try:
del self.Subst["__BUG_NUMBER__"]
del self.Subst["__STABLE_WARNING__"]
del self.Subst["__BUG_NUMBER__"]
del self.Subst["__STABLE_WARNING__"]
- if action:
- self.Logger.log(["closing bugs"] + bugs)
+ if action and self.logger:
+ self.logger.log(["closing bugs"] + bugs)
self.Subst["__SHORT_SUMMARY__"] = short_summary
for dist in self.pkg.changes["distribution"].keys():
self.Subst["__SHORT_SUMMARY__"] = short_summary
for dist in self.pkg.changes["distribution"].keys():
- announce_list = Cnf.Find("Suite::%s::Announce" % (dist))
+ announce_list = cnf.Find("Suite::%s::Announce" % (dist))
if announce_list == "" or lists_done.has_key(announce_list):
continue
if announce_list == "" or lists_done.has_key(announce_list):
continue
targetdir = cnf["Dir::Queue::Accepted"]
print "Accepting."
targetdir = cnf["Dir::Queue::Accepted"]
print "Accepting."
- self.Logger.log(["Accepting changes", self.pkg.changes_file])
+ if self.logger:
+ self.logger.log(["Accepting changes", self.pkg.changes_file])
- self.write_dot_dak(targetdir)
+ self.pkg.write_dot_dak(targetdir)
# Move all the files into the accepted directory
utils.move(self.pkg.changes_file, targetdir)
if not cnf["Dinstall::Options::No-Mail"]:
utils.send_mail(reject_mail_message)
if not cnf["Dinstall::Options::No-Mail"]:
utils.send_mail(reject_mail_message)
- self.Logger.log(["rejected", pkg.changes_file])
+ if self.logger:
+ self.logger.log(["rejected", pkg.changes_file])
q = q.join(Architecture).filter(Architecture.arch_string.in_([self.pkg.files[file]["architecture"], 'all']))
self.cross_suite_version_check([ (x.suite.suite_name, x.binary.version) for x in q.all() ],
- file, files[file]["version"], sourceful=False)
+ file, self.pkg.files[file]["version"], sourceful=False)
# Check for any existing copies of the file
- q = session.query(DBBinary).filter_by(files[file]["package"])
- q = q.filter_by(version=files[file]["version"])
- q = q.join(Architecture).filter_by(arch_string=files[file]["architecture"])
+ q = session.query(DBBinary).filter_by(package=self.pkg.files[file]["package"])
+ q = q.filter_by(version=self.pkg.files[file]["version"])
+ q = q.join(Architecture).filter_by(arch_string=self.pkg.files[file]["architecture"])
if q.count() > 0:
self.rejects.append("%s: can not overwrite existing copy already in the archive." % (file))
# TODO: Don't delete the entry, just mark it as not needed
# This would fix the stupidity of changing something we often iterate over
# whilst we're doing it
+ del self.pkg.files[dsc_name]
self.pkg.orig_tar_gz = os.path.join(i.location.path, i.filename)
match = 1