from utils import parse_changes, check_dsc_files
from textutils import fix_maintainer
from binary import Binary
+from lintian import parse_lintian_output
###############################################################################
for title, messages in msgs:
if messages:
msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
+ msg += '\n'
return msg
self.pkg.changes["chopversion"] = re_no_epoch.sub('', self.pkg.changes["version"])
self.pkg.changes["chopversion2"] = re_no_revision.sub('', self.pkg.changes["chopversion"])
- # Check there isn't already a changes file of the same name in one
- # of the queue directories.
base_filename = os.path.basename(filename)
- if get_knownchange(base_filename):
- self.rejects.append("%s: a file with this name already exists." % (base_filename))
-
# Check the .changes is non-empty
if not self.pkg.files:
self.rejects.append("%s: nothing to do (Files field is empty)." % (base_filename))
def per_suite_file_checks(self, f, suite, session):
cnf = Config()
entry = self.pkg.files[f]
- archive = utils.where_am_i()
# Skip byhand
if entry.has_key("byhand"):
# Determine the location
location = cnf["Dir::Pool"]
- l = get_location(location, entry["component"], archive, session)
+ l = get_location(location, entry["component"], session=session)
if l is None:
- self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (entry["component"], archive))
+ self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s)" % entry["component"])
entry["location id"] = -1
else:
entry["location id"] = l.location_id
os.chdir(cwd)
- # Check there isn't already a .changes file of the same name in
- # the proposed-updates "CopyChanges" storage directories.
+ # check we already know the changes file
# [NB: this check must be done post-suite mapping]
base_filename = os.path.basename(self.pkg.changes_file)
- for suite in self.pkg.changes["distribution"].keys():
- copychanges = "Suite::%s::CopyChanges" % (suite)
- if cnf.has_key(copychanges) and \
- os.path.exists(os.path.join(cnf[copychanges], base_filename)):
- self.rejects.append("%s: a file with this name already exists in %s" \
- % (base_filename, cnf[copychanges]))
+ session = DBConn().session()
+
+ try:
+ dbc = session.query(DBChange).filter_by(changesname=base_filename).one()
+ # if in the pool or in a queue other than unchecked, reject
+ if (dbc.in_queue is None) \
+ or (dbc.in_queue is not None
+ and dbc.in_queue.queue_name != 'unchecked'):
+ self.rejects.append("%s file already known to dak" % base_filename)
+ except NoResultFound:
+ # not known, good
+ pass
has_binaries = False
has_source = False
- session = DBConn().session()
-
for f, entry in self.pkg.files.items():
# Ensure the file does not already exist in one of the accepted directories
- for d in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+ for d in [ "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
if not cnf.has_key("Dir::Queue::%s" % (d)): continue
- if os.path.exists(cnf["Dir::Queue::%s" % (d) ] + '/' + f):
+ if os.path.exists(os.path.join(cnf["Dir::Queue::%s" % (d) ], f)):
self.rejects.append("%s file already exists in the %s directory." % (f, d))
if not re_taint_free.match(f):
if self.logger:
self.logger.log([self.pkg.changes_file, "check_lintian"] + list(txt))
- # We have output of lintian, this package isn't clean. Lets parse it and see if we
- # are having a victim for a reject.
- # W: tzdata: binary-without-manpage usr/sbin/tzconfig
- for line in output.split('\n'):
- m = re_parse_lintian.match(line)
- if m is None:
- continue
-
- etype = m.group(1)
- epackage = m.group(2)
- etag = m.group(3)
- etext = m.group(4)
+ for etype, epackage, etag, etext in parse_lintian_output(output):
# So lets check if we know the tag at all.
if etag not in tags:
if etype == 'O':
# We know it and it is overriden. Check that override is allowed.
- if etag in lintiantags['warning']:
+ if etag in lintiantags['nonfatal']:
# The tag is overriden, and it is allowed to be overriden.
# Don't add a reject message.
pass
- elif etag in lintiantags['error']:
+ elif etag in lintiantags['fatal']:
# The tag is overriden - but is not allowed to be
self.rejects.append("%s: Overriden tag %s found, but this tag may not be overwritten." % (epackage, etag))
log("ftpmaster does not allow tag to be overridable", etag)
# Tag is known, it is not overriden, direct reject.
self.rejects.append("%s: Found lintian output: '%s %s', automatically rejected package." % (epackage, etag, etext))
# Now tell if they *might* override it.
- if etag in lintiantags['warning']:
+ if etag in lintiantags['nonfatal']:
log("auto rejecting", "overridable", etag)
self.rejects.append("%s: If you have a good reason, you may override this lintian tag." % (epackage))
else:
print "Installing."
self.logger.log(["installing changes", self.pkg.changes_file])
+ poolfiles = []
+
# Add the .dsc file to the DB first
for newfile, entry in self.pkg.files.items():
if entry["type"] == "dsc":
- dsc_component, dsc_location_id = add_dsc_to_db(self, newfile, session)
+ dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
+ for j in pfs:
+ poolfiles.append(j)
# Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
for newfile, entry in self.pkg.files.items():
if entry["type"] == "deb":
- add_deb_to_db(self, newfile, session)
+ poolfiles.append(add_deb_to_db(self, newfile, session))
# If this is a sourceful diff only upload that is moving
# cross-component we need to copy the .orig files into the new
session.add(dscf)
session.flush()
+ poolfiles.append(newf)
+
# Install the files into the pool
for newfile, entry in self.pkg.files.items():
destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
os.rename(temp_filename, filename)
os.chmod(filename, 0644)
- # auto-build queue
-# res = get_or_set_queue('buildd', session).autobuild_upload(self.pkg, session)
-# if res:
-# utils.fubar(res)
-# now_date = datetime.now()
+ session.commit()
+
+ # Set up our copy queues (e.g. buildd queues)
+ for suite_name in self.pkg.changes["distribution"].keys():
+ suite = get_suite(suite_name, session)
+ for q in suite.copy_queues:
+ for f in poolfiles:
+ q.add_file_from_pool(f)
session.commit()
os.unlink(os.path.join(from_dir, f))
if os.path.exists(os.path.join(h.holding_dir, f)):
os.unlink(os.path.join(h.holding_dir, f))
-
+
os.unlink(os.path.join(from_dir, self.pkg.changes_file))
if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
###########################################################################
- def move_to_dir (self, dest, perms=0660, changesperms=0664):
+ def move_to_queue (self, queue):
"""
- Move files to dest with certain perms/changesperms
+ Move files to a destination queue using the permissions in the table
"""
h = Holding()
utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
- dest, perms=changesperms)
+ queue.path, perms=int(queue.change_perms, 8))
for f in self.pkg.files.keys():
- utils.move(os.path.join(h.holding_dir, f), dest, perms=perms)
+ utils.move(os.path.join(h.holding_dir, f), queue.path, perms=int(queue.perms, 8))
###########################################################################