import textwrap
from types import *
from sqlalchemy.sql.expression import desc
+from sqlalchemy.orm.exc import NoResultFound
import yaml
from regexes import *
from config import Config
from holding import Holding
+from urgencylog import UrgencyLog
from dbconn import *
from summarystats import SummaryStats
from utils import parse_changes, check_dsc_files
for title, messages in msgs:
if messages:
msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
+ msg += '\n'
return msg
os.chdir(cwd)
- # Check there isn't already a .changes file of the same name in
- # the proposed-updates "CopyChanges" storage directories.
+        # Check whether this .changes file is already known to dak
# [NB: this check must be done post-suite mapping]
base_filename = os.path.basename(self.pkg.changes_file)
- for suite in self.pkg.changes["distribution"].keys():
- copychanges = "Suite::%s::CopyChanges" % (suite)
- if cnf.has_key(copychanges) and \
- os.path.exists(os.path.join(cnf[copychanges], base_filename)):
- self.rejects.append("%s: a file with this name already exists in %s" \
- % (base_filename, cnf[copychanges]))
+ session = DBConn().session()
+
+ try:
+ changes = session.query(KnownChange).filter_by(changesname=base_filename).one()
+ if not changes.approved_for:
+ self.rejects.append("%s file already known to dak" % base_filename)
+ except NoResultFound, e:
+        # Not previously known -- a new upload, which is the expected case
+ pass
has_binaries = False
has_source = False
- session = DBConn().session()
-
for f, entry in self.pkg.files.items():
# Ensure the file does not already exist in one of the accepted directories
- for d in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+ for d in [ "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
if not cnf.has_key("Dir::Queue::%s" % (d)): continue
- if os.path.exists(cnf["Dir::Queue::%s" % (d) ] + '/' + f):
+ if os.path.exists(os.path.join(cnf["Dir::Queue::%s" % (d) ], f)):
self.rejects.append("%s file already exists in the %s directory." % (f, d))
if not re_taint_free.match(f):
return summary
###########################################################################
-
- def accept (self, summary, short_summary, session):
+ @session_wrapper
+ def accept (self, summary, short_summary, session=None):
"""
Accept an upload.
stats = SummaryStats()
print "Installing."
- Logger.log(["installing changes", u.pkg.changes_file])
+ self.logger.log(["installing changes", self.pkg.changes_file])
# Add the .dsc file to the DB first
- for newfile, entry in u.pkg.files.items():
+ for newfile, entry in self.pkg.files.items():
if entry["type"] == "dsc":
- dsc_component, dsc_location_id = add_dsc_to_db(u, newfile, session)
+ dsc_component, dsc_location_id = add_dsc_to_db(self, newfile, session)
# Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
- for newfile, entry in u.pkg.files.items():
+ for newfile, entry in self.pkg.files.items():
if entry["type"] == "deb":
- add_deb_to_db(u, newfile, session)
+ add_deb_to_db(self, newfile, session)
# If this is a sourceful diff only upload that is moving
# cross-component we need to copy the .orig files into the new
# component too for the same reasons as above.
- if u.pkg.changes["architecture"].has_key("source"):
- for orig_file in u.pkg.orig_files.keys():
- if not u.pkg.orig_files[orig_file].has_key("id"):
+ if self.pkg.changes["architecture"].has_key("source"):
+ for orig_file in self.pkg.orig_files.keys():
+ if not self.pkg.orig_files[orig_file].has_key("id"):
continue # Skip if it's not in the pool
- orig_file_id = u.pkg.orig_files[orig_file]["id"]
- if u.pkg.orig_files[orig_file]["location"] == dsc_location_id:
+ orig_file_id = self.pkg.orig_files[orig_file]["id"]
+ if self.pkg.orig_files[orig_file]["location"] == dsc_location_id:
continue # Skip if the location didn't change
# Do the move
old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum,
'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
- new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
+ new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
# TODO: Care about size/md5sum collisions etc
(found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session)
newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
# TODO: Check that there's only 1 here
- source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0]
+ source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0]
dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
dscf.poolfile_id = newf.file_id
session.add(dscf)
session.flush()
# Install the files into the pool
- for newfile, entry in u.pkg.files.items():
+ for newfile, entry in self.pkg.files.items():
destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
utils.move(newfile, destination)
- Logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
- summarystats.accept_bytes += float(entry["size"])
+ self.logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
+ stats.accept_bytes += float(entry["size"])
# Copy the .changes file across for suite which need it.
copy_changes = {}
- for suite_name in u.pkg.changes["distribution"].keys():
+ for suite_name in self.pkg.changes["distribution"].keys():
if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)):
copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = ""
for dest in copy_changes.keys():
- utils.copy(u.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
+ utils.copy(self.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
# We're done - commit the database changes
session.commit()
# the last commit
# Move the .changes into the 'done' directory
- utils.move(u.pkg.changes_file,
- os.path.join(cnf["Dir::Queue::Done"], os.path.basename(u.pkg.changes_file)))
+ utils.move(self.pkg.changes_file,
+ os.path.join(cnf["Dir::Queue::Done"], os.path.basename(self.pkg.changes_file)))
- if u.pkg.changes["architecture"].has_key("source") and log_urgency:
- UrgencyLog().log(u.pkg.dsc["source"], u.pkg.dsc["version"], u.pkg.changes["urgency"])
+ if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
+ UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
# Send accept mail, announce to lists, close bugs and check for
# override disparities
self.update_subst()
self.Subst["__SUITE__"] = ""
self.Subst["__SUMMARY__"] = summary
- mail_message = utils.TemplateSubst(self.Subst, accepttemplate)
+ mail_message = utils.TemplateSubst(self.Subst,
+ os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
utils.send_mail(mail_message)
self.announce(short_summary, 1)
os.rename(temp_filename, filename)
os.chmod(filename, 0644)
- # auto-build queue
-# res = get_or_set_queue('buildd', session).autobuild_upload(self.pkg, session)
-# if res:
-# utils.fubar(res)
-# now_date = datetime.now()
+ # This routine returns None on success or an error on failure
+ # TODO: Replace queue copying using the new queue.add_file_from_pool routine
+ # and by looking up which queues in suite.copy_queues
+ #res = get_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"])
+ #if res:
+ # utils.fubar(res)
session.commit()
# Finally...
- summarystats.accept_count += 1
+ stats.accept_count += 1
def check_override(self):
"""
def remove(self, from_dir=None):
"""
Used (for instance) in p-u to remove the package from unchecked
+
+ Also removes the package from holding area.
"""
if from_dir is None:
- os.chdir(self.pkg.directory)
- else:
- os.chdir(from_dir)
+ from_dir = self.pkg.directory
+ h = Holding()
for f in self.pkg.files.keys():
- os.unlink(f)
- os.unlink(self.pkg.changes_file)
+ os.unlink(os.path.join(from_dir, f))
+ if os.path.exists(os.path.join(h.holding_dir, f)):
+ os.unlink(os.path.join(h.holding_dir, f))
+
+ os.unlink(os.path.join(from_dir, self.pkg.changes_file))
+ if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
+ os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
###########################################################################
"""
Move files to dest with certain perms/changesperms
"""
- utils.move(self.pkg.changes_file, dest, perms=changesperms)
+ h = Holding()
+ utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
+ dest, perms=changesperms)
for f in self.pkg.files.keys():
- utils.move(f, dest, perms=perms)
+ utils.move(os.path.join(h.holding_dir, f), dest, perms=perms)
###########################################################################
# This would fix the stupidity of changing something we often iterate over
# whilst we're doing it
del self.pkg.files[dsc_name]
+ dsc_entry["files id"] = i.file_id
if not orig_files.has_key(dsc_name):
orig_files[dsc_name] = {}
orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)