X-Git-Url: https://git.decadent.org.uk/gitweb/?a=blobdiff_plain;f=daklib%2Fqueue.py;h=66424d86eb6a5e30424026bb3f851ae49ef5819c;hb=bf38dcbe75f32f221887eeda8fce0e81e64db115;hp=2b86d4ccf3035551b86a7574ab3f8c4d162ae0dd;hpb=df4d04268ed10e33bb600e68e65e6763f522e669;p=dak.git

diff --git a/daklib/queue.py b/daklib/queue.py
index 2b86d4cc..66424d86 100755
--- a/daklib/queue.py
+++ b/daklib/queue.py
@@ -39,6 +39,7 @@ import shutil
 import textwrap
 from types import *
 from sqlalchemy.sql.expression import desc
+from sqlalchemy.orm.exc import NoResultFound
 
 import yaml
 
@@ -47,6 +48,7 @@ from changes import *
 from regexes import *
 from config import Config
 from holding import Holding
+from urgencylog import UrgencyLog
 from dbconn import *
 from summarystats import SummaryStats
 from utils import parse_changes, check_dsc_files
@@ -286,6 +288,7 @@ class Upload(object):
         for title, messages in msgs:
             if messages:
                 msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
+        msg += '\n'
 
         return msg
 
@@ -435,12 +438,8 @@ class Upload(object):
         self.pkg.changes["chopversion"] = re_no_epoch.sub('', self.pkg.changes["version"])
         self.pkg.changes["chopversion2"] = re_no_revision.sub('', self.pkg.changes["chopversion"])
 
-        # Check there isn't already a changes file of the same name in one
-        # of the queue directories.
         base_filename = os.path.basename(filename)
-        if get_knownchange(base_filename):
-            self.rejects.append("%s: a file with this name already exists." % (base_filename))
 
         # Check the .changes is non-empty
         if not self.pkg.files:
             self.rejects.append("%s: nothing to do (Files field is empty)." % (base_filename))
@@ -723,7 +722,6 @@ class Upload(object):
     def per_suite_file_checks(self, f, suite, session):
         cnf = Config()
         entry = self.pkg.files[f]
-        archive = utils.where_am_i()
 
         # Skip byhand
         if entry.has_key("byhand"):
@@ -767,9 +765,9 @@ class Upload(object):
 
         # Determine the location
         location = cnf["Dir::Pool"]
-        l = get_location(location, entry["component"], archive, session)
+        l = get_location(location, entry["component"], session=session)
         if l is None:
-            self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (entry["component"], archive))
+            self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s)" % entry["component"])
             entry["location id"] = -1
         else:
             entry["location id"] = l.location_id
@@ -812,28 +810,29 @@ class Upload(object):
 
         os.chdir(cwd)
 
-        # Check there isn't already a .changes file of the same name in
-        # the proposed-updates "CopyChanges" storage directories.
+        # Check whether we already know this .changes file
         # [NB: this check must be done post-suite mapping]
         base_filename = os.path.basename(self.pkg.changes_file)
-        for suite in self.pkg.changes["distribution"].keys():
-            copychanges = "Suite::%s::CopyChanges" % (suite)
-            if cnf.has_key(copychanges) and \
-                   os.path.exists(os.path.join(cnf[copychanges], base_filename)):
-                self.rejects.append("%s: a file with this name already exists in %s" \
-                                        % (base_filename, cnf[copychanges]))
+        session = DBConn().session()
+
+        try:
+            dbc = session.query(DBChange).filter_by(changesname=base_filename).one()
+            # If it is in the pool, or in a queue other than unchecked, reject
+            if dbc.in_queue is None or dbc.in_queue.queue_name != 'unchecked':
+                self.rejects.append("%s file already known to dak" % base_filename)
+        except NoResultFound:
+            # Not known yet - good
+            pass
 
         has_binaries = False
         has_source = False
 
-        session = DBConn().session()
-
         for f, entry in self.pkg.files.items():
             # Ensure the file does not already exist in one of the accepted directories
-            for d in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+            for d in [ "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
                 if not cnf.has_key("Dir::Queue::%s" % (d)):
                     continue
-                if os.path.exists(cnf["Dir::Queue::%s" % (d) ] + '/' + f):
+                if os.path.exists(os.path.join(cnf["Dir::Queue::%s" % (d) ], f)):
                     self.rejects.append("%s file already exists in the %s directory." % (f, d))
 
             if not re_taint_free.match(f):
@@ -1819,8 +1818,8 @@ distribution."""
         return summary
 
     ###########################################################################
-
-    def accept (self, summary, short_summary, session):
+    @session_wrapper
+    def accept (self, summary, short_summary, session=None):
         """
         Accept an upload.
 
@@ -1841,27 +1840,31 @@ distribution."""
         stats = SummaryStats()
 
         print "Installing."
-        Logger.log(["installing changes", u.pkg.changes_file])
+        self.logger.log(["installing changes", self.pkg.changes_file])
+
+        poolfiles = []
 
         # Add the .dsc file to the DB first
-        for newfile, entry in u.pkg.files.items():
+        for newfile, entry in self.pkg.files.items():
             if entry["type"] == "dsc":
-                dsc_component, dsc_location_id = add_dsc_to_db(u, newfile, session)
+                dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
+                for j in pfs:
+                    poolfiles.append(j)
 
         # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
-        for newfile, entry in u.pkg.files.items():
+        for newfile, entry in self.pkg.files.items():
             if entry["type"] == "deb":
-                add_deb_to_db(u, newfile, session)
+                poolfiles.append(add_deb_to_db(self, newfile, session))
 
         # If this is a sourceful diff only upload that is moving
         # cross-component we need to copy the .orig files into the new
         # component too for the same reasons as above.
- if u.pkg.changes["architecture"].has_key("source"): - for orig_file in u.pkg.orig_files.keys(): - if not u.pkg.orig_files[orig_file].has_key("id"): + if self.pkg.changes["architecture"].has_key("source"): + for orig_file in self.pkg.orig_files.keys(): + if not self.pkg.orig_files[orig_file].has_key("id"): continue # Skip if it's not in the pool - orig_file_id = u.pkg.orig_files[orig_file]["id"] - if u.pkg.orig_files[orig_file]["location"] == dsc_location_id: + orig_file_id = self.pkg.orig_files[orig_file]["id"] + if self.pkg.orig_files[orig_file]["location"] == dsc_location_id: continue # Skip if the location didn't change # Do the move @@ -1870,7 +1873,7 @@ distribution.""" old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum, 'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum} - new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename)) + new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename)) # TODO: Care about size/md5sum collisions etc (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session) @@ -1880,27 +1883,29 @@ distribution.""" newf = add_poolfile(new_filename, old_dat, dsc_location_id, session) # TODO: Check that there's only 1 here - source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0] + source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0] dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0] dscf.poolfile_id = newf.file_id session.add(dscf) session.flush() + poolfiles.append(newf) + # Install the files into the pool - for newfile, entry in u.pkg.files.items(): + for newfile, entry in self.pkg.files.items(): destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile) utils.move(newfile, destination) - Logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]]) - summarystats.accept_bytes += float(entry["size"]) + self.logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]]) + stats.accept_bytes += float(entry["size"]) # Copy the .changes file across for suite which need it. 
        copy_changes = {}
-        for suite_name in u.pkg.changes["distribution"].keys():
+        for suite_name in self.pkg.changes["distribution"].keys():
             if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)):
                 copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = ""
 
         for dest in copy_changes.keys():
-            utils.copy(u.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
+            utils.copy(self.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
 
         # We're done - commit the database changes
         session.commit()
@@ -1908,11 +1913,11 @@ distribution."""
         # the last commit
 
         # Move the .changes into the 'done' directory
-        utils.move(u.pkg.changes_file,
-                   os.path.join(cnf["Dir::Queue::Done"], os.path.basename(u.pkg.changes_file)))
+        utils.move(self.pkg.changes_file,
+                   os.path.join(cnf["Dir::Queue::Done"], os.path.basename(self.pkg.changes_file)))
 
-        if u.pkg.changes["architecture"].has_key("source") and log_urgency:
-            UrgencyLog().log(u.pkg.dsc["source"], u.pkg.dsc["version"], u.pkg.changes["urgency"])
+        if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
+            UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
 
         # Send accept mail, announce to lists, close bugs and check for
         # override disparities
@@ -1920,7 +1925,8 @@ distribution."""
             self.update_subst()
             self.Subst["__SUITE__"] = ""
             self.Subst["__SUMMARY__"] = summary
-            mail_message = utils.TemplateSubst(self.Subst, accepttemplate)
+            mail_message = utils.TemplateSubst(self.Subst,
+                os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
             utils.send_mail(mail_message)
             self.announce(short_summary, 1)
 
@@ -1958,16 +1964,19 @@ distribution."""
             os.rename(temp_filename, filename)
             os.chmod(filename, 0644)
 
-        # auto-build queue
-#        res = get_or_set_queue('buildd', session).autobuild_upload(self.pkg, session)
-#        if res:
-#            utils.fubar(res)
-#        now_date = datetime.now()
+        session.commit()
+
+        # Set up our copy queues (e.g. buildd queues)
+        for suite_name in self.pkg.changes["distribution"].keys():
+            suite = get_suite(suite_name, session)
+            for q in suite.copyqueues:
+                for f in poolfiles:
+                    q.add_file_from_pool(f)
 
         session.commit()
 
         # Finally...
-        summarystats.accept_count += 1
+        stats.accept_count += 1
 
     def check_override(self):
         """
@@ -2006,25 +2015,33 @@ distribution."""
     def remove(self, from_dir=None):
         """
         Used (for instance) in p-u to remove the package from unchecked
+
+        Also removes the package from the holding area.
""" if from_dir is None: - os.chdir(self.pkg.directory) - else: - os.chdir(from_dir) + from_dir = self.pkg.directory + h = Holding() for f in self.pkg.files.keys(): - os.unlink(f) - os.unlink(self.pkg.changes_file) + os.unlink(os.path.join(from_dir, f)) + if os.path.exists(os.path.join(h.holding_dir, f)): + os.unlink(os.path.join(h.holding_dir, f)) + + os.unlink(os.path.join(from_dir, self.pkg.changes_file)) + if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)): + os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file)) ########################################################################### - def move_to_dir (self, dest, perms=0660, changesperms=0664): + def move_to_queue (self, queue): """ - Move files to dest with certain perms/changesperms + Move files to a destination queue using the permissions in the table """ - utils.move(self.pkg.changes_file, dest, perms=changesperms) + h = Holding() + utils.move(os.path.join(h.holding_dir, self.pkg.changes_file), + dest, perms=int(queue.changesperms, 8)) for f in self.pkg.files.keys(): - utils.move(f, dest, perms=perms) + utils.move(os.path.join(h.holding_dir, f), dest, perms=int(queue.perms, 8)) ########################################################################### @@ -2415,6 +2432,7 @@ distribution.""" # This would fix the stupidity of changing something we often iterate over # whilst we're doing it del self.pkg.files[dsc_name] + dsc_entry["files id"] = i.file_id if not orig_files.has_key(dsc_name): orig_files[dsc_name] = {} orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)