SELECT id, filename FROM files f
WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id)
AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id)
+ AND NOT EXISTS (SELECT 1 FROM changes_pool_files cpf WHERE cpf.fileid = f.id)
+  AND NOT EXISTS (SELECT 1 FROM queue_files qf WHERE qf.fileid = f.id)
AND last_used IS NULL
ORDER BY filename""")
("process-new",
"Process NEW and BYHAND packages"),
- ("process-unchecked",
+ ("process-upload",
"Process packages in queue/unchecked"),
- ("process-accepted",
- "Install packages into the pool"),
("make-suite-file-list",
"Generate lists of packages per suite for apt-ftparchive"),
--- /dev/null
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Modify queue autobuild support
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+
+################################################################################
+
+import os
+import psycopg2
+
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+
+def do_update(self):
+ print "Updating queue_build table"
+
+ try:
+ c = self.db.cursor()
+
+ cnf = Config()
+
+ print "Adding copy_files field to queue table"
+ c.execute("ALTER TABLE queue ADD copy_pool_files BOOL NOT NULL DEFAULT FALSE")
+
+ print "Adding queue_files table"
+
+ c.execute("""CREATE TABLE queue_files (
+ id SERIAL PRIMARY KEY,
+ queueid INT4 NOT NULL REFERENCES queue(id) ON DELETE RESTRICT,
+ insertdate TIMESTAMP NOT NULL DEFAULT now(),
+ lastused TIMESTAMP DEFAULT NULL,
+ filename TEXT NOT NULL,
+ fileid INT4 REFERENCES files(id) ON DELETE CASCADE)""")
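+        # Note: fileid is deliberately nullable; a NULL fileid later marks queue
+        # entries that were copied into place rather than symlinked from the pool
+        # (see Queue.add_file_from_pool in daklib/dbconn.py), while ON DELETE
+        # CASCADE cleans queue entries up when the pool file itself goes away.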
+
+ c.execute("""SELECT queue_build.filename, queue_build.last_used, queue_build.queue
+ FROM queue_build""")
+
+ for r in c.fetchall():
+ print r[0]
+ filename = r[0]
+ last_used = r[1]
+ queue = r[2]
+ try:
+ endlink = os.readlink(filename)
+ c.execute("SELECT files.id FROM files WHERE filename LIKE '%%%s'" % endlink[endlink.rindex('/')+1:])
+ f = c.fetchone()
+ c.execute("""INSERT INTO queue_files (queueid, lastused, filename, fileid) VALUES
+ (%s, now(), %s, %s)""", (queue, filename[filename.rindex('/')+1:], f[0]))
+ except OSError, e:
+ print "Can't find file %s (%s)" % (filename, e)
+
+ print "Dropping old queue_build table"
+ c.execute("DROP TABLE queue_build")
+
+ print "Adding changes_pending_files table"
+ c.execute("""CREATE TABLE changes_pending_files (
+ id SERIAL PRIMARY KEY,
+ changeid INT4 NOT NULL REFERENCES known_changes(id) ON DELETE CASCADE,
+ filename TEXT NOT NULL,
+ source BOOL NOT NULL DEFAULT FALSE,
+ filesize BIGINT NOT NULL,
+ md5sum TEXT NOT NULL,
+ sha1sum TEXT NOT NULL,
+ sha256sum TEXT NOT NULL)""")
+
+
+ print "Adding changes_pool_files table"
+ c.execute("""CREATE TABLE changes_pool_files (
+ changeid INT4 NOT NULL REFERENCES known_changes(id) ON DELETE CASCADE,
+ fileid INT4 NOT NULL REFERENCES files(id) ON DELETE RESTRICT,
+
+ PRIMARY KEY (changeid, fileid))""")
+
+ print "Adding suite_queue_copy table"
+ c.execute("""CREATE TABLE suite_queue_copy (
+ suite INT4 NOT NULL REFERENCES suite(id),
+ queue INT4 NOT NULL REFERENCES queue(id),
+
+ PRIMARY KEY (suite, queue))""")
+
+ # Link all suites from accepted
+ c.execute("""SELECT suite.id FROM suite""")
+ for s in c.fetchall():
+ c.execute("""INSERT INTO suite_queue_copy (suite, queue) VALUES (%s, (SELECT id FROM queue WHERE queue_name = 'accepted'))""", s)
+
+        # Parse the config and add any buildd stuff (cnf was initialised above)
+ c.execute("""INSERT INTO queue (queue_name, path) VALUES ('buildd', '%s')""" % cnf["Dir::QueueBuild"].rstrip('/'))
+
+ for s in cnf.ValueList("Dinstall::QueueBuildSuites"):
+ c.execute("""INSERT INTO suite_queue_copy (suite, queue)
+ VALUES ( (SELECT id FROM suite WHERE suite_name = '%s'),
+ (SELECT id FROM queue WHERE queue_name = 'buildd'))""" % s.lower())
+
+ print "Committing"
+ c.execute("UPDATE config SET value = '21' WHERE name = 'db_revision'")
+ self.db.commit()
+
+ except psycopg2.InternalError, msg:
+ self.db.rollback()
+        raise DBUpdateError, "Unable to apply queue_build update 21, rollback issued. Error message: %s" % (str(msg))
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-Installs Debian packages from queue/accepted into the pool
-
-@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
-@copyright: 2009 Joerg Jaspert <joerg@debian.org>
-@license: GNU General Public License version 2 or later
-
-"""
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-###############################################################################
-
-# Cartman: "I'm trying to make the best of a bad situation, I don't
-# need to hear crap from a bunch of hippy freaks living in
-# denial. Screw you guys, I'm going home."
-#
-# Kyle: "But Cartman, we're trying to..."
-#
-# Cartman: "uhh.. screw you guys... home."
-
-###############################################################################
-
-import errno
-import fcntl
-import os
-import sys
-from datetime import datetime
-import apt_pkg
-
-from daklib import daklog
-from daklib.queue import *
-from daklib import utils
-from daklib.dbconn import *
-from daklib.dak_exceptions import *
-from daklib.regexes import re_default_answer, re_issource, re_fdnic
-from daklib.urgencylog import UrgencyLog
-from daklib.summarystats import SummaryStats
-from daklib.config import Config
-
-###############################################################################
-
-Options = None
-Logger = None
-
-###############################################################################
-
-def init():
- global Options
-
- # Initialize config and connection to db
- cnf = Config()
- DBConn()
-
- Arguments = [('a',"automatic","Dinstall::Options::Automatic"),
- ('h',"help","Dinstall::Options::Help"),
- ('n',"no-action","Dinstall::Options::No-Action"),
- ('p',"no-lock", "Dinstall::Options::No-Lock"),
- ('s',"no-mail", "Dinstall::Options::No-Mail"),
- ('d',"directory", "Dinstall::Options::Directory", "HasArg")]
-
- for i in ["automatic", "help", "no-action", "no-lock", "no-mail",
- "version", "directory"]:
- if not cnf.has_key("Dinstall::Options::%s" % (i)):
- cnf["Dinstall::Options::%s" % (i)] = ""
-
- changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
- Options = cnf.SubTree("Dinstall::Options")
-
- if Options["Help"]:
- usage()
-
- # If we have a directory flag, use it to find our files
- if cnf["Dinstall::Options::Directory"] != "":
- # Note that we clobber the list of files we were given in this case
- # so warn if the user has done both
- if len(changes_files) > 0:
- utils.warn("Directory provided so ignoring files given on command line")
-
- changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"])
-
- return changes_files
-
-###############################################################################
-
-def usage (exit_code=0):
- print """Usage: dak process-accepted [OPTION]... [CHANGES]...
- -a, --automatic automatic run
- -h, --help show this help and exit.
- -n, --no-action don't do anything
- -p, --no-lock don't check lockfile !! for cron.daily only !!
- -s, --no-mail don't send any mail
- -V, --version display the version number and exit"""
- sys.exit(exit_code)
-
-###############################################################################
-
-def action (u, stable_queue=None, log_urgency=True, session=None):
- (summary, short_summary) = u.build_summaries()
- pi = u.package_info()
-
- (prompt, answer) = ("", "XXX")
- if Options["No-Action"] or Options["Automatic"]:
- answer = 'S'
-
- if len(u.rejects) > 0:
- print "REJECT\n" + pi
- prompt = "[R]eject, Skip, Quit ?"
- if Options["Automatic"]:
- answer = 'R'
- else:
- print "INSTALL to " + ", ".join(u.pkg.changes["distribution"].keys())
- print pi + summary,
- prompt = "[I]nstall, Skip, Quit ?"
- if Options["Automatic"]:
- answer = 'I'
-
- while prompt.find(answer) == -1:
- answer = utils.our_raw_input(prompt)
- m = re_default_answer.match(prompt)
- if answer == "":
- answer = m.group(1)
- answer = answer[:1].upper()
-
- if answer == 'R':
- u.do_unaccept()
- Logger.log(["unaccepted", u.pkg.changes_file])
- elif answer == 'I':
- if stable_queue:
- stable_install(u, summary, short_summary, stable_queue, log_urgency)
- else:
- install(u, session, log_urgency)
- elif answer == 'Q':
- sys.exit(0)
-
-
-###############################################################################
-def add_poolfile(filename, datadict, location_id, session):
- poolfile = PoolFile()
- poolfile.filename = filename
- poolfile.filesize = datadict["size"]
- poolfile.md5sum = datadict["md5sum"]
- poolfile.sha1sum = datadict["sha1sum"]
- poolfile.sha256sum = datadict["sha256sum"]
- poolfile.location_id = location_id
-
- session.add(poolfile)
- # Flush to get a file id (NB: This is not a commit)
- session.flush()
-
- return poolfile
-
-def add_dsc_to_db(u, filename, session):
- entry = u.pkg.files[filename]
- source = DBSource()
-
- source.source = u.pkg.dsc["source"]
- source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
- source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
- source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
- source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
- source.install_date = datetime.now().date()
-
- dsc_component = entry["component"]
- dsc_location_id = entry["location id"]
-
- source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
-
- # Set up a new poolfile if necessary
- if not entry.has_key("files id") or not entry["files id"]:
- filename = entry["pool name"] + filename
- poolfile = add_poolfile(filename, entry, dsc_location_id, session)
- entry["files id"] = poolfile.file_id
-
- source.poolfile_id = entry["files id"]
- session.add(source)
- session.flush()
-
- for suite_name in u.pkg.changes["distribution"].keys():
- sa = SrcAssociation()
- sa.source_id = source.source_id
- sa.suite_id = get_suite(suite_name).suite_id
- session.add(sa)
-
- session.flush()
-
- # Add the source files to the DB (files and dsc_files)
- dscfile = DSCFile()
- dscfile.source_id = source.source_id
- dscfile.poolfile_id = entry["files id"]
- session.add(dscfile)
-
- for dsc_file, dentry in u.pkg.dsc_files.items():
- df = DSCFile()
- df.source_id = source.source_id
-
- # If the .orig tarball is already in the pool, it's
- # files id is stored in dsc_files by check_dsc().
- files_id = dentry.get("files id", None)
-
- # Find the entry in the files hash
- # TODO: Bail out here properly
- dfentry = None
- for f, e in u.pkg.files.items():
- if f == dsc_file:
- dfentry = e
- break
-
- if files_id is None:
- filename = dfentry["pool name"] + dsc_file
-
- (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
- # FIXME: needs to check for -1/-2 and or handle exception
- if found and obj is not None:
- files_id = obj.file_id
-
- # If still not found, add it
- if files_id is None:
- # HACK: Force sha1sum etc into dentry
- dentry["sha1sum"] = dfentry["sha1sum"]
- dentry["sha256sum"] = dfentry["sha256sum"]
- poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
- files_id = poolfile.file_id
-
- df.poolfile_id = files_id
- session.add(df)
-
- session.flush()
-
- # Add the src_uploaders to the DB
- uploader_ids = [source.maintainer_id]
- if u.pkg.dsc.has_key("uploaders"):
- for up in u.pkg.dsc["uploaders"].split(","):
- up = up.strip()
- uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
-
- added_ids = {}
- for up in uploader_ids:
- if added_ids.has_key(up):
- utils.warn("Already saw uploader %s for source %s" % (up, source.source))
- continue
-
- added_ids[u]=1
-
- su = SrcUploader()
- su.maintainer_id = up
- su.source_id = source.source_id
- session.add(su)
-
- session.flush()
-
- return dsc_component, dsc_location_id
-
-def add_deb_to_db(u, filename, session):
- """
- Contrary to what you might expect, this routine deals with both
- debs and udebs. That info is in 'dbtype', whilst 'type' is
- 'deb' for both of them
- """
- cnf = Config()
- entry = u.pkg.files[filename]
-
- bin = DBBinary()
- bin.package = entry["package"]
- bin.version = entry["version"]
- bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
- bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
- bin.arch_id = get_architecture(entry["architecture"], session).arch_id
- bin.binarytype = entry["dbtype"]
-
- # Find poolfile id
- filename = entry["pool name"] + filename
- fullpath = os.path.join(cnf["Dir::Pool"], filename)
- if not entry.get("location id", None):
- entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], utils.where_am_i(), session).location_id
-
- if not entry.get("files id", None):
- poolfile = add_poolfile(filename, entry, entry["location id"], session)
- entry["files id"] = poolfile.file_id
-
- bin.poolfile_id = entry["files id"]
-
- # Find source id
- bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
- if len(bin_sources) != 1:
- raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
- (bin.package, bin.version, bin.architecture.arch_string,
- filename, bin.binarytype, u.pkg.changes["fingerprint"])
-
- bin.source_id = bin_sources[0].source_id
-
- # Add and flush object so it has an ID
- session.add(bin)
- session.flush()
-
- # Add BinAssociations
- for suite_name in u.pkg.changes["distribution"].keys():
- ba = BinAssociation()
- ba.binary_id = bin.binary_id
- ba.suite_id = get_suite(suite_name).suite_id
- session.add(ba)
-
- session.flush()
-
- # Deal with contents - disabled for now
- #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
- #if not contents:
- # print "REJECT\nCould not determine contents of package %s" % bin.package
- # session.rollback()
- # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
-
-
-def install(u, session, log_urgency=True):
- cnf = Config()
- summarystats = SummaryStats()
-
- print "Installing."
-
- Logger.log(["installing changes", u.pkg.changes_file])
-
- # Ensure that we have all the hashes we need below.
- u.ensure_hashes()
- if len(u.rejects) > 0:
- # There were errors. Print them and SKIP the changes.
- for msg in u.rejects:
- utils.warn(msg)
- return
-
- # Add the .dsc file to the DB first
- for newfile, entry in u.pkg.files.items():
- if entry["type"] == "dsc":
- dsc_component, dsc_location_id = add_dsc_to_db(u, newfile, session)
-
- # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
- for newfile, entry in u.pkg.files.items():
- if entry["type"] == "deb":
- add_deb_to_db(u, newfile, session)
-
- # If this is a sourceful diff only upload that is moving
- # cross-component we need to copy the .orig files into the new
- # component too for the same reasons as above.
- if u.pkg.changes["architecture"].has_key("source"):
- for orig_file in u.pkg.orig_files.keys():
- if not u.pkg.orig_files[orig_file].has_key("id"):
- continue # Skip if it's not in the pool
- orig_file_id = u.pkg.orig_files[orig_file]["id"]
- if u.pkg.orig_files[orig_file]["location"] == dsc_location_id:
- continue # Skip if the location didn't change
-
- # Do the move
- oldf = get_poolfile_by_id(orig_file_id, session)
- old_filename = os.path.join(oldf.location.path, oldf.filename)
- old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum,
- 'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
-
- new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
-
- # TODO: Care about size/md5sum collisions etc
- (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session)
-
- if newf is None:
- utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
- newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
-
- # TODO: Check that there's only 1 here
- source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0]
- dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
- dscf.poolfile_id = newf.file_id
- session.add(dscf)
- session.flush()
-
- # Install the files into the pool
- for newfile, entry in u.pkg.files.items():
- destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
- utils.move(newfile, destination)
- Logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
- summarystats.accept_bytes += float(entry["size"])
-
- # Copy the .changes file across for suite which need it.
- copy_changes = {}
- copy_dot_dak = {}
- for suite_name in u.pkg.changes["distribution"].keys():
- if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)):
- copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = ""
- # and the .dak file...
- if cnf.has_key("Suite::%s::CopyDotDak" % (suite_name)):
- copy_dot_dak[cnf["Suite::%s::CopyDotDak" % (suite_name)]] = ""
-
- for dest in copy_changes.keys():
- utils.copy(u.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
-
- for dest in copy_dot_dak.keys():
- utils.copy(u.pkg.changes_file[:-8]+".dak", dest)
-
- # We're done - commit the database changes
- session.commit()
-
- # Move the .changes into the 'done' directory
- utils.move(u.pkg.changes_file,
- os.path.join(cnf["Dir::Queue::Done"], os.path.basename(u.pkg.changes_file)))
-
- # Remove the .dak file
- os.unlink(u.pkg.changes_file[:-8] + ".dak")
-
- if u.pkg.changes["architecture"].has_key("source") and log_urgency:
- UrgencyLog().log(u.pkg.dsc["source"], u.pkg.dsc["version"], u.pkg.changes["urgency"])
-
- # Our SQL session will automatically start a new transaction after
- # the last commit
-
- # Undo the work done in queue.py(accept) to help auto-building
- # from accepted.
- now_date = datetime.now()
-
- for suite_name in u.pkg.changes["distribution"].keys():
- if suite_name not in cnf.ValueList("Dinstall::QueueBuildSuites"):
- continue
-
- suite = get_suite(suite_name, session)
- dest_dir = cnf["Dir::QueueBuild"]
-
- if cnf.FindB("Dinstall::SecurityQueueBuild"):
- dest_dir = os.path.join(dest_dir, suite_name)
-
- for newfile, entry in u.pkg.files.items():
- dest = os.path.join(dest_dir, newfile)
-
- qb = get_queue_build(dest, suite.suite_id, session)
-
- # Remove it from the list of packages for later processing by apt-ftparchive
- if qb:
- qb.last_used = now_date
- qb.in_queue = False
- session.add(qb)
-
- if not cnf.FindB("Dinstall::SecurityQueueBuild"):
- # Update the symlink to point to the new location in the pool
- pool_location = utils.poolify(u.pkg.changes["source"], entry["component"])
- src = os.path.join(cnf["Dir::Pool"], pool_location, os.path.basename(newfile))
- if os.path.islink(dest):
- os.unlink(dest)
- os.symlink(src, dest)
-
- # Update last_used on any non-uploaded .orig symlink
- for orig_file in u.pkg.orig_files.keys():
- # Determine the .orig.tar.gz file name
- if not u.pkg.orig_files[orig_file].has_key("id"):
- continue # Skip files not in the pool
- # XXX: do we really want to update the orig_files dict here
- # instead of using a temporary variable?
- u.pkg.orig_files[orig_file]["path"] = os.path.join(dest_dir, orig_file)
-
- # Remove it from the list of packages for later processing by apt-ftparchive
- qb = get_queue_build(u.pkg.orig_files[orig_file]["path"], suite.suite_id, session)
- if qb:
- qb.in_queue = False
- qb.last_used = now_date
- session.add(qb)
-
- session.commit()
-
- # Finally...
- summarystats.accept_count += 1
-
-################################################################################
-
-def stable_install(u, session, summary, short_summary, fromsuite_name="proposed-updates"):
- summarystats = SummaryStats()
-
- fromsuite_name = fromsuite_name.lower()
- tosuite_name = "Stable"
- if fromsuite_name == "oldstable-proposed-updates":
- tosuite_name = "OldStable"
-
- print "Installing from %s to %s." % (fromsuite_name, tosuite_name)
-
- fromsuite = get_suite(fromsuite_name)
- tosuite = get_suite(tosuite_name)
-
- # Add the source to stable (and remove it from proposed-updates)
- for newfile, entry in u.pkg.files.items():
- if entry["type"] == "dsc":
- package = u.pkg.dsc["source"]
- # NB: not files[file]["version"], that has no epoch
- version = u.pkg.dsc["version"]
-
- source = get_sources_from_name(package, version, session)
- if len(source) < 1:
- utils.fubar("[INTERNAL ERROR] couldn't find '%s' (%s) in source table." % (package, version))
- source = source[0]
-
- # Remove from old suite
- old = session.query(SrcAssociation).filter_by(source_id = source.source_id)
- old = old.filter_by(suite_id = fromsuite.suite_id)
- old.delete()
-
- # Add to new suite
- new = SrcAssociation()
- new.source_id = source.source_id
- new.suite_id = tosuite.suite_id
- session.add(new)
-
- # Add the binaries to stable (and remove it/them from proposed-updates)
- for newfile, entry in u.pkg.files.items():
- if entry["type"] == "deb":
- package = entry["package"]
- version = entry["version"]
- architecture = entry["architecture"]
-
- binary = get_binaries_from_name(package, version, [architecture, 'all'])
-
- if len(binary) < 1:
- utils.fubar("[INTERNAL ERROR] couldn't find '%s' (%s for %s architecture) in binaries table." % (package, version, architecture))
- binary = binary[0]
-
- # Remove from old suite
- old = session.query(BinAssociation).filter_by(binary_id = binary.binary_id)
- old = old.filter_by(suite_id = fromsuite.suite_id)
- old.delete()
-
- # Add to new suite
- new = BinAssociation()
- new.binary_id = binary.binary_id
- new.suite_id = tosuite.suite_id
- session.add(new)
-
- session.commit()
-
- utils.move(u.pkg.changes_file,
- os.path.join(cnf["Dir::Morgue"], 'process-accepted', os.path.basename(u.pkg.changes_file)))
-
- ## Update the Stable ChangeLog file
- # TODO: URGH - Use a proper tmp file
- new_changelog_filename = cnf["Dir::Root"] + cnf["Suite::%s::ChangeLogBase" % (tosuite.suite_name)] + ".ChangeLog"
- changelog_filename = cnf["Dir::Root"] + cnf["Suite::%s::ChangeLogBase" % (tosuite.suite_name)] + "ChangeLog"
- if os.path.exists(new_changelog_filename):
- os.unlink(new_changelog_filename)
-
- new_changelog = utils.open_file(new_changelog_filename, 'w')
- for newfile, entry in u.pkg.files.items():
- if entry["type"] == "deb":
- new_changelog.write("%s/%s/binary-%s/%s\n" % (tosuite.suite_name,
- entry["component"],
- entry["architecture"],
- newfile))
- elif re_issource.match(newfile):
- new_changelog.write("%s/%s/source/%s\n" % (tosuite.suite_name,
- entry["component"],
- newfile))
- else:
- new_changelog.write("%s\n" % (newfile))
-
- chop_changes = re_fdnic.sub("\n", u.pkg.changes["changes"])
- new_changelog.write(chop_changes + '\n\n')
-
- if os.access(changelog_filename, os.R_OK) != 0:
- changelog = utils.open_file(changelog_filename)
- new_changelog.write(changelog.read())
-
- new_changelog.close()
-
- if os.access(changelog_filename, os.R_OK) != 0:
- os.unlink(changelog_filename)
- utils.move(new_changelog_filename, changelog_filename)
-
- summarystats.accept_count += 1
-
- if not Options["No-Mail"] and u.pkg.changes["architecture"].has_key("source"):
- u.Subst["__SUITE__"] = " into %s" % (tosuite)
- u.Subst["__SUMMARY__"] = summary
- u.Subst["__BCC__"] = "X-DAK: dak process-accepted"
-
- if cnf.has_key("Dinstall::Bcc"):
- u.Subst["__BCC__"] += "\nBcc: %s" % (cnf["Dinstall::Bcc"])
-
- template = os.path.join(cnf["Dir::Templates"], 'process-accepted.install')
-
- mail_message = utils.TemplateSubst(u.Subst, template)
- utils.send_mail(mail_message)
- u.announce(short_summary, True)
-
- # Finally remove the .dak file
- dot_dak_file = os.path.join(cnf["Suite::%s::CopyDotDak" % (fromsuite.suite_name)],
- os.path.basename(u.pkg.changes_file[:-8]+".dak"))
- os.unlink(dot_dak_file)
-
-################################################################################
-
-def process_it(changes_file, stable_queue, log_urgency, session):
- cnf = Config()
- u = Upload()
-
- overwrite_checks = True
-
- # Absolutize the filename to avoid the requirement of being in the
- # same directory as the .changes file.
- cfile = os.path.abspath(changes_file)
-
- # And since handling of installs to stable munges with the CWD
- # save and restore it.
- u.prevdir = os.getcwd()
-
- if stable_queue:
- old = cfile
- cfile = os.path.basename(old)
- os.chdir(cnf["Suite::%s::CopyDotDak" % (stable_queue)])
- # overwrite_checks should not be performed if installing to stable
- overwrite_checks = False
-
- u.pkg.load_dot_dak(cfile)
- u.update_subst()
-
- if stable_queue:
- u.pkg.changes_file = old
-
- u.accepted_checks(overwrite_checks, session)
- action(u, stable_queue, log_urgency, session)
-
- # Restore CWD
- os.chdir(u.prevdir)
-
-###############################################################################
-
-def main():
- global Logger
-
- cnf = Config()
- summarystats = SummaryStats()
- changes_files = init()
- log_urgency = False
- stable_queue = None
-
- # -n/--dry-run invalidates some other options which would involve things happening
- if Options["No-Action"]:
- Options["Automatic"] = ""
-
- # Check that we aren't going to clash with the daily cron job
-
- if not Options["No-Action"] and os.path.exists("%s/Archive_Maintenance_In_Progress" % (cnf["Dir::Root"])) and not Options["No-Lock"]:
- utils.fubar("Archive maintenance in progress. Try again later.")
-
- # If running from within proposed-updates; assume an install to stable
- queue = ""
- if os.getenv('PWD').find('oldstable-proposed-updates') != -1:
- stable_queue = "Oldstable-Proposed-Updates"
- elif os.getenv('PWD').find('proposed-updates') != -1:
- stable_queue = "Proposed-Updates"
-
- # Obtain lock if not in no-action mode and initialize the log
- if not Options["No-Action"]:
- lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
- try:
- fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError, e:
- if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN':
- utils.fubar("Couldn't obtain lock; assuming another 'dak process-accepted' is already running.")
- else:
- raise
- Logger = daklog.Logger(cnf, "process-accepted")
- if not stable_queue and cnf.get("Dir::UrgencyLog"):
- # Initialise UrgencyLog()
- log_urgency = True
- UrgencyLog()
-
- # Sort the .changes files so that we process sourceful ones first
- changes_files.sort(utils.changes_compare)
-
-
- # Process the changes files
- for changes_file in changes_files:
- print "\n" + changes_file
- session = DBConn().session()
- process_it(changes_file, stable_queue, log_urgency, session)
- session.close()
-
- if summarystats.accept_count:
- sets = "set"
- if summarystats.accept_count > 1:
- sets = "sets"
- sys.stderr.write("Installed %d package %s, %s.\n" % (summarystats.accept_count, sets,
- utils.size_type(int(summarystats.accept_bytes))))
- Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
-
- if not Options["No-Action"]:
- Logger.close()
- if log_urgency:
- UrgencyLog().close()
-
-###############################################################################
-
-if __name__ == '__main__':
- main()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-Checks Debian packages from Incoming
-@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006 James Troup <james@nocrew.org>
-@copyright: 2009 Joerg Jaspert <joerg@debian.org>
-@copyright: 2009 Mark Hymers <mhy@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-# Originally based on dinstall by Guy Maor <maor@debian.org>
-
-################################################################################
-
-# Computer games don't affect kids. I mean if Pacman affected our generation as
-# kids, we'd all run around in a darkened room munching pills and listening to
-# repetitive music.
-# -- Unknown
-
-################################################################################
-
-import errno
-import fcntl
-import os
-import sys
-import traceback
-import apt_pkg
-
-from daklib.dbconn import *
-from daklib import daklog
-from daklib.queue import *
-from daklib import utils
-from daklib.textutils import fix_maintainer
-from daklib.dak_exceptions import *
-from daklib.regexes import re_default_answer
-from daklib.summarystats import SummaryStats
-from daklib.holding import Holding
-from daklib.config import Config
-
-from types import *
-
-################################################################################
-
-
-################################################################################
-
-# Globals
-Options = None
-Logger = None
-
-###############################################################################
-
-def init():
- global Options
-
- apt_pkg.init()
- cnf = Config()
-
- Arguments = [('a',"automatic","Dinstall::Options::Automatic"),
- ('h',"help","Dinstall::Options::Help"),
- ('n',"no-action","Dinstall::Options::No-Action"),
- ('p',"no-lock", "Dinstall::Options::No-Lock"),
- ('s',"no-mail", "Dinstall::Options::No-Mail"),
- ('d',"directory", "Dinstall::Options::Directory", "HasArg")]
-
- for i in ["automatic", "help", "no-action", "no-lock", "no-mail",
- "override-distribution", "version", "directory"]:
- cnf["Dinstall::Options::%s" % (i)] = ""
-
- changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
- Options = cnf.SubTree("Dinstall::Options")
-
- if Options["Help"]:
- usage()
-
- # If we have a directory flag, use it to find our files
- if cnf["Dinstall::Options::Directory"] != "":
- # Note that we clobber the list of files we were given in this case
- # so warn if the user has done both
- if len(changes_files) > 0:
- utils.warn("Directory provided so ignoring files given on command line")
-
- changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"])
-
- return changes_files
-
-################################################################################
-
-def usage (exit_code=0):
- print """Usage: dak process-unchecked [OPTION]... [CHANGES]...
- -a, --automatic automatic run
- -h, --help show this help and exit.
- -n, --no-action don't do anything
- -p, --no-lock don't check lockfile !! for cron.daily only !!
- -s, --no-mail don't send any mail
- -V, --version display the version number and exit"""
- sys.exit(exit_code)
-
-################################################################################
-
-def action(u):
- cnf = Config()
-
- # changes["distribution"] may not exist in corner cases
- # (e.g. unreadable changes files)
- if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], DictType):
- u.pkg.changes["distribution"] = {}
-
- (summary, short_summary) = u.build_summaries()
-
- # q-unapproved hax0ring
- queue_info = {
- "New": { "is": is_new, "process": acknowledge_new },
- "Autobyhand" : { "is" : is_autobyhand, "process": do_autobyhand },
- "Byhand" : { "is": is_byhand, "process": do_byhand },
- "OldStableUpdate" : { "is": is_oldstableupdate,
- "process": do_oldstableupdate },
- "StableUpdate" : { "is": is_stableupdate, "process": do_stableupdate },
- "Unembargo" : { "is": is_unembargo, "process": queue_unembargo },
- "Embargo" : { "is": is_embargo, "process": queue_embargo },
- }
-
- queues = [ "New", "Autobyhand", "Byhand" ]
- if cnf.FindB("Dinstall::SecurityQueueHandling"):
- queues += [ "Unembargo", "Embargo" ]
- else:
- queues += [ "OldStableUpdate", "StableUpdate" ]
-
- (prompt, answer) = ("", "XXX")
- if Options["No-Action"] or Options["Automatic"]:
- answer = 'S'
-
- queuekey = ''
-
- pi = u.package_info()
-
- if len(u.rejects) > 0:
- if u.upload_too_new():
- print "SKIP (too new)\n" + pi,
- prompt = "[S]kip, Quit ?"
- else:
- print "REJECT\n" + pi
- prompt = "[R]eject, Skip, Quit ?"
- if Options["Automatic"]:
- answer = 'R'
- else:
- qu = None
- for q in queues:
- if queue_info[q]["is"](u):
- qu = q
- break
- if qu:
- print "%s for %s\n%s%s" % ( qu.upper(), ", ".join(u.pkg.changes["distribution"].keys()), pi, summary)
- queuekey = qu[0].upper()
- if queuekey in "RQSA":
- queuekey = "D"
- prompt = "[D]ivert, Skip, Quit ?"
- else:
- prompt = "[%s]%s, Skip, Quit ?" % (queuekey, qu[1:].lower())
- if Options["Automatic"]:
- answer = queuekey
- else:
- print "ACCEPT\n" + pi + summary,
- prompt = "[A]ccept, Skip, Quit ?"
- if Options["Automatic"]:
- answer = 'A'
-
- while prompt.find(answer) == -1:
- answer = utils.our_raw_input(prompt)
- m = re_default_answer.match(prompt)
- if answer == "":
- answer = m.group(1)
- answer = answer[:1].upper()
-
- if answer == 'R':
- os.chdir(u.pkg.directory)
- u.do_reject(0, pi)
- elif answer == 'A':
- u.pkg.add_known_changes( "Accepted" )
- u.accept(summary, short_summary)
- u.check_override()
- u.remove()
- elif answer == queuekey:
- u.pkg.add_known_changes( qu )
- queue_info[qu]["process"](u, summary, short_summary)
- u.remove()
- elif answer == 'Q':
- sys.exit(0)
-
-################################################################################
-
-def package_to_suite(u, suite):
- if not u.pkg.changes["distribution"].has_key(suite):
- return False
-
- ret = True
-
- if not u.pkg.changes["architecture"].has_key("source"):
- s = DBConn().session()
- q = s.query(SrcAssociation.sa_id)
- q = q.join(Suite).filter_by(suite_name=suite)
- q = q.join(DBSource).filter_by(source=u.pkg.changes['source'])
- q = q.filter_by(version=u.pkg.changes['version']).limit(1)
-
- # NB: Careful, this logic isn't what you would think it is
- # Source is already in {old-,}proposed-updates so no need to hold
- # Instead, we don't move to the holding area, we just do an ACCEPT
- if q.count() > 0:
- ret = False
-
- s.close()
-
- return ret
-
-def package_to_queue(u, summary, short_summary, queue, perms=0660, build=True, announce=None):
- cnf = Config()
- dir = cnf["Dir::Queue::%s" % queue]
-
- print "Moving to %s holding area" % queue.upper()
- Logger.log(["Moving to %s" % queue, u.pkg.changes_file])
-
- u.pkg.write_dot_dak(dir)
- u.move_to_dir(dir, perms=perms)
- if build:
- get_or_set_queue(queue.lower()).autobuild_upload(u.pkg, dir)
-
- # Check for override disparities
- u.check_override()
-
- # Send accept mail, announce to lists and close bugs
- if announce and not cnf["Dinstall::Options::No-Mail"]:
- template = os.path.join(cnf["Dir::Templates"], announce)
- u.update_subst()
- u.Subst["__SUITE__"] = ""
- mail_message = utils.TemplateSubst(u.Subst, template)
- utils.send_mail(mail_message)
- u.announce(short_summary, True)
-
-################################################################################
-
-def is_unembargo(u):
- session = DBConn().session()
- cnf = Config()
-
- q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes)
- if q.rowcount > 0:
- session.close()
- return True
-
- oldcwd = os.getcwd()
- os.chdir(cnf["Dir::Queue::Disembargo"])
- disdir = os.getcwd()
- os.chdir(oldcwd)
-
- ret = False
-
- if u.pkg.directory == disdir:
- if u.pkg.changes["architecture"].has_key("source"):
- if not Options["No-Action"]:
- session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes)
- session.commit()
-
- ret = True
-
- session.close()
-
- return ret
-
-def queue_unembargo(u, summary, short_summary):
- return package_to_queue(u, summary, short_summary, "Unembargoed",
- perms=0660, build=True, announce='process-unchecked.accepted')
-
-################################################################################
-
-def is_embargo(u):
- # if embargoed queues are enabled always embargo
- return True
-
-def queue_embargo(u, summary, short_summary):
- return package_to_queue(u, summary, short_summary, "Unembargoed",
- perms=0660, build=True, announce='process-unchecked.accepted')
-
-################################################################################
-
-def is_stableupdate(u):
- return package_to_suite(u, 'proposed-updates')
-
-def do_stableupdate(u, summary, short_summary):
- return package_to_queue(u, summary, short_summary, "ProposedUpdates",
- perms=0664, build=False, announce=None)
-
-################################################################################
-
-def is_oldstableupdate(u):
- return package_to_suite(u, 'oldstable-proposed-updates')
-
-def do_oldstableupdate(u, summary, short_summary):
- return package_to_queue(u, summary, short_summary, "OldProposedUpdates",
- perms=0664, build=False, announce=None)
-
-################################################################################
-
-def is_autobyhand(u):
- cnf = Config()
-
- all_auto = 1
- any_auto = 0
- for f in u.pkg.files.keys():
- if u.pkg.files[f].has_key("byhand"):
- any_auto = 1
-
- # filename is of form "PKG_VER_ARCH.EXT" where PKG, VER and ARCH
- # don't contain underscores, and ARCH doesn't contain dots.
- # further VER matches the .changes Version:, and ARCH should be in
- # the .changes Architecture: list.
- if f.count("_") < 2:
- all_auto = 0
- continue
-
- (pckg, ver, archext) = f.split("_", 2)
- if archext.count(".") < 1 or u.pkg.changes["version"] != ver:
- all_auto = 0
- continue
-
- ABH = cnf.SubTree("AutomaticByHandPackages")
- if not ABH.has_key(pckg) or \
- ABH["%s::Source" % (pckg)] != u.pkg.changes["source"]:
- print "not match %s %s" % (pckg, u.pkg.changes["source"])
- all_auto = 0
- continue
-
- (arch, ext) = archext.split(".", 1)
- if arch not in u.pkg.changes["architecture"]:
- all_auto = 0
- continue
-
- u.pkg.files[f]["byhand-arch"] = arch
- u.pkg.files[f]["byhand-script"] = ABH["%s::Script" % (pckg)]
-
- return any_auto and all_auto
-
-def do_autobyhand(u, summary, short_summary):
- print "Attempting AUTOBYHAND."
- byhandleft = True
- for f, entry in u.pkg.files.items():
- byhandfile = f
-
- if not entry.has_key("byhand"):
- continue
-
- if not entry.has_key("byhand-script"):
- byhandleft = True
- continue
-
- os.system("ls -l %s" % byhandfile)
-
- result = os.system("%s %s %s %s %s" % (
- entry["byhand-script"],
- byhandfile,
- u.pkg.changes["version"],
- entry["byhand-arch"],
- os.path.abspath(u.pkg.changes_file)))
-
- if result == 0:
- os.unlink(byhandfile)
- del entry
- else:
- print "Error processing %s, left as byhand." % (f)
- byhandleft = True
-
- if byhandleft:
- do_byhand(u, summary, short_summary)
- else:
- u.accept(summary, short_summary)
- u.check_override()
- # XXX: We seem to be missing a u.remove() here
- # This might explain why we get byhand leftovers in unchecked - mhy
-
-################################################################################
-
-def is_byhand(u):
- for f in u.pkg.files.keys():
- if u.pkg.files[f].has_key("byhand"):
- return True
- return False
-
-def do_byhand(u, summary, short_summary):
- return package_to_queue(u, summary, short_summary, "Byhand",
- perms=0660, build=False, announce=None)
-
-################################################################################
-
-def is_new(u):
- for f in u.pkg.files.keys():
- if u.pkg.files[f].has_key("new"):
- return True
- return False
-
-def acknowledge_new(u, summary, short_summary):
- cnf = Config()
-
- print "Moving to NEW holding area."
- Logger.log(["Moving to new", u.pkg.changes_file])
-
- u.pkg.write_dot_dak(cnf["Dir::Queue::New"])
- u.move_to_dir(cnf["Dir::Queue::New"], perms=0640, changesperms=0644)
-
- if not Options["No-Mail"]:
- print "Sending new ack."
- template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
- u.update_subst()
- u.Subst["__SUMMARY__"] = summary
- new_ack_message = utils.TemplateSubst(u.Subst, template)
- utils.send_mail(new_ack_message)
-
-################################################################################
-
-# reprocess is necessary for the case of foo_1.2-1 and foo_1.2-2 in
-# Incoming. -1 will reference the .orig.tar.gz, but -2 will not.
-# Upload.check_dsc_against_db() can find the .orig.tar.gz but it will
-# not have processed it during it's checks of -2. If -1 has been
-# deleted or otherwise not checked by 'dak process-unchecked', the
-# .orig.tar.gz will not have been checked at all. To get round this,
-# we force the .orig.tar.gz into the .changes structure and reprocess
-# the .changes file.
-
-def process_it(changes_file):
- global Logger
-
- cnf = Config()
-
- holding = Holding()
-
- u = Upload()
- u.pkg.changes_file = changes_file
- u.pkg.directory = os.getcwd()
- u.logger = Logger
- origchanges = os.path.join(u.pkg.directory, u.pkg.changes_file)
-
- # Some defaults in case we can't fully process the .changes file
- u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"]
- u.pkg.changes["changedby2047"] = cnf["Dinstall::MyEmailAddress"]
-
- # debian-{devel-,}-changes@lists.debian.org toggles writes access based on this header
- bcc = "X-DAK: dak process-unchecked"
- if cnf.has_key("Dinstall::Bcc"):
- u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
- else:
- u.Subst["__BCC__"] = bcc
-
- # Remember where we are so we can come back after cd-ing into the
- # holding directory. TODO: Fix this stupid hack
- u.prevdir = os.getcwd()
-
- # TODO: Figure out something better for this (or whether it's even
- # necessary - it seems to have been for use when we were
- # still doing the is_unchecked check; reprocess = 2)
- u.reprocess = 1
-
- try:
- # If this is the Real Thing(tm), copy things into a private
- # holding directory first to avoid replacable file races.
- if not Options["No-Action"]:
- os.chdir(cnf["Dir::Queue::Holding"])
-
- # Absolutize the filename to avoid the requirement of being in the
- # same directory as the .changes file.
- holding.copy_to_holding(origchanges)
-
- # Relativize the filename so we use the copy in holding
- # rather than the original...
- changespath = os.path.basename(u.pkg.changes_file)
-
- (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changespath)
-
- if u.pkg.changes["fingerprint"]:
- valid_changes_p = u.load_changes(changespath)
- else:
- valid_changes_p = False
- u.rejects.extend(rejects)
-
- if valid_changes_p:
- while u.reprocess:
- u.check_distributions()
- u.check_files(not Options["No-Action"])
- valid_dsc_p = u.check_dsc(not Options["No-Action"])
- if valid_dsc_p and not Options["No-Action"]:
- u.check_source()
- u.check_lintian()
- u.check_hashes()
- u.check_urgency()
- u.check_timestamps()
- u.check_signed_by_key()
-
- action(u)
-
- except (SystemExit, KeyboardInterrupt):
- raise
-
- except:
- print "ERROR"
- traceback.print_exc(file=sys.stderr)
-
- # Restore previous WD
- os.chdir(u.prevdir)
-
-###############################################################################
-
-def main():
- global Options, Logger
-
- cnf = Config()
- changes_files = init()
-
- # -n/--dry-run invalidates some other options which would involve things happening
- if Options["No-Action"]:
- Options["Automatic"] = ""
-
- # Initialize our Holding singleton
- holding = Holding()
-
- # Ensure all the arguments we were given are .changes files
- for f in changes_files:
- if not f.endswith(".changes"):
- utils.warn("Ignoring '%s' because it's not a .changes file." % (f))
- changes_files.remove(f)
-
- if changes_files == []:
- if cnf["Dinstall::Options::Directory"] == "":
- utils.fubar("Need at least one .changes file as an argument.")
- else:
- sys.exit(0)
-
- # Check that we aren't going to clash with the daily cron job
- if not Options["No-Action"] and os.path.exists("%s/daily.lock" % (cnf["Dir::Lock"])) and not Options["No-Lock"]:
- utils.fubar("Archive maintenance in progress. Try again later.")
-
- # Obtain lock if not in no-action mode and initialize the log
- if not Options["No-Action"]:
- lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
- try:
- fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError, e:
- if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN':
- utils.fubar("Couldn't obtain lock; assuming another 'dak process-unchecked' is already running.")
- else:
- raise
- Logger = daklog.Logger(cnf, "process-unchecked")
-
- # Sort the .changes files so that we process sourceful ones first
- changes_files.sort(utils.changes_compare)
-
- # Process the changes files
- for changes_file in changes_files:
- print "\n" + changes_file
- try:
- process_it (changes_file)
- finally:
- if not Options["No-Action"]:
- holding.clean()
-
- accept_count = SummaryStats().accept_count
- accept_bytes = SummaryStats().accept_bytes
-
- if accept_count:
- sets = "set"
- if accept_count > 1:
- sets = "sets"
- print "Accepted %d package %s, %s." % (accept_count, sets, utils.size_type(int(accept_bytes)))
- Logger.log(["total",accept_count,accept_bytes])
-
- if not Options["No-Action"]:
- Logger.close()
-
-################################################################################
-
-if __name__ == '__main__':
- main()
################################################################################
Cnf = None
-required_database_schema = 20
+required_database_schema = 21
################################################################################
def __repr__(self):
return '<PoolFile %s>' % self.filename
+ @property
+ def fullpath(self):
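+        # Full on-disk path: the location's base path plus the pool-relative filename.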
+ return os.path.join(self.location.path, self.filename)
+
__all__.append('PoolFile')
@session_wrapper
__all__.append('get_knownchange')
################################################################################
+
+class KnownChangePendingFile(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<KnownChangePendingFile %s>' % self.known_change_pending_file_id
+
+__all__.append('KnownChangePendingFile')
+
+################################################################################
+
class Location(object):
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
return '<Queue %s>' % self.queue_name
- def autobuild_upload(self, changes, srcpath, session=None):
- """
- Update queue_build database table used for incoming autobuild support.
+ def add_file_from_pool(self, poolfile):
+ """Copies a file into the pool. Assumes that the PoolFile object is
+ attached to the same SQLAlchemy session as the Queue object is.
- @type changes: Changes
- @param changes: changes object for the upload to process
+ The caller is responsible for committing after calling this function."""
+        poolfile_basename = os.path.basename(poolfile.filename)
- @type srcpath: string
- @param srcpath: path for the queue file entries/link destinations
+ # Check if we have a file of this name or this ID already
+ for f in self.queuefiles:
+            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
+               f.filename == poolfile_basename:
+                # In this case, update the QueueFile entry so we
+                # don't remove it too early
+                f.lastused = datetime.now()
+                DBConn().session().object_session(poolfile).add(f)
+                return f
- @type session: SQLAlchemy session
- @param session: Optional SQLAlchemy session. If this is passed, the
- caller is responsible for ensuring a transaction has begun and
- committing the results or rolling back based on the result code. If
- not passed, a commit will be performed at the end of the function,
- otherwise the caller is responsible for commiting.
+ # Prepare QueueFile object
+ qf = QueueFile()
+ qf.queue_id = self.queue_id
+ qf.lastused = datetime.now()
+        qf.filename = poolfile_basename
- @rtype: NoneType or string
- @return: None if the operation failed, a string describing the error if not
- """
+        targetpath = poolfile.fullpath
+ queuepath = os.path.join(self.path, poolfile_basename)
- privatetrans = False
- if session is None:
- session = DBConn().session()
- privatetrans = True
-
- # TODO: Remove by moving queue config into the database
- conf = Config()
-
- for suitename in changes.changes["distribution"].keys():
- # TODO: Move into database as:
- # buildqueuedir TEXT DEFAULT NULL (i.e. NULL is no build)
- # buildqueuecopy BOOLEAN NOT NULL DEFAULT FALSE (i.e. default is symlink)
- # This also gets rid of the SecurityQueueBuild hack below
- if suitename not in conf.ValueList("Dinstall::QueueBuildSuites"):
- continue
-
- # Find suite object
- s = get_suite(suitename, session)
- if s is None:
- return "INTERNAL ERROR: Could not find suite %s" % suitename
-
- # TODO: Get from database as above
- dest_dir = conf["Dir::QueueBuild"]
-
- # TODO: Move into database as above
- if conf.FindB("Dinstall::SecurityQueueBuild"):
- dest_dir = os.path.join(dest_dir, suitename)
-
- for file_entry in changes.files.keys():
- src = os.path.join(srcpath, file_entry)
- dest = os.path.join(dest_dir, file_entry)
-
- # TODO: Move into database as above
- if conf.FindB("Dinstall::SecurityQueueBuild"):
- # Copy it since the original won't be readable by www-data
- import utils
- utils.copy(src, dest)
- else:
- # Create a symlink to it
- os.symlink(src, dest)
-
- qb = QueueBuild()
- qb.suite_id = s.suite_id
- qb.queue_id = self.queue_id
- qb.filename = dest
- qb.in_queue = True
-
- session.add(qb)
-
- # If the .orig tarballs are in the pool, create a symlink to
- # them (if one doesn't already exist)
- for dsc_file in changes.dsc_files.keys():
- # Skip all files except orig tarballs
- from daklib.regexes import re_is_orig_source
- if not re_is_orig_source.match(dsc_file):
- continue
- # Skip orig files not identified in the pool
- if not (changes.orig_files.has_key(dsc_file) and
- changes.orig_files[dsc_file].has_key("id")):
- continue
- orig_file_id = changes.orig_files[dsc_file]["id"]
- dest = os.path.join(dest_dir, dsc_file)
-
- # If it doesn't exist, create a symlink
- if not os.path.exists(dest):
- q = session.execute("SELECT l.path, f.filename FROM location l, files f WHERE f.id = :id and f.location = l.id",
- {'id': orig_file_id})
- res = q.fetchone()
- if not res:
- return "[INTERNAL ERROR] Couldn't find id %s in files table." % (orig_file_id)
-
- src = os.path.join(res[0], res[1])
- os.symlink(src, dest)
-
- # Add it to the list of packages for later processing by apt-ftparchive
- qb = QueueBuild()
- qb.suite_id = s.suite_id
- qb.queue_id = self.queue_id
- qb.filename = dest
- qb.in_queue = True
- session.add(qb)
-
- # If it does, update things to ensure it's not removed prematurely
- else:
- qb = get_queue_build(dest, s.suite_id, session)
- if qb is None:
- qb.in_queue = True
- qb.last_used = None
- session.add(qb)
+ try:
+ if self.copy_pool_files:
+ # We need to copy instead of symlink
+ import utils
+                utils.copy(targetpath, queuepath)
+ # NULL in the fileid field implies a copy
+ qf.fileid = None
+ else:
+                os.symlink(targetpath, queuepath)
+ qf.fileid = poolfile.file_id
+ except OSError:
+ return None
- if privatetrans:
- session.commit()
- session.close()
+ # Get the same session as the PoolFile is using and add the qf to it
+ DBConn().session().object_session(poolfile).add(qf)
+
+ return qf
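+
+    # A minimal usage sketch (hypothetical caller, untested): look up a queue,
+    # link a pool file into it, and commit on the caller's side, since
+    # add_file_from_pool intentionally leaves the transaction open:
+    #
+    #   session = DBConn().session()
+    #   queue = get_queue('buildd', session)
+    #   qf = queue.add_file_from_pool(poolfile)
+    #   if qf is None:
+    #       utils.warn("Couldn't add %s to queue" % poolfile.filename)
+    #   else:
+    #       session.commit()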
- return None
__all__.append('Queue')
@session_wrapper
-def get_or_set_queue(queuename, session=None):
+def get_queue(queuename, session=None):
"""
-    Returns Queue object for given C{queue name}, creating it if it does not
-    exist.
+    Returns Queue object for given C{queue name}, or None if no such queue
+    exists.
q = session.query(Queue).filter_by(queue_name=queuename)
try:
- ret = q.one()
+ return q.one()
except NoResultFound:
- queue = Queue()
- queue.queue_name = queuename
- session.add(queue)
- session.commit_or_flush()
- ret = queue
-
- return ret
+ return None
-__all__.append('get_or_set_queue')
+__all__.append('get_queue')
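+
+# Since get_queue, unlike the old get_or_set_queue, no longer creates missing
+# queues, callers must now handle the miss themselves. A minimal sketch
+# (hypothetical caller):
+#
+#   q = get_queue('byhand')
+#   if q is None:
+#       utils.fubar("Could not find queue 'byhand' in the database")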
################################################################################
-class QueueBuild(object):
+class QueueFile(object):
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
- return '<QueueBuild %s (%s)>' % (self.filename, self.queue_id)
-
-__all__.append('QueueBuild')
-
-@session_wrapper
-def get_queue_build(filename, suite, session=None):
- """
- Returns QueueBuild object for given C{filename} and C{suite}.
-
- @type filename: string
- @param filename: The name of the file
-
- @type suiteid: int or str
- @param suiteid: Suite name or ID
-
- @type session: Session
- @param session: Optional SQLA session object (a temporary one will be
- generated if not supplied)
+ return '<QueueFile %s (%s)>' % (self.filename, self.queue_id)
- @rtype: Queue
- @return: Queue object for the given queue
- """
-
- if isinstance(suite, int):
- q = session.query(QueueBuild).filter_by(filename=filename).filter_by(suite_id=suite)
- else:
- q = session.query(QueueBuild).filter_by(filename=filename)
- q = q.join(Suite).filter_by(suite_name=suite)
-
- try:
- return q.one()
- except NoResultFound:
- return None
-
-__all__.append('get_queue_build')
+__all__.append('QueueFile')
################################################################################
self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True)
self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True)
self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True)
+ self.tbl_changes_pending_files = Table('changes_pending_files', self.db_meta, autoload=True)
+ self.tbl_changes_pool_files = Table('changes_pool_files', self.db_meta, autoload=True)
self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True)
self.tbl_files = Table('files', self.db_meta, autoload=True)
self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True)
self.tbl_pending_content_associations = Table('pending_content_associations', self.db_meta, autoload=True)
self.tbl_priority = Table('priority', self.db_meta, autoload=True)
self.tbl_queue = Table('queue', self.db_meta, autoload=True)
- self.tbl_queue_build = Table('queue_build', self.db_meta, autoload=True)
+ self.tbl_queue_files = Table('queue_files', self.db_meta, autoload=True)
self.tbl_section = Table('section', self.db_meta, autoload=True)
self.tbl_source = Table('source', self.db_meta, autoload=True)
self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True)
self.tbl_suite = Table('suite', self.db_meta, autoload=True)
self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True)
self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True)
+ self.tbl_suite_queue_copy = Table('suite_queue_copy', self.db_meta, autoload=True)
self.tbl_uid = Table('uid', self.db_meta, autoload=True)
self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True)
keyring_id = self.tbl_keyrings.c.id))
mapper(KnownChange, self.tbl_known_changes,
- properties = dict(known_change_id = self.tbl_known_changes.c.id))
+ properties = dict(known_change_id = self.tbl_known_changes.c.id,
+ poolfiles = relation(PoolFile,
+ secondary=self.tbl_changes_pool_files,
+ backref="changeslinks"),
+ files = relation(KnownChangePendingFile, backref="changesfile")))
+
+ mapper(KnownChangePendingFile, self.tbl_changes_pending_files,
+           properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.c.id))
mapper(KeyringACLMap, self.tbl_keyring_acl_map,
properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
mapper(Queue, self.tbl_queue,
properties = dict(queue_id = self.tbl_queue.c.id))
- mapper(QueueBuild, self.tbl_queue_build,
- properties = dict(suite_id = self.tbl_queue_build.c.suite,
- queue_id = self.tbl_queue_build.c.queue,
- queue = relation(Queue, backref='queuebuild')))
+ mapper(QueueFile, self.tbl_queue_files,
+ properties = dict(queue = relation(Queue, backref='queuefiles'),
+ poolfile = relation(PoolFile, backref='queueinstances')))
mapper(Section, self.tbl_section,
properties = dict(section_id = self.tbl_section.c.id))
mapper(Suite, self.tbl_suite,
properties = dict(suite_id = self.tbl_suite.c.id,
- policy_queue = relation(Queue)))
+ policy_queue = relation(Queue),
+ copy_queues = relation(Queue, secondary=self.tbl_suite_queue_copy)))
mapper(SuiteArchitecture, self.tbl_suite_architectures,
properties = dict(suite_id = self.tbl_suite_architectures.c.suite,
os.rename(temp_filename, filename)
os.chmod(filename, 0644)
- # auto-build queue
-# res = get_or_set_queue('buildd', session).autobuild_upload(self.pkg, session)
-# if res:
-# utils.fubar(res)
-# now_date = datetime.now()
+ # This routine returns None on success or an error on failure
+ # TODO: Replace queue copying using the new queue.add_file_from_pool routine
+ # and by looking up which queues in suite.copy_queues
+ #res = get_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"])
+ #if res:
+ # utils.fubar(res)
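+        # A possible shape for that replacement (a sketch only; `suite` and
+        # `poolfiles` are assumed names for objects already loaded in this session):
+        #
+        #   for q in suite.copy_queues:
+        #       for pf in poolfiles:
+        #           q.add_file_from_pool(pf)
+        #   session.commit()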
session.commit()
os.unlink(os.path.join(from_dir, f))
if os.path.exists(os.path.join(h.holding_dir, f)):
os.unlink(os.path.join(h.holding_dir, f))
-
+
os.unlink(os.path.join(from_dir, self.pkg.changes_file))
if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
def determine_target(u):
cnf = Config()
-
+
queues = [ "New", "Autobyhand", "Byhand" ]
if cnf.FindB("Dinstall::SecurityQueueHandling"):
queues += [ "Unembargo", "Embargo" ]
# Finally ensure there's not something we don't recognise
known_keywords = dict(VALIDSIG="",SIG_ID="",GOODSIG="",BADSIG="",ERRSIG="",
SIGEXPIRED="",KEYREVOKED="",NO_PUBKEY="",BADARMOR="",
- NODATA="",NOTATION_DATA="",NOTATION_NAME="",KEYEXPIRED="")
+ NODATA="",NOTATION_DATA="",NOTATION_NAME="",KEYEXPIRED="",POLICY_URL="")
for keyword in keywords.keys():
if not known_keywords.has_key(keyword):