X-Git-Url: https://git.decadent.org.uk/gitweb/?a=blobdiff_plain;f=daklib%2Fdbconn.py;h=ea11a1454724cd0f77aef276e89cb01c8e1b3642;hb=2f2cc6977f137b0581c83aceb881dd667fee47f7;hp=f7ef853dc6cce6bfe531facd5b298bb8bf5a5337;hpb=29586333f60d53b4a1d4e8c3580ae5c3256c8bd9;p=dak.git

diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index f7ef853d..ea11a145 100755
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -5,7 +5,7 @@
 @contact: Debian FTPMaster <ftpmaster@debian.org>
 @copyright: 2000, 2001, 2002, 2003, 2004, 2006  James Troup <james@nocrew.org>
 @copyright: 2008-2009  Mark Hymers <mhy@debian.org>
-@copyright: 2009  Joerg Jaspert <joerg@debian.org>
+@copyright: 2009, 2010  Joerg Jaspert <joerg@debian.org>
 @copyright: 2009  Mike O'Connor <stew@debian.org>
 @license: GNU General Public License version 2 or later
 """
@@ -37,7 +37,9 @@ import os
 import re
 import psycopg2
 import traceback
-from datetime import datetime
+from datetime import datetime, timedelta
+from errno import ENOENT
+from tempfile import mkstemp, mkdtemp
 
 from inspect import getargspec
 
@@ -53,7 +55,6 @@ from sqlalchemy.orm.exc import NoResultFound
 # Only import Config until Queue stuff is changed to store its config
 # in the database
 from config import Config
-from singleton import Singleton
 from textutils import fix_maintainer
 
################################################################################
@@ -62,6 +63,10 @@ from textutils import fix_maintainer
 # reflection
 
 class DebVersion(sqltypes.Text):
+    """
+    Support the debversion type
+    """
+
     def get_col_spec(self):
         return "DEBVERSION"
 
@@ -180,8 +185,8 @@ def get_architecture_suites(architecture, session=None):
     """
     Returns list of Suite objects for given C{architecture} name
 
-    @type source: str
-    @param source: Architecture name to search for
+    @type architecture: str
+    @param architecture: Architecture name to search for
 
     @type session: Session
     @param session: Optional SQL session object (a temporary one will be
@@ -277,8 +282,8 @@ def get_suites_binary_in(package, session=None):
     """
     Returns list of Suite objects which given C{package} name is in
 
-    @type source: str
-    @param source: DBBinary package name to search for
+    @type package: str
+    @param package: DBBinary package name to search for
 
     @rtype: list
     @return: list of Suite objects for the given package
@@ -324,8 +329,8 @@ def get_binaries_from_name(package, version=None, architecture=None, session=Non
     @type version: str or None
     @param version: Version to search for (or None)
 
-    @type package: str, list or None
-    @param package: Architectures to limit to (or None if no limit)
+    @type architecture: str, list or None
+    @param architecture: Architectures to limit to (or None if no limit)
 
     @type session: Session
     @param session: Optional SQL session object (a temporary one will be
@@ -378,16 +383,16 @@ def get_binary_from_name_suite(package, suitename, session=None):
 
     sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name
              FROM binaries b, files fi, location l, component c, bin_associations ba, suite su
-             WHERE b.package=:package
+             WHERE b.package='%(package)s'
               AND b.file = fi.id
               AND fi.location = l.id
              AND l.component = c.id
              AND ba.bin=b.id
              AND ba.suite = su.id
-             AND su.suite_name=:suitename
+             AND su.suite_name %(suitename)s
          ORDER BY b.version DESC"""
 
-    return session.execute(sql, {'package': package, 'suitename': suitename})
+    return session.execute(sql % {'package': package, 'suitename': suitename})
 
 __all__.append('get_binary_from_name_suite')
 
@@ -432,6 +437,319 @@ __all__.append('BinaryACLMap')
 
 ################################################################################
 
+MINIMAL_APT_CONF="""
+Dir
+{
+   ArchiveDir "%(archivepath)s";
+   OverrideDir "/srv/ftp.debian.org/scripts/override/";
+   CacheDir "/srv/ftp.debian.org/database/";
+};
+
+Default
+{
+   Packages::Compress ". bzip2 gzip";
+   Sources::Compress ". bzip2 gzip";
+   DeLinkLimit 0;
+   FileMode 0664;
+}
+
+bindirectory "incoming"
+{
+   Packages "Packages";
+   Contents " ";
+
+   BinOverride "override.sid.all3";
+   BinCacheDB "packages-accepted.db";
+
+   FileList "%(filelist)s";
+
+   PathPrefix "";
+   Packages::Extensions ".deb .udeb";
+};
+
+bindirectory "incoming/"
+{
+   Sources "Sources";
+   BinOverride "override.sid.all3";
+   SrcOverride "override.sid.all3.src";
+   FileList "%(filelist)s";
+};
+"""
+
+class BuildQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueue %s>' % self.queue_name
+
+    def write_metadata(self, starttime, force=False):
+        # Do we write out metafiles?
+        if not (force or self.generate_metadata):
+            return
+
+        session = DBConn().session().object_session(self)
+
+        fl_fd = fl_name = ac_fd = ac_name = None
+        tempdir = None
+        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
+        startdir = os.getcwd()
+
+        try:
+            # Grab files we want to include
+            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+            # Write file list with newer files
+            (fl_fd, fl_name) = mkstemp()
+            for n in newer:
+                os.write(fl_fd, '%s\n' % n.fullpath)
+            os.close(fl_fd)
+
+            # Write minimal apt.conf
+            # TODO: Remove hardcoding from template
+            (ac_fd, ac_name) = mkstemp()
+            os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
+                                                'filelist': fl_name})
+            os.close(ac_fd)
+
+            # Run apt-ftparchive generate
+            os.chdir(os.path.dirname(ac_name))
+            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
+
+            # Run apt-ftparchive release
+            # TODO: Eww - fix this
+            bname = os.path.basename(self.path)
+            os.chdir(self.path)
+            os.chdir('..')
+
+            # We have to remove the Release file otherwise it'll be included in the
+            # new one
+            try:
+                os.unlink(os.path.join(bname, 'Release'))
+            except OSError:
+                pass
+
+            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
+
+            # Crude hack with open and append, but this whole section is and should be redone.
+            if self.notautomatic:
+                release = open("Release", "a")
+                release.write("NotAutomatic: yes\n")
+                release.close()
+
+            # Sign if necessary
+            if self.signingkey:
+                cnf = Config()
+                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
+                if cnf.has_key("Dinstall::SigningPubKeyring"):
+                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
+
+                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release" % (keyring, self.signingkey))
+
+            # Move the files if we got this far
+            os.rename('Release', os.path.join(bname, 'Release'))
+            if self.signingkey:
+                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
+
+        # Clean up any left behind files
+        finally:
+            os.chdir(startdir)
+            if fl_fd:
+                try:
+                    os.close(fl_fd)
+                except OSError:
+                    pass
+
+            if fl_name:
+                try:
+                    os.unlink(fl_name)
+                except OSError:
+                    pass
+
+            if ac_fd:
+                try:
+                    os.close(ac_fd)
+                except OSError:
+                    pass
+
+            if ac_name:
+                try:
+                    os.unlink(ac_name)
+                except OSError:
+                    pass
+
+    def clean_and_update(self, starttime, Logger, dryrun=False):
+        """WARNING: This routine commits for you"""
+        session = DBConn().session().object_session(self)
+
+        if self.generate_metadata and not dryrun:
+            self.write_metadata(starttime)
+
+        # Grab files older than our execution time
+        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+
+        for o in older:
+            killdb = False
+            try:
+                if dryrun:
+                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
+                else:
+                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
+                    os.unlink(o.fullpath)
+                    killdb = True
+            except OSError, e:
+                # If it wasn't there, don't worry
+                if e.errno == ENOENT:
+                    killdb = True
+                else:
+                    # TODO: Replace with proper logging call
+                    Logger.log(["E: Could not remove %s" % o.fullpath])
+
+            if killdb:
+                session.delete(o)
+
+        session.commit()
+
+        for f in os.listdir(self.path):
+            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release'):
+                continue
+
+            try:
+                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+            except NoResultFound:
+                fp = os.path.join(self.path, f)
+                if dryrun:
+                    Logger.log(["I: Would remove unused link %s" % fp])
+                else:
+                    Logger.log(["I: Removing unused link %s" % fp])
+                    try:
+                        os.unlink(fp)
+                    except OSError:
+                        Logger.log(["E: Failed to unlink unreferenced file %s" % fp])
+
+    def add_file_from_pool(self, poolfile):
+        """Copies a file from the pool into this build queue.  Assumes that the
+        PoolFile object is attached to the same SQLAlchemy session as the Queue
+        object is.
+
+        The caller is responsible for committing after calling this function."""
+        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
+
+        # Check if we have a file of this name or this ID already
+        for f in self.queuefiles:
+            if f.fileid is not None and f.fileid == poolfile.file_id or \
+               f.poolfile.filename == poolfile_basename:
+                   # In this case, update the BuildQueueFile entry so we
+                   # don't remove it too early
+                   f.lastused = datetime.now()
+                   DBConn().session().object_session(poolfile).add(f)
+                   return f
+
+        # Prepare BuildQueueFile object
+        qf = BuildQueueFile()
+        qf.build_queue_id = self.queue_id
+        qf.lastused = datetime.now()
+        qf.filename = poolfile_basename
+
+        targetpath = poolfile.fullpath
+        queuepath = os.path.join(self.path, poolfile_basename)
+
+        try:
+            if self.copy_files:
+                # We need to copy instead of symlink
+                import utils
+                utils.copy(targetpath, queuepath)
+                # NULL in the fileid field implies a copy
+                qf.fileid = None
+            else:
+                os.symlink(targetpath, queuepath)
+                qf.fileid = poolfile.file_id
+        except OSError:
+            return None
+
+        # Get the same session as the PoolFile is using and add the qf to it
+        DBConn().session().object_session(poolfile).add(qf)
+
+        return qf
+
+
+__all__.append('BuildQueue')
+
+@session_wrapper
+def get_build_queue(queuename, session=None):
+    """
+    Returns BuildQueue object for given C{queue name}.
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: BuildQueue
+    @return: BuildQueue object for the given queue (C{None} if not present)
+    """
+
+    q = session.query(BuildQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_build_queue')
+
+################################################################################
+
+class BuildQueueFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
+
+    @property
+    def fullpath(self):
+        return os.path.join(self.buildqueue.path, self.filename)
+
+
+__all__.append('BuildQueueFile')
+
+################################################################################
+
+class ChangePendingBinary(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
+
+__all__.append('ChangePendingBinary')
+
+################################################################################
+
+class ChangePendingFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingFile %s>' % self.change_pending_file_id
+
+__all__.append('ChangePendingFile')
+
+################################################################################
+
+class ChangePendingSource(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingSource %s>' % self.change_pending_source_id
+
+__all__.append('ChangePendingSource')
+
+################################################################################
+
 class Component(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -592,8 +910,9 @@ def get_or_set_contents_path_id(filepath, session=None):
 
     If no matching file is found, a row is inserted.
-    @type filename: string
-    @param filename: The filepath
+    @type filepath: string
+    @param filepath: The filepath
+
     @type session: SQLAlchemy
     @param session: Optional SQL session object (a temporary one will be
     generated if not supplied).  If not passed, a commit will be performed at
@@ -654,12 +973,16 @@ def insert_content_paths(binary_id, fullpaths, session=None):
 
     try:
         # Insert paths
-        pathcache = {}
-        for fullpath in fullpaths:
-            if fullpath.startswith( './' ):
-                fullpath = fullpath[2:]
+        def generate_path_dicts():
+            for fullpath in fullpaths:
+                if fullpath.startswith( './' ):
+                    fullpath = fullpath[2:]
+
+                yield {'filename':fullpath, 'id': binary_id }
 
-            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )", { 'filename': fullpath, 'id': binary_id} )
+        for d in generate_path_dicts():
+            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
+                             d )
 
         session.commit()
         if privatetrans:
@@ -731,13 +1054,17 @@ class PoolFile(object):
     def __repr__(self):
         return '<PoolFile %s>' % self.filename
 
+    @property
+    def fullpath(self):
+        return os.path.join(self.location.path, self.filename)
+
 __all__.append('PoolFile')
 
 @session_wrapper
 def check_poolfile(filename, filesize, md5sum, location_id, session=None):
     """
     Returns a tuple:
-     (ValidFileFound [boolean or None], PoolFile object or None)
+    (ValidFileFound [boolean or None], PoolFile object or None)
 
     @type filename: string
     @param filename: the filename of the file to check against the DB
@@ -753,12 +1080,11 @@ def check_poolfile(filename, filesize, md5sum, location_id, session=None):
 
     @rtype: tuple
     @return: Tuple of length 2.
-             If more than one file found with that name:
-                    (None, None)
-             If valid pool file found: (True, PoolFile object)
-             If valid pool file not found:
-                    (False, None) if no file found
-                    (False, PoolFile object) if file found with size/md5sum mismatch
+             - If more than one file found with that name: (C{None}, C{None})
+             - If valid pool file found: (C{True}, C{PoolFile object})
+             - If valid pool file not found:
+                 - (C{False}, C{None}) if no file found
+                 - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
     """
 
     q = session.query(PoolFile).filter_by(filename=filename)
@@ -842,7 +1168,7 @@ def get_poolfile_like_name(filename, session=None):
     """
 
     # TODO: There must be a way of properly using bind parameters with %FOO%
-    q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename))
+    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
 
     return q.all()
 
@@ -1120,41 +1446,55 @@ __all__.append('KeyringACLMap')
 
 ################################################################################
 
-class KnownChange(object):
+class DBChange(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<KnownChange %s>' % self.changesname
+        return '<DBChange %s>' % self.changesname
+
+    def clean_from_queue(self):
+        session = DBConn().session().object_session(self)
 
-__all__.append('KnownChange')
+        # Remove changes_pool_files entries
+        self.poolfiles = []
+
+        # Remove changes_pending_files references
+        self.files = []
+
+        # Clear out of queue
+        self.in_queue = None
+        self.approved_for_id = None
+
+__all__.append('DBChange')
 
 @session_wrapper
-def get_knownchange(filename, session=None):
+def get_dbchange(filename, session=None):
     """
-    returns knownchange object for given C{filename}.
+    returns DBChange object for given C{filename}.
-    @type archive: string
-    @param archive: the name of the arhive
+    @type filename: string
+    @param filename: the name of the file
 
     @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)
 
-    @rtype: Archive
-    @return: Archive object for the given name (None if not present)
+    @rtype: DBChange
+    @return: DBChange object for the given filename (C{None} if not present)
     """
 
-    q = session.query(KnownChange).filter_by(changesname=filename)
+    q = session.query(DBChange).filter_by(changesname=filename)
 
     try:
         return q.one()
     except NoResultFound:
         return None
 
-__all__.append('get_knownchange')
+__all__.append('get_dbchange')
 
 ################################################################################
 
+
 class Location(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1177,7 +1517,7 @@ def get_location(location, component=None, archive=None, session=None):
     @param component: the component name (if None, no restriction applied)
 
     @type archive: string
-    @param archive_id: the archive name (if None, no restriction applied)
+    @param archive: the archive name (if None, no restriction applied)
 
     @rtype: Location / None
     @return: Either a Location object or None if one can't be found
@@ -1433,16 +1773,38 @@ __all__.append('get_override_type')
 
 ################################################################################
 
-class PendingContentAssociation(object):
+class DebContents(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<DebContents %s: %s>' % (self.package.package,self.file)
+
+__all__.append('DebContents')
+
+
+class UdebContents(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<UdebContents %s: %s>' % (self.package.package,self.file)
+
+__all__.append('UdebContents')
+
+class PendingBinContents(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<PendingContentAssociation %s>' % self.pca_id
+        return '<PendingBinContents %s>' % self.contents_id
 
-__all__.append('PendingContentAssociation')
+__all__.append('PendingBinContents')
 
-def insert_pending_content_paths(package, fullpaths, session=None):
+def insert_pending_content_paths(package,
+                                 is_udeb,
+                                 fullpaths,
+                                 session=None):
     """
     Make sure given paths are temporarily associated with given
     package
 
@@ -1471,32 +1833,27 @@ def insert_pending_content_paths(package, fullpaths, session=None):
     arch_id = arch.arch_id
 
     # Remove any already existing recorded files for this package
-    q = session.query(PendingContentAssociation)
+    q = session.query(PendingBinContents)
     q = q.filter_by(package=package['Package'])
     q = q.filter_by(version=package['Version'])
     q = q.filter_by(architecture=arch_id)
     q.delete()
 
-    # Insert paths
-    pathcache = {}
     for fullpath in fullpaths:
-        (path, filename) = os.path.split(fullpath)
-
-        if path.startswith( "./" ):
-            path = path[2:]
-        filepath_id = get_or_set_contents_path_id(path, session)
-        filename_id = get_or_set_contents_file_id(filename, session)
-
-        pathcache[fullpath] = (filepath_id, filename_id)
+        if fullpath.startswith( "./" ):
+            fullpath = fullpath[2:]
 
-    for fullpath, dat in pathcache.items():
-        pca = PendingContentAssociation()
+        pca = PendingBinContents()
         pca.package = package['Package']
         pca.version = package['Version']
-        pca.filepath_id = dat[0]
-        pca.filename_id = dat[1]
+        pca.file = fullpath
         pca.architecture = arch_id
+
+        if is_udeb:
+            pca.type = 8 # gross
+        else:
+            pca.type = 7 # also gross
         session.add(pca)
 
     # Only commit if we set up the session ourself
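
[Editor's note: the rewritten insert_pending_content_paths() above now stores one flat row per path and tags it with a type shortcut (7 for deb, 8 for udeb) instead of the old split path/filename ID pair. A minimal calling sketch, assuming a hand-built package dict shaped like the control fields the function reads; the package name, version, architecture and paths are made-up illustration values, not dak data:

    # Sketch: record pending contents for a just-uploaded binary package.
    from daklib.dbconn import DBConn, insert_pending_content_paths

    package = {'Package': 'examplepkg',       # hypothetical values
               'Version': '1.0-1',
               'Architecture': 'amd64'}
    paths = ['./usr/bin/example',
             './usr/share/doc/examplepkg/copyright']

    session = DBConn().session()
    # is_udeb=False stores pca.type = 7; True would store 8.
    insert_pending_content_paths(package, False, paths, session=session)
    session.commit()   # when we pass a session in, committing is our job

The leading "./" is stripped by the function itself, so callers can hand over paths exactly as extracted from the package.]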
@@ -1521,6 +1878,42 @@ __all__.append('insert_pending_content_paths')
 
 ################################################################################
 
+class PolicyQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<PolicyQueue %s>' % self.queue_name
+
+__all__.append('PolicyQueue')
+
+@session_wrapper
+def get_policy_queue(queuename, session=None):
+    """
+    Returns PolicyQueue object for given C{queue name}
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given queue
+    """
+
+    q = session.query(PolicyQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_policy_queue')
+
+################################################################################
+
 class Priority(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1591,209 +1984,6 @@ __all__.append('get_priorities')
 
 ################################################################################
 
-class Queue(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<Queue %s>' % self.queue_name
-
-    def autobuild_upload(self, changes, srcpath, session=None):
-        """
-        Update queue_build database table used for incoming autobuild support.
-
-        @type changes: Changes
-        @param changes: changes object for the upload to process
-
-        @type srcpath: string
-        @param srcpath: path for the queue file entries/link destinations
-
-        @type session: SQLAlchemy session
-        @param session: Optional SQLAlchemy session.  If this is passed, the
-        caller is responsible for ensuring a transaction has begun and
-        committing the results or rolling back based on the result code.  If
-        not passed, a commit will be performed at the end of the function,
-        otherwise the caller is responsible for commiting.
-
-        @rtype: NoneType or string
-        @return: None if the operation failed, a string describing the error if not
-        """
-
-        privatetrans = False
-        if session is None:
-            session = DBConn().session()
-            privatetrans = True
-
-        # TODO: Remove by moving queue config into the database
-        conf = Config()
-
-        for suitename in changes.changes["distribution"].keys():
-            # TODO: Move into database as:
-            #       buildqueuedir TEXT DEFAULT NULL (i.e. NULL is no build)
-            #       buildqueuecopy BOOLEAN NOT NULL DEFAULT FALSE (i.e.
default is symlink) - # This also gets rid of the SecurityQueueBuild hack below - if suitename not in conf.ValueList("Dinstall::QueueBuildSuites"): - continue - - # Find suite object - s = get_suite(suitename, session) - if s is None: - return "INTERNAL ERROR: Could not find suite %s" % suitename - - # TODO: Get from database as above - dest_dir = conf["Dir::QueueBuild"] - - # TODO: Move into database as above - if conf.FindB("Dinstall::SecurityQueueBuild"): - dest_dir = os.path.join(dest_dir, suitename) - - for file_entry in changes.files.keys(): - src = os.path.join(srcpath, file_entry) - dest = os.path.join(dest_dir, file_entry) - - # TODO: Move into database as above - if conf.FindB("Dinstall::SecurityQueueBuild"): - # Copy it since the original won't be readable by www-data - import utils - utils.copy(src, dest) - else: - # Create a symlink to it - os.symlink(src, dest) - - qb = QueueBuild() - qb.suite_id = s.suite_id - qb.queue_id = self.queue_id - qb.filename = dest - qb.in_queue = True - - session.add(qb) - - # If the .orig tarballs are in the pool, create a symlink to - # them (if one doesn't already exist) - for dsc_file in changes.dsc_files.keys(): - # Skip all files except orig tarballs - from daklib.regexes import re_is_orig_source - if not re_is_orig_source.match(dsc_file): - continue - # Skip orig files not identified in the pool - if not (changes.orig_files.has_key(dsc_file) and - changes.orig_files[dsc_file].has_key("id")): - continue - orig_file_id = changes.orig_files[dsc_file]["id"] - dest = os.path.join(dest_dir, dsc_file) - - # If it doesn't exist, create a symlink - if not os.path.exists(dest): - q = session.execute("SELECT l.path, f.filename FROM location l, files f WHERE f.id = :id and f.location = l.id", - {'id': orig_file_id}) - res = q.fetchone() - if not res: - return "[INTERNAL ERROR] Couldn't find id %s in files table." % (orig_file_id) - - src = os.path.join(res[0], res[1]) - os.symlink(src, dest) - - # Add it to the list of packages for later processing by apt-ftparchive - qb = QueueBuild() - qb.suite_id = s.suite_id - qb.queue_id = self.queue_id - qb.filename = dest - qb.in_queue = True - session.add(qb) - - # If it does, update things to ensure it's not removed prematurely - else: - qb = get_queue_build(dest, s.suite_id, session) - if qb is None: - qb.in_queue = True - qb.last_used = None - session.add(qb) - - if privatetrans: - session.commit() - session.close() - - return None - -__all__.append('Queue') - -@session_wrapper -def get_or_set_queue(queuename, session=None): - """ - Returns Queue object for given C{queue name}, creating it if it does not - exist. 
-
-    @type queuename: string
-    @param queuename: The name of the queue
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: Queue
-    @return: Queue object for the given queue
-    """
-
-    q = session.query(Queue).filter_by(queue_name=queuename)
-
-    try:
-        ret = q.one()
-    except NoResultFound:
-        queue = Queue()
-        queue.queue_name = queuename
-        session.add(queue)
-        session.commit_or_flush()
-        ret = queue
-
-    return ret
-
-__all__.append('get_or_set_queue')
-
-################################################################################
-
-class QueueBuild(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<QueueBuild %s (%s)>' % (self.filename, self.queue_id)
-
-__all__.append('QueueBuild')
-
-@session_wrapper
-def get_queue_build(filename, suite, session=None):
-    """
-    Returns QueueBuild object for given C{filename} and C{suite}.
-
-    @type filename: string
-    @param filename: The name of the file
-
-    @type suiteid: int or str
-    @param suiteid: Suite name or ID
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: Queue
-    @return: Queue object for the given queue
-    """
-
-    if isinstance(suite, int):
-        q = session.query(QueueBuild).filter_by(filename=filename).filter_by(suite_id=suite)
-    else:
-        q = session.query(QueueBuild).filter_by(filename=filename)
-        q = q.join(Suite).filter_by(suite_name=suite)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_queue_build')
-
-################################################################################
-
 class Section(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1881,8 +2071,8 @@ def source_exists(source, source_version, suites = ["any"], session=None):
       1. exact match     => 1.0-3
       2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1
 
-    @type package: string
-    @param package: package source name
+    @type source: string
+    @param source: source name
 
    @type source_version: string
    @param source_version: expected source version
@@ -1965,8 +2155,8 @@ def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=
     @type source: str
     @param source: DBSource package name to search for
 
-    @type source: str or None
-    @param source: DBSource version name to search for or None if not applicable
+    @type version: str or None
+    @param version: DBSource version name to search for or None if not applicable
 
     @type dm_upload_allowed: bool
     @param dm_upload_allowed: If None, no effect.  If True or False, only
@@ -2028,6 +2218,7 @@ __all__.append('get_source_in_suite')
 def add_dsc_to_db(u, filename, session=None):
     entry = u.pkg.files[filename]
     source = DBSource()
+    pfs = []
 
     source.source = u.pkg.dsc["source"]
     source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
@@ -2045,6 +2236,8 @@ def add_dsc_to_db(u, filename, session=None):
     if not entry.has_key("files id") or not entry["files id"]:
         filename = entry["pool name"] + filename
         poolfile = add_poolfile(filename, entry, dsc_location_id, session)
+        session.flush()
+        pfs.append(poolfile)
         entry["files id"] = poolfile.file_id
 
     source.poolfile_id = entry["files id"]
@@ -2088,6 +2281,7 @@ def add_dsc_to_db(u, filename, session=None):
         # FIXME: needs to check for -1/-2 and or handle exception
         if found and obj is not None:
             files_id = obj.file_id
+            pfs.append(obj)
 
         # If still not found, add it
         if files_id is None:
@@ -2095,7 +2289,13 @@ def add_dsc_to_db(u, filename, session=None):
             dentry["sha1sum"] = dfentry["sha1sum"]
             dentry["sha256sum"] = dfentry["sha256sum"]
             poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
+            pfs.append(poolfile)
             files_id = poolfile.file_id
+        else:
+            poolfile = get_poolfile_by_id(files_id, session)
+            if poolfile is None:
+                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
+            pfs.append(poolfile)
 
         df.poolfile_id = files_id
         session.add(df)
@@ -2105,26 +2305,27 @@ def add_dsc_to_db(u, filename, session=None):
     # Add the src_uploaders to the DB
     uploader_ids = [source.maintainer_id]
     if u.pkg.dsc.has_key("uploaders"):
-        for up in u.pkg.dsc["uploaders"].split(","):
+        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
             up = up.strip()
             uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
 
     added_ids = {}
-    for up in uploader_ids:
-        if added_ids.has_key(up):
-            utils.warn("Already saw uploader %s for source %s" % (up, source.source))
+    for up_id in uploader_ids:
+        if added_ids.has_key(up_id):
+            import utils
+            utils.warn("Already saw uploader %s for source %s" % (up_id, source.source))
             continue
 
-        added_ids[u]=1
+        added_ids[up_id]=1
 
         su = SrcUploader()
-        su.maintainer_id = up
+        su.maintainer_id = up_id
         su.source_id = source.source_id
         session.add(su)
 
     session.flush()
 
-    return dsc_component, dsc_location_id
+    return source, dsc_component, dsc_location_id, pfs
 
 __all__.append('add_dsc_to_db')
 
@@ -2150,13 +2351,14 @@ def add_deb_to_db(u, filename, session=None):
     filename = entry["pool name"] + filename
     fullpath = os.path.join(cnf["Dir::Pool"], filename)
     if not entry.get("location id", None):
-        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], utils.where_am_i(), session).location_id
+        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
 
-    if not entry.get("files id", None):
+    if entry.get("files id", None):
+        poolfile = get_poolfile_by_id(entry["files id"], session)
+        bin.poolfile_id = entry["files id"]
+    else:
         poolfile = add_poolfile(filename, entry, entry["location id"], session)
-        entry["files id"] = poolfile.file_id
-
-    bin.poolfile_id = entry["files id"]
+        bin.poolfile_id = entry["files id"] = poolfile.file_id
 
     # Find source id
     bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
@@ -2187,6 +2389,8 @@ def add_deb_to_db(u, filename, session=None):
 #        session.rollback()
 #        raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
 
+    return poolfile
+
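[Editor's note: add_dsc_to_db() and add_deb_to_db() now hand the PoolFile objects they touched back to the caller, so upload-processing code can track every file it installed and commit once at the end. A rough sketch of the calling pattern this enables; the upload object u, the session handling and the extension tests below are hypothetical simplifications, not the real callers elsewhere in dak:

    # Sketch: collect the poolfiles created while installing an upload.
    poolfiles = []
    for name in u.pkg.files.keys():
        if name.endswith('.deb') or name.endswith('.udeb'):
            poolfiles.append(add_deb_to_db(u, name, session))
        elif name.endswith('.dsc'):
            source, component, location_id, pfs = add_dsc_to_db(u, name, session)
            poolfiles.extend(pfs)
    session.commit()   # the caller owns the transaction
]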
__all__.append('add_deb_to_db') ################################################################################ @@ -2357,8 +2561,8 @@ def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None): """ Returns list of Architecture objects for given C{suite} name - @type source: str - @param source: Suite name to search for + @type suite: str + @param suite: Suite name to search for @type skipsrc: boolean @param skipsrc: Whether to skip returning the 'source' architecture entry @@ -2532,57 +2736,72 @@ __all__.append('UploadBlock') ################################################################################ -class DBConn(Singleton): +class DBConn(object): """ database module init. """ + __shared_state = {} + def __init__(self, *args, **kwargs): - super(DBConn, self).__init__(*args, **kwargs) + self.__dict__ = self.__shared_state - def _startup(self, *args, **kwargs): - self.debug = False - if kwargs.has_key('debug'): - self.debug = True - self.__createconn() + if not getattr(self, 'initialised', False): + self.initialised = True + self.debug = kwargs.has_key('debug') + self.__createconn() def __setuptables(self): - self.tbl_architecture = Table('architecture', self.db_meta, autoload=True) - self.tbl_archive = Table('archive', self.db_meta, autoload=True) - self.tbl_bin_associations = Table('bin_associations', self.db_meta, autoload=True) - self.tbl_binaries = Table('binaries', self.db_meta, autoload=True) - self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True) - self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True) - self.tbl_component = Table('component', self.db_meta, autoload=True) - self.tbl_config = Table('config', self.db_meta, autoload=True) - self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True) - self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True) - self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True) - self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True) - self.tbl_files = Table('files', self.db_meta, autoload=True) - self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True) - self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True) - self.tbl_known_changes = Table('known_changes', self.db_meta, autoload=True) - self.tbl_keyring_acl_map = Table('keyring_acl_map', self.db_meta, autoload=True) - self.tbl_location = Table('location', self.db_meta, autoload=True) - self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True) - self.tbl_new_comments = Table('new_comments', self.db_meta, autoload=True) - self.tbl_override = Table('override', self.db_meta, autoload=True) - self.tbl_override_type = Table('override_type', self.db_meta, autoload=True) - self.tbl_pending_content_associations = Table('pending_content_associations', self.db_meta, autoload=True) - self.tbl_priority = Table('priority', self.db_meta, autoload=True) - self.tbl_queue = Table('queue', self.db_meta, autoload=True) - self.tbl_queue_build = Table('queue_build', self.db_meta, autoload=True) - self.tbl_section = Table('section', self.db_meta, autoload=True) - self.tbl_source = Table('source', self.db_meta, autoload=True) - self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True) - self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True) - self.tbl_src_format = Table('src_format', self.db_meta, autoload=True) - self.tbl_src_uploaders = Table('src_uploaders', 
self.db_meta, autoload=True) - self.tbl_suite = Table('suite', self.db_meta, autoload=True) - self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True) - self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True) - self.tbl_uid = Table('uid', self.db_meta, autoload=True) - self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True) + tables = ( + 'architecture', + 'archive', + 'bin_associations', + 'binaries', + 'binary_acl', + 'binary_acl_map', + 'bin_contents', + 'build_queue', + 'build_queue_files', + 'component', + 'config', + 'changes_pending_binaries', + 'changes_pending_files', + 'changes_pending_files_map', + 'changes_pending_source', + 'changes_pending_source_files', + 'changes_pool_files', + 'deb_contents', + 'dsc_files', + 'files', + 'fingerprint', + 'keyrings', + 'changes', + 'keyring_acl_map', + 'location', + 'maintainer', + 'new_comments', + 'override', + 'override_type', + 'pending_bin_contents', + 'policy_queue', + 'priority', + 'section', + 'source', + 'source_acl', + 'src_associations', + 'src_format', + 'src_uploaders', + 'suite', + 'suite_architectures', + 'suite_src_formats', + 'suite_build_queue_copy', + 'udeb_contents', + 'uid', + 'upload_blocks', + ) + + for table_name in tables: + table = Table(table_name, self.db_meta, autoload=True) + setattr(self, 'tbl_%s' % table_name, table) def __setupmappers(self): mapper(Architecture, self.tbl_architecture, @@ -2599,6 +2818,36 @@ class DBConn(Singleton): binary_id = self.tbl_bin_associations.c.bin, binary = relation(DBBinary))) + mapper(PendingBinContents, self.tbl_pending_bin_contents, + properties = dict(contents_id =self.tbl_pending_bin_contents.c.id, + filename = self.tbl_pending_bin_contents.c.filename, + package = self.tbl_pending_bin_contents.c.package, + version = self.tbl_pending_bin_contents.c.version, + arch = self.tbl_pending_bin_contents.c.arch, + otype = self.tbl_pending_bin_contents.c.type)) + + mapper(DebContents, self.tbl_deb_contents, + properties = dict(binary_id=self.tbl_deb_contents.c.binary_id, + package=self.tbl_deb_contents.c.package, + suite=self.tbl_deb_contents.c.suite, + arch=self.tbl_deb_contents.c.arch, + section=self.tbl_deb_contents.c.section, + filename=self.tbl_deb_contents.c.filename)) + + mapper(UdebContents, self.tbl_udeb_contents, + properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id, + package=self.tbl_udeb_contents.c.package, + suite=self.tbl_udeb_contents.c.suite, + arch=self.tbl_udeb_contents.c.arch, + section=self.tbl_udeb_contents.c.section, + filename=self.tbl_udeb_contents.c.filename)) + + mapper(BuildQueue, self.tbl_build_queue, + properties = dict(queue_id = self.tbl_build_queue.c.id)) + + mapper(BuildQueueFile, self.tbl_build_queue_files, + properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'), + poolfile = relation(PoolFile, backref='buildqueueinstances'))) mapper(DBBinary, self.tbl_binaries, properties = dict(binary_id = self.tbl_binaries.c.id, @@ -2660,8 +2909,52 @@ class DBConn(Singleton): properties = dict(keyring_name = self.tbl_keyrings.c.name, keyring_id = self.tbl_keyrings.c.id)) - mapper(KnownChange, self.tbl_known_changes, - properties = dict(known_change_id = self.tbl_known_changes.c.id)) + mapper(DBChange, self.tbl_changes, + properties = dict(change_id = self.tbl_changes.c.id, + poolfiles = relation(PoolFile, + secondary=self.tbl_changes_pool_files, + backref="changeslinks"), + seen = self.tbl_changes.c.seen, + source = self.tbl_changes.c.source, + binaries = 
self.tbl_changes.c.binaries, + architecture = self.tbl_changes.c.architecture, + distribution = self.tbl_changes.c.distribution, + urgency = self.tbl_changes.c.urgency, + maintainer = self.tbl_changes.c.maintainer, + changedby = self.tbl_changes.c.changedby, + date = self.tbl_changes.c.date, + version = self.tbl_changes.c.version, + files = relation(ChangePendingFile, + secondary=self.tbl_changes_pending_files_map, + backref="changesfile"), + in_queue_id = self.tbl_changes.c.in_queue, + in_queue = relation(PolicyQueue, + primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)), + approved_for_id = self.tbl_changes.c.approved_for)) + + mapper(ChangePendingBinary, self.tbl_changes_pending_binaries, + properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id)) + + mapper(ChangePendingFile, self.tbl_changes_pending_files, + properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id, + filename = self.tbl_changes_pending_files.c.filename, + size = self.tbl_changes_pending_files.c.size, + md5sum = self.tbl_changes_pending_files.c.md5sum, + sha1sum = self.tbl_changes_pending_files.c.sha1sum, + sha256sum = self.tbl_changes_pending_files.c.sha256sum)) + + mapper(ChangePendingSource, self.tbl_changes_pending_source, + properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id, + change = relation(DBChange), + maintainer = relation(Maintainer, + primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)), + changedby = relation(Maintainer, + primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)), + fingerprint = relation(Fingerprint), + source_files = relation(ChangePendingFile, + secondary=self.tbl_changes_pending_source_files, + backref="pending_sources"))) + mapper(KeyringACLMap, self.tbl_keyring_acl_map, properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id, @@ -2685,6 +2978,7 @@ class DBConn(Singleton): mapper(Override, self.tbl_override, properties = dict(suite_id = self.tbl_override.c.suite, suite = relation(Suite), + package = self.tbl_override.c.package, component_id = self.tbl_override.c.component, component = relation(Component), priority_id = self.tbl_override.c.priority, @@ -2698,19 +2992,15 @@ class DBConn(Singleton): properties = dict(overridetype = self.tbl_override_type.c.type, overridetype_id = self.tbl_override_type.c.id)) + mapper(PolicyQueue, self.tbl_policy_queue, + properties = dict(policy_queue_id = self.tbl_policy_queue.c.id)) + mapper(Priority, self.tbl_priority, properties = dict(priority_id = self.tbl_priority.c.id)) - mapper(Queue, self.tbl_queue, - properties = dict(queue_id = self.tbl_queue.c.id)) - - mapper(QueueBuild, self.tbl_queue_build, - properties = dict(suite_id = self.tbl_queue_build.c.suite, - queue_id = self.tbl_queue_build.c.queue, - queue = relation(Queue, backref='queuebuild'))) - mapper(Section, self.tbl_section, - properties = dict(section_id = self.tbl_section.c.id)) + properties = dict(section_id = self.tbl_section.c.id, + section=self.tbl_section.c.section)) mapper(DBSource, self.tbl_source, properties = dict(source_id = self.tbl_source.c.id, @@ -2756,7 +3046,8 @@ class DBConn(Singleton): mapper(Suite, self.tbl_suite, properties = dict(suite_id = self.tbl_suite.c.id, - policy_queue = relation(Queue))) + policy_queue = relation(PolicyQueue), + copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy))) mapper(SuiteArchitecture, self.tbl_suite_architectures, properties = 
dict(suite_id = self.tbl_suite_architectures.c.suite,
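
[Editor's note: one change in the DBConn hunks deserves a closing remark. With the `from singleton import Singleton` import removed at the top of this diff, DBConn keeps its one-connection behaviour through the Borg (shared-state) idiom instead: every instance rebinds its __dict__ to a single class-level dict, and the 'initialised' flag keeps the one-time __createconn() setup from running twice. A standalone sketch of the idiom, independent of dak (all names here are illustrative):

    class Borg(object):
        __shared_state = {}

        def __init__(self):
            # Every instance shares one attribute dict, so all instances
            # behave as one object without enforcing a single instance.
            self.__dict__ = self.__shared_state
            if not getattr(self, 'initialised', False):
                self.initialised = True    # one-time setup runs only once
                self.connections = 0

    a = Borg()
    b = Borg()
    a.connections = 1
    assert b.connections == 1   # state is shared across instances

Unlike a classic singleton, the Borg pattern never has to control instantiation; it only guarantees that all instances observe the same state, which is exactly what DBConn needs for its shared engine and table metadata.]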