diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index 921f1daa..6f598bb5 100755
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -5,7 +5,7 @@
 @contact: Debian FTPMaster
 @copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup
 @copyright: 2008-2009 Mark Hymers
-@copyright: 2009 Joerg Jaspert
+@copyright: 2009, 2010 Joerg Jaspert
 @copyright: 2009 Mike O'Connor
 @license: GNU General Public License version 2 or later
 """
@@ -37,13 +37,16 @@ import os
 import re
 import psycopg2
 import traceback
-import datetime
+import commands
+from datetime import datetime, timedelta
+from errno import ENOENT
+from tempfile import mkstemp, mkdtemp
 
 from inspect import getargspec
 
 import sqlalchemy
-from sqlalchemy import create_engine, Table, MetaData
-from sqlalchemy.orm import sessionmaker, mapper, relation
+from sqlalchemy import create_engine, Table, MetaData, Column, Integer
+from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, backref
 from sqlalchemy import types as sqltypes
 
 # Don't remove this, we re-export the exceptions to scripts which import us
@@ -53,28 +56,52 @@ from sqlalchemy.orm.exc import NoResultFound
 # Only import Config until Queue stuff is changed to store its config
 # in the database
 from config import Config
-from singleton import Singleton
 from textutils import fix_maintainer
+from dak_exceptions import NoSourceFieldError
+
+# suppress some deprecation warnings in squeeze related to sqlalchemy
+import warnings
+warnings.filterwarnings('ignore', \
+    "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
+    SADeprecationWarning)
+# TODO: sqlalchemy needs some extra configuration to correctly reflect
+# the ind_deb_contents_* indexes - we ignore the warnings at the moment
+warnings.filterwarnings("ignore", 'Predicate of partial index', SAWarning)
+
 ################################################################################
 
 # Patch in support for the debversion field type so that it works during
 # reflection
 
-class DebVersion(sqltypes.Text):
+try:
+    # this one is for sqlalchemy 0.6
+    UserDefinedType = sqltypes.UserDefinedType
+except AttributeError:
+    # this one is for sqlalchemy 0.5
+    UserDefinedType = sqltypes.TypeEngine
+
+class DebVersion(UserDefinedType):
     def get_col_spec(self):
         return "DEBVERSION"
 
+    def bind_processor(self, dialect):
+        return None
+
+    # ' = None' is needed for sqlalchemy 0.5:
+    def result_processor(self, dialect, coltype = None):
+        return None
+
 sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version == "0.5":
+if sa_major_version in ["0.5", "0.6"]:
     from sqlalchemy.databases import postgres
     postgres.ischema_names['debversion'] = DebVersion
 else:
-    raise Exception("dak isn't ported to SQLA versions != 0.5 yet.  See daklib/dbconn.py")
+    raise Exception("dak only ported to SQLA versions 0.5 and 0.6. 
See daklib/dbconn.py") ################################################################################ -__all__ = ['IntegrityError', 'SQLAlchemyError'] +__all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion'] ################################################################################ @@ -125,11 +152,14 @@ def session_wrapper(fn): return wrapped +__all__.append('session_wrapper') + ################################################################################ class Architecture(object): - def __init__(self, *args, **kwargs): - pass + def __init__(self, arch_string = None, description = None): + self.arch_string = arch_string + self.description = description def __eq__(self, val): if isinstance(val, str): @@ -173,13 +203,14 @@ def get_architecture(architecture, session=None): __all__.append('get_architecture') +# TODO: should be removed because the implementation is too trivial @session_wrapper def get_architecture_suites(architecture, session=None): """ Returns list of Suite objects for given C{architecture} name - @type source: str - @param source: Architecture name to search for + @type architecture: str + @param architecture: Architecture name to search for @type session: Session @param session: Optional SQL session object (a temporary one will be @@ -189,13 +220,7 @@ def get_architecture_suites(architecture, session=None): @return: list of Suite objects for the given name (may be empty) """ - q = session.query(Suite) - q = q.join(SuiteArchitecture) - q = q.join(Architecture).filter_by(arch_string=architecture).order_by('suite_name') - - ret = q.all() - - return ret + return get_architecture(architecture, session).suites __all__.append('get_architecture_suites') @@ -275,8 +300,8 @@ def get_suites_binary_in(package, session=None): """ Returns list of Suite objects which given C{package} name is in - @type source: str - @param source: DBBinary package name to search for + @type package: str + @param package: DBBinary package name to search for @rtype: list @return: list of Suite objects for the given package @@ -322,8 +347,8 @@ def get_binaries_from_name(package, version=None, architecture=None, session=Non @type version: str or None @param version: Version to search for (or None) - @type package: str, list or None - @param package: Architectures to limit to (or None if no limit) + @type architecture: str, list or None + @param architecture: Architectures to limit to (or None if no limit) @type session: Session @param session: Optional SQL session object (a temporary one will be @@ -376,16 +401,16 @@ def get_binary_from_name_suite(package, suitename, session=None): sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name FROM binaries b, files fi, location l, component c, bin_associations ba, suite su - WHERE b.package=:package + WHERE b.package='%(package)s' AND b.file = fi.id AND fi.location = l.id AND l.component = c.id AND ba.bin=b.id AND ba.suite = su.id - AND su.suite_name=:suitename + AND su.suite_name %(suitename)s ORDER BY b.version DESC""" - return session.execute(sql, {'package': package, 'suitename': suitename}) + return session.execute(sql % {'package': package, 'suitename': suitename}) __all__.append('get_binary_from_name_suite') @@ -430,6 +455,323 @@ __all__.append('BinaryACLMap') ################################################################################ +MINIMAL_APT_CONF=""" +Dir +{ + ArchiveDir "%(archivepath)s"; + OverrideDir "%(overridedir)s"; + CacheDir "%(cachedir)s"; +}; + +Default +{ + Packages::Compress ". 
bzip2 gzip"; + Sources::Compress ". bzip2 gzip"; + DeLinkLimit 0; + FileMode 0664; +} + +bindirectory "incoming" +{ + Packages "Packages"; + Contents " "; + + BinOverride "override.sid.all3"; + BinCacheDB "packages-accepted.db"; + + FileList "%(filelist)s"; + + PathPrefix ""; + Packages::Extensions ".deb .udeb"; +}; + +bindirectory "incoming/" +{ + Sources "Sources"; + BinOverride "override.sid.all3"; + SrcOverride "override.sid.all3.src"; + FileList "%(filelist)s"; +}; +""" + +class BuildQueue(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.queue_name + + def write_metadata(self, starttime, force=False): + # Do we write out metafiles? + if not (force or self.generate_metadata): + return + + session = DBConn().session().object_session(self) + + fl_fd = fl_name = ac_fd = ac_name = None + tempdir = None + arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ]) + startdir = os.getcwd() + + try: + # Grab files we want to include + newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all() + # Write file list with newer files + (fl_fd, fl_name) = mkstemp() + for n in newer: + os.write(fl_fd, '%s\n' % n.fullpath) + os.close(fl_fd) + + cnf = Config() + + # Write minimal apt.conf + # TODO: Remove hardcoding from template + (ac_fd, ac_name) = mkstemp() + os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path, + 'filelist': fl_name, + 'cachedir': cnf["Dir::Cache"], + 'overridedir': cnf["Dir::Override"], + }) + os.close(ac_fd) + + # Run apt-ftparchive generate + os.chdir(os.path.dirname(ac_name)) + os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name)) + + # Run apt-ftparchive release + # TODO: Eww - fix this + bname = os.path.basename(self.path) + os.chdir(self.path) + os.chdir('..') + + # We have to remove the Release file otherwise it'll be included in the + # new one + try: + os.unlink(os.path.join(bname, 'Release')) + except OSError: + pass + + os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname)) + + # Crude hack with open and append, but this whole section is and should be redone. 
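+            # A "NotAutomatic: yes" field in a Release file makes apt pin the
+            # archive at priority 1, so its packages are never installed or
+            # upgraded to unless a user requests them explicitly.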
+            if self.notautomatic:
+                release = open("Release", "a")
+                release.write("NotAutomatic: yes\n")
+                release.close()
+
+            # Sign if necessary
+            if self.signingkey:
+                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
+                if cnf.has_key("Dinstall::SigningPubKeyring"):
+                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
+
+                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release" % (keyring, self.signingkey))
+
+            # Move the files if we got this far
+            os.rename('Release', os.path.join(bname, 'Release'))
+            if self.signingkey:
+                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
+
+        # Clean up any left behind files
+        finally:
+            os.chdir(startdir)
+            if fl_fd:
+                try:
+                    os.close(fl_fd)
+                except OSError:
+                    pass
+
+            if fl_name:
+                try:
+                    os.unlink(fl_name)
+                except OSError:
+                    pass
+
+            if ac_fd:
+                try:
+                    os.close(ac_fd)
+                except OSError:
+                    pass
+
+            if ac_name:
+                try:
+                    os.unlink(ac_name)
+                except OSError:
+                    pass
+
+    def clean_and_update(self, starttime, Logger, dryrun=False):
+        """WARNING: This routine commits for you"""
+        session = DBConn().session().object_session(self)
+
+        if self.generate_metadata and not dryrun:
+            self.write_metadata(starttime)
+
+        # Grab files older than our execution time
+        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+
+        for o in older:
+            killdb = False
+            try:
+                if dryrun:
+                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
+                else:
+                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
+                    os.unlink(o.fullpath)
+                    killdb = True
+            except OSError, e:
+                # If it wasn't there, don't worry
+                if e.errno == ENOENT:
+                    killdb = True
+                else:
+                    # TODO: Replace with proper logging call
+                    Logger.log(["E: Could not remove %s" % o.fullpath])
+
+            if killdb:
+                session.delete(o)
+
+        session.commit()
+
+        for f in os.listdir(self.path):
+            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
+                continue
+
+            try:
+                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+            except NoResultFound:
+                fp = os.path.join(self.path, f)
+                if dryrun:
+                    Logger.log(["I: Would remove unused link %s" % fp])
+                else:
+                    Logger.log(["I: Removing unused link %s" % fp])
+                    try:
+                        os.unlink(fp)
+                    except OSError:
+                        Logger.log(["E: Failed to unlink unreferenced file %s" % fp])
+
+    def add_file_from_pool(self, poolfile):
+        """Copies a file from the pool into the queue directory.  Assumes that
+        the PoolFile object is attached to the same SQLAlchemy session as the
+        BuildQueue object is. 
+
+        The caller is responsible for committing after calling this function."""
+        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
+
+        # Check if we have a file of this name or this ID already
+        for f in self.queuefiles:
+            if f.fileid is not None and f.fileid == poolfile.file_id or \
+               f.poolfile.filename == poolfile_basename:
+                # In this case, update the BuildQueueFile entry so we
+                # don't remove it too early
+                f.lastused = datetime.now()
+                DBConn().session().object_session(poolfile).add(f)
+                return f
+
+        # Prepare BuildQueueFile object
+        qf = BuildQueueFile()
+        qf.build_queue_id = self.queue_id
+        qf.lastused = datetime.now()
+        qf.filename = poolfile_basename
+
+        targetpath = poolfile.fullpath
+        queuepath = os.path.join(self.path, poolfile_basename)
+
+        try:
+            if self.copy_files:
+                # We need to copy instead of symlink
+                import utils
+                utils.copy(targetpath, queuepath)
+                # NULL in the fileid field implies a copy
+                qf.fileid = None
+            else:
+                os.symlink(targetpath, queuepath)
+                qf.fileid = poolfile.file_id
+        except OSError:
+            return None
+
+        # Get the same session as the PoolFile is using and add the qf to it
+        DBConn().session().object_session(poolfile).add(qf)
+
+        return qf
+
+
+__all__.append('BuildQueue')
+
+@session_wrapper
+def get_build_queue(queuename, session=None):
+    """
+    Returns BuildQueue object for given C{queue name}
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: BuildQueue
+    @return: BuildQueue object for the given queue (C{None} if not present)
+    """
+
+    q = session.query(BuildQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_build_queue')
+
+################################################################################
+
+class BuildQueueFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
+
+    @property
+    def fullpath(self):
+        return os.path.join(self.buildqueue.path, self.filename)
+
+
+__all__.append('BuildQueueFile')
+
+################################################################################
+
+class ChangePendingBinary(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
+
+__all__.append('ChangePendingBinary')
+
+################################################################################
+
+class ChangePendingFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingFile %s>' % self.change_pending_file_id
+
+__all__.append('ChangePendingFile')
+
+################################################################################
+
+class ChangePendingSource(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingSource %s>' % self.change_pending_source_id
+
+__all__.append('ChangePendingSource')
+
+################################################################################
+
 class Component(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -590,8 +932,9 @@ def get_or_set_contents_path_id(filepath, session=None):
 
     If no matching file is found, a row is inserted.
 
-    @type filename: string
-    @param filename: The filepath
+    @type filepath: string
+    @param filepath: The filepath
+
     @type session: SQLAlchemy
     @param session: Optional SQL session object (a temporary one will be
     generated if not supplied).  If not passed, a commit will be performed at
@@ -652,17 +995,16 @@ def insert_content_paths(binary_id, fullpaths, session=None):
 
     try:
         # Insert paths
-        pathcache = {}
-
         def generate_path_dicts():
             for fullpath in fullpaths:
                 if fullpath.startswith( './' ):
                     fullpath = fullpath[2:]
 
-                yield {'fulename':fullpath, 'id': binary_id }
+                yield {'filename':fullpath, 'id': binary_id }
 
-        session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
-                         generate_path_dicts() )
+        for d in generate_path_dicts():
+            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
+                             d )
 
         session.commit()
         if privatetrans:
@@ -728,8 +1070,12 @@ __all__.append('get_dscfiles')
 ################################################################################
 
 class PoolFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def __init__(self, filename = None, location = None, filesize = -1, \
+        md5sum = None):
+        self.filename = filename
+        self.location = location
+        self.filesize = filesize
+        self.md5sum = md5sum
 
     def __repr__(self):
         return '<PoolFile %s>' % self.filename
 
     @property
     def fullpath(self):
         return os.path.join(self.location.path, self.filename)
 
+    def is_valid(self, filesize = -1, md5sum = None):
+        return self.filesize == filesize and self.md5sum == md5sum
+
 __all__.append('PoolFile')
 
 @session_wrapper
 def check_poolfile(filename, filesize, md5sum, location_id, session=None):
     """
     Returns a tuple:
-    (ValidFileFound [boolean or None], PoolFile object or None)
+    (ValidFileFound [boolean], PoolFile object or None)
 
     @type filename: string
     @param filename: the filename of the file to check against the DB
 
@@ -760,35 +1109,24 @@ def check_poolfile(filename, filesize, md5sum, location_id, session=None):
 
     @rtype: tuple
     @return: Tuple of length 2.
-             If more than one file found with that name:
-                 (None, None)
-             If valid pool file found: (True, PoolFile object)
-             If valid pool file not found:
-                 (False, None) if no file found
-                 (False, PoolFile object) if file found with size/md5sum mismatch
+             - If valid pool file found: (C{True}, C{PoolFile object})
+             - If valid pool file not found:
+               - (C{False}, C{None}) if no file found
+               - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
     """
 
-    q = session.query(PoolFile).filter_by(filename=filename)
-    q = q.join(Location).filter_by(location_id=location_id)
+    poolfile = session.query(Location).get(location_id). 
\ + files.filter_by(filename=filename).first() + valid = False + if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum): + valid = True - ret = None - - if q.count() > 1: - ret = (None, None) - elif q.count() < 1: - ret = (False, None) - else: - obj = q.one() - if obj.md5sum != md5sum or obj.filesize != int(filesize): - ret = (False, obj) - - if ret is None: - ret = (True, obj) - - return ret + return (valid, poolfile) __all__.append('check_poolfile') +# TODO: the implementation can trivially be inlined at the place where the +# function is called @session_wrapper def get_poolfile_by_id(file_id, session=None): """ @@ -801,65 +1139,67 @@ def get_poolfile_by_id(file_id, session=None): @return: either the PoolFile object or None """ - q = session.query(PoolFile).filter_by(file_id=file_id) - - try: - return q.one() - except NoResultFound: - return None + return session.query(PoolFile).get(file_id) __all__.append('get_poolfile_by_id') - @session_wrapper -def get_poolfile_by_name(filename, location_id=None, session=None): +def get_poolfile_like_name(filename, session=None): """ - Returns an array of PoolFile objects for the given filename and - (optionally) location_id + Returns an array of PoolFile objects which are like the given name @type filename: string @param filename: the filename of the file to check against the DB - @type location_id: int - @param location_id: the id of the location to look in (optional) - @rtype: array @return: array of PoolFile objects """ - q = session.query(PoolFile).filter_by(filename=filename) - - if location_id is not None: - q = q.join(Location).filter_by(location_id=location_id) + # TODO: There must be a way of properly using bind parameters with %FOO% + q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename)) return q.all() -__all__.append('get_poolfile_by_name') +__all__.append('get_poolfile_like_name') @session_wrapper -def get_poolfile_like_name(filename, session=None): +def add_poolfile(filename, datadict, location_id, session=None): """ - Returns an array of PoolFile objects which are like the given name + Add a new file to the pool @type filename: string - @param filename: the filename of the file to check against the DB + @param filename: filename - @rtype: array - @return: array of PoolFile objects + @type datadict: dict + @param datadict: dict with needed data + + @type location_id: int + @param location_id: database id of the location + + @rtype: PoolFile + @return: the PoolFile object created """ + poolfile = PoolFile() + poolfile.filename = filename + poolfile.filesize = datadict["size"] + poolfile.md5sum = datadict["md5sum"] + poolfile.sha1sum = datadict["sha1sum"] + poolfile.sha256sum = datadict["sha256sum"] + poolfile.location_id = location_id - # TODO: There must be a way of properly using bind parameters with %FOO% - q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename)) + session.add(poolfile) + # Flush to get a file id (NB: This is not a commit) + session.flush() - return q.all() + return poolfile -__all__.append('get_poolfile_like_name') +__all__.append('add_poolfile') ################################################################################ class Fingerprint(object): - def __init__(self, *args, **kwargs): - pass + def __init__(self, fingerprint = None): + self.fingerprint = fingerprint def __repr__(self): return '' % self.fingerprint @@ -960,9 +1300,17 @@ class Keyring(object): esclist[x] = "%c" % (int(esclist[x][2:],16)) return "".join(esclist) - def load_keys(self, 
keyring): + def parse_address(self, uid): + """parses uid and returns a tuple of real name and email address""" import email.Utils + (name, address) = email.Utils.parseaddr(uid) + name = re.sub(r"\s*[(].*[)]", "", name) + name = self.de_escape_gpg_str(name) + if name == "": + name = uid + return (name, address) + def load_keys(self, keyring): if not self.keyring_id: raise Exception('Must be initialized with database information') @@ -974,24 +1322,20 @@ class Keyring(object): field = line.split(":") if field[0] == "pub": key = field[4] - (name, addr) = email.Utils.parseaddr(field[9]) - name = re.sub(r"\s*[(].*[)]", "", name) - if name == "" or addr == "" or "@" not in addr: - name = field[9] - addr = "invalid-uid" - name = self.de_escape_gpg_str(name) - self.keys[key] = {"email": addr} - if name != "": + self.keys[key] = {} + (name, addr) = self.parse_address(field[9]) + if "@" in addr: + self.keys[key]["email"] = addr self.keys[key]["name"] = name - self.keys[key]["aliases"] = [name] self.keys[key]["fingerprints"] = [] signingkey = True elif key and field[0] == "sub" and len(field) >= 12: signingkey = ("s" in field[11]) elif key and field[0] == "uid": - (name, addr) = email.Utils.parseaddr(field[9]) - if name and name not in self.keys[key]["aliases"]: - self.keys[key]["aliases"].append(name) + (name, addr) = self.parse_address(field[9]) + if "email" not in self.keys[key] and "@" in addr: + self.keys[key]["email"] = addr + self.keys[key]["name"] = name elif signingkey and field[0] == "fpr": self.keys[key]["fingerprints"].append(field[9]) self.fpr_lookup[field[9]] = key @@ -1039,7 +1383,7 @@ class Keyring(object): byname = {} any_invalid = False for x in self.keys.keys(): - if self.keys[x]["email"] == "invalid-uid": + if "email" not in self.keys[x]: any_invalid = True self.keys[x]["uid"] = format % "invalid-uid" else: @@ -1094,56 +1438,60 @@ __all__.append('KeyringACLMap') ################################################################################ -class KnownChange(object): +class DBChange(object): def __init__(self, *args, **kwargs): pass def __repr__(self): - return '' % self.changesname + return '' % self.changesname + + def clean_from_queue(self): + session = DBConn().session().object_session(self) + + # Remove changes_pool_files entries + self.poolfiles = [] + + # Remove changes_pending_files references + self.files = [] + + # Clear out of queue + self.in_queue = None + self.approved_for_id = None -__all__.append('KnownChange') +__all__.append('DBChange') @session_wrapper -def get_knownchange(filename, session=None): +def get_dbchange(filename, session=None): """ - returns knownchange object for given C{filename}. + returns DBChange object for given C{filename}. 
-    @type archive: string
-    @param archive: the name of the arhive
+    @type filename: string
+    @param filename: the name of the file
 
     @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)
 
-    @rtype: Archive
-    @return: Archive object for the given name (None if not present)
+    @rtype: DBChange
+    @return: DBChange object for the given filename (C{None} if not present)
 
     """
-    q = session.query(KnownChange).filter_by(changesname=filename)
+    q = session.query(DBChange).filter_by(changesname=filename)
 
     try:
         return q.one()
     except NoResultFound:
         return None
 
-__all__.append('get_knownchange')
-
-################################################################################
-
-class KnownChangePendingFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<KnownChangePendingFile %s>' % self.known_change_pending_file_id
-
-__all__.append('KnownChangePendingFile')
+__all__.append('get_dbchange')
 
 ################################################################################
 
 class Location(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def __init__(self, path = None):
+        self.path = path
+        # the column 'type' should go away, see comment at mapper
+        self.archive_type = 'pool'
 
     def __repr__(self):
         return '<Location %s (%s)>' % (self.path, self.location_id)
@@ -1157,13 +1505,13 @@ def get_location(location, component=None, archive=None, session=None):
     and archive
 
     @type location: string
-    @param location: the path of the location, e.g. I{/srv/ftp.debian.org/ftp/pool/}
+    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
 
     @type component: string
     @param component: the component name (if None, no restriction applied)
 
     @type archive: string
-    @param archive_id: the archive name (if None, no restriction applied)
+    @param archive: the archive name (if None, no restriction applied)
 
     @rtype: Location / None
     @return: Either a Location object or None if one can't be found
@@ -1187,8 +1535,8 @@ __all__.append('get_location')
 ################################################################################
 
 class Maintainer(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def __init__(self, name = None):
+        self.name = name
 
    def __repr__(self):
        return '''<Maintainer '%s' (%s)>''' % (self.name, self.maintainer_id)
@@ -1524,6 +1872,67 @@ __all__.append('insert_pending_content_paths')
 
 ################################################################################
 
+class PolicyQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<PolicyQueue %s>' % self.queue_name
+
+__all__.append('PolicyQueue')
+
+@session_wrapper
+def get_policy_queue(queuename, session=None):
+    """
+    Returns PolicyQueue object for given C{queue name}
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given queue
+    """
+
+    q = session.query(PolicyQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_policy_queue')
+
+@session_wrapper
+def get_policy_queue_from_path(pathname, session=None):
+    """
+    Returns PolicyQueue object for given C{path name}
+
+    @type pathname: string
+    @param pathname: The path
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: PolicyQueue
+    @return: PolicyQueue 
object for the given queue + """ + + q = session.query(PolicyQueue).filter_by(path=pathname) + + try: + return q.one() + except NoResultFound: + return None + +__all__.append('get_policy_queue_from_path') + +################################################################################ + class Priority(object): def __init__(self, *args, **kwargs): pass @@ -1594,99 +2003,6 @@ __all__.append('get_priorities') ################################################################################ -class Queue(object): - def __init__(self, *args, **kwargs): - pass - - def __repr__(self): - return '' % self.queue_name - - def add_file_from_pool(self, poolfile): - """Copies a file into the pool. Assumes that the PoolFile object is - attached to the same SQLAlchemy session as the Queue object is. - - The caller is responsible for committing after calling this function.""" - poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:] - - # Check if we have a file of this name or this ID already - for f in self.queuefiles: - if f.fileid is not None and f.fileid == poolfile.file_id or \ - f.poolfile.filename == poolfile_basename: - # In this case, update the QueueFile entry so we - # don't remove it too early - f.lastused = datetime.now() - DBConn().session().object_session(pf).add(f) - return f - - # Prepare QueueFile object - qf = QueueFile() - qf.queue_id = self.queue_id - qf.lastused = datetime.now() - qf.filename = dest - - targetpath = qf.fullpath - queuepath = os.path.join(self.path, poolfile_basename) - - try: - if self.copy_pool_files: - # We need to copy instead of symlink - import utils - utils.copy(targetfile, queuepath) - # NULL in the fileid field implies a copy - qf.fileid = None - else: - os.symlink(targetfile, queuepath) - qf.fileid = poolfile.file_id - except OSError: - return None - - # Get the same session as the PoolFile is using and add the qf to it - DBConn().session().object_session(poolfile).add(qf) - - return qf - - -__all__.append('Queue') - -@session_wrapper -def get_queue(queuename, session=None): - """ - Returns Queue object for given C{queue name}, creating it if it does not - exist. 
- - @type queuename: string - @param queuename: The name of the queue - - @type session: Session - @param session: Optional SQLA session object (a temporary one will be - generated if not supplied) - - @rtype: Queue - @return: Queue object for the given queue - """ - - q = session.query(Queue).filter_by(queue_name=queuename) - - try: - return q.one() - except NoResultFound: - return None - -__all__.append('get_queue') - -################################################################################ - -class QueueFile(object): - def __init__(self, *args, **kwargs): - pass - - def __repr__(self): - return '' % (self.filename, self.queue_id) - -__all__.append('QueueFile') - -################################################################################ - class Section(object): def __init__(self, *args, **kwargs): pass @@ -1758,8 +2074,14 @@ __all__.append('get_sections') ################################################################################ class DBSource(object): - def __init__(self, *args, **kwargs): - pass + def __init__(self, source = None, version = None, maintainer = None, \ + changedby = None, poolfile = None, install_date = None): + self.source = source + self.version = version + self.maintainer = maintainer + self.changedby = changedby + self.poolfile = poolfile + self.install_date = install_date def __repr__(self): return '' % (self.source, self.version) @@ -1774,8 +2096,8 @@ def source_exists(source, source_version, suites = ["any"], session=None): 1. exact match => 1.0-3 2. bin-only NMU => 1.0-3+b1 , 1.0-3.1+b1 - @type package: string - @param package: package source name + @type source: string + @param source: source name @type source_version: string @param source_version: expected source version @@ -1846,7 +2168,7 @@ def get_suites_source_in(source, session=None): @return: list of Suite objects for the given source """ - return session.query(Suite).join(SrcAssociation).join(DBSource).filter_by(source=source).all() + return session.query(Suite).filter(Suite.sources.any(source=source)).all() __all__.append('get_suites_source_in') @@ -1858,8 +2180,8 @@ def get_sources_from_name(source, version=None, dm_upload_allowed=None, session= @type source: str @param source: DBSource package name to search for - @type source: str or None - @param source: DBSource version name to search for or None if not applicable + @type version: str or None + @param version: DBSource version name to search for or None if not applicable @type dm_upload_allowed: bool @param dm_upload_allowed: If None, no effect. If True or False, only @@ -1885,10 +2207,12 @@ def get_sources_from_name(source, version=None, dm_upload_allowed=None, session= __all__.append('get_sources_from_name') +# FIXME: This function fails badly if it finds more than 1 source package and +# its implementation is trivial enough to be inlined. @session_wrapper def get_source_in_suite(source, suite, session=None): """ - Returns list of DBSource objects for a combination of C{source} and C{suite}. + Returns a DBSource object for a combination of C{source} and C{suite}. - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc} - B{suite} - a suite name, eg. 
I{unstable} @@ -1904,12 +2228,9 @@ def get_source_in_suite(source, suite, session=None): """ - q = session.query(SrcAssociation) - q = q.join('source').filter_by(source=source) - q = q.join('suite').filter_by(suite_name=suite) - + q = get_suite(suite, session).get_sources(source) try: - return q.one().source + return q.one() except NoResultFound: return None @@ -1917,6 +2238,187 @@ __all__.append('get_source_in_suite') ################################################################################ +@session_wrapper +def add_dsc_to_db(u, filename, session=None): + entry = u.pkg.files[filename] + source = DBSource() + pfs = [] + + source.source = u.pkg.dsc["source"] + source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch + source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id + source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id + source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id + source.install_date = datetime.now().date() + + dsc_component = entry["component"] + dsc_location_id = entry["location id"] + + source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes") + + # Set up a new poolfile if necessary + if not entry.has_key("files id") or not entry["files id"]: + filename = entry["pool name"] + filename + poolfile = add_poolfile(filename, entry, dsc_location_id, session) + session.flush() + pfs.append(poolfile) + entry["files id"] = poolfile.file_id + + source.poolfile_id = entry["files id"] + session.add(source) + session.flush() + + for suite_name in u.pkg.changes["distribution"].keys(): + sa = SrcAssociation() + sa.source_id = source.source_id + sa.suite_id = get_suite(suite_name).suite_id + session.add(sa) + + session.flush() + + # Add the source files to the DB (files and dsc_files) + dscfile = DSCFile() + dscfile.source_id = source.source_id + dscfile.poolfile_id = entry["files id"] + session.add(dscfile) + + for dsc_file, dentry in u.pkg.dsc_files.items(): + df = DSCFile() + df.source_id = source.source_id + + # If the .orig tarball is already in the pool, it's + # files id is stored in dsc_files by check_dsc(). + files_id = dentry.get("files id", None) + + # Find the entry in the files hash + # TODO: Bail out here properly + dfentry = None + for f, e in u.pkg.files.items(): + if f == dsc_file: + dfentry = e + break + + if files_id is None: + filename = dfentry["pool name"] + dsc_file + + (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id) + # FIXME: needs to check for -1/-2 and or handle exception + if found and obj is not None: + files_id = obj.file_id + pfs.append(obj) + + # If still not found, add it + if files_id is None: + # HACK: Force sha1sum etc into dentry + dentry["sha1sum"] = dfentry["sha1sum"] + dentry["sha256sum"] = dfentry["sha256sum"] + poolfile = add_poolfile(filename, dentry, dsc_location_id, session) + pfs.append(poolfile) + files_id = poolfile.file_id + else: + poolfile = get_poolfile_by_id(files_id, session) + if poolfile is None: + utils.fubar("INTERNAL ERROR. 
Found no poolfile with id %d" % files_id) + pfs.append(poolfile) + + df.poolfile_id = files_id + session.add(df) + + session.flush() + + # Add the src_uploaders to the DB + uploader_ids = [source.maintainer_id] + if u.pkg.dsc.has_key("uploaders"): + for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"): + up = up.strip() + uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id) + + added_ids = {} + for up_id in uploader_ids: + if added_ids.has_key(up_id): + import utils + utils.warn("Already saw uploader %s for source %s" % (up_id, source.source)) + continue + + added_ids[up_id]=1 + + su = SrcUploader() + su.maintainer_id = up_id + su.source_id = source.source_id + session.add(su) + + session.flush() + + return source, dsc_component, dsc_location_id, pfs + +__all__.append('add_dsc_to_db') + +@session_wrapper +def add_deb_to_db(u, filename, session=None): + """ + Contrary to what you might expect, this routine deals with both + debs and udebs. That info is in 'dbtype', whilst 'type' is + 'deb' for both of them + """ + cnf = Config() + entry = u.pkg.files[filename] + + bin = DBBinary() + bin.package = entry["package"] + bin.version = entry["version"] + bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id + bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id + bin.arch_id = get_architecture(entry["architecture"], session).arch_id + bin.binarytype = entry["dbtype"] + + # Find poolfile id + filename = entry["pool name"] + filename + fullpath = os.path.join(cnf["Dir::Pool"], filename) + if not entry.get("location id", None): + entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id + + if entry.get("files id", None): + poolfile = get_poolfile_by_id(bin.poolfile_id) + bin.poolfile_id = entry["files id"] + else: + poolfile = add_poolfile(filename, entry, entry["location id"], session) + bin.poolfile_id = entry["files id"] = poolfile.file_id + + # Find source id + bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session) + if len(bin_sources) != 1: + raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \ + (bin.package, bin.version, entry["architecture"], + filename, bin.binarytype, u.pkg.changes["fingerprint"]) + + bin.source_id = bin_sources[0].source_id + + # Add and flush object so it has an ID + session.add(bin) + session.flush() + + # Add BinAssociations + for suite_name in u.pkg.changes["distribution"].keys(): + ba = BinAssociation() + ba.binary_id = bin.binary_id + ba.suite_id = get_suite(suite_name).suite_id + session.add(ba) + + session.flush() + + # Deal with contents - disabled for now + #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session) + #if not contents: + # print "REJECT\nCould not determine contents of package %s" % bin.package + # session.rollback() + # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename) + + return poolfile + +__all__.append('add_deb_to_db') + +################################################################################ + class SourceACL(object): def __init__(self, *args, **kwargs): pass @@ -1975,15 +2477,14 @@ SUITE_FIELDS = [ ('SuiteName', 'suite_name'), ('Priority', 'priority'), ('NotAutomatic', 'notautomatic'), ('CopyChanges', 'copychanges'), - 
('CopyDotDak', 'copydotdak'), - ('CommentsDir', 'commentsdir'), - ('OverrideSuite', 'overridesuite'), - ('ChangelogBase', 'changelogbase')] - + ('OverrideSuite', 'overridesuite')] +# Why the heck don't we have any UNIQUE constraints in table suite? +# TODO: Add UNIQUE constraints for appropriate columns. class Suite(object): - def __init__(self, *args, **kwargs): - pass + def __init__(self, suite_name = None, version = None): + self.suite_name = suite_name + self.version = version def __repr__(self): return '' % self.suite_name @@ -2009,38 +2510,49 @@ class Suite(object): return "\n".join(ret) -__all__.append('Suite') + def get_architectures(self, skipsrc=False, skipall=False): + """ + Returns list of Architecture objects -@session_wrapper -def get_suite_architecture(suite, architecture, session=None): - """ - Returns a SuiteArchitecture object given C{suite} and ${arch} or None if it - doesn't exist + @type skipsrc: boolean + @param skipsrc: Whether to skip returning the 'source' architecture entry + (Default False) - @type suite: str - @param suite: Suite name to search for + @type skipall: boolean + @param skipall: Whether to skip returning the 'all' architecture entry + (Default False) - @type architecture: str - @param architecture: Architecture name to search for + @rtype: list + @return: list of Architecture objects for the given name (may be empty) + """ - @type session: Session - @param session: Optional SQL session object (a temporary one will be - generated if not supplied) + q = object_session(self).query(Architecture). \ + filter(Architecture.suites.contains(self)) + if skipsrc: + q = q.filter(Architecture.arch_string != 'source') + if skipall: + q = q.filter(Architecture.arch_string != 'all') + return q.order_by(Architecture.arch_string).all() - @rtype: SuiteArchitecture - @return: the SuiteArchitecture object or None - """ + def get_sources(self, source): + """ + Returns a query object representing DBSource that is part of C{suite}. - q = session.query(SuiteArchitecture) - q = q.join(Architecture).filter_by(arch_string=architecture) - q = q.join(Suite).filter_by(suite_name=suite) + - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc} - try: - return q.one() - except NoResultFound: - return None + @type source: string + @param source: source package name -__all__.append('get_suite_architecture') + @rtype: sqlalchemy.orm.query.Query + @return: a query of DBSource + + """ + + session = object_session(self) + return session.query(DBSource).filter_by(source = source). 
\ + filter(DBSource.suites.contains(self)) + +__all__.append('Suite') @session_wrapper def get_suite(suite, session=None): @@ -2069,22 +2581,14 @@ __all__.append('get_suite') ################################################################################ -class SuiteArchitecture(object): - def __init__(self, *args, **kwargs): - pass - - def __repr__(self): - return '' % (self.suite_id, self.arch_id) - -__all__.append('SuiteArchitecture') - +# TODO: should be removed because the implementation is too trivial @session_wrapper def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None): """ Returns list of Architecture objects for given C{suite} name - @type source: str - @param source: Suite name to search for + @type suite: str + @param suite: Suite name to search for @type skipsrc: boolean @param skipsrc: Whether to skip returning the 'source' architecture entry @@ -2102,19 +2606,7 @@ def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None): @return: list of Architecture objects for the given name (may be empty) """ - q = session.query(Architecture) - q = q.join(SuiteArchitecture) - q = q.join(Suite).filter_by(suite_name=suite) - - if skipsrc: - q = q.filter(Architecture.arch_string != 'source') - - if skipall: - q = q.filter(Architecture.arch_string != 'all') - - q = q.order_by('arch_string') - - return q.all() + return get_suite(suite, session).get_architectures(skipsrc, skipall) __all__.append('get_suite_architectures') @@ -2157,8 +2649,9 @@ __all__.append('get_suite_src_formats') ################################################################################ class Uid(object): - def __init__(self, *args, **kwargs): - pass + def __init__(self, uid = None, name = None): + self.uid = uid + self.name = name def __eq__(self, val): if isinstance(val, str): @@ -2177,28 +2670,6 @@ class Uid(object): __all__.append('Uid') -@session_wrapper -def add_database_user(uidname, session=None): - """ - Adds a database user - - @type uidname: string - @param uidname: The uid of the user to add - - @type session: SQLAlchemy - @param session: Optional SQL session object (a temporary one will be - generated if not supplied). If not passed, a commit will be performed at - the end of the function, otherwise the caller is responsible for commiting. - - @rtype: Uid - @return: the uid object for the given uidname - """ - - session.execute("CREATE USER :uid", {'uid': uidname}) - session.commit_or_flush() - -__all__.append('add_database_user') - @session_wrapper def get_or_set_uid(uidname, session=None): """ @@ -2258,67 +2729,126 @@ __all__.append('UploadBlock') ################################################################################ -class DBConn(Singleton): +class DBConn(object): """ database module init. 
""" + __shared_state = {} + def __init__(self, *args, **kwargs): - super(DBConn, self).__init__(*args, **kwargs) + self.__dict__ = self.__shared_state - def _startup(self, *args, **kwargs): - self.debug = False - if kwargs.has_key('debug'): - self.debug = True - self.__createconn() + if not getattr(self, 'initialised', False): + self.initialised = True + self.debug = kwargs.has_key('debug') + self.__createconn() def __setuptables(self): - self.tbl_architecture = Table('architecture', self.db_meta, autoload=True) - self.tbl_archive = Table('archive', self.db_meta, autoload=True) - self.tbl_bin_contents = Table('bin_contents', self.db_meta, autoload=True) - self.tbl_bin_associations = Table('bin_associations', self.db_meta, autoload=True) - self.tbl_binaries = Table('binaries', self.db_meta, autoload=True) - self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True) - self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True) - self.tbl_component = Table('component', self.db_meta, autoload=True) - self.tbl_config = Table('config', self.db_meta, autoload=True) - self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True) - self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True) - self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True) - self.tbl_changes_pending_files = Table('changes_pending_files', self.db_meta, autoload=True) - self.tbl_changes_pool_files = Table('changes_pool_files', self.db_meta, autoload=True) - self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True) - self.tbl_deb_contents = Table('deb_contents', self.db_meta, autoload=True) - self.tbl_files = Table('files', self.db_meta, autoload=True) - self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True) - self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True) - self.tbl_known_changes = Table('known_changes', self.db_meta, autoload=True) - self.tbl_keyring_acl_map = Table('keyring_acl_map', self.db_meta, autoload=True) - self.tbl_location = Table('location', self.db_meta, autoload=True) - self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True) - self.tbl_new_comments = Table('new_comments', self.db_meta, autoload=True) - self.tbl_override = Table('override', self.db_meta, autoload=True) - self.tbl_override_type = Table('override_type', self.db_meta, autoload=True) - self.tbl_pending_bin_contents = Table('pending_bin_contents', self.db_meta, autoload=True) - self.tbl_priority = Table('priority', self.db_meta, autoload=True) - self.tbl_queue = Table('queue', self.db_meta, autoload=True) - self.tbl_queue_files = Table('queue_files', self.db_meta, autoload=True) - self.tbl_section = Table('section', self.db_meta, autoload=True) - self.tbl_source = Table('source', self.db_meta, autoload=True) - self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True) - self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True) - self.tbl_src_format = Table('src_format', self.db_meta, autoload=True) - self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True) - self.tbl_suite = Table('suite', self.db_meta, autoload=True) - self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True) - self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True) - self.tbl_suite_queue_copy = Table('suite_queue_copy', self.db_meta, autoload=True) - self.tbl_udeb_contents = 
Table('udeb_contents', self.db_meta, autoload=True) - self.tbl_uid = Table('uid', self.db_meta, autoload=True) - self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True) + tables_with_primary = ( + 'architecture', + 'archive', + 'bin_associations', + 'binaries', + 'binary_acl', + 'binary_acl_map', + 'build_queue', + 'changelogs_text', + 'component', + 'config', + 'changes_pending_binaries', + 'changes_pending_files', + 'changes_pending_source', + 'dsc_files', + 'files', + 'fingerprint', + 'keyrings', + 'keyring_acl_map', + 'location', + 'maintainer', + 'new_comments', + 'override_type', + 'pending_bin_contents', + 'policy_queue', + 'priority', + 'section', + 'source', + 'source_acl', + 'src_associations', + 'src_format', + 'src_uploaders', + 'suite', + 'uid', + 'upload_blocks', + # The following tables have primary keys but sqlalchemy + # version 0.5 fails to reflect them correctly with database + # versions before upgrade #41. + #'changes', + #'build_queue_files', + ) + + tables_no_primary = ( + 'bin_contents', + 'changes_pending_files_map', + 'changes_pending_source_files', + 'changes_pool_files', + 'deb_contents', + 'override', + 'suite_architectures', + 'suite_src_formats', + 'suite_build_queue_copy', + 'udeb_contents', + # see the comment above + 'changes', + 'build_queue_files', + ) + + views = ( + 'almost_obsolete_all_associations', + 'almost_obsolete_src_associations', + 'any_associations_source', + 'bin_assoc_by_arch', + 'bin_associations_binaries', + 'binaries_suite_arch', + 'binfiles_suite_component_arch', + 'changelogs', + 'file_arch_suite', + 'newest_all_associations', + 'newest_any_associations', + 'newest_source', + 'newest_src_association', + 'obsolete_all_associations', + 'obsolete_any_associations', + 'obsolete_any_by_all_associations', + 'obsolete_src_associations', + 'source_suite', + 'src_associations_bin', + 'src_associations_src', + 'suite_arch_by_name', + ) + + # Sqlalchemy version 0.5 fails to reflect the SERIAL type + # correctly and that is why we have to use a workaround. It can + # be removed as soon as we switch to version 0.6. 
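+        # Note on the workaround: an explicit Column passed alongside
+        # autoload=True overrides only that one column of the reflected
+        # definition, and useexisting=True allows redefining a table that
+        # reflection has already registered in db_meta.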
+ for table_name in tables_with_primary: + table = Table(table_name, self.db_meta, \ + Column('id', Integer, primary_key = True), \ + autoload=True, useexisting=True) + setattr(self, 'tbl_%s' % table_name, table) + + for table_name in tables_no_primary: + table = Table(table_name, self.db_meta, autoload=True) + setattr(self, 'tbl_%s' % table_name, table) + + for view_name in views: + view = Table(view_name, self.db_meta, autoload=True) + setattr(self, 'view_%s' % view_name, view) def __setupmappers(self): mapper(Architecture, self.tbl_architecture, - properties = dict(arch_id = self.tbl_architecture.c.id)) + properties = dict(arch_id = self.tbl_architecture.c.id, + suites = relation(Suite, secondary=self.tbl_suite_architectures, + order_by='suite_name', + backref=backref('architectures', order_by='arch_string')))) mapper(Archive, self.tbl_archive, properties = dict(archive_id = self.tbl_archive.c.id, @@ -2342,7 +2872,7 @@ class DBConn(Singleton): mapper(DebContents, self.tbl_deb_contents, properties = dict(binary_id=self.tbl_deb_contents.c.binary_id, package=self.tbl_deb_contents.c.package, - component=self.tbl_deb_contents.c.component, + suite=self.tbl_deb_contents.c.suite, arch=self.tbl_deb_contents.c.arch, section=self.tbl_deb_contents.c.section, filename=self.tbl_deb_contents.c.filename)) @@ -2350,11 +2880,18 @@ class DBConn(Singleton): mapper(UdebContents, self.tbl_udeb_contents, properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id, package=self.tbl_udeb_contents.c.package, - component=self.tbl_udeb_contents.c.component, + suite=self.tbl_udeb_contents.c.suite, arch=self.tbl_udeb_contents.c.arch, section=self.tbl_udeb_contents.c.section, filename=self.tbl_udeb_contents.c.filename)) + mapper(BuildQueue, self.tbl_build_queue, + properties = dict(queue_id = self.tbl_build_queue.c.id)) + + mapper(BuildQueueFile, self.tbl_build_queue_files, + properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'), + poolfile = relation(PoolFile, backref='buildqueueinstances'))) + mapper(DBBinary, self.tbl_binaries, properties = dict(binary_id = self.tbl_binaries.c.id, package = self.tbl_binaries.c.package, @@ -2400,7 +2937,11 @@ class DBConn(Singleton): properties = dict(file_id = self.tbl_files.c.id, filesize = self.tbl_files.c.size, location_id = self.tbl_files.c.location, - location = relation(Location))) + location = relation(Location, + # using lazy='dynamic' in the back + # reference because we have A LOT of + # files in one location + backref=backref('files', lazy='dynamic')))) mapper(Fingerprint, self.tbl_fingerprint, properties = dict(fingerprint_id = self.tbl_fingerprint.c.id, @@ -2415,15 +2956,52 @@ class DBConn(Singleton): properties = dict(keyring_name = self.tbl_keyrings.c.name, keyring_id = self.tbl_keyrings.c.id)) - mapper(KnownChange, self.tbl_known_changes, - properties = dict(known_change_id = self.tbl_known_changes.c.id, + mapper(DBChange, self.tbl_changes, + properties = dict(change_id = self.tbl_changes.c.id, poolfiles = relation(PoolFile, secondary=self.tbl_changes_pool_files, backref="changeslinks"), - files = relation(KnownChangePendingFile, backref="changesfile"))) + seen = self.tbl_changes.c.seen, + source = self.tbl_changes.c.source, + binaries = self.tbl_changes.c.binaries, + architecture = self.tbl_changes.c.architecture, + distribution = self.tbl_changes.c.distribution, + urgency = self.tbl_changes.c.urgency, + maintainer = self.tbl_changes.c.maintainer, + changedby = self.tbl_changes.c.changedby, + date = self.tbl_changes.c.date, + version = 
self.tbl_changes.c.version, + files = relation(ChangePendingFile, + secondary=self.tbl_changes_pending_files_map, + backref="changesfile"), + in_queue_id = self.tbl_changes.c.in_queue, + in_queue = relation(PolicyQueue, + primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)), + approved_for_id = self.tbl_changes.c.approved_for)) + + mapper(ChangePendingBinary, self.tbl_changes_pending_binaries, + properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id)) + + mapper(ChangePendingFile, self.tbl_changes_pending_files, + properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id, + filename = self.tbl_changes_pending_files.c.filename, + size = self.tbl_changes_pending_files.c.size, + md5sum = self.tbl_changes_pending_files.c.md5sum, + sha1sum = self.tbl_changes_pending_files.c.sha1sum, + sha256sum = self.tbl_changes_pending_files.c.sha256sum)) + + mapper(ChangePendingSource, self.tbl_changes_pending_source, + properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id, + change = relation(DBChange), + maintainer = relation(Maintainer, + primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)), + changedby = relation(Maintainer, + primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)), + fingerprint = relation(Fingerprint), + source_files = relation(ChangePendingFile, + secondary=self.tbl_changes_pending_source_files, + backref="pending_sources"))) - mapper(KnownChangePendingFile, self.tbl_changes_pending_files, - properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.c.id)) mapper(KeyringACLMap, self.tbl_keyring_acl_map, properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id, @@ -2436,10 +3014,16 @@ class DBConn(Singleton): component = relation(Component), archive_id = self.tbl_location.c.archive, archive = relation(Archive), + # FIXME: the 'type' column is old cruft and + # should be removed in the future. 
archive_type = self.tbl_location.c.type)) mapper(Maintainer, self.tbl_maintainer, - properties = dict(maintainer_id = self.tbl_maintainer.c.id)) + properties = dict(maintainer_id = self.tbl_maintainer.c.id, + maintains_sources = relation(DBSource, backref='maintainer', + primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.maintainer)), + changed_sources = relation(DBSource, backref='changedby', + primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.changedby)))) mapper(NewComment, self.tbl_new_comments, properties = dict(comment_id = self.tbl_new_comments.c.id)) @@ -2461,16 +3045,12 @@ class DBConn(Singleton): properties = dict(overridetype = self.tbl_override_type.c.type, overridetype_id = self.tbl_override_type.c.id)) + mapper(PolicyQueue, self.tbl_policy_queue, + properties = dict(policy_queue_id = self.tbl_policy_queue.c.id)) + mapper(Priority, self.tbl_priority, properties = dict(priority_id = self.tbl_priority.c.id)) - mapper(Queue, self.tbl_queue, - properties = dict(queue_id = self.tbl_queue.c.id)) - - mapper(QueueFile, self.tbl_queue_files, - properties = dict(queue = relation(Queue, backref='queuefiles'), - poolfile = relation(PoolFile, backref='queueinstances'))) - mapper(Section, self.tbl_section, properties = dict(section_id = self.tbl_section.c.id, section=self.tbl_section.c.section)) @@ -2479,19 +3059,15 @@ class DBConn(Singleton): properties = dict(source_id = self.tbl_source.c.id, version = self.tbl_source.c.version, maintainer_id = self.tbl_source.c.maintainer, - maintainer = relation(Maintainer, - primaryjoin=(self.tbl_source.c.maintainer==self.tbl_maintainer.c.id)), poolfile_id = self.tbl_source.c.file, - poolfile = relation(PoolFile), + poolfile = relation(PoolFile, backref=backref('source', uselist = False)), fingerprint_id = self.tbl_source.c.sig_fpr, fingerprint = relation(Fingerprint), changedby_id = self.tbl_source.c.changedby, - changedby = relation(Maintainer, - primaryjoin=(self.tbl_source.c.changedby==self.tbl_maintainer.c.id)), srcfiles = relation(DSCFile, primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)), - srcassociations = relation(SrcAssociation, - primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source)), + suites = relation(Suite, secondary=self.tbl_src_associations, + backref='sources'), srcuploaders = relation(SrcUploader))) mapper(SourceACL, self.tbl_source_acl, @@ -2519,14 +3095,8 @@ class DBConn(Singleton): mapper(Suite, self.tbl_suite, properties = dict(suite_id = self.tbl_suite.c.id, - policy_queue = relation(Queue), - copy_queues = relation(Queue, secondary=self.tbl_suite_queue_copy))) - - mapper(SuiteArchitecture, self.tbl_suite_architectures, - properties = dict(suite_id = self.tbl_suite_architectures.c.suite, - suite = relation(Suite, backref='suitearchitectures'), - arch_id = self.tbl_suite_architectures.c.architecture, - architecture = relation(Architecture))) + policy_queue = relation(PolicyQueue), + copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy))) mapper(SuiteSrcFormat, self.tbl_suite_src_formats, properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
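
The DBConn rewrite above drops the old Singleton base class in favour of the
Borg pattern: every DBConn() instance shares one __dict__ (the __shared_state
class attribute), so all callers see the same engine, metadata and session
factory no matter where the object is constructed. A minimal sketch of the
idiom outside of dak (class and attribute names here are illustrative only):

class Borg(object):
    __shared_state = {}

    def __init__(self):
        # Every instance aliases the same dict, so attribute writes made
        # through one instance are visible through all others.
        self.__dict__ = self.__shared_state

first = Borg()
first.engine = 'engine would live here'
second = Borg()
assert second.engine is first.engine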