X-Git-Url: https://git.decadent.org.uk/gitweb/?a=blobdiff_plain;f=daklib%2Fdbconn.py;h=3c0bc50d38750c3dbecebb0280f1eb620aaf2fce;hb=b7f4f39d244e3282303015c9ff4a116252a64613;hp=113c38aa5fef3bf2d7a31015fa7622440ad73be4;hpb=4258a4e33218d4b6c271bd19b0a0723dba1fbed5;p=dak.git diff --git a/daklib/dbconn.py b/daklib/dbconn.py old mode 100755 new mode 100644 index 113c38aa..4e54a327 --- a/daklib/dbconn.py +++ b/daklib/dbconn.py @@ -34,33 +34,122 @@ ################################################################################ import os +import re import psycopg2 import traceback +from datetime import datetime, timedelta +from errno import ENOENT +from tempfile import mkstemp, mkdtemp -from sqlalchemy import create_engine, Table, MetaData, select +from inspect import getargspec + +import sqlalchemy +from sqlalchemy import create_engine, Table, MetaData from sqlalchemy.orm import sessionmaker, mapper, relation +from sqlalchemy import types as sqltypes # Don't remove this, we re-export the exceptions to scripts which import us from sqlalchemy.exc import * +from sqlalchemy.orm.exc import NoResultFound -from singleton import Singleton +from config import Config from textutils import fix_maintainer ################################################################################ +# Patch in support for the debversion field type so that it works during +# reflection + +class DebVersion(sqltypes.Text): + def get_col_spec(self): + return "DEBVERSION" + +sa_major_version = sqlalchemy.__version__[0:3] +if sa_major_version == "0.5": + from sqlalchemy.databases import postgres + postgres.ischema_names['debversion'] = DebVersion +else: + raise Exception("dak isn't ported to SQLA versions != 0.5 yet. See daklib/dbconn.py") + +################################################################################ + __all__ = ['IntegrityError', 'SQLAlchemyError'] ################################################################################ +def session_wrapper(fn): + """ + Wrapper around common ".., session=None):" handling. If the wrapped + function is called without passing 'session', we create a local one + and destroy it when the function ends. + + Also attaches a commit_or_flush method to the session; if we created a + local session, this is a synonym for session.commit(), otherwise it is a + synonym for session.flush(). + """ + + def wrapped(*args, **kwargs): + private_transaction = False + + # Find the session object + session = kwargs.get('session') + + if session is None: + if len(args) <= len(getargspec(fn)[0]) - 1: + # No session specified as last argument or in kwargs + private_transaction = True + session = kwargs['session'] = DBConn().session() + else: + # Session is last argument in args + session = args[-1] + if session is None: + args = list(args) + session = args[-1] = DBConn().session() + private_transaction = True + + if private_transaction: + session.commit_or_flush = session.commit + else: + session.commit_or_flush = session.flush + + try: + return fn(*args, **kwargs) + finally: + if private_transaction: + # We created a session; close it. 
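+                # Closing (rather than just committing) also returns the
+                # underlying connection to SQLAlchemy's pool; sessions the
+                # caller passed in are left open so the caller keeps control
+                # of the transaction lifecycle.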
+ session.close() + + wrapped.__doc__ = fn.__doc__ + wrapped.func_name = fn.func_name + + return wrapped + +__all__.append('session_wrapper') + +################################################################################ + class Architecture(object): def __init__(self, *args, **kwargs): pass + def __eq__(self, val): + if isinstance(val, str): + return (self.arch_string== val) + # This signals to use the normal comparison operator + return NotImplemented + + def __ne__(self, val): + if isinstance(val, str): + return (self.arch_string != val) + # This signals to use the normal comparison operator + return NotImplemented + def __repr__(self): return '' % self.arch_string __all__.append('Architecture') +@session_wrapper def get_architecture(architecture, session=None): """ Returns database id for given C{architecture}. @@ -74,17 +163,18 @@ def get_architecture(architecture, session=None): @rtype: Architecture @return: Architecture object for the given arch (None if not present) - """ - if session is None: - session = DBConn().session() + q = session.query(Architecture).filter_by(arch_string=architecture) - if q.count() == 0: + + try: + return q.one() + except NoResultFound: return None - return q.one() __all__.append('get_architecture') +@session_wrapper def get_architecture_suites(architecture, session=None): """ Returns list of Suite objects for given C{architecture} name @@ -100,13 +190,13 @@ def get_architecture_suites(architecture, session=None): @return: list of Suite objects for the given name (may be empty) """ - if session is None: - session = DBConn().session() - q = session.query(Suite) q = q.join(SuiteArchitecture) q = q.join(Architecture).filter_by(arch_string=architecture).order_by('suite_name') - return q.all() + + ret = q.all() + + return ret __all__.append('get_architecture_suites') @@ -117,13 +207,14 @@ class Archive(object): pass def __repr__(self): - return '' % self.name + return '' % self.archive_name __all__.append('Archive') +@session_wrapper def get_archive(archive, session=None): """ - returns database id for given c{archive}. + returns database id for given C{archive}. 
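+
+    Example (illustrative; shows the two calling conventions provided by the
+    @session_wrapper decorator, with 'security' as a hypothetical archive
+    name)::
+
+        archive = get_archive('security')           # wrapper opens and closes a session
+        archive = get_archive('security', session)  # reuse a caller-managed session
+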
@type archive: string @param archive: the name of the arhive @@ -137,12 +228,13 @@ def get_archive(archive, session=None): """ archive = archive.lower() - if session is None: - session = DBConn().session() + q = session.query(Archive).filter_by(archive_name=archive) - if q.count() == 0: + + try: + return q.one() + except NoResultFound: return None - return q.one() __all__.append('get_archive') @@ -159,6 +251,17 @@ __all__.append('BinAssociation') ################################################################################ +class BinContents(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % (self.binary, self.filename) + +__all__.append('BinContents') + +################################################################################ + class DBBinary(object): def __init__(self, *args, **kwargs): pass @@ -168,12 +271,29 @@ class DBBinary(object): __all__.append('DBBinary') -def get_binary_from_id(id, session=None): +@session_wrapper +def get_suites_binary_in(package, session=None): + """ + Returns list of Suite objects which given C{package} name is in + + @type source: str + @param source: DBBinary package name to search for + + @rtype: list + @return: list of Suite objects for the given package + """ + + return session.query(Suite).join(BinAssociation).join(DBBinary).filter_by(package=package).all() + +__all__.append('get_suites_binary_in') + +@session_wrapper +def get_binary_from_id(binary_id, session=None): """ Returns DBBinary object for given C{id} - @type id: int - @param id: Id of the required binary + @type binary_id: int + @param binary_id: Id of the required binary @type session: Session @param session: Optional SQLA session object (a temporary one will be @@ -182,22 +302,30 @@ def get_binary_from_id(id, session=None): @rtype: DBBinary @return: DBBinary object for the given binary (None if not present) """ - if session is None: - session = DBConn().session() - q = session.query(DBBinary).filter_by(binary_id=id) - if q.count() == 0: + + q = session.query(DBBinary).filter_by(binary_id=binary_id) + + try: + return q.one() + except NoResultFound: return None - return q.one() __all__.append('get_binary_from_id') -def get_binaries_from_name(package, session=None): +@session_wrapper +def get_binaries_from_name(package, version=None, architecture=None, session=None): """ Returns list of DBBinary objects for given C{package} name @type package: str @param package: DBBinary package name to search for + @type version: str or None + @param version: Version to search for (or None) + + @type package: str, list or None + @param package: Architectures to limit to (or None if no limit) + @type session: Session @param session: Optional SQL session object (a temporary one will be generated if not supplied) @@ -205,14 +333,66 @@ def get_binaries_from_name(package, session=None): @rtype: list @return: list of DBBinary objects for the given name (may be empty) """ - if session is None: - session = DBConn().session() - return session.query(DBBinary).filter_by(package=package).all() + + q = session.query(DBBinary).filter_by(package=package) + + if version is not None: + q = q.filter_by(version=version) + + if architecture is not None: + if not isinstance(architecture, list): + architecture = [architecture] + q = q.join(Architecture).filter(Architecture.arch_string.in_(architecture)) + + ret = q.all() + + return ret __all__.append('get_binaries_from_name') +@session_wrapper +def get_binaries_from_source_id(source_id, session=None): + """ + Returns list of 
DBBinary objects for given C{source_id} + + @type source_id: int + @param source_id: source_id to search for + + @type session: Session + @param session: Optional SQL session object (a temporary one will be + generated if not supplied) + + @rtype: list + @return: list of DBBinary objects for the given name (may be empty) + """ + + return session.query(DBBinary).filter_by(source_id=source_id).all() + +__all__.append('get_binaries_from_source_id') + +@session_wrapper +def get_binary_from_name_suite(package, suitename, session=None): + ### For dak examine-package + ### XXX: Doesn't use object API yet + + sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name + FROM binaries b, files fi, location l, component c, bin_associations ba, suite su + WHERE b.package=:package + AND b.file = fi.id + AND fi.location = l.id + AND l.component = c.id + AND ba.bin=b.id + AND ba.suite = su.id + AND su.suite_name=:suitename + ORDER BY b.version DESC""" + + return session.execute(sql, {'package': package, 'suitename': suitename}) + +__all__.append('get_binary_from_name_suite') + +@session_wrapper def get_binary_components(package, suitename, arch, session=None): -# Check for packages that have moved from one component to another + # Check for packages that have moved from one component to another query = """SELECT c.name FROM binaries b, bin_associations ba, suite s, location l, component c, architecture a, files f WHERE b.package=:package AND s.suite_name=:suitename AND (a.arch_string = :arch OR a.arch_string = 'all') @@ -223,23 +403,364 @@ def get_binary_components(package, suitename, arch, session=None): vals = {'package': package, 'suitename': suitename, 'arch': arch} - if session is None: - session = DBConn().session() return session.execute(query, vals) __all__.append('get_binary_components') + +################################################################################ + +class BinaryACL(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.binary_acl_id + +__all__.append('BinaryACL') + +################################################################################ + +class BinaryACLMap(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.binary_acl_map_id + +__all__.append('BinaryACLMap') + +################################################################################ + +MINIMAL_APT_CONF=""" +Dir +{ + ArchiveDir "%(archivepath)s"; + OverrideDir "/srv/ftp.debian.org/scripts/override/"; + CacheDir "/srv/ftp.debian.org/database/"; +}; + +Default +{ + Packages::Compress ". bzip2 gzip"; + Sources::Compress ". bzip2 gzip"; + DeLinkLimit 0; + FileMode 0664; +} + +bindirectory "incoming" +{ + Packages "Packages"; + Contents " "; + + BinOverride "override.sid.all3"; + BinCacheDB "packages-accepted.db"; + + FileList "%(filelist)s"; + + PathPrefix ""; + Packages::Extensions ".deb .udeb"; +}; + +bindirectory "incoming/" +{ + Sources "Sources"; + BinOverride "override.sid.all3"; + SrcOverride "override.sid.all3.src"; + FileList "%(filelist)s"; +}; +""" + +class BuildQueue(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.queue_name + + def write_metadata(self, starttime, force=False): + # Do we write out metafiles? 
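+        # (generate_metadata is a per-queue flag from the database; passing
+        # force=True regenerates the metadata even when the flag is off.)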
+ if not (force or self.generate_metadata): + return + + session = DBConn().session().object_session(self) + + fl_fd = fl_name = ac_fd = ac_name = None + tempdir = None + arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ]) + startdir = os.getcwd() + + try: + # Grab files we want to include + newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all() + # Write file list with newer files + (fl_fd, fl_name) = mkstemp() + for n in newer: + os.write(fl_fd, '%s\n' % n.fullpath) + os.close(fl_fd) + + # Write minimal apt.conf + # TODO: Remove hardcoding from template + (ac_fd, ac_name) = mkstemp() + os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path, + 'filelist': fl_name}) + os.close(ac_fd) + + # Run apt-ftparchive generate + os.chdir(os.path.dirname(ac_name)) + os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name)) + + # Run apt-ftparchive release + # TODO: Eww - fix this + bname = os.path.basename(self.path) + os.chdir(self.path) + os.chdir('..') + + # We have to remove the Release file otherwise it'll be included in the + # new one + try: + os.unlink(os.path.join(bname, 'Release')) + except OSError: + pass + + os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname)) + + # Sign if necessary + if self.signingkey: + cnf = Config() + keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"] + if cnf.has_key("Dinstall::SigningPubKeyring"): + keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"] + + os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey)) + + # Move the files if we got this far + os.rename('Release', os.path.join(bname, 'Release')) + if self.signingkey: + os.rename('Release.gpg', os.path.join(bname, 'Release.gpg')) + + # Clean up any left behind files + finally: + os.chdir(startdir) + if fl_fd: + try: + os.close(fl_fd) + except OSError: + pass + + if fl_name: + try: + os.unlink(fl_name) + except OSError: + pass + + if ac_fd: + try: + os.close(ac_fd) + except OSError: + pass + + if ac_name: + try: + os.unlink(ac_name) + except OSError: + pass + + def clean_and_update(self, starttime, Logger, dryrun=False): + """WARNING: This routine commits for you""" + session = DBConn().session().object_session(self) + + if self.generate_metadata and not dryrun: + self.write_metadata(starttime) + + # Grab files older than our execution time + older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all() + + for o in older: + killdb = False + try: + if dryrun: + Logger.log(["I: Would have removed %s from the queue" % o.fullpath]) + else: + Logger.log(["I: Removing %s from the queue" % o.fullpath]) + os.unlink(o.fullpath) + killdb = True + except OSError, e: + # If it wasn't there, don't worry + if e.errno == ENOENT: + killdb = True + else: + # TODO: Replace with proper logging call + Logger.log(["E: Could not remove %s" % o.fullpath]) + + if killdb: + session.delete(o) + + session.commit() + + for f in 
os.listdir(self.path):
+            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release'):
+                continue
+
+            try:
+                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+            except NoResultFound:
+                fp = os.path.join(self.path, f)
+                if dryrun:
+                    Logger.log(["I: Would remove unused link %s" % fp])
+                else:
+                    Logger.log(["I: Removing unused link %s" % fp])
+                    try:
+                        os.unlink(fp)
+                    except OSError:
+                        # 'r' is unbound in this branch (the query raised
+                        # NoResultFound), so log the path we actually tried
+                        Logger.log(["E: Failed to unlink unreferenced file %s" % fp])
+
+    def add_file_from_pool(self, poolfile):
+        """Copies or symlinks a file from the pool into this queue directory.
+        Assumes that the PoolFile object is attached to the same SQLAlchemy
+        session as the Queue object is.
+
+        The caller is responsible for committing after calling this function."""
+        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
+
+        # Check if we have a file of this name or this ID already
+        for f in self.queuefiles:
+            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
+               f.poolfile.filename == poolfile_basename:
+                # In this case, update the BuildQueueFile entry so we
+                # don't remove it too early
+                f.lastused = datetime.now()
+                DBConn().session().object_session(poolfile).add(f)
+                return f
+
+        # Prepare BuildQueueFile object
+        qf = BuildQueueFile()
+        qf.build_queue_id = self.queue_id
+        qf.lastused = datetime.now()
+        qf.filename = poolfile_basename
+
+        targetpath = poolfile.fullpath
+        queuepath = os.path.join(self.path, poolfile_basename)
+
+        try:
+            if self.copy_files:
+                # We need to copy instead of symlink
+                import utils
+                utils.copy(targetpath, queuepath)
+                # NULL in the fileid field implies a copy
+                qf.fileid = None
+            else:
+                os.symlink(targetpath, queuepath)
+                qf.fileid = poolfile.file_id
+        except OSError:
+            return None
+
+        # Get the same session as the PoolFile is using and add the qf to it
+        DBConn().session().object_session(poolfile).add(qf)
+
+        return qf
+
+
+__all__.append('BuildQueue')
+
+@session_wrapper
+def get_build_queue(queuename, session=None):
+    """
+    Returns BuildQueue object for given C{queue name}, or None if no queue
+    of that name exists.
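+
+    Example (illustrative; 'buildd' is a hypothetical queue name)::
+
+        q = get_build_queue('buildd')
+        if q is not None:
+            q.write_metadata(datetime.now(), force=True)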
+ + @type queuename: string + @param queuename: The name of the queue + + @type session: Session + @param session: Optional SQLA session object (a temporary one will be + generated if not supplied) + + @rtype: BuildQueue + @return: BuildQueue object for the given queue + """ + + q = session.query(BuildQueue).filter_by(queue_name=queuename) + + try: + return q.one() + except NoResultFound: + return None + +__all__.append('get_build_queue') + +################################################################################ + +class BuildQueueFile(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % (self.filename, self.build_queue_id) + + @property + def fullpath(self): + return os.path.join(self.buildqueue.path, self.filename) + + +__all__.append('BuildQueueFile') + +################################################################################ + +class ChangePendingBinary(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.change_pending_binary_id + +__all__.append('ChangePendingBinary') + +################################################################################ + +class ChangePendingFile(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.change_pending_file_id + +__all__.append('ChangePendingFile') + +################################################################################ + +class ChangePendingSource(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.change_pending_source_id + +__all__.append('ChangePendingSource') + ################################################################################ class Component(object): def __init__(self, *args, **kwargs): pass + def __eq__(self, val): + if isinstance(val, str): + return (self.component_name == val) + # This signals to use the normal comparison operator + return NotImplemented + + def __ne__(self, val): + if isinstance(val, str): + return (self.component_name != val) + # This signals to use the normal comparison operator + return NotImplemented + def __repr__(self): return '' % self.component_name __all__.append('Component') +@session_wrapper def get_component(component, session=None): """ Returns database id for given C{component}. @@ -252,12 +773,13 @@ def get_component(component, session=None): """ component = component.lower() - if session is None: - session = DBConn().session() + q = session.query(Component).filter_by(component_name=component) - if q.count() == 0: + + try: + return q.one() + except NoResultFound: return None - return q.one() __all__.append('get_component') @@ -274,15 +796,7 @@ __all__.append('DBConfig') ################################################################################ -class ContentFilename(object): - def __init__(self, *args, **kwargs): - pass - - def __repr__(self): - return '' % self.filename - -__all__.append('ContentFilename') - +@session_wrapper def get_or_set_contents_file_id(filename, session=None): """ Returns database id for given filename. @@ -293,30 +807,79 @@ def get_or_set_contents_file_id(filename, session=None): @param filename: The filename @type session: SQLAlchemy @param session: Optional SQL session object (a temporary one will be - generated if not supplied) + generated if not supplied). If not passed, a commit will be performed at + the end of the function, otherwise the caller is responsible for commiting. 
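+
+    Example (illustrative; the path is hypothetical, and the call is
+    idempotent thanks to the get-or-set pattern)::
+
+        a = get_or_set_contents_file_id('usr/bin/hello')
+        b = get_or_set_contents_file_id('usr/bin/hello')
+        assert a == b   # the second call finds the row the first inserted
+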
@rtype: int @return: the database id for the given component """ - if session is None: - session = DBConn().session() + + q = session.query(ContentFilename).filter_by(filename=filename) try: - q = session.query(ContentFilename).filter_by(filename=filename) - if q.count() < 1: - cf = ContentFilename() - cf.filename = filename - session.add(cf) - return cf.cafilename_id - else: - return q.one().cafilename_id + ret = q.one().cafilename_id + except NoResultFound: + cf = ContentFilename() + cf.filename = filename + session.add(cf) + session.commit_or_flush() + ret = cf.cafilename_id - except: - traceback.print_exc() - raise + return ret __all__.append('get_or_set_contents_file_id') +@session_wrapper +def get_contents(suite, overridetype, section=None, session=None): + """ + Returns contents for a suite / overridetype combination, limiting + to a section if not None. + + @type suite: Suite + @param suite: Suite object + + @type overridetype: OverrideType + @param overridetype: OverrideType object + + @type section: Section + @param section: Optional section object to limit results to + + @type session: SQLAlchemy + @param session: Optional SQL session object (a temporary one will be + generated if not supplied) + + @rtype: ResultsProxy + @return: ResultsProxy object set up to return tuples of (filename, section, + package, arch_id) + """ + + # find me all of the contents for a given suite + contents_q = """SELECT (p.path||'/'||n.file) AS fn, + s.section, + b.package, + b.architecture + FROM content_associations c join content_file_paths p ON (c.filepath=p.id) + JOIN content_file_names n ON (c.filename=n.id) + JOIN binaries b ON (b.id=c.binary_pkg) + JOIN override o ON (o.package=b.package) + JOIN section s ON (s.id=o.section) + WHERE o.suite = :suiteid AND o.type = :overridetypeid + AND b.type=:overridetypename""" + + vals = {'suiteid': suite.suite_id, + 'overridetypeid': overridetype.overridetype_id, + 'overridetypename': overridetype.overridetype} + + if section is not None: + contents_q += " AND s.id = :sectionid" + vals['sectionid'] = section.section_id + + contents_q += " ORDER BY fn" + + return session.execute(contents_q, vals) + +__all__.append('get_contents') + ################################################################################ class ContentFilepath(object): @@ -328,7 +891,8 @@ class ContentFilepath(object): __all__.append('ContentFilepath') -def get_or_set_contents_path_id(filepath, session): +@session_wrapper +def get_or_set_contents_path_id(filepath, session=None): """ Returns database id for given path. @@ -338,27 +902,25 @@ def get_or_set_contents_path_id(filepath, session): @param filename: The filepath @type session: SQLAlchemy @param session: Optional SQL session object (a temporary one will be - generated if not supplied) + generated if not supplied). If not passed, a commit will be performed at + the end of the function, otherwise the caller is responsible for commiting. 
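+
+    Example (illustrative; shows the path/filename split this table pairs
+    with, using a hypothetical path)::
+
+        (path, filename) = os.path.split('usr/share/doc/hello/copyright')
+        path_id = get_or_set_contents_path_id(path)
+        file_id = get_or_set_contents_file_id(filename)
+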
@rtype: int @return: the database id for the given path """ - if session is None: - session = DBConn().session() + + q = session.query(ContentFilepath).filter_by(filepath=filepath) try: - q = session.query(ContentFilepath).filter_by(filepath=filepath) - if q.count() < 1: - cf = ContentFilepath() - cf.filepath = filepath - session.add(cf) - return cf.cafilepath_id - else: - return q.one().cafilepath_id + ret = q.one().cafilepath_id + except NoResultFound: + cf = ContentFilepath() + cf.filepath = filepath + session.add(cf) + session.commit_or_flush() + ret = cf.cafilepath_id - except: - traceback.print_exc() - raise + return ret __all__.append('get_or_set_contents_path_id') @@ -385,39 +947,38 @@ def insert_content_paths(binary_id, fullpaths, session=None): @param session: Optional SQLAlchemy session. If this is passed, the caller is responsible for ensuring a transaction has begun and committing the results or rolling back based on the result code. If not passed, a commit - will be performed at the end of the function + will be performed at the end of the function, otherwise the caller is + responsible for commiting. @return: True upon success """ privatetrans = False - if session is None: session = DBConn().session() privatetrans = True try: + # Insert paths + pathcache = {} for fullpath in fullpaths: - (path, file) = os.path.split(fullpath) + if fullpath.startswith( './' ): + fullpath = fullpath[2:] - # Get the necessary IDs ... - ca = ContentAssociation() - ca.binary_id = binary_id - ca.filename_id = get_or_set_contents_file_id(file) - ca.filepath_id = get_or_set_contents_path_id(path) - session.add(ca) + session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )", { 'filename': fullpath, 'id': binary_id} ) - # Only commit if we set up the session ourself + session.commit() if privatetrans: - session.commit() - + session.close() return True + except: traceback.print_exc() # Only rollback if we set up the session ourself if privatetrans: session.rollback() + session.close() return False @@ -434,6 +995,39 @@ class DSCFile(object): __all__.append('DSCFile') +@session_wrapper +def get_dscfiles(dscfile_id=None, source_id=None, poolfile_id=None, session=None): + """ + Returns a list of DSCFiles which may be empty + + @type dscfile_id: int (optional) + @param dscfile_id: the dscfile_id of the DSCFiles to find + + @type source_id: int (optional) + @param source_id: the source id related to the DSCFiles to find + + @type poolfile_id: int (optional) + @param poolfile_id: the poolfile id related to the DSCFiles to find + + @rtype: list + @return: Possibly empty list of DSCFiles + """ + + q = session.query(DSCFile) + + if dscfile_id is not None: + q = q.filter_by(dscfile_id=dscfile_id) + + if source_id is not None: + q = q.filter_by(source_id=source_id) + + if poolfile_id is not None: + q = q.filter_by(poolfile_id=poolfile_id) + + return q.all() + +__all__.append('get_dscfiles') + ################################################################################ class PoolFile(object): @@ -443,8 +1037,84 @@ class PoolFile(object): def __repr__(self): return '' % self.filename + @property + def fullpath(self): + return os.path.join(self.location.path, self.filename) + __all__.append('PoolFile') +@session_wrapper +def check_poolfile(filename, filesize, md5sum, location_id, session=None): + """ + Returns a tuple: + (ValidFileFound [boolean or None], PoolFile object or None) + + @type filename: string + @param filename: the filename of the file to check against the DB + + 
@type filesize: int + @param filesize: the size of the file to check against the DB + + @type md5sum: string + @param md5sum: the md5sum of the file to check against the DB + + @type location_id: int + @param location_id: the id of the location to look in + + @rtype: tuple + @return: Tuple of length 2. + If more than one file found with that name: + (None, None) + If valid pool file found: (True, PoolFile object) + If valid pool file not found: + (False, None) if no file found + (False, PoolFile object) if file found with size/md5sum mismatch + """ + + q = session.query(PoolFile).filter_by(filename=filename) + q = q.join(Location).filter_by(location_id=location_id) + + ret = None + + if q.count() > 1: + ret = (None, None) + elif q.count() < 1: + ret = (False, None) + else: + obj = q.one() + if obj.md5sum != md5sum or obj.filesize != int(filesize): + ret = (False, obj) + + if ret is None: + ret = (True, obj) + + return ret + +__all__.append('check_poolfile') + +@session_wrapper +def get_poolfile_by_id(file_id, session=None): + """ + Returns a PoolFile objects or None for the given id + + @type file_id: int + @param file_id: the id of the file to look for + + @rtype: PoolFile or None + @return: either the PoolFile object or None + """ + + q = session.query(PoolFile).filter_by(file_id=file_id) + + try: + return q.one() + except NoResultFound: + return None + +__all__.append('get_poolfile_by_id') + + +@session_wrapper def get_poolfile_by_name(filename, location_id=None, session=None): """ Returns an array of PoolFile objects for the given filename and @@ -460,9 +1130,6 @@ def get_poolfile_by_name(filename, location_id=None, session=None): @return: array of PoolFile objects """ - if session is not None: - session = DBConn().session() - q = session.query(PoolFile).filter_by(filename=filename) if location_id is not None: @@ -472,6 +1139,58 @@ def get_poolfile_by_name(filename, location_id=None, session=None): __all__.append('get_poolfile_by_name') +@session_wrapper +def get_poolfile_like_name(filename, session=None): + """ + Returns an array of PoolFile objects which are like the given name + + @type filename: string + @param filename: the filename of the file to check against the DB + + @rtype: array + @return: array of PoolFile objects + """ + + # TODO: There must be a way of properly using bind parameters with %FOO% + q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename)) + + return q.all() + +__all__.append('get_poolfile_like_name') + +@session_wrapper +def add_poolfile(filename, datadict, location_id, session=None): + """ + Add a new file to the pool + + @type filename: string + @param filename: filename + + @type datadict: dict + @param datadict: dict with needed data + + @type location_id: int + @param location_id: database id of the location + + @rtype: PoolFile + @return: the PoolFile object created + """ + poolfile = PoolFile() + poolfile.filename = filename + poolfile.filesize = datadict["size"] + poolfile.md5sum = datadict["md5sum"] + poolfile.sha1sum = datadict["sha1sum"] + poolfile.sha256sum = datadict["sha256sum"] + poolfile.location_id = location_id + + session.add(poolfile) + # Flush to get a file id (NB: This is not a commit) + session.flush() + + return poolfile + +__all__.append('add_poolfile') + ################################################################################ class Fingerprint(object): @@ -483,17 +1202,337 @@ class Fingerprint(object): __all__.append('Fingerprint') +@session_wrapper +def get_fingerprint(fpr, session=None): + """ + 
Returns Fingerprint object for given fpr. + + @type fpr: string + @param fpr: The fpr to find / add + + @type session: SQLAlchemy + @param session: Optional SQL session object (a temporary one will be + generated if not supplied). + + @rtype: Fingerprint + @return: the Fingerprint object for the given fpr or None + """ + + q = session.query(Fingerprint).filter_by(fingerprint=fpr) + + try: + ret = q.one() + except NoResultFound: + ret = None + + return ret + +__all__.append('get_fingerprint') + +@session_wrapper +def get_or_set_fingerprint(fpr, session=None): + """ + Returns Fingerprint object for given fpr. + + If no matching fpr is found, a row is inserted. + + @type fpr: string + @param fpr: The fpr to find / add + + @type session: SQLAlchemy + @param session: Optional SQL session object (a temporary one will be + generated if not supplied). If not passed, a commit will be performed at + the end of the function, otherwise the caller is responsible for commiting. + A flush will be performed either way. + + @rtype: Fingerprint + @return: the Fingerprint object for the given fpr + """ + + q = session.query(Fingerprint).filter_by(fingerprint=fpr) + + try: + ret = q.one() + except NoResultFound: + fingerprint = Fingerprint() + fingerprint.fingerprint = fpr + session.add(fingerprint) + session.commit_or_flush() + ret = fingerprint + + return ret + +__all__.append('get_or_set_fingerprint') + +################################################################################ + +# Helper routine for Keyring class +def get_ldap_name(entry): + name = [] + for k in ["cn", "mn", "sn"]: + ret = entry.get(k) + if ret and ret[0] != "" and ret[0] != "-": + name.append(ret[0]) + return " ".join(name) + ################################################################################ class Keyring(object): + gpg_invocation = "gpg --no-default-keyring --keyring %s" +\ + " --with-colons --fingerprint --fingerprint" + + keys = {} + fpr_lookup = {} + def __init__(self, *args, **kwargs): pass def __repr__(self): return '' % self.keyring_name + def de_escape_gpg_str(self, txt): + esclist = re.split(r'(\\x..)', txt) + for x in range(1,len(esclist),2): + esclist[x] = "%c" % (int(esclist[x][2:],16)) + return "".join(esclist) + + def load_keys(self, keyring): + import email.Utils + + if not self.keyring_id: + raise Exception('Must be initialized with database information') + + k = os.popen(self.gpg_invocation % keyring, "r") + key = None + signingkey = False + + for line in k.xreadlines(): + field = line.split(":") + if field[0] == "pub": + key = field[4] + (name, addr) = email.Utils.parseaddr(field[9]) + name = re.sub(r"\s*[(].*[)]", "", name) + if name == "" or addr == "" or "@" not in addr: + name = field[9] + addr = "invalid-uid" + name = self.de_escape_gpg_str(name) + self.keys[key] = {"email": addr} + if name != "": + self.keys[key]["name"] = name + self.keys[key]["aliases"] = [name] + self.keys[key]["fingerprints"] = [] + signingkey = True + elif key and field[0] == "sub" and len(field) >= 12: + signingkey = ("s" in field[11]) + elif key and field[0] == "uid": + (name, addr) = email.Utils.parseaddr(field[9]) + if name and name not in self.keys[key]["aliases"]: + self.keys[key]["aliases"].append(name) + elif signingkey and field[0] == "fpr": + self.keys[key]["fingerprints"].append(field[9]) + self.fpr_lookup[field[9]] = key + + def import_users_from_ldap(self, session): + import ldap + cnf = Config() + + LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"] + LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"] + 
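+        # Anonymous bind; the search below fetches every entry that carries
+        # a key fingerprint and the GID configured as valid for developers.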
+ l = ldap.open(LDAPServer) + l.simple_bind_s("","") + Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL, + "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]), + ["uid", "keyfingerprint", "cn", "mn", "sn"]) + + ldap_fin_uid_id = {} + + byuid = {} + byname = {} + + for i in Attrs: + entry = i[1] + uid = entry["uid"][0] + name = get_ldap_name(entry) + fingerprints = entry["keyFingerPrint"] + keyid = None + for f in fingerprints: + key = self.fpr_lookup.get(f, None) + if key not in self.keys: + continue + self.keys[key]["uid"] = uid + + if keyid != None: + continue + keyid = get_or_set_uid(uid, session).uid_id + byuid[keyid] = (uid, name) + byname[uid] = (keyid, name) + + return (byname, byuid) + + def generate_users_from_keyring(self, format, session): + byuid = {} + byname = {} + any_invalid = False + for x in self.keys.keys(): + if self.keys[x]["email"] == "invalid-uid": + any_invalid = True + self.keys[x]["uid"] = format % "invalid-uid" + else: + uid = format % self.keys[x]["email"] + keyid = get_or_set_uid(uid, session).uid_id + byuid[keyid] = (uid, self.keys[x]["name"]) + byname[uid] = (keyid, self.keys[x]["name"]) + self.keys[x]["uid"] = uid + + if any_invalid: + uid = format % "invalid-uid" + keyid = get_or_set_uid(uid, session).uid_id + byuid[keyid] = (uid, "ungeneratable user id") + byname[uid] = (keyid, "ungeneratable user id") + + return (byname, byuid) + __all__.append('Keyring') +@session_wrapper +def get_keyring(keyring, session=None): + """ + If C{keyring} does not have an entry in the C{keyrings} table yet, return None + If C{keyring} already has an entry, simply return the existing Keyring + + @type keyring: string + @param keyring: the keyring name + + @rtype: Keyring + @return: the Keyring object for this keyring + """ + + q = session.query(Keyring).filter_by(keyring_name=keyring) + + try: + return q.one() + except NoResultFound: + return None + +__all__.append('get_keyring') + +################################################################################ + +class KeyringACLMap(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.keyring_acl_map_id + +__all__.append('KeyringACLMap') + +################################################################################ + +class DBChange(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.changesname + + def upload_into_db(self, u, path): + cnf = Config() + session = DBConn().session().object_session(self) + + files = [] + for chg_fn, entry in u.pkg.files.items(): + try: + f = open(os.path.join(path, chg_fn)) + cpf = ChangePendingFile() + cpf.filename = chg_fn + cpf.size = entry['size'] + cpf.md5sum = entry['md5sum'] + + if entry.has_key('sha1sum'): + cpf.sha1sum = entry['sha1sum'] + else: + f.seek(0) + cpf.sha1sum = apt_pkg.sha1sum(f) + + if entry.has_key('sha256sum'): + cpf.sha256sum = entry['sha256sum'] + else: + f.seek(0) + cpf.sha256sum = apt_pkg.sha256sum(f) + + session.add(cpf) + files.append(cpf) + f.close() + + except IOError: + # Can't find the file, try to look it up in the pool + from utils import poolify + poolname = poolify(entry["source"], entry["component"]) + l = get_location(cnf["Dir::Pool"], entry["component"], session=session) + + found, poolfile = check_poolfile(os.path.join(poolname, chg_fn), + entry['size'], + entry["md5sum"], + l.location_id, + session=session) + + if found is None: + Logger.log(["E: Found multiple files for pool (%s) for %s" % % (chg_fn, entry["component"])) + elif 
found is False and poolfile is not None:
+                    Logger.log(["E: md5sum/size mismatch for %s in pool" % (chg_fn)])
+                else:
+                    if poolfile is None:
+                        Logger.log(["E: Could not find %s in pool" % (chg_fn)])
+                    else:
+                        self.poolfiles.append(poolfile)
+
+        self.files = files
+
+
+    def clean_from_queue(self):
+        session = DBConn().session().object_session(self)
+
+        # Remove changes_pool_files entries (iterate over a copy of the
+        # collection; removing entries while iterating over the live list
+        # would skip every other one)
+        for pf in self.poolfiles[:]:
+            self.poolfiles.remove(pf)
+
+        # Remove change
+        for cf in self.files[:]:
+            self.files.remove(cf)
+
+        # Clear out of queue
+        self.in_queue = None
+        self.approved_for_id = None
+
+__all__.append('DBChange')
+
+@session_wrapper
+def get_dbchange(filename, session=None):
+    """
+    Returns DBChange object for given C{filename}.
+
+    @type filename: string
+    @param filename: the name of the .changes file
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: DBChange
+    @return: DBChange object for the given changes filename (None if not present)
+
+    """
+    q = session.query(DBChange).filter_by(changesname=filename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_dbchange')
+
 ################################################################################

 class Location(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -505,6 +1544,7 @@ class Location(object):

 __all__.append('Location')

+@session_wrapper
 def get_location(location, component=None, archive=None, session=None):
     """
     Returns Location object for the given combination of location, component
@@ -523,9 +1563,6 @@ def get_location(location, component=None, archive=None, session=None):
     @return: Either a Location object or None if one can't be found
     """

-    if session is None:
-        session = DBConn().session()
-
     q = session.query(Location).filter_by(path=location)

     if archive is not None:
@@ -534,10 +1571,10 @@ def get_location(location, component=None, archive=None, session=None):
     if component is not None:
         q = q.join(Component).filter_by(component_name=component)

-    if q.count() < 1:
-        return None
-    else:
+    try:
         return q.one()
+    except NoResultFound:
+        return None

 __all__.append('get_location')

@@ -558,6 +1595,127 @@ class Maintainer(object):

 __all__.append('Maintainer')

+@session_wrapper
+def get_or_set_maintainer(name, session=None):
+    """
+    Returns Maintainer object for given maintainer name.
+
+    If no matching maintainer name is found, a row is inserted.
+
+    @type name: string
+    @param name: The maintainer name to add
+
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).  If not passed, a commit will be performed at
+    the end of the function, otherwise the caller is responsible for committing.
+    A flush will be performed either way.
+
+    @rtype: Maintainer
+    @return: the Maintainer object for the given maintainer
+    """
+
+    q = session.query(Maintainer).filter_by(name=name)
+    try:
+        ret = q.one()
+    except NoResultFound:
+        maintainer = Maintainer()
+        maintainer.name = name
+        session.add(maintainer)
+        session.commit_or_flush()
+        ret = maintainer
+
+    return ret
+
+__all__.append('get_or_set_maintainer')
+
+@session_wrapper
+def get_maintainer(maintainer_id, session=None):
+    """
+    Return the Maintainer object behind C{maintainer_id} or None if that
+    maintainer_id is invalid.
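+
+    Example (illustrative; the id is hypothetical)::
+
+        m = get_maintainer(42)
+        if m is not None:
+            print m.name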
+ + @type maintainer_id: int + @param maintainer_id: the id of the maintainer + + @rtype: Maintainer + @return: the Maintainer with this C{maintainer_id} + """ + + return session.query(Maintainer).get(maintainer_id) + +__all__.append('get_maintainer') + +################################################################################ + +class NewComment(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '''''' % (self.package, self.version, self.comment_id) + +__all__.append('NewComment') + +@session_wrapper +def has_new_comment(package, version, session=None): + """ + Returns true if the given combination of C{package}, C{version} has a comment. + + @type package: string + @param package: name of the package + + @type version: string + @param version: package version + + @type session: Session + @param session: Optional SQLA session object (a temporary one will be + generated if not supplied) + + @rtype: boolean + @return: true/false + """ + + q = session.query(NewComment) + q = q.filter_by(package=package) + q = q.filter_by(version=version) + + return bool(q.count() > 0) + +__all__.append('has_new_comment') + +@session_wrapper +def get_new_comments(package=None, version=None, comment_id=None, session=None): + """ + Returns (possibly empty) list of NewComment objects for the given + parameters + + @type package: string (optional) + @param package: name of the package + + @type version: string (optional) + @param version: package version + + @type comment_id: int (optional) + @param comment_id: An id of a comment + + @type session: Session + @param session: Optional SQLA session object (a temporary one will be + generated if not supplied) + + @rtype: list + @return: A (possibly empty) list of NewComment objects will be returned + """ + + q = session.query(NewComment) + if package is not None: q = q.filter_by(package=package) + if version is not None: q = q.filter_by(version=version) + if comment_id is not None: q = q.filter_by(comment_id=comment_id) + + return q.all() + +__all__.append('get_new_comments') + ################################################################################ class Override(object): @@ -569,6 +1727,54 @@ class Override(object): __all__.append('Override') +@session_wrapper +def get_override(package, suite=None, component=None, overridetype=None, session=None): + """ + Returns Override object for the given parameters + + @type package: string + @param package: The name of the package + + @type suite: string, list or None + @param suite: The name of the suite (or suites if a list) to limit to. If + None, don't limit. Defaults to None. + + @type component: string, list or None + @param component: The name of the component (or components if a list) to + limit to. If None, don't limit. Defaults to None. + + @type overridetype: string, list or None + @param overridetype: The name of the overridetype (or overridetypes if a list) to + limit to. If None, don't limit. Defaults to None. 
+ + @type session: Session + @param session: Optional SQLA session object (a temporary one will be + generated if not supplied) + + @rtype: list + @return: A (possibly empty) list of Override objects will be returned + """ + + q = session.query(Override) + q = q.filter_by(package=package) + + if suite is not None: + if not isinstance(suite, list): suite = [suite] + q = q.join(Suite).filter(Suite.suite_name.in_(suite)) + + if component is not None: + if not isinstance(component, list): component = [component] + q = q.join(Component).filter(Component.component_name.in_(component)) + + if overridetype is not None: + if not isinstance(overridetype, list): overridetype = [overridetype] + q = q.join(OverrideType).filter(OverrideType.overridetype.in_(overridetype)) + + return q.all() + +__all__.append('get_override') + + ################################################################################ class OverrideType(object): @@ -580,6 +1786,7 @@ class OverrideType(object): __all__.append('OverrideType') +@session_wrapper def get_override_type(override_type, session=None): """ Returns OverrideType object for given C{override type}. @@ -593,14 +1800,14 @@ def get_override_type(override_type, session=None): @rtype: int @return: the database id for the given override type - """ - if session is None: - session = DBConn().session() - q = session.query(Priority).filter_by(priority=priority) - if q.count() == 0: + + q = session.query(OverrideType).filter_by(overridetype=override_type) + + try: + return q.one() + except NoResultFound: return None - return q.one() __all__.append('get_override_type') @@ -651,31 +1858,42 @@ def insert_pending_content_paths(package, fullpaths, session=None): q.delete() # Insert paths + pathcache = {} for fullpath in fullpaths: - (path, file) = os.path.split(fullpath) + (path, filename) = os.path.split(fullpath) if path.startswith( "./" ): path = path[2:] + filepath_id = get_or_set_contents_path_id(path, session) + filename_id = get_or_set_contents_file_id(filename, session) + + pathcache[fullpath] = (filepath_id, filename_id) + + for fullpath, dat in pathcache.items(): pca = PendingContentAssociation() pca.package = package['Package'] pca.version = package['Version'] - pca.filename_id = get_or_set_contents_file_id(file, session) - pca.filepath_id = get_or_set_contents_path_id(path, session) + pca.filepath_id = dat[0] + pca.filename_id = dat[1] pca.architecture = arch_id session.add(pca) # Only commit if we set up the session ourself if privatetrans: session.commit() + session.close() + else: + session.flush() return True - except: + except Exception, e: traceback.print_exc() # Only rollback if we set up the session ourself if privatetrans: session.rollback() + session.close() return False @@ -683,15 +1901,64 @@ __all__.append('insert_pending_content_paths') ################################################################################ +class PolicyQueue(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.queue_name + +__all__.append('PolicyQueue') + +@session_wrapper +def get_policy_queue(queuename, session=None): + """ + Returns PolicyQueue object for given C{queue name} + + @type queuename: string + @param queuename: The name of the queue + + @type session: Session + @param session: Optional SQLA session object (a temporary one will be + generated if not supplied) + + @rtype: PolicyQueue + @return: PolicyQueue object for the given queue + """ + + q = session.query(PolicyQueue).filter_by(queue_name=queuename) + + try: + 
return q.one() + except NoResultFound: + return None + +__all__.append('get_policy_queue') + +################################################################################ + class Priority(object): def __init__(self, *args, **kwargs): pass + def __eq__(self, val): + if isinstance(val, str): + return (self.priority == val) + # This signals to use the normal comparison operator + return NotImplemented + + def __ne__(self, val): + if isinstance(val, str): + return (self.priority != val) + # This signals to use the normal comparison operator + return NotImplemented + def __repr__(self): return '' % (self.priority, self.priority_id) __all__.append('Priority') +@session_wrapper def get_priority(priority, session=None): """ Returns Priority object for given C{priority name}. @@ -705,38 +1972,38 @@ def get_priority(priority, session=None): @rtype: Priority @return: Priority object for the given priority - """ - if session is None: - session = DBConn().session() + q = session.query(Priority).filter_by(priority=priority) - if q.count() == 0: + + try: + return q.one() + except NoResultFound: return None - return q.one() __all__.append('get_priority') -################################################################################ - -class Queue(object): - def __init__(self, *args, **kwargs): - pass - - def __repr__(self): - return '' % self.queue_name +@session_wrapper +def get_priorities(session=None): + """ + Returns dictionary of priority names -> id mappings -__all__.append('Queue') + @type session: Session + @param session: Optional SQL session object (a temporary one will be + generated if not supplied) -################################################################################ + @rtype: dictionary + @return: dictionary of priority names -> id mappings + """ -class QueueBuild(object): - def __init__(self, *args, **kwargs): - pass + ret = {} + q = session.query(Priority) + for x in q.all(): + ret[x.priority] = x.priority_id - def __repr__(self): - return '' % (self.filename, self.queue_id) + return ret -__all__.append('QueueBuild') +__all__.append('get_priorities') ################################################################################ @@ -744,11 +2011,24 @@ class Section(object): def __init__(self, *args, **kwargs): pass + def __eq__(self, val): + if isinstance(val, str): + return (self.section == val) + # This signals to use the normal comparison operator + return NotImplemented + + def __ne__(self, val): + if isinstance(val, str): + return (self.section != val) + # This signals to use the normal comparison operator + return NotImplemented + def __repr__(self): return '
' % self.section __all__.append('Section') +@session_wrapper def get_section(section, session=None): """ Returns Section object for given C{section name}. @@ -762,17 +2042,39 @@ def get_section(section, session=None): @rtype: Section @return: Section object for the given section name - """ - if session is None: - session = DBConn().session() + q = session.query(Section).filter_by(section=section) - if q.count() == 0: + + try: + return q.one() + except NoResultFound: return None - return q.one() __all__.append('get_section') +@session_wrapper +def get_sections(session=None): + """ + Returns dictionary of section names -> id mappings + + @type session: Session + @param session: Optional SQL session object (a temporary one will be + generated if not supplied) + + @rtype: dictionary + @return: dictionary of section names -> id mappings + """ + + ret = {} + q = session.query(Section) + for x in q.all(): + ret[x.section] = x.section_id + + return ret + +__all__.append('get_sections') + ################################################################################ class DBSource(object): @@ -784,13 +2086,101 @@ class DBSource(object): __all__.append('DBSource') -def get_sources_from_name(source, dm_upload_allowed=None, session=None): +@session_wrapper +def source_exists(source, source_version, suites = ["any"], session=None): + """ + Ensure that source exists somewhere in the archive for the binary + upload being processed. + 1. exact match => 1.0-3 + 2. bin-only NMU => 1.0-3+b1 , 1.0-3.1+b1 + + @type package: string + @param package: package source name + + @type source_version: string + @param source_version: expected source version + + @type suites: list + @param suites: list of suites to check in, default I{any} + + @type session: Session + @param session: Optional SQLA session object (a temporary one will be + generated if not supplied) + + @rtype: int + @return: returns 1 if a source with expected version is found, otherwise 0 + + """ + + cnf = Config() + ret = 1 + + for suite in suites: + q = session.query(DBSource).filter_by(source=source) + if suite != "any": + # source must exist in suite X, or in some other suite that's + # mapped to X, recursively... silent-maps are counted too, + # unreleased-maps aren't. 
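+            # For each "map A B" (or "silent-map A B") entry, if B is already
+            # in the accepted set then A is added as well, so a source that
+            # so far only exists in a suite mapped into the target suite
+            # still satisfies the check.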
+ maps = cnf.ValueList("SuiteMappings")[:] + maps.reverse() + maps = [ m.split() for m in maps ] + maps = [ (x[1], x[2]) for x in maps + if x[0] == "map" or x[0] == "silent-map" ] + s = [suite] + for x in maps: + if x[1] in s and x[0] not in s: + s.append(x[0]) + + q = q.join(SrcAssociation).join(Suite) + q = q.filter(Suite.suite_name.in_(s)) + + # Reduce the query results to a list of version numbers + ql = [ j.version for j in q.all() ] + + # Try (1) + if source_version in ql: + continue + + # Try (2) + from daklib.regexes import re_bin_only_nmu + orig_source_version = re_bin_only_nmu.sub('', source_version) + if orig_source_version in ql: + continue + + # No source found so return not ok + ret = 0 + + return ret + +__all__.append('source_exists') + +@session_wrapper +def get_suites_source_in(source, session=None): """ - Returns list of DBSource objects for given C{source} name + Returns list of Suite objects which given C{source} name is in @type source: str @param source: DBSource package name to search for + @rtype: list + @return: list of Suite objects for the given source + """ + + return session.query(Suite).join(SrcAssociation).join(DBSource).filter_by(source=source).all() + +__all__.append('get_suites_source_in') + +@session_wrapper +def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None): + """ + Returns list of DBSource objects for given C{source} name and other parameters + + @type source: str + @param source: DBSource package name to search for + + @type source: str or None + @param source: DBSource version name to search for or None if not applicable + @type dm_upload_allowed: bool @param dm_upload_allowed: If None, no effect. If True or False, only return packages with that dm_upload_allowed setting @@ -802,10 +2192,12 @@ def get_sources_from_name(source, dm_upload_allowed=None, session=None): @rtype: list @return: list of DBSource objects for the given name (may be empty) """ - if session is None: - session = DBConn().session() q = session.query(DBSource).filter_by(source=source) + + if version is not None: + q = q.filter_by(version=version) + if dm_upload_allowed is not None: q = q.filter_by(dm_upload_allowed=dm_upload_allowed) @@ -813,6 +2205,7 @@ def get_sources_from_name(source, dm_upload_allowed=None, session=None): __all__.append('get_sources_from_name') +@session_wrapper def get_source_in_suite(source, suite, session=None): """ Returns list of DBSource objects for a combination of C{source} and C{suite}. 
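+
+    Example (illustrative; package and suite names are hypothetical)::
+
+        src = get_source_in_suite('hello', 'unstable')
+        if src is not None:
+            print src.version
+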
@@ -830,20 +2223,211 @@ def get_source_in_suite(source, suite, session=None): @return: the version for I{source} in I{suite} """ - if session is None: - session = DBConn().session() + q = session.query(SrcAssociation) q = q.join('source').filter_by(source=source) q = q.join('suite').filter_by(suite_name=suite) - if q.count() == 0: + + try: + return q.one().source + except NoResultFound: return None - # ???: Maybe we should just return the SrcAssociation object instead - return q.one().source __all__.append('get_source_in_suite') ################################################################################ +@session_wrapper +def add_dsc_to_db(u, filename, session=None): + entry = u.pkg.files[filename] + source = DBSource() + pfs = [] + + source.source = u.pkg.dsc["source"] + source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch + source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id + source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id + source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id + source.install_date = datetime.now().date() + + dsc_component = entry["component"] + dsc_location_id = entry["location id"] + + source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes") + + # Set up a new poolfile if necessary + if not entry.has_key("files id") or not entry["files id"]: + filename = entry["pool name"] + filename + poolfile = add_poolfile(filename, entry, dsc_location_id, session) + session.flush() + pfs.append(poolfile) + entry["files id"] = poolfile.file_id + + source.poolfile_id = entry["files id"] + session.add(source) + session.flush() + + for suite_name in u.pkg.changes["distribution"].keys(): + sa = SrcAssociation() + sa.source_id = source.source_id + sa.suite_id = get_suite(suite_name).suite_id + session.add(sa) + + session.flush() + + # Add the source files to the DB (files and dsc_files) + dscfile = DSCFile() + dscfile.source_id = source.source_id + dscfile.poolfile_id = entry["files id"] + session.add(dscfile) + + for dsc_file, dentry in u.pkg.dsc_files.items(): + df = DSCFile() + df.source_id = source.source_id + + # If the .orig tarball is already in the pool, it's + # files id is stored in dsc_files by check_dsc(). + files_id = dentry.get("files id", None) + + # Find the entry in the files hash + # TODO: Bail out here properly + dfentry = None + for f, e in u.pkg.files.items(): + if f == dsc_file: + dfentry = e + break + + if files_id is None: + filename = dfentry["pool name"] + dsc_file + + (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id) + # FIXME: needs to check for -1/-2 and or handle exception + if found and obj is not None: + files_id = obj.file_id + pfs.append(obj) + + # If still not found, add it + if files_id is None: + # HACK: Force sha1sum etc into dentry + dentry["sha1sum"] = dfentry["sha1sum"] + dentry["sha256sum"] = dfentry["sha256sum"] + poolfile = add_poolfile(filename, dentry, dsc_location_id, session) + pfs.append(poolfile) + files_id = poolfile.file_id + else: + poolfile = get_poolfile_by_id(files_id, session) + if poolfile is None: + utils.fubar("INTERNAL ERROR. 
+ pfs.append(poolfile)
+
+ df.poolfile_id = files_id
+ session.add(df)
+
+ session.flush()
+
+ # Add the src_uploaders to the DB
+ uploader_ids = [source.maintainer_id]
+ if u.pkg.dsc.has_key("uploaders"):
+ for up in u.pkg.dsc["uploaders"].split(","):
+ up = up.strip()
+ uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
+
+ added_ids = {}
+ for up in uploader_ids:
+ if added_ids.has_key(up):
+ utils.warn("Already saw uploader %s for source %s" % (up, source.source))
+ continue
+
+ added_ids[up] = 1
+
+ su = SrcUploader()
+ su.maintainer_id = up
+ su.source_id = source.source_id
+ session.add(su)
+
+ session.flush()
+
+ return dsc_component, dsc_location_id, pfs
+
+__all__.append('add_dsc_to_db')
+
+@session_wrapper
+def add_deb_to_db(u, filename, session=None):
+ """
+ Contrary to what you might expect, this routine deals with both
+ debs and udebs. That info is in 'dbtype', whilst 'type' is
+ 'deb' for both of them.
+ """
+ cnf = Config()
+ entry = u.pkg.files[filename]
+
+ bin = DBBinary()
+ bin.package = entry["package"]
+ bin.version = entry["version"]
+ bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
+ bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+ bin.arch_id = get_architecture(entry["architecture"], session).arch_id
+ bin.binarytype = entry["dbtype"]
+
+ # Find poolfile id
+ filename = entry["pool name"] + filename
+ fullpath = os.path.join(cnf["Dir::Pool"], filename)
+ if not entry.get("location id", None):
+ entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
+
+ if entry.get("files id", None):
+ bin.poolfile_id = entry["files id"]
+ poolfile = get_poolfile_by_id(bin.poolfile_id, session)
+ else:
+ poolfile = add_poolfile(filename, entry, entry["location id"], session)
+ bin.poolfile_id = entry["files id"] = poolfile.file_id
+
+ # Find source id
+ bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+ if len(bin_sources) != 1:
+ raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+ (bin.package, bin.version, bin.architecture.arch_string,
+ filename, bin.binarytype, u.pkg.changes["fingerprint"])
+
+ bin.source_id = bin_sources[0].source_id
+
+ # Add and flush object so it has an ID
+ session.add(bin)
+ session.flush()
+
+ # Add BinAssociations
+ for suite_name in u.pkg.changes["distribution"].keys():
+ ba = BinAssociation()
+ ba.binary_id = bin.binary_id
+ ba.suite_id = get_suite(suite_name).suite_id
+ session.add(ba)
+
+ session.flush()
+
+ # Deal with contents - disabled for now
+ #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
+ #if not contents:
+ # print "REJECT\nCould not determine contents of package %s" % bin.package
+ # session.rollback()
+ # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
+
+ return poolfile
+
+__all__.append('add_deb_to_db')
+
+################################################################################
+
+class SourceACL(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<SourceACL %s>' % self.source_acl_id
+
+__all__.append('SourceACL')
+
+################################################################################
+
class SrcAssociation(object):
 def __init__(self, *args, **kwargs):
 pass
@@ -855,6 +2439,17 @@ __all__.append('SrcAssociation')

 ################################################################################

+class SrcFormat(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<SrcFormat %s>' % (self.format_name)
+
+__all__.append('SrcFormat')
+
+################################################################################
+
 class SrcUploader(object):
 def __init__(self, *args, **kwargs):
 pass
@@ -866,6 +2461,26 @@ __all__.append('SrcUploader')

 ################################################################################

+SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
+ ('SuiteID', 'suite_id'),
+ ('Version', 'version'),
+ ('Origin', 'origin'),
+ ('Label', 'label'),
+ ('Description', 'description'),
+ ('Untouchable', 'untouchable'),
+ ('Announce', 'announce'),
+ ('Codename', 'codename'),
+ ('OverrideCodename', 'overridecodename'),
+ ('ValidTime', 'validtime'),
+ ('Priority', 'priority'),
+ ('NotAutomatic', 'notautomatic'),
+ ('CopyChanges', 'copychanges'),
+ ('CopyDotDak', 'copydotdak'),
+ ('CommentsDir', 'commentsdir'),
+ ('OverrideSuite', 'overridesuite'),
+ ('ChangelogBase', 'changelogbase')]
+
+
 class Suite(object):
 def __init__(self, *args, **kwargs):
 pass
@@ -873,8 +2488,30 @@ class Suite(object):
 def __repr__(self):
 return '<Suite %s>' % self.suite_name

+ def __eq__(self, val):
+ if isinstance(val, str):
+ return (self.suite_name == val)
+ # This signals to use the normal comparison operator
+ return NotImplemented
+
+ def __ne__(self, val):
+ if isinstance(val, str):
+ return (self.suite_name != val)
+ # This signals to use the normal comparison operator
+ return NotImplemented
+
+ def details(self):
+ ret = []
+ for disp, field in SUITE_FIELDS:
+ val = getattr(self, field, None)
+ if val is not None:
+ ret.append("%s: %s" % (disp, val))
+
+ return "\n".join(ret)
+
 __all__.append('Suite')

+@session_wrapper
 def get_suite_architecture(suite, architecture, session=None):
 """
 Returns a SuiteArchitecture object given C{suite} and C{architecture} or None if it
@@ -894,18 +2531,18 @@ def get_suite_architecture(suite, architecture, session=None):
 @return: the SuiteArchitecture object or None
 """

- if session is None:
- session = DBConn().session()
-
 q = session.query(SuiteArchitecture)
 q = q.join(Architecture).filter_by(arch_string=architecture)
 q = q.join(Suite).filter_by(suite_name=suite)
- if q.count() == 0:
+
+ try:
+ return q.one()
+ except NoResultFound:
 return None
- return q.one()

 __all__.append('get_suite_architecture')

+@session_wrapper
 def get_suite(suite, session=None):
 """
 Returns Suite object for given C{suite name}.
@@ -918,15 +2555,15 @@ def get_suite(suite, session=None):
 generated if not supplied)

 @rtype: Suite
- @return: Suite object for the requested suite name (None if not presenT)
-
+ @return: Suite object for the requested suite name (None if not present)
 """
- if session is None:
- session = DBConn().session()
+
 q = session.query(Suite).filter_by(suite_name=suite)
-
- if q.count() == 0:
+
+ try:
+ return q.one()
+ except NoResultFound:
 return None
- return q.one()

 __all__.append('get_suite')

@@ -941,6 +2578,7 @@ class SuiteArchitecture(object):

 __all__.append('SuiteArchitecture')

+@session_wrapper
 def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
 """
 Returns list of Architecture objects for given C{suite} name
@@ -964,90 +2602,228 @@ def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
 @return: list of Architecture objects for the given name (may be empty)
 """

- if session is None:
- session = DBConn().session()
-
 q = session.query(Architecture)
 q = q.join(SuiteArchitecture)
 q = q.join(Suite).filter_by(suite_name=suite)
+
 if skipsrc:
 q = q.filter(Architecture.arch_string != 'source')
+
 if skipall:
 q = q.filter(Architecture.arch_string != 'all')
+
 q = q.order_by('arch_string')
+
 return q.all()

 __all__.append('get_suite_architectures')

 ################################################################################

+class SuiteSrcFormat(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<SuiteSrcFormat (%s, %s)>' % (self.suite_id, self.src_format_id)
+
+__all__.append('SuiteSrcFormat')
+
+@session_wrapper
+def get_suite_src_formats(suite, session=None):
+ """
+ Returns list of allowed SrcFormat for C{suite}.
+
+ @type suite: str
+ @param suite: Suite name to search for
+
+ @type session: Session
+ @param session: Optional SQL session object (a temporary one will be
+ generated if not supplied)
+
+ @rtype: list
+ @return: the list of allowed source formats for I{suite}
+ """
+
+ q = session.query(SrcFormat)
+ q = q.join(SuiteSrcFormat)
+ q = q.join(Suite).filter_by(suite_name=suite)
+ q = q.order_by('format_name')
+
+ return q.all()
+
+__all__.append('get_suite_src_formats')
+
+################################################################################
+
 class Uid(object):
 def __init__(self, *args, **kwargs):
 pass

+ def __eq__(self, val):
+ if isinstance(val, str):
+ return (self.uid == val)
+ # This signals to use the normal comparison operator
+ return NotImplemented
+
+ def __ne__(self, val):
+ if isinstance(val, str):
+ return (self.uid != val)
+ # This signals to use the normal comparison operator
+ return NotImplemented
+
 def __repr__(self):
 return '<Uid %s (%s)>' % (self.uid, self.name)

 __all__.append('Uid')

-def get_uid_from_fingerprint(fpr, session=None):
- if session is None:
- session = DBConn().session()
+@session_wrapper
+def add_database_user(uidname, session=None):
+ """
+ Adds a database user.
+
+ @type uidname: string
+ @param uidname: The uid of the user to add
+
+ @type session: SQLAlchemy
+ @param session: Optional SQL session object (a temporary one will be
+ generated if not supplied). If not passed, a commit will be performed at
+ the end of the function, otherwise the caller is responsible for committing.
+
+ @rtype: None
+ @return: nothing
+ """
+
+ session.execute("CREATE USER :uid", {'uid': uidname})
+ session.commit_or_flush()
+
+__all__.append('add_database_user')
+
+@session_wrapper
+def get_or_set_uid(uidname, session=None):
+ """
+ Returns uid object for given uidname.
+ If no matching uidname is found, a row is inserted.
+
+ @type uidname: string
+ @param uidname: The uid to add
+
+ @type session: SQLAlchemy
+ @param session: Optional SQL session object (a temporary one will be
+ generated if not supplied). If not passed, a commit will be performed at
+ the end of the function, otherwise the caller is responsible for committing.
+
+ @rtype: Uid
+ @return: the uid object for the given uidname
+ """
+
+ q = session.query(Uid).filter_by(uid=uidname)
+
+ try:
+ ret = q.one()
+ except NoResultFound:
+ uid = Uid()
+ uid.uid = uidname
+ session.add(uid)
+ session.commit_or_flush()
+ ret = uid
+
+ return ret
+
+__all__.append('get_or_set_uid')
+
+@session_wrapper
+def get_uid_from_fingerprint(fpr, session=None):
 q = session.query(Uid)
 q = q.join(Fingerprint).filter_by(fingerprint=fpr)

- if q.count() != 1:
- return None
- else:
+ try:
 return q.one()
+ except NoResultFound:
+ return None

 __all__.append('get_uid_from_fingerprint')

 ################################################################################

-class DBConn(Singleton):
+class UploadBlock(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
+
+__all__.append('UploadBlock')
+
+################################################################################
+
+class DBConn(object):
 """
 database module init.
 """
+ __shared_state = {}
+
 def __init__(self, *args, **kwargs):
- super(DBConn, self).__init__(*args, **kwargs)
+ self.__dict__ = self.__shared_state

- def _startup(self, *args, **kwargs):
- self.debug = False
- if kwargs.has_key('debug'):
- self.debug = True
- self.__createconn()
+ if not getattr(self, 'initialised', False):
+ self.initialised = True
+ self.debug = kwargs.has_key('debug')
+ self.__createconn()

 def __setuptables(self):
- self.tbl_architecture = Table('architecture', self.db_meta, autoload=True)
- self.tbl_archive = Table('archive', self.db_meta, autoload=True)
- self.tbl_bin_associations = Table('bin_associations', self.db_meta, autoload=True)
- self.tbl_binaries = Table('binaries', self.db_meta, autoload=True)
- self.tbl_component = Table('component', self.db_meta, autoload=True)
- self.tbl_config = Table('config', self.db_meta, autoload=True)
- self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True)
- self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True)
- self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True)
- self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True)
- self.tbl_files = Table('files', self.db_meta, autoload=True)
- self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True)
- self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True)
- self.tbl_location = Table('location', self.db_meta, autoload=True)
- self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True)
- self.tbl_override = Table('override', self.db_meta, autoload=True)
- self.tbl_override_type = Table('override_type', self.db_meta, autoload=True)
- self.tbl_pending_content_associations = Table('pending_content_associations', self.db_meta, autoload=True)
- self.tbl_priority = Table('priority', self.db_meta, autoload=True)
- self.tbl_queue = Table('queue', self.db_meta, autoload=True)
- self.tbl_queue_build = Table('queue_build', self.db_meta, autoload=True)
- self.tbl_section = Table('section', self.db_meta, autoload=True)
- self.tbl_source = Table('source', self.db_meta, autoload=True)
- self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True) - self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True) - self.tbl_suite = Table('suite', self.db_meta, autoload=True) - self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True) - self.tbl_uid = Table('uid', self.db_meta, autoload=True) + tables = ( + 'architecture', + 'archive', + 'bin_associations', + 'binaries', + 'binary_acl', + 'binary_acl_map', + 'build_queue', + 'build_queue_files', + 'component', + 'config', + 'content_associations', + 'content_file_names', + 'content_file_paths', + 'changes_pending_binaries', + 'changes_pending_files', + 'changes_pending_files_map', + 'changes_pending_source', + 'changes_pending_source_files', + 'changes_pool_files', + 'dsc_files', + 'files', + 'fingerprint', + 'keyrings', + 'changes', + 'keyring_acl_map', + 'location', + 'maintainer', + 'new_comments', + 'override', + 'override_type', + 'pending_content_associations', + 'policy_queue', + 'priority', + 'section', + 'source', + 'source_acl', + 'src_associations', + 'src_format', + 'src_uploaders', + 'suite', + 'suite_architectures', + 'suite_src_formats', + 'suite_build_queue_copy', + 'uid', + 'upload_blocks', + ) + + for table_name in tables: + table = Table(table_name, self.db_meta, autoload=True) + setattr(self, 'tbl_%s' % table_name, table) def __setupmappers(self): mapper(Architecture, self.tbl_architecture, @@ -1064,6 +2840,13 @@ class DBConn(Singleton): binary_id = self.tbl_bin_associations.c.bin, binary = relation(DBBinary))) + mapper(BuildQueue, self.tbl_build_queue, + properties = dict(queue_id = self.tbl_build_queue.c.id)) + + mapper(BuildQueueFile, self.tbl_build_queue_files, + properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'), + poolfile = relation(PoolFile, backref='buildqueueinstances'))) + mapper(DBBinary, self.tbl_binaries, properties = dict(binary_id = self.tbl_binaries.c.id, package = self.tbl_binaries.c.package, @@ -1083,6 +2866,14 @@ class DBConn(Singleton): binassociations = relation(BinAssociation, primaryjoin=(self.tbl_binaries.c.id==self.tbl_bin_associations.c.bin)))) + mapper(BinaryACL, self.tbl_binary_acl, + properties = dict(binary_acl_id = self.tbl_binary_acl.c.id)) + + mapper(BinaryACLMap, self.tbl_binary_acl_map, + properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id, + fingerprint = relation(Fingerprint, backref="binary_acl_map"), + architecture = relation(Architecture))) + mapper(Component, self.tbl_component, properties = dict(component_id = self.tbl_component.c.id, component_name = self.tbl_component.c.name)) @@ -1090,24 +2881,6 @@ class DBConn(Singleton): mapper(DBConfig, self.tbl_config, properties = dict(config_id = self.tbl_config.c.id)) - mapper(ContentAssociation, self.tbl_content_associations, - properties = dict(ca_id = self.tbl_content_associations.c.id, - filename_id = self.tbl_content_associations.c.filename, - filename = relation(ContentFilename), - filepath_id = self.tbl_content_associations.c.filepath, - filepath = relation(ContentFilepath), - binary_id = self.tbl_content_associations.c.binary_pkg, - binary = relation(DBBinary))) - - - mapper(ContentFilename, self.tbl_content_file_names, - properties = dict(cafilename_id = self.tbl_content_file_names.c.id, - filename = self.tbl_content_file_names.c.file)) - - mapper(ContentFilepath, self.tbl_content_file_paths, - properties = dict(cafilepath_id = self.tbl_content_file_paths.c.id, - filepath = 
self.tbl_content_file_paths.c.path)) - mapper(DSCFile, self.tbl_dsc_files, properties = dict(dscfile_id = self.tbl_dsc_files.c.id, source_id = self.tbl_dsc_files.c.source, @@ -1126,12 +2899,64 @@ class DBConn(Singleton): uid_id = self.tbl_fingerprint.c.uid, uid = relation(Uid), keyring_id = self.tbl_fingerprint.c.keyring, - keyring = relation(Keyring))) + keyring = relation(Keyring), + source_acl = relation(SourceACL), + binary_acl = relation(BinaryACL))) mapper(Keyring, self.tbl_keyrings, properties = dict(keyring_name = self.tbl_keyrings.c.name, keyring_id = self.tbl_keyrings.c.id)) + mapper(DBChange, self.tbl_changes, + properties = dict(change_id = self.tbl_changes.c.id, + poolfiles = relation(PoolFile, + secondary=self.tbl_changes_pool_files, + backref="changeslinks"), + seen = self.tbl_changes.c.seen, + source = self.tbl_changes.c.source, + binaries = self.tbl_changes.c.binaries, + architecture = self.tbl_changes.c.architecture, + distribution = self.tbl_changes.c.distribution, + urgency = self.tbl_changes.c.urgency, + maintainer = self.tbl_changes.c.maintainer, + changedby = self.tbl_changes.c.changedby, + date = self.tbl_changes.c.date, + version = self.tbl_changes.c.version, + files = relation(ChangePendingFile, + secondary=self.tbl_changes_pending_files_map, + backref="changesfile"), + in_queue_id = self.tbl_changes.c.in_queue, + in_queue = relation(PolicyQueue, + primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)), + approved_for_id = self.tbl_changes.c.approved_for)) + + mapper(ChangePendingBinary, self.tbl_changes_pending_binaries, + properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id)) + + mapper(ChangePendingFile, self.tbl_changes_pending_files, + properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id, + filename = self.tbl_changes_pending_files.c.filename, + size = self.tbl_changes_pending_files.c.size, + md5sum = self.tbl_changes_pending_files.c.md5sum, + sha1sum = self.tbl_changes_pending_files.c.sha1sum, + sha256sum = self.tbl_changes_pending_files.c.sha256sum)) + + mapper(ChangePendingSource, self.tbl_changes_pending_source, + properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id, + change = relation(DBChange), + maintainer = relation(Maintainer, + primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)), + changedby = relation(Maintainer, + primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)), + fingerprint = relation(Fingerprint), + source_files = relation(ChangePendingFile, + secondary=self.tbl_changes_pending_source_files, + backref="pending_sources"))) + mapper(KeyringACLMap, self.tbl_keyring_acl_map, + properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id, + keyring = relation(Keyring, backref="keyring_acl_map"), + architecture = relation(Architecture))) + mapper(Location, self.tbl_location, properties = dict(location_id = self.tbl_location.c.id, component_id = self.tbl_location.c.component, @@ -1143,6 +2968,9 @@ class DBConn(Singleton): mapper(Maintainer, self.tbl_maintainer, properties = dict(maintainer_id = self.tbl_maintainer.c.id)) + mapper(NewComment, self.tbl_new_comments, + properties = dict(comment_id = self.tbl_new_comments.c.id)) + mapper(Override, self.tbl_override, properties = dict(suite_id = self.tbl_override.c.suite, suite = relation(Suite), @@ -1159,24 +2987,12 @@ class DBConn(Singleton): properties = dict(overridetype = self.tbl_override_type.c.type, 
overridetype_id = self.tbl_override_type.c.id)) - mapper(PendingContentAssociation, self.tbl_pending_content_associations, - properties = dict(pca_id = self.tbl_pending_content_associations.c.id, - filepath_id = self.tbl_pending_content_associations.c.filepath, - filepath = relation(ContentFilepath), - filename_id = self.tbl_pending_content_associations.c.filename, - filename = relation(ContentFilename))) + mapper(PolicyQueue, self.tbl_policy_queue, + properties = dict(policy_queue_id = self.tbl_policy_queue.c.id)) mapper(Priority, self.tbl_priority, properties = dict(priority_id = self.tbl_priority.c.id)) - mapper(Queue, self.tbl_queue, - properties = dict(queue_id = self.tbl_queue.c.id)) - - mapper(QueueBuild, self.tbl_queue_build, - properties = dict(suite_id = self.tbl_queue_build.c.suite, - queue_id = self.tbl_queue_build.c.queue, - queue = relation(Queue))) - mapper(Section, self.tbl_section, properties = dict(section_id = self.tbl_section.c.id)) @@ -1196,7 +3012,11 @@ class DBConn(Singleton): srcfiles = relation(DSCFile, primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)), srcassociations = relation(SrcAssociation, - primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source)))) + primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source)), + srcuploaders = relation(SrcUploader))) + + mapper(SourceACL, self.tbl_source_acl, + properties = dict(source_acl_id = self.tbl_source_acl.c.id)) mapper(SrcAssociation, self.tbl_src_associations, properties = dict(sa_id = self.tbl_src_associations.c.id, @@ -1205,6 +3025,10 @@ class DBConn(Singleton): source_id = self.tbl_src_associations.c.source, source = relation(DBSource))) + mapper(SrcFormat, self.tbl_src_format, + properties = dict(src_format_id = self.tbl_src_format.c.id, + format_name = self.tbl_src_format.c.format_name)) + mapper(SrcUploader, self.tbl_src_uploaders, properties = dict(uploader_id = self.tbl_src_uploaders.c.id, source_id = self.tbl_src_uploaders.c.source, @@ -1215,7 +3039,9 @@ class DBConn(Singleton): primaryjoin=(self.tbl_src_uploaders.c.maintainer==self.tbl_maintainer.c.id)))) mapper(Suite, self.tbl_suite, - properties = dict(suite_id = self.tbl_suite.c.id)) + properties = dict(suite_id = self.tbl_suite.c.id, + policy_queue = relation(PolicyQueue), + copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy))) mapper(SuiteArchitecture, self.tbl_suite_architectures, properties = dict(suite_id = self.tbl_suite_architectures.c.suite, @@ -1223,10 +3049,21 @@ class DBConn(Singleton): arch_id = self.tbl_suite_architectures.c.architecture, architecture = relation(Architecture))) + mapper(SuiteSrcFormat, self.tbl_suite_src_formats, + properties = dict(suite_id = self.tbl_suite_src_formats.c.suite, + suite = relation(Suite, backref='suitesrcformats'), + src_format_id = self.tbl_suite_src_formats.c.src_format, + src_format = relation(SrcFormat))) + mapper(Uid, self.tbl_uid, properties = dict(uid_id = self.tbl_uid.c.id, fingerprint = relation(Fingerprint))) + mapper(UploadBlock, self.tbl_upload_blocks, + properties = dict(upload_block_id = self.tbl_upload_blocks.c.id, + fingerprint = relation(Fingerprint, backref="uploadblocks"), + uid = relation(Uid, backref="uploadblocks"))) + ## Connection functions def __createconn(self): from config import Config @@ -1258,3 +3095,4 @@ class DBConn(Singleton): __all__.append('DBConn') +
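
################################################################################

A minimal usage sketch of the API reworked in this patch (hypothetical caller
code, not part of the diff above; it assumes a configured dak installation so
that DBConn() can connect). Functions decorated with @session_wrapper accept
an optional session: called without one, they open a private session and close
it when they return, with commit_or_flush() acting as commit; called with an
explicit session, the caller owns the transaction and commit_or_flush() only
flushes. DBConn itself is now a Borg-style shared-state class, so repeated
instantiation reuses the same engine, tables and mappers.

    from daklib.dbconn import DBConn, get_suite, get_suite_architectures

    # Implicit session: the helper opens and closes its own session.
    suite = get_suite("unstable")

    # Explicit session: the caller controls commit/rollback and close.
    session = DBConn().session()
    try:
        suite = get_suite("unstable", session=session)
        if suite is not None:
            # Suite.__eq__ allows direct comparison against a string.
            assert suite == "unstable"
            archs = get_suite_architectures(suite.suite_name,
                                            skipsrc=True, skipall=True,
                                            session=session)
            print suite.details()
            print [a.arch_string for a in archs]
        session.commit()
    finally:
        session.close()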