X-Git-Url: https://git.decadent.org.uk/gitweb/?a=blobdiff_plain;f=daklib%2Fdbconn.py;h=8543ab101642dca14056a4d77fba72bc6091f24e;hb=c55d086c0c8463fcd2d8dcee2dcc6c414ee36d91;hp=a1ac7038ce8cb920fea6ac6a9e80fe20855ad2f1;hpb=04a4fe70fce62ece3b61fce34d5e6effdb4ab8b0;p=dak.git diff --git a/daklib/dbconn.py b/daklib/dbconn.py old mode 100755 new mode 100644 index a1ac7038..8543ab10 --- a/daklib/dbconn.py +++ b/daklib/dbconn.py @@ -34,553 +34,2959 @@ ################################################################################ import os +import re import psycopg2 import traceback +from datetime import datetime, timedelta +from errno import ENOENT +from tempfile import mkstemp, mkdtemp + +from inspect import getargspec + +import sqlalchemy +from sqlalchemy import create_engine, Table, MetaData +from sqlalchemy.orm import sessionmaker, mapper, relation +from sqlalchemy import types as sqltypes + +# Don't remove this, we re-export the exceptions to scripts which import us +from sqlalchemy.exc import * +from sqlalchemy.orm.exc import NoResultFound -from singleton import Singleton from config import Config +from textutils import fix_maintainer ################################################################################ -class Cache(object): - def __init__(self, hashfunc=None): - if hashfunc: - self.hashfunc = hashfunc - else: - self.hashfunc = lambda x: x['value'] +# Patch in support for the debversion field type so that it works during +# reflection - self.data = {} +class DebVersion(sqltypes.Text): + def get_col_spec(self): + return "DEBVERSION" - def SetValue(self, keys, value): - self.data[self.hashfunc(keys)] = value +sa_major_version = sqlalchemy.__version__[0:3] +if sa_major_version == "0.5": + from sqlalchemy.databases import postgres + postgres.ischema_names['debversion'] = DebVersion +else: + raise Exception("dak isn't ported to SQLA versions != 0.5 yet. See daklib/dbconn.py") - def GetValue(self, keys): - return self.data.get(self.hashfunc(keys)) +################################################################################ + +__all__ = ['IntegrityError', 'SQLAlchemyError'] ################################################################################ -class DBConn(Singleton): +def session_wrapper(fn): """ - database module init. + Wrapper around common ".., session=None):" handling. If the wrapped + function is called without passing 'session', we create a local one + and destroy it when the function ends. + + Also attaches a commit_or_flush method to the session; if we created a + local session, this is a synonym for session.commit(), otherwise it is a + synonym for session.flush(). 
""" - def __init__(self, *args, **kwargs): - super(DBConn, self).__init__(*args, **kwargs) - def _startup(self, *args, **kwargs): - self.__createconn() - self.__init_caches() + def wrapped(*args, **kwargs): + private_transaction = False - ## Connection functions - def __createconn(self): - cnf = Config() - connstr = "dbname=%s" % "projectbstew" #cnf["DB::Name"] - print( "connstr: %s "% connstr) - if cnf["DB::Host"]: - connstr += " host=%s" % cnf["DB::Host"] - if cnf["DB::Port"] and cnf["DB::Port"] != "-1": - connstr += " port=%s" % cnf["DB::Port"] + # Find the session object + session = kwargs.get('session') - self.db_con = psycopg2.connect(connstr) + if session is None: + if len(args) <= len(getargspec(fn)[0]) - 1: + # No session specified as last argument or in kwargs + private_transaction = True + session = kwargs['session'] = DBConn().session() + else: + # Session is last argument in args + session = args[-1] + if session is None: + args = list(args) + session = args[-1] = DBConn().session() + private_transaction = True + + if private_transaction: + session.commit_or_flush = session.commit + else: + session.commit_or_flush = session.flush - def reconnect(self): try: - self.db_con.close() - except psycopg2.InterfaceError: - pass - - self.db_con = None - self.__createconn() - - ## Cache functions - def __init_caches(self): - self.caches = {'suite': Cache(), - 'section': Cache(), - 'priority': Cache(), - 'override_type': Cache(), - 'architecture': Cache(), - 'archive': Cache(), - 'component': Cache(), - 'content_path_names': Cache(), - 'content_file_names': Cache(), - 'location': Cache(lambda x: '%s_%s_%s' % (x['location'], x['component'], x['location'])), - 'maintainer': {}, # TODO - 'keyring': {}, # TODO - 'source': Cache(lambda x: '%s_%s_' % (x['source'], x['version'])), - 'files': Cache(lambda x: '%s_%s_' % (x['filename'], x['location'])), - 'maintainer': {}, # TODO - 'fingerprint': {}, # TODO - 'queue': {}, # TODO - 'uid': {}, # TODO - 'suite_version': Cache(lambda x: '%s_%s' % (x['source'], x['suite'])), - } - - self.prepared_statements = {} - - def prepare(self,name,statement): - if not self.prepared_statements.has_key(name): - c = self.cursor() - c.execute(statement) - self.prepared_statements[name] = statement - - def clear_caches(self): - self.__init_caches() - - ## Functions to pass through to the database connector - def cursor(self): - return self.db_con.cursor() - - def commit(self): - return self.db_con.commit() - - ## Get functions - def __get_single_id(self, query, values, cachename=None): - # This is a bit of a hack but it's an internal function only - if cachename is not None: - res = self.caches[cachename].GetValue(values) - if res: - return res - - c = self.db_con.cursor() - c.execute(query, values) - - if c.rowcount != 1: - return None + return fn(*args, **kwargs) + finally: + if private_transaction: + # We created a session; close it. + session.close() - res = c.fetchone()[0] + wrapped.__doc__ = fn.__doc__ + wrapped.func_name = fn.func_name - if cachename is not None: - self.caches[cachename].SetValue(values, res) + return wrapped - return res +__all__.append('session_wrapper') - def __get_id(self, retfield, table, qfield, value): - query = "SELECT %s FROM %s WHERE %s = %%(value)s" % (retfield, table, qfield) - return self.__get_single_id(query, {'value': value}, cachename=table) +################################################################################ - def get_suite_id(self, suite): - """ - Returns database id for given C{suite}. 
-        Results are kept in a cache during runtime to minimize database queries.
+class Architecture(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-        @type suite: string
-        @param suite: The name of the suite
+    def __eq__(self, val):
+        if isinstance(val, str):
+            return (self.arch_string == val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
 
-        @rtype: int
-        @return: the database id for the given suite
+    def __ne__(self, val):
+        if isinstance(val, str):
+            return (self.arch_string != val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
 
-        """
-        suiteid = self.__get_id('id', 'suite', 'suite_name', suite)
-        if suiteid is None:
-            return None
-        else:
-            return int(suiteid)
+    def __repr__(self):
+        return '<Architecture %s>' % self.arch_string
 
-    def get_section_id(self, section):
-        """
-        Returns database id for given C{section}.
-        Results are kept in a cache during runtime to minimize database queries.
+__all__.append('Architecture')
 
-        @type section: string
-        @param section: The name of the section
+@session_wrapper
+def get_architecture(architecture, session=None):
+    """
+    Returns Architecture object for the given C{architecture} name.
+
+    @type architecture: string
+    @param architecture: The name of the architecture
 
-        @rtype: int
-        @return: the database id for the given section
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
 
-        """
-        return self.__get_id('id', 'section', 'section', section)
+    @rtype: Architecture
+    @return: Architecture object for the given arch (None if not present)
+    """
 
-    def get_priority_id(self, priority):
-        """
-        Returns database id for given C{priority}.
-        Results are kept in a cache during runtime to minimize database queries.
+    q = session.query(Architecture).filter_by(arch_string=architecture)
 
-        @type priority: string
-        @param priority: The name of the priority
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
 
-        @rtype: int
-        @return: the database id for the given priority
+__all__.append('get_architecture')
 
-        """
-        return self.__get_id('id', 'priority', 'priority', priority)
+@session_wrapper
+def get_architecture_suites(architecture, session=None):
+    """
+    Returns list of Suite objects for given C{architecture} name
 
-    def get_override_type_id(self, override_type):
-        """
-        Returns database id for given override C{type}.
-        Results are kept in a cache during runtime to minimize database queries.
+    @type architecture: str
+    @param architecture: Architecture name to search for
 
-        @type override_type: string
-        @param override_type: The name of the override type
+    @type session: Session
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied)
 
-        @rtype: int
-        @return: the database id for the given override type
+    @rtype: list
+    @return: list of Suite objects for the given name (may be empty)
+    """
 
-        """
-        return self.__get_id('id', 'override_type', 'type', override_type)
+    q = session.query(Suite)
+    q = q.join(SuiteArchitecture)
+    q = q.join(Architecture).filter_by(arch_string=architecture).order_by('suite_name')
+    ret = q.all()
 
-        @type architecture: string
-        @param architecture: The name of the override type
+    return ret
 
-        @rtype: int
-        @return: the database id for the given architecture
+__all__.append('get_architecture_suites')
 
-        """
-        return self.__get_id('id', 'architecture', 'arch_string', architecture)
+################################################################################
 
-    def get_archive_id(self, archive):
-        """
-        returns database id for given c{archive}.
-        results are kept in a cache during runtime to minimize database queries.
+class Archive(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-        @type archive: string
-        @param archive: the name of the override type
+    def __repr__(self):
+        return '<Archive %s>' % self.archive_name
 
-        @rtype: int
-        @return: the database id for the given archive
+__all__.append('Archive')
 
-        """
-        return self.__get_id('id', 'archive', 'lower(name)', archive)
+@session_wrapper
+def get_archive(archive, session=None):
+    """
+    Returns Archive object for the given C{archive} name.
 
-    def get_component_id(self, component):
-        """
-        Returns database id for given C{component}.
-        Results are kept in a cache during runtime to minimize database queries.
+    @type archive: string
+    @param archive: the name of the archive
 
-        @type component: string
-        @param component: The name of the override type
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
 
-        @rtype: int
-        @return: the database id for the given component
+    @rtype: Archive
+    @return: Archive object for the given name (None if not present)
 
-        """
-        return self.__get_id('id', 'component', 'lower(name)', component)
+    """
+    archive = archive.lower()
 
-    def get_location_id(self, location, component, archive):
-        """
-        Returns database id for the location behind the given combination of
-          - B{location} - the path of the location, eg. I{/srv/ftp.debian.org/ftp/pool/}
-          - B{component} - the id of the component as returned by L{get_component_id}
-          - B{archive} - the id of the archive as returned by L{get_archive_id}
-        Results are kept in a cache during runtime to minimize database queries.
+    q = session.query(Archive).filter_by(archive_name=archive)
 
-        @type location: string
-        @param location: the path of the location
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
 
-        @type component: int
-        @param component: the id of the component
+__all__.append('get_archive')
 
-        @type archive: int
-        @param archive: the id of the archive
+################################################################################
 
-        @rtype: int
-        @return: the database id for the location
+class BinAssociation(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-        """
+    def __repr__(self):
+        return '<BinAssociation %s (%s, %s)>' % (self.ba_id, self.binary, self.suite)
 
-        archive_id = self.get_archive_id(archive)
+__all__.append('BinAssociation')
 
-        if not archive_id:
-            return None
+################################################################################
+
+class BinContents(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-        res = None
+    def __repr__(self):
+        return '<BinContents (%s, %s)>' % (self.binary, self.filename)
 
-        if component:
-            component_id = self.get_component_id(component)
-            if component_id:
-                res = self.__get_single_id("SELECT id FROM location WHERE path=%(location)s AND component=%(component)s AND archive=%(archive)s",
-                        {'location': location,
-                         'archive': int(archive_id),
-                         'component': component_id}, cachename='location')
-        else:
-            res = self.__get_single_id("SELECT id FROM location WHERE path=%(location)s AND archive=%(archive)d",
-                    {'location': location, 'archive': archive_id, 'component': ''}, cachename='location')
+__all__.append('BinContents')
 
-        return res
+################################################################################
+
+class DBBinary(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-    def get_source_id(self, source, version):
-        """
-        Returns database id for the combination of C{source} and C{version}
-          - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
-          - B{version}
-        Results are kept in a cache during runtime to minimize database queries.
+    def __repr__(self):
+        return '<DBBinary %s (%s, %s)>' % (self.package, self.version, self.architecture)
 
-        @type source: string
-        @param source: source package name
+__all__.append('DBBinary')
 
-        @type version: string
-        @param version: the source version
+@session_wrapper
+def get_suites_binary_in(package, session=None):
+    """
+    Returns list of Suite objects in which the given C{package} name appears
 
-        @rtype: int
-        @return: the database id for the source
+    @type package: str
+    @param package: DBBinary package name to search for
 
-        """
-        return self.__get_single_id("SELECT id FROM source s WHERE s.source=%(source)s AND s.version=%(version)s",
-                                    {'source': source, 'version': version}, cachename='source')
+    @rtype: list
+    @return: list of Suite objects for the given package
+    """
 
-    def get_suite_version(self, source, suite):
+    return session.query(Suite).join(BinAssociation).join(DBBinary).filter_by(package=package).all()
 
-        """
-        Returns database id for a combination of C{source} and C{suite}.
+__all__.append('get_suites_binary_in')
 
-          - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
-          - B{suite} - a suite name, eg. I{unstable}
 
-        Results are kept in a cache during runtime to minimize database queries.
+@session_wrapper +def get_binary_from_id(binary_id, session=None): + """ + Returns DBBinary object for given C{id} - @type source: string - @param source: source package name + @type binary_id: int + @param binary_id: Id of the required binary - @type suite: string - @param suite: the suite name + @type session: Session + @param session: Optional SQLA session object (a temporary one will be + generated if not supplied) - @rtype: string - @return: the version for I{source} in I{suite} + @rtype: DBBinary + @return: DBBinary object for the given binary (None if not present) + """ - """ - return self.__get_single_id(""" - SELECT s.version FROM source s, suite su, src_associations sa - WHERE sa.source=s.id - AND sa.suite=su.id - AND su.suite_name=%(suite)s - AND s.source=%(source)""", {'suite': suite, 'source': source}, cachename='suite_version') + q = session.query(DBBinary).filter_by(binary_id=binary_id) + try: + return q.one() + except NoResultFound: + return None - def get_files_id (self, filename, size, md5sum, location_id): - """ - Returns -1, -2 or the file_id for filename, if its C{size} and C{md5sum} match an - existing copy. +__all__.append('get_binary_from_id') - The database is queried using the C{filename} and C{location_id}. If a file does exist - at that location, the existing size and md5sum are checked against the provided - parameters. A size or checksum mismatch returns -2. If more than one entry is - found within the database, a -1 is returned, no result returns None, otherwise - the file id. +@session_wrapper +def get_binaries_from_name(package, version=None, architecture=None, session=None): + """ + Returns list of DBBinary objects for given C{package} name - Results are kept in a cache during runtime to minimize database queries. 
+    @type package: str
+    @param package: DBBinary package name to search for
 
-        @type filename: string
-        @param filename: the filename of the file to check against the DB
+    @type version: str or None
+    @param version: Version to search for (or None)
 
-        @type size: int
-        @param size: the size of the file to check against the DB
+    @type architecture: str, list or None
+    @param architecture: Architectures to limit to (or None if no limit)
 
-        @type md5sum: string
-        @param md5sum: the md5sum of the file to check against the DB
+    @type session: Session
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied)
 
-        @type location_id: int
-        @param location_id: the id of the location as returned by L{get_location_id}
+    @rtype: list
+    @return: list of DBBinary objects for the given name (may be empty)
+    """
 
-        @rtype: int / None
-        @return: Various return values are possible:
-                   - -2: size/checksum error
-                   - -1: more than one file found in database
-                   - None: no file found in database
-                   - int: file id
+    q = session.query(DBBinary).filter_by(package=package)
 
-        """
-        values = {'filename' : filename,
-                  'location' : location_id}
+    if version is not None:
+        q = q.filter_by(version=version)
 
-        res = self.caches['files'].GetValue( values )
+    if architecture is not None:
+        if not isinstance(architecture, list):
+            architecture = [architecture]
+        q = q.join(Architecture).filter(Architecture.arch_string.in_(architecture))
 
-        if not res:
-            query = """SELECT id, size, md5sum
-                       FROM files
-                       WHERE filename = %(filename)s AND location = %(location)s"""
+    ret = q.all()
 
-            cursor = self.db_con.cursor()
-            cursor.execute( query, values )
+    return ret
 
-            if cursor.rowcount == 0:
-                res = None
+__all__.append('get_binaries_from_name')
 
-            elif cursor.rowcount != 1:
-                res = -1
+@session_wrapper
+def get_binaries_from_source_id(source_id, session=None):
+    """
+    Returns list of DBBinary objects for given C{source_id}
 
-            else:
-                row = cursor.fetchone()
+    @type source_id: int
+    @param source_id: source_id to search for
 
-                if row[1] != int(size) or row[2] != md5sum:
-                    res = -2
+    @type session: Session
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied)
 
-                else:
-                    self.caches['files'].SetValue(values, row[0])
-                    res = row[0]
+    @rtype: list
+    @return: list of DBBinary objects for the given source id (may be empty)
+    """
+
+    return session.query(DBBinary).filter_by(source_id=source_id).all()
+
+__all__.append('get_binaries_from_source_id')
+
+@session_wrapper
+def get_binary_from_name_suite(package, suitename, session=None):
+    ### For dak examine-package
+    ### XXX: Doesn't use object API yet
+
+    sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name
+             FROM binaries b, files fi, location l, component c, bin_associations ba, suite su
+             WHERE b.package=:package
+               AND b.file = fi.id
+               AND fi.location = l.id
+               AND l.component = c.id
+               AND ba.bin=b.id
+               AND ba.suite = su.id
+               AND su.suite_name=:suitename
+          ORDER BY b.version DESC"""
+
+    return session.execute(sql, {'package': package, 'suitename': suitename})
+
+__all__.append('get_binary_from_name_suite')
+
+@session_wrapper
+def get_binary_components(package, suitename, arch, session=None):
+    # Check for packages that have moved from one component to another
+    query = """SELECT c.name FROM binaries b, bin_associations ba, suite s, location l, component c, architecture a, files f
+    WHERE b.package=:package AND s.suite_name=:suitename
+      AND (a.arch_string = :arch OR a.arch_string = 'all')
+      AND ba.bin = b.id AND ba.suite = s.id AND b.architecture = a.id
+      AND f.location = l.id
+      AND l.component = c.id
+      AND b.file = f.id"""
+
+    vals = {'package': package, 'suitename': suitename, 'arch': arch}
+
+    return session.execute(query, vals)
+
+__all__.append('get_binary_components')
+
+################################################################################
+
+class BinaryACL(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BinaryACL %s>' % self.binary_acl_id
+
+__all__.append('BinaryACL')
+
+################################################################################
+
+class BinaryACLMap(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BinaryACLMap %s>' % self.binary_acl_map_id
+
+__all__.append('BinaryACLMap')
+
+################################################################################
+
+MINIMAL_APT_CONF="""
+Dir
+{
+   ArchiveDir "%(archivepath)s";
+   OverrideDir "/srv/ftp.debian.org/scripts/override/";
+   CacheDir "/srv/ftp.debian.org/database/";
+};
+
+Default
+{
+   Packages::Compress ". bzip2 gzip";
+   Sources::Compress ". bzip2 gzip";
+   DeLinkLimit 0;
+   FileMode 0664;
+}
+
+bindirectory "incoming"
+{
+   Packages "Packages";
+   Contents " ";
+
+   BinOverride "override.sid.all3";
+   BinCacheDB "packages-accepted.db";
+
+   FileList "%(filelist)s";
+
+   PathPrefix "";
+   Packages::Extensions ".deb .udeb";
+};
+
+bindirectory "incoming/"
+{
+   Sources "Sources";
+   BinOverride "override.sid.all3";
+   SrcOverride "override.sid.all3.src";
+   FileList "%(filelist)s";
+};
+"""
+
+class BuildQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueue %s>' % self.queue_name
 
-    def get_or_set_contents_file_id(self, filename):
-        """
-        Returns database id for given filename.
+    def write_metadata(self, ourtime, force=False):
+        # Do we write out metafiles?
+        if not (force or self.generate_metadata):
+            return
 
-        Results are kept in a cache during runtime to minimize database queries.
-        If no matching file is found, a row is inserted.
+        session = DBConn().session().object_session(self)
 
-        @type filename: string
-        @param filename: The filename
+        fl_fd = fl_name = ac_fd = ac_name = None
+        tempdir = None
+        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
+        startdir = os.getcwd()
 
-        @rtype: int
-        @return: the database id for the given component
-        """
         try:
-            values={'value': filename}
-            query = "SELECT id FROM content_file_names WHERE file = %(value)s"
-            id = self.__get_single_id(query, values, cachename='content_file_names')
-            if not id:
-                c = self.db_con.cursor()
-                c.execute( "INSERT INTO content_file_names VALUES (DEFAULT, %(value)s) RETURNING id",
-                           values )
-
-                id = c.fetchone()[0]
-                self.caches['content_file_names'].SetValue(values, id)
-
-            return id
-        except:
-            traceback.print_exc()
-            raise
-
-    def get_or_set_contents_path_id(self, path):
-        """
-        Returns database id for given path.
-
-        Results are kept in a cache during runtime to minimize database queries.
-        If no matching file is found, a row is inserted.
-
-        @type path: string
-        @param path: The filename
-
-        @rtype: int
-        @return: the database id for the given component
-        """
+            # Grab files we want to include
+            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused > ourtime).all()
+
+            # Write file list with newer files
+            (fl_fd, fl_name) = mkstemp()
+            for n in newer:
+                os.write(fl_fd, '%s\n' % n.fullpath)
+            os.close(fl_fd)
+
+            # Write minimal apt.conf
+            # TODO: Remove hardcoding from template
+            (ac_fd, ac_name) = mkstemp()
+            os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
+                                                'filelist': fl_name})
+            os.close(ac_fd)
+
+            # Run apt-ftparchive generate
+            os.chdir(os.path.dirname(fl_name))
+            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(fl_name))
+
+            # Run apt-ftparchive release
+            # TODO: Eww - fix this
+            bname = os.path.basename(self.path)
+            os.chdir(self.path)
+            os.chdir('..')
+            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
+
+            # Sign if necessary
+            if self.signingkey:
+                cnf = Config()
+                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
+                if cnf.has_key("Dinstall::SigningPubKeyring"):
+                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
+
+                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release" % (keyring, self.signingkey))
+
+            # Move the files if we got this far
+            os.rename('Release', os.path.join(bname, 'Release'))
+            if self.signingkey:
+                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
+
+        # Clean up any left behind files
+        finally:
+            os.chdir(startdir)
+            if fl_fd:
+                try:
+                    os.close(fl_fd)
+                except OSError:
+                    pass
+
+            if fl_name:
+                try:
+                    os.unlink(fl_name)
+                except OSError:
+                    pass
+
+            if ac_fd:
+                try:
+                    os.close(ac_fd)
+                except OSError:
+                    pass
+
+            if ac_name:
+                try:
+                    os.unlink(ac_name)
+                except OSError:
+                    pass
+
+    def clean_and_update(self, starttime, dryrun=False):
+        """WARNING: This routine commits for you"""
+        session = DBConn().session().object_session(self)
+
+        ourtime = starttime + timedelta(seconds=self.stay_of_execution)
+
+        if self.generate_metadata:
+            self.write_metadata(ourtime)
+
+        # Grab files older than our execution time
+        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused <= ourtime).all()
+
+        for o in older:
+            killdb = False
+            try:
+                if dryrun:
+                    print "I: Would have removed %s from the queue" % o.fullpath
+                else:
+                    os.unlink(o.fullpath)
+                    killdb = True
+            except OSError, e:
+                # If it wasn't there, don't worry
+                if e.errno == ENOENT:
+                    killdb = True
+                else:
+                    # TODO: Replace with proper logging call
+                    print "E: Could not remove %s" % o.fullpath
+
+            if killdb:
+                session.delete(o)
+
+        session.commit()
+
+
+    def add_file_from_pool(self, poolfile):
+        """Copies a file from the pool into the queue.  Assumes that the PoolFile object is
+        attached to the same SQLAlchemy session as the Queue object is.
+
+        The caller is responsible for committing after calling this function."""
+        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
+
+        # Check if we have a file of this name or this ID already
+        for f in self.queuefiles:
+            if f.fileid is not None and f.fileid == poolfile.file_id or \
+               f.poolfile.filename == poolfile_basename:
+                # In this case, update the BuildQueueFile entry so we
+                # don't remove it too early
+                f.lastused = datetime.now()
+                DBConn().session().object_session(poolfile).add(f)
+                return f
+
+        # Prepare BuildQueueFile object
+        qf = BuildQueueFile()
+        qf.build_queue_id = self.queue_id
+        qf.lastused = datetime.now()
+        qf.filename = poolfile_basename
+
+        targetpath = poolfile.fullpath
+        queuepath = os.path.join(self.path, poolfile_basename)
+
         try:
-            values={'value': path}
-            query = "SELECT id FROM content_file_paths WHERE path = %(value)s"
-            id = self.__get_single_id(query, values, cachename='content_path_names')
-            if not id:
-                c = self.db_con.cursor()
-                c.execute( "INSERT INTO content_file_paths VALUES (DEFAULT, %(value)s) RETURNING id",
-                           values )
-
-                id = c.fetchone()[0]
-                self.caches['content_path_names'].SetValue(values, id)
-
-            return id
-        except:
-            traceback.print_exc()
-            raise
-
-    def get_suite_architectures(self, suite):
-        """
-        Returns list of architectures for C{suite}.
-
-        @type suite: string, int
-        @param suite: the suite name or the suite_id
-
-        @rtype: list
-        @return: the list of architectures for I{suite}
-        """
-
-        suite_id = None
-        if type(suite) == str:
-            suite_id = self.get_suite_id(suite)
-        elif type(suite) == int:
-            suite_id = suite
-        else:
+            if self.copy_files:
+                # We need to copy instead of symlink
+                import utils
+                utils.copy(targetpath, queuepath)
+                # NULL in the fileid field implies a copy
+                qf.fileid = None
+            else:
+                os.symlink(targetpath, queuepath)
+                qf.fileid = poolfile.file_id
+        except OSError:
             return None
 
-        c = self.db_con.cursor()
-        c.execute( """SELECT a.arch_string FROM suite_architectures sa
-                      JOIN architecture a ON (a.id = sa.architecture)
-                      WHERE suite='%s'""" % suite_id )
+        # Get the same session as the PoolFile is using and add the qf to it
+        DBConn().session().object_session(poolfile).add(qf)
+
+        return qf
 
-        return map(lambda x: x[0], c.fetchall())
 
-    def insert_content_paths(self, bin_id, fullpaths):
-        """
-        Make sure given path is associated with given binary id
+__all__.append('BuildQueue')
 
-        @type bin_id: int
-        @param bin_id: the id of the binary
-        @type fullpaths: list
-        @param fullpaths: the list of paths of the file being associated with the binary
+
+@session_wrapper
+def get_build_queue(queuename, session=None):
+    """
+    Returns BuildQueue object for given C{queue name}.
 
-        @return: True upon success
-        """
+    @type queuename: string
+    @param queuename: The name of the queue
 
-        c = self.db_con.cursor()
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
 
-        c.execute("BEGIN WORK")
-        try:
+    @rtype: BuildQueue
+    @return: BuildQueue object for the given queue
+    """
 
-            for fullpath in fullpaths:
-                (path, file) = os.path.split(fullpath)
+    q = session.query(BuildQueue).filter_by(queue_name=queuename)
 
-                if path.startswith( "./" ):
-                    path = path[2:]
-                # Get the necessary IDs ...
-                file_id = self.get_or_set_contents_file_id(file)
-                path_id = self.get_or_set_contents_path_id(path)
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
 
-                c.execute("""INSERT INTO deb_contents
-                             (binary_pkg, filepath, filename)
-                             VALUES ( '%d', '%d', '%d')""" % (bin_id, path_id, file_id) )
+__all__.append('get_build_queue')
 
-            c.execute("COMMIT")
-            return True
-        except:
-            traceback.print_exc()
-            c.execute("ROLLBACK")
-            return False
+################################################################################
+
+class BuildQueueFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-    def insert_pending_content_paths(self, package, fullpaths):
-        """
-        Make sure given paths are temporarily associated with given
-        package
+    def __repr__(self):
+        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
 
-        @type package: dict
-        @param package: the package to associate with should have been read in from the binary control file
-        @type fullpaths: list
-        @param fullpaths: the list of paths of the file being associated with the binary
+    @property
+    def fullpath(self):
+        return os.path.join(self.buildqueue.path, self.filename)
+
+
+__all__.append('BuildQueueFile')
 
-        @return: True upon success
-        """
+################################################################################
+
+class ChangePendingBinary(object):
+    def __init__(self, *args, **kwargs):
+        pass
 
-        c = self.db_con.cursor()
+    def __repr__(self):
+        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
 
-        c.execute("BEGIN WORK")
-        try:
+__all__.append('ChangePendingBinary')
+
+################################################################################
+
+class ChangePendingFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingFile %s>' % self.change_pending_file_id
+
+__all__.append('ChangePendingFile')
+
+################################################################################
+
+class ChangePendingSource(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingSource %s>' % self.change_pending_source_id
+
+__all__.append('ChangePendingSource')
+
+################################################################################
+
+class Component(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __eq__(self, val):
+        if isinstance(val, str):
+            return (self.component_name == val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
+
+    def __ne__(self, val):
+        if isinstance(val, str):
+            return (self.component_name != val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
+
+    def __repr__(self):
+        return '<Component %s>' % self.component_name
+
+
+__all__.append('Component')
+
+@session_wrapper
+def get_component(component, session=None):
+    """
+    Returns Component object for the given C{component} name.
+
+    @type component: string
+    @param component: The name of the component
+
+    @rtype: Component
+    @return: Component object for the given name (None if not present)
+
+    """
+    component = component.lower()
+
+    q = session.query(Component).filter_by(component_name=component)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_component')
+
+################################################################################
+
+class DBConfig(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<DBConfig %s>' % self.name
+
+__all__.append('DBConfig')
+
+################################################################################
+
+@session_wrapper
+def get_or_set_contents_file_id(filename, session=None):
+    """
+    Returns database id for given filename.
+
+    If no matching file is found, a row is inserted.
+
+    @type filename: string
+    @param filename: The filename
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).  If not passed, a commit will be performed at
+    the end of the function, otherwise the caller is responsible for committing.
+
+    @rtype: int
+    @return: the database id for the given filename
+    """
+
+    q = session.query(ContentFilename).filter_by(filename=filename)
+
+    try:
+        ret = q.one().cafilename_id
+    except NoResultFound:
+        cf = ContentFilename()
+        cf.filename = filename
+        session.add(cf)
+        session.commit_or_flush()
+        ret = cf.cafilename_id
+
+    return ret
+
+__all__.append('get_or_set_contents_file_id')
+
+@session_wrapper
+def get_contents(suite, overridetype, section=None, session=None):
+    """
+    Returns contents for a suite / overridetype combination, limiting
+    to a section if not None.
+
+    @type suite: Suite
+    @param suite: Suite object
+
+    @type overridetype: OverrideType
+    @param overridetype: OverrideType object
+
+    @type section: Section
+    @param section: Optional section object to limit results to
+
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: ResultsProxy
+    @return: ResultsProxy object set up to return tuples of (filename, section,
+    package, arch_id)
+    """
+
+    # find me all of the contents for a given suite
+    contents_q = """SELECT (p.path||'/'||n.file) AS fn,
+                            s.section,
+                            b.package,
+                            b.architecture
+                   FROM content_associations c join content_file_paths p ON (c.filepath=p.id)
+                   JOIN content_file_names n ON (c.filename=n.id)
+                   JOIN binaries b ON (b.id=c.binary_pkg)
+                   JOIN override o ON (o.package=b.package)
+                   JOIN section s ON (s.id=o.section)
+                   WHERE o.suite = :suiteid AND o.type = :overridetypeid
+                   AND b.type=:overridetypename"""
+
+    vals = {'suiteid': suite.suite_id,
+            'overridetypeid': overridetype.overridetype_id,
+            'overridetypename': overridetype.overridetype}
+
+    if section is not None:
+        contents_q += " AND s.id = :sectionid"
+        vals['sectionid'] = section.section_id
+
+    contents_q += " ORDER BY fn"
+
+    return session.execute(contents_q, vals)
+
+__all__.append('get_contents')
+
+################################################################################
+
+class ContentFilepath(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ContentFilepath %s>' % self.filepath
+
+__all__.append('ContentFilepath')
+
+@session_wrapper
+def get_or_set_contents_path_id(filepath, session=None):
+    """
+    Returns database id for given path.
+
+    If no matching file is found, a row is inserted.
+
+    @type filepath: string
+    @param filepath: The filepath
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).  If not passed, a commit will be performed at
+    the end of the function, otherwise the caller is responsible for committing.
+
+    @rtype: int
+    @return: the database id for the given path
+    """
+
+    q = session.query(ContentFilepath).filter_by(filepath=filepath)
+
+    try:
+        ret = q.one().cafilepath_id
+    except NoResultFound:
+        cf = ContentFilepath()
+        cf.filepath = filepath
+        session.add(cf)
+        session.commit_or_flush()
+        ret = cf.cafilepath_id
+
+    return ret
+
+__all__.append('get_or_set_contents_path_id')
+
+################################################################################
+
+class ContentAssociation(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ContentAssociation %s>' % self.ca_id
+
+__all__.append('ContentAssociation')
+
+def insert_content_paths(binary_id, fullpaths, session=None):
+    """
+    Make sure given paths are associated with given binary id
+
+    @type binary_id: int
+    @param binary_id: the id of the binary
+    @type fullpaths: list
+    @param fullpaths: the list of paths of the file being associated with the binary
+    @type session: SQLAlchemy session
+    @param session: Optional SQLAlchemy session.  If this is passed, the caller
+    is responsible for ensuring a transaction has begun and committing the
+    results or rolling back based on the result code.  If not passed, a commit
+    will be performed at the end of the function, otherwise the caller is
+    responsible for committing.
+
+    @return: True upon success
+    """
+
+    privatetrans = False
+    if session is None:
+        session = DBConn().session()
+        privatetrans = True
+
+    try:
+        # Insert paths
+        for fullpath in fullpaths:
+            if fullpath.startswith( './' ):
+                fullpath = fullpath[2:]
+
+            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )", { 'filename': fullpath, 'id': binary_id} )
+
+        # Only commit if we set up the session ourself
+        if privatetrans:
+            session.commit()
+            session.close()
+        else:
+            session.flush()
+
+        return True
+
+    except:
+        traceback.print_exc()
+
+        # Only rollback if we set up the session ourself
+        if privatetrans:
+            session.rollback()
+            session.close()
+
+        return False
+
+__all__.append('insert_content_paths')
+
+################################################################################
+
+class DSCFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<DSCFile %s>' % self.dscfile_id
+
+__all__.append('DSCFile')
+
+@session_wrapper
+def get_dscfiles(dscfile_id=None, source_id=None, poolfile_id=None, session=None):
+    """
+    Returns a list of DSCFiles which may be empty
+
+    @type dscfile_id: int (optional)
+    @param dscfile_id: the dscfile_id of the DSCFiles to find
+
+    @type source_id: int (optional)
+    @param source_id: the source id related to the DSCFiles to find
+
+    @type poolfile_id: int (optional)
+    @param poolfile_id: the poolfile id related to the DSCFiles to find
+
+    @rtype: list
+    @return: Possibly empty list of DSCFiles
+    """
+
+    q = session.query(DSCFile)
+
+    if dscfile_id is not None:
+        q = q.filter_by(dscfile_id=dscfile_id)
+
+    if source_id is not None:
+        q = q.filter_by(source_id=source_id)
+
+    if poolfile_id is not None:
+        q = q.filter_by(poolfile_id=poolfile_id)
+
+    return q.all()
+
+__all__.append('get_dscfiles')
+
+################################################################################
+
+class PoolFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<PoolFile %s>' % self.filename
+
+    @property
+    def fullpath(self):
+        return os.path.join(self.location.path, self.filename)
+
+__all__.append('PoolFile')
+
+@session_wrapper
+def check_poolfile(filename, filesize, md5sum, location_id, session=None):
+    """
+    Returns a tuple:
+    (ValidFileFound [boolean or None], PoolFile object or None)
+
+    @type filename: string
+    @param filename: the filename of the file to check against the DB
+
+    @type filesize: int
+    @param filesize: the size of the file to check against the DB
+
+    @type md5sum: string
+    @param md5sum: the md5sum of the file to check against the DB
+
+    @type location_id: int
+    @param location_id: the id of the location to look in
+
+    @rtype: tuple
+    @return: Tuple of length 2.
+             If more than one file found with that name:
+                 (None, None)
+             If valid pool file found: (True, PoolFile object)
+             If valid pool file not found:
+                 (False, None) if no file found
+                 (False, PoolFile object) if file found with size/md5sum mismatch
+    """
+
+    q = session.query(PoolFile).filter_by(filename=filename)
+    q = q.join(Location).filter_by(location_id=location_id)
+
+    ret = None
+
+    if q.count() > 1:
+        ret = (None, None)
+    elif q.count() < 1:
+        ret = (False, None)
+    else:
+        obj = q.one()
+        if obj.md5sum != md5sum or obj.filesize != int(filesize):
+            ret = (False, obj)
+
+    if ret is None:
+        ret = (True, obj)
+
+    return ret
+
+__all__.append('check_poolfile')
+
+@session_wrapper
+def get_poolfile_by_id(file_id, session=None):
+    """
+    Returns a PoolFile object or None for the given id
+
+    @type file_id: int
+    @param file_id: the id of the file to look for
+
+    @rtype: PoolFile or None
+    @return: either the PoolFile object or None
+    """
+
+    q = session.query(PoolFile).filter_by(file_id=file_id)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_poolfile_by_id')
+
+
+@session_wrapper
+def get_poolfile_by_name(filename, location_id=None, session=None):
+    """
+    Returns an array of PoolFile objects for the given filename and
+    (optionally) location_id
+
+    @type filename: string
+    @param filename: the filename of the file to check against the DB
+
+    @type location_id: int
+    @param location_id: the id of the location to look in (optional)
+
+    @rtype: array
+    @return: array of PoolFile objects
+    """
+
+    q = session.query(PoolFile).filter_by(filename=filename)
+
+    if location_id is not None:
+        q = q.join(Location).filter_by(location_id=location_id)
+
+    return q.all()
+
+__all__.append('get_poolfile_by_name')
+
+@session_wrapper
+def get_poolfile_like_name(filename, session=None):
+    """
+    Returns an array of PoolFile objects which are like the given name
+
+    @type filename: string
+    @param filename: the filename of the file to check against the DB
+
+    @rtype: array
+    @return: array of PoolFile objects
+    """
+
+    # TODO: There must be a way of properly using bind parameters with %FOO%
+    q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename))
+
+    return q.all()
+
+__all__.append('get_poolfile_like_name')
+
+@session_wrapper
+def add_poolfile(filename, datadict, location_id, session=None):
+    """
+    Add a new file to the pool
+
+    @type filename: string
+    @param filename: filename
+
+    @type datadict: dict
+    @param datadict: dict with needed data
+
+    @type location_id: int
+    @param location_id: database id of the location
+
+    @rtype: PoolFile
+    @return: the PoolFile object created
+    """
+    poolfile = PoolFile()
+    poolfile.filename = filename
+    poolfile.filesize = datadict["size"]
+    poolfile.md5sum = datadict["md5sum"]
+    poolfile.sha1sum = datadict["sha1sum"]
+    poolfile.sha256sum = datadict["sha256sum"]
+    poolfile.location_id = location_id
+
+    session.add(poolfile)
+    # Flush to get a file id (NB: This is not a commit)
+    session.flush()
+
+    return poolfile
+
+__all__.append('add_poolfile')
+
+################################################################################
+
+class Fingerprint(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<Fingerprint %s>' % self.fingerprint
+
+__all__.append('Fingerprint')
+
+@session_wrapper
+def get_fingerprint(fpr, session=None):
+    """
+    Returns Fingerprint object for given fpr.
+
+    @type fpr: string
+    @param fpr: The fpr to find / add
+
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).
+
+    @rtype: Fingerprint
+    @return: the Fingerprint object for the given fpr or None
+    """
+
+    q = session.query(Fingerprint).filter_by(fingerprint=fpr)
+
+    try:
+        ret = q.one()
+    except NoResultFound:
+        ret = None
+
+    return ret
+
+__all__.append('get_fingerprint')
+
+@session_wrapper
+def get_or_set_fingerprint(fpr, session=None):
+    """
+    Returns Fingerprint object for given fpr.
+
+    If no matching fpr is found, a row is inserted.
+
+    @type fpr: string
+    @param fpr: The fpr to find / add
+
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).  If not passed, a commit will be performed at
+    the end of the function, otherwise the caller is responsible for committing.
+    A flush will be performed either way.
+
+    @rtype: Fingerprint
+    @return: the Fingerprint object for the given fpr
+    """
+
+    q = session.query(Fingerprint).filter_by(fingerprint=fpr)
+
+    try:
+        ret = q.one()
+    except NoResultFound:
+        fingerprint = Fingerprint()
+        fingerprint.fingerprint = fpr
+        session.add(fingerprint)
+        session.commit_or_flush()
+        ret = fingerprint
+
+    return ret
+
+__all__.append('get_or_set_fingerprint')
+
+################################################################################
+
+# Helper routine for Keyring class
+def get_ldap_name(entry):
+    name = []
+    for k in ["cn", "mn", "sn"]:
+        ret = entry.get(k)
+        if ret and ret[0] != "" and ret[0] != "-":
+            name.append(ret[0])
+    return " ".join(name)
+
+################################################################################
+
+class Keyring(object):
+    gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
+                     " --with-colons --fingerprint --fingerprint"
+
+    keys = {}
+    fpr_lookup = {}
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<Keyring %s>' % self.keyring_name
+
+    def de_escape_gpg_str(self, txt):
+        esclist = re.split(r'(\\x..)', txt)
+        for x in range(1,len(esclist),2):
+            esclist[x] = "%c" % (int(esclist[x][2:],16))
+        return "".join(esclist)
+
+    def load_keys(self, keyring):
+        import email.Utils
+
+        if not self.keyring_id:
+            raise Exception('Must be initialized with database information')
+
+        k = os.popen(self.gpg_invocation % keyring, "r")
+        key = None
+        signingkey = False
+
+        for line in k.xreadlines():
+            field = line.split(":")
+            if field[0] == "pub":
+                key = field[4]
+                (name, addr) = email.Utils.parseaddr(field[9])
+                name = re.sub(r"\s*[(].*[)]", "", name)
+                if name == "" or addr == "" or "@" not in addr:
+                    name = field[9]
+                    addr = "invalid-uid"
+                name = self.de_escape_gpg_str(name)
+                self.keys[key] = {"email": addr}
+                if name != "":
+                    self.keys[key]["name"] = name
+                self.keys[key]["aliases"] = [name]
+                self.keys[key]["fingerprints"] = []
+                signingkey = True
+            elif key and field[0] == "sub" and len(field) >= 12:
+                signingkey = ("s" in field[11])
+            elif key and field[0] == "uid":
+                (name, addr) = email.Utils.parseaddr(field[9])
+                if name and name not in self.keys[key]["aliases"]:
+                    self.keys[key]["aliases"].append(name)
+            elif signingkey and field[0] == "fpr":
+                self.keys[key]["fingerprints"].append(field[9])
+                self.fpr_lookup[field[9]] = key
+
+    def import_users_from_ldap(self, session):
+        import ldap
+        cnf = Config()
+
+        LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
+        LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
+
+        l = ldap.open(LDAPServer)
+        l.simple_bind_s("","")
+        Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
+               "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
+               ["uid", "keyfingerprint", "cn", "mn", "sn"])
+
+        ldap_fin_uid_id = {}
+
+        byuid = {}
+        byname = {}
+
+        for i in Attrs:
+            entry = i[1]
+            uid = entry["uid"][0]
+            name = get_ldap_name(entry)
+            fingerprints = entry["keyFingerPrint"]
+            keyid = None
+            for f in fingerprints:
+                key = self.fpr_lookup.get(f, None)
+                if key not in self.keys:
+                    continue
+                self.keys[key]["uid"] = uid
+
+                if keyid is not None:
+                    continue
+                keyid = get_or_set_uid(uid, session).uid_id
+                byuid[keyid] = (uid, name)
+                byname[uid] = (keyid, name)
+
+        return (byname, byuid)
+
+    def generate_users_from_keyring(self, format, session):
+        byuid = {}
+        byname = {}
+        any_invalid = False
+        for x in self.keys.keys():
+            if self.keys[x]["email"] == "invalid-uid":
+                any_invalid = True
+                self.keys[x]["uid"] = format % "invalid-uid"
+            else:
+                uid = format % self.keys[x]["email"]
+                keyid = get_or_set_uid(uid, session).uid_id
+                byuid[keyid] = (uid, self.keys[x]["name"])
+                byname[uid] = (keyid, self.keys[x]["name"])
+                self.keys[x]["uid"] = uid
+
+        if any_invalid:
+            uid = format % "invalid-uid"
+            keyid = get_or_set_uid(uid, session).uid_id
+            byuid[keyid] = (uid, "ungeneratable user id")
+            byname[uid] = (keyid, "ungeneratable user id")
+
+        return (byname, byuid)
+
+__all__.append('Keyring')
+
+@session_wrapper
+def get_keyring(keyring, session=None):
+    """
+    If C{keyring} does not have an entry in the C{keyrings} table yet, return None
+    If C{keyring} already has an entry, simply return the existing Keyring
+
+    @type keyring: string
+    @param keyring: the keyring name
+
+    @rtype: Keyring
+    @return: the Keyring object for this keyring
+    """
+
+    q = session.query(Keyring).filter_by(keyring_name=keyring)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_keyring')
+
+################################################################################
+
+class KeyringACLMap(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<KeyringACLMap %s>' % self.keyring_acl_map_id
+
+__all__.append('KeyringACLMap')
+
+################################################################################
+
+class DBChange(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<DBChange %s>' % self.changesname
+
+__all__.append('DBChange')
+
+@session_wrapper
+def get_dbchange(filename, session=None):
+    """
+    Returns DBChange object for given C{filename}.
+
+    @type filename: string
+    @param filename: the name of the .changes file
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: DBChange
+    @return: DBChange object for the given filename (None if not present)
+
+    """
+    q = session.query(DBChange).filter_by(changesname=filename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_dbchange')
+
+################################################################################
+
+class Location(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<Location %s (%s)>' % (self.path, self.location_id)
+
+__all__.append('Location')
+
+@session_wrapper
+def get_location(location, component=None, archive=None, session=None):
+    """
+    Returns Location object for the given combination of location, component
+    and archive
+
+    @type location: string
+    @param location: the path of the location, e.g. I{/srv/ftp.debian.org/ftp/pool/}
+
+    @type component: string
+    @param component: the component name (if None, no restriction applied)
+
+    @type archive: string
+    @param archive: the archive name (if None, no restriction applied)
+
+    @rtype: Location / None
+    @return: Either a Location object or None if one can't be found
+    """
+
+    q = session.query(Location).filter_by(path=location)
+
+    if archive is not None:
+        q = q.join(Archive).filter_by(archive_name=archive)
+
+    if component is not None:
+        q = q.join(Component).filter_by(component_name=component)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_location')
+
+################################################################################
+
+class Maintainer(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '''<Maintainer '%s' (%s)>''' % (self.name, self.maintainer_id)
+
+    def get_split_maintainer(self):
+        if not hasattr(self, 'name') or self.name is None:
+            return ('', '', '', '')
+
+        return fix_maintainer(self.name.strip())
+
+__all__.append('Maintainer')
+
+@session_wrapper
+def get_or_set_maintainer(name, session=None):
+    """
+    Returns Maintainer object for given maintainer name.
+
+    If no matching maintainer name is found, a row is inserted.
+
+    @type name: string
+    @param name: The maintainer name to add
+
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).  If not passed, a commit will be performed at
+    the end of the function, otherwise the caller is responsible for committing.
+    A flush will be performed either way.
+
+    @rtype: Maintainer
+    @return: the Maintainer object for the given maintainer
+    """
+
+    q = session.query(Maintainer).filter_by(name=name)
+    try:
+        ret = q.one()
+    except NoResultFound:
+        maintainer = Maintainer()
+        maintainer.name = name
+        session.add(maintainer)
+        session.commit_or_flush()
+        ret = maintainer
+
+    return ret
+
+__all__.append('get_or_set_maintainer')
+
+@session_wrapper
+def get_maintainer(maintainer_id, session=None):
+    """
+    Return the name of the maintainer behind C{maintainer_id} or None if that
+    maintainer_id is invalid.
+
+    @type maintainer_id: int
+    @param maintainer_id: the id of the maintainer
+
+    @rtype: Maintainer
+    @return: the Maintainer with this C{maintainer_id}
+    """
+
+    return session.query(Maintainer).get(maintainer_id)
+
+__all__.append('get_maintainer')
+
+################################################################################
+
+class NewComment(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '''<NewComment for '%s %s' (%s)>''' % (self.package, self.version, self.comment_id)
+
+__all__.append('NewComment')
+
+@session_wrapper
+def has_new_comment(package, version, session=None):
+    """
+    Returns true if the given combination of C{package}, C{version} has a comment.
+
+    @type package: string
+    @param package: name of the package
+
+    @type version: string
+    @param version: package version
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: boolean
+    @return: true/false
+    """
+
+    q = session.query(NewComment)
+    q = q.filter_by(package=package)
+    q = q.filter_by(version=version)
+
+    return q.count() > 0
+
+__all__.append('has_new_comment')
+
+@session_wrapper
+def get_new_comments(package=None, version=None, comment_id=None, session=None):
+    """
+    Returns (possibly empty) list of NewComment objects for the given
+    parameters
+
+    @type package: string (optional)
+    @param package: name of the package
+
+    @type version: string (optional)
+    @param version: package version
+
+    @type comment_id: int (optional)
+    @param comment_id: An id of a comment
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: list
+    @return: A (possibly empty) list of NewComment objects will be returned
+    """
+
+    q = session.query(NewComment)
+    if package is not None: q = q.filter_by(package=package)
+    if version is not None: q = q.filter_by(version=version)
+    if comment_id is not None: q = q.filter_by(comment_id=comment_id)
+
+    return q.all()
+
+__all__.append('get_new_comments')
+
+################################################################################
+
+class Override(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<Override %s (%s)>' % (self.package, self.suite_id)
+
+__all__.append('Override')
+
+@session_wrapper
+def get_override(package, suite=None, component=None, overridetype=None, session=None):
+    """
+    Returns Override object for the given parameters
+
+    @type package: string
+    @param package: The name of the package
+
+    @type suite: string, list or None
+    @param suite: The name of the suite (or suites if a list) to limit to.  If
+    None, don't limit.  Defaults to None.
+
+    @type component: string, list or None
+    @param component: The name of the component (or components if a list) to
+    limit to.  If None, don't limit.  Defaults to None.
+
+    @type overridetype: string, list or None
+    @param overridetype: The name of the overridetype (or overridetypes if a list) to
+    limit to.  If None, don't limit.  Defaults to None.
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: list
+    @return: A (possibly empty) list of Override objects will be returned
+    """
+
+    q = session.query(Override)
+    q = q.filter_by(package=package)
+
+    if suite is not None:
+        if not isinstance(suite, list): suite = [suite]
+        q = q.join(Suite).filter(Suite.suite_name.in_(suite))
+
+    if component is not None:
+        if not isinstance(component, list): component = [component]
+        q = q.join(Component).filter(Component.component_name.in_(component))
+
+    if overridetype is not None:
+        if not isinstance(overridetype, list): overridetype = [overridetype]
+        q = q.join(OverrideType).filter(OverrideType.overridetype.in_(overridetype))
+
+    return q.all()
+
+__all__.append('get_override')
+
+
+################################################################################
+
+class OverrideType(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<OverrideType %s>' % self.overridetype
+
+__all__.append('OverrideType')
+
+@session_wrapper
+def get_override_type(override_type, session=None):
+    """
+    Returns OverrideType object for given C{override type}.
+
+    @type override_type: string
+    @param override_type: The name of the override type
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: OverrideType
+    @return: the OverrideType object for the given override type (None if not present)
+    """
+
+    q = session.query(OverrideType).filter_by(overridetype=override_type)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_override_type')
+
+################################################################################
+
+class PendingContentAssociation(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<PendingContentAssociation %s>' % self.pca_id
+
+__all__.append('PendingContentAssociation')
+
+def insert_pending_content_paths(package, fullpaths, session=None):
+    """
+    Make sure given paths are temporarily associated with given
+    package
+
+    @type package: dict
+    @param package: the package to associate with should have been read in from the binary control file
+    @type fullpaths: list
+    @param fullpaths: the list of paths of the file being associated with the binary
+    @type session: SQLAlchemy session
+    @param session: Optional SQLAlchemy session.  If this is passed, the caller
+    is responsible for ensuring a transaction has begun and committing the
+    results or rolling back based on the result code.  If not passed, a commit
+
+class OverrideType(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<OverrideType %s>' % self.overridetype
+
+__all__.append('OverrideType')
+
+@session_wrapper
+def get_override_type(override_type, session=None):
+    """
+    Returns OverrideType object for given C{override type}.
+
+    @type override_type: string
+    @param override_type: The name of the override type
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: OverrideType
+    @return: the OverrideType object for the given override type
+    (None if not found)
+    """
+
+    q = session.query(OverrideType).filter_by(overridetype=override_type)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_override_type')
+
+################################################################################
+
+class PendingContentAssociation(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<PendingContentAssociation %s>' % self.pca_id
+
+__all__.append('PendingContentAssociation')
+
+def insert_pending_content_paths(package, fullpaths, session=None):
+    """
+    Make sure given paths are temporarily associated with given
+    package
+
+    @type package: dict
+    @param package: the package to associate with; should have been read in
+    from the binary control file
+    @type fullpaths: list
+    @param fullpaths: the list of paths of the files being associated with the binary
+    @type session: SQLAlchemy session
+    @param session: Optional SQLAlchemy session.  If this is passed, the caller
+    is responsible for ensuring a transaction has begun and committing the
+    results or rolling back based on the result code.  If not passed, a commit
+    will be performed at the end of the function
+
+    @return: True upon success, False if there is a problem
+    """
+
+    privatetrans = False
+
+    if session is None:
+        session = DBConn().session()
+        privatetrans = True
+
+    try:
+        arch = get_architecture(package['Architecture'], session)
+        arch_id = arch.arch_id
+
+        # Remove any already existing recorded files for this package
+        q = session.query(PendingContentAssociation)
+        q = q.filter_by(package=package['Package'])
+        q = q.filter_by(version=package['Version'])
+        q = q.filter_by(architecture=arch_id)
+        q.delete()
+
+        # Insert paths
+        pathcache = {}
+        for fullpath in fullpaths:
+            (path, filename) = os.path.split(fullpath)
+
+            if path.startswith( "./" ):
+                path = path[2:]
+
+            filepath_id = get_or_set_contents_path_id(path, session)
+            filename_id = get_or_set_contents_file_id(filename, session)
+
+            pathcache[fullpath] = (filepath_id, filename_id)
+
+        for fullpath, dat in pathcache.items():
+            pca = PendingContentAssociation()
+            pca.package = package['Package']
+            pca.version = package['Version']
+            pca.filepath_id = dat[0]
+            pca.filename_id = dat[1]
+            pca.architecture = arch_id
+            session.add(pca)
+
+        # Only commit if we set up the session ourself
+        if privatetrans:
+            session.commit()
+            session.close()
+        else:
+            session.flush()
+
+        return True
+    except Exception, e:
+        traceback.print_exc()
+
+        # Only rollback if we set up the session ourself
+        if privatetrans:
+            session.rollback()
+            session.close()
+
+        return False
+
+__all__.append('insert_pending_content_paths')
+
+################################################################################
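
A sketch of calling insert_pending_content_paths without an explicit session, in which case it opens, commits and closes its own; the control dict and paths here are hypothetical:

    # Sketch only: record contents for a binary that is not yet accepted.
    package = {'Package': 'hello', 'Version': '2.4-1', 'Architecture': 'i386'}
    paths = ['./usr/bin/hello', './usr/share/doc/hello/copyright']
    if not insert_pending_content_paths(package, paths):
        print "failed to record pending contents"
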
+
+class PolicyQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<PolicyQueue %s>' % self.queue_name
+
+__all__.append('PolicyQueue')
+
+@session_wrapper
+def get_policy_queue(queuename, session=None):
+    """
+    Returns PolicyQueue object for given C{queue name}
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given queue
+    """
+
+    q = session.query(PolicyQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_policy_queue')
+
+################################################################################
+
+class Priority(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __eq__(self, val):
+        if isinstance(val, str):
+            return (self.priority == val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
+
+    def __ne__(self, val):
+        if isinstance(val, str):
+            return (self.priority != val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
+
+    def __repr__(self):
+        return '<Priority %s (%s)>' % (self.priority, self.priority_id)
+
+__all__.append('Priority')
+
+@session_wrapper
+def get_priority(priority, session=None):
+    """
+    Returns Priority object for given C{priority name}.
+
+    @type priority: string
+    @param priority: The name of the priority
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: Priority
+    @return: Priority object for the given priority
+    """
+
+    q = session.query(Priority).filter_by(priority=priority)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_priority')
+
+@session_wrapper
+def get_priorities(session=None):
+    """
+    Returns dictionary of priority names -> id mappings
+
+    @type session: Session
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: dictionary
+    @return: dictionary of priority names -> id mappings
+    """
+
+    ret = {}
+    q = session.query(Priority)
+    for x in q.all():
+        ret[x.priority] = x.priority_id
+
+    return ret
+
+__all__.append('get_priorities')
+
+################################################################################
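
Because of the __eq__/__ne__ overloads above, a Priority row can be compared directly against a plain string; a short sketch (the priority name is a hypothetical example):

    # Sketch only: string comparison and the name -> id mapping.
    session = DBConn().session()
    p = get_priority("optional", session=session)
    if p is not None and p == "optional":      # __eq__ against a str
        print get_priorities(session=session)[p.priority]
    session.close()
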
+
+class Section(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __eq__(self, val):
+        if isinstance(val, str):
+            return (self.section == val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
+
+    def __ne__(self, val):
+        if isinstance(val, str):
+            return (self.section != val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
+
+    def __repr__(self):
+        return '<Section %s>' % self.section
+
+__all__.append('Section')
+
+@session_wrapper
+def get_section(section, session=None):
+    """
+    Returns Section object for given C{section name}.
+
+    @type section: string
+    @param section: The name of the section
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: Section
+    @return: Section object for the given section name
+    """
+
+    q = session.query(Section).filter_by(section=section)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_section')
+
+@session_wrapper
+def get_sections(session=None):
+    """
+    Returns dictionary of section names -> id mappings
+
+    @type session: Session
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: dictionary
+    @return: dictionary of section names -> id mappings
+    """
+
+    ret = {}
+    q = session.query(Section)
+    for x in q.all():
+        ret[x.section] = x.section_id
+
+    return ret
+
+__all__.append('get_sections')
+
+################################################################################
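
A sketch of resolving a section name: get_section returns None for an unknown name, while get_sections builds the full name -> id dictionary ("devel" is a hypothetical example):

    # Sketch only: the two lookups agree on the id.
    session = DBConn().session()
    s = get_section("devel", session=session)
    if s is not None:
        assert get_sections(session=session)["devel"] == s.section_id
    session.close()
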
+
+class DBSource(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<DBSource %s (%s)>' % (self.source, self.version)
+
+__all__.append('DBSource')
+
+@session_wrapper
+def source_exists(source, source_version, suites = ["any"], session=None):
+    """
+    Ensure that source exists somewhere in the archive for the binary
+    upload being processed.
+      1. exact match     => 1.0-3
+      2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1
+
+    @type source: string
+    @param source: source package name
+
+    @type source_version: string
+    @param source_version: expected source version
+
+    @type suites: list
+    @param suites: list of suites to check in, default I{any}
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: int
+    @return: returns 1 if a source with expected version is found, otherwise 0
+
+    """
+
+    cnf = Config()
+    ret = 1
+
+    for suite in suites:
+        q = session.query(DBSource).filter_by(source=source)
+        if suite != "any":
+            # source must exist in suite X, or in some other suite that's
+            # mapped to X, recursively... silent-maps are counted too,
+            # unreleased-maps aren't.
+            maps = cnf.ValueList("SuiteMappings")[:]
+            maps.reverse()
+            maps = [ m.split() for m in maps ]
+            maps = [ (x[1], x[2]) for x in maps
+                            if x[0] == "map" or x[0] == "silent-map" ]
+            s = [suite]
+            for x in maps:
+                if x[1] in s and x[0] not in s:
+                    s.append(x[0])
+
+            q = q.join(SrcAssociation).join(Suite)
+            q = q.filter(Suite.suite_name.in_(s))
+
+        # Reduce the query results to a list of version numbers
+        ql = [ j.version for j in q.all() ]
+
+        # Try (1)
+        if source_version in ql:
+            continue
+
+        # Try (2)
+        from daklib.regexes import re_bin_only_nmu
+        orig_source_version = re_bin_only_nmu.sub('', source_version)
+        if orig_source_version in ql:
+            continue
+
+        # No source found so return not ok
+        ret = 0
+
+    return ret
+
+__all__.append('source_exists')
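
The fallback in step (2) strips a binary-only-NMU suffix before retrying the lookup; a sketch of what re_bin_only_nmu does, assuming it removes a trailing +bN as the versions in the docstring suggest:

    # Sketch only: recover the source version from a bin-NMU version.
    from daklib.regexes import re_bin_only_nmu
    print re_bin_only_nmu.sub('', "1.0-3+b1")    # expected: 1.0-3
    print re_bin_only_nmu.sub('', "1.0-3.1+b1")  # expected: 1.0-3.1
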
+
+@session_wrapper
+def get_suites_source_in(source, session=None):
+    """
+    Returns list of Suite objects which given C{source} name is in
+
+    @type source: str
+    @param source: DBSource package name to search for
+
+    @rtype: list
+    @return: list of Suite objects for the given source
+    """
+
+    return session.query(Suite).join(SrcAssociation).join(DBSource).filter_by(source=source).all()
+
+__all__.append('get_suites_source_in')
+
+@session_wrapper
+def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None):
+    """
+    Returns list of DBSource objects for given C{source} name and other parameters
+
+    @type source: str
+    @param source: DBSource package name to search for
+
+    @type version: str or None
+    @param version: DBSource version to search for or None if not applicable
+
+    @type dm_upload_allowed: bool
+    @param dm_upload_allowed: If None, no effect.  If True or False, only
+    return packages with that dm_upload_allowed setting
+
+    @type session: Session
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: list
+    @return: list of DBSource objects for the given name (may be empty)
+    """
+
+    q = session.query(DBSource).filter_by(source=source)
+
+    if version is not None:
+        q = q.filter_by(version=version)
+
+    if dm_upload_allowed is not None:
+        q = q.filter_by(dm_upload_allowed=dm_upload_allowed)
+
+    return q.all()
+
+__all__.append('get_sources_from_name')
+
+@session_wrapper
+def get_source_in_suite(source, suite, session=None):
+    """
+    Returns a DBSource object for a combination of C{source} and C{suite}.
+
+      - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
+      - B{suite} - a suite name, eg. I{unstable}
+
+    @type source: string
+    @param source: source package name
+
+    @type suite: string
+    @param suite: the suite name
+
+    @rtype: DBSource
+    @return: the DBSource object for I{source} in I{suite} (None if not found)
+
+    """
+
+    q = session.query(SrcAssociation)
+    q = q.join('source').filter_by(source=source)
+    q = q.join('suite').filter_by(suite_name=suite)
+
+    try:
+        return q.one().source
+    except NoResultFound:
+        return None
+
+__all__.append('get_source_in_suite')
+
+################################################################################
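
A minimal sketch of get_source_in_suite; the source and suite names are hypothetical:

    # Sketch only: fetch the DBSource for a source/suite pair.
    session = DBConn().session()
    src = get_source_in_suite("hello", "unstable", session=session)
    if src is not None:
        print "%s %s" % (src.source, src.version)
    session.close()
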
+
+@session_wrapper
+def add_dsc_to_db(u, filename, session=None):
+    entry = u.pkg.files[filename]
+    source = DBSource()
+    pfs = []
+
+    source.source = u.pkg.dsc["source"]
+    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
+    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
+    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    source.install_date = datetime.now().date()
+
+    dsc_component = entry["component"]
+    dsc_location_id = entry["location id"]
+
+    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
+
+    # Set up a new poolfile if necessary
+    if not entry.has_key("files id") or not entry["files id"]:
+        filename = entry["pool name"] + filename
+        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
+        session.flush()
+        pfs.append(poolfile)
+        entry["files id"] = poolfile.file_id
+
+    source.poolfile_id = entry["files id"]
+    session.add(source)
+    session.flush()
+
+    for suite_name in u.pkg.changes["distribution"].keys():
+        sa = SrcAssociation()
+        sa.source_id = source.source_id
+        sa.suite_id = get_suite(suite_name).suite_id
+        session.add(sa)
+
+    session.flush()
+
+    # Add the source files to the DB (files and dsc_files)
+    dscfile = DSCFile()
+    dscfile.source_id = source.source_id
+    dscfile.poolfile_id = entry["files id"]
+    session.add(dscfile)
+
+    for dsc_file, dentry in u.pkg.dsc_files.items():
+        df = DSCFile()
+        df.source_id = source.source_id
+
+        # If the .orig tarball is already in the pool, its
+        # files id is stored in dsc_files by check_dsc().
+        files_id = dentry.get("files id", None)
+
+        # Find the entry in the files hash
+        # TODO: Bail out here properly
+        dfentry = None
+        for f, e in u.pkg.files.items():
+            if f == dsc_file:
+                dfentry = e
+                break
+
+        if files_id is None:
+            filename = dfentry["pool name"] + dsc_file
+
+            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
+            # FIXME: needs to check for -1/-2 and or handle exception
+            if found and obj is not None:
+                files_id = obj.file_id
+                pfs.append(obj)
+
+            # If still not found, add it
+            if files_id is None:
+                # HACK: Force sha1sum etc into dentry
+                dentry["sha1sum"] = dfentry["sha1sum"]
+                dentry["sha256sum"] = dfentry["sha256sum"]
+                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
+                pfs.append(poolfile)
+                files_id = poolfile.file_id
+        else:
+            poolfile = get_poolfile_by_id(files_id, session)
+            if poolfile is None:
+                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
+            pfs.append(poolfile)
+
+        df.poolfile_id = files_id
+        session.add(df)
+
+    session.flush()
+
+    # Add the src_uploaders to the DB
+    uploader_ids = [source.maintainer_id]
+    if u.pkg.dsc.has_key("uploaders"):
+        for up in u.pkg.dsc["uploaders"].split(","):
+            up = up.strip()
+            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
+
+    added_ids = {}
+    for up in uploader_ids:
+        if added_ids.has_key(up):
+            utils.warn("Already saw uploader %s for source %s" % (up, source.source))
+            continue
+
+        added_ids[up] = 1
+
+        su = SrcUploader()
+        su.maintainer_id = up
+        su.source_id = source.source_id
+        session.add(su)
+
+    session.flush()
+
+    return dsc_component, dsc_location_id, pfs
+
+__all__.append('add_dsc_to_db')
+
+@session_wrapper
+def add_deb_to_db(u, filename, session=None):
+    """
+    Contrary to what you might expect, this routine deals with both
+    debs and udebs.  That info is in 'dbtype', whilst 'type' is
+    'deb' for both of them
+    """
+    cnf = Config()
+    entry = u.pkg.files[filename]
+
+    bin = DBBinary()
+    bin.package = entry["package"]
+    bin.version = entry["version"]
+    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
+    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
+    bin.binarytype = entry["dbtype"]
+
+    # Find poolfile id
+    filename = entry["pool name"] + filename
+    fullpath = os.path.join(cnf["Dir::Pool"], filename)
+    if not entry.get("location id", None):
+        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
+
+    if entry.get("files id", None):
+        bin.poolfile_id = entry["files id"]
+        poolfile = get_poolfile_by_id(bin.poolfile_id)
+    else:
+        poolfile = add_poolfile(filename, entry, entry["location id"], session)
+        bin.poolfile_id = entry["files id"] = poolfile.file_id
+
+    # Find source id
+    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+    if len(bin_sources) != 1:
+        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+                                  (bin.package, bin.version, bin.architecture.arch_string,
+                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
+
+    bin.source_id = bin_sources[0].source_id
+
+    # Add and flush object so it has an ID
+    session.add(bin)
+    session.flush()
+
+    # Add BinAssociations
+    for suite_name in u.pkg.changes["distribution"].keys():
+        ba = BinAssociation()
+        ba.binary_id = bin.binary_id
+        ba.suite_id = get_suite(suite_name).suite_id
+        session.add(ba)
+
+    session.flush()
+
+    # Deal with contents - disabled for now
+    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
+    #if not contents:
+    #    print "REJECT\nCould not determine contents of package %s" % bin.package
+    #    session.rollback()
+    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
+
+    return poolfile
+
+__all__.append('add_deb_to_db')
+
+################################################################################
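
Both routines above are driven from dak's upload-processing path with a parsed upload object; a rough, hedged sketch of that dispatch ("u", "session", the file names and the "type" values are assumptions based on the docstrings, not a verbatim caller):

    # Sketch only: hand each upload file to the matching helper.
    for newfile, entry in u.pkg.files.items():
        if entry["type"] == "dsc":
            component, location_id, poolfiles = add_dsc_to_db(u, newfile, session=session)
        elif entry["type"] == "deb":   # covers udebs too, per the docstring
            poolfile = add_deb_to_db(u, newfile, session=session)
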
+
+class SourceACL(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<SourceACL %s>' % self.source_acl_id
+
+__all__.append('SourceACL')
+
+################################################################################
+
+class SrcAssociation(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<SrcAssociation %u (%s, %s)>' % (self.sa_id, self.source, self.suite)
+
+__all__.append('SrcAssociation')
+
+################################################################################
+
+class SrcFormat(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<SrcFormat %s>' % (self.format_name)
+
+__all__.append('SrcFormat')
+
+################################################################################
+
+class SrcUploader(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<SrcUploader %s>' % self.uploader_id
+
+__all__.append('SrcUploader')
+
+################################################################################
+
+SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
+                 ('SuiteID', 'suite_id'),
+                 ('Version', 'version'),
+                 ('Origin', 'origin'),
+                 ('Label', 'label'),
+                 ('Description', 'description'),
+                 ('Untouchable', 'untouchable'),
+                 ('Announce', 'announce'),
+                 ('Codename', 'codename'),
+                 ('OverrideCodename', 'overridecodename'),
+                 ('ValidTime', 'validtime'),
+                 ('Priority', 'priority'),
+                 ('NotAutomatic', 'notautomatic'),
+                 ('CopyChanges', 'copychanges'),
+                 ('CopyDotDak', 'copydotdak'),
+                 ('CommentsDir', 'commentsdir'),
+                 ('OverrideSuite', 'overridesuite'),
+                 ('ChangelogBase', 'changelogbase')]
+
+
+class Suite(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<Suite %s>' % self.suite_name
+
+    def __eq__(self, val):
+        if isinstance(val, str):
+            return (self.suite_name == val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
+
+    def __ne__(self, val):
+        if isinstance(val, str):
+            return (self.suite_name != val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
+
+    def details(self):
+        ret = []
+        for disp, field in SUITE_FIELDS:
+            val = getattr(self, field, None)
+            if val is not None:
+                ret.append("%s: %s" % (disp, val))
+
+        return "\n".join(ret)
+
+__all__.append('Suite')
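
Suite.details() renders whichever SUITE_FIELDS are actually set on the row; a small sketch (the suite name is hypothetical, and get_suite is defined just below):

    # Sketch only: dump the configured fields of a suite.
    session = DBConn().session()
    suite = get_suite("unstable", session=session)
    if suite is not None:
        print suite.details()   # e.g. "SuiteName: unstable\nCodename: sid"
    session.close()
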
+
+@session_wrapper
+def get_suite_architecture(suite, architecture, session=None):
+    """
+    Returns a SuiteArchitecture object given C{suite} and C{architecture} or None if it
+    doesn't exist
+
+    @type suite: str
+    @param suite: Suite name to search for
+
+    @type architecture: str
+    @param architecture: Architecture name to search for
+
+    @type session: Session
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: SuiteArchitecture
+    @return: the SuiteArchitecture object or None
+    """
+
+    q = session.query(SuiteArchitecture)
+    q = q.join(Architecture).filter_by(arch_string=architecture)
+    q = q.join(Suite).filter_by(suite_name=suite)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_suite_architecture')
+
+@session_wrapper
+def get_suite(suite, session=None):
+    """
+    Returns Suite object for given C{suite name}.
+
+    @type suite: string
+    @param suite: The name of the suite
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: Suite
+    @return: Suite object for the requested suite name (None if not present)
+    """
+
+    q = session.query(Suite).filter_by(suite_name=suite)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_suite')
+
+################################################################################
+
+class SuiteArchitecture(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<SuiteArchitecture (%s, %s)>' % (self.suite_id, self.arch_id)
+
+__all__.append('SuiteArchitecture')
+
+@session_wrapper
+def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
+    """
+    Returns list of Architecture objects for given C{suite} name
+
+    @type suite: str
+    @param suite: Suite name to search for
+
+    @type skipsrc: boolean
+    @param skipsrc: Whether to skip returning the 'source' architecture entry
+    (Default False)
+
+    @type skipall: boolean
+    @param skipall: Whether to skip returning the 'all' architecture entry
+    (Default False)
+
+    @type session: Session
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: list
+    @return: list of Architecture objects for the given name (may be empty)
+    """
+
+    q = session.query(Architecture)
+    q = q.join(SuiteArchitecture)
+    q = q.join(Suite).filter_by(suite_name=suite)
+
+    if skipsrc:
+        q = q.filter(Architecture.arch_string != 'source')
+
+    if skipall:
+        q = q.filter(Architecture.arch_string != 'all')
+
+    q = q.order_by('arch_string')
+
+    return q.all()
+
+__all__.append('get_suite_architectures')
+
+################################################################################
+
+class SuiteSrcFormat(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<SuiteSrcFormat (%s, %s)>' % (self.suite_id, self.src_format_id)
+
+__all__.append('SuiteSrcFormat')
+
+@session_wrapper
+def get_suite_src_formats(suite, session=None):
+    """
+    Returns list of allowed SrcFormat for C{suite}.
+
+    @type suite: str
+    @param suite: Suite name to search for
+
+    @type session: Session
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: list
+    @return: the list of allowed source formats for I{suite}
+    """
+
+    q = session.query(SrcFormat)
+    q = q.join(SuiteSrcFormat)
+    q = q.join(Suite).filter_by(suite_name=suite)
+    q = q.order_by('format_name')
+
+    return q.all()
+
+__all__.append('get_suite_src_formats')
+
+################################################################################
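
A sketch of using get_suite_src_formats to validate an upload's source format; the suite and format names are hypothetical examples:

    # Sketch only: is this source format allowed in the target suite?
    session = DBConn().session()
    allowed = [f.format_name for f in get_suite_src_formats("unstable", session=session)]
    if "3.0 (quilt)" not in allowed:
        print "source format not permitted in this suite"
    session.close()
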
+
+class Uid(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __eq__(self, val):
+        if isinstance(val, str):
+            return (self.uid == val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
+
+    def __ne__(self, val):
+        if isinstance(val, str):
+            return (self.uid != val)
+        # This signals to use the normal comparison operator
+        return NotImplemented
+
+    def __repr__(self):
+        return '<Uid %s (%s)>' % (self.uid, self.name)
+
+__all__.append('Uid')
+
+@session_wrapper
+def add_database_user(uidname, session=None):
+    """
+    Adds a database user
+
+    @type uidname: string
+    @param uidname: The uid of the user to add
+
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).  If not passed, a commit will be performed at
+    the end of the function, otherwise the caller is responsible for committing.
+
+    @rtype: Uid
+    @return: the uid object for the given uidname
+    """
+
+    session.execute("CREATE USER :uid", {'uid': uidname})
+    session.commit_or_flush()
+
+__all__.append('add_database_user')
+
+@session_wrapper
+def get_or_set_uid(uidname, session=None):
+    """
+    Returns uid object for given uidname.
+
+    If no matching uidname is found, a row is inserted.
+
+    @type uidname: string
+    @param uidname: The uid to add
+
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied).  If not passed, a commit will be performed at
+    the end of the function, otherwise the caller is responsible for committing.
+
+    @rtype: Uid
+    @return: the uid object for the given uidname
+    """
+
+    q = session.query(Uid).filter_by(uid=uidname)
+
+    try:
+        ret = q.one()
+    except NoResultFound:
+        uid = Uid()
+        uid.uid = uidname
+        session.add(uid)
+        session.commit_or_flush()
+        ret = uid
+
+    return ret
+
+__all__.append('get_or_set_uid')
+
+@session_wrapper
+def get_uid_from_fingerprint(fpr, session=None):
+    q = session.query(Uid)
+    q = q.join(Fingerprint).filter_by(fingerprint=fpr)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_uid_from_fingerprint')
+
+################################################################################
+
+class UploadBlock(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
+
+__all__.append('UploadBlock')
+
+################################################################################
+
+class DBConn(object):
+    """
+    database module init.
+    """
+    __shared_state = {}
+
+    def __init__(self, *args, **kwargs):
+        self.__dict__ = self.__shared_state
+
+        if not getattr(self, 'initialised', False):
+            self.initialised = True
+            self.debug = kwargs.has_key('debug')
+            self.__createconn()
+
+    def __setuptables(self):
+        tables = (
+            'architecture',
+            'archive',
+            'bin_associations',
+            'binaries',
+            'binary_acl',
+            'binary_acl_map',
+            'build_queue',
+            'build_queue_files',
+            'component',
+            'config',
+            'content_associations',
+            'content_file_names',
+            'content_file_paths',
+            'changes_pending_binaries',
+            'changes_pending_files',
+            'changes_pending_files_map',
+            'changes_pending_source',
+            'changes_pending_source_files',
+            'changes_pool_files',
+            'dsc_files',
+            'files',
+            'fingerprint',
+            'keyrings',
+            'changes',
+            'keyring_acl_map',
+            'location',
+            'maintainer',
+            'new_comments',
+            'override',
+            'override_type',
+            'pending_content_associations',
+            'policy_queue',
+            'priority',
+            'section',
+            'source',
+            'source_acl',
+            'src_associations',
+            'src_format',
+            'src_uploaders',
+            'suite',
+            'suite_architectures',
+            'suite_src_formats',
+            'suite_build_queue_copy',
+            'uid',
+            'upload_blocks',
+        )
+
+        for table_name in tables:
+            table = Table(table_name, self.db_meta, autoload=True)
+            setattr(self, 'tbl_%s' % table_name, table)
+
+    def __setupmappers(self):
+        mapper(Architecture, self.tbl_architecture,
+               properties = dict(arch_id = self.tbl_architecture.c.id))
+
+        mapper(Archive, self.tbl_archive,
+               properties = dict(archive_id = self.tbl_archive.c.id,
+                                 archive_name = self.tbl_archive.c.name))
+
+        mapper(BinAssociation, self.tbl_bin_associations,
+               properties = dict(ba_id = self.tbl_bin_associations.c.id,
+                                 suite_id = self.tbl_bin_associations.c.suite,
+                                 suite = relation(Suite),
+                                 binary_id = self.tbl_bin_associations.c.bin,
+                                 binary = relation(DBBinary)))
+
+        mapper(BuildQueue, self.tbl_build_queue,
+               properties = dict(queue_id = self.tbl_build_queue.c.id))
+
+        mapper(BuildQueueFile, self.tbl_build_queue_files,
+               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
+                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))
+
+        mapper(DBBinary, self.tbl_binaries,
+               properties = dict(binary_id = self.tbl_binaries.c.id,
+                                 package = self.tbl_binaries.c.package,
+                                 version = self.tbl_binaries.c.version,
+                                 maintainer_id = self.tbl_binaries.c.maintainer,
+                                 maintainer = relation(Maintainer),
+                                 source_id = self.tbl_binaries.c.source,
+                                 source = relation(DBSource),
+                                 arch_id = self.tbl_binaries.c.architecture,
+                                 architecture = relation(Architecture),
+                                 poolfile_id = self.tbl_binaries.c.file,
+                                 poolfile = relation(PoolFile),
+                                 binarytype = self.tbl_binaries.c.type,
+                                 fingerprint_id = self.tbl_binaries.c.sig_fpr,
+                                 fingerprint = relation(Fingerprint),
+                                 install_date = self.tbl_binaries.c.install_date,
+                                 binassociations = relation(BinAssociation,
+                                                            primaryjoin=(self.tbl_binaries.c.id==self.tbl_bin_associations.c.bin))))
+
+        mapper(BinaryACL, self.tbl_binary_acl,
+               properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
+
+        mapper(BinaryACLMap, self.tbl_binary_acl_map,
+               properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
+                                 fingerprint = relation(Fingerprint, backref="binary_acl_map"),
+                                 architecture = relation(Architecture)))
+
+        mapper(Component, self.tbl_component,
+               properties = dict(component_id = self.tbl_component.c.id,
+                                 component_name = self.tbl_component.c.name))
+
+        mapper(DBConfig, self.tbl_config,
+               properties = dict(config_id = self.tbl_config.c.id))
+
+        mapper(DSCFile, self.tbl_dsc_files,
+               properties = dict(dscfile_id = self.tbl_dsc_files.c.id,
+                                 source_id = self.tbl_dsc_files.c.source,
+                                 source = relation(DBSource),
+                                 poolfile_id = self.tbl_dsc_files.c.file,
+                                 poolfile = relation(PoolFile)))
+
+        mapper(PoolFile, self.tbl_files,
+               properties = dict(file_id = self.tbl_files.c.id,
+                                 filesize = self.tbl_files.c.size,
+                                 location_id = self.tbl_files.c.location,
+                                 location = relation(Location)))
+
+        mapper(Fingerprint, self.tbl_fingerprint,
+               properties = dict(fingerprint_id = self.tbl_fingerprint.c.id,
+                                 uid_id = self.tbl_fingerprint.c.uid,
+                                 uid = relation(Uid),
+                                 keyring_id = self.tbl_fingerprint.c.keyring,
+                                 keyring = relation(Keyring),
+                                 source_acl = relation(SourceACL),
+                                 binary_acl = relation(BinaryACL)))
+
+        mapper(Keyring, self.tbl_keyrings,
+               properties = dict(keyring_name = self.tbl_keyrings.c.name,
+                                 keyring_id = self.tbl_keyrings.c.id))
+
+        mapper(DBChange, self.tbl_changes,
+               properties = dict(change_id = self.tbl_changes.c.id,
+                                 poolfiles = relation(PoolFile,
+                                                      secondary=self.tbl_changes_pool_files,
+                                                      backref="changeslinks"),
+                                 files = relation(ChangePendingFile,
+                                                  secondary=self.tbl_changes_pending_files_map,
+                                                  backref="changesfile"),
+                                 in_queue_id = self.tbl_changes.c.in_queue,
+                                 in_queue = relation(PolicyQueue,
+                                                     primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
+                                 approved_for_id = self.tbl_changes.c.approved_for))
+
+        mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
+               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
+
+        mapper(ChangePendingFile, self.tbl_changes_pending_files,
+               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+
+        mapper(ChangePendingSource, self.tbl_changes_pending_source,
+               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
+                                 change = relation(DBChange),
+                                 maintainer = relation(Maintainer,
+                                                       primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
+                                 changedby = relation(Maintainer,
+                                                      primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
+                                 fingerprint = relation(Fingerprint),
+                                 source_files = relation(ChangePendingFile,
+                                                         secondary=self.tbl_changes_pending_source_files,
+                                                         backref="pending_sources")))
+
+        mapper(KeyringACLMap, self.tbl_keyring_acl_map,
+               properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
+                                 keyring = relation(Keyring, backref="keyring_acl_map"),
+                                 architecture = relation(Architecture)))
+
+        mapper(Location, self.tbl_location,
+               properties = dict(location_id = self.tbl_location.c.id,
+                                 component_id = self.tbl_location.c.component,
+                                 component = relation(Component),
+                                 archive_id = self.tbl_location.c.archive,
+                                 archive = relation(Archive),
+                                 archive_type = self.tbl_location.c.type))
+
+        mapper(Maintainer, self.tbl_maintainer,
+               properties = dict(maintainer_id = self.tbl_maintainer.c.id))
+
+        mapper(NewComment, self.tbl_new_comments,
+               properties = dict(comment_id = self.tbl_new_comments.c.id))
+
+        mapper(Override, self.tbl_override,
+               properties = dict(suite_id = self.tbl_override.c.suite,
+                                 suite = relation(Suite),
+                                 component_id = self.tbl_override.c.component,
+                                 component = relation(Component),
+                                 priority_id = self.tbl_override.c.priority,
+                                 priority = relation(Priority),
+                                 section_id = self.tbl_override.c.section,
+                                 section = relation(Section),
+                                 overridetype_id = self.tbl_override.c.type,
+                                 overridetype = relation(OverrideType)))
+
+        mapper(OverrideType, self.tbl_override_type,
+               properties = dict(overridetype = self.tbl_override_type.c.type,
+                                 overridetype_id = self.tbl_override_type.c.id))
+
+        mapper(PolicyQueue, self.tbl_policy_queue,
+               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+
+        mapper(Priority, self.tbl_priority,
+               properties = dict(priority_id = self.tbl_priority.c.id))
+
+        mapper(Section, self.tbl_section,
+               properties = dict(section_id = self.tbl_section.c.id))
+
+        mapper(DBSource, self.tbl_source,
+               properties = dict(source_id = self.tbl_source.c.id,
+                                 version = self.tbl_source.c.version,
+                                 maintainer_id = self.tbl_source.c.maintainer,
+                                 maintainer = relation(Maintainer,
+                                                       primaryjoin=(self.tbl_source.c.maintainer==self.tbl_maintainer.c.id)),
+                                 poolfile_id = self.tbl_source.c.file,
+                                 poolfile = relation(PoolFile),
+                                 fingerprint_id = self.tbl_source.c.sig_fpr,
+                                 fingerprint = relation(Fingerprint),
+                                 changedby_id = self.tbl_source.c.changedby,
+                                 changedby = relation(Maintainer,
+                                                      primaryjoin=(self.tbl_source.c.changedby==self.tbl_maintainer.c.id)),
+                                 srcfiles = relation(DSCFile,
+                                                     primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
+                                 srcassociations = relation(SrcAssociation,
+                                                            primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source)),
+                                 srcuploaders = relation(SrcUploader)))
+
+        mapper(SourceACL, self.tbl_source_acl,
+               properties = dict(source_acl_id = self.tbl_source_acl.c.id))
+
+        mapper(SrcAssociation, self.tbl_src_associations,
+               properties = dict(sa_id = self.tbl_src_associations.c.id,
+                                 suite_id = self.tbl_src_associations.c.suite,
+                                 suite = relation(Suite),
+                                 source_id = self.tbl_src_associations.c.source,
+                                 source = relation(DBSource)))
+
+        mapper(SrcFormat, self.tbl_src_format,
+               properties = dict(src_format_id = self.tbl_src_format.c.id,
+                                 format_name = self.tbl_src_format.c.format_name))
+
+        mapper(SrcUploader, self.tbl_src_uploaders,
+               properties = dict(uploader_id = self.tbl_src_uploaders.c.id,
+                                 source_id = self.tbl_src_uploaders.c.source,
+                                 source = relation(DBSource,
+                                                   primaryjoin=(self.tbl_src_uploaders.c.source==self.tbl_source.c.id)),
+                                 maintainer_id = self.tbl_src_uploaders.c.maintainer,
+                                 maintainer = relation(Maintainer,
+                                                       primaryjoin=(self.tbl_src_uploaders.c.maintainer==self.tbl_maintainer.c.id))))
+
+        mapper(Suite, self.tbl_suite,
+               properties = dict(suite_id = self.tbl_suite.c.id,
+                                 policy_queue = relation(PolicyQueue),
+                                 copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy)))
+
+        mapper(SuiteArchitecture, self.tbl_suite_architectures,
+               properties = dict(suite_id = self.tbl_suite_architectures.c.suite,
+                                 suite = relation(Suite, backref='suitearchitectures'),
+                                 arch_id = self.tbl_suite_architectures.c.architecture,
+                                 architecture = relation(Architecture)))
+
+        mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
+               properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
+                                 suite = relation(Suite, backref='suitesrcformats'),
+                                 src_format_id = self.tbl_suite_src_formats.c.src_format,
+                                 src_format = relation(SrcFormat)))
+
+        mapper(Uid, self.tbl_uid,
+               properties = dict(uid_id = self.tbl_uid.c.id,
+                                 fingerprint = relation(Fingerprint)))
+
+        mapper(UploadBlock, self.tbl_upload_blocks,
+               properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
+                                 fingerprint = relation(Fingerprint, backref="uploadblocks"),
+                                 uid = relation(Uid, backref="uploadblocks")))
+
+    ## Connection functions
+    def __createconn(self):
+        from config import Config
+        cnf = Config()
+        if cnf["DB::Host"]:
+            # TCP/IP
+            connstr = "postgres://%s" % cnf["DB::Host"]
+            if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+                connstr += ":%s" % cnf["DB::Port"]
+            connstr += "/%s" % cnf["DB::Name"]
+        else:
+            # Unix Socket
+            connstr = "postgres:///%s" % cnf["DB::Name"]
+            if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+                connstr += "?port=%s" % cnf["DB::Port"]
+
+        self.db_pg   = create_engine(connstr, echo=self.debug)
+        self.db_meta = MetaData()
+        self.db_meta.bind = self.db_pg
+        self.db_smaker = sessionmaker(bind=self.db_pg,
+                                      autoflush=True,
+                                      autocommit=False)
+
+        self.__setuptables()
+        self.__setupmappers()
+
+    def session(self):
+        return self.db_smaker()
+
+__all__.append('DBConn')
-        c = self.db_con.cursor()
-        c.execute("BEGIN WORK")
-        try:
-            arch_id = self.get_architecture_id(package['Architecture'])
-
-            # Remove any already existing recorded files for this package
-            c.execute("""DELETE FROM pending_content_associations
-                         WHERE package=%(Package)s
-                         AND version=%(Version)s
-                         AND architecture=%(ArchID)s""", {'Package': package['Package'],
-                                                          'Version': package['Version'],
-                                                          'ArchID': arch_id})
-
-            for fullpath in fullpaths:
-                (path, file) = os.path.split(fullpath)
-
-                if path.startswith( "./" ):
-                    path = path[2:]
-                # Get the necessary IDs ...
-                file_id = self.get_or_set_contents_file_id(file)
-                path_id = self.get_or_set_contents_path_id(path)
-
-                c.execute("""INSERT INTO pending_content_associations
-                               (package, version, architecture, filepath, filename)
-                           VALUES (%%(Package)s, %%(Version)s, '%d', '%d', '%d')"""
-                          % (arch_id, path_id, file_id), package )
-
-            c.execute("COMMIT")
-            return True
-        except:
-            traceback.print_exc()
-            c.execute("ROLLBACK")
-            return False
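
Since DBConn shares its state across instances (the Borg pattern above) and builds the engine only once, every caller sees the same engine while getting independent sessions; a closing sketch, assuming a dak configuration with the DB::* settings present:

    # Sketch only: one shared engine, many sessions.
    conn_a = DBConn()
    conn_b = DBConn()
    assert conn_a.db_pg is conn_b.db_pg   # same engine, shared state
    session = conn_a.session()
    print session.query(Suite).count()
    session.close()
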