X-Git-Url: https://git.decadent.org.uk/gitweb/?a=blobdiff_plain;f=daklib%2Fdbconn.py;h=72453f643d26dc35b768395d5d9886264d49b3eb;hb=4830d9be143c7645ea932b53fae095e275ad7814;hp=ecb610311bb23ad36cfecb81fa37c65f1a3032c6;hpb=f9281ce6bc216994d5b272c265bb74bc8ea289bc;p=dak.git

diff --git a/daklib/dbconn.py b/daklib/dbconn.py
old mode 100644
new mode 100755
index ecb61031..72453f64
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -5,7 +5,7 @@
 @contact: Debian FTPMaster <ftpmaster@debian.org>
 @copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
 @copyright: 2008-2009 Mark Hymers <mhy@debian.org>
-@copyright: 2009 Joerg Jaspert <joerg@debian.org>
+@copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
 @copyright: 2009 Mike O'Connor <stew@debian.org>
 @license: GNU General Public License version 2 or later
 """
@@ -37,6 +37,7 @@ import os
 import re
 import psycopg2
 import traceback
+import commands
 from datetime import datetime, timedelta
 from errno import ENOENT
 from tempfile import mkstemp, mkdtemp
@@ -44,36 +45,63 @@ from tempfile import mkstemp, mkdtemp
 from inspect import getargspec
 
 import sqlalchemy
-from sqlalchemy import create_engine, Table, MetaData
-from sqlalchemy.orm import sessionmaker, mapper, relation
+from sqlalchemy import create_engine, Table, MetaData, Column, Integer
+from sqlalchemy.orm import sessionmaker, mapper, relation, object_session
 from sqlalchemy import types as sqltypes
 
 # Don't remove this, we re-export the exceptions to scripts which import us
 from sqlalchemy.exc import *
 from sqlalchemy.orm.exc import NoResultFound
 
+# Only import Config until Queue stuff is changed to store its config
+# in the database
 from config import Config
 from textutils import fix_maintainer
+from dak_exceptions import NoSourceFieldError
+
+# suppress some deprecation warnings in squeeze related to sqlalchemy
+import warnings
+warnings.filterwarnings('ignore', \
+    "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
+    SADeprecationWarning)
+# TODO: sqlalchemy needs some extra configuration to correctly reflect
+# the ind_deb_contents_* indexes - we ignore the warnings at the moment
+warnings.filterwarnings("ignore", 'Predicate of partial index', SAWarning)
+
 
 ################################################################################
 
 # Patch in support for the debversion field type so that it works during
 # reflection
 
-class DebVersion(sqltypes.Text):
+try:
+    # that is for sqlalchemy 0.6
+    UserDefinedType = sqltypes.UserDefinedType
+except:
+    # this one for sqlalchemy 0.5
+    UserDefinedType = sqltypes.TypeEngine

+class DebVersion(UserDefinedType):
     def get_col_spec(self):
         return "DEBVERSION"
 
+    def bind_processor(self, dialect):
+        return None
+
+    # ' = None' is needed for sqlalchemy 0.5:
+    def result_processor(self, dialect, coltype = None):
+        return None
+
 sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version == "0.5":
+if sa_major_version in ["0.5", "0.6"]:
     from sqlalchemy.databases import postgres
     postgres.ischema_names['debversion'] = DebVersion
 else:
-    raise Exception("dak isn't ported to SQLA versions != 0.5 yet. See daklib/dbconn.py")
+    raise Exception("dak only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
 
 ################################################################################
 
-__all__ = ['IntegrityError', 'SQLAlchemyError']
+__all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion']
 
 ################################################################################
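
With 'debversion' registered in ischema_names, reflected tables resolve that column
type to DebVersion instead of falling back to a generic type. A minimal sketch of
what the patch buys during reflection; the DSN is illustrative and assumes a local
projectb database:

    from sqlalchemy import MetaData, Table, create_engine
    from daklib.dbconn import DebVersion

    engine = create_engine('postgres://localhost/projectb')  # illustrative DSN
    meta = MetaData(bind=engine)

    # 'version' is a debversion column in dak's schema; without the
    # ischema_names patch above it would reflect as an unknown/null type
    source = Table('source', meta, autoload=True)
    assert isinstance(source.c.version.type, DebVersion)
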
See daklib/dbconn.py") ################################################################################ -__all__ = ['IntegrityError', 'SQLAlchemyError'] +__all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion'] ################################################################################ @@ -129,8 +157,9 @@ __all__.append('session_wrapper') ################################################################################ class Architecture(object): - def __init__(self, *args, **kwargs): - pass + def __init__(self, arch_string = None, description = None): + self.arch_string = arch_string + self.description = description def __eq__(self, val): if isinstance(val, str): @@ -174,13 +203,14 @@ def get_architecture(architecture, session=None): __all__.append('get_architecture') +# TODO: should be removed because the implementation is too trivial @session_wrapper def get_architecture_suites(architecture, session=None): """ Returns list of Suite objects for given C{architecture} name - @type source: str - @param source: Architecture name to search for + @type architecture: str + @param architecture: Architecture name to search for @type session: Session @param session: Optional SQL session object (a temporary one will be @@ -190,13 +220,7 @@ def get_architecture_suites(architecture, session=None): @return: list of Suite objects for the given name (may be empty) """ - q = session.query(Suite) - q = q.join(SuiteArchitecture) - q = q.join(Architecture).filter_by(arch_string=architecture).order_by('suite_name') - - ret = q.all() - - return ret + return get_architecture(architecture, session).suites __all__.append('get_architecture_suites') @@ -276,8 +300,8 @@ def get_suites_binary_in(package, session=None): """ Returns list of Suite objects which given C{package} name is in - @type source: str - @param source: DBBinary package name to search for + @type package: str + @param package: DBBinary package name to search for @rtype: list @return: list of Suite objects for the given package @@ -323,8 +347,8 @@ def get_binaries_from_name(package, version=None, architecture=None, session=Non @type version: str or None @param version: Version to search for (or None) - @type package: str, list or None - @param package: Architectures to limit to (or None if no limit) + @type architecture: str, list or None + @param architecture: Architectures to limit to (or None if no limit) @type session: Session @param session: Optional SQL session object (a temporary one will be @@ -377,16 +401,16 @@ def get_binary_from_name_suite(package, suitename, session=None): sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name FROM binaries b, files fi, location l, component c, bin_associations ba, suite su - WHERE b.package=:package + WHERE b.package='%(package)s' AND b.file = fi.id AND fi.location = l.id AND l.component = c.id AND ba.bin=b.id AND ba.suite = su.id - AND su.suite_name=:suitename + AND su.suite_name %(suitename)s ORDER BY b.version DESC""" - return session.execute(sql, {'package': package, 'suitename': suitename}) + return session.execute(sql % {'package': package, 'suitename': suitename}) __all__.append('get_binary_from_name_suite') @@ -435,8 +459,8 @@ MINIMAL_APT_CONF=""" Dir { ArchiveDir "%(archivepath)s"; - OverrideDir "/srv/ftp.debian.org/scripts/override/"; - CacheDir "/srv/ftp.debian.org/database/"; + OverrideDir "%(overridedir)s"; + CacheDir "%(cachedir)s"; }; Default @@ -498,11 +522,16 @@ class BuildQueue(object): os.write(fl_fd, '%s\n' % n.fullpath) os.close(fl_fd) + cnf = Config() + # 
@@ -435,8 +459,8 @@ MINIMAL_APT_CONF="""
 Dir
 {
    ArchiveDir "%(archivepath)s";
-   OverrideDir "/srv/ftp.debian.org/scripts/override/";
-   CacheDir "/srv/ftp.debian.org/database/";
+   OverrideDir "%(overridedir)s";
+   CacheDir "%(cachedir)s";
 };
 
 Default
@@ -498,11 +522,16 @@ class BuildQueue(object):
             os.write(fl_fd, '%s\n' % n.fullpath)
         os.close(fl_fd)
 
+        cnf = Config()
+
         # Write minimal apt.conf
         # TODO: Remove hardcoding from template
         (ac_fd, ac_name) = mkstemp()
         os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
-                                            'filelist': fl_name})
+                                            'filelist': fl_name,
+                                            'cachedir': cnf["Dir::Cache"],
+                                            'overridedir': cnf["Dir::Override"],
+                                            })
         os.close(ac_fd)
 
         # Run apt-ftparchive generate
@@ -524,9 +553,14 @@ class BuildQueue(object):
         os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
 
+        # Crude hack with open and append, but this whole section should be redone.
+        if self.notautomatic:
+            release=open("Release", "a")
+            release.write("NotAutomatic: yes\n")
+            release.close()
+
         # Sign if necessary
         if self.signingkey:
-            cnf = Config()
             keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
             if cnf.has_key("Dinstall::SigningPubKeyring"):
                 keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
@@ -598,7 +632,7 @@ class BuildQueue(object):
         session.commit()
 
         for f in os.listdir(self.path):
-            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release'):
+            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
                 continue
 
             try:
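
For orientation, a sketch of what the substitution in write_metadata now produces;
the paths are illustrative stand-ins for the Dir::Cache and Dir::Override
configuration values and the mkstemp() file list name:

    apt_conf = MINIMAL_APT_CONF % {'archivepath': '/srv/queue/buildd',
                                   'filelist': '/tmp/tmpa1B2c3',            # mkstemp() name
                                   'cachedir': '/srv/database',             # cnf["Dir::Cache"]
                                   'overridedir': '/srv/scripts/override'}  # cnf["Dir::Override"]
    # The Dir block of the result then reads:
    #   Dir
    #   {
    #      ArchiveDir "/srv/queue/buildd";
    #      OverrideDir "/srv/scripts/override";
    #      CacheDir "/srv/database";
    #   };
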
@@ -898,8 +932,9 @@ def get_or_set_contents_path_id(filepath, session=None):
 
     If no matching file is found, a row is inserted.
 
-    @type filename: string
-    @param filename: The filepath
+    @type filepath: string
+    @param filepath: The filepath
+
     @type session: SQLAlchemy
     @param session: Optional SQL session object (a temporary one will be
     generated if not supplied). If not passed, a commit will be performed at
@@ -960,12 +995,16 @@ def insert_content_paths(binary_id, fullpaths, session=None):
 
     try:
         # Insert paths
-        pathcache = {}
-        for fullpath in fullpaths:
-            if fullpath.startswith( './' ):
-                fullpath = fullpath[2:]
+        def generate_path_dicts():
+            for fullpath in fullpaths:
+                if fullpath.startswith( './' ):
+                    fullpath = fullpath[2:]
 
-            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )", { 'filename': fullpath, 'id': binary_id} )
+                yield {'filename':fullpath, 'id': binary_id }
+
+        for d in generate_path_dicts():
+            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
+                             d )
 
         session.commit()
         if privatetrans:
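
The refactoring above separates path normalisation (in the generator) from the
INSERT loop; each yielded dict supplies the bind parameters for one row. A small
usage sketch with made-up values:

    session = DBConn().session()

    # both spellings end up stored as usr/bin/foo - the generator strips './'
    insert_content_paths(42, ['./usr/bin/foo', 'usr/share/man/man1/foo.1.gz'],
                         session=session)
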
@@ -1047,7 +1086,7 @@ __all__.append('PoolFile')
 def check_poolfile(filename, filesize, md5sum, location_id, session=None):
     """
     Returns a tuple:
-     (ValidFileFound [boolean or None], PoolFile object or None)
+    (ValidFileFound [boolean or None], PoolFile object or None)
 
     @type filename: string
     @param filename: the filename of the file to check against the DB
@@ -1063,12 +1102,11 @@ def check_poolfile(filename, filesize, md5sum, location_id, session=None):
 
     @rtype: tuple
     @return: Tuple of length 2.
-                 - If more than one file found with that name:
-                   (None, None)
-                 - If valid pool file found: (True, PoolFile object)
-                 - If valid pool file not found:
-                     - (False, None) if no file found
-                     - (False, PoolFile object) if file found with size/md5sum mismatch
+                 - If more than one file found with that name: (C{None}, C{None})
+                 - If valid pool file found: (C{True}, C{PoolFile object})
+                 - If valid pool file not found:
+                     - (C{False}, C{None}) if no file found
+                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
     """
 
     q = session.query(PoolFile).filter_by(filename=filename)
@@ -1194,8 +1232,8 @@ __all__.append('add_poolfile')
 ################################################################################
 
 class Fingerprint(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def __init__(self, fingerprint = None):
+        self.fingerprint = fingerprint
 
     def __repr__(self):
         return '<Fingerprint %s>' % self.fingerprint
@@ -1296,9 +1334,17 @@ class Keyring(object):
             esclist[x] = "%c" % (int(esclist[x][2:],16))
         return "".join(esclist)
 
-    def load_keys(self, keyring):
+    def parse_address(self, uid):
+        """parses uid and returns a tuple of real name and email address"""
         import email.Utils
+        (name, address) = email.Utils.parseaddr(uid)
+        name = re.sub(r"\s*[(].*[)]", "", name)
+        name = self.de_escape_gpg_str(name)
+        if name == "":
+            name = uid
+        return (name, address)
 
+    def load_keys(self, keyring):
         if not self.keyring_id:
             raise Exception('Must be initialized with database information')
 
@@ -1310,24 +1356,20 @@ class Keyring(object):
             field = line.split(":")
             if field[0] == "pub":
                 key = field[4]
-                (name, addr) = email.Utils.parseaddr(field[9])
-                name = re.sub(r"\s*[(].*[)]", "", name)
-                if name == "" or addr == "" or "@" not in addr:
-                    name = field[9]
-                    addr = "invalid-uid"
-                name = self.de_escape_gpg_str(name)
-                self.keys[key] = {"email": addr}
-                if name != "":
+                self.keys[key] = {}
+                (name, addr) = self.parse_address(field[9])
+                if "@" in addr:
+                    self.keys[key]["email"] = addr
                     self.keys[key]["name"] = name
-                self.keys[key]["aliases"] = [name]
                 self.keys[key]["fingerprints"] = []
                 signingkey = True
             elif key and field[0] == "sub" and len(field) >= 12:
                 signingkey = ("s" in field[11])
             elif key and field[0] == "uid":
-                (name, addr) = email.Utils.parseaddr(field[9])
-                if name and name not in self.keys[key]["aliases"]:
-                    self.keys[key]["aliases"].append(name)
+                (name, addr) = self.parse_address(field[9])
+                if "email" not in self.keys[key] and "@" in addr:
+                    self.keys[key]["email"] = addr
+                    self.keys[key]["name"] = name
             elif signingkey and field[0] == "fpr":
                 self.keys[key]["fingerprints"].append(field[9])
                 self.fpr_lookup[field[9]] = key
@@ -1375,7 +1417,7 @@ class Keyring(object):
         byname = {}
         any_invalid = False
         for x in self.keys.keys():
-            if self.keys[x]["email"] == "invalid-uid":
+            if "email" not in self.keys[x]:
                 any_invalid = True
                 self.keys[x]["uid"] = format % "invalid-uid"
             else:
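
parse_address pulls the uid-splitting out of load_keys so the pub and uid branches
can share it. A behaviour sketch; the uid string is a made-up example and the class
is instantiated bare, as the other ORM-mapped classes here allow:

    k = Keyring()
    (name, address) = k.parse_address('Joe Example (Debian) <joe@example.org>')
    # name    -> 'Joe Example'    (the "(Debian)" comment is stripped by the re.sub)
    # address -> 'joe@example.org'
    # load_keys then only records the entry when '@' appears in the address part
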
@@ -1437,70 +1479,14 @@ class DBChange(object):
     def __repr__(self):
         return '<DBChange %s>' % self.changesname
 
-    def upload_into_db(self, u, path):
-        cnf = Config()
-        session = DBConn().session().object_session(self)
-
-        files = []
-        for chg_fn, entry in u.pkg.files.items():
-            try:
-                f = open(os.path.join(path, chg_fn))
-                cpf = ChangePendingFile()
-                cpf.filename = chg_fn
-                cpf.size = entry['size']
-                cpf.md5sum = entry['md5sum']
-
-                if entry.has_key('sha1sum'):
-                    cpf.sha1sum = entry['sha1sum']
-                else:
-                    f.seek(0)
-                    cpf.sha1sum = apt_pkg.sha1sum(f)
-
-                if entry.has_key('sha256sum'):
-                    cpf.sha256sum = entry['sha256sum']
-                else:
-                    f.seek(0)
-                    cpf.sha256sum = apt_pkg.sha256sum(f)
-
-                session.add(cpf)
-                files.append(cpf)
-                f.close()
-
-            except IOError:
-                # Can't find the file, try to look it up in the pool
-                from utils import poolify
-                poolname = poolify(entry["source"], entry["component"])
-                l = get_location(cnf["Dir::Pool"], entry["component"], session=session)
-
-                found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
-                                                 entry['size'],
-                                                 entry["md5sum"],
-                                                 l.location_id,
-                                                 session=session)
-
-                if found is None:
-                    Logger.log(["E: Found multiple files for pool (%s) for %s" % (chg_fn, entry["component"])])
-                elif found is False and poolfile is not None:
-                    Logger.log(["E: md5sum/size mismatch for %s in pool" % (chg_fn)])
-                else:
-                    if poolfile is None:
-                        Logger.log(["E: Could not find %s in pool" % (chg_fn)])
-                    else:
-                        chg.poolfiles.append(poolfile)
-
-        chg.files = files
-
     def clean_from_queue(self):
         session = DBConn().session().object_session(self)
 
         # Remove changes_pool_files entries
-        for pf in self.poolfiles:
-            self.poolfiles.remove(pf)
+        self.poolfiles = []
 
-        # Remove change
-        for cf in self.files:
-            self.files.remove(cf)
+        # Remove changes_pending_files references
+        self.files = []
 
         # Clear out of queue
         self.in_queue = None
@@ -1513,15 +1499,15 @@ def get_dbchange(filename, session=None):
     """
     returns DBChange object for given C{filename}.
 
-    @type archive: string
-    @param archive: the name of the arhive
+    @type filename: string
+    @param filename: the name of the file
 
     @type session: Session
     @param session: Optional SQLA session object (a temporary one will be
     generated if not supplied)
 
-    @rtype: Archive
-    @return: Archive object for the given name (None if not present)
+    @rtype: DBChange
+    @return: DBChange object for the given filename (C{None} if not present)
     """
 
     q = session.query(DBChange).filter_by(changesname=filename)
@@ -1551,13 +1537,13 @@ def get_location(location, component=None, archive=None, session=None):
     and archive
 
     @type location: string
-    @param location: the path of the location, e.g. I{/srv/ftp.debian.org/ftp/pool/}
+    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
 
     @type component: string
     @param component: the component name (if None, no restriction applied)
 
     @type archive: string
-    @param archive_id: the archive name (if None, no restriction applied)
+    @param archive: the archive name (if None, no restriction applied)
 
     @rtype: Location / None
     @return: Either a Location object or None if one can't be found
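
A lookup sketch for get_location; the path, component and archive values are
illustrative:

    session = DBConn().session()

    loc = get_location('/srv/ftp-master.debian.org/ftp/pool/',
                       component='main', archive='ftp-master', session=session)
    if loc is not None:
        # the numeric id is what e.g. check_poolfile() expects
        print loc.location_id
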
@@ -1813,16 +1799,38 @@ __all__.append('get_override_type')
 
 ################################################################################
 
-class PendingContentAssociation(object):
+class DebContents(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<DebContents %s: %s>' % (self.package.package,self.file)
+
+__all__.append('DebContents')
+
+class UdebContents(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<UdebContents %s: %s>' % (self.package.package,self.file)
+
+__all__.append('UdebContents')
+
+class PendingBinContents(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<PendingContentAssociation %s>' % self.pca_id
+        return '<PendingBinContents %s>' % self.contents_id
 
-__all__.append('PendingContentAssociation')
+__all__.append('PendingBinContents')
 
-def insert_pending_content_paths(package, fullpaths, session=None):
+def insert_pending_content_paths(package,
+                                 is_udeb,
+                                 fullpaths,
+                                 session=None):
     """
     Make sure given paths are temporarily associated with given
     package
 
@@ -1851,32 +1859,27 @@ def insert_pending_content_paths(package, fullpaths, session=None):
     arch_id = arch.arch_id
 
     # Remove any already existing recorded files for this package
-    q = session.query(PendingContentAssociation)
+    q = session.query(PendingBinContents)
     q = q.filter_by(package=package['Package'])
     q = q.filter_by(version=package['Version'])
     q = q.filter_by(architecture=arch_id)
     q.delete()
 
-    # Insert paths
-    pathcache = {}
     for fullpath in fullpaths:
-        (path, filename) = os.path.split(fullpath)
-
-        if path.startswith( "./" ):
-            path = path[2:]
-
-        filepath_id = get_or_set_contents_path_id(path, session)
-        filename_id = get_or_set_contents_file_id(filename, session)
-
-        pathcache[fullpath] = (filepath_id, filename_id)
+        if fullpath.startswith( "./" ):
+            fullpath = fullpath[2:]
 
-    for fullpath, dat in pathcache.items():
-        pca = PendingContentAssociation()
+        pca = PendingBinContents()
         pca.package = package['Package']
         pca.version = package['Version']
-        pca.filepath_id = dat[0]
-        pca.filename_id = dat[1]
+        pca.file = fullpath
         pca.architecture = arch_id
+
+        if is_udeb:
+            pca.type = 8 # gross
+        else:
+            pca.type = 7 # also gross
+
         session.add(pca)
 
     # Only commit if we set up the session ourself
@@ -1935,6 +1938,31 @@
 
 __all__.append('get_policy_queue')
 
+@session_wrapper
+def get_policy_queue_from_path(pathname, session=None):
+    """
+    Returns PolicyQueue object for given C{path name}
+
+    @type pathname: string
+    @param pathname: The path
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given path
+    """
+
+    q = session.query(PolicyQueue).filter_by(path=pathname)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_policy_queue_from_path')
+
 ################################################################################
 
 class Priority(object):
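
The new helper mirrors get_policy_queue but keys on the queue's on-disk path, which
is what code walking the queue directories actually knows. A sketch with an
illustrative path:

    session = DBConn().session()

    queue = get_policy_queue_from_path('/srv/ftp-master.debian.org/queue/new',
                                       session=session)
    if queue is None:
        print 'no policy queue is configured for that path'
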
@@ -2094,8 +2122,8 @@ def source_exists(source, source_version, suites = ["any"], session=None):
       1. exact match     => 1.0-3
       2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1
 
-    @type package: string
-    @param package: package source name
+    @type source: string
+    @param source: source name
 
     @type source_version: string
     @param source_version: expected source version
@@ -2178,8 +2206,8 @@ def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=
     @type source: str
     @param source: DBSource package name to search for
 
-    @type source: str or None
-    @param source: DBSource version name to search for or None if not applicable
+    @type version: str or None
+    @param version: DBSource version name to search for or None if not applicable
 
     @type dm_upload_allowed: bool
     @param dm_upload_allowed: If None, no effect. If True or False, only
@@ -2328,26 +2356,27 @@ def add_dsc_to_db(u, filename, session=None):
     # Add the src_uploaders to the DB
     uploader_ids = [source.maintainer_id]
     if u.pkg.dsc.has_key("uploaders"):
-        for up in u.pkg.dsc["uploaders"].split(","):
+        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
             up = up.strip()
             uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
 
     added_ids = {}
-    for up in uploader_ids:
-        if added_ids.has_key(up):
-            utils.warn("Already saw uploader %s for source %s" % (up, source.source))
+    for up_id in uploader_ids:
+        if added_ids.has_key(up_id):
+            import utils
+            utils.warn("Already saw uploader %s for source %s" % (up_id, source.source))
             continue
 
-        added_ids[u]=1
+        added_ids[up_id]=1
 
         su = SrcUploader()
-        su.maintainer_id = up
+        su.maintainer_id = up_id
         su.source_id = source.source_id
         session.add(su)
 
     session.flush()
 
-    return dsc_component, dsc_location_id, pfs
+    return source, dsc_component, dsc_location_id, pfs
 
 __all__.append('add_dsc_to_db')
 
@@ -2386,7 +2415,7 @@ def add_deb_to_db(u, filename, session=None):
         bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
         if len(bin_sources) != 1:
             raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                  (bin.package, bin.version, bin.architecture.arch_string,
+                                  (bin.package, bin.version, entry["architecture"],
                                    filename, bin.binarytype, u.pkg.changes["fingerprint"])
         bin.source_id = bin_sources[0].source_id
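
The new Uploaders parsing above splits on a tab after rewriting '>, ' separators, so
a comma inside a maintainer's name no longer produces bogus entries. A worked sketch
on an example field value:

    uploaders = 'Doe, John <john@example.org>, Jane Roe <jane@example.org>'

    parts = uploaders.replace(">, ", ">\t").split("\t")
    # -> ['Doe, John <john@example.org>', 'Jane Roe <jane@example.org>']
    # the old split(",") would have yielded 'Doe' and ' John <john@example.org>'
    # as two separate, broken uploader entries
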
@@ -2475,15 +2504,12 @@ SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                  ('Priority', 'priority'),
                  ('NotAutomatic', 'notautomatic'),
                  ('CopyChanges', 'copychanges'),
-                 ('CopyDotDak', 'copydotdak'),
-                 ('CommentsDir', 'commentsdir'),
-                 ('OverrideSuite', 'overridesuite'),
-                 ('ChangelogBase', 'changelogbase')]
-
+                 ('OverrideSuite', 'overridesuite')]
 
 class Suite(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def __init__(self, suite_name = None, version = None):
+        self.suite_name = suite_name
+        self.version = version
 
     def __repr__(self):
         return '<Suite %s>' % self.suite_name
@@ -2509,38 +2535,31 @@ class Suite(object):
 
         return "\n".join(ret)
 
-__all__.append('Suite')
-
-@session_wrapper
-def get_suite_architecture(suite, architecture, session=None):
-    """
-    Returns a SuiteArchitecture object given C{suite} and ${arch} or None if it
-    doesn't exist
+    def get_architectures(self, skipsrc=False, skipall=False):
+        """
+        Returns list of Architecture objects
 
-    @type suite: str
-    @param suite: Suite name to search for
+        @type skipsrc: boolean
+        @param skipsrc: Whether to skip returning the 'source' architecture entry
+        (Default False)
 
-    @type architecture: str
-    @param architecture: Architecture name to search for
+        @type skipall: boolean
+        @param skipall: Whether to skip returning the 'all' architecture entry
+        (Default False)
 
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
+        @rtype: list
+        @return: list of Architecture objects for the given name (may be empty)
+        """
 
-    @rtype: SuiteArchitecture
-    @return: the SuiteArchitecture object or None
-    """
+        q = object_session(self).query(Architecture). \
+            filter(Architecture.suites.contains(self))
+        if skipsrc:
+            q = q.filter(Architecture.arch_string != 'source')
+        if skipall:
+            q = q.filter(Architecture.arch_string != 'all')
+        return q.order_by(Architecture.arch_string).all()
 
-    q = session.query(SuiteArchitecture)
-    q = q.join(Architecture).filter_by(arch_string=architecture)
-    q = q.join(Suite).filter_by(suite_name=suite)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_suite_architecture')
+__all__.append('Suite')
 
 @session_wrapper
 def get_suite(suite, session=None):
@@ -2569,22 +2588,14 @@ __all__.append('get_suite')
 
 ################################################################################
 
-class SuiteArchitecture(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<SuiteArchitecture (%s, %s)>' % (self.suite_id, self.arch_id)
-
-__all__.append('SuiteArchitecture')
-
+# TODO: should be removed because the implementation is too trivial
 @session_wrapper
 def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     """
     Returns list of Architecture objects for given C{suite} name
 
-    @type source: str
-    @param source: Suite name to search for
+    @type suite: str
+    @param suite: Suite name to search for
 
     @type skipsrc: boolean
     @param skipsrc: Whether to skip returning the 'source' architecture entry
@@ -2602,19 +2613,7 @@ def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     @return: list of Architecture objects for the given name (may be empty)
     """
 
-    q = session.query(Architecture)
-    q = q.join(SuiteArchitecture)
-    q = q.join(Suite).filter_by(suite_name=suite)
-
-    if skipsrc:
-        q = q.filter(Architecture.arch_string != 'source')
-
-    if skipall:
-        q = q.filter(Architecture.arch_string != 'all')
-
-    q = q.order_by('arch_string')
-
-    return q.all()
+    return get_suite(suite, session).get_architectures(skipsrc, skipall)
 
 __all__.append('get_suite_architectures')
 
@@ -2657,8 +2656,9 @@ __all__.append('get_suite_src_formats')
 ################################################################################
 
 class Uid(object):
-    def __init__(self, *args, **kwargs):
-        pass
+    def __init__(self, uid = None, name = None):
+        self.uid = uid
+        self.name = name
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -2677,28 +2677,6 @@ class Uid(object):
 
 __all__.append('Uid')
 
-@session_wrapper
-def add_database_user(uidname, session=None):
-    """
-    Adds a database user
-
-    @type uidname: string
-    @param uidname: The uid of the user to add
-
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied). If not passed, a commit will be performed at
-    the end of the function, otherwise the caller is responsible for commiting.
-
-    @rtype: Uid
-    @return: the uid object for the given uidname
-    """
-
-    session.execute("CREATE USER :uid", {'uid': uidname})
-    session.commit_or_flush()
-
-__all__.append('add_database_user')
-
 @session_wrapper
 def get_or_set_uid(uidname, session=None):
     """
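
With architectures reachable through the new relation, per-suite queries become
method calls on the Suite object, and the module-level get_suite_architectures
above reduces to a thin wrapper around them. A usage sketch (the suite name is
illustrative):

    session = DBConn().session()

    suite = get_suite('unstable', session)
    if suite is not None:
        for arch in suite.get_architectures(skipsrc=True, skipall=True):
            print arch.arch_string    # real buildable architectures only
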
@@ -2773,7 +2751,7 @@ class DBConn(object):
         self.__createconn()
 
     def __setuptables(self):
-        tables = (
+        tables_with_primary = (
             'architecture',
             'archive',
             'bin_associations',
@@ -2781,30 +2759,22 @@ class DBConn(object):
             'binary_acl',
             'binary_acl_map',
             'build_queue',
-            'build_queue_files',
+            'changelogs_text',
             'component',
             'config',
-            'content_associations',
-            'content_file_names',
-            'content_file_paths',
             'changes_pending_binaries',
             'changes_pending_files',
-            'changes_pending_files_map',
             'changes_pending_source',
-            'changes_pending_source_files',
-            'changes_pool_files',
             'dsc_files',
             'files',
             'fingerprint',
             'keyrings',
-            'changes',
             'keyring_acl_map',
             'location',
             'maintainer',
             'new_comments',
-            'override',
             'override_type',
-            'pending_content_associations',
+            'pending_bin_contents',
             'policy_queue',
             'priority',
             'section',
@@ -2814,20 +2784,76 @@ class DBConn(object):
             'src_format',
             'src_uploaders',
             'suite',
+            'uid',
+            'upload_blocks',
+            # The following tables have primary keys but sqlalchemy
+            # version 0.5 fails to reflect them correctly with database
+            # versions before upgrade #41.
+            #'changes',
+            #'build_queue_files',
+        )
+
+        tables_no_primary = (
+            'bin_contents',
+            'changes_pending_files_map',
+            'changes_pending_source_files',
+            'changes_pool_files',
+            'deb_contents',
+            'override',
             'suite_architectures',
             'suite_src_formats',
             'suite_build_queue_copy',
-            'uid',
-            'upload_blocks',
+            'udeb_contents',
+            # see the comment above
+            'changes',
+            'build_queue_files',
         )
 
-        for table_name in tables:
+        views = (
+            'almost_obsolete_all_associations',
+            'almost_obsolete_src_associations',
+            'any_associations_source',
+            'bin_assoc_by_arch',
+            'bin_associations_binaries',
+            'binaries_suite_arch',
+            'binfiles_suite_component_arch',
+            'changelogs',
+            'file_arch_suite',
+            'newest_all_associations',
+            'newest_any_associations',
+            'newest_source',
+            'newest_src_association',
+            'obsolete_all_associations',
+            'obsolete_any_associations',
+            'obsolete_any_by_all_associations',
+            'obsolete_src_associations',
+            'source_suite',
+            'src_associations_bin',
+            'src_associations_src',
+            'suite_arch_by_name',
+        )
+
+        # Sqlalchemy version 0.5 fails to reflect the SERIAL type
+        # correctly and that is why we have to use a workaround. It can
+        # be removed as soon as we switch to version 0.6.
+        for table_name in tables_with_primary:
+            table = Table(table_name, self.db_meta, \
+                Column('id', Integer, primary_key = True), \
+                autoload=True, useexisting=True)
+            setattr(self, 'tbl_%s' % table_name, table)
+
+        for table_name in tables_no_primary:
             table = Table(table_name, self.db_meta, autoload=True)
             setattr(self, 'tbl_%s' % table_name, table)
 
+        for view_name in views:
+            view = Table(view_name, self.db_meta, autoload=True)
+            setattr(self, 'view_%s' % view_name, view)
+
     def __setupmappers(self):
         mapper(Architecture, self.tbl_architecture,
-               properties = dict(arch_id = self.tbl_architecture.c.id))
+               properties = dict(arch_id = self.tbl_architecture.c.id,
+                                 suites = relation(Suite, secondary=self.tbl_suite_architectures, backref='architectures', order_by='suite_name')))
 
         mapper(Archive, self.tbl_archive,
                properties = dict(archive_id = self.tbl_archive.c.id,
@@ -2840,6 +2866,30 @@ class DBConn(object):
                                  binary_id = self.tbl_bin_associations.c.bin,
                                  binary = relation(DBBinary)))
 
+        mapper(PendingBinContents, self.tbl_pending_bin_contents,
+               properties = dict(contents_id =self.tbl_pending_bin_contents.c.id,
+                                 filename = self.tbl_pending_bin_contents.c.filename,
+                                 package = self.tbl_pending_bin_contents.c.package,
+                                 version = self.tbl_pending_bin_contents.c.version,
+                                 arch = self.tbl_pending_bin_contents.c.arch,
+                                 otype = self.tbl_pending_bin_contents.c.type))
+
+        mapper(DebContents, self.tbl_deb_contents,
+               properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
+                                 package=self.tbl_deb_contents.c.package,
+                                 suite=self.tbl_deb_contents.c.suite,
+                                 arch=self.tbl_deb_contents.c.arch,
+                                 section=self.tbl_deb_contents.c.section,
+                                 filename=self.tbl_deb_contents.c.filename))
+
+        mapper(UdebContents, self.tbl_udeb_contents,
+               properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
+                                 package=self.tbl_udeb_contents.c.package,
+                                 suite=self.tbl_udeb_contents.c.suite,
+                                 arch=self.tbl_udeb_contents.c.arch,
+                                 section=self.tbl_udeb_contents.c.section,
+                                 filename=self.tbl_udeb_contents.c.filename))
+
         mapper(BuildQueue, self.tbl_build_queue,
                properties = dict(queue_id = self.tbl_build_queue.c.id))
 
@@ -2952,6 +3002,8 @@ class DBConn(object):
                                  source_files = relation(ChangePendingFile,
                                                          secondary=self.tbl_changes_pending_source_files,
                                                          backref="pending_sources")))
+
+
         mapper(KeyringACLMap, self.tbl_keyring_acl_map,
                properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
                                  keyring = relation(Keyring, backref="keyring_acl_map"),
@@ -2974,6 +3026,7 @@ class DBConn(object):
         mapper(Override, self.tbl_override,
                properties = dict(suite_id = self.tbl_override.c.suite,
                                  suite = relation(Suite),
+                                 package = self.tbl_override.c.package,
                                  component_id = self.tbl_override.c.component,
                                  component = relation(Component),
                                  priority_id = self.tbl_override.c.priority,
@@ -2994,7 +3047,8 @@ class DBConn(object):
                properties = dict(priority_id = self.tbl_priority.c.id))
 
         mapper(Section, self.tbl_section,
-               properties = dict(section_id = self.tbl_section.c.id))
+               properties = dict(section_id = self.tbl_section.c.id,
+                                 section=self.tbl_section.c.section))
 
         mapper(DBSource, self.tbl_source,
                properties = dict(source_id = self.tbl_source.c.id,
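
The Architecture mapper now carries the many-to-many C{suites} relation (with an
C{architectures} backref on Suite) that replaces the SuiteArchitecture association
class removed in the final hunk below. A sketch of both directions of the relation;
the architecture and suite names are examples:

    session = DBConn().session()

    amd64 = get_architecture('amd64', session)
    for s in amd64.suites:            # ordered by suite_name via the relation
        print s.suite_name

    sid = get_suite('sid', session)
    for a in sid.architectures:       # the backref defined on the mapper
        print a.arch_string
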
@@ -3043,12 +3097,6 @@ class DBConn(object):
                                  policy_queue = relation(PolicyQueue),
                                  copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy)))
 
-        mapper(SuiteArchitecture, self.tbl_suite_architectures,
-               properties = dict(suite_id = self.tbl_suite_architectures.c.suite,
-                                 suite = relation(Suite, backref='suitearchitectures'),
-                                 arch_id = self.tbl_suite_architectures.c.architecture,
-                                 architecture = relation(Architecture)))
-
         mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
                properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
                                  suite = relation(Suite, backref='suitesrcformats'),