diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index 94884d5c..c0295e17 100755
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -33,11 +33,14 @@
 ################################################################################
 
+import apt_pkg
 import os
+from os.path import normpath
 import re
 import psycopg2
 import traceback
 import commands
+import signal
 
 try:
     # python >= 2.6
@@ -49,6 +52,8 @@ except:
 from datetime import datetime, timedelta
 from errno import ENOENT
 from tempfile import mkstemp, mkdtemp
+from subprocess import Popen, PIPE
+from tarfile import TarFile
 
 from inspect import getargspec
 
@@ -56,8 +61,10 @@ import sqlalchemy
 from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
     Text, ForeignKey
 from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
-    backref, MapperExtension, EXT_CONTINUE, object_mapper
+    backref, MapperExtension, EXT_CONTINUE, object_mapper, clear_mappers
 from sqlalchemy import types as sqltypes
+from sqlalchemy.orm.collections import attribute_mapped_collection
+from sqlalchemy.ext.associationproxy import association_proxy
 
 # Don't remove this, we re-export the exceptions to scripts which import us
 from sqlalchemy.exc import *
@@ -67,16 +74,16 @@ from sqlalchemy.orm.exc import NoResultFound
 # in the database
 from config import Config
 from textutils import fix_maintainer
-from dak_exceptions import DBUpdateError, NoSourceFieldError
+from dak_exceptions import DBUpdateError, NoSourceFieldError, FileExistsError
 
 # suppress some deprecation warnings in squeeze related to sqlalchemy
 import warnings
 warnings.filterwarnings('ignore', \
     "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
     SADeprecationWarning)
-# TODO: sqlalchemy needs some extra configuration to correctly reflect
-# the ind_deb_contents_* indexes - we ignore the warnings at the moment
-warnings.filterwarnings("ignore", 'Predicate of partial index', SAWarning)
+warnings.filterwarnings('ignore', \
+    "Predicate of partial index .* ignored during reflection", \
+    SAWarning)
 
 ################################################################################
 
@@ -103,11 +110,11 @@ class DebVersion(UserDefinedType):
         return None
 
 sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version in ["0.5", "0.6"]:
+if sa_major_version in ["0.5", "0.6", "0.7"]:
     from sqlalchemy.databases import postgres
     postgres.ischema_names['debversion'] = DebVersion
 else:
-    raise Exception("dak only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
+    raise Exception("dak only ported to SQLA versions 0.5 to 0.7. See daklib/dbconn.py")
 
 ################################################################################
 
@@ -202,7 +209,9 @@ class ORMObject(object):
                 # list
                 value = len(value)
             elif hasattr(value, 'count'):
-                # query
+                # query (but not during validation)
+                if self.in_validation:
+                    continue
                 value = value.count()
             else:
                 raise KeyError('Do not understand property %s.'
                     % property)
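The hunk above widens the supported SQLAlchemy range to 0.7; the DebVersion type it keeps registering is largely elided by the diff context. For orientation, a minimal sketch of such a custom type (method names from the SQLAlchemy 0.5-0.7 UserDefinedType API; the bodies shown here are an assumption, not the full dak implementation):

    from sqlalchemy import types as sqltypes

    class DebVersion(sqltypes.UserDefinedType):
        # Expose PostgreSQL's custom 'debversion' column type; values pass
        # through as plain strings and comparisons happen server-side.
        def get_col_spec(self):
            return 'debversion'

        def bind_processor(self, dialect):
            return None

        def result_processor(self, dialect, coltype):
            return None

    # Reflection support, as done above for SQLA 0.5 to 0.7:
    # postgres.ischema_names['debversion'] = DebVersion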
@@ -256,6 +265,8 @@ class ORMObject(object):
     validation_message = \
         "Validation failed because property '%s' must not be empty in object\n%s"
 
+    in_validation = False
+
     def validate(self):
         '''
         This function validates the not NULL constraints as returned by
@@ -270,8 +281,11 @@ class ORMObject(object):
                 getattr(self, property + '_id') is not None:
                 continue
             if not hasattr(self, property) or getattr(self, property) is None:
-                raise DBUpdateError(self.validation_message % \
-                    (property, str(self)))
+                # str() might lead to races due to a 2nd flush
+                self.in_validation = True
+                message = self.validation_message % (property, str(self))
+                self.in_validation = False
+                raise DBUpdateError(message)
 
     @classmethod
     @session_wrapper
@@ -478,6 +492,11 @@ __all__.append('BinContents')
 
 ################################################################################
 
+def subprocess_setup():
+    # Python installs a SIGPIPE handler by default. This is usually not what
+    # non-Python subprocesses expect.
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
 class DBBinary(ORMObject):
     def __init__(self, package = None, source = None, version = None, \
         maintainer = None, architecture = None, poolfile = None, \
@@ -490,18 +509,75 @@ class DBBinary(ORMObject):
         self.poolfile = poolfile
         self.binarytype = binarytype
 
+    @property
+    def pkid(self):
+        return self.binary_id
+
     def properties(self):
         return ['package', 'version', 'maintainer', 'source', 'architecture', \
             'poolfile', 'binarytype', 'fingerprint', 'install_date', \
-            'suites_count', 'binary_id', 'contents_count']
+            'suites_count', 'binary_id', 'contents_count', 'extra_sources']
 
     def not_null_constraints(self):
         return ['package', 'version', 'maintainer', 'source', 'poolfile', \
             'binarytype']
 
+    metadata = association_proxy('key', 'value')
+
     def get_component_name(self):
         return self.poolfile.location.component.component_name
 
+    def scan_contents(self):
+        '''
+        Yields the contents of the package. Only regular files are yielded and
+        the path names are normalized after converting them from either utf-8
+        or iso8859-1 encoding. It yields the string ' <EMPTY PACKAGE>' if the
+        package does not contain any regular file.
+        '''
+        fullpath = self.poolfile.fullpath
+        dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE,
+            preexec_fn = subprocess_setup)
+        tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
+        for member in tar.getmembers():
+            if not member.isdir():
+                name = normpath(member.name)
+                # enforce proper utf-8 encoding
+                try:
+                    name.decode('utf-8')
+                except UnicodeDecodeError:
+                    name = name.decode('iso8859-1').encode('utf-8')
+                yield name
+        tar.close()
+        dpkg.stdout.close()
+        dpkg.wait()
+
+    def read_control(self):
+        '''
+        Reads the control information from a binary.
+
+        @rtype: text
+        @return: stanza text of the control section.
+        '''
+        import apt_inst
+        fullpath = self.poolfile.fullpath
+        deb_file = open(fullpath, 'r')
+        stanza = apt_inst.debExtractControl(deb_file)
+        deb_file.close()
+
+        return stanza
+
+    def read_control_fields(self):
+        '''
+        Reads the control information from a binary and returns it as a
+        dictionary.
+
+        @rtype: dict
+        @return: fields of the control section as a dictionary.
+        '''
+        import apt_pkg
+        stanza = self.read_control()
+        return apt_pkg.TagSection(stanza)
+
 __all__.append('DBBinary')
 
 @session_wrapper
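DBBinary.scan_contents() above lists a package's files by piping dpkg-deb --fsys-tarfile straight into the tarfile module, so nothing is unpacked to disk. The same technique as a standalone sketch (the .deb path is hypothetical):

    import signal
    from subprocess import Popen, PIPE
    from tarfile import TarFile

    def subprocess_setup():
        # Python installs a SIGPIPE handler; restore the default for dpkg-deb.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    def list_deb_contents(path):
        dpkg = Popen(['dpkg-deb', '--fsys-tarfile', path], stdout = PIPE,
            preexec_fn = subprocess_setup)
        tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
        names = [member.name for member in tar if not member.isdir()]
        tar.close()
        dpkg.stdout.close()
        dpkg.wait()
        return names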
@@ -638,6 +714,7 @@ class BuildQueue(object):
         try:
             # Grab files we want to include
             newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+            newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
             # Write file list with newer files
             (fl_fd, fl_name) = mkstemp()
             for n in newer:
@@ -678,7 +755,7 @@ class BuildQueue(object):
         # Crude hack with open and append, but this whole section is and should be redone.
         if self.notautomatic:
             release=open("Release", "a")
-            release.write("NotAutomatic: yes")
+            release.write("NotAutomatic: yes\n")
             release.close()
 
         # Sign if necessary
@@ -730,6 +807,7 @@ class BuildQueue(object):
 
         # Grab files older than our execution time
         older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+        older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
 
         for o in older:
             killdb = False
@@ -740,7 +818,7 @@ class BuildQueue(object):
                     Logger.log(["I: Removing %s from the queue" % o.fullpath])
                     os.unlink(o.fullpath)
                     killdb = True
-            except OSError, e:
+            except OSError as e:
                 # If it wasn't there, don't worry
                 if e.errno == ENOENT:
                     killdb = True
@@ -757,9 +835,7 @@ class BuildQueue(object):
             if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
                 continue
 
-            try:
-                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
-            except NoResultFound:
+            if not self.contains_filename(f):
                 fp = os.path.join(self.path, f)
                 if dryrun:
                     Logger.log(["I: Would remove unused link %s" % fp])
@@ -770,6 +846,18 @@ class BuildQueue(object):
                     except OSError:
-                        Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
+                        Logger.log(["E: Failed to unlink unreferenced file %s" % fp])
 
+    def contains_filename(self, filename):
+        """
+        @rtype: bool
+        @return: True if filename is supposed to be in the queue; False otherwise
+        """
+        session = DBConn().session().object_session(self)
+        if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
+            return True
+        elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
+            return True
+        return False
+
     def add_file_from_pool(self, poolfile):
         """Copies a file into the pool.  Assumes that the PoolFile object is
         attached to the same SQLAlchemy session as the Queue object is.
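The cleanup code above ages BuildQueueFile and BuildQueuePolicyFile rows the same way: a file survives for stay_of_execution seconds after it was last referenced. Reduced to its core, the keep/remove test is (helper and values hypothetical):

    from datetime import datetime, timedelta

    def should_keep(lastused, stay_of_execution, starttime):
        # lastused comes from the queue file row; stay_of_execution is the
        # per-queue grace period in seconds.
        return lastused + timedelta(seconds = stay_of_execution) > starttime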
@@ -779,8 +867,8 @@ class BuildQueue(object):
 
         # Check if we have a file of this name or this ID already
         for f in self.queuefiles:
-            if f.fileid is not None and f.fileid == poolfile.file_id or \
-               f.poolfile.filename == poolfile_basename:
+            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
+               (f.poolfile is not None and f.poolfile.filename == poolfile_basename):
                 # In this case, update the BuildQueueFile entry so we
                 # don't remove it too early
                 f.lastused = datetime.now()
@@ -790,7 +878,6 @@ class BuildQueue(object):
         # Prepare BuildQueueFile object
         qf = BuildQueueFile()
         qf.build_queue_id = self.queue_id
-        qf.lastused = datetime.now()
         qf.filename = poolfile_basename
 
         targetpath = poolfile.fullpath
@@ -806,6 +893,9 @@ class BuildQueue(object):
             else:
                 os.symlink(targetpath, queuepath)
                 qf.fileid = poolfile.file_id
+        except FileExistsError:
+            if not poolfile.identical_to(queuepath):
+                raise
         except OSError:
             return None
 
@@ -814,6 +904,64 @@ class BuildQueue(object):
 
         return qf
 
+    def add_changes_from_policy_queue(self, policyqueue, changes):
+        """
+        Copies a changes file from a policy queue together with its poolfiles.
+
+        @type policyqueue: PolicyQueue
+        @param policyqueue: policy queue to copy the changes from
+
+        @type changes: DBChange
+        @param changes: changes to copy to this build queue
+        """
+        for policyqueuefile in changes.files:
+            self.add_file_from_policy_queue(policyqueue, policyqueuefile)
+        for poolfile in changes.poolfiles:
+            self.add_file_from_pool(poolfile)
+
+    def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
+        """
+        Copies a file from a policy queue.
+        Assumes that the policyqueuefile is attached to the same SQLAlchemy
+        session as the Queue object is. The caller is responsible for
+        committing after calling this function.
+
+        @type policyqueue: PolicyQueue
+        @param policyqueue: policy queue to copy the file from
+
+        @type policyqueuefile: ChangePendingFile
+        @param policyqueuefile: file to be added to the build queue
+        """
+        session = DBConn().session().object_session(policyqueuefile)
+
+        # Is the file already there?
+        try:
+            f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
+            f.lastused = datetime.now()
+            return f
+        except NoResultFound:
+            pass # continue below
+
+        # We have to add the file.
+        f = BuildQueuePolicyFile()
+        f.build_queue = self
+        f.file = policyqueuefile
+        f.filename = policyqueuefile.filename
+
+        source = os.path.join(policyqueue.path, policyqueuefile.filename)
+        target = f.fullpath
+        try:
+            # Always copy files from policy queues as they might move around.
+            import utils
+            utils.copy(source, target)
+        except FileExistsError:
+            if not policyqueuefile.identical_to(target):
+                raise
+        except OSError:
+            return None
+
+        session.add(f)
+        return f
 
 __all__.append('BuildQueue')
 
@@ -846,6 +994,10 @@ __all__.append('get_build_queue')
 ################################################################################
 
 class BuildQueueFile(object):
+    """
+    BuildQueueFile represents a file in a build queue coming from a pool.
+    """
+
     def __init__(self, *args, **kwargs):
         pass
 
@@ -861,6 +1013,27 @@ __all__.append('BuildQueueFile')
 
 ################################################################################
 
+class BuildQueuePolicyFile(object):
+    """
+    BuildQueuePolicyFile represents a file in a build queue that comes from a
+    policy queue (and not a pool).
+    """
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    #@property
+    #def filename(self):
+    #    return self.file.filename
+
+    @property
+    def fullpath(self):
+        return os.path.join(self.build_queue.path, self.filename)
+
+__all__.append('BuildQueuePolicyFile')
+
+################################################################################
+
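add_file_from_pool() and add_file_from_policy_queue() above share one idiom: try to place the file and, if a file of that name already exists, accept it only when it is bit-identical. Note that FileExistsError is dak's own exception from dak_exceptions (imported at the top of this file; it predates the Python 3 builtin) and is raised by utils.copy(). Condensed sketch (helper name hypothetical; identical_to() is defined below for ChangePendingFile and PoolFile):

    import utils
    from dak_exceptions import FileExistsError

    def place_file(dbfile, source, target):
        try:
            utils.copy(source, target)
        except FileExistsError:
            if not dbfile.identical_to(target):
                raise   # same name but different content: a real conflict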
+ """ + + def __init__(self, *args, **kwargs): + pass + + #@property + #def filename(self): + # return self.file.filename + + @property + def fullpath(self): + return os.path.join(self.build_queue.path, self.filename) + +__all__.append('BuildQueuePolicyFile') + +################################################################################ + class ChangePendingBinary(object): def __init__(self, *args, **kwargs): pass @@ -879,6 +1052,24 @@ class ChangePendingFile(object): def __repr__(self): return '' % self.change_pending_file_id + def identical_to(self, filename): + """ + compare size and hash with the given file + + @rtype: bool + @return: true if the given file has the same size and hash as this object; false otherwise + """ + st = os.stat(filename) + if self.size != st.st_size: + return False + + f = open(filename, "r") + sha256sum = apt_pkg.sha256sum(f) + if sha256sum != self.sha256sum: + return False + + return True + __all__.append('ChangePendingFile') ################################################################################ @@ -911,8 +1102,8 @@ class Component(ORMObject): return NotImplemented def properties(self): - return ['component_name', 'component_id', 'description', 'location', \ - 'meets_dfsg'] + return ['component_name', 'component_id', 'description', \ + 'location_count', 'meets_dfsg', 'overrides_count'] def not_null_constraints(self): return ['component_name'] @@ -943,6 +1134,19 @@ def get_component(component, session=None): __all__.append('get_component') +@session_wrapper +def get_component_names(session=None): + """ + Returns list of strings of component names. + + @rtype: list + @return: list of strings of component names + """ + + return [ x.component_name for x in session.query(Component).all() ] + +__all__.append('get_component_names') + ################################################################################ class DBConfig(object): @@ -1195,6 +1399,17 @@ __all__.append('get_dscfiles') ################################################################################ +class ExternalOverride(ORMObject): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % (self.package, self.key, self.value) + +__all__.append('ExternalOverride') + +################################################################################ + class PoolFile(ORMObject): def __init__(self, filename = None, location = None, filesize = -1, \ md5sum = None): @@ -1217,6 +1432,24 @@ class PoolFile(ORMObject): def not_null_constraints(self): return ['filename', 'md5sum', 'location'] + def identical_to(self, filename): + """ + compare size and hash with the given file + + @rtype: bool + @return: true if the given file has the same size and hash as this object; false otherwise + """ + st = os.stat(filename) + if self.filesize != st.st_size: + return False + + f = open(filename, "r") + sha256sum = apt_pkg.sha256sum(f) + if sha256sum != self.sha256sum: + return False + + return True + __all__.append('PoolFile') @session_wrapper @@ -1452,7 +1685,7 @@ class Keyring(object): key = None signingkey = False - for line in k.xreadlines(): + for line in k: field = line.split(":") if field[0] == "pub": key = field[4] @@ -1559,6 +1792,34 @@ def get_keyring(keyring, session=None): __all__.append('get_keyring') +@session_wrapper +def get_active_keyring_paths(session=None): + """ + @rtype: list + @return: list of active keyring paths + """ + return [ x.keyring_name for x in session.query(Keyring).filter(Keyring.active == True).order_by(desc(Keyring.priority)).all() ] + 
+
+@session_wrapper
+def get_primary_keyring_path(session=None):
+    """
+    Get the full path to the highest priority active keyring
+
+    @rtype: str or None
+    @return: path to the active keyring with the highest priority or None if no
+             keyring is configured
+    """
+    keyrings = get_active_keyring_paths()
+
+    if len(keyrings) > 0:
+        return keyrings[0]
+    else:
+        return None
+
+__all__.append('get_primary_keyring_path')
+
 ################################################################################
 
 class KeyringACLMap(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1814,12 +2075,22 @@ __all__.append('get_new_comments')
 
 ################################################################################
 
-class Override(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class Override(ORMObject):
+    def __init__(self, package = None, suite = None, component = None, overridetype = None, \
+        section = None, priority = None):
+        self.package = package
+        self.suite = suite
+        self.component = component
+        self.overridetype = overridetype
+        self.section = section
+        self.priority = priority
 
-    def __repr__(self):
-        return '<Override %s (%s)>' % (self.package, self.suite_id)
+    def properties(self):
+        return ['package', 'suite', 'component', 'overridetype', 'section', \
+            'priority']
+
+    def not_null_constraints(self):
+        return ['package', 'suite', 'component', 'overridetype', 'section']
 
 __all__.append('Override')
 
@@ -1873,12 +2144,15 @@ __all__.append('get_override')
 
 ################################################################################
 
-class OverrideType(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class OverrideType(ORMObject):
+    def __init__(self, overridetype = None):
+        self.overridetype = overridetype
 
-    def __repr__(self):
-        return '<OverrideType %s>' % self.overridetype
+    def properties(self):
+        return ['overridetype', 'overridetype_id', 'overrides_count']
+
+    def not_null_constraints(self):
+        return ['overridetype']
 
 __all__.append('OverrideType')
 
@@ -1909,111 +2183,6 @@ __all__.append('get_override_type')
 
 ################################################################################
 
-class DebContents(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<DebContents %s: %s>' % (self.package.package,self.file)
-
-__all__.append('DebContents')
-
-
-class UdebContents(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<UdebContents %s: %s>' % (self.package.package,self.file)
-
-__all__.append('UdebContents')
-
-class PendingBinContents(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<PendingBinContents %s>' % self.contents_id
-
-__all__.append('PendingBinContents')
-
-def insert_pending_content_paths(package,
-                                 is_udeb,
-                                 fullpaths,
-                                 session=None):
-    """
-    Make sure given paths are temporarily associated with given
-    package
-
-    @type package: dict
-    @param package: the package to associate with should have been read in from the binary control file
-    @type fullpaths: list
-    @param fullpaths: the list of paths of the file being associated with the binary
-    @type session: SQLAlchemy session
-    @param session: Optional SQLAlchemy session. If this is passed, the caller
-    is responsible for ensuring a transaction has begun and committing the
-    results or rolling back based on the result code. If not passed, a commit
-    will be performed at the end of the function
-
-    @return: True upon success, False if there is a problem
-    """
-
-    privatetrans = False
-
-    if session is None:
-        session = DBConn().session()
-        privatetrans = True
-
-    try:
-        arch = get_architecture(package['Architecture'], session)
-        arch_id = arch.arch_id
-
-        # Remove any already existing recorded files for this package
-        q = session.query(PendingBinContents)
-        q = q.filter_by(package=package['Package'])
-        q = q.filter_by(version=package['Version'])
-        q = q.filter_by(architecture=arch_id)
-        q.delete()
-
-        for fullpath in fullpaths:
-
-            if fullpath.startswith( "./" ):
-                fullpath = fullpath[2:]
-
-            pca = PendingBinContents()
-            pca.package = package['Package']
-            pca.version = package['Version']
-            pca.file = fullpath
-            pca.architecture = arch_id
-
-            if isudeb:
-                pca.type = 8 # gross
-            else:
-                pca.type = 7 # also gross
-            session.add(pca)
-
-        # Only commit if we set up the session ourself
-        if privatetrans:
-            session.commit()
-            session.close()
-        else:
-            session.flush()
-
-        return True
-    except Exception, e:
-        traceback.print_exc()
-
-        # Only rollback if we set up the session ourself
-        if privatetrans:
-            session.rollback()
-            session.close()
-
-        return False
-
-__all__.append('insert_pending_content_paths')
-
-################################################################################
-
 class PolicyQueue(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -2075,9 +2244,16 @@ __all__.append('get_policy_queue_from_path')
 
 ################################################################################
 
-class Priority(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class Priority(ORMObject):
+    def __init__(self, priority = None, level = None):
+        self.priority = priority
+        self.level = level
+
+    def properties(self):
+        return ['priority', 'priority_id', 'level', 'overrides_count']
+
+    def not_null_constraints(self):
+        return ['priority', 'level']
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -2091,9 +2267,6 @@ class Priority(ORMObject):
         # This signals to use the normal comparison operator
         return NotImplemented
 
-    def __repr__(self):
-        return '<Priority %s (%s)>' % (self.priority, self.priority_id)
-
 __all__.append('Priority')
 
 @session_wrapper
@@ -2145,9 +2318,15 @@ __all__.append('get_priorities')
 
 ################################################################################
 
-class Section(object):
-    def __init__(self, *args, **kwargs):
-        pass
+class Section(ORMObject):
+    def __init__(self, section = None):
+        self.section = section
+
+    def properties(self):
+        return ['section', 'section_id', 'overrides_count']
+
+    def not_null_constraints(self):
+        return ['section']
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -2161,9 +2340,6 @@ class Section(ORMObject):
         # This signals to use the normal comparison operator
         return NotImplemented
 
-    def __repr__(self):
-        return '<Section %s>' % self.section
-
 __all__.append('Section')
 
 @session_wrapper
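Priority and Section now derive from ORMObject like Override and OverrideType before them, so their hand-written __repr__ methods can go: the base class builds its repr/json output from properties() and validate() enforces not_null_constraints(). Roughly, under those assumptions:

    s = Section()                  # section is None
    try:
        s.validate()               # checks not_null_constraints()
    except DBUpdateError:
        pass                       # 'section' must not be empty

    s = Section(section = 'python')
    s.validate()                   # passes; properties() feeds repr/json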
@@ -2215,6 +2391,72 @@ __all__.append('get_sections')
 
 ################################################################################
 
+class SrcContents(ORMObject):
+    def __init__(self, file = None, source = None):
+        self.file = file
+        self.source = source
+
+    def properties(self):
+        return ['file', 'source']
+
+__all__.append('SrcContents')
+
+################################################################################
+
+from debian.debfile import Deb822
+
+# Temporary Deb822 subclass to fix bugs with : handling; see #597249
+class Dak822(Deb822):
+    def _internal_parser(self, sequence, fields=None):
+        # The key is non-whitespace, non-colon characters before any colon.
+        key_part = r"^(?P<key>[^: \t\n\r\f\v]+)\s*:\s*"
+        single = re.compile(key_part + r"(?P<data>\S.*?)\s*$")
+        multi = re.compile(key_part + r"$")
+        multidata = re.compile(r"^\s(?P<data>.+?)\s*$")
+
+        wanted_field = lambda f: fields is None or f in fields
+
+        if isinstance(sequence, basestring):
+            sequence = sequence.splitlines()
+
+        curkey = None
+        content = ""
+        for line in self.gpg_stripped_paragraph(sequence):
+            m = single.match(line)
+            if m:
+                if curkey:
+                    self[curkey] = content
+
+                if not wanted_field(m.group('key')):
+                    curkey = None
+                    continue
+
+                curkey = m.group('key')
+                content = m.group('data')
+                continue
+
+            m = multi.match(line)
+            if m:
+                if curkey:
+                    self[curkey] = content
+
+                if not wanted_field(m.group('key')):
+                    curkey = None
+                    continue
+
+                curkey = m.group('key')
+                content = ""
+                continue
+
+            m = multidata.match(line)
+            if m:
+                content += '\n' + line # XXX not m.group('data')?
+                continue
+
+        if curkey:
+            self[curkey] = content
+
+
 class DBSource(ORMObject):
     def __init__(self, source = None, version = None, maintainer = None, \
         changedby = None, poolfile = None, install_date = None):
@@ -2225,15 +2467,54 @@ class DBSource(ORMObject):
         self.poolfile = poolfile
         self.install_date = install_date
 
+    @property
+    def pkid(self):
+        return self.source_id
+
     def properties(self):
         return ['source', 'source_id', 'maintainer', 'changedby', \
             'fingerprint', 'poolfile', 'version', 'suites_count', \
-            'install_date', 'binaries_count']
+            'install_date', 'binaries_count', 'uploaders_count']
 
     def not_null_constraints(self):
         return ['source', 'version', 'install_date', 'maintainer', \
             'changedby', 'poolfile', 'install_date']
 
+    def read_control_fields(self):
+        '''
+        Reads the control information from a dsc
+
+        @rtype: Dak822
+        @return: the dsc information as a dictionary-like Deb822 object
+        '''
+        fullpath = self.poolfile.fullpath
+        fields = Dak822(open(self.poolfile.fullpath, 'r'))
+        return fields
+
+    metadata = association_proxy('key', 'value')
+
+    def get_component_name(self):
+        return self.poolfile.location.component.component_name
+
+    def scan_contents(self):
+        '''
+        Returns a set of names for non-directories. The path names are
+        normalized after converting them from either utf-8 or iso8859-1
+        encoding.
+        '''
+        fullpath = self.poolfile.fullpath
+        from daklib.contents import UnpackedSource
+        unpacked = UnpackedSource(fullpath)
+        fileset = set()
+        for name in unpacked.get_all_filenames():
+            # enforce proper utf-8 encoding
+            try:
+                name.decode('utf-8')
+            except UnicodeDecodeError:
+                name = name.decode('iso8859-1').encode('utf-8')
+            fileset.add(name)
+        return fileset
+
 __all__.append('DBSource')
 
 @session_wrapper
@@ -2272,20 +2553,14 @@ def source_exists(source, source_version, suites = ["any"], session=None):
             q = session.query(DBSource).filter_by(source=source). \
                filter(DBSource.version.in_([source_version, orig_source_version]))
 
         if suite != "any":
-            # source must exist in suite X, or in some other suite that's
-            # mapped to X, recursively... silent-maps are counted too,
-            # unreleased-maps aren't.
-            maps = cnf.ValueList("SuiteMappings")[:]
-            maps.reverse()
-            maps = [ m.split() for m in maps ]
-            maps = [ (x[1], x[2]) for x in maps
-                            if x[0] == "map" or x[0] == "silent-map" ]
-            s = [suite]
-            for x in maps:
-                if x[1] in s and x[0] not in s:
-                    s.append(x[0])
-
-            q = q.filter(DBSource.suites.any(Suite.suite_name.in_(s)))
+            # source must exist in 'suite' or a suite that is enhanced by 'suite'
+            s = get_suite(suite, session)
+            if s:
+                enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
+                considered_suites = [ vc.reference for vc in enhances_vcs ]
+                considered_suites.append(s)
+
+                q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))
 
         if q.count() > 0:
             continue
@@ -2377,8 +2652,45 @@ def get_source_in_suite(source, suite, session=None):
 
 __all__.append('get_source_in_suite')
 
+@session_wrapper
+def import_metadata_into_db(obj, session=None):
+    """
+    This routine works on either DBBinary or DBSource objects and imports
+    their metadata into the database
+    """
+    fields = obj.read_control_fields()
+    for k in fields.keys():
+        try:
+            # Try raw ASCII
+            val = str(fields[k])
+        except UnicodeEncodeError:
+            # Fall back to UTF-8
+            try:
+                val = fields[k].encode('utf-8')
+            except UnicodeEncodeError:
+                # Finally try iso8859-1
+                val = fields[k].encode('iso8859-1')
+                # Otherwise we allow the exception to percolate up and we cause
+                # a reject as someone is playing silly buggers
+
+        obj.metadata[get_or_set_metadatakey(k, session)] = val
+
+    session.commit_or_flush()
+
+__all__.append('import_metadata_into_db')
+
 
 ################################################################################
 
+def split_uploaders(uploaders_list):
+    '''
+    Split the Uploaders field into the individual uploaders and yield each of
+    them. Beware: email addresses might contain commas.
+    '''
+    import re
+    for uploader in re.sub(">[ ]*,", ">\t", uploaders_list).split("\t"):
+        yield uploader.strip()
+
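A quick example of the splitting rule above: only a comma that follows a closing angle bracket separates entries, so commas inside a display name survive (addresses hypothetical):

    uploaders = 'Alice Example <alice@example.org>, Doe, John <john@example.org>'
    list(split_uploaders(uploaders))
    # -> ['Alice Example <alice@example.org>', 'Doe, John <john@example.org>']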
 @session_wrapper
 def add_dsc_to_db(u, filename, session=None):
     entry = u.pkg.files[filename]
@@ -2388,7 +2700,11 @@ def add_dsc_to_db(u, filename, session=None):
     source.source = u.pkg.dsc["source"]
     source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
     source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
-    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+    # If Changed-By isn't available, fall back to maintainer
+    if u.pkg.changes.has_key("changed-by"):
+        source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+    else:
+        source.changedby_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
     source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
     source.install_date = datetime.now().date()
 
@@ -2461,25 +2777,12 @@ def add_dsc_to_db(u, filename, session=None):
         session.add(df)
 
     # Add the src_uploaders to the DB
-    uploader_ids = [source.maintainer_id]
+    session.flush()
+    session.refresh(source)
+    source.uploaders = [source.maintainer]
     if u.pkg.dsc.has_key("uploaders"):
-        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
-            up = up.strip()
-            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
-
-    added_ids = {}
-    for up_id in uploader_ids:
-        if added_ids.has_key(up_id):
-            import utils
-            utils.warn("Already saw uploader %s for source %s" % (up_id, source.source))
-            continue
-
-        added_ids[up_id]=1
-
-        su = SrcUploader()
-        su.maintainer_id = up_id
-        su.source_id = source.source_id
-        session.add(su)
+        for up in split_uploaders(u.pkg.dsc["uploaders"]):
+            source.uploaders.append(get_or_set_maintainer(up, session))
 
     session.flush()
 
@@ -2520,13 +2823,38 @@ def add_deb_to_db(u, filename, session=None):
 
     # Find source id
     bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+
+    # If we couldn't find anything and the upload contains Arch: source,
+    # fall back to trying the source package, source version uploaded
+    # This maintains backwards compatibility with previous dak behaviour
+    # and deals with slightly broken binary debs which don't properly
+    # declare their source package name
+    if len(bin_sources) == 0:
+        if u.pkg.changes["architecture"].has_key("source") \
+           and u.pkg.dsc.has_key("source") and u.pkg.dsc.has_key("version"):
+            bin_sources = get_sources_from_name(u.pkg.dsc["source"], u.pkg.dsc["version"], session=session)
+
+    # If we couldn't find a source here, we reject
+    # TODO: Fix this so that it doesn't kill process-upload and instead just
+    #       performs a reject.  To be honest, we should probably spot this
+    #       *much* earlier than here
     if len(bin_sources) != 1:
-        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+        raise NoSourceFieldError("Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
                                   (bin.package, bin.version, entry["architecture"],
-                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
+                                   filename, bin.binarytype, u.pkg.changes["fingerprint"]))
 
     bin.source_id = bin_sources[0].source_id
 
+    if entry.has_key("built-using"):
+        for srcname, version in entry["built-using"]:
+            exsources = get_sources_from_name(srcname, version, session=session)
+            if len(exsources) != 1:
+                raise NoSourceFieldError("Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
+                                          (srcname, version, bin.package, bin.version, entry["architecture"],
+                                           filename, bin.binarytype, u.pkg.changes["fingerprint"]))
+
+            bin.extra_sources.append(exsources[0])
+
     # Add and flush object so it has an ID
     session.add(bin)
 
@@ -2543,7 +2871,7 @@ def add_deb_to_db(u, filename, session=None):
     #     session.rollback()
     #     raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
 
-    return poolfile
+    return bin, poolfile
 
 __all__.append('add_deb_to_db')
 
@@ -2571,17 +2899,6 @@ __all__.append('SrcFormat')
 
 ################################################################################
 
-class SrcUploader(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<SrcUploader %s>' % self.uploader_id
-
-__all__.append('SrcUploader')
-
-################################################################################
-
 SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                  ('SuiteID', 'suite_id'),
                  ('Version', 'version'),
@@ -2606,10 +2923,11 @@ class Suite(ORMObject):
         self.version = version
 
     def properties(self):
-        return ['suite_name', 'version', 'sources_count', 'binaries_count']
+        return ['suite_name', 'version', 'sources_count', 'binaries_count', \
+            'overrides_count']
 
     def not_null_constraints(self):
-        return ['suite_name', 'version']
+        return ['suite_name']
 
     def __eq__(self, val):
         if isinstance(val, str):
@@ -2673,6 +2991,12 @@ class Suite(ORMObject):
         return session.query(DBSource).filter_by(source = source). \
             with_parent(self)
 
+    def get_overridesuite(self):
+        if self.overridesuite is None:
+            return self
+        else:
+            return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()
+
 __all__.append('Suite')
 
 @session_wrapper
@@ -2702,11 +3026,11 @@ __all__.append('get_suite')
 
 ################################################################################
 
-# TODO: should be removed because the implementation is too trivial
 @session_wrapper
 def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     """
-    Returns list of Architecture objects for given C{suite} name
+    Returns list of Architecture objects for given C{suite} name. The list is
+    empty if suite does not exist.
 
     @type suite: str
    @param suite: Suite name to search for
 
     @type session: Session
     @param session: Optional SQL session object (a temporary one will be
        generated if not supplied)
 
     @rtype: list
     @return: list of Architecture objects for the given name (may be empty)
     """
 
-    return get_suite(suite, session).get_architectures(skipsrc, skipall)
+    try:
+        return get_suite(suite, session).get_architectures(skipsrc, skipall)
+    except AttributeError:
+        return []
 
 __all__.append('get_suite_architectures')
 
 ################################################################################
 
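Suite.get_overridesuite() above supports suites that keep their overrides under another suite's name (the overridesuite column). Combined with the 'overrides' backref added in the mapper section further down, a lookup could read (suite and package names hypothetical):

    suite = get_suite('testing-proposed-updates', session)
    osuite = suite.get_overridesuite()   # the 'testing' Suite, if so configured
    q = osuite.overrides.filter_by(package = 'hello')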
-class SuiteSrcFormat(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<SuiteSrcFormat (%s, %s)>' % (self.suite_id, self.src_format_id)
-
-__all__.append('SuiteSrcFormat')
-
-@session_wrapper
-def get_suite_src_formats(suite, session=None):
-    """
-    Returns list of allowed SrcFormat for C{suite}.
-
-    @type suite: str
-    @param suite: Suite name to search for
-
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: list
-    @return: the list of allowed source formats for I{suite}
-    """
-
-    q = session.query(SrcFormat)
-    q = q.join(SuiteSrcFormat)
-    q = q.join(Suite).filter_by(suite_name=suite)
-    q = q.order_by('format_name')
-
-    return q.all()
-
-__all__.append('get_suite_src_formats')
-
-################################################################################
-
 class Uid(ORMObject):
     def __init__(self, uid = None, name = None):
         self.uid = uid
@@ -2853,6 +3144,113 @@ __all__.append('UploadBlock')
 
 ################################################################################
 
+class MetadataKey(ORMObject):
+    def __init__(self, key = None):
+        self.key = key
+
+    def properties(self):
+        return ['key']
+
+    def not_null_constraints(self):
+        return ['key']
+
+__all__.append('MetadataKey')
+
+@session_wrapper
+def get_or_set_metadatakey(keyname, session=None):
+    """
+    Returns MetadataKey object for given keyname.
+
+    If no matching keyname is found, a row is inserted.
+
+    @type keyname: string
+    @param keyname: The keyname to add
+
+    @type session: SQLAlchemy
+    @param session: Optional SQL session object (a temporary one will be
+    generated if not supplied). If not passed, a commit will be performed at
+    the end of the function, otherwise the caller is responsible for committing.
+
+    @rtype: MetadataKey
+    @return: the metadatakey object for the given keyname
+    """
+
+    q = session.query(MetadataKey).filter_by(key=keyname)
+
+    try:
+        ret = q.one()
+    except NoResultFound:
+        ret = MetadataKey(keyname)
+        session.add(ret)
+        session.commit_or_flush()
+
+    return ret
+
+__all__.append('get_or_set_metadatakey')
+
+################################################################################
+
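The metadata attribute that DBBinary and DBSource gained earlier is an association_proxy over the BinaryMetadata/SourceMetadata rows defined next: indexing it with a MetadataKey reads and writes the value column transparently, which is what import_metadata_into_db() relies on. Usage sketch (session and flush handling elided; 'binary' is some DBBinary instance):

    key = get_or_set_metadatakey('Package', session)
    binary.metadata[key] = 'hello'   # creates a BinaryMetadata row behind the scenes
    print binary.metadata[key]       # -> 'hello'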
+class BinaryMetadata(ORMObject):
+    def __init__(self, key = None, value = None, binary = None):
+        self.key = key
+        self.value = value
+        self.binary = binary
+
+    def properties(self):
+        return ['binary', 'key', 'value']
+
+    def not_null_constraints(self):
+        return ['value']
+
+__all__.append('BinaryMetadata')
+
+################################################################################
+
+class SourceMetadata(ORMObject):
+    def __init__(self, key = None, value = None, source = None):
+        self.key = key
+        self.value = value
+        self.source = source
+
+    def properties(self):
+        return ['source', 'key', 'value']
+
+    def not_null_constraints(self):
+        return ['value']
+
+__all__.append('SourceMetadata')
+
+################################################################################
+
+class VersionCheck(ORMObject):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def properties(self):
+        #return ['suite_id', 'check', 'reference_id']
+        return ['check']
+
+    def not_null_constraints(self):
+        return ['suite', 'check', 'reference']
+
+__all__.append('VersionCheck')
+
+@session_wrapper
+def get_version_checks(suite_name, check = None, session = None):
+    suite = get_suite(suite_name, session)
+    if not suite:
+        # Make sure that what we return is iterable so that list comprehensions
+        # involving this don't cause a traceback
+        return []
+    q = session.query(VersionCheck).filter_by(suite=suite)
+    if check:
+        q = q.filter_by(check=check)
+    return q.all()
+
+__all__.append('get_version_checks')
+
+################################################################################
+
 class DBConn(object):
     """
     database module init.
     """
@@ -2868,68 +3266,65 @@ class DBConn(object):
         self.__createconn()
 
     def __setuptables(self):
-        tables_with_primary = (
+        tables = (
             'architecture',
             'archive',
             'bin_associations',
+            'bin_contents',
             'binaries',
+            'binaries_metadata',
             'binary_acl',
             'binary_acl_map',
             'build_queue',
+            'build_queue_files',
+            'build_queue_policy_files',
             'changelogs_text',
+            'changes',
             'component',
             'config',
             'changes_pending_binaries',
             'changes_pending_files',
             'changes_pending_source',
+            'changes_pending_files_map',
+            'changes_pending_source_files',
+            'changes_pool_files',
             'dsc_files',
+            'external_overrides',
+            'extra_src_references',
             'files',
             'fingerprint',
             'keyrings',
             'keyring_acl_map',
             'location',
             'maintainer',
+            'metadata_keys',
             'new_comments',
+            # TODO: the maintainer column in table override should be removed.
+            'override',
             'override_type',
-            'pending_bin_contents',
             'policy_queue',
             'priority',
             'section',
             'source',
             'source_acl',
+            'source_metadata',
             'src_associations',
+            'src_contents',
             'src_format',
             'src_uploaders',
             'suite',
-            'uid',
-            'upload_blocks',
-            # The following tables have primary keys but sqlalchemy
-            # version 0.5 fails to reflect them correctly with database
-            # versions before upgrade #41.
-            'changes',
-            'build_queue_files',
-        )
-
-        tables_no_primary = (
-            'changes_pending_files_map',
-            'changes_pending_source_files',
-            'changes_pool_files',
-            'deb_contents',
-            'override',
             'suite_architectures',
-            'suite_src_formats',
             'suite_build_queue_copy',
-            'udeb_contents',
-            # see the comment above
-            #'changes',
-            #'build_queue_files',
+            'suite_src_formats',
+            'uid',
+            'upload_blocks',
+            'version_check',
         )
 
         views = (
            'almost_obsolete_all_associations',
            'almost_obsolete_src_associations',
            'any_associations_source',
-           'bin_assoc_by_arch',
            'bin_associations_binaries',
            'binaries_suite_arch',
            'binfiles_suite_component_arch',
@@ -2949,27 +3344,11 @@ class DBConn(object):
            'suite_arch_by_name',
         )
 
-        # Sqlalchemy version 0.5 fails to reflect the SERIAL type
-        # correctly and that is why we have to use a workaround. It can
-        # be removed as soon as we switch to version 0.6.
-        for table_name in tables_with_primary:
+        for table_name in tables:
             table = Table(table_name, self.db_meta, \
-                Column('id', Integer, primary_key = True), \
                 autoload=True, useexisting=True)
             setattr(self, 'tbl_%s' % table_name, table)
 
-        for table_name in tables_no_primary:
-            table = Table(table_name, self.db_meta, autoload=True)
-            setattr(self, 'tbl_%s' % table_name, table)
-
-        # bin_contents needs special attention until update #41 has been
-        # applied
-        self.tbl_bin_contents = Table('bin_contents', self.db_meta, \
-            Column('file', Text, primary_key = True),
-            Column('binary_id', Integer, ForeignKey('binaries.id'), \
-                primary_key = True),
-            autoload=True, useexisting=True)
-
         for view_name in views:
             view = Table(view_name, self.db_meta, autoload=True)
             setattr(self, 'view_%s' % view_name, view)
@@ -2978,38 +3357,14 @@ class DBConn(object):
         mapper(Architecture, self.tbl_architecture,
             properties = dict(arch_id = self.tbl_architecture.c.id,
                suites = relation(Suite, secondary=self.tbl_suite_architectures,
-                   order_by='suite_name',
-                   backref=backref('architectures', order_by='arch_string'))),
+                   order_by=self.tbl_suite.c.suite_name,
+                   backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
             extension = validator)
 
         mapper(Archive, self.tbl_archive,
                properties = dict(archive_id = self.tbl_archive.c.id,
                                  archive_name = self.tbl_archive.c.name))
 
-        mapper(PendingBinContents, self.tbl_pending_bin_contents,
-               properties = dict(contents_id =self.tbl_pending_bin_contents.c.id,
-                                 filename = self.tbl_pending_bin_contents.c.filename,
-                                 package = self.tbl_pending_bin_contents.c.package,
-                                 version = self.tbl_pending_bin_contents.c.version,
-                                 arch = self.tbl_pending_bin_contents.c.arch,
-                                 otype = self.tbl_pending_bin_contents.c.type))
-
-        mapper(DebContents, self.tbl_deb_contents,
-               properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
-                                 package=self.tbl_deb_contents.c.package,
-                                 suite=self.tbl_deb_contents.c.suite,
-                                 arch=self.tbl_deb_contents.c.arch,
-                                 section=self.tbl_deb_contents.c.section,
-                                 filename=self.tbl_deb_contents.c.filename))
-
-        mapper(UdebContents, self.tbl_udeb_contents,
-               properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
-                                 package=self.tbl_udeb_contents.c.package,
-                                 suite=self.tbl_udeb_contents.c.suite,
-                                 arch=self.tbl_udeb_contents.c.arch,
-                                 section=self.tbl_udeb_contents.c.section,
-                                 filename=self.tbl_udeb_contents.c.filename))
-
         mapper(BuildQueue, self.tbl_build_queue,
                properties = dict(queue_id = self.tbl_build_queue.c.id))
 
@@ -3017,6 +3372,11 @@ class DBConn(object):
         mapper(BuildQueueFile, self.tbl_build_queue_files,
                properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
                                  poolfile = relation(PoolFile,
                                                      backref='buildqueueinstances')))
 
+        mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
+               properties = dict(
+                build_queue = relation(BuildQueue, backref='policy_queue_files'),
+                file = relation(ChangePendingFile, lazy='joined')))
+
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
                                  package = self.tbl_binaries.c.package,
@@ -3034,7 +3394,11 @@ class DBConn(object):
                                  fingerprint = relation(Fingerprint),
                                  install_date = self.tbl_binaries.c.install_date,
                                  suites = relation(Suite, secondary=self.tbl_bin_associations,
-                                     backref=backref('binaries', lazy='dynamic'))),
+                                     backref=backref('binaries', lazy='dynamic')),
+                                 extra_sources = relation(DBSource, secondary=self.tbl_extra_src_references,
+                                     backref=backref('extra_binary_references', lazy='dynamic')),
+                                 key = relation(BinaryMetadata, cascade='all',
+                                     collection_class=attribute_mapped_collection('key'))),
                extension = validator)
 
         mapper(BinaryACL, self.tbl_binary_acl,
@@ -3060,6 +3424,13 @@ class DBConn(object):
                                  poolfile_id = self.tbl_dsc_files.c.file,
                                  poolfile = relation(PoolFile)))
 
+        mapper(ExternalOverride, self.tbl_external_overrides,
+               properties = dict(
+                suite_id = self.tbl_external_overrides.c.suite,
+                suite = relation(Suite),
+                component_id = self.tbl_external_overrides.c.component,
+                component = relation(Component)))
+
         mapper(PoolFile, self.tbl_files,
                properties = dict(file_id = self.tbl_files.c.id,
                                  filesize = self.tbl_files.c.size,
@@ -3140,8 +3511,7 @@ class DBConn(object):
         mapper(Location, self.tbl_location,
                properties = dict(location_id = self.tbl_location.c.id,
                                  component_id = self.tbl_location.c.component,
-                                 component = relation(Component, \
-                                     backref=backref('location', uselist = False)),
+                                 component = relation(Component, backref='location'),
                                  archive_id = self.tbl_location.c.archive,
                                  archive = relation(Archive),
                                  # FIXME: the 'type' column is old cruft and
@@ -3162,16 +3532,21 @@ class DBConn(object):
 
         mapper(Override, self.tbl_override,
                properties = dict(suite_id = self.tbl_override.c.suite,
-                                 suite = relation(Suite),
+                                 suite = relation(Suite, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  package = self.tbl_override.c.package,
                                  component_id = self.tbl_override.c.component,
-                                 component = relation(Component),
+                                 component = relation(Component, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  priority_id = self.tbl_override.c.priority,
-                                 priority = relation(Priority),
+                                 priority = relation(Priority, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  section_id = self.tbl_override.c.section,
-                                 section = relation(Section),
+                                 section = relation(Section, \
+                                    backref=backref('overrides', lazy='dynamic')),
                                  overridetype_id = self.tbl_override.c.type,
-                                 overridetype = relation(OverrideType)))
+                                 overridetype = relation(OverrideType, \
+                                    backref=backref('overrides', lazy='dynamic'))))
 
         mapper(OverrideType, self.tbl_override_type,
                properties = dict(overridetype = self.tbl_override_type.c.type,
@@ -3200,7 +3575,10 @@ class DBConn(object):
                                      primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
                                  suites = relation(Suite, secondary=self.tbl_src_associations,
                                      backref=backref('sources', lazy='dynamic')),
-                                 srcuploaders = relation(SrcUploader)),
+                                 uploaders = relation(Maintainer,
+                                     secondary=self.tbl_src_uploaders),
+                                 key = relation(SourceMetadata, cascade='all',
+                                     collection_class=attribute_mapped_collection('key'))),
                extension = validator)
 
         mapper(SourceACL, self.tbl_source_acl,
@@ -3210,28 +3588,15 @@ class DBConn(object):
                properties = dict(src_format_id = self.tbl_src_format.c.id,
                                  format_name = self.tbl_src_format.c.format_name))
 
-        mapper(SrcUploader, self.tbl_src_uploaders,
-               properties = dict(uploader_id = self.tbl_src_uploaders.c.id,
-                                 source_id = self.tbl_src_uploaders.c.source,
-                                 source = relation(DBSource,
-                                     primaryjoin=(self.tbl_src_uploaders.c.source==self.tbl_source.c.id)),
-                                 maintainer_id = self.tbl_src_uploaders.c.maintainer,
-                                 maintainer = relation(Maintainer,
-                                     primaryjoin=(self.tbl_src_uploaders.c.maintainer==self.tbl_maintainer.c.id))))
-
         mapper(Suite, self.tbl_suite,
                properties = dict(suite_id = self.tbl_suite.c.id,
                                  policy_queue = relation(PolicyQueue),
                                  copy_queues = relation(BuildQueue,
-                                     secondary=self.tbl_suite_build_queue_copy)),
+                                     secondary=self.tbl_suite_build_queue_copy),
+                                 srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
+                                     backref=backref('suites', lazy='dynamic'))),
                extension = validator)
 
-        mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
-               properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
-                                 suite = relation(Suite, backref='suitesrcformats'),
-                                 src_format_id = self.tbl_suite_src_formats.c.src_format,
-                                 src_format = relation(SrcFormat)))
-
         mapper(Uid, self.tbl_uid,
                properties = dict(uid_id = self.tbl_uid.c.id,
                                  fingerprint = relation(Fingerprint)),
@@ -3245,37 +3610,116 @@ class DBConn(object):
 
         mapper(BinContents, self.tbl_bin_contents,
             properties = dict(
                 binary = relation(DBBinary,
-                    backref=backref('contents', lazy='dynamic')),
+                    backref=backref('contents', lazy='dynamic', cascade='all')),
                 file = self.tbl_bin_contents.c.file))
 
+        mapper(SrcContents, self.tbl_src_contents,
+            properties = dict(
+                source = relation(DBSource,
+                    backref=backref('contents', lazy='dynamic', cascade='all')),
+                file = self.tbl_src_contents.c.file))
+
+        mapper(MetadataKey, self.tbl_metadata_keys,
+            properties = dict(
+                key_id = self.tbl_metadata_keys.c.key_id,
+                key = self.tbl_metadata_keys.c.key))
+
+        mapper(BinaryMetadata, self.tbl_binaries_metadata,
+            properties = dict(
+                binary_id = self.tbl_binaries_metadata.c.bin_id,
+                binary = relation(DBBinary),
+                key_id = self.tbl_binaries_metadata.c.key_id,
+                key = relation(MetadataKey),
+                value = self.tbl_binaries_metadata.c.value))
+
+        mapper(SourceMetadata, self.tbl_source_metadata,
+            properties = dict(
+                source_id = self.tbl_source_metadata.c.src_id,
+                source = relation(DBSource),
+                key_id = self.tbl_source_metadata.c.key_id,
+                key = relation(MetadataKey),
+                value = self.tbl_source_metadata.c.value))
+
+        mapper(VersionCheck, self.tbl_version_check,
+            properties = dict(
+                suite_id = self.tbl_version_check.c.suite,
+                suite = relation(Suite, primaryjoin=self.tbl_version_check.c.suite==self.tbl_suite.c.id),
+                reference_id = self.tbl_version_check.c.reference,
+                reference = relation(Suite, primaryjoin=self.tbl_version_check.c.reference==self.tbl_suite.c.id, lazy='joined')))
+
     ## Connection functions
     def __createconn(self):
         from config import Config
         cnf = Config()
-        if cnf["DB::Host"]:
+        if cnf.has_key("DB::Service"):
+            connstr = "postgresql://service=%s" % cnf["DB::Service"]
+        elif cnf.has_key("DB::Host"):
             # TCP/IP
-            connstr = "postgres://%s" % cnf["DB::Host"]
-            if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+            connstr = "postgresql://%s" % cnf["DB::Host"]
+            if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
                 connstr += ":%s" % cnf["DB::Port"]
             connstr += "/%s" % cnf["DB::Name"]
         else:
             # Unix Socket
-            connstr = "postgres:///%s" % cnf["DB::Name"]
-            if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+            connstr = "postgresql:///%s" % cnf["DB::Name"]
+            if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
cnf["DB::Port"] != "-1": connstr += "?port=%s" % cnf["DB::Port"] - self.db_pg = create_engine(connstr, echo=self.debug) - self.db_meta = MetaData() - self.db_meta.bind = self.db_pg - self.db_smaker = sessionmaker(bind=self.db_pg, - autoflush=True, - autocommit=False) + engine_args = { 'echo': self.debug } + if cnf.has_key('DB::PoolSize'): + engine_args['pool_size'] = int(cnf['DB::PoolSize']) + if cnf.has_key('DB::MaxOverflow'): + engine_args['max_overflow'] = int(cnf['DB::MaxOverflow']) + if sa_major_version == '0.6' and cnf.has_key('DB::Unicode') and \ + cnf['DB::Unicode'] == 'false': + engine_args['use_native_unicode'] = False + + # Monkey patch a new dialect in in order to support service= syntax + import sqlalchemy.dialects.postgresql + from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2 + class PGDialect_psycopg2_dak(PGDialect_psycopg2): + def create_connect_args(self, url): + if str(url).startswith('postgresql://service='): + # Eww + servicename = str(url)[21:] + return (['service=%s' % servicename], {}) + else: + return PGDialect_psycopg2.create_connect_args(self, url) + + sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak + + try: + self.db_pg = create_engine(connstr, **engine_args) + self.db_meta = MetaData() + self.db_meta.bind = self.db_pg + self.db_smaker = sessionmaker(bind=self.db_pg, + autoflush=True, + autocommit=False) + + self.__setuptables() + self.__setupmappers() + + except OperationalError as e: + import utils + utils.fubar("Cannot connect to database (%s)" % str(e)) - self.__setuptables() - self.__setupmappers() + self.pid = os.getpid() - def session(self): - return self.db_smaker() + def session(self, work_mem = 0): + ''' + Returns a new session object. If a work_mem parameter is provided a new + transaction is started and the work_mem parameter is set for this + transaction. The work_mem parameter is measured in MB. A default value + will be used if the parameter is not set. + ''' + # reinitialize DBConn in new processes + if self.pid != os.getpid(): + clear_mappers() + self.__createconn() + session = self.db_smaker() + if work_mem > 0: + session.execute("SET LOCAL work_mem TO '%d MB'" % work_mem) + return session __all__.append('DBConn')