X-Git-Url: https://git.decadent.org.uk/gitweb/?a=blobdiff_plain;f=daklib%2Fdbconn.py;h=c0295e17273cc02f3d1911ceeafdd9d35c6e4bc7;hb=8c70083c0df3435e068144a27c9fce94b3b5647a;hp=6d954c88884095e1aba61338917d912ddc133660;hpb=0ca34d3d3ba917f3a1b1573534589da1fac25306;p=dak.git

diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index 6d954c88..c0295e17 100755
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -81,6 +81,9 @@ import warnings
 warnings.filterwarnings('ignore', \
     "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
     SADeprecationWarning)
+warnings.filterwarnings('ignore', \
+    "Predicate of partial index .* ignored during reflection", \
+    SAWarning)

 ################################################################################

@@ -107,11 +110,11 @@ class DebVersion(UserDefinedType):
         return None

 sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version in ["0.5", "0.6"]:
+if sa_major_version in ["0.5", "0.6", "0.7"]:
     from sqlalchemy.databases import postgres
     postgres.ischema_names['debversion'] = DebVersion
 else:
-    raise Exception("dak only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
+    raise Exception("dak only ported to SQLA versions 0.5 to 0.7. See daklib/dbconn.py")

 ################################################################################

@@ -815,7 +818,7 @@ class BuildQueue(object):
                 Logger.log(["I: Removing %s from the queue" % o.fullpath])
                 os.unlink(o.fullpath)
                 killdb = True
-            except OSError, e:
+            except OSError as e:
                 # If it wasn't there, don't worry
                 if e.errno == ENOENT:
                     killdb = True
@@ -875,7 +878,6 @@ class BuildQueue(object):
         # Prepare BuildQueueFile object
         qf = BuildQueueFile()
         qf.build_queue_id = self.queue_id
-        qf.lastused = datetime.now()
         qf.filename = poolfile_basename

         targetpath = poolfile.fullpath
@@ -1683,7 +1685,7 @@ class Keyring(object):
         key = None
         signingkey = False

-        for line in k.xreadlines():
+        for line in k:
             field = line.split(":")
             if field[0] == "pub":
                 key = field[4]
@@ -2491,6 +2493,9 @@ class DBSource(ORMObject):

     metadata = association_proxy('key', 'value')

+    def get_component_name(self):
+        return self.poolfile.location.component.component_name
+
     def scan_contents(self):
         '''
         Returns a set of names for non directories. The path names are
@@ -2550,11 +2555,12 @@ def source_exists(source, source_version, suites = ["any"], session=None):
         if suite != "any":
             # source must exist in 'suite' or a suite that is enhanced by 'suite'
             s = get_suite(suite, session)
-            enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
-            considered_suites = [ vc.reference for vc in enhances_vcs ]
-            considered_suites.append(s)
+            if s:
+                enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
+                considered_suites = [ vc.reference for vc in enhances_vcs ]
+                considered_suites.append(s)

-            q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))
+                q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))

         if q.count() > 0:
             continue
@@ -2817,10 +2823,25 @@ def add_deb_to_db(u, filename, session=None):

     # Find source id
     bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+
+    # If we couldn't find anything and the upload contains Arch: source,
+    # fall back to trying the source package, source version uploaded
+    # This maintains backwards compatibility with previous dak behaviour
+    # and deals with slightly broken binary debs which don't properly
+    # declare their source package name
+    if len(bin_sources) == 0:
+        if u.pkg.changes["architecture"].has_key("source") \
+           and u.pkg.dsc.has_key("source") and u.pkg.dsc.has_key("version"):
+            bin_sources = get_sources_from_name(u.pkg.dsc["source"], u.pkg.dsc["version"], session=session)
+
+    # If we couldn't find a source here, we reject
+    # TODO: Fix this so that it doesn't kill process-upload and instead just
+    #       performs a reject.  To be honest, we should probably spot this
+    #       *much* earlier than here
     if len(bin_sources) != 1:
-        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+        raise NoSourceFieldError("Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
                                   (bin.package, bin.version, entry["architecture"],
-                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
+                                   filename, bin.binarytype, u.pkg.changes["fingerprint"]))

     bin.source_id = bin_sources[0].source_id

@@ -2828,9 +2849,9 @@
         for srcname, version in entry["built-using"]:
             exsources = get_sources_from_name(srcname, version, session=session)
             if len(exsources) != 1:
-                raise NoSourceFieldError, "Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
+                raise NoSourceFieldError("Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
                                           (srcname, version, bin.package, bin.version, entry["architecture"],
-                                           filename, bin.binarytype, u.pkg.changes["fingerprint"])
+                                           filename, bin.binarytype, u.pkg.changes["fingerprint"]))

             bin.extra_sources.append(exsources[0])

@@ -3005,11 +3026,11 @@ __all__.append('get_suite')

 ################################################################################

-# TODO: should be removed because the implementation is too trivial
 @session_wrapper
 def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     """
-    Returns list of Architecture objects for given C{suite} name
+    Returns list of Architecture objects for given C{suite} name. The list is
+    empty if suite does not exist.

     @type suite: str
     @param suite: Suite name to search for
@@ -3030,7 +3051,10 @@ def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     @return: list of Architecture objects for the given name (may be empty)
     """

-    return get_suite(suite, session).get_architectures(skipsrc, skipall)
+    try:
+        return get_suite(suite, session).get_architectures(skipsrc, skipall)
+    except AttributeError:
+        return []

 __all__.append('get_suite_architectures')

@@ -3333,8 +3357,8 @@ class DBConn(object):
         mapper(Architecture, self.tbl_architecture,
             properties = dict(arch_id = self.tbl_architecture.c.id,
                suites = relation(Suite, secondary=self.tbl_suite_architectures,
-                   order_by='suite_name',
-                   backref=backref('architectures', order_by='arch_string'))),
+                   order_by=self.tbl_suite.c.suite_name,
+                   backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
             extension = validator)

         mapper(Archive, self.tbl_archive,
@@ -3664,15 +3688,21 @@ class DBConn(object):

         sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak

-        self.db_pg   = create_engine(connstr, **engine_args)
-        self.db_meta = MetaData()
-        self.db_meta.bind = self.db_pg
-        self.db_smaker = sessionmaker(bind=self.db_pg,
-                                      autoflush=True,
-                                      autocommit=False)
+        try:
+            self.db_pg   = create_engine(connstr, **engine_args)
+            self.db_meta = MetaData()
+            self.db_meta.bind = self.db_pg
+            self.db_smaker = sessionmaker(bind=self.db_pg,
+                                          autoflush=True,
+                                          autocommit=False)
+
+            self.__setuptables()
+            self.__setupmappers()
+
+        except OperationalError as e:
+            import utils
+            utils.fubar("Cannot connect to database (%s)" % str(e))

-        self.__setuptables()
-        self.__setupmappers()
         self.pid = os.getpid()

     def session(self, work_mem = 0):
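
A few notes on the techniques this patch uses, with illustrative sketches. None of the helper names below exist in dak unless noted; they are assumptions made for illustration only.

The `except OSError, e` -> `except OSError as e` and `raise NoSourceFieldError, "..."` -> `raise NoSourceFieldError("...")` hunks replace comma-style syntax, which exists only in Python 2, with the `as`/call forms accepted by both Python 2.6+ and Python 3. A minimal sketch of the same ENOENT-tolerant unlink pattern used in the BuildQueue cleanup above (`remove_quietly` is a hypothetical name):

    import errno
    import os

    def remove_quietly(path):
        # Remove a file, ignoring "file not found" just like the
        # queue-cleaning hunk above does via e.errno == ENOENT.
        try:
            os.unlink(path)
        except OSError as e:          # 'as' binding: valid on 2.6+ and 3.x
            if e.errno != errno.ENOENT:
                raise                 # anything but "no such file" is a real error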
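
The new warnings filter at the top of the patch assumes `SAWarning` is in scope. In SQLAlchemy that class lives in `sqlalchemy.exc` and is the base class of `SADeprecationWarning`, which this file already filters. A self-contained sketch of the same filter:

    import warnings
    from sqlalchemy.exc import SADeprecationWarning, SAWarning

    # Quiet the notice SQLAlchemy 0.7 emits when reflecting tables that
    # carry partial indexes (their WHERE predicate is not reflected).
    warnings.filterwarnings('ignore',
                            "Predicate of partial index .* ignored during reflection",
                            SAWarning)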
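
In the `get_suite_architectures()` hunk, `get_suite()` returns None for an unknown suite name, so the old one-liner died with an AttributeError; the new try/except converts that failure into an empty list, matching the updated docstring. An equivalent, more explicit formulation (a sketch, not the committed code):

    def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
        # Sketch only: the committed version reaches the same result by
        # catching the AttributeError raised when get_suite() returns None.
        suite_obj = get_suite(suite, session)   # None if 'suite' is unknown
        if suite_obj is None:
            return []
        return suite_obj.get_architectures(skipsrc, skipall)

The explicit check is slightly narrower than the committed try/except: it cannot accidentally mask an AttributeError raised inside get_architectures() itself.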
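
The final DBConn hunk wraps engine creation, table setup and mapper setup in a single try block and turns an OperationalError into a clean fatal exit via dak's utils.fubar(). The scope matters: create_engine() connects lazily, so a bad connection string typically only surfaces once __setuptables() first touches the database. A standalone sketch of that shape under SQLAlchemy 0.x conventions (`reflect_or_die` is hypothetical, and SystemExit stands in for utils.fubar()):

    from sqlalchemy import MetaData, create_engine
    from sqlalchemy.exc import OperationalError

    def reflect_or_die(connstr):
        # create_engine() does not open a connection yet; the first real
        # round-trip happens during reflection, so both steps sit in the try.
        try:
            engine = create_engine(connstr)
            meta = MetaData(bind=engine)    # 0.x-style bound metadata
            meta.reflect()                  # connects here
            return engine, meta
        except OperationalError as e:
            raise SystemExit("Cannot connect to database (%s)" % str(e))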