X-Git-Url: https://git.decadent.org.uk/gitweb/?a=blobdiff_plain;f=daklib%2Fdbconn.py;h=9307633027c31867941f62add1ae588fc1a29ff7;hb=5a27176db6eba187f3f9c253ad2a30f931eeb822;hp=88e954fed6879c9ecfff9c576c353ef6dadb7ae7;hpb=54a1e63df2a81a8f5e6f3148d59afcfeaa7b3495;p=dak.git

diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index 88e954fe..93076330 100755
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -37,6 +37,7 @@ import os
 import re
 import psycopg2
 import traceback
+import commands
 from datetime import datetime, timedelta
 from errno import ENOENT
 from tempfile import mkstemp, mkdtemp
@@ -44,7 +45,7 @@ from tempfile import mkstemp, mkdtemp
 from inspect import getargspec
 
 import sqlalchemy
-from sqlalchemy import create_engine, Table, MetaData
+from sqlalchemy import create_engine, Table, MetaData, Column, Integer
 from sqlalchemy.orm import sessionmaker, mapper, relation
 from sqlalchemy import types as sqltypes
 
@@ -56,30 +57,33 @@ from sqlalchemy.orm.exc import NoResultFound
 # in the database
 from config import Config
 from textutils import fix_maintainer
+from dak_exceptions import NoSourceFieldError
 
 ################################################################################
 
 # Patch in support for the debversion field type so that it works during
 # reflection
 
-class DebVersion(sqltypes.Text):
-    """
-    Support the debversion type
-    """
-
+class DebVersion(sqltypes.TypeEngine):
     def get_col_spec(self):
         return "DEBVERSION"
 
+    def bind_processor(self, dialect):
+        return None
+
+    def result_processor(self, dialect, coltype=None):
+        return None
+
 sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version == "0.5":
+if sa_major_version in ["0.5", "0.6"]:
     from sqlalchemy.databases import postgres
     postgres.ischema_names['debversion'] = DebVersion
 else:
-    raise Exception("dak isn't ported to SQLA versions != 0.5 yet. See daklib/dbconn.py")
+    raise Exception("dak is only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
 
 ################################################################################
 
-__all__ = ['IntegrityError', 'SQLAlchemyError']
+__all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion']
 
 ################################################################################
 
@@ -441,8 +445,8 @@ MINIMAL_APT_CONF="""
 Dir
 {
    ArchiveDir "%(archivepath)s";
-   OverrideDir "/srv/ftp.debian.org/scripts/override/";
-   CacheDir "/srv/ftp.debian.org/database/";
+   OverrideDir "%(overridedir)s";
+   CacheDir "%(cachedir)s";
 };
 
 Default
@@ -504,11 +508,16 @@ class BuildQueue(object):
             os.write(fl_fd, '%s\n' % n.fullpath)
         os.close(fl_fd)
 
+        cnf = Config()
+
         # Write minimal apt.conf
         # TODO: Remove hardcoding from template
         (ac_fd, ac_name) = mkstemp()
         os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
-                                            'filelist': fl_name})
+                                            'filelist': fl_name,
+                                            'cachedir': cnf["Dir::Cache"],
+                                            'overridedir': cnf["Dir::Override"],
+                                            })
         os.close(ac_fd)
 
         # Run apt-ftparchive generate
@@ -530,9 +539,14 @@ class BuildQueue(object):
         os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
 
+        # Crude hack with open and append, but this whole section should be redone.
+        if self.notautomatic:
+            release = open("Release", "a")
+            release.write("NotAutomatic: yes\n")
+            release.close()
+
         # Sign if necessary
         if self.signingkey:
-            cnf = Config()
             keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
             if cnf.has_key("Dinstall::SigningPubKeyring"):
                 keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
@@ -604,7 +618,7 @@ class BuildQueue(object):
             session.commit()
 
         for f in os.listdir(self.path):
-            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release'):
+            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
                 continue
 
             try:
@@ -967,17 +981,16 @@ def insert_content_paths(binary_id, fullpaths, session=None):
 
     try:
         # Insert paths
-        pathcache = {}
-
         def generate_path_dicts():
             for fullpath in fullpaths:
                 if fullpath.startswith( './' ):
                     fullpath = fullpath[2:]
 
-                yield {'fulename':fullpath, 'id': binary_id }
+                yield {'filename':fullpath, 'id': binary_id }
 
-        session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
-                         generate_path_dicts() )
+        for d in generate_path_dicts():
+            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
+                             d )
 
         session.commit()
         if privatetrans:
@@ -1307,9 +1320,17 @@ class Keyring(object):
                 esclist[x] = "%c" % (int(esclist[x][2:],16))
         return "".join(esclist)
 
-    def load_keys(self, keyring):
+    def parse_address(self, uid):
+        """parses uid and returns a tuple of real name and email address"""
         import email.Utils
+        (name, address) = email.Utils.parseaddr(uid)
+        name = re.sub(r"\s*[(].*[)]", "", name)
+        name = self.de_escape_gpg_str(name)
+        if name == "":
+            name = uid
+        return (name, address)
 
+    def load_keys(self, keyring):
         if not self.keyring_id:
             raise Exception('Must be initialized with database information')
 
@@ -1321,24 +1342,20 @@ class Keyring(object):
             field = line.split(":")
             if field[0] == "pub":
                 key = field[4]
-                (name, addr) = email.Utils.parseaddr(field[9])
-                name = re.sub(r"\s*[(].*[)]", "", name)
-                if name == "" or addr == "" or "@" not in addr:
-                    name = field[9]
-                    addr = "invalid-uid"
-                name = self.de_escape_gpg_str(name)
-                self.keys[key] = {"email": addr}
-                if name != "":
+                self.keys[key] = {}
+                (name, addr) = self.parse_address(field[9])
+                if "@" in addr:
+                    self.keys[key]["email"] = addr
                     self.keys[key]["name"] = name
-                self.keys[key]["aliases"] = [name]
                 self.keys[key]["fingerprints"] = []
                 signingkey = True
             elif key and field[0] == "sub" and len(field) >= 12:
                 signingkey = ("s" in field[11])
             elif key and field[0] == "uid":
-                (name, addr) = email.Utils.parseaddr(field[9])
-                if name and name not in self.keys[key]["aliases"]:
-                    self.keys[key]["aliases"].append(name)
+                (name, addr) = self.parse_address(field[9])
+                if "email" not in self.keys[key] and "@" in addr:
+                    self.keys[key]["email"] = addr
+                    self.keys[key]["name"] = name
             elif signingkey and field[0] == "fpr":
                 self.keys[key]["fingerprints"].append(field[9])
                 self.fpr_lookup[field[9]] = key
@@ -1386,7 +1403,7 @@ class Keyring(object):
         byname = {}
         any_invalid = False
         for x in self.keys.keys():
-            if self.keys[x]["email"] == "invalid-uid":
+            if "email" not in self.keys[x]:
                 any_invalid = True
                 self.keys[x]["uid"] = format % "invalid-uid"
             else:
@@ -1506,7 +1523,7 @@ def get_location(location, component=None, archive=None, session=None):
     and archive
 
     @type location: string
-    @param location: the path of the location, e.g. I{/srv/ftp.debian.org/ftp/pool/}
+    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
 
     @type component: string
     @param component: the component name (if None, no restriction applied)
 
@@ -1907,6 +1924,31 @@ def get_policy_queue(queuename, session=None):
 
 __all__.append('get_policy_queue')
 
+@session_wrapper
+def get_policy_queue_from_path(pathname, session=None):
+    """
+    Returns PolicyQueue object for the given C{path name}
+
+    @type pathname: string
+    @param pathname: The path
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given queue
+    """
+
+    q = session.query(PolicyQueue).filter_by(path=pathname)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_policy_queue_from_path')
+
 ################################################################################
 
 class Priority(object):
@@ -2300,7 +2342,7 @@ def add_dsc_to_db(u, filename, session=None):
     # Add the src_uploaders to the DB
     uploader_ids = [source.maintainer_id]
     if u.pkg.dsc.has_key("uploaders"):
-        for up in u.pkg.dsc["uploaders"].split(","):
+        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
             up = up.strip()
             uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
 
@@ -2359,7 +2401,7 @@ def add_deb_to_db(u, filename, session=None):
         bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
         if len(bin_sources) != 1:
             raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                  (bin.package, bin.version, bin.architecture.arch_string,
+                                  (bin.package, bin.version, entry["architecture"],
                                    filename, bin.binarytype, u.pkg.changes["fingerprint"])
 
         bin.source_id = bin_sources[0].source_id
@@ -2448,11 +2490,7 @@ SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                  ('Priority', 'priority'),
                  ('NotAutomatic', 'notautomatic'),
                  ('CopyChanges', 'copychanges'),
-                 ('CopyDotDak', 'copydotdak'),
-                 ('CommentsDir', 'commentsdir'),
-                 ('OverrideSuite', 'overridesuite'),
-                 ('ChangelogBase', 'changelogbase')]
-
+                 ('OverrideSuite', 'overridesuite')]
 
 class Suite(object):
     def __init__(self, *args, **kwargs):
@@ -2650,28 +2688,6 @@ class Uid(object):
 
 __all__.append('Uid')
 
-@session_wrapper
-def add_database_user(uidname, session=None):
-    """
-    Adds a database user
-
-    @type uidname: string
-    @param uidname: The uid of the user to add
-
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied). If not passed, a commit will be performed at
-    the end of the function, otherwise the caller is responsible for commiting.
-
-    @rtype: Uid
-    @return: the uid object for the given uidname
-    """
-
-    session.execute("CREATE USER :uid", {'uid': uidname})
-    session.commit_or_flush()
-
-__all__.append('add_database_user')
-
 @session_wrapper
 def get_or_set_uid(uidname, session=None):
     """
@@ -2746,35 +2762,28 @@ class DBConn(object):
         self.__createconn()
 
     def __setuptables(self):
-        tables = (
+        tables_with_primary = (
             'architecture',
             'archive',
             'bin_associations',
             'binaries',
             'binary_acl',
             'binary_acl_map',
-            'bin_contents'
             'build_queue',
-            'build_queue_files',
+            'changelogs_text',
             'component',
             'config',
             'changes_pending_binaries',
             'changes_pending_files',
-            'changes_pending_files_map',
             'changes_pending_source',
-            'changes_pending_source_files',
-            'changes_pool_files',
-            'deb_contents',
             'dsc_files',
             'files',
             'fingerprint',
             'keyrings',
-            'changes',
             'keyring_acl_map',
             'location',
             'maintainer',
             'new_comments',
-            'override',
             'override_type',
             'pending_bin_contents',
             'policy_queue',
@@ -2786,18 +2795,72 @@ class DBConn(object):
             'src_format',
             'src_uploaders',
             'suite',
+            'uid',
+            'upload_blocks',
+            # The following tables have primary keys but sqlalchemy
+            # version 0.5 fails to reflect them correctly with database
+            # versions before upgrade #41.
+            #'changes',
+            #'build_queue_files',
+        )
+
+        tables_no_primary = (
+            'bin_contents',
+            'changes_pending_files_map',
+            'changes_pending_source_files',
+            'changes_pool_files',
+            'deb_contents',
+            'override',
             'suite_architectures',
             'suite_src_formats',
             'suite_build_queue_copy',
             'udeb_contents',
-            'uid',
-            'upload_blocks',
+            # see the comment above
+            'changes',
+            'build_queue_files',
         )
 
-        for table_name in tables:
+        views = (
+            'almost_obsolete_all_associations',
+            'almost_obsolete_src_associations',
+            'any_associations_source',
+            'bin_assoc_by_arch',
+            'bin_associations_binaries',
+            'binaries_suite_arch',
+            'binfiles_suite_component_arch',
+            'changelogs',
+            'file_arch_suite',
+            'newest_all_associations',
+            'newest_any_associations',
+            'newest_source',
+            'newest_src_association',
+            'obsolete_all_associations',
+            'obsolete_any_associations',
+            'obsolete_any_by_all_associations',
+            'obsolete_src_associations',
+            'source_suite',
+            'src_associations_bin',
+            'src_associations_src',
+            'suite_arch_by_name',
+        )
+
+        # Sqlalchemy version 0.5 fails to reflect the SERIAL type
+        # correctly and that is why we have to use a workaround. It can
+        # be removed as soon as we switch to version 0.6.
+        for table_name in tables_with_primary:
+            table = Table(table_name, self.db_meta, \
+                Column('id', Integer, primary_key = True), \
+                autoload=True, useexisting=True)
+            setattr(self, 'tbl_%s' % table_name, table)
+
+        for table_name in tables_no_primary:
             table = Table(table_name, self.db_meta, autoload=True)
             setattr(self, 'tbl_%s' % table_name, table)
 
+        for view_name in views:
+            view = Table(view_name, self.db_meta, autoload=True)
+            setattr(self, 'view_%s' % view_name, view)
+
     def __setupmappers(self):
         mapper(Architecture, self.tbl_architecture,
                properties = dict(arch_id = self.tbl_architecture.c.id))
@@ -2824,7 +2887,7 @@ class DBConn(object):
         mapper(DebContents, self.tbl_deb_contents,
                properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
                                  package=self.tbl_deb_contents.c.package,
-                                 component=self.tbl_deb_contents.c.component,
+                                 suite=self.tbl_deb_contents.c.suite,
                                  arch=self.tbl_deb_contents.c.arch,
                                  section=self.tbl_deb_contents.c.section,
                                  filename=self.tbl_deb_contents.c.filename))
@@ -2832,11 +2895,18 @@ class DBConn(object):
         mapper(UdebContents, self.tbl_udeb_contents,
                properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
                                  package=self.tbl_udeb_contents.c.package,
-                                 component=self.tbl_udeb_contents.c.component,
+                                 suite=self.tbl_udeb_contents.c.suite,
                                  arch=self.tbl_udeb_contents.c.arch,
                                  section=self.tbl_udeb_contents.c.section,
                                  filename=self.tbl_udeb_contents.c.filename))
 
+        mapper(BuildQueue, self.tbl_build_queue,
+               properties = dict(queue_id = self.tbl_build_queue.c.id))
+
+        mapper(BuildQueueFile, self.tbl_build_queue_files,
+               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
+                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))
+
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
                                  package = self.tbl_binaries.c.package,
@@ -2942,10 +3012,7 @@ class DBConn(object):
                                  source_files = relation(ChangePendingFile,
                                                          secondary=self.tbl_changes_pending_source_files,
                                                          backref="pending_sources")))
-                                 files = relation(KnownChangePendingFile, backref="changesfile")))
 
-        mapper(KnownChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.c.id))
 
         mapper(KeyringACLMap, self.tbl_keyring_acl_map,
                properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
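
The reflection machinery this commit leans on is easier to follow outside the diff. The sketch below is illustrative only and not part of the commit: it shows, under the commit's own SQLAlchemy 0.5/0.6 assumptions, how the DebVersion type is registered before any reflection happens, and how __setuptables() pre-declares the SERIAL primary key that SQLAlchemy 0.5 cannot reflect on its own. The connection DSN and the choice of the 'suite' table are placeholders, not values taken from dak's configuration.

# Sketch only -- mirrors the techniques used above, not dak itself.
# Assumes SQLAlchemy 0.5/0.6 against a PostgreSQL database; the DSN
# below is an illustrative placeholder.
import sqlalchemy
from sqlalchemy import create_engine, MetaData, Table, Column, Integer
from sqlalchemy import types as sqltypes

class DebVersion(sqltypes.TypeEngine):
    """Describe PostgreSQL's DEBVERSION type so reflection recognises it."""
    def get_col_spec(self):
        return "DEBVERSION"

    def bind_processor(self, dialect):
        return None  # no conversion on the way into the database

    def result_processor(self, dialect, coltype=None):
        return None  # no conversion on the way out

# Teach the postgres dialect about the type before any autoload=True call,
# exactly as the module-level patch above does.
from sqlalchemy.databases import postgres
postgres.ischema_names['debversion'] = DebVersion

engine = create_engine("postgres://user@localhost/projectb")  # placeholder DSN
meta = MetaData(bind=engine)

# SQLAlchemy 0.5 fails to reflect SERIAL primary keys, so the 'id' column
# is declared up front and autoload=True fills in the remaining columns;
# useexisting=True permits redefining a previously reflected table.
suite = Table('suite', meta,
              Column('id', Integer, primary_key=True),
              autoload=True, useexisting=True)

A table reflected this way has a usable primary key, so it can be handed straight to mapper() in the same fashion as __setupmappers() above.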