diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index 9e5afec7..5b30fce9 100755
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -37,7 +37,7 @@ import os
 import re
 import psycopg2
 import traceback
-import datetime
+from datetime import datetime
 
 from inspect import getargspec
 
@@ -50,8 +50,6 @@ from sqlalchemy import types as sqltypes
 from sqlalchemy.exc import *
 from sqlalchemy.orm.exc import NoResultFound
 
-# Only import Config until Queue stuff is changed to store its config
-# in the database
 from config import Config
 from singleton import Singleton
 from textutils import fix_maintainer
@@ -125,6 +123,8 @@ def session_wrapper(fn):
 
     return wrapped
 
+__all__.append('session_wrapper')
+
 ################################################################################
 
 class Architecture(object):
@@ -430,6 +430,132 @@ __all__.append('BinaryACLMap')
 
 ################################################################################
 
+class BuildQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueue %s>' % self.queue_name
+
+    def add_file_from_pool(self, poolfile):
+        """Copies a file from the pool into this queue.  Assumes that the
+        PoolFile object is attached to the same SQLAlchemy session as the
+        BuildQueue object is.
+
+        The caller is responsible for committing after calling this function."""
+        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
+
+        # Check if we have a file of this name or this ID already
+        for f in self.queuefiles:
+            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
+               f.poolfile.filename == poolfile_basename:
+                # In this case, update the BuildQueueFile entry so we
+                # don't remove it too early
+                f.lastused = datetime.now()
+                DBConn().session().object_session(poolfile).add(f)
+                return f
+
+        # Prepare BuildQueueFile object
+        qf = BuildQueueFile()
+        qf.queue_id = self.queue_id
+        qf.lastused = datetime.now()
+        qf.filename = poolfile_basename
+
+        # Source path of the file in the pool (assumes PoolFile.fullpath)
+        targetpath = poolfile.fullpath
+        queuepath = os.path.join(self.path, poolfile_basename)
+
+        try:
+            if self.copy_pool_files:
+                # We need to copy instead of symlink
+                import utils
+                utils.copy(targetpath, queuepath)
+                # NULL in the fileid field implies a copy
+                qf.fileid = None
+            else:
+                os.symlink(targetpath, queuepath)
+                qf.fileid = poolfile.file_id
+        except OSError:
+            return None
+
+        # Get the same session as the PoolFile is using and add the qf to it
+        DBConn().session().object_session(poolfile).add(qf)
+
+        return qf
+
+
+__all__.append('BuildQueue')
+
+@session_wrapper
+def get_queue(queuename, session=None):
+    """
+    Returns BuildQueue object for given C{queue name}, or None if no such
+    queue exists.
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: BuildQueue
+    @return: BuildQueue object for the given queue, or None
+    """
+
+    q = session.query(BuildQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_queue')
+
+################################################################################
+
+class BuildQueueFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueueFile %s (%s)>' % (self.filename, self.queue_id)
+
+__all__.append('BuildQueueFile')
+
+################################################################################
+
+class ChangePendingBinary(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
+
+__all__.append('ChangePendingBinary')
+
+################################################################################
+
+class ChangePendingFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingFile %s>' % self.change_pending_file_id
+
+__all__.append('ChangePendingFile')
+
+################################################################################
+
+class ChangePendingSource(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingSource %s>' % self.change_pending_source_id
+
+__all__.append('ChangePendingSource')
+
+################################################################################
+
 class Component(object):
     def __init__(self, *args, **kwargs):
         pass
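A minimal caller sketch for the new build-queue API (illustration only, not part of the patch; the queue name, the PoolFile lookup and the commit placement are assumptions, and an initialised dak configuration is assumed for DBConn):

    from daklib.dbconn import DBConn, PoolFile, get_queue

    session = DBConn().session()
    queue = get_queue('buildd', session)    # hypothetical queue name; None if unknown
    pf = session.query(PoolFile).first()
    if queue is not None and pf is not None:
        # Symlinks (or copies, when queue.copy_pool_files is set) the pool
        # file into the queue directory; returns the BuildQueueFile row, or
        # None if the filesystem operation failed.
        qf = queue.add_file_from_pool(pf)
        # add_file_from_pool does not commit, so the caller must:
        session.commit()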
@@ -850,6 +976,39 @@ def get_poolfile_like_name(filename, session=None):
 
 __all__.append('get_poolfile_like_name')
 
+@session_wrapper
+def add_poolfile(filename, datadict, location_id, session=None):
+    """
+    Add a new file to the pool
+
+    @type filename: string
+    @param filename: filename
+
+    @type datadict: dict
+    @param datadict: dict with the file's 'size', 'md5sum', 'sha1sum' and 'sha256sum'
+
+    @type location_id: int
+    @param location_id: database id of the location
+
+    @rtype: PoolFile
+    @return: the PoolFile object created
+    """
+    poolfile = PoolFile()
+    poolfile.filename = filename
+    poolfile.filesize = datadict["size"]
+    poolfile.md5sum = datadict["md5sum"]
+    poolfile.sha1sum = datadict["sha1sum"]
+    poolfile.sha256sum = datadict["sha256sum"]
+    poolfile.location_id = location_id
+
+    session.add(poolfile)
+    # Flush to get a file id (NB: This is not a commit)
+    session.flush()
+
+    return poolfile
+
+__all__.append('add_poolfile')
+
 ################################################################################
 
 class Fingerprint(object):
@@ -1125,17 +1284,6 @@ __all__.append('get_knownchange')
 
 ################################################################################
 
-class KnownChangePendingFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<KnownChangePendingFile %s>' % self.known_change_pending_file_id
-
-__all__.append('KnownChangePendingFile')
-
-################################################################################
-
 class Location(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1502,6 +1650,17 @@ __all__.append('insert_pending_content_paths')
 
 ################################################################################
 
+class PolicyQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<PolicyQueue %s>' % self.queue_name
+
+__all__.append('PolicyQueue')
+
+################################################################################
+
 class Priority(object):
     def __init__(self, *args, **kwargs):
         pass
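A sketch of calling add_poolfile (illustration only; the path, size and checksum values are made up, and DBConn is assumed to be initialised):

    from daklib.dbconn import DBConn, add_poolfile

    session = DBConn().session()
    datadict = {
        'size': 1234,                  # hypothetical values throughout
        'md5sum': '<md5 hex digest>',
        'sha1sum': '<sha1 hex digest>',
        'sha256sum': '<sha256 hex digest>',
    }
    pf = add_poolfile('pool/main/e/example/example_1.0.dsc', datadict, 1, session)
    print pf.file_id    # already valid: add_poolfile flushes to assign the id
    session.commit()    # ... but it does not commit; that is the caller's job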
@@ -1572,99 +1731,6 @@ __all__.append('get_priorities')
 
 ################################################################################
 
-class Queue(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<Queue %s>' % self.queue_name
-
-    def add_file_from_pool(self, poolfile):
-        """Copies a file into the pool.  Assumes that the PoolFile object is
-        attached to the same SQLAlchemy session as the Queue object is.
-
-        The caller is responsible for committing after calling this function."""
-        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-
-        # Check if we have a file of this name or this ID already
-        for f in self.queuefiles:
-            if f.fileid is not None and f.fileid == poolfile.file_id or \
-               f.poolfile.filename == poolfile_basename:
-                # In this case, update the QueueFile entry so we
-                # don't remove it too early
-                f.lastused = datetime.now()
-                DBConn().session().object_session(pf).add(f)
-                return f
-
-        # Prepare QueueFile object
-        qf = QueueFile()
-        qf.queue_id = self.queue_id
-        qf.lastused = datetime.now()
-        qf.filename = dest
-
-        targetpath = qf.fullpath
-        queuepath = os.path.join(self.path, poolfile_basename)
-
-        try:
-            if self.copy_pool_files:
-                # We need to copy instead of symlink
-                import utils
-                utils.copy(targetfile, queuepath)
-                # NULL in the fileid field implies a copy
-                qf.fileid = None
-            else:
-                os.symlink(targetfile, queuepath)
-                qf.fileid = poolfile.file_id
-        except OSError:
-            return None
-
-        # Get the same session as the PoolFile is using and add the qf to it
-        DBConn().session().object_session(poolfile).add(qf)
-
-        return qf
-
-
-__all__.append('Queue')
-
-@session_wrapper
-def get_queue(queuename, session=None):
-    """
-    Returns Queue object for given C{queue name}, creating it if it does not
-    exist.
-
-    @type queuename: string
-    @param queuename: The name of the queue
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: Queue
-    @return: Queue object for the given queue
-    """
-
-    q = session.query(Queue).filter_by(queue_name=queuename)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_queue')
-
-################################################################################
-
-class QueueFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<QueueFile %s (%s)>' % (self.filename, self.queue_id)
-
-__all__.append('QueueFile')
-
-################################################################################
-
 class Section(object):
     def __init__(self, *args, **kwargs):
         pass
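Porting callers across the rename is mechanical; a hypothetical before/after sketch (queue names invented, only the class and helper names come from this patch):

    from daklib.dbconn import DBConn, PolicyQueue, get_queue

    session = DBConn().session()
    # before this patch: get_queue() returned a Queue, and q.queuefiles
    # held QueueFile rows
    q = get_queue('buildd', session)     # now returns a BuildQueue (or None)
    # q.queuefiles now holds BuildQueueFile rows
    # policy queues ('new', 'byhand', ...) are a separate concept now:
    pq = session.query(PolicyQueue).filter_by(queue_name='new').first()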
@@ -1895,6 +1961,174 @@ __all__.append('get_source_in_suite')
 
 ################################################################################
 
+@session_wrapper
+def add_dsc_to_db(u, filename, session=None):
+    entry = u.pkg.files[filename]
+    source = DBSource()
+
+    source.source = u.pkg.dsc["source"]
+    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
+    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
+    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    source.install_date = datetime.now().date()
+
+    dsc_component = entry["component"]
+    dsc_location_id = entry["location id"]
+
+    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
+
+    # Set up a new poolfile if necessary
+    if not entry.has_key("files id") or not entry["files id"]:
+        filename = entry["pool name"] + filename
+        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
+        session.flush()
+        entry["files id"] = poolfile.file_id
+
+    source.poolfile_id = entry["files id"]
+    session.add(source)
+    session.flush()
+
+    for suite_name in u.pkg.changes["distribution"].keys():
+        sa = SrcAssociation()
+        sa.source_id = source.source_id
+        sa.suite_id = get_suite(suite_name).suite_id
+        session.add(sa)
+
+    session.flush()
+
+    # Add the source files to the DB (files and dsc_files)
+    dscfile = DSCFile()
+    dscfile.source_id = source.source_id
+    dscfile.poolfile_id = entry["files id"]
+    session.add(dscfile)
+
+    for dsc_file, dentry in u.pkg.dsc_files.items():
+        df = DSCFile()
+        df.source_id = source.source_id
+
+        # If the .orig tarball is already in the pool, its
+        # files id is stored in dsc_files by check_dsc().
+        files_id = dentry.get("files id", None)
+
+        # Find the entry in the files hash
+        # TODO: Bail out here properly
+        dfentry = None
+        for f, e in u.pkg.files.items():
+            if f == dsc_file:
+                dfentry = e
+                break
+
+        if files_id is None:
+            filename = dfentry["pool name"] + dsc_file
+
+            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
+            # FIXME: needs to check for -1/-2 and or handle exception
+            if found and obj is not None:
+                files_id = obj.file_id
+
+            # If still not found, add it
+            if files_id is None:
+                # HACK: Force sha1sum etc into dentry
+                dentry["sha1sum"] = dfentry["sha1sum"]
+                dentry["sha256sum"] = dfentry["sha256sum"]
+                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
+                files_id = poolfile.file_id
+
+        df.poolfile_id = files_id
+        session.add(df)
+
+    session.flush()
+
+    # Add the src_uploaders to the DB
+    uploader_ids = [source.maintainer_id]
+    if u.pkg.dsc.has_key("uploaders"):
+        for up in u.pkg.dsc["uploaders"].split(","):
+            up = up.strip()
+            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
+
+    added_ids = {}
+    for up in uploader_ids:
+        if added_ids.has_key(up):
+            utils.warn("Already saw uploader %s for source %s" % (up, source.source))
+            continue
+
+        added_ids[up] = 1
+
+        su = SrcUploader()
+        su.maintainer_id = up
+        su.source_id = source.source_id
+        session.add(su)
+
+    session.flush()
+
+    return dsc_component, dsc_location_id
+
+__all__.append('add_dsc_to_db')
+
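add_dsc_to_db reads a number of fields off the Upload object it is passed; a rough, self-contained sketch of that input shape (all names and values hypothetical; the real object is built during process-upload):

    class _Stub(object):     # stand-in for the real Upload/Changes objects
        pass

    u = _Stub(); u.pkg = _Stub()
    u.pkg.dsc = {
        'source': 'example',
        'version': '1:1.0-1',                      # includes the epoch
        'maintainer': 'A. Maintainer <am@example.org>',
        'uploaders': 'B. Uploader <bu@example.org>',
        'dm-upload-allowed': 'no',
    }
    u.pkg.changes = {
        'changed-by': 'B. Uploader <bu@example.org>',
        'fingerprint': '0123456789ABCDEF',
        'distribution': {'unstable': 1},
    }
    u.pkg.files = {'example_1.0-1.dsc': {
        'component': 'main', 'location id': 1,
        'pool name': 'pool/main/e/example/',
        'size': 1234, 'md5sum': '<md5>', 'sha1sum': '<sha1>', 'sha256sum': '<sha256>',
    }}
    u.pkg.dsc_files = {}     # per-file entries parsed from the .dsc
    # with a live session:
    # dsc_component, dsc_location_id = add_dsc_to_db(u, 'example_1.0-1.dsc', session)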
+@session_wrapper
+def add_deb_to_db(u, filename, session=None):
+    """
+    Contrary to what you might expect, this routine deals with both
+    debs and udebs.  That info is in 'dbtype', whilst 'type' is
+    'deb' for both of them.
+    """
+    cnf = Config()
+    entry = u.pkg.files[filename]
+
+    bin = DBBinary()
+    bin.package = entry["package"]
+    bin.version = entry["version"]
+    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
+    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
+    bin.binarytype = entry["dbtype"]
+
+    # Find poolfile id
+    filename = entry["pool name"] + filename
+    fullpath = os.path.join(cnf["Dir::Pool"], filename)
+    if not entry.get("location id", None):
+        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], utils.where_am_i(), session).location_id
+
+    if not entry.get("files id", None):
+        poolfile = add_poolfile(filename, entry, entry["location id"], session)
+        entry["files id"] = poolfile.file_id
+
+    bin.poolfile_id = entry["files id"]
+
+    # Find source id
+    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+    if len(bin_sources) != 1:
+        # NB: use the entry's architecture string; the bin.architecture
+        # relation is not populated until the object has been flushed
+        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+                                  (bin.package, bin.version, entry["architecture"],
+                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
+
+    bin.source_id = bin_sources[0].source_id
+
+    # Add and flush object so it has an ID
+    session.add(bin)
+    session.flush()
+
+    # Add BinAssociations
+    for suite_name in u.pkg.changes["distribution"].keys():
+        ba = BinAssociation()
+        ba.binary_id = bin.binary_id
+        ba.suite_id = get_suite(suite_name).suite_id
+        session.add(ba)
+
+    session.flush()
+
+    # Deal with contents - disabled for now
+    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
+    #if not contents:
+    #    print "REJECT\nCould not determine contents of package %s" % bin.package
+    #    session.rollback()
+    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
+
+__all__.append('add_deb_to_db')
+
+################################################################################
+
 class SourceACL(object):
     def __init__(self, *args, **kwargs):
         pass
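A caller-side error-handling sketch (hypothetical; assumes NoSourceFieldError is importable from this module and that u and session already exist):

    from daklib.dbconn import NoSourceFieldError, add_deb_to_db

    try:
        add_deb_to_db(u, 'example_1.0-1_amd64.deb', session)
        session.commit()
    except NoSourceFieldError, e:
        # no unique source record for this binary; reject the upload
        session.rollback()
        print "REJECT\n%s" % e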
"REJECT\nCould not determine contents of package %s" % bin.package + # session.rollback() + # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename) + +__all__.append('add_deb_to_db') + +################################################################################ + class SourceACL(object): def __init__(self, *args, **kwargs): pass @@ -2256,12 +2490,17 @@ class DBConn(Singleton): self.tbl_binaries = Table('binaries', self.db_meta, autoload=True) self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True) self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True) + self.tbl_build_queue = Table('build_queue', self.db_meta, autoload=True) + self.tbl_build_queue_files = Table('build_queue_files', self.db_meta, autoload=True) self.tbl_component = Table('component', self.db_meta, autoload=True) self.tbl_config = Table('config', self.db_meta, autoload=True) self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True) self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True) self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True) + self.tbl_changes_pending_binary = Table('changes_pending_binaries', self.db_meta, autoload=True) self.tbl_changes_pending_files = Table('changes_pending_files', self.db_meta, autoload=True) + self.tbl_changes_pending_source = Table('changes_pending_source', self.db_meta, autoload=True) + self.tbl_changes_pending_source_files = Table('changes_pending_source_files', self.db_meta, autoload=True) self.tbl_changes_pool_files = Table('changes_pool_files', self.db_meta, autoload=True) self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True) self.tbl_files = Table('files', self.db_meta, autoload=True) @@ -2275,9 +2514,8 @@ class DBConn(Singleton): self.tbl_override = Table('override', self.db_meta, autoload=True) self.tbl_override_type = Table('override_type', self.db_meta, autoload=True) self.tbl_pending_content_associations = Table('pending_content_associations', self.db_meta, autoload=True) + self.tbl_policy_queue = Table('policy_queue', self.db_meta, autoload=True) self.tbl_priority = Table('priority', self.db_meta, autoload=True) - self.tbl_queue = Table('queue', self.db_meta, autoload=True) - self.tbl_queue_files = Table('queue_files', self.db_meta, autoload=True) self.tbl_section = Table('section', self.db_meta, autoload=True) self.tbl_source = Table('source', self.db_meta, autoload=True) self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True) @@ -2287,7 +2525,7 @@ class DBConn(Singleton): self.tbl_suite = Table('suite', self.db_meta, autoload=True) self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True) self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True) - self.tbl_suite_queue_copy = Table('suite_queue_copy', self.db_meta, autoload=True) + self.tbl_suite_build_queue_copy = Table('suite_build_queue_copy', self.db_meta, autoload=True) self.tbl_uid = Table('uid', self.db_meta, autoload=True) self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True) @@ -2306,6 +2544,12 @@ class DBConn(Singleton): binary_id = self.tbl_bin_associations.c.bin, binary = relation(DBBinary))) + mapper(BuildQueue, self.tbl_build_queue, + properties = dict(queue_id = self.tbl_build_queue.c.id)) + + mapper(BuildQueueFile, self.tbl_build_queue_files, + properties = dict(buildqueue = 
@@ -2372,11 +2616,25 @@ class DBConn(Singleton):
                                  poolfiles = relation(PoolFile,
                                                       secondary=self.tbl_changes_pool_files,
                                                       backref="changeslinks"),
-                                 files = relation(KnownChangePendingFile, backref="changesfile")))
+                                 files = relation(ChangePendingFile, backref="changesfile")))
 
-        mapper(KnownChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.c.id))
+        mapper(ChangePendingBinary, self.tbl_changes_pending_binary,
+               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binary.c.id))
+
+        mapper(ChangePendingFile, self.tbl_changes_pending_files,
+               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+
+        mapper(ChangePendingSource, self.tbl_changes_pending_source,
+               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
+                                 change = relation(KnownChange),
+                                 maintainer = relation(Maintainer,
+                                                       primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
+                                 changedby = relation(Maintainer,
+                                                      primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
+                                 fingerprint = relation(Fingerprint),
+                                 source_files = relation(ChangePendingFile,
+                                                         secondary=self.tbl_changes_pending_source_files,
+                                                         backref="pending_sources")))
 
         mapper(KeyringACLMap, self.tbl_keyring_acl_map,
                properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
                                  keyring = relation(Keyring, backref="keyring_acl_map"),
@@ -2412,16 +2670,12 @@
                properties = dict(overridetype = self.tbl_override_type.c.type,
                                  overridetype_id = self.tbl_override_type.c.id))
 
+        mapper(PolicyQueue, self.tbl_policy_queue,
+               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+
         mapper(Priority, self.tbl_priority,
                properties = dict(priority_id = self.tbl_priority.c.id))
 
-        mapper(Queue, self.tbl_queue,
-               properties = dict(queue_id = self.tbl_queue.c.id))
-
-        mapper(QueueFile, self.tbl_queue_files,
-               properties = dict(queue = relation(Queue, backref='queuefiles'),
-                                 poolfile = relation(PoolFile, backref='queueinstances')))
-
         mapper(Section, self.tbl_section,
                properties = dict(section_id = self.tbl_section.c.id))
 
@@ -2469,8 +2723,8 @@
 
         mapper(Suite, self.tbl_suite,
                properties = dict(suite_id = self.tbl_suite.c.id,
-                                 policy_queue = relation(Queue),
-                                 copy_queues = relation(Queue, secondary=self.tbl_suite_queue_copy)))
+                                 policy_queue = relation(PolicyQueue),
+                                 copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy)))
 
         mapper(SuiteArchitecture, self.tbl_suite_architectures,
                properties = dict(suite_id = self.tbl_suite_architectures.c.suite,
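With the Suite mapper updated, the per-suite queue wiring reads like this (sketch; the suite name is hypothetical and DBConn is assumed to be initialised):

    from daklib.dbconn import DBConn, get_suite

    session = DBConn().session()
    suite = get_suite('unstable', session)
    if suite is not None:
        if suite.policy_queue is not None:
            print 'policy queue: %s' % suite.policy_queue.queue_name
        for bq in suite.copy_queues:
            print 'copies to build queue: %s' % bq.queue_name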