X-Git-Url: https://git.decadent.org.uk/gitweb/?a=blobdiff_plain;f=daklib%2Fdbconn.py;h=5a92dc8146c39e365b85f012b22a0a81717e1153;hb=5d965c34b35048f8a8fab0a7a11f2943d833952d;hp=d05dd1599d2d5181605fb9d6b1cabb51a59cfef3;hpb=ca19ea22806872ba8360086b121c468689fe98df;p=dak.git

diff --git a/daklib/dbconn.py b/daklib/dbconn.py
old mode 100644
new mode 100755
index d05dd159..5a92dc81
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -5,7 +5,7 @@
 @contact: Debian FTPMaster <ftpmaster@debian.org>
 @copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
 @copyright: 2008-2009 Mark Hymers <mhy@debian.org>
-@copyright: 2009 Joerg Jaspert <joerg@debian.org>
+@copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
 @copyright: 2009 Mike O'Connor <stew@debian.org>
 @license: GNU General Public License version 2 or later
 """
@@ -37,7 +37,10 @@ import os
 import re
 import psycopg2
 import traceback
-from datetime import datetime
+import commands
+from datetime import datetime, timedelta
+from errno import ENOENT
+from tempfile import mkstemp, mkdtemp
 
 from inspect import getargspec
 
@@ -61,15 +64,19 @@ from textutils import fix_maintainer
 # Patch in support for the debversion field type so that it works during
 # reflection
 
 class DebVersion(sqltypes.Text):
+    """
+    Support the debversion type
+    """
+
     def get_col_spec(self):
         return "DEBVERSION"
 
 sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version == "0.5":
+if sa_major_version in ["0.5", "0.6"]:
     from sqlalchemy.databases import postgres
     postgres.ischema_names['debversion'] = DebVersion
 else:
-    raise Exception("dak isn't ported to SQLA versions != 0.5 yet. See daklib/dbconn.py")
+    raise Exception("dak is only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
 
 ################################################################################
 
@@ -179,8 +186,8 @@ def get_architecture_suites(architecture, session=None):
     """
     Returns list of Suite objects for given C{architecture} name
 
-    @type source: str
-    @param source: Architecture name to search for
+    @type architecture: str
+    @param architecture: Architecture name to search for
 
     @type session: Session
     @param session: Optional SQL session object (a temporary one will be
@@ -276,8 +283,8 @@ def get_suites_binary_in(package, session=None):
     """
     Returns list of Suite objects which given C{package} name is in
 
-    @type source: str
-    @param source: DBBinary package name to search for
+    @type package: str
+    @param package: DBBinary package name to search for
 
    @rtype: list
     @return: list of Suite objects for the given package
@@ -323,8 +330,8 @@ def get_binaries_from_name(package, version=None, architecture=None, session=None):
     @type version: str or None
     @param version: Version to search for (or None)
 
-    @type package: str, list or None
-    @param package: Architectures to limit to (or None if no limit)
+    @type architecture: str, list or None
+    @param architecture: Architectures to limit to (or None if no limit)
 
     @type session: Session
     @param session: Optional SQL session object (a temporary one will be
@@ -377,16 +384,16 @@ def get_binary_from_name_suite(package, suitename, session=None):
 
     sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name
              FROM binaries b, files fi, location l, component c, bin_associations ba, suite su
-             WHERE b.package=:package
+             WHERE b.package='%(package)s'
               AND b.file = fi.id
               AND fi.location = l.id
               AND l.component = c.id
              AND ba.bin=b.id
              AND ba.suite = su.id
-              AND su.suite_name=:suitename
+              AND su.suite_name %(suitename)s
             ORDER BY b.version DESC"""
 
-    return session.execute(sql, {'package': package, 'suitename': suitename})
+    return session.execute(sql % {'package': package, 'suitename': suitename})
 
 __all__.append('get_binary_from_name_suite')
 
################################################################################
@@ -431,6 +438,45 @@ __all__.append('BinaryACLMap')
 
 ################################################################################
 
+MINIMAL_APT_CONF="""
+Dir
+{
+   ArchiveDir "%(archivepath)s";
+   OverrideDir "%(overridedir)s";
+   CacheDir "%(cachedir)s";
+};
+
+Default
+{
+   Packages::Compress ". bzip2 gzip";
+   Sources::Compress ". bzip2 gzip";
+   DeLinkLimit 0;
+   FileMode 0664;
+}
+
+bindirectory "incoming"
+{
+   Packages "Packages";
+   Contents " ";
+
+   BinOverride "override.sid.all3";
+   BinCacheDB "packages-accepted.db";
+
+   FileList "%(filelist)s";
+
+   PathPrefix "";
+   Packages::Extensions ".deb .udeb";
+};
+
+bindirectory "incoming/"
+{
+   Sources "Sources";
+   BinOverride "override.sid.all3";
+   SrcOverride "override.sid.all3.src";
+   FileList "%(filelist)s";
+};
+"""
+
 class BuildQueue(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -438,6 +484,153 @@ class BuildQueue(object):
     def __repr__(self):
         return '<BuildQueue %s>' % self.queue_name
 
+    def write_metadata(self, starttime, force=False):
+        # Do we write out metafiles?
+        if not (force or self.generate_metadata):
+            return
+
+        session = DBConn().session().object_session(self)
+
+        fl_fd = fl_name = ac_fd = ac_name = None
+        tempdir = None
+        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
+        startdir = os.getcwd()
+
+        try:
+            # Grab files we want to include
+            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+            # Write file list with newer files
+            (fl_fd, fl_name) = mkstemp()
+            for n in newer:
+                os.write(fl_fd, '%s\n' % n.fullpath)
+            os.close(fl_fd)
+
+            cnf = Config()
+
+            # Write minimal apt.conf
+            # TODO: Remove hardcoding from template
+            (ac_fd, ac_name) = mkstemp()
+            os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
+                                                'filelist': fl_name,
+                                                'cachedir': cnf["Dir::Cache"],
+                                                'overridedir': cnf["Dir::Override"],
+                                                })
+            os.close(ac_fd)
+
+            # Run apt-ftparchive generate
+            os.chdir(os.path.dirname(ac_name))
+            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
+
+            # Run apt-ftparchive release
+            # TODO: Eww - fix this
+            bname = os.path.basename(self.path)
+            os.chdir(self.path)
+            os.chdir('..')
+
+            # We have to remove the Release file otherwise it'll be included in the
+            # new one
+            try:
+                os.unlink(os.path.join(bname, 'Release'))
+            except OSError:
+                pass
+
+            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
+
+            # Crude hack with open and append, but this whole section is and should be redone.
+            if self.notautomatic:
+                release = open("Release", "a")
+                release.write("NotAutomatic: yes\n")
+                release.close()
+
+            # Sign if necessary
+            if self.signingkey:
+                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
+                if cnf.has_key("Dinstall::SigningPubKeyring"):
+                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
+
+                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release" % (keyring, self.signingkey))
+
+            # Move the files if we got this far
+            os.rename('Release', os.path.join(bname, 'Release'))
+            if self.signingkey:
+                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
+
+        # Clean up any left behind files
+        finally:
+            os.chdir(startdir)
+            if fl_fd:
+                try:
+                    os.close(fl_fd)
+                except OSError:
+                    pass
+
+            if fl_name:
+                try:
+                    os.unlink(fl_name)
+                except OSError:
+                    pass
+
+            if ac_fd:
+                try:
+                    os.close(ac_fd)
+                except OSError:
+                    pass
+
+            if ac_name:
+                try:
+                    os.unlink(ac_name)
+                except OSError:
+                    pass
+
+    def clean_and_update(self, starttime, Logger, dryrun=False):
+        """WARNING: This routine commits for you"""
+        session = DBConn().session().object_session(self)
+
+        if self.generate_metadata and not dryrun:
+            self.write_metadata(starttime)
+
+        # Grab files older than our execution time
+        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+
+        for o in older:
+            killdb = False
+            try:
+                if dryrun:
+                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
+                else:
+                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
+                    os.unlink(o.fullpath)
+                    killdb = True
+            except OSError, e:
+                # If it wasn't there, don't worry
+                if e.errno == ENOENT:
+                    killdb = True
+                else:
+                    # TODO: Replace with proper logging call
+                    Logger.log(["E: Could not remove %s" % o.fullpath])
+
+            if killdb:
+                session.delete(o)
+
+        session.commit()
+
+        for f in os.listdir(self.path):
+            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release'):
+                continue
+
+            try:
+                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+            except NoResultFound:
+                fp = os.path.join(self.path, f)
+                if dryrun:
+                    Logger.log(["I: Would remove unused link %s" % fp])
+                else:
+                    Logger.log(["I: Removing unused link %s" % fp])
+                    try:
+                        os.unlink(fp)
+                    except OSError:
+                        Logger.log(["E: Failed to unlink unreferenced file %s" % fp])
+
     def add_file_from_pool(self, poolfile):
         """Copies a file into the pool.  Assumes that the PoolFile object is
         attached to the same SQLAlchemy session as the Queue object is.
@@ -518,7 +711,12 @@ class BuildQueueFile(object):
         pass
 
     def __repr__(self):
-        return '<BuildQueueFile %s (%s)>' % (self.filename, self.queue_id)
+        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
+
+    @property
+    def fullpath(self):
+        return os.path.join(self.buildqueue.path, self.filename)
+
 
 __all__.append('BuildQueueFile')
 
@@ -717,8 +915,9 @@ def get_or_set_contents_path_id(filepath, session=None):
 
     If no matching file is found, a row is inserted.
 
-    @type filename: string
-    @param filename: The filepath
+    @type filepath: string
+    @param filepath: The filepath
+
     @type session: SQLAlchemy
     @param session: Optional SQL session object (a temporary one will be
     generated if not supplied).  If not passed, a commit will be performed at
@@ -779,17 +978,16 @@ def insert_content_paths(binary_id, fullpaths, session=None):
 
     try:
         # Insert paths
-        pathcache = {}
-
         def generate_path_dicts():
             for fullpath in fullpaths:
                 if fullpath.startswith( './' ):
                     fullpath = fullpath[2:]
 
-                yield {'fulename':fullpath, 'id': binary_id }
+                yield {'filename':fullpath, 'id': binary_id }
 
-        session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
-                         generate_path_dicts() )
+        for d in generate_path_dicts():
+            session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
+                             d )
 
         session.commit()
         if privatetrans:
@@ -871,7 +1069,7 @@ __all__.append('PoolFile')
 def check_poolfile(filename, filesize, md5sum, location_id, session=None):
     """
     Returns a tuple:
-     (ValidFileFound [boolean or None], PoolFile object or None)
+    (ValidFileFound [boolean or None], PoolFile object or None)
 
     @type filename: string
     @param filename: the filename of the file to check against the DB
@@ -887,12 +1085,11 @@ def check_poolfile(filename, filesize, md5sum, location_id, session=None):
 
     @rtype: tuple
     @return: Tuple of length 2.
-             If more than one file found with that name:
-                 (None,  None)
-             If valid pool file found: (True, PoolFile object)
-             If valid pool file not found:
-                 (False, None) if no file found
-                 (False, PoolFile object) if file found with size/md5sum mismatch
+                 - If more than one file found with that name: (C{None}, C{None})
+                 - If valid pool file found: (C{True}, C{PoolFile object})
+                 - If valid pool file not found:
+                     - (C{False}, C{None}) if no file found
+                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
     """
 
     q = session.query(PoolFile).filter_by(filename=filename)
@@ -976,7 +1173,7 @@ def get_poolfile_like_name(filename, session=None):
     """
 
     # TODO: There must be a way of properly using bind parameters with %FOO%
-    q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename))
+    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
 
     return q.all()
 
@@ -1261,6 +1458,19 @@ class DBChange(object):
     def __repr__(self):
         return '<DBChange %s>' % self.changesname
 
+    def clean_from_queue(self):
+        session = DBConn().session().object_session(self)
+
+        # Remove changes_pool_files entries
+        self.poolfiles = []
+
+        # Remove changes_pending_files references
+        self.files = []
+
+        # Clear out of queue
+        self.in_queue = None
+        self.approved_for_id = None
+
 __all__.append('DBChange')
 
 @session_wrapper
@@ -1268,15 +1478,15 @@ def get_dbchange(filename, session=None):
     """
     returns DBChange object for given C{filename}.
 
-    @type archive: string
-    @param archive: the name of the arhive
+    @type filename: string
+    @param filename: the name of the file
 
     @type session: Session
     @param session: Optional SQLA session object (a temporary one will be
     generated if not supplied)
 
-    @rtype: Archive
-    @return: Archive object for the given name (None if not present)
+    @rtype: DBChange
+    @return: DBChange object for the given filename (C{None} if not present)
 
     """
     q = session.query(DBChange).filter_by(changesname=filename)
@@ -1306,13 +1516,13 @@ def get_location(location, component=None, archive=None, session=None):
     and archive
 
     @type location: string
-    @param location: the path of the location, e.g. I{/srv/ftp.debian.org/ftp/pool/}
+    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
 
     @type component: string
     @param component: the component name (if None, no restriction applied)
 
     @type archive: string
-    @param archive_id: the archive name (if None, no restriction applied)
+    @param archive: the archive name (if None, no restriction applied)
 
     @rtype: Location / None
     @return: Either a Location object or None if one can't be found
@@ -1707,6 +1917,31 @@ def get_policy_queue(queuename, session=None):
 
 __all__.append('get_policy_queue')
 
+@session_wrapper
+def get_policy_queue_from_path(pathname, session=None):
+    """
+    Returns PolicyQueue object for given C{pathname}
+
+    @type pathname: string
+    @param pathname: The path
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given queue
+    """
+
+    q = session.query(PolicyQueue).filter_by(path=pathname)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_policy_queue_from_path')
+
 ################################################################################
 
 class Priority(object):
@@ -1866,8 +2101,8 @@ def source_exists(source, source_version, suites = ["any"], session=None):
       1. exact match     => 1.0-3
      2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1
 
-    @type package: string
-    @param package: package source name
+    @type source: string
+    @param source: source name
 
     @type source_version: string
     @param source_version: expected source version
@@ -1950,8 +2185,8 @@ def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None):
     @type source: str
     @param source: DBSource package name to search for
 
-    @type source: str or None
-    @param source: DBSource version name to search for or None if not applicable
+    @type version: str or None
+    @param version: DBSource version name to search for or None if not applicable
 
     @type dm_upload_allowed: bool
     @param dm_upload_allowed: If None, no effect.  If True or False, only
@@ -2100,26 +2335,27 @@ def add_dsc_to_db(u, filename, session=None):
 
     # Add the src_uploaders to the DB
     uploader_ids = [source.maintainer_id]
     if u.pkg.dsc.has_key("uploaders"):
-        for up in u.pkg.dsc["uploaders"].split(","):
+        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
             up = up.strip()
             uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
 
     added_ids = {}
-    for up in uploader_ids:
-        if added_ids.has_key(up):
-            utils.warn("Already saw uploader %s for source %s" % (up, source.source))
+    for up_id in uploader_ids:
+        if added_ids.has_key(up_id):
+            import utils
+            utils.warn("Already saw uploader %s for source %s" % (up_id, source.source))
             continue
 
-        added_ids[u]=1
+        added_ids[up_id]=1
 
         su = SrcUploader()
-        su.maintainer_id = up
+        su.maintainer_id = up_id
         su.source_id = source.source_id
         session.add(su)
 
     session.flush()
 
-    return dsc_component, dsc_location_id, pfs
+    return source, dsc_component, dsc_location_id, pfs
 
 __all__.append('add_dsc_to_db')
 
@@ -2247,11 +2483,7 @@ SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                  ('Priority', 'priority'),
                  ('NotAutomatic', 'notautomatic'),
                  ('CopyChanges', 'copychanges'),
-                 ('CopyDotDak', 'copydotdak'),
-                 ('CommentsDir', 'commentsdir'),
-                 ('OverrideSuite', 'overridesuite'),
-                 ('ChangelogBase', 'changelogbase')]
-
+                 ('OverrideSuite', 'overridesuite')]
 
 class Suite(object):
     def __init__(self, *args, **kwargs):
@@ -2355,8 +2587,8 @@ def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
     """
     Returns list of Architecture objects for given C{suite} name
 
-    @type source: str
-    @param source: Suite name to search for
+    @type suite: str
+    @param suite: Suite name to search for
 
     @type skipsrc: boolean
     @param skipsrc: Whether to skip returning the 'source' architecture entry
@@ -2449,28 +2681,6 @@ class Uid(object):
 
 __all__.append('Uid')
 
-@session_wrapper
-def add_database_user(uidname, session=None):
-    """
-    Adds a database user
-
-    @type uidname: string
-    @param uidname: The uid of the user to add
-
-    @type session: SQLAlchemy
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied).  If not passed, a commit will be performed at
-    the end of the function, otherwise the caller is responsible for commiting.
-
-    @rtype: Uid
-    @return: the uid object for the given uidname
-    """
-
-    session.execute("CREATE USER :uid", {'uid': uidname})
-    session.commit_or_flush()
-
-__all__.append('add_database_user')
-
 @session_wrapper
 def get_or_set_uid(uidname, session=None):
     """
@@ -2552,7 +2762,7 @@ class DBConn(object):
             'binaries',
             'binary_acl',
             'binary_acl_map',
-            'bin_contents'
+            'bin_contents',
             'build_queue',
             'build_queue_files',
             'component',
@@ -2623,7 +2833,7 @@ class DBConn(object):
         mapper(DebContents, self.tbl_deb_contents,
                properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
                                  package=self.tbl_deb_contents.c.package,
-                                 component=self.tbl_deb_contents.c.component,
+                                 suite=self.tbl_deb_contents.c.suite,
                                  arch=self.tbl_deb_contents.c.arch,
                                  section=self.tbl_deb_contents.c.section,
                                  filename=self.tbl_deb_contents.c.filename))
@@ -2631,11 +2841,18 @@ class DBConn(object):
         mapper(UdebContents, self.tbl_udeb_contents,
                properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
                                  package=self.tbl_udeb_contents.c.package,
-                                 component=self.tbl_udeb_contents.c.component,
+                                 suite=self.tbl_udeb_contents.c.suite,
                                  arch=self.tbl_udeb_contents.c.arch,
                                  section=self.tbl_udeb_contents.c.section,
                                  filename=self.tbl_udeb_contents.c.filename))
 
+        mapper(BuildQueue, self.tbl_build_queue,
+               properties = dict(queue_id = self.tbl_build_queue.c.id))
+
+        mapper(BuildQueueFile, self.tbl_build_queue_files,
+               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
+                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))
+
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
                                  package = self.tbl_binaries.c.package,
@@ -2701,6 +2918,16 @@ class DBConn(object):
                                  poolfiles = relation(PoolFile,
                                                       secondary=self.tbl_changes_pool_files,
                                                       backref="changeslinks"),
+                                 seen = self.tbl_changes.c.seen,
+                                 source = self.tbl_changes.c.source,
+                                 binaries = self.tbl_changes.c.binaries,
+                                 architecture = self.tbl_changes.c.architecture,
+                                 distribution = self.tbl_changes.c.distribution,
+                                 urgency = self.tbl_changes.c.urgency,
+                                 maintainer = self.tbl_changes.c.maintainer,
+                                 changedby = self.tbl_changes.c.changedby,
+                                 date = self.tbl_changes.c.date,
+                                 version = self.tbl_changes.c.version,
                                  files = relation(ChangePendingFile,
                                                   secondary=self.tbl_changes_pending_files_map,
                                                   backref="changesfile"),
@@ -2713,7 +2940,12 @@ class DBConn(object):
                properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
 
         mapper(ChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
+                                 filename = self.tbl_changes_pending_files.c.filename,
+                                 size = self.tbl_changes_pending_files.c.size,
+                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
+                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
+                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
 
         mapper(ChangePendingSource, self.tbl_changes_pending_source,
                properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
@@ -2726,10 +2958,7 @@ class DBConn(object):
                                  source_files = relation(ChangePendingFile,
                                                          secondary=self.tbl_changes_pending_source_files,
                                                          backref="pending_sources")))
-                                 files = relation(KnownChangePendingFile, backref="changesfile")))
 
-        mapper(KnownChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.c.id))
 
         mapper(KeyringACLMap, self.tbl_keyring_acl_map,
                properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
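
The MINIMAL_APT_CONF template added above is filled in by write_metadata()
with plain %-interpolation, so its rendering can be checked in isolation. A
minimal sketch, assuming daklib is importable; all four paths are made-up
placeholders (at runtime they come from BuildQueue.path, a mkstemp() file
list and dak's Config() values for Dir::Cache and Dir::Override):

    # Render the apt.conf that write_metadata() hands to apt-ftparchive.
    # Every path below is a hypothetical placeholder, not a dak default.
    from daklib.dbconn import MINIMAL_APT_CONF

    print MINIMAL_APT_CONF % {
        'archivepath': '/srv/queue/buildd',      # BuildQueue.path
        'overridedir': '/srv/dak/override',      # Config()["Dir::Override"]
        'cachedir':    '/srv/dak/cache',         # Config()["Dir::Cache"]
        'filelist':    '/tmp/filelist-example',  # file list written via mkstemp()
    }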
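The new write_metadata()/clean_and_update() pair is meant to be driven from a
periodic job: clean_and_update() regenerates the queue metadata (unless dryrun
is set), expires files whose lastused plus stay_of_execution lies before the
start time, and commits the session itself. A minimal driver sketch using only
the mappings added in this patch; StubLogger is a hypothetical stand-in for
dak's own Logger, of which only the log(list) method is used:

    from datetime import datetime

    from daklib.dbconn import DBConn, BuildQueue

    class StubLogger(object):
        # Hypothetical stand-in for daklib's Logger class.
        def log(self, entry):
            print " ".join(entry)

    def expire_build_queues(dryrun=True):
        starttime = datetime.now()
        session = DBConn().session()
        # clean_and_update() unlinks and deletes queue files older than
        # stay_of_execution and rewrites Packages/Sources/Release via
        # write_metadata(); note that it commits for the caller.
        for queue in session.query(BuildQueue).all():
            queue.clean_and_update(starttime, StubLogger(), dryrun=dryrun)

    if __name__ == '__main__':
        expire_build_queues(dryrun=True)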