X-Git-Url: https://git.decadent.org.uk/gitweb/?a=blobdiff_plain;f=daklib%2Fdatabase.py;h=cbdfad04e5a4504f204210802bdc5efc9236a75f;hb=52b14d883272923a4296ac02921ccf350815db83;hp=3fbd2a504992bae6b9555af6a4bb56812aa7acfb;hpb=e47619471d54d20613d18bb8ac928650513ab404;p=dak.git

diff --git a/daklib/database.py b/daklib/database.py
index 3fbd2a50..cbdfad04 100755
--- a/daklib/database.py
+++ b/daklib/database.py
@@ -4,8 +4,9 @@
 @group readonly: get_suite_id, get_section_id, get_priority_id, get_override_type_id,
                  get_architecture_id, get_archive_id, get_component_id, get_location_id,
                  get_source_id, get_suite_version, get_files_id, get_maintainer, get_suites,
-                 get_suite_architectures
+                 get_suite_architectures, get_new_comments, has_new_comment
 @group read/write: get_or_set*, set_files_id
+@group writeonly: add_new_comment, delete_new_comments
 @contact: Debian FTP Master
 @copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup
@@ -32,6 +33,8 @@ import sys
 import time
 import types
+import utils
+import pg
 
 ################################################################################
 
@@ -48,12 +51,15 @@ location_id_cache = {}        #: cache for locations
 maintainer_id_cache = {}      #: cache for maintainers
 keyring_id_cache = {}         #: cache for keyrings
 source_id_cache = {}          #: cache for sources
+
 files_id_cache = {}           #: cache for files
 maintainer_cache = {}         #: cache for maintainer names
 fingerprint_id_cache = {}     #: cache for fingerprints
 queue_id_cache = {}           #: cache for queues
 uid_id_cache = {}             #: cache for uids
 suite_version_cache = {}      #: cache for suite_versions (packages)
+suite_bin_version_cache = {}
+cache_preloaded = False
 
 ################################################################################
 
@@ -73,31 +79,6 @@ def init (config, sql):
     Cnf = config
     projectB = sql
 
-
-def do_query(query):
-    """
-    Executes a database query. Writes statistics / timing to stderr.
-
-    @type query: string
-    @param query: database query string, passed unmodified
-
-    @return: db result
-
-    @warning: The query is passed B{unmodified}, so be careful what you use this for.
-    """
-    sys.stderr.write("query: \"%s\" ... " % (query))
-    before = time.time()
-    r = projectB.query(query)
-    time_diff = time.time()-before
-    sys.stderr.write("took %.3f seconds.\n" % (time_diff))
-    if type(r) is int:
-        sys.stderr.write("int result: %s\n" % (r))
-    elif type(r) is types.NoneType:
-        sys.stderr.write("result: None\n")
-    else:
-        sys.stderr.write("pgresult: %s\n" % (r.getresult()))
-    return r
-
 ################################################################################
 
 def get_suite_id (suite):
@@ -388,6 +369,7 @@ def get_suite_version(source, suite):
     @return: the version for I{source} in I{suite}
     """
+    global suite_version_cache
 
     cache_key = "%s_%s" % (source, suite)
 
@@ -410,6 +392,50 @@ def get_suite_version(source, suite):
     return version
 
+def get_latest_binary_version_id(binary, section, suite, arch):
+    global suite_bin_version_cache
+    cache_key = "%s_%s_%s_%s" % (binary, section, suite, arch)
+    cache_key_all = "%s_%s_%s_%s" % (binary, section, suite, get_architecture_id("all"))
+
+    # Check for the cache hit for its arch, then arch all
+    if suite_bin_version_cache.has_key(cache_key):
+        return suite_bin_version_cache[cache_key]
+    if suite_bin_version_cache.has_key(cache_key_all):
+        return suite_bin_version_cache[cache_key_all]
+    if cache_preloaded == True:
+        return # package does not exist
+
+    q = projectB.query("SELECT DISTINCT b.id FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.package = '%s' AND b.architecture = '%d' AND ba.suite = '%d' AND o.section = '%d'" % (binary, int(arch), int(suite), int(section)))
+
+    if not q.getresult():
+        return False
+
+    highest_bid = q.getresult()[0][0]
+
+    suite_bin_version_cache[cache_key] = highest_bid
+    return highest_bid
+
+def preload_binary_id_cache():
+    global suite_bin_version_cache, cache_preloaded
+
+    # Get suite info
+    q = projectB.query("SELECT id FROM suite")
+    suites = q.getresult()
+
+    # Get arch mappings
+    q = projectB.query("SELECT id FROM architecture")
+    arches = q.getresult()
+
+    for suite in suites:
+        for arch in arches:
+            q = projectB.query("SELECT DISTINCT b.id, b.package, o.section FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.architecture = '%d' AND ba.suite = '%d'" % (int(arch[0]), int(suite[0])))
+
+            for bi in q.getresult():
+                cache_key = "%s_%s_%s_%s" % (bi[1], bi[2], suite[0], arch[0])
+                suite_bin_version_cache[cache_key] = int(bi[0])
+
+    cache_preloaded = True
+
 def get_suite_architectures(suite):
     """
     Returns list of architectures for C{suite}.
 
@@ -436,6 +462,32 @@ def get_suite_architectures(suite):
     q = projectB.query(sql)
     return map(lambda x: x[0], q.getresult())
 
+def get_suite_untouchable(suite):
+    """
+    Returns true if the C{suite} is untouchable, otherwise false.
+
+    @type suite: string, int
+    @param suite: the suite name or the suite_id
+
+    @rtype: boolean
+    @return: status of suite
+    """
+
+    suite_id = None
+    if type(suite) == str:
+        suite_id = get_suite_id(suite.lower())
+    elif type(suite) == int:
+        suite_id = suite
+    else:
+        return None
+
+    sql = """ SELECT untouchable FROM suite WHERE id='%s' """ % (suite_id)
+
+    q = projectB.query(sql)
+    if q.getresult()[0][0] == "f":
+        return False
+    else:
+        return True
 
 ################################################################################
 
@@ -758,3 +810,159 @@ def get_suites(pkgname, src=False):
 
     q = projectB.query(sql)
     return map(lambda x: x[0], q.getresult())
+
+
+################################################################################
+
+def get_new_comments(package):
+    """
+    Returns all the possible comments attached to C{package} in NEW. All versions.
+
+    @type package: string
+    @param package: name of the package
+
+    @rtype: list
+    @return: list of strings containing comments for all versions from all authors for package
+    """
+
+    comments = []
+    query = projectB.query(""" SELECT version, comment, author, notedate
+                               FROM new_comments
+                               WHERE package = '%s'
+                               ORDER BY notedate
+                           """ % (package))
+
+    for row in query.getresult():
+        comments.append("\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s\n" % (row[2], row[0], row[3], row[1]))
+        comments.append("-"*72)
+
+    return comments
+
+def has_new_comment(package, version, ignore_trainee=False):
+    """
+    Returns true if the given combination of C{package}, C{version} has a comment.
+    If C{ignore_trainee} is true, comments from a trainee are ignored.
+
+    @type package: string
+    @param package: name of the package
+
+    @type version: string
+    @param version: package version
+
+    @type ignore_trainee: boolean
+    @param ignore_trainee: ignore trainee comments
+
+    @rtype: boolean
+    @return: true/false
+    """
+
+    trainee=""
+    if ignore_trainee:
+        trainee='AND trainee=false'
+
+    exists = projectB.query("""SELECT 1 FROM new_comments
+                               WHERE package='%s'
+                               AND version='%s'
+                               %s
+                               LIMIT 1"""
+                            % (package, version, trainee) ).getresult()
+
+    if not exists:
+        return False
+    else:
+        return True
+
+def add_new_comment(package, version, comment, author, trainee=False):
+    """
+    Add a new comment for C{package}, C{version} written by C{author}
+
+    @type package: string
+    @param package: name of the package
+
+    @type version: string
+    @param version: package version
+
+    @type comment: string
+    @param comment: the comment
+
+    @type author: string
+    @param author: the author name
+
+    @type trainee: boolean
+    @param trainee: trainee comment
+    """
+
+    projectB.query(""" INSERT INTO new_comments (package, version, comment, author, trainee)
+                       VALUES ('%s', '%s', '%s', '%s', '%s')
+                   """ % (package, version, pg.escape_string(comment), pg.escape_string(author), trainee))
+
+    return
+
+def delete_new_comments(package, version):
+    """
+    Delete a comment for C{package}, C{version}, if one exists
+    """
+
+    projectB.query(""" DELETE FROM new_comments
+                       WHERE package = '%s' AND version = '%s'
+                   """ % (package, version))
+    return
+
+def delete_all_new_comments(package):
+    """
+    Delete all comments for C{package}, if they exist
+    """
+
+    projectB.query(""" DELETE FROM new_comments
+                       WHERE package = '%s'
+                   """ % (package))
+    return
+
+################################################################################
+def copy_temporary_contents(package, version, arch, deb, reject):
+    """
+    copy the previously stored contents from the temp table to the permanent one
+
+    during process-unchecked, the deb should have been scanned and the
+    contents stored in pending_content_associations
+    """
+
+    # first see if contents exist:
+
+    arch_id = get_architecture_id (arch)
+
+    exists = projectB.query("""SELECT 1 FROM pending_content_associations
+                               WHERE package='%s'
+                               AND version='%s'
+                               AND architecture=%d LIMIT 1"""
+                            % (package, version, arch_id) ).getresult()
+
+    if not exists:
+        # This should NOT happen.  We should have added contents
+        # during process-unchecked.  If it didn't, log an error, and send
+        # an email.
+        subst = {
+            "__PACKAGE__": package,
+            "__VERSION__": version,
+            "__ARCH__": arch,
+            "__TO_ADDRESS__": Cnf["Dinstall::MyAdminAddress"],
+            "__DAK_ADDRESS__": Cnf["Dinstall::MyEmailAddress"] }
+
+        message = utils.TemplateSubst(subst, Cnf["Dir::Templates"]+"/missing-contents")
+        utils.send_mail( message )
+
+        exists = Binary(deb, reject).scan_package()
+
+    if exists:
+        sql = """INSERT INTO content_associations(binary_pkg,filepath,filename)
+                 SELECT currval('binaries_id_seq'), filepath, filename FROM pending_content_associations
+                 WHERE package='%s'
+                 AND version='%s'
+                 AND architecture=%d""" % (package, version, arch_id)
+        projectB.query(sql)
+        projectB.query("""DELETE from pending_content_associations
+                          WHERE package='%s'
+                          AND version='%s'
+                          AND architecture=%d""" % (package, version, arch_id))
+
+    return exists
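
For orientation, the following is a minimal usage sketch of the NEW-comment helpers this patch adds. It is illustrative only and not part of the diff: it assumes Cnf is the usual dak configuration object with standard DB:: entries pointing at the projectb database, and the package name, version, comment text and author are made-up example values.

    # Illustrative sketch (not part of the patch).  Python 2, to match the module.
    import pg                      # PyGreSQL, the interface daklib/database.py expects

    from daklib import database

    def review_example(Cnf):
        # Open a PyGreSQL connection (DB:: keys assumed) and hand it to
        # daklib.database before calling any of its helpers.
        projectB = pg.connect(Cnf["DB::Name"], Cnf["DB::Host"], int(Cnf["DB::Port"]))
        database.init(Cnf, projectB)

        package, version = "examplepkg", "1.0-1"

        # Record a note against the NEW entry unless one is already present.
        if not database.has_new_comment(package, version):
            database.add_new_comment(package, version,
                                     "Example note: waiting on a copyright review.",
                                     "An Example Reviewer")

        # Show every stored note for the package, all versions and authors.
        for block in database.get_new_comments(package):
            print block

        # Once the package has been processed, its notes can be removed again.
        database.delete_all_new_comments(package)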