--- /dev/null
+#!/usr/bin/env python
+"""
+Create all the contents files
+
+@contact: Debian FTPMaster <ftpmaster@debian.org>
+@copyright: 2008, 2009 Michael Casadevall <mcasadevall@debian.org>
+@copyright: 2009 Mike O'Connor <stew@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+################################################################################
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+# <Ganneff> there is the idea to slowly replace contents files
+# <Ganneff> with a new generation of such files.
+# <Ganneff> having more info.
+
+# <Ganneff> of course that wont help for now where we need to generate them :)
+
+################################################################################
+
+import sys
+import os
+import tempfile
+import logging
+import traceback
+import math
+import gzip
+import apt_pkg
+from daklib import utils
+from daklib.config import Config
+from daklib.dbconn import DBConn
+################################################################################
+
+def usage (exit_code=0):
+ print """Usage: dak contents [options] command [arguments]
+
+COMMANDS
+ generate
+ generate Contents-$arch.gz files
+
+ bootstrap
+ scan the debs in the existing pool and load contents into the database
+
+ cruft
+ remove files/paths which are no longer referenced by a binary
+
+OPTIONS
+ -h, --help
+ show this help and exit
+
+ -v, --verbose
+ show verbose information messages
+
+ -q, --quiet
+ suppress all output but errors
+
+ -s, --suite={stable,testing,unstable,...}
+ only operate on a single suite
+
+ -a, --arch={i386,amd64}
+ only operate on a single architecture
+"""
+ sys.exit(exit_code)
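+
+# Example invocations (illustrative; assumes the usual "dak" wrapper
+# dispatches "dak contents ..." to this module):
+#   dak contents --suite=unstable --arch=amd64 generate
+#   dak contents bootstrap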
+
+################################################################################
+
+# where in dak.conf all of our configuration will be stowed
+
+options_prefix = "Contents"
+options_prefix = "%s::Opitons" % options_prefix
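+# e.g. the --suite option parsed in main() lands under
+# "Contents::Options::Suite" (illustrative key; see the arguments
+# table in main() below)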
+
+log = logging.getLogger()
+
+################################################################################
+
++# we unfortunately still have broken stuff in headers
+latin1_q = """SET CLIENT_ENCODING TO 'LATIN1'"""
+
++# get all the arches delivered for a given suite
++# this should probably exist somewhere common
+arches_q = """PREPARE arches_q as
+ SELECT s.architecture, a.arch_string
+ FROM suite_architectures s
+ JOIN architecture a ON (s.architecture=a.id)
+ WHERE suite = $1"""
+
++# find me the .deb for a given binary id
+debs_q = """PREPARE debs_q as
+ SELECT b.id, f.filename FROM bin_assoc_by_arch baa
+ JOIN binaries b ON baa.bin=b.id
+ JOIN files f ON b.file=f.id
+ WHERE suite = $1
+ AND arch = $2"""
+
++# ask if we already have contents associated with this binary
+olddeb_q = """PREPARE olddeb_q as
+ SELECT 1 FROM content_associations
+ WHERE binary_pkg = $1
+ LIMIT 1"""
+
++# find me all of the contents for a given .deb
+contents_q = """PREPARE contents_q as
+ SELECT (p.path||'/'||n.file) AS fn,
+ comma_separated_list(s.section||'/'||b.package)
+ FROM content_associations c
+ JOIN content_file_paths p ON (c.filepath=p.id)
+ JOIN content_file_names n ON (c.filename=n.id)
+ JOIN binaries b ON (b.id=c.binary_pkg)
+ JOIN bin_associations ba ON (b.id=ba.bin)
+ JOIN override o ON (o.package=b.package)
+ JOIN section s ON (s.id=o.section)
+ WHERE (b.architecture = $1 OR b.architecture = $2)
+ AND ba.suite = $3
+ AND o.suite = $4
+ AND b.type = 'deb'
+ AND o.type = '7'
+ GROUP BY fn
+ ORDER BY fn"""
+
++# find me all of the contents for a given .udeb
+udeb_contents_q = """PREPARE udeb_contents_q as
+ SELECT (p.path||'/'||n.file) as fn,
+ comma_separated_list(s.section||'/'||b.package)
+ FROM content_associations c
+ JOIN content_file_paths p ON (c.filepath=p.id)
+ JOIN content_file_names n ON (c.filename=n.id)
+ JOIN binaries b ON (b.id=c.binary_pkg)
+ JOIN bin_associations ba ON (b.id=ba.bin)
+ JOIN override o ON (o.package=b.package)
+ JOIN section s ON (s.id=o.section)
+ WHERE s.id = $1
+ AND ba.suite = $2
+ AND o.suite = $3
+ AND b.type = 'udeb'
+ AND o.type = '8'
+ GROUP BY fn
+ ORDER BY fn"""
+
++# clear out all of the temporarily stored content associations
++# this should be run only after p-a has run. after a p-a
++# run we should have either accepted or rejected every package
++# so there should no longer be anything in the queue
++remove_temp_contents_cruft_q = """DELETE FROM temp_content_associations"""
++
++# delete any filenames we are storing which have no binary associated with them
++remove_filename_cruft_q = """DELETE FROM content_file_names
++ WHERE id IN (SELECT cfn.id FROM content_file_names cfn
++ LEFT JOIN content_associations ca
++ ON ca.filename=cfn.id
++ WHERE ca.id IS NULL)""" );
++
++# delete any paths we are storing which have no binary associated with them
++remove_filepath_cruft_q = """DELETE FROM content_file_paths
++ WHERE id IN (SELECT cfn.id FROM content_file_paths cfn
++ LEFT JOIN content_associations ca
++ ON ca.filepath=cfn.id
++ WHERE ca.id IS NULL)"""
+class Contents(object):
+ """
+ Class capable of generating Contents-$arch.gz files
+
+ Usage: Contents().generate()
+ """
+
+ def __init__(self):
+ self.header = None
+
+ def _getHeader(self):
- log.error( "error openeing header file: %d\n%s" % (Config()["Contents::Header"],
- traceback.format_exc() ))
++ """
++ Internal method to return the header for Contents.gz files
++
++ This is boilerplate which explains the contents of the file and how
++ it can be used.
++ """
+ if self.header is None:
+ if Config().has_key("Contents::Header"):
+ try:
+ h = open(os.path.join( Config()["Dir::Templates"],
+ Config()["Contents::Header"] ), "r")
+ self.header = h.read()
+ print( "header: %s" % self.header )
+ h.close()
+ except:
- # Internal method for writing all the results to a given file
++ log.error( "error opening header file: %d\n%s" % (Config()["Contents::Header"],
++ traceback.format_exc() ))
+ self.header = False
+ else:
+ print( "no header" )
+ self.header = False
+
+ return self.header
+
+ # column at which the section/package list should start
+ _goal_column = 54
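+ # e.g. a 30-character filename needs max(1, ceil((54 - 30) / 8.0)) = 3
+ # tabs to bring the section list out to (roughly) column 54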
+
+ def _write_content_file(self, cursor, filename):
- remove files/paths from the DB which are no longer referenced by binaries
++ """
++ Internal method for writing all the results to a given file.
++ The cursor should have a result set generated from a query already.
++ """
+ f = gzip.open(Config()["Dir::Root"] + filename, "w")
+ try:
+ header = self._getHeader()
+
+ if header:
+ f.write(header)
+
+ while True:
+ contents = cursor.fetchone()
+ if not contents:
+ return
+
+ num_tabs = max(1,
+ int( math.ceil( (self._goal_column - len(contents[0])) / 8.0 ) ) )
+ f.write(contents[0] + ( '\t' * num_tabs ) + contents[-1] + "\n")
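+ # the line just written looks like, e.g. (hypothetical values):
+ #   "usr/bin/foo\t\t\tutils/foo,admin/foo-data\n"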
+
+ finally:
+ f.close()
+
+ def cruft(self):
+ """
- cursor.execute( """DELETE FROM content_file_names
- WHERE id IN (SELECT cfn.id FROM content_file_names cfn
- LEFT JOIN content_associations ca
- ON ca.filename=cfn.id
- WHERE ca.id IS NULL)""" );
- cursor.execute( """DELETE FROM content_file_paths
- WHERE id IN (SELECT cfn.id FROM content_file_paths cfn
- LEFT JOIN content_associations ca
- ON ca.filepath=cfn.id
- WHERE ca.id IS NULL)""" );
++ remove files/paths from the DB which are no longer referenced
++ by binaries and clean the temporary table
+ """
+ cursor = DBConn().cursor()
+ cursor.execute( "BEGIN WORK" )
- # return a list of suites to operate on
++ cursor.execute( remove_temp_contents_cruft_q )
++ cursor.execute( remove_filename_cruft_q )
++ cursor.execute( remove_filepath_cruft_q )
+ cursor.execute( "COMMIT" )
+
+
+ def bootstrap(self):
+ """
+ scan the existing debs in the pool to populate the contents database tables
+ """
+ pooldir = Config()[ 'Dir::Pool' ]
+
+ cursor = DBConn().cursor()
+ cursor.execute( latin1_q )
+ cursor.execute( debs_q )
+ cursor.execute( olddeb_q )
+ cursor.execute( arches_q )
+
+ suites = self._suites()
+ for suite in [i.lower() for i in suites]:
+ suite_id = DBConn().get_suite_id(suite)
+
+ arch_list = self._arches(cursor, suite_id)
+ arch_all_id = DBConn().get_architecture_id("all")
+ for arch_id in arch_list:
+ cursor.execute( "EXECUTE debs_q(%d, %d)" % ( suite_id, arch_id[0] ) )
+
+ debs = cursor.fetchall()
+ count = 0
+ for deb in debs:
+ count += 1
+ cursor.execute( "EXECUTE olddeb_q(%d)" % (deb[0] ) )
+ old = cursor.fetchone()
+ if old:
+ log.debug( "already imported: %s" % deb[1] )
+ else:
+ debfile = os.path.join( pooldir, deb[1] )
+ if os.path.exists( debfile ):
+ contents = utils.generate_contents_information( debfile )
+ DBConn().insert_content_paths(deb[0], contents)
+ log.info( "imported (%d/%d): %s" % (count,len(debs),deb[1] ) )
+ else:
+ log.error( "missing .deb: %s" % deb[1] )
+
+ def generate(self):
+ """
+ Generate Contents-$arch.gz files for every available arch in each given suite.
+ """
+ cursor = DBConn().cursor()
+
+ cursor.execute( arches_q )
+ cursor.execute( contents_q )
+ cursor.execute( udeb_contents_q )
+
+ suites = self._suites()
+
+ # Get our suites, and the architectures
+ for suite in [i.lower() for i in suites]:
+ suite_id = DBConn().get_suite_id(suite)
+ arch_list = self._arches(cursor, suite_id)
+
+ arch_all_id = DBConn().get_architecture_id("all")
+
+ for arch_id in arch_list:
+ cursor.execute( "EXECUTE contents_q(%d,%d,%d,%d)" % (arch_id[0], arch_all_id, suite_id, suite_id))
+ self._write_content_file(cursor, "dists/%s/Contents-%s.gz" % (suite, arch_id[1]))
+
+ # The MORE fun part. Ok, udebs need their own contents files, udeb, and udeb-nf (non-free)
+ # This is HORRIBLY debian specific :-/
+ # First off, udeb
+ section_id = DBConn().get_section_id('debian-installer') # all udebs should be here
+ if section_id != -1:
+ cursor.execute("EXECUTE udeb_contents_q(%d,%d,%d)" % (section_id, suite_id, suite_id))
+ self._write_content_file(cursor, "dists/%s/Contents-udeb.gz" % suite)
+
+ # Once more, with non-free
+ section_id = DBConn().get_section_id('non-free/debian-installer') # all udebs should be here
+
+ if section_id != -1:
+ cursor.execute("EXECUTE udeb_contents_q(%d,%d,%d)" % (section_id, suite_id, suite_id))
+ self._write_content_file(cursor, "dists/%s/Contents-udeb-nf.gz" % suite)
+
+
+################################################################################
+
+ def _suites(self):
- # return a list of archs to operate on
++ """
++ return a list of suites to operate on
++ """
+ if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
+ suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
+ else:
+ suites = Config().SubTree("Suite").List()
+
+ return suites
+
+ def _arches(self, cursor, suite):
- logging.basicConfig( level=logging.DEBUG,
++ """
++ return a list of archs to operate on
++ """
+ arch_list = [ ]
+ if Config().has_key( "%s::%s" %(options_prefix,"Arch")):
+ archs = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Arch")])
+ for arch_name in archs:
+ arch_list.append((DBConn().get_architecture_id(arch_name), arch_name))
+ else:
+ cursor.execute("EXECUTE arches_q(%d)" % (suite))
+ while True:
+ r = cursor.fetchone()
+ if not r:
+ break
+
+ if r[1] != "source" and r[1] != "all":
+ arch_list.append((r[0], r[1]))
+
+ return arch_list
+
+################################################################################
+
+def main():
+ cnf = Config()
+
+ arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")),
+ ('s',"suite", "%s::%s" % (options_prefix,"Suite"),"HasArg"),
+ ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")),
+ ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")),
+ ('a',"arch", "%s::%s" % (options_prefix,"Arch"),"HasArg"),
+ ]
+
+ commands = {'generate' : Contents.generate,
+ 'bootstrap' : Contents.bootstrap,
+ 'cruft' : Contents.cruft,
+ }
+
+ level=logging.INFO
+ if cnf.has_key("%s::%s" % (options_prefix,"Quiet")):
+ level=logging.ERROR
+
+ elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")):
+ level=logging.DEBUG
+
+
++ logging.basicConfig( level=level,
+ format='%(asctime)s %(levelname)s %(message)s',
+ stream = sys.stderr )
+
+ args = apt_pkg.ParseCommandLine(cnf.Cnf, arguments,sys.argv)
+
+ if (len(args) < 1) or not commands.has_key(args[0]):
+ usage()
+
+ if cnf.has_key("%s::%s" % (options_prefix,"Help")):
+ usage()
+
+ commands[args[0]](Contents())
+
+if __name__ == '__main__':
+ main()
################################################################################
--import sys, imp
--import daklib.utils, daklib.extensions
++import sys
++import imp
++import daklib.utils
++import daklib.extensions
################################################################################
"Generate package <-> file mapping"),
("generate-releases",
"Generate Release files"),
- "Generate contest files"),
+ ("contents",
++ "Generate content files"),
("generate-index-diffs",
"Generate .diff/Index files"),
("clean-suites",
#!/usr/bin/env python
- # coding=utf8
--
"""
- Debian Archive Kit Database Update Script
- Copyright © 2008 Michael Casadevall <mcasadevall@debian.org>
- Copyright © 2008 Roger Leigh <rleigh@debian.org>
+ Database Update Script - make the suite_architectures table use sane values
- Debian Archive Kit Database Update Script 2
+ @contact: Debian FTP Master <ftpmaster@debian.org>
+ @copyright: 2009 Joerg Jaspert <joerg@debian.org>
+ @license: GNU General Public License version 2 or later
-
"""
# This program is free software; you can redistribute it and/or modify
################################################################################
-def do_archive():
- """Initalize the archive table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM archive")
- for name in Cnf.SubTree("Archive").List():
- archive_config = Cnf.SubTree("Archive::%s" % (name))
- origin_server = sql_get(archive_config, "OriginServer")
- description = sql_get(archive_config, "Description")
- projectB.query("INSERT INTO archive (name, origin_server, description) "
- "VALUES ('%s', %s, %s)"
- % (name, origin_server, description))
- projectB.query("COMMIT WORK")
-
-def do_architecture():
- """Initalize the architecture table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM architecture")
- for arch in Cnf.SubTree("Architectures").List():
- description = Cnf["Architectures::%s" % (arch)]
- projectB.query("INSERT INTO architecture (arch_string, description) "
- "VALUES ('%s', '%s')" % (arch, description))
- projectB.query("COMMIT WORK")
-
-def do_component():
- """Initalize the component table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM component")
- for name in Cnf.SubTree("Component").List():
- component_config = Cnf.SubTree("Component::%s" % (name))
- description = sql_get(component_config, "Description")
- if component_config.get("MeetsDFSG").lower() == "true":
- meets_dfsg = "true"
- else:
- meets_dfsg = "false"
- projectB.query("INSERT INTO component (name, description, meets_dfsg) "
- "VALUES ('%s', %s, %s)"
- % (name, description, meets_dfsg))
- projectB.query("COMMIT WORK")
-
-def do_location():
- """Initalize the location table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM location")
- for location in Cnf.SubTree("Location").List():
- location_config = Cnf.SubTree("Location::%s" % (location))
- archive_id = database.get_archive_id(location_config["Archive"])
- if archive_id == -1:
- utils.fubar("Archive '%s' for location '%s' not found."
- % (location_config["Archive"], location))
- location_type = location_config.get("type")
- if location_type == "pool":
- for component in Cnf.SubTree("Component").List():
- component_id = database.get_component_id(component)
- projectB.query("INSERT INTO location (path, component, "
- "archive, type) VALUES ('%s', %d, %d, '%s')"
- % (location, component_id, archive_id,
- location_type))
- else:
- utils.fubar("E: type '%s' not recognised in location %s."
- % (location_type, location))
- projectB.query("COMMIT WORK")
-
-def do_suite():
- """Initalize the suite table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM suite")
- for suite in Cnf.SubTree("Suite").List():
- suite_config = Cnf.SubTree("Suite::%s" %(suite))
- version = sql_get(suite_config, "Version")
- origin = sql_get(suite_config, "Origin")
- description = sql_get(suite_config, "Description")
- projectB.query("INSERT INTO suite (suite_name, version, origin, "
- "description) VALUES ('%s', %s, %s, %s)"
- % (suite.lower(), version, origin, description))
- for architecture in database.get_suite_architectures(suite):
- architecture_id = database.get_architecture_id (architecture)
- if architecture_id < 0:
- utils.fubar("architecture '%s' not found in architecture"
- " table for suite %s."
- % (architecture, suite))
- projectB.query("INSERT INTO suite_architectures (suite, "
- "architecture) VALUES (currval('suite_id_seq'), %d)"
- % (architecture_id))
- projectB.query("COMMIT WORK")
-
-def do_override_type():
- """Initalize the override_type table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM override_type")
- for override_type in Cnf.ValueList("OverrideType"):
- projectB.query("INSERT INTO override_type (type) VALUES ('%s')"
- % (override_type))
- projectB.query("COMMIT WORK")
-
-def do_priority():
- """Initialize the priority table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM priority")
- for priority in Cnf.SubTree("Priority").List():
- projectB.query("INSERT INTO priority (priority, level) VALUES "
- "('%s', %s)"
- % (priority, Cnf["Priority::%s" % (priority)]))
- projectB.query("COMMIT WORK")
-
-def do_section():
- """Initalize the section table."""
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM section")
- for component in Cnf.SubTree("Component").List():
- if Cnf["Control-Overrides::ComponentPosition"] == "prefix":
- suffix = ""
- if component != "main":
- prefix = component + '/'
- else:
- prefix = ""
- else:
- prefix = ""
- if component != "main":
- suffix = '/' + component
+class InitDB(object):
+ def __init__(self, Cnf, projectB):
+ self.Cnf = Cnf
+ self.projectB = projectB
+
+ def do_archive(self):
- """Initalize the archive table."""
++ """initalize the archive table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM archive")
+ archive_add = "INSERT INTO archive (name, origin_server, description) VALUES (%s, %s, %s)"
+ for name in self.Cnf.SubTree("Archive").List():
+ archive_config = self.Cnf.SubTree("Archive::%s" % (name))
+ origin_server = sql_get(archive_config, "OriginServer")
+ description = sql_get(archive_config, "Description")
+ c.execute(archive_add, [name, origin_server, description])
+ self.projectB.commit()
+
+ def do_architecture(self):
+ """Initalize the architecture table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM architecture")
+ arch_add = "INSERT INTO architecture (arch_string, description) VALUES (%s, %s)"
+ for arch in self.Cnf.SubTree("Architectures").List():
+ description = self.Cnf["Architectures::%s" % (arch)]
+ c.execute(arch_add, [arch, description])
+ self.projectB.commit()
+
+ def do_component(self):
+ """Initalize the component table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM component")
+
+ comp_add = "INSERT INTO component (name, description, meets_dfsg) " + \
+ "VALUES (%s, %s, %s)"
+
+ for name in self.Cnf.SubTree("Component").List():
+ component_config = self.Cnf.SubTree("Component::%s" % (name))
+ description = sql_get(component_config, "Description")
+ meets_dfsg = (component_config.get("MeetsDFSG").lower() == "true")
+ c.execute(comp_add, [name, description, meets_dfsg])
+
+ self.projectB.commit()
+
+ def do_location(self):
+ """Initalize the location table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM location")
+
- loc_add_mixed = "INSERT INTO location (path, archive, type) " + \
- "VALUES (%s, %s, %s)"
-
+ loc_add = "INSERT INTO location (path, component, archive, type) " + \
+ "VALUES (%s, %s, %s, %s)"
+
+ for location in self.Cnf.SubTree("Location").List():
+ location_config = self.Cnf.SubTree("Location::%s" % (location))
+ archive_id = self.projectB.get_archive_id(location_config["Archive"])
+ if archive_id == -1:
+ utils.fubar("Archive '%s' for location '%s' not found."
+ % (location_config["Archive"], location))
+ location_type = location_config.get("type")
- if location_type == "legacy-mixed":
- c.execute(loc_add_mixed, [location, archive_id, location_config["type"]])
- elif location_type == "legacy" or location_type == "pool":
++ if location_type == "pool":
+ for component in self.Cnf.SubTree("Component").List():
+ component_id = self.projectB.get_component_id(component)
+ c.execute(loc_add, [location, component_id, archive_id, location_type])
else:
- " table for suite %s."
- % (architecture, suite))
+ utils.fubar("E: type '%s' not recognised in location %s."
+ % (location_type, location))
+
+ self.projectB.commit()
+
+ def do_suite(self):
+ """Initalize the suite table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM suite")
+
+ suite_add = "INSERT INTO suite (suite_name, version, origin, description) " + \
+ "VALUES (%s, %s, %s, %s)"
+
+ sa_add = "INSERT INTO suite_architectures (suite, architecture) " + \
+ "VALUES (currval('suite_id_seq'), %s)"
+
+ for suite in self.Cnf.SubTree("Suite").List():
+ suite_config = self.Cnf.SubTree("Suite::%s" %(suite))
+ version = sql_get(suite_config, "Version")
+ origin = sql_get(suite_config, "Origin")
+ description = sql_get(suite_config, "Description")
+ c.execute(suite_add, [suite.lower(), version, origin, description])
+ for architecture in self.Cnf.ValueList("Suite::%s::Architectures" % (suite)):
+ architecture_id = self.projectB.get_architecture_id (architecture)
+ if architecture_id < 0:
+ utils.fubar("architecture '%s' not found in architecture"
++ " table for suite %s."
++ % (architecture, suite))
+ c.execute(sa_add, [architecture_id])
+
+ self.projectB.commit()
+
+ def do_override_type(self):
+ """Initalize the override_type table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM override_type")
+
+ over_add = "INSERT INTO override_type (type) VALUES (%s)"
+
+ for override_type in self.Cnf.ValueList("OverrideType"):
+ c.execute(over_add, [override_type])
+
+ self.projectB.commit()
+
+ def do_priority(self):
+ """Initialize the priority table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM priority")
+
+ prio_add = "INSERT INTO priority (priority, level) VALUES (%s, %s)"
+
+ for priority in self.Cnf.SubTree("Priority").List():
+ c.execute(prio_add, [priority, self.Cnf["Priority::%s" % (priority)]])
+
+ self.projectB.commit()
+
+ def do_section(self):
+ """Initalize the section table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM section")
+
+ sect_add = "INSERT INTO section (section) VALUES (%s)"
+
+ for component in self.Cnf.SubTree("Component").List():
+ if self.Cnf["Control-Overrides::ComponentPosition"] == "prefix":
suffix = ""
- for section in Cnf.ValueList("Section"):
- projectB.query("INSERT INTO section (section) VALUES "
- "('%s%s%s')" % (prefix, section, suffix))
- projectB.query("COMMIT WORK")
+ if component != "main":
+ prefix = component + '/'
+ else:
+ prefix = ""
+ else:
+ prefix = ""
+ if component != "main":
+ suffix = '/' + component
+ else:
+ suffix = ""
+ for section in self.Cnf.ValueList("Section"):
+ c.execute(sect_add, [prefix + section + suffix])
+
+ self.projectB.commit()
+
+ def do_all(self):
+ self.do_archive()
+ self.do_architecture()
+ self.do_component()
+ self.do_location()
+ self.do_suite()
+ self.do_override_type()
+ self.do_priority()
+ self.do_section()
################################################################################
###############################################################################
--import errno, fcntl, os, sys, time, re
-import apt_pkg
++import errno
++import fcntl
++import os
++import sys
++import time
++import re
+import apt_pkg
+import commands
from daklib import database
from daklib import logging
from daklib import queue
suite_id = database.get_suite_id(suite)
projectB.query("INSERT INTO bin_associations (suite, bin) VALUES (%d, currval('binaries_id_seq'))" % (suite_id))
-
+ if not database.copy_temporary_contents(package, version, files[newfile]):
+ reject("Missing contents for package")
+
- # If the .orig.tar.gz is in a legacy directory we need to poolify
- # it, so that apt-get source (and anything else that goes by the
- # "Directory:" field in the Sources.gz file) works.
- orig_tar_id = Upload.pkg.orig_tar_id
- orig_tar_location = Upload.pkg.orig_tar_location
- legacy_source_untouchable = Upload.pkg.legacy_source_untouchable
- if orig_tar_id and orig_tar_location == "legacy":
- q = projectB.query("SELECT DISTINCT ON (f.id) l.path, f.filename, f.id as files_id, df.source, df.id as dsc_files_id, f.size, f.md5sum FROM files f, dsc_files df, location l WHERE df.source IN (SELECT source FROM dsc_files WHERE file = %s) AND f.id = df.file AND l.id = f.location AND (l.type = 'legacy' OR l.type = 'legacy-mixed')" % (orig_tar_id))
- qd = q.dictresult()
- for qid in qd:
- # Is this an old upload superseded by a newer -sa upload? (See check_dsc() for details)
- if legacy_source_untouchable.has_key(qid["files_id"]):
- continue
- # First move the files to the new location
- legacy_filename = qid["path"] + qid["filename"]
- pool_location = utils.poolify (changes["source"], files[newfile]["component"])
- pool_filename = pool_location + os.path.basename(qid["filename"])
- destination = Cnf["Dir::Pool"] + pool_location
- utils.move(legacy_filename, destination)
- # Then Update the DB's files table
- q = projectB.query("UPDATE files SET filename = '%s', location = '%s' WHERE id = '%s'" % (pool_filename, dsc_location_id, qid["files_id"]))
-
- # If this is a sourceful diff only upload that is moving non-legacy
+ # If this is a sourceful diff only upload that is moving
# cross-component we need to copy the .orig.tar.gz into the new
# component too for the same reasons as above.
#
################################################################################
- import commands, errno, fcntl, os, re, shutil, stat, sys, time, tempfile, traceback, tarfile
-import commands, errno, fcntl, os, re, shutil, stat, sys, time, tempfile, traceback
++import commands
++import errno
++import fcntl
++import os
++import re
++import shutil
++import stat
++import sys
++import time
++import tempfile
++import traceback
++import tarfile
import apt_inst, apt_pkg
-from daklib import database
+from debian_bundle import deb822
+from daklib.dbconn import DBConn
+from daklib.binary import Binary
from daklib import logging
from daklib import queue
from daklib import utils
################################################################################
--import commands, os, pg, re, sys
--import apt_pkg, apt_inst
++import commands
++import os
++import pg
++import re
++import sys
++import apt_pkg
++import apt_inst
from daklib import database
from daklib import utils
from daklib.dak_exceptions import *
maintainer_id_cache = {} #: cache for maintainers
keyring_id_cache = {} #: cache for keyrings
source_id_cache = {} #: cache for sources
++
files_id_cache = {} #: cache for files
maintainer_cache = {} #: cache for maintainer names
fingerprint_id_cache = {} #: cache for fingerprints
return version
-
+def get_latest_binary_version_id(binary, section, suite, arch):
+ global suite_bin_version_cache
+ cache_key = "%s_%s_%s_%s" % (binary, section, suite, arch)
+ cache_key_all = "%s_%s_%s_%s" % (binary, section, suite, get_architecture_id("all"))
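+ # cache keys are underscore-joined values, e.g. "bash_10_5_3" for
+ # (package, section id, suite id, arch id); the ids here are made up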
+
+ # Check for the cache hit for its arch, then arch all
+ if suite_bin_version_cache.has_key(cache_key):
+ return suite_bin_version_cache[cache_key]
+ if suite_bin_version_cache.has_key(cache_key_all):
+ return suite_bin_version_cache[cache_key_all]
+ if cache_preloaded:
+ return # package does not exist
+
+ q = projectB.query("SELECT DISTINCT b.id FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.package = '%s' AND b.architecture = '%d' AND ba.suite = '%d' AND o.section = '%d'" % (binary, int(arch), int(suite), int(section)))
+
+ if not q.getresult():
+ return False
+
+ highest_bid = q.getresult()[0][0]
+
+ suite_bin_version_cache[cache_key] = highest_bid
+ return highest_bid
+
+def preload_binary_id_cache():
+ global suite_bin_version_cache, cache_preloaded
+
+ # Get suite info
+ q = projectB.query("SELECT id FROM suite")
+ suites = q.getresult()
+
+ # Get arch mappings
+ q = projectB.query("SELECT id FROM architecture")
+ arches = q.getresult()
+
+ for suite in suites:
+ for arch in arches:
+ q = projectB.query("SELECT DISTINCT b.id, b.package, o.section FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.architecture = '%d' AND ba.suite = '%d'" % (int(arch[0]), int(suite[0])))
+
+ for bi in q.getresult():
+ cache_key = "%s_%s_%s_%s" % (bi[1], bi[2], suite[0], arch[0])
+ suite_bin_version_cache[cache_key] = int(bi[0])
+
+ cache_preloaded = True
+
+ def get_suite_architectures(suite):
+ """
+ Returns list of architectures for C{suite}.
+
+ @type suite: string, int
+ @param suite: the suite name or the suite_id
+
+ @rtype: list
+ @return: the list of architectures for I{suite}
+ """
+
+ suite_id = None
+ if type(suite) == str:
+ suite_id = get_suite_id(suite)
+ elif type(suite) == int:
+ suite_id = suite
+ else:
+ return None
+
+ sql = """ SELECT a.arch_string FROM suite_architectures sa
+ JOIN architecture a ON (a.id = sa.architecture)
+ WHERE suite='%s' """ % (suite_id)
+
+ q = projectB.query(sql)
+ return map(lambda x: x[0], q.getresult())
+
################################################################################
def get_or_set_maintainer_id (maintainer):
q = projectB.query(sql)
return map(lambda x: x[0], q.getresult())
- message = utils.TemplateSubst(Subst, Cnf["Dir::Templates"]+"/bts-categorize")
+
+
+################################################################################
+
+def copy_temporary_contents(package, version, deb):
+ """
+ copy the previously stored contents from the temp table to the permanent one
+
+ during process-unchecked, the deb should have been scanned and the
+ contents stored in temp_content_associations
+ """
+
+ # first see if contents exist:
+
+ exists = projectB.query("""SELECT 1 FROM temp_content_associations
+ WHERE package='%s' LIMIT 1""" % package ).getresult()
+
+ if not exists:
+ # This should NOT happen. We should have added contents
+ # during process-unchecked. If we get here, log an error and
+ # send an email.
+ subst = {
+ "__PACKAGE__": package,
+ "__VERSION__": version,
++ "__TO_ADDRESS__": Cnf["Dinstall::MyAdminAddress"]
+ "__DAK_ADDRESS__": Cnf["Dinstall::MyEmailAddress"]
+ }
+
++ message = utils.TemplateSubst(subst, Cnf["Dir::Templates"]+"/missing-contents")
+ utils.send_mail( message )
+
+ exists = DBConn().insert_content_path(package, version, deb)
+
+ if exists:
+ sql = """INSERT INTO content_associations(binary_pkg,filepath,filename)
+ SELECT currval('binaries_id_seq'), filepath, filename FROM temp_content_associations
+ WHERE package='%s'
+ AND version='%s'""" % (package, version)
+ projectB.query(sql)
+ projectB.query("""DELETE from temp_content_associations
+ WHERE package='%s'
+ AND version='%s'""" % (package, version))
+
+ return exists
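+
+# usage sketch -- mirrors the process-accepted hook shown earlier:
+#   if not database.copy_temporary_contents(package, version, files[newfile]):
+#       reject("Missing contents for package")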
--- /dev/null
+From: __DAK_ADDRESS__
++To: __TO_ADDRESS__
+X-Debian: DAK
+X-Debian-Package: __PACKAGE__
+MIME-Version: 1.0
+Content-Type: text/plain; charset="utf-8"
+Content-Transfer-Encoding: 8bit
+Subject: Missing contents for __PACKAGE__ in accepted queue
+
+While processing the accepted queue, I didn't have contents in the
+database for __PACKAGE__ version __VERSION__. These contents should
+have been put into the database by process-unchecked when the package
+first arrived.
+
+This is probably stew's fault.