Dinstall
{
GPGKeyring {
- "/srv/keyring.debian.org/keyrings/debian-keyring.gpg";
+ "/srv/keyring.debian.org/keyrings/debian-keyring.gpg";
"/srv/keyring.debian.org/keyrings/debian-keyring.pgp";
"/srv/ftp.debian.org/keyrings/debian-maintainers.gpg";
};
critical;
};
};
-
-Contents
-{
- Header "contents";
- Root "/srv/ftp.debian.org/ftp/";
-}
\ No newline at end of file
+++ /dev/null
-#!/usr/bin/env python
-"""
-Create all the contents files
-
-@contact: Debian FTPMaster <ftpmaster@debian.org>
-@copyright: 2008, 2009 Michael Casadevall <mcasadevall@debian.org>
-@copyright: 2009 Mike O'Connor <stew@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-################################################################################
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-################################################################################
-
-# <Ganneff> there is the idea to slowly replace contents files
-# <Ganneff> with a new generation of such files.
-# <Ganneff> having more info.
-
-# <Ganneff> of course that wont help for now where we need to generate them :)
-
-################################################################################
-
-import sys
-import os
-import tempfile
-import logging
-import math
-import gzip
-import apt_pkg
-from daklib import utils
-from daklib.binary import Binary
-from daklib.config import Config
-from daklib.dbconn import DBConn
-################################################################################
-
-def usage (exit_code=0):
- print """Usage: dak contents [options] command [arguments]
-
-COMMANDS
- generate
- generate Contents-$arch.gz files
-
- bootstrap
-        scan the debs in the existing pool and load contents into the database
-
- cruft
- remove files/paths which are no longer referenced by a binary
-
-OPTIONS
- -h, --help
- show this help and exit
-
- -v, --verbose
- show verbose information messages
-
- -q, --quiet
-        suppress all output but errors
-
- -s, --suite={stable,testing,unstable,...}
- only operate on a single suite
-
- -a, --arch={i386,amd64}
- only operate on a single architecture
-"""
- sys.exit(exit_code)
-
-################################################################################
-
-# where in dak.conf all of our configuration will be stowed
-
-options_prefix = "Contents"
-options_prefix = "%s::Options" % options_prefix
-
-log = logging.getLogger()
-
-################################################################################
-
-# get all the arches delivered for a given suite
-# this should probably exist somewhere common
-arches_q = """PREPARE arches_q as
- SELECT s.architecture, a.arch_string
- FROM suite_architectures s
- JOIN architecture a ON (s.architecture=a.id)
- WHERE suite = $1"""
-
-# find me the .deb for a given binary id
-debs_q = """PREPARE debs_q as
- SELECT b.id, f.filename FROM bin_assoc_by_arch baa
- JOIN binaries b ON baa.bin=b.id
- JOIN files f ON b.file=f.id
- WHERE suite = $1
- AND arch = $2"""
-
-# ask if we already have contents associated with this binary
-olddeb_q = """PREPARE olddeb_q as
- SELECT 1 FROM content_associations
- WHERE binary_pkg = $1
- LIMIT 1"""
-
-# find me all of the contents for a given .deb
-contents_q = """PREPARE contents_q as
- SELECT (p.path||'/'||n.file) AS fn,
- comma_separated_list(s.section||'/'||b.package)
- FROM content_associations c
- JOIN content_file_paths p ON (c.filepath=p.id)
- JOIN content_file_names n ON (c.filename=n.id)
- JOIN binaries b ON (b.id=c.binary_pkg)
- JOIN bin_associations ba ON (b.id=ba.bin)
- JOIN override o ON (o.package=b.package)
- JOIN section s ON (s.id=o.section)
- WHERE (b.architecture = $1 OR b.architecture = $2)
- AND ba.suite = $3
- AND o.suite = $4
- AND b.type = 'deb'
- AND o.type = '7'
- GROUP BY fn
- ORDER BY fn"""
-
-# find me all of the contents for a given .udeb
-udeb_contents_q = """PREPARE udeb_contents_q as
- SELECT (p.path||'/'||n.file) as fn,
- comma_separated_list(s.section||'/'||b.package)
- FROM content_associations c
- JOIN content_file_paths p ON (c.filepath=p.id)
- JOIN content_file_names n ON (c.filename=n.id)
- JOIN binaries b ON (b.id=c.binary_pkg)
- JOIN bin_associations ba ON (b.id=ba.bin)
- JOIN override o ON (o.package=b.package)
- JOIN section s ON (s.id=o.section)
- WHERE s.id = $1
- AND ba.suite = $2
- AND o.suite = $3
- AND b.type = 'udeb'
- AND o.type = '8'
- GROUP BY fn
- ORDER BY fn"""
-
-# clear out all of the temporarily stored content associations
-# this should be run only after p-a has run. after a p-a
-# run we should have either accepted or rejected every package
-# so there should no longer be anything in the queue
-remove_pending_contents_cruft_q = """DELETE FROM pending_content_associations"""
-
-# delete any filenames we are storing which have no binary associated with them
-remove_filename_cruft_q = """DELETE FROM content_file_names
- WHERE id IN (SELECT cfn.id FROM content_file_names cfn
- LEFT JOIN content_associations ca
- ON ca.filename=cfn.id
- WHERE ca.id IS NULL)"""
-
-# delete any paths we are storing which have no binary associated with them
-remove_filepath_cruft_q = """DELETE FROM content_file_paths
- WHERE id IN (SELECT cfn.id FROM content_file_paths cfn
- LEFT JOIN content_associations ca
- ON ca.filepath=cfn.id
- WHERE ca.id IS NULL)"""
-class Contents(object):
- """
- Class capable of generating Contents-$arch.gz files
-
-    Usage: Contents().generate()
- """
-
- def __init__(self):
- self.header = None
-
- def _getHeader(self):
- """
- Internal method to return the header for Contents.gz files
-
- This is boilerplate which explains the contents of the file and how
- it can be used.
- """
- if self.header == None:
- if Config().has_key("Contents::Header"):
- try:
- h = open(os.path.join( Config()["Dir::Templates"],
- Config()["Contents::Header"] ), "r")
- self.header = h.read()
- h.close()
- except:
-                log.error( "error opening header file: %s\n%s" % (Config()["Contents::Header"],
-                                                                  traceback.format_exc() ))
- self.header = False
- else:
- self.header = False
-
- return self.header
-
- # goal column for section column
-    # goal column at which the section/package list should start
-
- def _write_content_file(self, cursor, filename):
- """
- Internal method for writing all the results to a given file.
- The cursor should have a result set generated from a query already.
- """
- filepath = Config()["Contents::Root"] + filename
- filedir = os.path.dirname(filepath)
- if not os.path.isdir(filedir):
- os.makedirs(filedir)
- f = gzip.open(filepath, "w")
- try:
- header = self._getHeader()
-
- if header:
- f.write(header)
-
- while True:
- contents = cursor.fetchone()
- if not contents:
- return
-
- num_tabs = max(1,
-                               int(math.ceil((self._goal_column - len(contents[0])) / 8.0)))
- f.write(contents[0] + ( '\t' * num_tabs ) + contents[-1] + "\n")
-
- finally:
- f.close()
-
- def cruft(self):
- """
- remove files/paths from the DB which are no longer referenced
- by binaries and clean the temporary table
- """
- cursor = DBConn().cursor();
- cursor.execute( "BEGIN WORK" )
- cursor.execute( remove_pending_contents_cruft_q )
- cursor.execute( remove_filename_cruft_q )
- cursor.execute( remove_filepath_cruft_q )
- cursor.execute( "COMMIT" )
-
-
- def bootstrap(self):
- """
- scan the existing debs in the pool to populate the contents database tables
- """
- pooldir = Config()[ 'Dir::Pool' ]
-
- cursor = DBConn().cursor();
- cursor.execute( debs_q )
- cursor.execute( olddeb_q )
- cursor.execute( arches_q )
-
- suites = self._suites()
- for suite in [i.lower() for i in suites]:
- suite_id = DBConn().get_suite_id(suite)
-
- arch_list = self._arches(cursor, suite_id)
- arch_all_id = DBConn().get_architecture_id("all")
- for arch_id in arch_list:
- cursor.execute( "EXECUTE debs_q(%d, %d)" % ( suite_id, arch_id[0] ) )
-
- count = 0
- while True:
- deb = cursor.fetchone()
- if not deb:
- break
- count += 1
- cursor1 = DBConn().cursor();
- cursor1.execute( "EXECUTE olddeb_q(%d)" % (deb[0] ) )
- old = cursor1.fetchone()
- if old:
- log.debug( "already imported: %s" % deb[1] )
- else:
- debfile = os.path.join( pooldir, deb[1] )
- if os.path.exists( debfile ):
- Binary(debfile).scan_package( deb[0] )
- else:
- log.error( "missing .deb: %s" % deb[1] )
-
- def generate(self):
- """
- Generate Contents-$arch.gz files for every available arch in each given suite.
- """
- cursor = DBConn().cursor();
-
- cursor.execute( arches_q )
- cursor.execute( contents_q )
- cursor.execute( udeb_contents_q )
-
- suites = self._suites()
-
- # Get our suites, and the architectures
- for suite in [i.lower() for i in suites]:
- suite_id = DBConn().get_suite_id(suite)
- arch_list = self._arches(cursor, suite_id)
-
- arch_all_id = DBConn().get_architecture_id("all")
-
- for arch_id in arch_list:
- cursor.execute( "EXECUTE contents_q(%d,%d,%d,%d)" % (arch_id[0], arch_all_id, suite_id, suite_id))
- self._write_content_file(cursor, "dists/%s/Contents-%s.gz" % (suite, arch_id[1]))
-
-        # The MORE fun part. Ok, udebs need their own contents files, udeb, and udeb-nf (non-free)
- # This is HORRIBLY debian specific :-/
- for section_id, fn_pattern in [("debian-installer","dists/%s/Contents-udeb.gz"),
- ("non-free/debian-installer", "dists/%s/Contents-udeb-nf.gz")]:
-
-            section_id = DBConn().get_section_id(section_id) # all udebs should be here
- if section_id != -1:
- cursor.execute("EXECUTE udeb_contents_q(%d,%d,%d)" % (section_id, suite_id, suite_id))
- self._write_content_file(cursor, fn_pattern % suite)
-
-
-################################################################################
-
- def _suites(self):
- """
- return a list of suites to operate on
- """
- if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
- suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
- else:
- suites = Config().SubTree("Suite").List()
-
- return suites
-
- def _arches(self, cursor, suite):
- """
- return a list of archs to operate on
- """
- arch_list = [ ]
- if Config().has_key( "%s::%s" %(options_prefix,"Arch")):
- archs = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Arch")])
- for arch_name in archs:
- arch_list.append((DBConn().get_architecture_id(arch_name), arch_name))
- else:
- cursor.execute("EXECUTE arches_q(%d)" % (suite))
- while True:
- r = cursor.fetchone()
- if not r:
- break
-
- if r[1] != "source" and r[1] != "all":
- arch_list.append((r[0], r[1]))
-
- return arch_list
-
-################################################################################
-
-def main():
- cnf = Config()
-
- arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")),
- ('s',"suite", "%s::%s" % (options_prefix,"Suite"),"HasArg"),
- ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")),
- ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")),
- ('a',"arch", "%s::%s" % (options_prefix,"Arch"),"HasArg"),
- ]
-
- commands = {'generate' : Contents.generate,
- 'bootstrap' : Contents.bootstrap,
- 'cruft' : Contents.cruft,
- }
-
- level=logging.INFO
- if cnf.has_key("%s::%s" % (options_prefix,"Quiet")):
- level=logging.ERROR
-
- elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")):
- level=logging.DEBUG
-
-
- logging.basicConfig( level=level,
- format='%(asctime)s %(levelname)s %(message)s',
- stream = sys.stderr )
-
- args = apt_pkg.ParseCommandLine(cnf.Cnf, arguments,sys.argv)
-
- if (len(args) < 1) or not commands.has_key(args[0]):
- usage()
-
- if cnf.has_key("%s::%s" % (options_prefix,"Help")):
- usage()
-
- commands[args[0]](Contents())
-
-if __name__ == '__main__':
- main()
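
For reference, the Contents lines this script wrote consist of a filename padded with tabs out toward a fixed goal column, followed by the comma-separated section/package list. A minimal standalone sketch of that layout logic (a hypothetical row; a float divisor is used so the ceiling takes effect):

    import math

    GOAL_COLUMN = 54   # mirrors Contents._goal_column above

    def format_contents_line(filename, packages):
        # pad with 8-column tabs up to the goal column, always at least one tab
        num_tabs = max(1, int(math.ceil((GOAL_COLUMN - len(filename)) / 8.0)))
        return filename + ('\t' * num_tabs) + packages

    print format_contents_line("usr/bin/dak", "utils/dak")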
################################################################################
-import sys
-import imp
-import daklib.utils
-import daklib.extensions
+import sys, imp
+import daklib.utils, daklib.extensions
################################################################################
"Generate package <-> file mapping"),
("generate-releases",
"Generate Release files"),
- ("contents",
- "Generate content files"),
("generate-index-diffs",
"Generate .diff/Index files"),
("clean-suites",
#!/usr/bin/env python
+
"""
Database Update Script - get suite_architectures table to use sane values
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2009 Joerg Jaspert <joerg@debian.org>
@license: GNU General Public License version 2 or later
+
"""
# This program is free software; you can redistribute it and/or modify
+++ /dev/null
-#!/usr/bin/env python
-# coding=utf8
-
-"""
-Debian Archive Kit Database Update Script
-Copyright © 2008 Michael Casadevall <mcasadevall@debian.org>
-Copyright © 2008 Roger Leigh <rleigh@debian.org>
-
-Debian Archive Kit Database Update Script 2
-"""
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-################################################################################
-
-# <tomv_w> really, if we want to screw ourselves, let's find a better way.
-# <Ganneff> rm -rf /srv/ftp.debian.org
-
-################################################################################
-
-import psycopg2
-import time
-
-################################################################################
-
-def do_update(self):
- print "Adding content fields to database"
-
- try:
- c = self.db.cursor()
- c.execute("""CREATE TABLE content_file_paths (
- id serial primary key not null,
- path text unique not null
- )""")
-
- c.execute("""CREATE TABLE content_file_names (
- id serial primary key not null,
- file text unique not null
- )""")
-
- c.execute("""CREATE TABLE content_associations (
- id serial not null,
- binary_pkg int4 not null references binaries(id) on delete cascade,
- filepath int4 not null references content_file_paths(id) on delete cascade,
- filename int4 not null references content_file_names(id) on delete cascade
- );""")
-
- c.execute("""CREATE TABLE pending_content_associations (
- id serial not null,
- package text not null,
- version debversion not null,
- filepath int4 not null references content_file_paths(id) on delete cascade,
- filename int4 not null references content_file_names(id) on delete cascade
- );""")
-
- c.execute("""CREATE FUNCTION comma_concat(text, text) RETURNS text
- AS $_$select case
- WHEN $2 is null or $2 = '' THEN $1
- WHEN $1 is null or $1 = '' THEN $2
- ELSE $1 || ',' || $2
- END$_$
- LANGUAGE sql""")
-
- c.execute("""CREATE AGGREGATE comma_separated_list (
- BASETYPE = text,
- SFUNC = comma_concat,
- STYPE = text,
- INITCOND = ''
- );""")
-
-        c.execute( "CREATE INDEX content_associations_binary ON content_associations(binary_pkg)" )
-
- c.execute("UPDATE config SET value = '6' WHERE name = 'db_revision'")
- self.db.commit()
-
- except psycopg2.ProgrammingError, msg:
- self.db.rollback()
-        raise DBUpdateError, "Unable to apply content updates, rollback issued. Error message: %s" % (str(msg))
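
To make the comma_concat/comma_separated_list pair above concrete, here is an equivalent fold in plain Python (hypothetical values; the aggregate starts from INITCOND = '' and applies the same CASE logic row by row):

    def comma_concat(acc, item):
        # mirror the SQL CASE: skip NULL/empty operands, otherwise join with ','
        if item is None or item == '':
            return acc
        if acc is None or acc == '':
            return item
        return acc + ',' + item

    print reduce(comma_concat, ["utils/dak", "net/foo"], '')   # utils/dak,net/foo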
################################################################################
-import psycopg2, sys
+import pg, sys
import apt_pkg
-
+from daklib import database
from daklib import utils
-from daklib.DBConn import DBConn
-from daklib.Config import Config
+
+################################################################################
+
+Cnf = None
+projectB = None
################################################################################
################################################################################
def sql_get (config, key):
- """Return the value of config[key] or None if it doesn't exist."""
+ """Return the value of config[key] in quotes or NULL if it doesn't exist."""
- try:
- return config[key]
- except KeyError:
- return None
+ if config.has_key(key):
+ return "'%s'" % (config[key])
+ else:
+ return "NULL"
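
A quick illustration of the rewritten helper (with sql_get as defined above and made-up values), showing why callers below can interpolate the result directly into an INSERT statement:

    config = {"Origin": "Debian"}     # hypothetical stand-in for a config subtree
    sql_get(config, "Origin")         # -> "'Debian'"
    sql_get(config, "Description")    # -> "NULL"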
################################################################################
-class InitDB(object):
- def __init__(self, Cnf, projectB):
- self.Cnf = Cnf
- self.projectB = projectB
-
- def do_archive(self):
-        """Initialize the archive table."""
-
- c = self.projectB.cursor()
- c.execute("DELETE FROM archive")
- archive_add = "INSERT INTO archive (name, origin_server, description) VALUES (%s, %s, %s)"
- for name in self.Cnf.SubTree("Archive").List():
- archive_config = self.Cnf.SubTree("Archive::%s" % (name))
- origin_server = sql_get(archive_config, "OriginServer")
- description = sql_get(archive_config, "Description")
- c.execute(archive_add, [name, origin_server, description])
- self.projectB.commit()
-
- def do_architecture(self):
-        """Initialize the architecture table."""
-
- c = self.projectB.cursor()
- c.execute("DELETE FROM architecture")
- arch_add = "INSERT INTO architecture (arch_string, description) VALUES (%s, %s)"
- for arch in self.Cnf.SubTree("Architectures").List():
- description = self.Cnf["Architectures::%s" % (arch)]
- c.execute(arch_add, [arch, description])
- self.projectB.commit()
-
- def do_component(self):
-        """Initialize the component table."""
-
- c = self.projectB.cursor()
- c.execute("DELETE FROM component")
-
- comp_add = "INSERT INTO component (name, description, meets_dfsg) " + \
- "VALUES (%s, %s, %s)"
-
- for name in self.Cnf.SubTree("Component").List():
- component_config = self.Cnf.SubTree("Component::%s" % (name))
- description = sql_get(component_config, "Description")
- meets_dfsg = (component_config.get("MeetsDFSG").lower() == "true")
- c.execute(comp_add, [name, description, meets_dfsg])
-
- self.projectB.commit()
-
- def do_location(self):
-        """Initialize the location table."""
-
- c = self.projectB.cursor()
- c.execute("DELETE FROM location")
-
- loc_add = "INSERT INTO location (path, component, archive, type) " + \
- "VALUES (%s, %s, %s, %s)"
-
- for location in self.Cnf.SubTree("Location").List():
- location_config = self.Cnf.SubTree("Location::%s" % (location))
- archive_id = self.projectB.get_archive_id(location_config["Archive"])
- if archive_id == -1:
- utils.fubar("Archive '%s' for location '%s' not found."
- % (location_config["Archive"], location))
- location_type = location_config.get("type")
- if location_type == "pool":
- for component in self.Cnf.SubTree("Component").List():
- component_id = self.projectB.get_component_id(component)
- c.execute(loc_add, [location, component_id, archive_id, location_type])
- else:
- utils.fubar("E: type '%s' not recognised in location %s."
- % (location_type, location))
-
- self.projectB.commit()
-
- def do_suite(self):
-        """Initialize the suite table."""
-
- c = self.projectB.cursor()
- c.execute("DELETE FROM suite")
-
- suite_add = "INSERT INTO suite (suite_name, version, origin, description) " + \
- "VALUES (%s, %s, %s, %s)"
-
- sa_add = "INSERT INTO suite_architectures (suite, architecture) " + \
- "VALUES (currval('suite_id_seq'), %s)"
-
- for suite in self.Cnf.SubTree("Suite").List():
- suite_config = self.Cnf.SubTree("Suite::%s" %(suite))
- version = sql_get(suite_config, "Version")
- origin = sql_get(suite_config, "Origin")
- description = sql_get(suite_config, "Description")
- c.execute(suite_add, [suite.lower(), version, origin, description])
- for architecture in self.Cnf.ValueList("Suite::%s::Architectures" % (suite)):
- architecture_id = self.projectB.get_architecture_id (architecture)
- if architecture_id < 0:
- utils.fubar("architecture '%s' not found in architecture"
- " table for suite %s."
- % (architecture, suite))
- c.execute(sa_add, [architecture_id])
-
- self.projectB.commit()
-
- def do_override_type(self):
-        """Initialize the override_type table."""
-
- c = self.projectB.cursor()
- c.execute("DELETE FROM override_type")
-
- over_add = "INSERT INTO override_type (type) VALUES (%s)"
-
- for override_type in self.Cnf.ValueList("OverrideType"):
- c.execute(over_add, [override_type])
-
- self.projectB.commit()
-
- def do_priority(self):
- """Initialize the priority table."""
-
- c = self.projectB.cursor()
- c.execute("DELETE FROM priority")
-
- prio_add = "INSERT INTO priority (priority, level) VALUES (%s, %s)"
-
- for priority in self.Cnf.SubTree("Priority").List():
- c.execute(prio_add, [priority, self.Cnf["Priority::%s" % (priority)]])
-
- self.projectB.commit()
-
- def do_section(self):
-        """Initialize the section table."""
-
- c = self.projectB.cursor()
- c.execute("DELETE FROM section")
-
- sect_add = "INSERT INTO section (section) VALUES (%s)"
-
- for component in self.Cnf.SubTree("Component").List():
- if self.Cnf["Control-Overrides::ComponentPosition"] == "prefix":
- suffix = ""
- if component != "main":
- prefix = component + '/'
- else:
- prefix = ""
+def do_archive():
+    """Initialize the archive table."""
+
+ projectB.query("BEGIN WORK")
+ projectB.query("DELETE FROM archive")
+ for name in Cnf.SubTree("Archive").List():
+ archive_config = Cnf.SubTree("Archive::%s" % (name))
+ origin_server = sql_get(archive_config, "OriginServer")
+ description = sql_get(archive_config, "Description")
+ projectB.query("INSERT INTO archive (name, origin_server, description) "
+ "VALUES ('%s', %s, %s)"
+ % (name, origin_server, description))
+ projectB.query("COMMIT WORK")
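
With hypothetical config values (an Archive::ftp-master subtree with OriginServer and Description set), do_archive above ends up issuing a statement along these lines, the quoting coming from sql_get:

    # hypothetical values; shows the statement do_archive interpolates
    name, origin_server, description = "ftp-master", "'files.debian.org'", "'main archive'"
    print ("INSERT INTO archive (name, origin_server, description) "
           "VALUES ('%s', %s, %s)" % (name, origin_server, description))
    # -> INSERT INTO archive (name, origin_server, description)
    #    VALUES ('ftp-master', 'files.debian.org', 'main archive')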
+
+def do_architecture():
+    """Initialize the architecture table."""
+
+ projectB.query("BEGIN WORK")
+ projectB.query("DELETE FROM architecture")
+ for arch in Cnf.SubTree("Architectures").List():
+ description = Cnf["Architectures::%s" % (arch)]
+ projectB.query("INSERT INTO architecture (arch_string, description) "
+ "VALUES ('%s', '%s')" % (arch, description))
+ projectB.query("COMMIT WORK")
+
+def do_component():
+    """Initialize the component table."""
+
+ projectB.query("BEGIN WORK")
+ projectB.query("DELETE FROM component")
+ for name in Cnf.SubTree("Component").List():
+ component_config = Cnf.SubTree("Component::%s" % (name))
+ description = sql_get(component_config, "Description")
+ if component_config.get("MeetsDFSG").lower() == "true":
+ meets_dfsg = "true"
+ else:
+ meets_dfsg = "false"
+ projectB.query("INSERT INTO component (name, description, meets_dfsg) "
+ "VALUES ('%s', %s, %s)"
+ % (name, description, meets_dfsg))
+ projectB.query("COMMIT WORK")
+
+def do_location():
+    """Initialize the location table."""
+
+ projectB.query("BEGIN WORK")
+ projectB.query("DELETE FROM location")
+ for location in Cnf.SubTree("Location").List():
+ location_config = Cnf.SubTree("Location::%s" % (location))
+ archive_id = database.get_archive_id(location_config["Archive"])
+ if archive_id == -1:
+ utils.fubar("Archive '%s' for location '%s' not found."
+ % (location_config["Archive"], location))
+ location_type = location_config.get("type")
+ if location_type == "pool":
+ for component in Cnf.SubTree("Component").List():
+ component_id = database.get_component_id(component)
+ projectB.query("INSERT INTO location (path, component, "
+ "archive, type) VALUES ('%s', %d, %d, '%s')"
+ % (location, component_id, archive_id,
+ location_type))
+ else:
+ utils.fubar("E: type '%s' not recognised in location %s."
+ % (location_type, location))
+ projectB.query("COMMIT WORK")
+
+def do_suite():
+    """Initialize the suite table."""
+
+ projectB.query("BEGIN WORK")
+ projectB.query("DELETE FROM suite")
+ for suite in Cnf.SubTree("Suite").List():
+ suite_config = Cnf.SubTree("Suite::%s" %(suite))
+ version = sql_get(suite_config, "Version")
+ origin = sql_get(suite_config, "Origin")
+ description = sql_get(suite_config, "Description")
+ projectB.query("INSERT INTO suite (suite_name, version, origin, "
+ "description) VALUES ('%s', %s, %s, %s)"
+ % (suite.lower(), version, origin, description))
+ for architecture in database.get_suite_architectures(suite):
+ architecture_id = database.get_architecture_id (architecture)
+ if architecture_id < 0:
+ utils.fubar("architecture '%s' not found in architecture"
+ " table for suite %s."
+ % (architecture, suite))
+ projectB.query("INSERT INTO suite_architectures (suite, "
+ "architecture) VALUES (currval('suite_id_seq'), %d)"
+ % (architecture_id))
+ projectB.query("COMMIT WORK")
+
+def do_override_type():
+    """Initialize the override_type table."""
+
+ projectB.query("BEGIN WORK")
+ projectB.query("DELETE FROM override_type")
+ for override_type in Cnf.ValueList("OverrideType"):
+ projectB.query("INSERT INTO override_type (type) VALUES ('%s')"
+ % (override_type))
+ projectB.query("COMMIT WORK")
+
+def do_priority():
+ """Initialize the priority table."""
+
+ projectB.query("BEGIN WORK")
+ projectB.query("DELETE FROM priority")
+ for priority in Cnf.SubTree("Priority").List():
+ projectB.query("INSERT INTO priority (priority, level) VALUES "
+ "('%s', %s)"
+ % (priority, Cnf["Priority::%s" % (priority)]))
+ projectB.query("COMMIT WORK")
+
+def do_section():
+    """Initialize the section table."""
+ projectB.query("BEGIN WORK")
+ projectB.query("DELETE FROM section")
+ for component in Cnf.SubTree("Component").List():
+ if Cnf["Control-Overrides::ComponentPosition"] == "prefix":
+ suffix = ""
+ if component != "main":
+ prefix = component + '/'
else:
prefix = ""
- if component != "main":
- suffix = '/' + component
- else:
- suffix = ""
- for section in self.Cnf.ValueList("Section"):
- c.execute(sect_add, [prefix + section + suffix])
-
- self.projectB.commit()
-
- def do_all(self):
- self.do_archive()
- self.do_architecture()
- self.do_component()
- self.do_location()
- self.do_suite()
- self.do_override_type()
- self.do_priority()
- self.do_section()
+ else:
+ prefix = ""
+ if component != "main":
+ suffix = '/' + component
+ else:
+ suffix = ""
+ for section in Cnf.ValueList("Section"):
+ projectB.query("INSERT INTO section (section) VALUES "
+ "('%s%s%s')" % (prefix, section, suffix))
+ projectB.query("COMMIT WORK")
################################################################################
def main ():
"""Sync dak.conf configuartion file and the SQL database"""
+ global Cnf, projectB
+
Cnf = utils.get_conf()
arguments = [('h', "help", "Init-DB::Options::Help")]
for i in [ "help" ]:
utils.warn("dak init-db takes no arguments.")
usage(exit_code=1)
- # Just let connection failures be reported to the user
- projectB = DBConn()
- Cnf = Config()
-
- InitDB(Cnf, projectB).do_all()
+ projectB = pg.connect(Cnf["DB::Name"], Cnf["DB::Host"],
+ int(Cnf["DB::Port"]))
+ database.init(Cnf, projectB)
+
+ do_archive()
+ do_architecture()
+ do_component()
+ do_location()
+ do_suite()
+ do_override_type()
+ do_priority()
+ do_section()
################################################################################
###############################################################################
-import errno
-import fcntl
-import os
-import sys
-import time
-import re
-import apt_pkg, commands
+import errno, fcntl, os, sys, time, re
+import apt_pkg
from daklib import database
from daklib import logging
from daklib import queue
else:
os.unlink(self.log_filename)
-
###############################################################################
-
def reject (str, prefix="Rejected: "):
global reject_message
if str:
suite_id = database.get_suite_id(suite)
projectB.query("INSERT INTO bin_associations (suite, bin) VALUES (%d, currval('binaries_id_seq'))" % (suite_id))
- if not database.copy_temporary_contents(package, version, files[newfile]):
- reject("Missing contents for package")
-
orig_tar_id = Upload.pkg.orig_tar_id
orig_tar_location = Upload.pkg.orig_tar_location
utils.copy(pkg.changes_file, Cnf["Dir::Root"] + dest)
for dest in copy_dot_dak.keys():
utils.copy(Upload.pkg.changes_file[:-8]+".dak", dest)
+
projectB.query("COMMIT WORK")
# Move the .changes into the 'done' directory
################################################################################
-import commands
-import errno
-import fcntl
-import os
-import re
-import shutil
-import stat
-import sys
-import time
-import tempfile
-import traceback
-import tarfile
-import apt_inst
-import apt_pkg
-from debian_bundle import deb822
-from daklib.dbconn import DBConn
-from daklib.binary import Binary
+import commands, errno, fcntl, os, re, shutil, stat, sys, time, tempfile, traceback
+import apt_inst, apt_pkg
+from daklib import database
from daklib import logging
from daklib import queue
from daklib import utils
################################################################################
-def create_tmpdir():
- """
-    Create a temporary directory that files can be unpacked into for checking.
- """
- tmpdir = tempfile.mkdtemp()
- return tmpdir
-
-################################################################################
-
def copy_to_holding(filename):
global in_holding
(source, dest) = args[1:3]
if changes["distribution"].has_key(source):
for arch in changes["architecture"].keys():
- if arch not in DBConn().get_suite_architectures(source):
+ if arch not in database.get_suite_architectures(source):
reject("Mapping %s to %s for unreleased architecture %s." % (source, dest, arch),"")
del changes["distribution"][source]
changes["distribution"][dest] = 1
################################################################################
+def check_deb_ar(filename):
+ """
+ Sanity check the ar of a .deb, i.e. that there is:
+
+ 1. debian-binary
+ 2. control.tar.gz
+ 3. data.tar.gz or data.tar.bz2
+
+ in that order, and nothing else.
+ """
+ cmd = "ar t %s" % (filename)
+ (result, output) = commands.getstatusoutput(cmd)
+ if result != 0:
+ reject("%s: 'ar t' invocation failed." % (filename))
+ reject(utils.prefix_multi_line_string(output, " [ar output:] "), "")
+ chunks = output.split('\n')
+ if len(chunks) != 3:
+ reject("%s: found %d chunks, expected 3." % (filename, len(chunks)))
+ if chunks[0] != "debian-binary":
+ reject("%s: first chunk is '%s', expected 'debian-binary'." % (filename, chunks[0]))
+ if chunks[1] != "control.tar.gz":
+ reject("%s: second chunk is '%s', expected 'control.tar.gz'." % (filename, chunks[1]))
+ if chunks[2] not in [ "data.tar.bz2", "data.tar.gz" ]:
+ reject("%s: third chunk is '%s', expected 'data.tar.gz' or 'data.tar.bz2'." % (filename, chunks[2]))
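
As a concrete illustration (a hypothetical run, not output captured for this patch), a well-formed .deb lists exactly the three members check_deb_ar accepts, in order:

    import commands   # same module the check above uses

    # hypothetical package path
    (status, output) = commands.getstatusoutput("ar t dak_1.0-1_amd64.deb")
    print output   # debian-binary
                   # control.tar.gz
                   # data.tar.gz (or data.tar.bz2)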
+
+################################################################################
+
def check_files():
global reprocess
has_binaries = 0
has_source = 0
- cursor = DBConn().cursor()
- # Check for packages that have moved from one component to another
- # STU: this should probably be changed to not join on architecture, suite tables but instead to used their cached name->id mappings from DBConn
- cursor.execute("""PREPARE moved_pkg_q AS
- SELECT c.name FROM binaries b, bin_associations ba, suite s, location l,
- component c, architecture a, files f
- WHERE b.package = $1 AND s.suite_name = $2
- AND (a.arch_string = $3 OR a.arch_string = 'all')
- AND ba.bin = b.id AND ba.suite = s.id AND b.architecture = a.id
- AND f.location = l.id
- AND l.component = c.id
- AND b.file = f.id""")
-
for f in file_keys:
# Ensure the file does not already exist in one of the accepted directories
for d in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
default_suite = Cnf.get("Dinstall::DefaultSuite", "Unstable")
architecture = control.Find("Architecture")
upload_suite = changes["distribution"].keys()[0]
- if architecture not in DBConn().get_suite_architectures(default_suite) and architecture not in DBConn().get_suite_architectures(upload_suite):
+ if architecture not in database.get_suite_architectures(default_suite) and architecture not in database.get_suite_architectures(upload_suite):
reject("Unknown architecture '%s'." % (architecture))
# Ensure the architecture of the .deb is one of the ones
# Check the version and for file overwrites
reject(Upload.check_binary_against_db(f),"")
- Binary(f).scan_package()
+ check_deb_ar(f)
# Checks for a source package...
else:
# Validate the component
component = files[f]["component"]
- component_id = DBConn().get_component_id(component)
+ component_id = database.get_component_id(component)
if component_id == -1:
reject("file '%s' has unknown component '%s'." % (f, component))
continue
# Determine the location
location = Cnf["Dir::Pool"]
- location_id = DBConn().get_location_id(location, component, archive)
+ location_id = database.get_location_id (location, component, archive)
if location_id == -1:
reject("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (component, archive))
files[f]["location id"] = location_id
# Check the md5sum & size against existing files (if any)
files[f]["pool name"] = utils.poolify (changes["source"], files[f]["component"])
- files_id = DBConn().get_files_id(files[f]["pool name"] + f, files[f]["size"], files[f]["md5sum"], files[f]["location id"])
+ files_id = database.get_files_id(files[f]["pool name"] + f, files[f]["size"], files[f]["md5sum"], files[f]["location id"])
if files_id == -1:
reject("INTERNAL ERROR, get_files_id() returned multiple matches for %s." % (f))
elif files_id == -2:
files[f]["files id"] = files_id
# Check for packages that have moved from one component to another
- files[f]['suite'] = suite
- cursor.execute("""EXECUTE moved_pkg_q( %(package)s, %(suite)s, %(architecture)s )""", ( files[f] ) )
- ql = cursor.fetchone()
+ q = Upload.projectB.query("""
+SELECT c.name FROM binaries b, bin_associations ba, suite s, location l,
+ component c, architecture a, files f
+ WHERE b.package = '%s' AND s.suite_name = '%s'
+ AND (a.arch_string = '%s' OR a.arch_string = 'all')
+ AND ba.bin = b.id AND ba.suite = s.id AND b.architecture = a.id
+ AND f.location = l.id AND l.component = c.id AND b.file = f.id"""
+ % (files[f]["package"], suite,
+ files[f]["architecture"]))
+ ql = q.getresult()
if ql:
files[f]["othercomponents"] = ql[0][0]
or pkg.orig_tar_gz == -1:
return
- tmpdir = create_tmpdir()
+ # Create a temporary directory to extract the source into
+ if Options["No-Action"]:
+ tmpdir = tempfile.mkdtemp()
+ else:
+ # We're in queue/holding and can create a random directory.
+ tmpdir = "%s" % (os.getpid())
+ os.mkdir(tmpdir)
# Move into the temporary directory
cwd = os.getcwd()
################################################################################
def lookup_uid_from_fingerprint(fpr):
-    """
-    Return the (uid, name, isdm) tuple for a given GPG fingerprint
-
-    @type fpr: string
-    @param fpr: a 40-character GPG fingerprint
-
-    @return: (uid, name, isdm)
-    """
- cursor = DBConn().cursor()
- cursor.execute( "SELECT u.uid, u.name, k.debian_maintainer FROM fingerprint f JOIN keyrings k ON (f.keyring=k.id), uid u WHERE f.uid = u.id AND f.fingerprint = '%s'" % (fpr))
- qs = cursor.fetchone()
- if qs:
- return qs
- else:
+ q = Upload.projectB.query("SELECT u.uid, u.name, k.debian_maintainer FROM fingerprint f JOIN keyrings k ON (f.keyring=k.id), uid u WHERE f.uid = u.id AND f.fingerprint = '%s'" % (fpr))
+ qs = q.getresult()
+ if len(qs) == 0:
return (None, None, None)
+ else:
+ return qs[0]
def check_signed_by_key():
"""Ensure the .changes is signed by an authorized uploader."""
if not sponsored and not may_nmu:
source_ids = []
- cursor.execute( "SELECT s.id, s.version FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = %(source)s AND s.dm_upload_allowed = 'yes'", changes )
+ q = Upload.projectB.query("SELECT s.id, s.version FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = '%s' AND s.dm_upload_allowed = 'yes'" % (changes["source"]))
highest_sid, highest_version = None, None
should_reject = True
- while True:
- si = cursor.fetchone()
- if not si:
- break
-
+ for si in q.getresult():
if highest_version == None or apt_pkg.VersionCompare(si[1], highest_version) == 1:
highest_sid = si[0]
highest_version = si[1]
if highest_sid == None:
reject("Source package %s does not have 'DM-Upload-Allowed: yes' in its most recent version" % changes["source"])
else:
-
- cursor.execute("SELECT m.name FROM maintainer m WHERE m.id IN (SELECT su.maintainer FROM src_uploaders su JOIN source s ON (s.id = su.source) WHERE su.source = %s)" % (highest_sid))
-
- while True:
- m = cursor.fetchone()
- if not m:
- break
-
+ q = Upload.projectB.query("SELECT m.name FROM maintainer m WHERE m.id IN (SELECT su.maintainer FROM src_uploaders su JOIN source s ON (s.id = su.source) WHERE su.source = %s)" % (highest_sid))
+ for m in q.getresult():
(rfc822, rfc2047, name, email) = utils.fix_maintainer(m[0])
if email == uid_email or name == uid_name:
should_reject=False
for b in changes["binary"].keys():
for suite in changes["distribution"].keys():
- suite_id = DBConn().get_suite_id(suite)
-
- cursor.execute("SELECT DISTINCT s.source FROM source s JOIN binaries b ON (s.id = b.source) JOIN bin_associations ba On (b.id = ba.bin) WHERE b.package = %(package)s AND ba.suite = %(suite)s" , {'package':b, 'suite':suite_id} )
- while True:
- s = cursor.fetchone()
- if not s:
- break
-
+ suite_id = database.get_suite_id(suite)
+ q = Upload.projectB.query("SELECT DISTINCT s.source FROM source s JOIN binaries b ON (s.id = b.source) JOIN bin_associations ba On (b.id = ba.bin) WHERE b.package = '%s' AND ba.suite = %s" % (b, suite_id))
+ for s in q.getresult():
if s[0] != changes["source"]:
reject("%s may not hijack %s from source package %s in suite %s" % (uid, b, s, suite))
################################################################################
def is_unembargo ():
- cursor = DBConn().cursor()
- cursor.execute( "SELECT package FROM disembargo WHERE package = %(source)s AND version = %(version)s", changes )
- if cursor.fetchone():
+ q = Upload.projectB.query(
+ "SELECT package FROM disembargo WHERE package = '%s' AND version = '%s'" %
+ (changes["source"], changes["version"]))
+ ql = q.getresult()
+ if ql:
return 1
oldcwd = os.getcwd()
if changes["architecture"].has_key("source"):
if Options["No-Action"]: return 1
- cursor.execute( "INSERT INTO disembargo (package, version) VALUES ('%(package)s', '%(version)s')",
- changes )
- cursor.execute( "COMMIT" )
+ Upload.projectB.query(
+ "INSERT INTO disembargo (package, version) VALUES ('%s', '%s')" %
+ (changes["source"], changes["version"]))
return 1
return 0
return 0
if not changes["architecture"].has_key("source"):
- pusuite = DBConn().get_suite_id("proposed-updates")
- cursor = DBConn().cursor()
- cursor.execute( """SELECT 1 FROM source s
- JOIN src_associations sa ON (s.id = sa.source)
- WHERE s.source = %(source)s
- AND s.version = '%(version)s'
- AND sa.suite = %(suite)d""",
- {'source' : changes['source'],
- 'version' : changes['version'],
-                         'suite' : pusuite})
-
- if cursor.fetchone():
+ pusuite = database.get_suite_id("proposed-updates")
+ q = Upload.projectB.query(
+ "SELECT S.source FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = '%s' AND s.version = '%s' AND sa.suite = %d" %
+ (changes["source"], changes["version"], pusuite))
+ ql = q.getresult()
+ if ql:
# source is already in proposed-updates so no need to hold
return 0
return 0
if not changes["architecture"].has_key("source"):
- pusuite = DBConn().get_suite_id("oldstable-proposed-updates")
- cursor = DBConn().cursor()
-        cursor.execute( """SELECT 1 FROM source s
- JOIN src_associations sa ON (s.id = sa.source)
- WHERE s.source = %(source)s
- AND s.version = %(version)s
- AND sa.suite = %d""",
- {'source' : changes['source'],
- 'version' : changes['version'],
-                         'suite' : pusuite})
- if cursor.fetchone():
+ pusuite = database.get_suite_id("oldstable-proposed-updates")
+ q = Upload.projectB.query(
+ "SELECT S.source FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = '%s' AND s.version = '%s' AND sa.suite = %d" %
+ (changes["source"], changes["version"], pusuite))
+ ql = q.getresult()
+ if ql:
+ # source is already in oldstable-proposed-updates so no need to hold
return 0
return 1
################################################################################
-import commands
-import os
-import pg
-import re
-import sys
-import apt_pkg
-import apt_inst
+import commands, os, pg, re, sys
+import apt_pkg, apt_inst
from daklib import database
from daklib import utils
from daklib.dak_exceptions import *
Cnf = None
projectB = None
-required_database_schema = 6
+required_database_schema = 5
################################################################################
+++ /dev/null
-#!/usr/bin/python
-
-"""
-Functions related to Debian binary packages
-
-@contact: Debian FTPMaster <ftpmaster@debian.org>
-@copyright: 2009 Mike O'Connor <stew@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-################################################################################
-
-import os
-import shutil
-import tempfile
-import tarfile
-import commands
-import traceback
-import atexit
-from debian_bundle import deb822
-from dbconn import DBConn
-
-class Binary(object):
- def __init__(self, filename):
- self.filename = filename
- self.tmpdir = None
- self.chunks = None
-
- def __del__(self):
- """
- make sure we cleanup when we are garbage collected.
- """
- self._cleanup()
-
- def _cleanup(self):
- """
- we need to remove the temporary directory, if we created one
- """
- if self.tmpdir and os.path.exists(self.tmpdir):
- shutil.rmtree(self.tmpdir)
- self.tmpdir = None
-
- def __scan_ar(self):
- # get a list of the ar contents
- if not self.chunks:
-
- cmd = "ar t %s" % (self.filename)
-
- (result, output) = commands.getstatusoutput(cmd)
- if result != 0:
- rejected = True
- reject("%s: 'ar t' invocation failed." % (self.filename))
- reject(utils.prefix_multi_line_string(output, " [ar output:] "), "")
- self.chunks = output.split('\n')
-
-
-
- def __unpack(self):
- # Internal function which extracts the contents of the .ar to
- # a temporary directory
-
- if not self.tmpdir:
- tmpdir = tempfile.mkdtemp()
- cwd = os.getcwd()
- try:
- os.chdir( tmpdir )
- cmd = "ar x %s %s %s" % (os.path.join(cwd,self.filename), self.chunks[1], self.chunks[2])
- (result, output) = commands.getstatusoutput(cmd)
- if result != 0:
-                    reject("%s: '%s' invocation failed." % (self.filename, cmd))
- reject(utils.prefix_multi_line_string(output, " [ar output:] "), "")
- else:
- self.tmpdir = tmpdir
- atexit.register( self._cleanup )
-
- finally:
- os.chdir( cwd )
-
- def valid_deb(self):
- """
- Check deb contents making sure the .deb contains:
- 1. debian-binary
- 2. control.tar.gz
- 3. data.tar.gz or data.tar.bz2
- in that order, and nothing else.
- """
- self.__scan_ar()
- rejected = not self.chunks
- if len(self.chunks) != 3:
- rejected = True
- reject("%s: found %d chunks, expected 3." % (self.filename, len(self.chunks)))
- if self.chunks[0] != "debian-binary":
- rejected = True
- reject("%s: first chunk is '%s', expected 'debian-binary'." % (self.filename, self.chunks[0]))
- if self.chunks[1] != "control.tar.gz":
- rejected = True
- reject("%s: second chunk is '%s', expected 'control.tar.gz'." % (self.filename, self.chunks[1]))
- if self.chunks[2] not in [ "data.tar.bz2", "data.tar.gz" ]:
- rejected = True
- reject("%s: third chunk is '%s', expected 'data.tar.gz' or 'data.tar.bz2'." % (self.filename, self.chunks[2]))
-
- return not rejected
-
- def scan_package(self, bootstrap_id=0):
- """
- Unpack the .deb, do sanity checking, and gather info from it.
-
- Currently information gathering consists of getting the contents list. In
- the hopefully near future, it should also include gathering info from the
- control file.
-
-        @type bootstrap_id: int
-        @param bootstrap_id: the id of the binary these contents should be
-        associated with, or zero, meaning we are not bootstrapping and should
-        insert into a temporary table instead
-
- @return True if the deb is valid and contents were imported
- """
- rejected = not self.valid_deb()
- self.__unpack()
-
- if not rejected and self.tmpdir:
- cwd = os.getcwd()
- try:
- os.chdir(self.tmpdir)
- if self.chunks[1] == "control.tar.gz":
- control = tarfile.open(os.path.join(self.tmpdir, "control.tar.gz" ), "r:gz")
-
-
- if self.chunks[2] == "data.tar.gz":
- data = tarfile.open(os.path.join(self.tmpdir, "data.tar.gz"), "r:gz")
- elif self.chunks[2] == "data.tar.bz2":
- data = tarfile.open(os.path.join(self.tmpdir, "data.tar.bz2" ), "r:bz2")
-
- if bootstrap_id:
- return DBConn().insert_content_paths(bootstrap_id, [ tarinfo.name for tarinfo in data if not tarinfo.isdir()])
- else:
- pkg = deb822.Packages.iter_paragraphs( control.extractfile('./control') ).next()
- return DBConn().insert_pending_content_paths(pkg, [ tarinfo.name for tarinfo in data if not tarinfo.isdir()])
-
- except:
- traceback.print_exc()
-
- return False
-
- finally:
- os.chdir( cwd )
-
-if __name__ == "__main__":
- Binary( "/srv/ftp.debian.org/queue/accepted/halevt_0.1.3-2_amd64.deb" ).scan_package()
-
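
The contents gathering in scan_package above reduces to listing every non-directory member of the data tarball. A self-contained sketch of just that step (hypothetical path, no database involved):

    import tarfile

    # the data member previously extracted from the .deb's ar archive
    data = tarfile.open("data.tar.gz", "r:gz")
    paths = [tarinfo.name for tarinfo in data if not tarinfo.isdir()]
    print "\n".join(paths)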
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-Config access class
-
-@contact: Debian FTPMaster <ftpmaster@debian.org>
-@copyright: 2008 Mark Hymers <mhy@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-################################################################################
-
-# <NCommander> mhy, how about "Now with 20% more monty python references"
-
-################################################################################
-
-import apt_pkg
-import socket
-
-from singleton import Singleton
-
-################################################################################
-
-default_config = "/etc/dak/dak.conf"
-
-def which_conf_file(Cnf):
- res = socket.gethostbyaddr(socket.gethostname())
- if Cnf.get("Config::" + res[0] + "::DakConfig"):
- return Cnf["Config::" + res[0] + "::DakConfig"]
- else:
- return default_config
-
-class Config(Singleton):
- """
- A Config object is a singleton containing
- information about the DAK configuration
- """
- def __init__(self, *args, **kwargs):
- super(Config, self).__init__(*args, **kwargs)
-
- def _readconf(self):
- apt_pkg.init()
-
- self.Cnf = apt_pkg.newConfiguration()
-
- apt_pkg.ReadConfigFileISC(self.Cnf, default_config)
-
- # Check whether our dak.conf was the real one or
- # just a pointer to our main one
- res = socket.gethostbyaddr(socket.gethostname())
- conffile = self.Cnf.get("Config::" + res[0] + "::DakConfig")
- if conffile:
- apt_pkg.ReadConfigFileISC(self.Cnf, conffile)
-
- # Rebind some functions
- # TODO: Clean this up
- self.get = self.Cnf.get
- self.SubTree = self.Cnf.SubTree
- self.ValueList = self.Cnf.ValueList
-
- def _startup(self, *args, **kwargs):
- self._readconf()
-
- def has_key(self, name):
- return self.Cnf.has_key(name)
-
- def __getitem__(self, name):
- return self.Cnf[name]
-
maintainer_id_cache = {} #: cache for maintainers
keyring_id_cache = {} #: cache for keyrings
source_id_cache = {} #: cache for sources
-
files_id_cache = {} #: cache for files
maintainer_cache = {} #: cache for maintainer names
fingerprint_id_cache = {} #: cache for fingerprints
queue_id_cache = {} #: cache for queues
uid_id_cache = {} #: cache for uids
suite_version_cache = {} #: cache for suite_versions (packages)
-suite_bin_version_cache = {}
-cache_preloaded = False
################################################################################
@return: the version for I{source} in I{suite}
"""
-
global suite_version_cache
cache_key = "%s_%s" % (source, suite)
return version
-def get_latest_binary_version_id(binary, section, suite, arch):
- global suite_bin_version_cache
- cache_key = "%s_%s_%s_%s" % (binary, section, suite, arch)
- cache_key_all = "%s_%s_%s_%s" % (binary, section, suite, get_architecture_id("all"))
-
- # Check for the cache hit for its arch, then arch all
- if suite_bin_version_cache.has_key(cache_key):
- return suite_bin_version_cache[cache_key]
- if suite_bin_version_cache.has_key(cache_key_all):
- return suite_bin_version_cache[cache_key_all]
- if cache_preloaded == True:
- return # package does not exist
-
- q = projectB.query("SELECT DISTINCT b.id FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.package = '%s' AND b.architecture = '%d' AND ba.suite = '%d' AND o.section = '%d'" % (binary, int(arch), int(suite), int(section)))
-
- if not q.getresult():
- return False
-
- highest_bid = q.getresult()[0][0]
-
- suite_bin_version_cache[cache_key] = highest_bid
- return highest_bid
-
-def preload_binary_id_cache():
- global suite_bin_version_cache, cache_preloaded
-
- # Get suite info
- q = projectB.query("SELECT id FROM suite")
- suites = q.getresult()
-
- # Get arch mappings
- q = projectB.query("SELECT id FROM architecture")
- arches = q.getresult()
-
- for suite in suites:
- for arch in arches:
- q = projectB.query("SELECT DISTINCT b.id, b.package, o.section FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.architecture = '%d' AND ba.suite = '%d'" % (int(arch[0]), int(suite[0])))
-
- for bi in q.getresult():
- cache_key = "%s_%s_%s_%s" % (bi[1], bi[2], suite[0], arch[0])
- suite_bin_version_cache[cache_key] = int(bi[0])
-
- cache_preloaded = True
-
def get_suite_architectures(suite):
"""
Returns list of architectures for C{suite}.
q = projectB.query(sql)
return map(lambda x: x[0], q.getresult())
+
################################################################################
def get_or_set_maintainer_id (maintainer):
q = projectB.query(sql)
return map(lambda x: x[0], q.getresult())
-
-
-################################################################################
-
-def copy_temporary_contents(package, version, deb):
- """
-    copy the previously stored contents from the temp table to the permanent one
-
- during process-unchecked, the deb should have been scanned and the
- contents stored in pending_content_associations
- """
-
- # first see if contents exist:
-
- exists = projectB.query("""SELECT 1 FROM pending_content_associations
- WHERE package='%s' LIMIT 1""" % package ).getresult()
-
- if not exists:
-        # This should NOT happen.  We should have added contents
-        # during process-unchecked.  If they are missing, log an error
-        # and send an email.
- subst = {
- "__PACKAGE__": package,
- "__VERSION__": version,
- "__TO_ADDRESS__": Cnf["Dinstall::MyAdminAddress"],
- "__DAK_ADDRESS__": Cnf["Dinstall::MyEmailAddress"] }
-
-        message = utils.TemplateSubst(subst, Cnf["Dir::Templates"]+"/missing-contents")
- utils.send_mail( message )
-
- exists = DBConn().insert_content_path(package, version, deb)
-
- if exists:
- sql = """INSERT INTO content_associations(binary_pkg,filepath,filename)
- SELECT currval('binaries_id_seq'), filepath, filename FROM pending_content_associations
- WHERE package='%s'
- AND version='%s'""" % (package, version)
- projectB.query(sql)
- projectB.query("""DELETE from pending_content_associations
- WHERE package='%s'
- AND version='%s'""" % (package, version))
-
- return exists
+++ /dev/null
-#!/usr/bin/python
-
-""" DB access class
-
-@contact: Debian FTPMaster <ftpmaster@debian.org>
-@copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
-@copyright: 2008-2009 Mark Hymers <mhy@debian.org>
-@copyright: 2009 Joerg Jaspert <joerg@debian.org>
-@copyright: 2009 Mike O'Connor <stew@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-################################################################################
-
-# < mhy> I need a funny comment
-# < sgran> two peanuts were walking down a dark street
-# < sgran> one was a-salted
-# * mhy looks up the definition of "funny"
-
-################################################################################
-
-import os
-import psycopg2
-import traceback
-
-from singleton import Singleton
-from config import Config
-
-################################################################################
-
-class Cache(object):
- def __init__(self, hashfunc=None):
- if hashfunc:
- self.hashfunc = hashfunc
- else:
- self.hashfunc = lambda x: x['value']
-
- self.data = {}
-
- def SetValue(self, keys, value):
- self.data[self.hashfunc(keys)] = value
-
- def GetValue(self, keys):
- return self.data.get(self.hashfunc(keys))
-
-################################################################################
-
-class DBConn(Singleton):
- """
-    Database connection singleton.
- """
- def __init__(self, *args, **kwargs):
- super(DBConn, self).__init__(*args, **kwargs)
-
- def _startup(self, *args, **kwargs):
- self.__createconn()
- self.__init_caches()
-
- ## Connection functions
- def __createconn(self):
- cnf = Config()
- connstr = "dbname=%s" % cnf["DB::Name"]
- if cnf["DB::Host"]:
- connstr += " host=%s" % cnf["DB::Host"]
- if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
- connstr += " port=%s" % cnf["DB::Port"]
-
- self.db_con = psycopg2.connect(connstr)
-
- def reconnect(self):
- try:
- self.db_con.close()
- except psycopg2.InterfaceError:
- pass
-
- self.db_con = None
- self.__createconn()
-
- ## Cache functions
- def __init_caches(self):
- self.caches = {'suite': Cache(),
- 'section': Cache(),
- 'priority': Cache(),
- 'override_type': Cache(),
- 'architecture': Cache(),
- 'archive': Cache(),
- 'component': Cache(),
- 'content_path_names': Cache(),
- 'content_file_names': Cache(),
-                       'location': Cache(lambda x: '%s_%s_%s' % (x['location'], x['component'], x['archive'])),
- 'maintainer': {}, # TODO
- 'keyring': {}, # TODO
- 'source': Cache(lambda x: '%s_%s_' % (x['source'], x['version'])),
- 'files': Cache(lambda x: '%s_%s_' % (x['filename'], x['location'])),
- 'fingerprint': {}, # TODO
- 'queue': {}, # TODO
- 'uid': {}, # TODO
- 'suite_version': Cache(lambda x: '%s_%s' % (x['source'], x['suite'])),
- }
-
- def clear_caches(self):
- self.__init_caches()
-
- ## Functions to pass through to the database connector
- def cursor(self):
- return self.db_con.cursor()
-
- def commit(self):
- return self.db_con.commit()
-
- ## Get functions
- def __get_single_id(self, query, values, cachename=None):
- # This is a bit of a hack but it's an internal function only
- if cachename is not None:
- res = self.caches[cachename].GetValue(values)
- if res:
- return res
-
- c = self.db_con.cursor()
- c.execute(query, values)
-
- if c.rowcount != 1:
- return None
-
- res = c.fetchone()[0]
-
- if cachename is not None:
- self.caches[cachename].SetValue(values, res)
-
- return res
-
- def __get_id(self, retfield, table, qfield, value):
- query = "SELECT %s FROM %s WHERE %s = %%(value)s" % (retfield, table, qfield)
- return self.__get_single_id(query, {'value': value}, cachename=table)
-
- def get_suite_id(self, suite):
- """
- Returns database id for given C{suite}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type suite: string
- @param suite: The name of the suite
-
- @rtype: int
- @return: the database id for the given suite
-
- """
- return self.__get_id('id', 'suite', 'suite_name', suite)
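-
-    # Usage sketch (assumes a reachable dak database; 'unstable' is an
-    # example suite name):
-    #
-    #   db = DBConn()
-    #   db.get_suite_id('unstable')   # first call queries the database
-    #   db.get_suite_id('unstable')   # repeat calls are served from the cache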
-
- def get_section_id(self, section):
- """
- Returns database id for given C{section}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type section: string
- @param section: The name of the section
-
- @rtype: int
- @return: the database id for the given section
-
- """
- return self.__get_id('id', 'section', 'section', section)
-
- def get_priority_id(self, priority):
- """
- Returns database id for given C{priority}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type priority: string
- @param priority: The name of the priority
-
- @rtype: int
- @return: the database id for the given priority
-
- """
- return self.__get_id('id', 'priority', 'priority', priority)
-
- def get_override_type_id(self, override_type):
- """
-        Returns database id for given C{override_type}.
-        Results are kept in a cache during runtime to minimize database queries.
-
-        @type override_type: string
-        @param override_type: The name of the override type
-
- @rtype: int
- @return: the database id for the given override type
-
- """
- return self.__get_id('id', 'override_type', 'override_type', override_type)
-
- def get_architecture_id(self, architecture):
- """
- Returns database id for given C{architecture}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type architecture: string
-        @param architecture: The name of the architecture
-
- @rtype: int
- @return: the database id for the given architecture
-
- """
- return self.__get_id('id', 'architecture', 'arch_string', architecture)
-
- def get_archive_id(self, archive):
- """
-        Returns database id for given C{archive}.
-        Results are kept in a cache during runtime to minimize database queries.
-
-        @type archive: string
-        @param archive: the name of the archive
-
- @rtype: int
- @return: the database id for the given archive
-
- """
- return self.__get_id('id', 'archive', 'lower(name)', archive)
-
- def get_component_id(self, component):
- """
- Returns database id for given C{component}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type component: string
-        @param component: The name of the component
-
- @rtype: int
- @return: the database id for the given component
-
- """
- return self.__get_id('id', 'component', 'lower(name)', component)
-
- def get_location_id(self, location, component, archive):
- """
- Returns database id for the location behind the given combination of
- - B{location} - the path of the location, eg. I{/srv/ftp.debian.org/ftp/pool/}
- - B{component} - the id of the component as returned by L{get_component_id}
- - B{archive} - the id of the archive as returned by L{get_archive_id}
- Results are kept in a cache during runtime to minimize database queries.
-
- @type location: string
- @param location: the path of the location
-
- @type component: int
- @param component: the id of the component
-
- @type archive: int
- @param archive: the id of the archive
-
- @rtype: int
- @return: the database id for the location
-
- """
-
- archive_id = self.get_archive_id(archive)
-
- if not archive_id:
- return None
-
- res = None
-
- if component:
- component_id = self.get_component_id(component)
- if component_id:
- res = self.__get_single_id("SELECT id FROM location WHERE path=%(location)s AND component=%(component)s AND archive=%(archive)s",
- {'location': location,
- 'archive': int(archive_id),
- 'component': component_id}, cachename='location')
- else:
-            res = self.__get_single_id("SELECT id FROM location WHERE path=%(location)s AND archive=%(archive)s",
-                                       {'location': location, 'archive': archive_id, 'component': ''}, cachename='location')
-
- return res
-
- def get_source_id(self, source, version):
- """
- Returns database id for the combination of C{source} and C{version}
- - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
- - B{version}
- Results are kept in a cache during runtime to minimize database queries.
-
- @type source: string
- @param source: source package name
-
- @type version: string
- @param version: the source version
-
- @rtype: int
- @return: the database id for the source
-
- """
- return self.__get_single_id("SELECT id FROM source s WHERE s.source=%(source)s AND s.version=%(version)s",
- {'source': source, 'version': version}, cachename='source')
-
- def get_suite_version(self, source, suite):
- """
- Returns database id for a combination of C{source} and C{suite}.
-
- - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
- - B{suite} - a suite name, eg. I{unstable}
-
- Results are kept in a cache during runtime to minimize database queries.
-
- @type source: string
- @param source: source package name
-
- @type suite: string
- @param suite: the suite name
-
- @rtype: string
- @return: the version for I{source} in I{suite}
-
- """
- return self.__get_single_id("""
- SELECT s.version FROM source s, suite su, src_associations sa
- WHERE sa.source=s.id
- AND sa.suite=su.id
- AND su.suite_name=%(suite)s
-        AND s.source=%(source)s""", {'suite': suite, 'source': source}, cachename='suite_version')
-
-
- def get_files_id (self, filename, size, md5sum, location_id):
- """
-        Returns the database id for C{filename}, or an error code.
-
-        The database is queried using the C{filename} and C{location_id}. If a file exists
-        at that location, the existing size and md5sum are checked against the provided
-        parameters. A size or checksum mismatch returns -2; more than one matching entry
-        returns -1; no result returns None; otherwise the file id is returned.
-
- Results are kept in a cache during runtime to minimize database queries.
-
- @type filename: string
- @param filename: the filename of the file to check against the DB
-
- @type size: int
- @param size: the size of the file to check against the DB
-
- @type md5sum: string
- @param md5sum: the md5sum of the file to check against the DB
-
- @type location_id: int
- @param location_id: the id of the location as returned by L{get_location_id}
-
- @rtype: int / None
- @return: Various return values are possible:
- - -2: size/checksum error
- - -1: more than one file found in database
- - None: no file found in database
- - int: file id
-
- """
- values = {'filename' : filename,
- 'location' : location_id}
-
- res = self.caches['files'].GetValue( values )
-
- if not res:
- query = """SELECT id, size, md5sum
- FROM files
- WHERE filename = %(filename)s AND location = %(location)s"""
-
- cursor = self.db_con.cursor()
- cursor.execute( query, values )
-
- if cursor.rowcount == 0:
- res = None
-
- elif cursor.rowcount != 1:
- res = -1
-
- else:
- row = cursor.fetchone()
-
- if row[1] != size or row[2] != md5sum:
- res = -2
-
- else:
-                    self.caches['files'].SetValue(values, row[0])
- res = row[0]
-
- return res
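-
-    # Sketch of the documented return protocol (the values and the reject /
-    # insert helpers below are hypothetical):
-    #
-    #   fid = db.get_files_id('pool/main/f/foo/foo_1.0.deb', size, md5sum, loc_id)
-    #   if fid == -2:          # size/md5sum differ from the stored copy
-    #       reject_upload()
-    #   elif fid == -1:        # more than one matching row in files
-    #       reject_upload()
-    #   elif fid is None:      # file not known yet
-    #       insert_new_file()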
-
-
- def get_or_set_contents_file_id(self, filename):
- """
- Returns database id for given filename.
-
- Results are kept in a cache during runtime to minimize database queries.
- If no matching file is found, a row is inserted.
-
- @type filename: string
- @param filename: The filename
-
- @rtype: int
-        @return: the database id for the given filename
- """
- values={'value': filename}
- query = "SELECT id FROM content_file_names WHERE file = %(value)s"
- id = self.__get_single_id(query, values, cachename='content_file_names')
- if not id:
- c = self.db_con.cursor()
- c.execute( "INSERT INTO content_file_names VALUES (DEFAULT, %(value)s) RETURNING id",
- values )
-
- id = c.fetchone()[0]
- self.caches['content_file_names'].SetValue(values, id)
-
- return id
-
- def get_or_set_contents_path_id(self, path):
- """
- Returns database id for given path.
-
- Results are kept in a cache during runtime to minimize database queries.
- If no matching file is found, a row is inserted.
-
- @type path: string
-        @param path: The path
-
-        @rtype: int
-        @return: the database id for the given path
- """
- values={'value': path}
- query = "SELECT id FROM content_file_paths WHERE path = %(value)s"
- id = self.__get_single_id(query, values, cachename='content_path_names')
- if not id:
- c = self.db_con.cursor()
- c.execute( "INSERT INTO content_file_paths VALUES (DEFAULT, %(value)s) RETURNING id",
- values )
-
- id = c.fetchone()[0]
- self.caches['content_path_names'].SetValue(values, id)
-
- return id
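-
-    # Illustrative get-or-set behaviour (the path value is hypothetical): an
-    # unknown path is inserted and cached on first use, so repeat calls
-    # return the same id without touching the database.
-    #
-    #   first  = db.get_or_set_contents_path_id('usr/bin')   # INSERT ... RETURNING id
-    #   second = db.get_or_set_contents_path_id('usr/bin')   # cache hit
-    #   assert first == second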
-
- def get_suite_architectures(self, suite):
- """
- Returns list of architectures for C{suite}.
-
- @type suite: string, int
- @param suite: the suite name or the suite_id
-
- @rtype: list
- @return: the list of architectures for I{suite}
- """
-
- suite_id = None
-        if isinstance(suite, str):
-            suite_id = self.get_suite_id(suite)
-        elif isinstance(suite, int):
-            suite_id = suite
- else:
- return None
-
- c = self.db_con.cursor()
-        c.execute( """SELECT a.arch_string FROM suite_architectures sa
-                      JOIN architecture a ON (a.id = sa.architecture)
-                      WHERE sa.suite = %s""", (suite_id,) )
-
- return map(lambda x: x[0], c.fetchall())
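-
-    # Usage sketch (example suite name; the result depends on the database):
-    #
-    #   db.get_suite_architectures('unstable')   # e.g. ['source', 'all', 'i386']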
-
- def insert_content_paths(self, bin_id, fullpaths):
- """
-        Make sure the given paths are associated with the given binary id
-
-        @type bin_id: int
-        @param bin_id: the id of the binary
-        @type fullpaths: list
-        @param fullpaths: the list of paths of files being associated with the binary
-
-        @return: True upon success
- """
-
- c = self.db_con.cursor()
-
- c.execute("BEGIN WORK")
- try:
-
- for fullpath in fullpaths:
- (path, file) = os.path.split(fullpath)
-
- # Get the necessary IDs ...
- file_id = self.get_or_set_contents_file_id(file)
- path_id = self.get_or_set_contents_path_id(path)
-
-                c.execute("""INSERT INTO content_associations
-                               (binary_pkg, filepath, filename)
-                           VALUES (%s, %s, %s)""", (bin_id, path_id, file_id) )
-
- c.execute("COMMIT")
- return True
- except:
- traceback.print_exc()
- c.execute("ROLLBACK")
- return False
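-
-    # Usage sketch (bin_id and the paths are hypothetical); the method runs
-    # in its own transaction and rolls back on any failure:
-    #
-    #   ok = db.insert_content_paths(bin_id,
-    #                                ['usr/bin/foo', 'usr/share/doc/foo/README'])
-    #   if not ok:
-    #       raise RuntimeError("could not insert contents for binary %d" % bin_id)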
-
- def insert_pending_content_paths(self, package, fullpaths):
- """
- Make sure given paths are temporarily associated with given
- package
-
- @type package: dict
-        @param package: the package to associate with; should have been read in from the binary control file
-        @type fullpaths: list
-        @param fullpaths: the list of paths of files being associated with the binary
-
-        @return: True upon success
- """
-
- c = self.db_con.cursor()
-
- c.execute("BEGIN WORK")
- try:
-
- # Remove any already existing recorded files for this package
- c.execute("""DELETE FROM pending_content_associations
- WHERE package=%(Package)s
- AND version=%(Version)s""", package )
-
- for fullpath in fullpaths:
- (path, file) = os.path.split(fullpath)
-
- # Get the necessary IDs ...
- file_id = self.get_or_set_contents_file_id(file)
- path_id = self.get_or_set_contents_path_id(path)
-
-                c.execute("""INSERT INTO pending_content_associations
-                               (package, version, filepath, filename)
-                           VALUES (%(Package)s, %(Version)s, %(filepath)s, %(filename)s)""",
-                          dict(package, filepath=path_id, filename=file_id) )
- c.execute("COMMIT")
- return True
- except:
- traceback.print_exc()
- c.execute("ROLLBACK")
- return False
+++ /dev/null
-#!/usr/bin/env python
-# vim:set et ts=4 sw=4:
-
-"""
-Singleton pattern code
-
-Inspiration for this very simple ABC was taken from various documents /
-tutorials / mailing lists. This may not be thread-safe, but given that
-(as I write) large chunks of dak aren't even type-safe, I'll live with
-it for now.
-
-@contact: Debian FTPMaster <ftpmaster@debian.org>
-@copyright: 2008 Mark Hymers <mhy@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-################################################################################
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-################################################################################
-
-# < sgran> NCommander: in SQL, it's better to join than to repeat information
-# < tomv_w> that makes SQL the opposite to Debian mailing lists!
-
-################################################################################
-
-"""
-This class set implements objects that may need to be instantiated multiple
-times, but we don't want the overhead of actually creating and init'ing
-them more than once. It also saves us using globals all over the place
-"""
-
-class Singleton(object):
- """This is the ABC for other dak Singleton classes"""
- __single = None
- def __new__(cls, *args, **kwargs):
- # Check to see if a __single exists already for this class
- # Compare class types instead of just looking for None so
- # that subclasses will create their own __single objects
- if cls != type(cls.__single):
-            cls.__single = object.__new__(cls)
- cls.__single._startup(*args, **kwargs)
- return cls.__single
-
- def __init__(self, *args, **kwargs):
-        if type(self) is Singleton:
- raise NotImplementedError("Singleton is an ABC")
-
-    def _startup(self, *args, **kwargs):
- """
- _startup is a private method used instead of __init__ due to the way
- we instantiate this object
- """
- raise NotImplementedError("Singleton is an ABC")
-
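-# Sketch of the intended subclassing pattern (the class name below is
-# hypothetical): _startup takes the place of __init__ and runs exactly
-# once, however many times the class is instantiated.
-#
-#   class ExampleCache(Singleton):
-#       def _startup(self, *args, **kwargs):
-#           self.data = {}        # expensive setup happens only once
-#
-#   a = ExampleCache()
-#   b = ExampleCache()
-#   assert a is b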
if which_conf_file() != default_config:
apt_pkg.ReadConfigFileISC(Cnf,which_conf_file())
-###############################################################################
+################################################################################
<helix> elmo: I can't believe people pay you to fix computers
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-* Ganneff ponders how to best write the text to -devel. (need to tell em in
- case they find more bugs). "We fixed the fucking idiotic broken implementation
-  to be less so" is probably not the nicest, even if perfectly valid, way to say so
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+++ /dev/null
-This file maps each file available in the Debian GNU/Linux system to
-the package from which it originates. It includes packages from the
-DIST distribution for the ARCH architecture.
-
-You can use this list to determine which package contains a specific
-file, or whether or not a specific file is available. The list is
-updated weekly, each architecture on a different day.
-
-When a file is contained in more than one package, all packages are
-listed. When a directory is contained in more than one package, only
-the first is listed.
-
-As all Contents files are shipped compressed, the best way to search quickly
-for a file is with the Unix `zgrep' utility, as in:
-  `zgrep <regular expression> Contents.gz':
-
- $ zgrep nose Contents.gz
- etc/nosendfile net/sendfile
- usr/X11R6/bin/noseguy x11/xscreensaver
- usr/X11R6/man/man1/noseguy.1x.gz x11/xscreensaver
- usr/doc/examples/ucbmpeg/mpeg_encode/nosearch.param graphics/ucbmpeg
- usr/lib/cfengine/bin/noseyparker admin/cfengine
-
-This list contains files in all packages, even though not all of the
-packages are installed on an actual system at once. If you want to
-find out which packages on an installed Debian system provide a
-particular file, you can use `dpkg --search <filename>':
-
- $ dpkg --search /usr/bin/dselect
- dpkg: /usr/bin/dselect
-
-
-FILE LOCATION
+++ /dev/null
-From: __DAK_ADDRESS__
-To: __TO_ADDRESS__
-X-Debian: DAK
-X-Debian-Package: __PACKAGE__
-MIME-Version: 1.0
-Content-Type: text/plain; charset="utf-8"
-Content-Transfer-Encoding: 8bit
-Subject: Missing contents for __PACKAGE__ in accepted queue
-
-While processing the accepted queue, I didn't have contents in the
-database for __PACKAGE__ version __VERSION__. These contents should
-have been put into the database by process-unchecked when the package
-first arrived.
-
-This is probably stew's fault.
\ No newline at end of file