Dinstall
{
GPGKeyring {
- "/srv/keyring.debian.org/keyrings/debian-keyring.gpg";
- "/srv/keyring.debian.org/keyrings/debian-keyring.pgp";
- "/srv/ftp.debian.org/keyrings/debian-maintainers.gpg";
+ "/usr/share/keyrings/debian-keyring.gpg";
+ "/usr/share/keyrings/debian-keyring.pgp";
+ "/usr/share/keyrings/debian-keyring.pgp";
+// "/srv/ftp.debian.org/keyrings/debian-maintainers.gpg";
};
SigningKeyring "/srv/ftp.debian.org/s3kr1t/dot-gnupg/secring.gpg";
SigningPubKeyring "/srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg";
SigningKeyIds "6070D3A1";
- SendmailCommand "/usr/sbin/sendmail -odq -oi -t";
+// SendmailCommand "/usr/sbin/sendmail -odq -oi -t";
+ SendmailCommand "/bin/echo -odq -oi -t";
MyEmailAddress "Debian Installer <installer@ftp-master.debian.org>";
MyAdminAddress "ftpmaster@debian.org";
MyHost "debian.org"; // used for generating user@my_host addresses in e.g. manual_reject()
critical;
};
};
+
+Contents
+{
+  Header "contents";
+};
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+"""
+Create all the contents files
+
+@contact: Debian FTPMaster <ftpmaster@debian.org>
+@copyright: 2008, 2009 Michael Casadevall <mcasadevall@debian.org>
+@copyright: 2009 Mike O'Connor <stew@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+################################################################################
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+# <Ganneff> there is the idea to slowly replace contents files
+# <Ganneff> with a new generation of such files.
+# <Ganneff> having more info.
+
+# <Ganneff> of course that wont help for now where we need to generate them :)
+
+################################################################################
+
+import sys
+import os
+import tempfile
+import logging
+import math
+import gzip
+import traceback
+import apt_pkg
+from daklib import utils
+from daklib.Config import Config
+from daklib.DBConn import DBConn
+################################################################################
+
+def usage (exit_code=0):
+ print """Usage: dak contents [options] command [arguments]
+
+COMMANDS
+ generate
+ generate Contents-$arch.gz files
+
+ bootstrap
+      scan the debs in the existing pool and load contents into the database
+
+ cruft
+ remove files/paths which are no longer referenced by a binary
+
+OPTIONS
+ -h, --help
+ show this help and exit
+
+ -v, --verbose
+ show verbose information messages
+
+ -q, --quiet
+      suppress all output but errors
+
+ -s, --suite={stable,testing,unstable,...}
+      only operate on a single suite
+
+ -a, --arch={i386,amd64}
+      only operate on a single architecture
+"""
+ sys.exit(exit_code)
+
+################################################################################
+
+# where in dak.conf all of our configuration will be stowed
+
+options_prefix = "Contents"
+options_prefix = "%s::Opitons" % options_prefix
+header_prefix = "%s::Header" % options_prefix
+
+log = logging.getLogger()
+
+################################################################################
+
+latin1_q = """SET CLIENT_ENCODING TO 'LATIN1'"""
+
+arches_q = """PREPARE arches_q as
+ SELECT s.architecture, a.arch_string
+ FROM suite_architectures s
+ JOIN architecture a ON (s.architecture=a.id)
+ WHERE suite = $1"""
+
+debs_q = """PREPARE debs_q as
+ SELECT b.id, f.filename FROM bin_assoc_by_arch baa
+ JOIN binaries b ON baa.bin=b.id
+ JOIN files f ON b.file=f.id
+ WHERE suite = $1
+ AND arch = $2"""
+
+olddeb_q = """PREPARE olddeb_q as
+ SELECT 1 FROM content_associations
+ WHERE binary_pkg = $1
+ LIMIT 1"""
+
+contents_q = """PREPARE contents_q as
+ SELECT (p.path||'/'||n.file) AS fn,
+ comma_separated_list(s.section||'/'||b.package)
+ FROM content_associations c
+ JOIN content_file_paths p ON (c.filepath=p.id)
+ JOIN content_file_names n ON (c.filename=n.id)
+ JOIN binaries b ON (b.id=c.binary_pkg)
+ JOIN bin_associations ba ON (b.id=ba.bin)
+ JOIN override o ON (o.package=b.package)
+ JOIN section s ON (s.id=o.section)
+ WHERE (b.architecture = $1 OR b.architecture = $2)
+ AND ba.suite = $3
+ AND o.suite = $4
+ AND b.type = 'deb'
+ AND o.type = '7'
+ GROUP BY fn
+ ORDER BY fn"""
+
+udeb_contents_q = """PREPARE udeb_contents_q as
+ SELECT (p.path||'/'||n.file) as fn,
+ comma_separated_list(s.section||'/'||b.package)
+ FROM content_associations c
+ JOIN content_file_paths p ON (c.filepath=p.id)
+ JOIN content_file_names n ON (c.filename=n.id)
+ JOIN binaries b ON (b.id=c.binary_pkg)
+ JOIN bin_associations ba ON (b.id=ba.bin)
+ JOIN override o ON (o.package=b.package)
+ JOIN section s ON (s.id=o.section)
+ WHERE s.id = $1
+ AND ba.suite = $2
+ AND o.suite = $3
+ AND b.type = 'udeb'
+ AND o.type = '8'
+ GROUP BY fn
+ ORDER BY fn"""
+
+class Contents(object):
+ """
+ Class capable of generating Contents-$arch.gz files
+
+    Usage: Contents().generate()
+ """
+
+ def __init__(self):
+ self.header = None
+
+ def _getHeader(self):
+ # Internal method to return the header for Contents.gz files
+        if self.header is None:
+ if Config().has_key("Contents::Header"):
+ try:
+ h = open(os.path.join( Config()["Dir::Templates"],
+ Config()["Contents::Header"] ), "r")
+ self.header = h.read()
+ h.close()
+ except:
+ log.error( "error openeing header file: %d\n%s" % (Config()["Contents::Header"],
+ traceback.format_exc() ))
+ self.header = False
+ else:
+ self.header = False
+
+ return self.header
+
+ # goal column for section column
+ _goal_column = 54
+
+ def _write_content_file(self, cursor, filename):
+ # Internal method for writing all the results to a given file
+ f = gzip.open(Config()["Dir::Root"] + filename, "w")
+ try:
+ header = self._getHeader()
+
+ if header:
+ f.write(header)
+
+ while True:
+ contents = cursor.fetchone()
+ if not contents:
+ return
+
+ num_tabs = max(1,
+ int( math.ceil( (self._goal_column - len(contents[0])) / 8) ) )
+ f.write(contents[0] + ( '\t' * num_tabs ) + contents[-1] + "\n")
+
+ finally:
+ f.close()
+
+ def cruft(self):
+ """
+ remove files/paths from the DB which are no longer referenced by binaries
+ """
+ cursor = DBConn().cursor();
+ cursor.execute( "BEGIN WORK" )
+ cursor.execute( """DELETE FROM content_file_names
+ WHERE id IN (SELECT cfn.id FROM content_file_names cfn
+ LEFT JOIN content_associations ca
+ ON ca.filename=cfn.id
+ WHERE ca.id IS NULL)""" );
+ cursor.execute( """DELETE FROM content_file_paths
+ WHERE id IN (SELECT cfn.id FROM content_file_paths cfn
+ LEFT JOIN content_associations ca
+ ON ca.filepath=cfn.id
+ WHERE ca.id IS NULL)""" );
+ cursor.execute( "COMMIT" )
+
+ def bootstrap(self):
+ """
+ scan the existing debs in the pool to populate the contents database tables
+ """
+ pooldir = Config()[ 'Dir::Pool' ]
+
+ cursor = DBConn().cursor();
+ cursor.execute( latin1_q )
+ cursor.execute( debs_q )
+ cursor.execute( olddeb_q )
+ cursor.execute( arches_q )
+
+ suites = self._suites()
+ for suite in [i.lower() for i in suites]:
+ suite_id = DBConn().get_suite_id(suite)
+
+ arch_list = self._arches(cursor, suite_id)
+ arch_all_id = DBConn().get_architecture_id("all")
+ for arch_id in arch_list:
+ cursor.execute( "EXECUTE debs_q(%d, %d)" % ( suite_id, arch_id[0] ) )
+
+ debs = cursor.fetchall()
+ count = 0
+ for deb in debs:
+ count += 1
+ cursor.execute( "EXECUTE olddeb_q(%d)" % (deb[0] ) )
+ old = cursor.fetchone()
+ if old:
+ log.debug( "already imported: %s" % deb[1] )
+ else:
+ debfile = os.path.join( pooldir, deb[1] )
+ if os.path.exists( debfile ):
+ contents = utils.generate_contents_information( debfile )
+ DBConn().insert_content_paths(deb[0], contents)
+ log.info( "imported (%d/%d): %s" % (count,len(debs),deb[1] ) )
+ else:
+ log.error( "missing .deb: %s" % deb[1] )
+
+ def generate(self):
+ """
+        Generate Contents-$arch.gz files for every available arch in each given suite.
+ """
+ cursor = DBConn().cursor();
+
+ cursor.execute( arches_q )
+ cursor.execute( contents_q )
+ cursor.execute( udeb_contents_q )
+
+ suites = self._suites()
+
+ # Get our suites, and the architectures
+ for suite in [i.lower() for i in suites]:
+ suite_id = DBConn().get_suite_id(suite)
+
+ arch_list = self._arches(cursor, suite_id)
+
+ arch_all_id = DBConn().get_architecture_id("all")
+
+ for arch_id in arch_list:
+ cursor.execute( "EXECUTE contents_q(%d,%d,%d,%d)" % (arch_id[0], arch_all_id, suite_id, suite_id))
+ self._write_content_file(cursor, "dists/%s/Contents-%s.gz" % (suite, arch_id[1]))
+
+ # The MORE fun part. Ok, udebs need their own contents files, udeb, and udeb-nf (not-free)
+ # This is HORRIBLY debian specific :-/
+ # First off, udeb
+ section_id = DBConn().get_section_id('debian-installer') # all udebs should be here)
+ if section_id != -1:
+ cursor.execute("EXECUTE udeb_contents_q(%d,%d,%d)" % (section_id, suite_id, suite_id))
+ self._write_content_file(cursor, "dists/%s/Contents-udeb.gz" % suite)
+
+ # Once more, with non-free
+ section_id = DBConn().get_section_id('non-free/debian-installer') # all udebs should be here)
+
+ if section_id != -1:
+ cursor.execute("EXECUTE udeb_contents_q(%d,%d,%d)" % (section_id, suite_id, suite_id))
+ self._write_content_file(cursor, "dists/%s/Contents-udeb-nf.gz" % suite)
+
+
+################################################################################
+
+ def _suites(self):
+ # return a list of suites to operate on
+ if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
+ suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
+ else:
+ suites = Config().SubTree("Suite").List()
+
+ return suites
+
+ def _arches(self, cursor, suite):
+ # return a list of archs to operate on
+ arch_list = [ ]
+ if Config().has_key( "%s::%s" %(options_prefix,"Arch")):
+ archs = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Arch")])
+ for arch_name in archs:
+ arch_list.append((DBConn().get_architecture_id(arch_name), arch_name))
+ else:
+ cursor.execute("EXECUTE arches_q(%d)" % (suite))
+ while True:
+ r = cursor.fetchone()
+ if not r:
+ break
+
+ if r[1] != "source" and r[1] != "all":
+ arch_list.append((r[0], r[1]))
+
+ return arch_list
+
+################################################################################
+
+def main():
+ cnf = Config()
+
+ arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")),
+ ('s',"suite", "%s::%s" % (options_prefix,"Suite"),"HasArg"),
+ ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")),
+ ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")),
+ ('a',"arch", "%s::%s" % (options_prefix,"Arch"),"HasArg"),
+ ]
+
+ commands = {'generate' : Contents.generate,
+ 'bootstrap' : Contents.bootstrap,
+ 'cruft' : Contents.cruft,
+ }
+
+    args = apt_pkg.ParseCommandLine(cnf.Cnf, arguments,sys.argv)
+
+    # configure logging only after the command line has been parsed,
+    # otherwise the -q/-v options can never influence the log level
+    level=logging.INFO
+    if cnf.has_key("%s::%s" % (options_prefix,"Quiet")):
+        level=logging.ERROR
+
+    elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")):
+        level=logging.DEBUG
+
+    logging.basicConfig( level=level,
+                         format='%(asctime)s %(levelname)s %(message)s',
+                         stream = sys.stderr )
+
+    if (len(args) < 1) or not commands.has_key(args[0]):
+        usage()
+
+ if cnf.has_key("%s::%s" % (options_prefix,"Help")):
+ usage()
+
+ commands[args[0]](Contents())
+
+if __name__ == '__main__':
+ main()
"Generate package <-> file mapping"),
("generate-releases",
"Generate Release files"),
- ("generate-contents",
+ ("contents",
"Generate contest files"),
("generate-index-diffs",
"Generate .diff/Index files"),
"Check for users with no packages in the archive"),
("import-archive",
"Populate SQL database based from an archive tree"),
- ("import-contents",
- "Populate SQL database with Contents files"),
("import-keyring",
"Populate fingerprint/uid table based on a new/updated keyring"),
("import-ldap-fingerprints",
INITCOND = ''
);""")
+        c.execute( "CREATE INDEX content_associations_binary ON content_associations(binary_pkg)" )
+
c.execute("UPDATE config SET value = '2' WHERE name = 'db_revision'")
self.db.commit()
###############################################################################
import errno, fcntl, os, sys, time, re
-import apt_pkg, tarfile, commands
+import apt_pkg, commands
from daklib import database
from daklib import logging
from daklib import queue
#!/usr/bin/env python
-# Config access class
-# Copyright (C) 2008 Mark Hymers <mhy@debian.org>
+"""
+Config access class
+
+@contact: Debian FTPMaster <ftpmaster@debian.org>
+@copyright: 2008 Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
def _startup(self, *args, **kwargs):
self._readconf()
+ def has_key(self, name):
+ return self.Cnf.has_key(name)
+
def __getitem__(self, name):
return self.Cnf[name]
- def GetDBConnString(self):
- s = "dbname=%s" % self.Cnf["DB::Name"]
- if self.Cnf["DB::Host"]:
- s += " host=%s" % self.Cnf["DB::Host"]
- if self.Cnf["DB::Port"] and self.Cnf["DB::Port"] != "-1":
- s += " port=%s" % self.Cnf["DB::Port"]
-
- return s
@copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
@copyright: 2008-2009 Mark Hymers <mhy@debian.org>
@copyright: 2009 Joerg Jaspert <joerg@debian.org>
+@copyright: 2009 Mike O'Connor <stew@debian.org>
@license: GNU General Public License version 2 or later
"""
################################################################################
+import os
import psycopg2
-from psycopg2.extras import DictCursor
from Singleton import Singleton
from Config import Config
## Connection functions
def __createconn(self):
- connstr = Config().GetDBConnString()
+ cnf = Config()
+ connstr = "dbname=%s" % cnf["DB::Name"]
+ if cnf["DB::Host"]:
+ connstr += " host=%s" % cnf["DB::Host"]
+ if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+ connstr += " port=%s" % cnf["DB::Port"]
+
self.db_con = psycopg2.connect(connstr)
def reconnect(self):
'architecture': Cache(),
'archive': Cache(),
'component': Cache(),
+ 'content_path_names': Cache(),
+ 'content_file_names': Cache(),
'location': Cache(lambda x: '%s_%s_%s' % (x['location'], x['component'], x['location'])),
'maintainer': {}, # TODO
'keyring': {}, # TODO
AND su.suite_name=%(suite)s
AND s.source=%(source)""", {'suite': suite, 'source': source}, cachename='suite_version')
+
+ def get_or_set_contents_file_id(self, filename):
+ """
+ Returns database id for given filename.
+
+ Results are kept in a cache during runtime to minimize database queries.
+ If no matching file is found, a row is inserted.
+
+ @type filename: string
+ @param filename: The filename
+
+ @rtype: int
+ @return: the database id for the given component
+ """
+ values={'value': filename}
+ query = "SELECT id FROM content_file_names WHERE file = %(value)s"
+ id = self.__get_single_id(query, values, cachename='content_file_names')
+ if not id:
+ c = self.db_con.cursor()
+ c.execute( "INSERT INTO content_file_names VALUES (DEFAULT, %(value)s) RETURNING id",
+ values )
+
+ id = c.fetchone()[0]
+ self.caches['content_file_names'].SetValue(values, id)
+
+ return id
+
+ def get_or_set_contents_path_id(self, path):
+ """
+ Returns database id for given path.
+
+ Results are kept in a cache during runtime to minimize database queries.
+ If no matching file is found, a row is inserted.
+
+ @type path: string
+ @param path: The filename
+
+ @rtype: int
+ @return: the database id for the given component
+ """
+ values={'value': path}
+ query = "SELECT id FROM content_file_paths WHERE path = %(value)s"
+ id = self.__get_single_id(query, values, cachename='content_path_names')
+ if not id:
+ c = self.db_con.cursor()
+ c.execute( "INSERT INTO content_file_paths VALUES (DEFAULT, %(value)s) RETURNING id",
+ values )
+
+ id = c.fetchone()[0]
+ self.caches['content_path_names'].SetValue(values, id)
+
+ return id
+
+ def insert_content_paths(self, bin_id, fullpaths):
+ """
+ Make sure given path is associated with given binary id
+
+ @type bin_id: int
+ @param bin_id: the id of the binary
+ @type fullpath: string
+ @param fullpath: the path of the file being associated with the binary
+ """
+
+ c = self.db_con.cursor()
+
+ for fullpath in fullpaths:
+ c.execute( "BEGIN WORK" )
+ (path, file) = os.path.split(fullpath)
+
+ # Get the necessary IDs ...
+ file_id = self.get_or_set_contents_file_id(file)
+ path_id = self.get_or_set_contents_path_id(path)
+
+ # Determine if we're inserting a duplicate row
+
+ c.execute("SELECT 1 FROM content_associations WHERE binary_pkg = '%d' AND filepath = '%d' AND filename = '%d'" % (int(bin_id), path_id, file_id))
+ if not c.fetchone():
+ # no, we are not, do the insert
+
+ c.execute("INSERT INTO content_associations VALUES (DEFAULT, '%d', '%d', '%d')" % (bin_id, path_id, file_id))
+ c.execute( "COMMIT" )
#!/usr/bin/env python
# vim:set et ts=4 sw=4:
-# Singleton pattern code
-# Copyright (C) 2008 Mark Hymers <mhy@debian.org>
+"""
+Singleton pattern code
+
+Inspiration for this very simple ABC was taken from various documents /
+tutorials / mailing lists. This may not be thread safe but given that
+(as I write) large chunks of dak aren't even type-safe, I'll live with
+it for now
-# Inspiration for this very simple ABC was taken from various documents /
-# tutorials / mailing lists. This may not be thread safe but given that
-# (as I write) large chunks of dak aren't even type-safe, I'll live with
-# it for now
+@contact: Debian FTPMaster <ftpmaster@debian.org>
+@copyright: 2008 Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
################################################################################
+++ /dev/null
-#!/usr/bin/python
-
-"""
-Class providing access to a projectb database
-
-This class provides convenience functions for common queries to a
-projectb database using psycopg2.
-
-Copyright (C) 2009 Mike O'Connor <stew@vireo.org>
-"""
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-################################################################################
-
-import psycopg2
-
-################################################################################
-
-class Projectb(object):
- """
- Object providing methods for accessing the projectb database
- """
- def __init__(self,Cnf):
- connect_str = "dbname=%s"% (Cnf["DB::Name"])
- if Cnf["DB::Host"] != '': connect_str += " host=%s" % (Cnf["DB::Host"])
- if Cnf["DB::Port"] != '-1': connect_str += " port=%d" % (int(Cnf["DB::Port"]))
-
- self.dbh = psycopg2.connect(connect_str)
- self.suite_id_cache = {}
- self.architecture_id_cache = {}
- self.section_id_cache = {}
-
- def get_suite_id(self, suite_name):
- """
- return the id for the given suite_name
-
- @param suite_name: name of a suite such as "unsatble" or "testing"
-
- @rtype: int
- @return: id of given suite or None if suite_name not matched
-
- >>> Cnf = {'DB::Name' : "projectb","DB::Host":"","DB::Port":'-1' }
- >>> pb = Projectb( Cnf )
- >>> pb.get_suite_id("unstable")
- 5
- >>> pb.get_suite_id("n'existe pas")
- """
- if not self.suite_id_cache.has_key(suite_name):
- c = self.dbh.cursor()
- c.execute("SELECT id FROM suite WHERE suite_name=%(suite_name)s",
- {'suite_name':suite_name})
- r = c.fetchone()
- if r:
- self.suite_id_cache[suite_name] = r[0]
- else:
- self.suite_id_cache[suite_name] = None
-
- return self.suite_id_cache[suite_name]
-
- def get_architecture_id(self, architecture_name):
- """
- return the id for the given architecture_name
-
- @param architecture_name: name of a architecture such as "i386" or "source"
-
- @rtype: int
- @return: id of given architecture or None if architecture_name not matched
-
- >>> Cnf = {'DB::Name' : "projectb","DB::Host":"","DB::Port":'-1' }
- >>> pb = Projectb( Cnf )
- >>> pb.get_architecture_id("i386")
- 7
- >>> pb.get_architecture_id("n'existe pas")
- """
- if not self.architecture_id_cache.has_key(architecture_name):
- c = self.dbh.cursor()
- c.execute("SELECT id FROM architecture WHERE arch_string=%(architecture_name)s",
- {'architecture_name':architecture_name})
- r = c.fetchone()
- if r:
- self.architecture_id_cache[architecture_name] = r[0]
- else:
- self.architecture_id_cache[architecture_name] = None
-
- return self.architecture_id_cache[architecture_name]
-
- def get_section_id(self, section_name):
- """
- return the id for the given section_name
-
- @param section_name: name of a section such as "x11" or "non-free/libs"
-
- @rtype: int
- @return: id of given section or None if section_name not matched
-
- >>> Cnf = {'DB::Name' : "projectb","DB::Host":"","DB::Port":'-1' }
- >>> pb = Projectb( Cnf )
- >>> pb.get_section_id("non-free/libs")
- 285
- >>> pb.get_section_id("n'existe pas")
- """
- if not self.section_id_cache.has_key(section_name):
- c = self.dbh.cursor()
- c.execute("SELECT id FROM section WHERE section=%(section_name)s",
- {'section_name':section_name})
- r = c.fetchone()
- if r:
- self.section_id_cache[section_name] = r[0]
- else:
- self.section_id_cache[section_name] = None
-
- return self.section_id_cache[section_name]
-
-if __name__ == "__main__":
- import doctest
- doctest.testmod()
import apt_pkg
import database
import time
+import tarfile
from dak_exceptions import *
from regexes import re_html_escaping, html_escaping, re_single_line_field, \
re_multi_line_field, re_srchasver, re_verwithext, \
# and pieces the deb had ...
if chunks[2] == "data.tar.gz":
data = tarfile.open("data.tar.gz", "r:gz")
- elif data_tar == "data.tar.bz2":
+ elif chunks[2] == "data.tar.bz2":
data = tarfile.open("data.tar.bz2", "r:bz2")
else:
os.remove(chunks[2])
--- /dev/null
+This file maps each file available in the Debian GNU/Linux system to
+the package from which it originates. It includes packages from the
+DIST distribution for the ARCH architecture.
+
+You can use this list to determine which package contains a specific
+file, or whether or not a specific file is available. The list is
+updated weekly, each architecture on a different day.
+
+When a file is contained in more than one package, all packages are
+listed. When a directory is contained in more than one package, only
+the first is listed.
+
+The best way to search quickly for a file is with the Unix `grep'
+utility, as in `grep <regular expression> CONTENTS':
+
+ $ grep nose Contents
+ etc/nosendfile net/sendfile
+ usr/X11R6/bin/noseguy x11/xscreensaver
+ usr/X11R6/man/man1/noseguy.1x.gz x11/xscreensaver
+ usr/doc/examples/ucbmpeg/mpeg_encode/nosearch.param graphics/ucbmpeg
+ usr/lib/cfengine/bin/noseyparker admin/cfengine
+
+This list contains files in all packages, even though not all of the
+packages are installed on an actual system at once. If you want to
+find out which packages on an installed Debian system provide a
+particular file, you can use `dpkg --search <filename>':
+
+ $ dpkg --search /usr/bin/dselect
+ dpkg: /usr/bin/dselect
+
+
+FILE LOCATION