Dinstall
{
GPGKeyring {
- "/srv/keyring.debian.org/keyrings/debian-keyring.gpg";
+ "/srv/keyring.debian.org/keyrings/debian-keyring.gpg";
"/srv/keyring.debian.org/keyrings/debian-keyring.pgp";
"/srv/ftp.debian.org/keyrings/debian-maintainers.gpg";
};
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- arm;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-changes@lists.debian.org";
// Version "4.0r1";
Origin "Debian";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- arm;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-changes@lists.debian.org";
CopyChanges "dists/oldstable-proposed-updates/";
CopyDotDak "/srv/ftp.debian.org/queue/oldstable-proposed-updates/";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- arm;
- armel;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-changes@lists.debian.org";
// Version "5.0r0";
Origin "Debian";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- arm;
- armel;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-changes@lists.debian.org";
CopyChanges "dists/proposed-updates/";
CopyDotDak "/srv/ftp.debian.org/queue/proposed-updates/";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- armel;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-testing-changes@lists.debian.org";
Origin "Debian";
Description "Debian Testing distribution - Not Released";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- armel;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-testing-changes@lists.debian.org";
Origin "Debian";
Description "Debian Testing distribution updates - Not Released";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- m68k;
- };
Announce "debian-testing-changes@lists.debian.org";
Origin "Debian";
Description "Debian Etch for m68k - Not Released";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- armel;
- hppa;
- hurd-i386;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-devel-changes@lists.debian.org";
Origin "Debian";
Description "Debian Unstable - Not Released";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- armel;
- hppa;
- hurd-i386;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-devel-changes@lists.debian.org";
Origin "Debian";
Description "Experimental packages - not released; use at your own risk.";
hppa "HP PA RISC";
amd64 "AMD64";
arm "ARM";
+ armel "ARM eabi";
i386 "Intel ia32";
ia64 "Intel ia64";
m68k "Motorola Mc680x0";
Description "Software that fails to meet the DFSG";
MeetsDFSG "false";
};
-
- mixed // **NB:** only used for overrides; not yet used in other code
- {
- Description "Legacy Mixed";
- MeetsDFSG "false";
- };
};
Section
critical;
};
};
+
+Contents
+{
+ Header "contents";
+}
--- /dev/null
- # Internal method to return the header for Contents.gz files
+#!/usr/bin/env python
+"""
+Create all the contents files
+
+@contact: Debian FTPMaster <ftpmaster@debian.org>
+@copyright: 2008, 2009 Michael Casadevall <mcasadevall@debian.org>
+@copyright: 2009 Mike O'Connor <stew@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+################################################################################
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+# <Ganneff> there is the idea to slowly replace contents files
+# <Ganneff> with a new generation of such files.
+# <Ganneff> having more info.
+
+# <Ganneff> of course that wont help for now where we need to generate them :)
+
+################################################################################
+
+import sys
+import os
+import tempfile
+import logging
+import math
+import gzip
+import apt_pkg
+from daklib import utils
+from daklib.config import Config
+from daklib.dbconn import DBConn
+################################################################################
+
+def usage (exit_code=0):
+ print """Usage: dak contents [options] command [arguments]
+
+COMMANDS
+ generate
+ generate Contents-$arch.gz files
+
+ bootstrap
+        scan the debs in the existing pool and load contents into the database
+
+ cruft
+ remove files/paths which are no longer referenced by a binary
+
+OPTIONS
+ -h, --help
+ show this help and exit
+
+ -v, --verbose
+ show verbose information messages
+
+ -q, --quiet
+        suppress all output but errors
+
+ -s, --suite={stable,testing,unstable,...}
+        only operate on a single suite
+
+ -a, --arch={i386,amd64}
+        only operate on a single architecture
+"""
+ sys.exit(exit_code)
+
+################################################################################
+
+# where in dak.conf all of our configuration will be stowed
+
+options_prefix = "Contents"
+options_prefix = "%s::Opitons" % options_prefix
+
+log = logging.getLogger()
+
+################################################################################
+
++# we unfortunately still have broken stuff in headers
+latin1_q = """SET CLIENT_ENCODING TO 'LATIN1'"""
+
++# get all the arches delivered for a given suite
++# this should probably exist somewhere common
+arches_q = """PREPARE arches_q as
+ SELECT s.architecture, a.arch_string
+ FROM suite_architectures s
+ JOIN architecture a ON (s.architecture=a.id)
+ WHERE suite = $1"""
+
++# find me the .deb for a given binary id
+debs_q = """PREPARE debs_q as
+ SELECT b.id, f.filename FROM bin_assoc_by_arch baa
+ JOIN binaries b ON baa.bin=b.id
+ JOIN files f ON b.file=f.id
+ WHERE suite = $1
+ AND arch = $2"""
+
++# ask if we already have contents associated with this binary
+olddeb_q = """PREPARE olddeb_q as
+ SELECT 1 FROM content_associations
+ WHERE binary_pkg = $1
+ LIMIT 1"""
+
++# find me all of the contents for a given .deb
+contents_q = """PREPARE contents_q as
+ SELECT (p.path||'/'||n.file) AS fn,
+ comma_separated_list(s.section||'/'||b.package)
+ FROM content_associations c
+ JOIN content_file_paths p ON (c.filepath=p.id)
+ JOIN content_file_names n ON (c.filename=n.id)
+ JOIN binaries b ON (b.id=c.binary_pkg)
+ JOIN bin_associations ba ON (b.id=ba.bin)
+ JOIN override o ON (o.package=b.package)
+ JOIN section s ON (s.id=o.section)
+ WHERE (b.architecture = $1 OR b.architecture = $2)
+ AND ba.suite = $3
+ AND o.suite = $4
+ AND b.type = 'deb'
+ AND o.type = '7'
+ GROUP BY fn
+ ORDER BY fn"""
+
++# find me all of the contents for a given .udeb
+udeb_contents_q = """PREPARE udeb_contents_q as
+ SELECT (p.path||'/'||n.file) as fn,
+ comma_separated_list(s.section||'/'||b.package)
+ FROM content_associations c
+ JOIN content_file_paths p ON (c.filepath=p.id)
+ JOIN content_file_names n ON (c.filename=n.id)
+ JOIN binaries b ON (b.id=c.binary_pkg)
+ JOIN bin_associations ba ON (b.id=ba.bin)
+ JOIN override o ON (o.package=b.package)
+ JOIN section s ON (s.id=o.section)
+ WHERE s.id = $1
+ AND ba.suite = $2
+ AND o.suite = $3
+ AND b.type = 'udeb'
+ AND o.type = '8'
+ GROUP BY fn
+ ORDER BY fn"""
+
++# clear out all of the temporarily stored content associations
++# this should be run only after p-a has run. after a p-a
++# run we should have either accepted or rejected every package
++# so there should no longer be anything in the queue
++remove_temp_contents_cruft_q = """DELETE FROM temp_content_associations"""
++
++# delete any filenames we are storing which have no binary associated with them
++remove_filename_cruft_q = """DELETE FROM content_file_names
++ WHERE id IN (SELECT cfn.id FROM content_file_names cfn
++ LEFT JOIN content_associations ca
++ ON ca.filename=cfn.id
++                             WHERE ca.id IS NULL)"""
++
++# delete any paths we are storing which have no binary associated with them
++remove_filepath_cruft_q = """DELETE FROM content_file_paths
++ WHERE id IN (SELECT cfn.id FROM content_file_paths cfn
++ LEFT JOIN content_associations ca
++ ON ca.filepath=cfn.id
++ WHERE ca.id IS NULL)"""
+class Contents(object):
+ """
+ Class capable of generating Contents-$arch.gz files
+
+    Usage: Contents().generate()
+ """
+
+ def __init__(self):
+ self.header = None
+
+ def _getHeader(self):
- log.error( "error openeing header file: %d\n%s" % (Config()["Contents::Header"],
- traceback.format_exc() ))
++ """
++ Internal method to return the header for Contents.gz files
++
++ This is boilerplate which explains the contents of the file and how
++ it can be used.
++ """
+ if self.header == None:
+ if Config().has_key("Contents::Header"):
+ try:
+ h = open(os.path.join( Config()["Dir::Templates"],
+ Config()["Contents::Header"] ), "r")
+ self.header = h.read()
+ print( "header: %s" % self.header )
+ h.close()
+ except:
- # Internal method for writing all the results to a given file
++ log.error( "error opening header file: %d\n%s" % (Config()["Contents::Header"],
++ traceback.format_exc() ))
+ self.header = False
+ else:
+ print( "no header" )
+ self.header = False
+
+ return self.header
+
+ # goal column for section column
+ _goal_column = 54
+
+ def _write_content_file(self, cursor, filename):
- remove files/paths from the DB which are no longer referenced by binaries
++ """
++ Internal method for writing all the results to a given file.
++ The cursor should have a result set generated from a query already.
++ """
+ f = gzip.open(Config()["Dir::Root"] + filename, "w")
+ try:
+ header = self._getHeader()
+
+ if header:
+ f.write(header)
+
+ while True:
+ contents = cursor.fetchone()
+ if not contents:
+ return
+
+ num_tabs = max(1,
+ int( math.ceil( (self._goal_column - len(contents[0])) / 8) ) )
+ f.write(contents[0] + ( '\t' * num_tabs ) + contents[-1] + "\n")
+
+ finally:
+ f.close()
+
+ def cruft(self):
+ """
- cursor.execute( """DELETE FROM content_file_names
- WHERE id IN (SELECT cfn.id FROM content_file_names cfn
- LEFT JOIN content_associations ca
- ON ca.filename=cfn.id
- WHERE ca.id IS NULL)""" );
- cursor.execute( """DELETE FROM content_file_paths
- WHERE id IN (SELECT cfn.id FROM content_file_paths cfn
- LEFT JOIN content_associations ca
- ON ca.filepath=cfn.id
- WHERE ca.id IS NULL)""" );
++ remove files/paths from the DB which are no longer referenced
++ by binaries and clean the temporary table
+ """
+ cursor = DBConn().cursor();
+ cursor.execute( "BEGIN WORK" )
- # return a list of suites to operate on
++ cursor.execute( remove_temp_contents_cruft_q )
++ cursor.execute( remove_filename_cruft_q )
++ cursor.execute( remove_filepath_cruft_q )
+ cursor.execute( "COMMIT" )
+
+
+ def bootstrap(self):
+ """
+ scan the existing debs in the pool to populate the contents database tables
+ """
+ pooldir = Config()[ 'Dir::Pool' ]
+
+ cursor = DBConn().cursor();
+ cursor.execute( latin1_q )
+ cursor.execute( debs_q )
+ cursor.execute( olddeb_q )
+ cursor.execute( arches_q )
+
+ suites = self._suites()
+ for suite in [i.lower() for i in suites]:
+ suite_id = DBConn().get_suite_id(suite)
+
+ arch_list = self._arches(cursor, suite_id)
+ arch_all_id = DBConn().get_architecture_id("all")
+ for arch_id in arch_list:
+ cursor.execute( "EXECUTE debs_q(%d, %d)" % ( suite_id, arch_id[0] ) )
+
+ debs = cursor.fetchall()
+ count = 0
+ for deb in debs:
+ count += 1
+ cursor.execute( "EXECUTE olddeb_q(%d)" % (deb[0] ) )
+ old = cursor.fetchone()
+ if old:
+ log.debug( "already imported: %s" % deb[1] )
+ else:
+ debfile = os.path.join( pooldir, deb[1] )
+ if os.path.exists( debfile ):
+ contents = utils.generate_contents_information( debfile )
+ DBConn().insert_content_paths(deb[0], contents)
+ log.info( "imported (%d/%d): %s" % (count,len(debs),deb[1] ) )
+ else:
+ log.error( "missing .deb: %s" % deb[1] )
+
+ def generate(self):
+ """
+        Generate Contents-$arch.gz files for every available arch in each given suite.
+ """
+ cursor = DBConn().cursor();
+
+ cursor.execute( arches_q )
+ cursor.execute( contents_q )
+ cursor.execute( udeb_contents_q )
+
+ suites = self._suites()
+
+ # Get our suites, and the architectures
+ for suite in [i.lower() for i in suites]:
+ suite_id = DBConn().get_suite_id(suite)
+ arch_list = self._arches(cursor, suite_id)
+
+ arch_all_id = DBConn().get_architecture_id("all")
+
+ for arch_id in arch_list:
+ cursor.execute( "EXECUTE contents_q(%d,%d,%d,%d)" % (arch_id[0], arch_all_id, suite_id, suite_id))
+ self._write_content_file(cursor, "dists/%s/Contents-%s.gz" % (suite, arch_id[1]))
+
+ # The MORE fun part. Ok, udebs need their own contents files, udeb, and udeb-nf (not-free)
+ # This is HORRIBLY debian specific :-/
+ # First off, udeb
+ section_id = DBConn().get_section_id('debian-installer') # all udebs should be here)
+ if section_id != -1:
+ cursor.execute("EXECUTE udeb_contents_q(%d,%d,%d)" % (section_id, suite_id, suite_id))
+ self._write_content_file(cursor, "dists/%s/Contents-udeb.gz" % suite)
+
+ # Once more, with non-free
+ section_id = DBConn().get_section_id('non-free/debian-installer') # all udebs should be here)
+
+ if section_id != -1:
+ cursor.execute("EXECUTE udeb_contents_q(%d,%d,%d)" % (section_id, suite_id, suite_id))
+ self._write_content_file(cursor, "dists/%s/Contents-udeb-nf.gz" % suite)
+
+
+################################################################################
+
+ def _suites(self):
- # return a list of archs to operate on
++ """
++ return a list of suites to operate on
++ """
+ if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
+ suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
+ else:
+ suites = Config().SubTree("Suite").List()
+
+ return suites
+
+ def _arches(self, cursor, suite):
- logging.basicConfig( level=logging.DEBUG,
++ """
++ return a list of archs to operate on
++ """
+ arch_list = [ ]
+ if Config().has_key( "%s::%s" %(options_prefix,"Arch")):
+ archs = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Arch")])
+ for arch_name in archs:
+ arch_list.append((DBConn().get_architecture_id(arch_name), arch_name))
+ else:
+ cursor.execute("EXECUTE arches_q(%d)" % (suite))
+ while True:
+ r = cursor.fetchone()
+ if not r:
+ break
+
+ if r[1] != "source" and r[1] != "all":
+ arch_list.append((r[0], r[1]))
+
+ return arch_list
+
+################################################################################
+
+def main():
+ cnf = Config()
+
+ arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")),
+ ('s',"suite", "%s::%s" % (options_prefix,"Suite"),"HasArg"),
+ ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")),
+ ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")),
+ ('a',"arch", "%s::%s" % (options_prefix,"Arch"),"HasArg"),
+ ]
+
+ commands = {'generate' : Contents.generate,
+ 'bootstrap' : Contents.bootstrap,
+ 'cruft' : Contents.cruft,
+ }
+
+ level=logging.INFO
+ if cnf.has_key("%s::%s" % (options_prefix,"Quiet")):
+ level=logging.ERROR
+
+ elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")):
+ level=logging.DEBUG
+
+
++ logging.basicConfig( level=level,
+ format='%(asctime)s %(levelname)s %(message)s',
+ stream = sys.stderr )
+
+ args = apt_pkg.ParseCommandLine(cnf.Cnf, arguments,sys.argv)
+
+ if (len(args) < 1) or not commands.has_key(args[0]):
+ usage()
+
+ if cnf.has_key("%s::%s" % (options_prefix,"Help")):
+ usage()
+
+ commands[args[0]](Contents())
+
+if __name__ == '__main__':
+ main()
################################################################################
--import sys, imp
--import daklib.utils, daklib.extensions
++import sys
++import imp
++import daklib.utils
++import daklib.extensions
################################################################################
"Generate package <-> file mapping"),
("generate-releases",
"Generate Release files"),
- "Generate contest files"),
+ ("contents",
++ "Generate content files"),
("generate-index-diffs",
"Generate .diff/Index files"),
("clean-suites",
"Split queue/done into a date-based hierarchy"),
("stats",
"Generate statistics"),
+ ("calculate-shasums",
+ "Calculate missing sha1sums and sha256sums"),
("bts-categorize",
"Categorize uncategorized bugs filed against ftp.debian.org"),
("add-user",
#!/usr/bin/env python
- # coding=utf8
--
"""
- Debian Archive Kit Database Update Script
- Copyright © 2008 Michael Casadevall <mcasadevall@debian.org>
- Copyright © 2008 Roger Leigh <rleigh@debian.org>
+ Database Update Script - Get suite_architectures table use sane values
- Debian Archive Kit Database Update Script 2
+ @contact: Debian FTP Master <ftpmaster@debian.org>
+ @copyright: 2009 Joerg Jaspert <joerg@debian.org>
+ @license: GNU General Public License version 2 or later
-
"""
# This program is free software; you can redistribute it and/or modify
################################################################################
- # <tomv_w> really, if we want to screw ourselves, let's find a better way.
- # <Ganneff> rm -rf /srv/ftp.debian.org
+ import psycopg2
+ from daklib.dak_exceptions import DBUpdateError
+ from daklib.utils import get_conf
################################################################################
- import psycopg2, time
-
- ################################################################################
+ suites = {} #: Cache of existing suites
+ archs = {} #: Cache of existing architectures
def do_update(self):
- print "Adding content fields to database"
+ """ Execute the DB update """
+ print "Lets make suite_architecture table use sane values"
+ Cnf = get_conf()
+
+ query = "INSERT into suite_architectures (suite, architecture) VALUES (%s, %s)" #: Update query
try:
c = self.db.cursor()
- c.execute("""CREATE TABLE content_file_paths (
- id serial primary key not null,
- path text unique not null
- )""")
-
- c.execute("""CREATE TABLE content_file_names (
- id serial primary key not null,
- file text unique not null
- )""")
-
- c.execute("""CREATE TABLE content_associations (
- id serial not null,
- binary_pkg int4 not null references binaries(id) on delete cascade,
- filepath int4 not null references content_file_paths(id) on delete cascade,
- filename int4 not null references content_file_names(id) on delete cascade
- );""")
-
- c.execute("""CREATE TABLE temp_content_associations (
- id serial not null,
- package text not null,
- version debversion not null,
- filepath int4 not null references content_file_paths(id) on delete cascade,
- filename int4 not null references content_file_names(id) on delete cascade
- );""")
-
- c.execute("""CREATE FUNCTION comma_concat(text, text) RETURNS text
- AS $_$select case
- WHEN $2 is null or $2 = '' THEN $1
- WHEN $1 is null or $1 = '' THEN $2
- ELSE $1 || ',' || $2
- END$_$
- LANGUAGE sql""")
-
- c.execute("""CREATE AGGREGATE comma_separated_list (
- BASETYPE = text,
- SFUNC = comma_concat,
- STYPE = text,
- INITCOND = ''
- );""")
-
- c.execute( "CREATE INDEX content_assocaitions_binary ON content_associations(binary_pkg)" )
-
- c.execute("UPDATE config SET value = '2' WHERE name = 'db_revision'")
- self.db.commit()
+ c.execute("DELETE FROM suite_architectures;")
- print "REMINDER: Remember to fully regenerate the Contents files before running import-contents"
- print ""
- print "Pausing for five seconds ..."
- time.sleep (5)
+ c.execute("SELECT id, arch_string FROM architecture;")
+ a=c.fetchall()
+ for arch in a:
+ archs[arch[1]]=arch[0]
+
+ c.execute("SELECT id,suite_name FROM suite")
+ s=c.fetchall()
+ for suite in s:
+ suites[suite[1]]=suite[0]
+
+ for suite in Cnf.SubTree("Suite").List():
+ print "Processing suite %s" % (suite)
+ architectures = Cnf.SubTree("Suite::" + suite).ValueList("Architectures")
+ suite = suite.lower()
+ for arch in architectures:
+ c.execute(query, [suites[suite], archs[arch]])
+
+ c.execute("UPDATE config SET value = '4' WHERE name = 'db_revision'")
+
+ self.db.commit()
except psycopg2.ProgrammingError, msg:
self.db.rollback()
- print "FATAL: Unable to apply debversion table update 2!"
- print "Error Message: " + str(msg)
- print "Database changes have been rolled back."
+ raise DBUpdateError, "Unable to apply sanity to suite_architecture table, rollback issued. Error message : %s" % (str(msg))
################################################################################
-import pg, sys
+import psycopg2, sys
import apt_pkg
-from daklib import database
-from daklib import utils
-
-################################################################################
-Cnf = None
-projectB = None
+from daklib import utils
+from daklib.DBConn import DBConn
+from daklib.Config import Config
################################################################################
################################################################################
def sql_get (config, key):
- """Return the value of config[key] in quotes or NULL if it doesn't exist."""
+ """Return the value of config[key] or None if it doesn't exist."""
- if config.has_key(key):
- return "'%s'" % (config[key])
- else:
- return "NULL"
+ try:
+ return config[key]
+ except KeyError:
+ return None
################################################################################
-def do_archive():
- """Initalize the archive table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM archive")
- for name in Cnf.SubTree("Archive").List():
- archive_config = Cnf.SubTree("Archive::%s" % (name))
- origin_server = sql_get(archive_config, "OriginServer")
- description = sql_get(archive_config, "Description")
- projectB.query("INSERT INTO archive (name, origin_server, description) "
- "VALUES ('%s', %s, %s)"
- % (name, origin_server, description))
- projectB.query("COMMIT WORK")
-
-def do_architecture():
- """Initalize the architecture table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM architecture")
- for arch in Cnf.SubTree("Architectures").List():
- description = Cnf["Architectures::%s" % (arch)]
- projectB.query("INSERT INTO architecture (arch_string, description) "
- "VALUES ('%s', '%s')" % (arch, description))
- projectB.query("COMMIT WORK")
-
-def do_component():
- """Initalize the component table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM component")
- for name in Cnf.SubTree("Component").List():
- component_config = Cnf.SubTree("Component::%s" % (name))
- description = sql_get(component_config, "Description")
- if component_config.get("MeetsDFSG").lower() == "true":
- meets_dfsg = "true"
- else:
- meets_dfsg = "false"
- projectB.query("INSERT INTO component (name, description, meets_dfsg) "
- "VALUES ('%s', %s, %s)"
- % (name, description, meets_dfsg))
- projectB.query("COMMIT WORK")
-
-def do_location():
- """Initalize the location table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM location")
- for location in Cnf.SubTree("Location").List():
- location_config = Cnf.SubTree("Location::%s" % (location))
- archive_id = database.get_archive_id(location_config["Archive"])
- if archive_id == -1:
- utils.fubar("Archive '%s' for location '%s' not found."
- % (location_config["Archive"], location))
- location_type = location_config.get("type")
- if location_type == "pool":
- for component in Cnf.SubTree("Component").List():
- component_id = database.get_component_id(component)
- projectB.query("INSERT INTO location (path, component, "
- "archive, type) VALUES ('%s', %d, %d, '%s')"
- % (location, component_id, archive_id,
- location_type))
- else:
- utils.fubar("E: type '%s' not recognised in location %s."
- % (location_type, location))
- projectB.query("COMMIT WORK")
-
-def do_suite():
- """Initalize the suite table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM suite")
- for suite in Cnf.SubTree("Suite").List():
- suite_config = Cnf.SubTree("Suite::%s" %(suite))
- version = sql_get(suite_config, "Version")
- origin = sql_get(suite_config, "Origin")
- description = sql_get(suite_config, "Description")
- projectB.query("INSERT INTO suite (suite_name, version, origin, "
- "description) VALUES ('%s', %s, %s, %s)"
- % (suite.lower(), version, origin, description))
- for architecture in database.get_suite_architectures(suite):
- architecture_id = database.get_architecture_id (architecture)
- if architecture_id < 0:
- utils.fubar("architecture '%s' not found in architecture"
- " table for suite %s."
- % (architecture, suite))
- projectB.query("INSERT INTO suite_architectures (suite, "
- "architecture) VALUES (currval('suite_id_seq'), %d)"
- % (architecture_id))
- projectB.query("COMMIT WORK")
-
-def do_override_type():
- """Initalize the override_type table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM override_type")
- for override_type in Cnf.ValueList("OverrideType"):
- projectB.query("INSERT INTO override_type (type) VALUES ('%s')"
- % (override_type))
- projectB.query("COMMIT WORK")
-
-def do_priority():
- """Initialize the priority table."""
-
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM priority")
- for priority in Cnf.SubTree("Priority").List():
- projectB.query("INSERT INTO priority (priority, level) VALUES "
- "('%s', %s)"
- % (priority, Cnf["Priority::%s" % (priority)]))
- projectB.query("COMMIT WORK")
-
-def do_section():
- """Initalize the section table."""
- projectB.query("BEGIN WORK")
- projectB.query("DELETE FROM section")
- for component in Cnf.SubTree("Component").List():
- if Cnf["Control-Overrides::ComponentPosition"] == "prefix":
- suffix = ""
- if component != "main":
- prefix = component + '/'
- else:
- prefix = ""
- else:
- prefix = ""
- if component != "main":
- suffix = '/' + component
+class InitDB(object):
+ def __init__(self, Cnf, projectB):
+ self.Cnf = Cnf
+ self.projectB = projectB
+
+ def do_archive(self):
- """Initalize the archive table."""
++        """Initialize the archive table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM archive")
+ archive_add = "INSERT INTO archive (name, origin_server, description) VALUES (%s, %s, %s)"
+ for name in self.Cnf.SubTree("Archive").List():
+ archive_config = self.Cnf.SubTree("Archive::%s" % (name))
+ origin_server = sql_get(archive_config, "OriginServer")
+ description = sql_get(archive_config, "Description")
+ c.execute(archive_add, [name, origin_server, description])
+ self.projectB.commit()
+
+ def do_architecture(self):
+        """Initialize the architecture table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM architecture")
+ arch_add = "INSERT INTO architecture (arch_string, description) VALUES (%s, %s)"
+ for arch in self.Cnf.SubTree("Architectures").List():
+ description = self.Cnf["Architectures::%s" % (arch)]
+ c.execute(arch_add, [arch, description])
+ self.projectB.commit()
+
+ def do_component(self):
+        """Initialize the component table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM component")
+
+ comp_add = "INSERT INTO component (name, description, meets_dfsg) " + \
+ "VALUES (%s, %s, %s)"
+
+ for name in self.Cnf.SubTree("Component").List():
+ component_config = self.Cnf.SubTree("Component::%s" % (name))
+ description = sql_get(component_config, "Description")
+ meets_dfsg = (component_config.get("MeetsDFSG").lower() == "true")
+ c.execute(comp_add, [name, description, meets_dfsg])
+
+ self.projectB.commit()
+
+ def do_location(self):
+        """Initialize the location table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM location")
+
- loc_add_mixed = "INSERT INTO location (path, archive, type) " + \
- "VALUES (%s, %s, %s)"
-
+ loc_add = "INSERT INTO location (path, component, archive, type) " + \
+ "VALUES (%s, %s, %s, %s)"
+
+ for location in self.Cnf.SubTree("Location").List():
+ location_config = self.Cnf.SubTree("Location::%s" % (location))
+ archive_id = self.projectB.get_archive_id(location_config["Archive"])
+ if archive_id == -1:
+ utils.fubar("Archive '%s' for location '%s' not found."
+ % (location_config["Archive"], location))
+ location_type = location_config.get("type")
- if location_type == "legacy-mixed":
- c.execute(loc_add_mixed, [location, archive_id, location_config["type"]])
- elif location_type == "legacy" or location_type == "pool":
++ if location_type == "pool":
+ for component in self.Cnf.SubTree("Component").List():
+ component_id = self.projectB.get_component_id(component)
+ c.execute(loc_add, [location, component_id, archive_id, location_type])
else:
- " table for suite %s."
- % (architecture, suite))
+ utils.fubar("E: type '%s' not recognised in location %s."
+ % (location_type, location))
+
+ self.projectB.commit()
+
+ def do_suite(self):
+        """Initialize the suite table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM suite")
+
+ suite_add = "INSERT INTO suite (suite_name, version, origin, description) " + \
+ "VALUES (%s, %s, %s, %s)"
+
+ sa_add = "INSERT INTO suite_architectures (suite, architecture) " + \
+ "VALUES (currval('suite_id_seq'), %s)"
+
+ for suite in self.Cnf.SubTree("Suite").List():
+ suite_config = self.Cnf.SubTree("Suite::%s" %(suite))
+ version = sql_get(suite_config, "Version")
+ origin = sql_get(suite_config, "Origin")
+ description = sql_get(suite_config, "Description")
+ c.execute(suite_add, [suite.lower(), version, origin, description])
+ for architecture in self.Cnf.ValueList("Suite::%s::Architectures" % (suite)):
+ architecture_id = self.projectB.get_architecture_id (architecture)
+ if architecture_id < 0:
+ utils.fubar("architecture '%s' not found in architecture"
++ " table for suite %s."
++ % (architecture, suite))
+ c.execute(sa_add, [architecture_id])
+
+ self.projectB.commit()
+
+ def do_override_type(self):
+        """Initialize the override_type table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM override_type")
+
+ over_add = "INSERT INTO override_type (type) VALUES (%s)"
+
+ for override_type in self.Cnf.ValueList("OverrideType"):
+ c.execute(over_add, [override_type])
+
+ self.projectB.commit()
+
+ def do_priority(self):
+ """Initialize the priority table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM priority")
+
+ prio_add = "INSERT INTO priority (priority, level) VALUES (%s, %s)"
+
+ for priority in self.Cnf.SubTree("Priority").List():
+ c.execute(prio_add, [priority, self.Cnf["Priority::%s" % (priority)]])
+
+ self.projectB.commit()
+
+ def do_section(self):
+        """Initialize the section table."""
+
+ c = self.projectB.cursor()
+ c.execute("DELETE FROM section")
+
+ sect_add = "INSERT INTO section (section) VALUES (%s)"
+
+ for component in self.Cnf.SubTree("Component").List():
+ if self.Cnf["Control-Overrides::ComponentPosition"] == "prefix":
suffix = ""
- for section in Cnf.ValueList("Section"):
- projectB.query("INSERT INTO section (section) VALUES "
- "('%s%s%s')" % (prefix, section, suffix))
- projectB.query("COMMIT WORK")
+ if component != "main":
+ prefix = component + '/'
+ else:
+ prefix = ""
+ else:
+ prefix = ""
+ if component != "main":
+ suffix = '/' + component
+ else:
+ suffix = ""
+ for section in self.Cnf.ValueList("Section"):
+ c.execute(sect_add, [prefix + section + suffix])
+
+ self.projectB.commit()
+
+ def do_all(self):
+ self.do_archive()
+ self.do_architecture()
+ self.do_component()
+ self.do_location()
+ self.do_suite()
+ self.do_override_type()
+ self.do_priority()
+ self.do_section()
################################################################################
def main ():
"""Sync dak.conf configuartion file and the SQL database"""
- global Cnf, projectB
-
Cnf = utils.get_conf()
arguments = [('h', "help", "Init-DB::Options::Help")]
for i in [ "help" ]:
utils.warn("dak init-db takes no arguments.")
usage(exit_code=1)
- projectB = pg.connect(Cnf["DB::Name"], Cnf["DB::Host"],
- int(Cnf["DB::Port"]))
- database.init(Cnf, projectB)
-
- do_archive()
- do_architecture()
- do_component()
- do_location()
- do_suite()
- do_override_type()
- do_priority()
- do_section()
+ # Just let connection failures be reported to the user
+ projectB = DBConn()
+ Cnf = Config()
+
+ InitDB(Cnf, projectB).do_all()
################################################################################
###############################################################################
--import errno, fcntl, os, sys, time, re
-import apt_pkg
++import errno
++import fcntl
++import os
++import sys
++import time
++import re
+import apt_pkg, commands
from daklib import database
from daklib import logging
from daklib import queue
else:
os.unlink(self.log_filename)
+
###############################################################################
+
def reject (str, prefix="Rejected: "):
global reject_message
if str:
suite_id = database.get_suite_id(suite)
projectB.query("INSERT INTO bin_associations (suite, bin) VALUES (%d, currval('binaries_id_seq'))" % (suite_id))
-
+ if not database.copy_temporary_contents(package, version, files[newfile]):
+ reject("Missing contents for package")
+
- # If the .orig.tar.gz is in a legacy directory we need to poolify
- # it, so that apt-get source (and anything else that goes by the
- # "Directory:" field in the Sources.gz file) works.
- orig_tar_id = Upload.pkg.orig_tar_id
- orig_tar_location = Upload.pkg.orig_tar_location
- legacy_source_untouchable = Upload.pkg.legacy_source_untouchable
- if orig_tar_id and orig_tar_location == "legacy":
- q = projectB.query("SELECT DISTINCT ON (f.id) l.path, f.filename, f.id as files_id, df.source, df.id as dsc_files_id, f.size, f.md5sum FROM files f, dsc_files df, location l WHERE df.source IN (SELECT source FROM dsc_files WHERE file = %s) AND f.id = df.file AND l.id = f.location AND (l.type = 'legacy' OR l.type = 'legacy-mixed')" % (orig_tar_id))
- qd = q.dictresult()
- for qid in qd:
- # Is this an old upload superseded by a newer -sa upload? (See check_dsc() for details)
- if legacy_source_untouchable.has_key(qid["files_id"]):
- continue
- # First move the files to the new location
- legacy_filename = qid["path"] + qid["filename"]
- pool_location = utils.poolify (changes["source"], files[newfile]["component"])
- pool_filename = pool_location + os.path.basename(qid["filename"])
- destination = Cnf["Dir::Pool"] + pool_location
- utils.move(legacy_filename, destination)
- # Then Update the DB's files table
- q = projectB.query("UPDATE files SET filename = '%s', location = '%s' WHERE id = '%s'" % (pool_filename, dsc_location_id, qid["files_id"]))
-
- # If this is a sourceful diff only upload that is moving non-legacy
+ # If this is a sourceful diff only upload that is moving
# cross-component we need to copy the .orig.tar.gz into the new
# component too for the same reasons as above.
#
if changes["architecture"].has_key("source") and orig_tar_id and \
- orig_tar_location != "legacy" and orig_tar_location != dsc_location_id:
+ orig_tar_location != dsc_location_id:
q = projectB.query("SELECT l.path, f.filename, f.size, f.md5sum, f.sha1sum, f.sha256sum FROM files f, location l WHERE f.id = %s AND f.location = l.id" % (orig_tar_id))
ql = q.getresult()[0]
old_filename = ql[0] + ql[1]
utils.copy(pkg.changes_file, Cnf["Dir::Root"] + dest)
for dest in copy_dot_dak.keys():
utils.copy(Upload.pkg.changes_file[:-8]+".dak", dest)
-
projectB.query("COMMIT WORK")
# Move the .changes into the 'done' directory
################################################################################
- import commands, errno, fcntl, os, re, shutil, stat, sys, time, tempfile, traceback, tarfile
-import commands, errno, fcntl, os, re, shutil, stat, sys, time, tempfile, traceback
++import commands
++import errno
++import fcntl
++import os
++import re
++import shutil
++import stat
++import sys
++import time
++import tempfile
++import traceback
++import tarfile
import apt_inst, apt_pkg
-from daklib import database
+from debian_bundle import deb822
+from daklib.dbconn import DBConn
+from daklib.binary import Binary
from daklib import logging
from daklib import queue
from daklib import utils
################################################################################
+def create_tmpdir():
+    """
+    Create a temporary directory that can be used for unpacking files into for
+    checking
+
+    @rtype: string
+    @return: absolute path of the newly created directory
+    """
+    tmpdir = tempfile.mkdtemp()
+    return tmpdir
+
+################################################################################
+
def copy_to_holding(filename):
global in_holding
(source, dest) = args[1:3]
if changes["distribution"].has_key(source):
for arch in changes["architecture"].keys():
- if arch not in Cnf.ValueList("Suite::%s::Architectures" % (source)):
+ if arch not in database.get_suite_architectures(source):
reject("Mapping %s to %s for unreleased architecture %s." % (source, dest, arch),"")
del changes["distribution"][source]
changes["distribution"][dest] = 1
################################################################################
-def check_deb_ar(filename):
- """
- Sanity check the ar of a .deb, i.e. that there is:
-
- 1. debian-binary
- 2. control.tar.gz
- 3. data.tar.gz or data.tar.bz2
-
- in that order, and nothing else.
- """
- cmd = "ar t %s" % (filename)
- (result, output) = commands.getstatusoutput(cmd)
- if result != 0:
- reject("%s: 'ar t' invocation failed." % (filename))
- reject(utils.prefix_multi_line_string(output, " [ar output:] "), "")
- chunks = output.split('\n')
- if len(chunks) != 3:
- reject("%s: found %d chunks, expected 3." % (filename, len(chunks)))
- if chunks[0] != "debian-binary":
- reject("%s: first chunk is '%s', expected 'debian-binary'." % (filename, chunks[0]))
- if chunks[1] != "control.tar.gz":
- reject("%s: second chunk is '%s', expected 'control.tar.gz'." % (filename, chunks[1]))
- if chunks[2] not in [ "data.tar.bz2", "data.tar.gz" ]:
- reject("%s: third chunk is '%s', expected 'data.tar.gz' or 'data.tar.bz2'." % (filename, chunks[2]))
-
-################################################################################
-
def check_files():
global reprocess
has_binaries = 0
has_source = 0
+ cursor = DBConn().cursor()
+ # Check for packages that have moved from one component to another
+ # STU: this should probably be changed to not join on architecture, suite tables but instead to used their cached name->id mappings from DBConn
+ cursor.execute("""PREPARE moved_pkg_q AS
+ SELECT c.name FROM binaries b, bin_associations ba, suite s, location l,
+ component c, architecture a, files f
+ WHERE b.package = $1 AND s.suite_name = $2
+ AND (a.arch_string = $3 OR a.arch_string = 'all')
+ AND ba.bin = b.id AND ba.suite = s.id AND b.architecture = a.id
+ AND f.location = l.id
+ AND l.component = c.id
+ AND b.file = f.id""")
+
for f in file_keys:
# Ensure the file does not already exist in one of the accepted directories
for d in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
default_suite = Cnf.get("Dinstall::DefaultSuite", "Unstable")
architecture = control.Find("Architecture")
upload_suite = changes["distribution"].keys()[0]
- if architecture not in Cnf.ValueList("Suite::%s::Architectures" % (default_suite)) and architecture not in Cnf.ValueList("Suite::%s::Architectures" % (upload_suite)):
+ if architecture not in database.get_suite_architectures(default_suite) and architecture not in database.get_suite_architectures(upload_suite):
reject("Unknown architecture '%s'." % (architecture))
# Ensure the architecture of the .deb is one of the ones
# Check the version and for file overwrites
reject(Upload.check_binary_against_db(f),"")
- check_deb_ar(f)
+ Binary(f).scan_package()
# Checks for a source package...
else:
# Validate the component
component = files[f]["component"]
- component_id = database.get_component_id(component)
+ component_id = DBConn().get_component_id(component)
if component_id == -1:
reject("file '%s' has unknown component '%s'." % (f, component))
continue
# Determine the location
location = Cnf["Dir::Pool"]
- location_id = database.get_location_id (location, component, archive)
+ location_id = DBConn().get_location_id(location, component, archive)
if location_id == -1:
reject("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (component, archive))
files[f]["location id"] = location_id
# Check the md5sum & size against existing files (if any)
files[f]["pool name"] = utils.poolify (changes["source"], files[f]["component"])
- files_id = database.get_files_id(files[f]["pool name"] + f, files[f]["size"], files[f]["md5sum"], files[f]["location id"])
+ files_id = DBConn().get_files_id(files[f]["pool name"] + f, files[f]["size"], files[f]["md5sum"], files[f]["location id"])
if files_id == -1:
reject("INTERNAL ERROR, get_files_id() returned multiple matches for %s." % (f))
elif files_id == -2:
files[f]["files id"] = files_id
# Check for packages that have moved from one component to another
- q = Upload.projectB.query("""
-SELECT c.name FROM binaries b, bin_associations ba, suite s, location l,
- component c, architecture a, files f
- WHERE b.package = '%s' AND s.suite_name = '%s'
- AND (a.arch_string = '%s' OR a.arch_string = 'all')
- AND ba.bin = b.id AND ba.suite = s.id AND b.architecture = a.id
- AND f.location = l.id AND l.component = c.id AND b.file = f.id"""
- % (files[f]["package"], suite,
- files[f]["architecture"]))
- ql = q.getresult()
+ files[f]['suite'] = suite
+ cursor.execute("""EXECUTE moved_pkg_q( %(package)s, %(suite)s, %(architecture)s )""", ( files[f] ) )
+ ql = cursor.fetchone()
if ql:
files[f]["othercomponents"] = ql[0][0]
or pkg.orig_tar_gz == -1:
return
- # Create a temporary directory to extract the source into
- if Options["No-Action"]:
- tmpdir = tempfile.mkdtemp()
- else:
- # We're in queue/holding and can create a random directory.
- tmpdir = "%s" % (os.getpid())
- os.mkdir(tmpdir)
+ tmpdir = create_tmpdir()
# Move into the temporary directory
cwd = os.getcwd()
################################################################################
def lookup_uid_from_fingerprint(fpr):
-    q = Upload.projectB.query("SELECT u.uid, u.name, k.debian_maintainer FROM fingerprint f JOIN keyrings k ON (f.keyring=k.id), uid u WHERE f.uid = u.id AND f.fingerprint = '%s'" % (fpr))
-    qs = q.getresult()
-    if len(qs) == 0:
-        return (None, None, None)
+    """
+    Return the uid, name and isdm flag for a given gpg fingerprint.
+
+    @type fpr: string
+    @param fpr: a 40 byte GPG fingerprint
+
+    @return: (uid, name, isdm), or (None, None, None) if unknown
+    """
+    cursor = DBConn().cursor()
+    # Parameterized query: never interpolate the fingerprint into the SQL
+    # string with '%s' % fpr -- the driver does the quoting itself.
+    cursor.execute( "SELECT u.uid, u.name, k.debian_maintainer FROM fingerprint f JOIN keyrings k ON (f.keyring=k.id), uid u WHERE f.uid = u.id AND f.fingerprint = %s", (fpr,))
+    qs = cursor.fetchone()
+    if qs:
+        return qs
    else:
-        return qs[0]
+        return (None, None, None)
def check_signed_by_key():
"""Ensure the .changes is signed by an authorized uploader."""
if not sponsored and not may_nmu:
source_ids = []
- q = Upload.projectB.query("SELECT s.id, s.version FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = '%s' AND s.dm_upload_allowed = 'yes'" % (changes["source"]))
+ cursor.execute( "SELECT s.id, s.version FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = %(source)s AND s.dm_upload_allowed = 'yes'", changes )
highest_sid, highest_version = None, None
should_reject = True
- for si in q.getresult():
+ while True:
+ si = cursor.fetchone()
+ if not si:
+ break
+
if highest_version == None or apt_pkg.VersionCompare(si[1], highest_version) == 1:
highest_sid = si[0]
highest_version = si[1]
if highest_sid == None:
reject("Source package %s does not have 'DM-Upload-Allowed: yes' in its most recent version" % changes["source"])
else:
- q = Upload.projectB.query("SELECT m.name FROM maintainer m WHERE m.id IN (SELECT su.maintainer FROM src_uploaders su JOIN source s ON (s.id = su.source) WHERE su.source = %s)" % (highest_sid))
- for m in q.getresult():
+
+ cursor.execute("SELECT m.name FROM maintainer m WHERE m.id IN (SELECT su.maintainer FROM src_uploaders su JOIN source s ON (s.id = su.source) WHERE su.source = %s)" % (highest_sid))
+
+ while True:
+ m = cursor.fetchone()
+ if not m:
+ break
+
(rfc822, rfc2047, name, email) = utils.fix_maintainer(m[0])
if email == uid_email or name == uid_name:
should_reject=False
for b in changes["binary"].keys():
for suite in changes["distribution"].keys():
- suite_id = database.get_suite_id(suite)
- q = Upload.projectB.query("SELECT DISTINCT s.source FROM source s JOIN binaries b ON (s.id = b.source) JOIN bin_associations ba On (b.id = ba.bin) WHERE b.package = '%s' AND ba.suite = %s" % (b, suite_id))
- for s in q.getresult():
+ suite_id = DBConn().get_suite_id(suite)
+
+ cursor.execute("SELECT DISTINCT s.source FROM source s JOIN binaries b ON (s.id = b.source) JOIN bin_associations ba On (b.id = ba.bin) WHERE b.package = %(package)s AND ba.suite = %(suite)s" , {'package':b, 'suite':suite_id} )
+ while True:
+ s = cursor.fetchone()
+ if not s:
+ break
+
if s[0] != changes["source"]:
reject("%s may not hijack %s from source package %s in suite %s" % (uid, b, s, suite))
################################################################################
def is_unembargo ():
-    q = Upload.projectB.query(
-      "SELECT package FROM disembargo WHERE package = '%s' AND version = '%s'" %
-      (changes["source"], changes["version"]))
-    ql = q.getresult()
-    if ql:
+    cursor = DBConn().cursor()
+    cursor.execute( "SELECT package FROM disembargo WHERE package = %(source)s AND version = %(version)s", changes )
+    if cursor.fetchone():
        return 1
    oldcwd = os.getcwd()
    if changes["architecture"].has_key("source"):
        if Options["No-Action"]: return 1
-        Upload.projectB.query(
-          "INSERT INTO disembargo (package, version) VALUES ('%s', '%s')" %
-          (changes["source"], changes["version"]))
+        # The changes dict has "source"/"version" keys (there is no
+        # "package" key), and pyformat placeholders must not be quoted --
+        # the driver adds the quoting itself.
+        cursor.execute( "INSERT INTO disembargo (package, version) VALUES (%(source)s, %(version)s)",
+                        changes )
+        cursor.execute( "COMMIT" )
        return 1
    return 0
return 0
if not changes["architecture"].has_key("source"):
- pusuite = database.get_suite_id("proposed-updates")
- q = Upload.projectB.query(
- "SELECT S.source FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = '%s' AND s.version = '%s' AND sa.suite = %d" %
- (changes["source"], changes["version"], pusuite))
- ql = q.getresult()
- if ql:
+ pusuite = DBConn().get_suite_id("proposed-updates")
+ cursor = DBConn().cursor()
+ cursor.execute( """SELECT 1 FROM source s
+ JOIN src_associations sa ON (s.id = sa.source)
+ WHERE s.source = %(source)s
+ AND s.version = '%(version)s'
+ AND sa.suite = %(suite)d""",
+ {'source' : changes['source'],
+ 'version' : changes['version'],
+ 'suite' : pasuite})
+
+ if cursor.fetchone():
# source is already in proposed-updates so no need to hold
return 0
return 0
if not changes["architecture"].has_key("source"):
- pusuite = database.get_suite_id("oldstable-proposed-updates")
- q = Upload.projectB.query(
- "SELECT S.source FROM source s JOIN src_associations sa ON (s.id = sa.source) WHERE s.source = '%s' AND s.version = '%s' AND sa.suite = %d" %
- (changes["source"], changes["version"], pusuite))
- ql = q.getresult()
- if ql:
- # source is already in oldstable-proposed-updates so no need to hold
+ pusuite = DBConn().get_suite_id("oldstable-proposed-updates")
+ cursor = DBConn().cursor()
+ cursor.execute( """"SELECT 1 FROM source s
+ JOIN src_associations sa ON (s.id = sa.source)
+ WHERE s.source = %(source)s
+ AND s.version = %(version)s
+ AND sa.suite = %d""",
+ {'source' : changes['source'],
+ 'version' : changes['version'],
+ 'suite' : pasuite})
+ if cursor.fetchone():
return 0
return 1
################################################################################
--import commands, os, pg, re, sys
--import apt_pkg, apt_inst
++import commands
++import os
++import pg
++import re
++import sys
++import apt_pkg
++import apt_inst
from daklib import database
from daklib import utils
from daklib.dak_exceptions import *
if arches:
all_arches = set(arches)
else:
- all_arches = set(Cnf.ValueList("Suite::%s::Architectures" % suites[0]))
+ all_arches = set(database.get_suite_architectures(suites[0]))
all_arches -= set(["source", "all"])
for architecture in all_arches:
deps = {}
""" DB access functions
@group readonly: get_suite_id, get_section_id, get_priority_id, get_override_type_id,
get_architecture_id, get_archive_id, get_component_id, get_location_id,
- get_source_id, get_suite_version, get_files_id, get_maintainer, get_suites
+ get_source_id, get_suite_version, get_files_id, get_maintainer, get_suites,
+ get_suite_architectures
@group read/write: get_or_set*, set_files_id
@contact: Debian FTP Master <ftpmaster@debian.org>
maintainer_id_cache = {} #: cache for maintainers
keyring_id_cache = {} #: cache for keyrings
source_id_cache = {} #: cache for sources
++
files_id_cache = {} #: cache for files
maintainer_cache = {} #: cache for maintainer names
fingerprint_id_cache = {} #: cache for fingerprints
queue_id_cache = {} #: cache for queues
uid_id_cache = {} #: cache for uids
suite_version_cache = {} #: cache for suite_versions (packages)
+suite_bin_version_cache = {}
+cache_preloaded = False
################################################################################
@return: the version for I{source} in I{suite}
"""
+
global suite_version_cache
cache_key = "%s_%s" % (source, suite)
return version
-
+def get_latest_binary_version_id(binary, section, suite, arch):
+    """
+    Return the highest binaries.id for C{binary} in C{suite}, checking the
+    cache for C{arch} first and falling back to the 'all' architecture.
+
+    Results are memoised in suite_bin_version_cache.  Once
+    preload_binary_id_cache() has run, a cache miss means the package does
+    not exist and None is returned without querying; an uncached query that
+    finds no rows returns False.
+    """
+    global suite_bin_version_cache
+    cache_key = "%s_%s_%s_%s" % (binary, section, suite, arch)
+    cache_key_all = "%s_%s_%s_%s" % (binary, section, suite, get_architecture_id("all"))
+
+    # Check for the cache hit for its arch, then arch all
+    if suite_bin_version_cache.has_key(cache_key):
+        return suite_bin_version_cache[cache_key]
+    if suite_bin_version_cache.has_key(cache_key_all):
+        return suite_bin_version_cache[cache_key_all]
+    if cache_preloaded == True:
+        return # package does not exist
+
+    q = projectB.query("SELECT DISTINCT b.id FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.package = '%s' AND b.architecture = '%d' AND ba.suite = '%d' AND o.section = '%d'" % (binary, int(arch), int(suite), int(section)))
+
+    if not q.getresult():
+        return False
+
+    highest_bid = q.getresult()[0][0]
+
+    suite_bin_version_cache[cache_key] = highest_bid
+    return highest_bid
+
+def preload_binary_id_cache():
+    """
+    Populate suite_bin_version_cache with every
+    (package, section, suite, arch) -> binaries.id mapping, then set
+    cache_preloaded so get_latest_binary_version_id() can treat a cache
+    miss as "package does not exist".
+    """
+    global suite_bin_version_cache, cache_preloaded
+
+    # Get suite info
+    q = projectB.query("SELECT id FROM suite")
+    suites = q.getresult()
+
+    # Get arch mappings
+    q = projectB.query("SELECT id FROM architecture")
+    arches = q.getresult()
+
+    # One query per (suite, arch) pair; each row caches the binary id under
+    # the same key format used by get_latest_binary_version_id().
+    for suite in suites:
+        for arch in arches:
+            q = projectB.query("SELECT DISTINCT b.id, b.package, o.section FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.architecture = '%d' AND ba.suite = '%d'" % (int(arch[0]), int(suite[0])))
+
+            for bi in q.getresult():
+                cache_key = "%s_%s_%s_%s" % (bi[1], bi[2], suite[0], arch[0])
+                suite_bin_version_cache[cache_key] = int(bi[0])
+
+    cache_preloaded = True
+
+def get_suite_architectures(suite):
+    """
+    Returns list of architectures for C{suite}.
+
+    @type suite: string, int
+    @param suite: the suite name or the suite_id
+
+    @rtype: list
+    @return: the list of architectures for I{suite}, or None when C{suite}
+             is neither a string nor an int
+    """
+
+    # Accept either the suite name or the numeric id.
+    suite_id = None
+    if type(suite) == str:
+        suite_id = get_suite_id(suite)
+    elif type(suite) == int:
+        suite_id = suite
+    else:
+        return None
+
+    sql = """ SELECT a.arch_string FROM suite_architectures sa
+              JOIN architecture a ON (a.id = sa.architecture)
+              WHERE suite='%s' """ % (suite_id)
+
+    q = projectB.query(sql)
+    return map(lambda x: x[0], q.getresult())
+
################################################################################
def get_or_set_maintainer_id (maintainer):
q = projectB.query(sql)
return map(lambda x: x[0], q.getresult())
- message = utils.TemplateSubst(Subst, Cnf["Dir::Templates"]+"/bts-categorize")
+
+
+################################################################################
+
+def copy_temporary_contents(package, version, deb):
+    """
+    Copy the previously stored contents from the temp table to the permanent one.
+
+    During process-unchecked, the deb should have been scanned and the
+    contents stored in temp_content_associations.
+
+    @return: truthy when contents were found (or freshly inserted) and copied
+    """
+
+    # first see if contents exist:
+    exists = projectB.query("""SELECT 1 FROM temp_content_associations
+                               WHERE package='%s' LIMIT 1""" % package ).getresult()
+
+    if not exists:
+        # This should NOT happen.  We should have added contents
+        # during process-unchecked.  If it did, log an error, and send
+        # an email.
+        subst = {
+            "__PACKAGE__": package,
+            "__VERSION__": version,
++            "__TO_ADDRESS__": Cnf["Dinstall::MyAdminAddress"],
+            "__DAK_ADDRESS__": Cnf["Dinstall::MyEmailAddress"]
+        }
+
++        message = utils.TemplateSubst(subst, Cnf["Dir::Templates"]+"/missing-contents")
+        utils.send_mail( message )
+
+        # Fall back to scanning the deb now so the copy below can proceed.
+        exists = DBConn().insert_content_path(package, version, deb)
+
+    if exists:
+        sql = """INSERT INTO content_associations(binary_pkg,filepath,filename)
+        SELECT currval('binaries_id_seq'), filepath, filename FROM temp_content_associations
+        WHERE package='%s'
+        AND version='%s'""" % (package, version)
+        projectB.query(sql)
+        projectB.query("""DELETE from temp_content_associations
+        WHERE package='%s'
+        AND version='%s'""" % (package, version))
+
+    return exists
import apt_pkg
import database
import time
+import tarfile
import re
import string
import email as modemail
if len(args) >= 1:
timestamp = args[0]
if timestamp.count("T") == 0:
- expiredate = time.strftime("%Y-%m-%d", time.gmtime(timestamp))
+ try:
+ expiredate = time.strftime("%Y-%m-%d", time.gmtime(float(timestamp)))
+ except ValueError:
+ expiredate = "unknown (%s)" % (timestamp)
else:
expiredate = timestamp
reject("The key used to sign %s has expired on %s" % (sig_filename, expiredate))
apt_pkg.ReadConfigFileISC(Cnf,which_conf_file())
################################################################################
+
+def generate_contents_information(filename):
+    """
+    Generate a list of files contained in a .deb
+
+    @type filename: string
+    @param filename: the path to a .deb (an ar archive whose third member
+                     is data.tar.gz or data.tar.bz2)
+
+    @rtype: list
+    @return: a list of files in the data.tar.* portion of the .deb
+    """
+    cmd = "ar t %s" % (filename)
+    (result, output) = commands.getstatusoutput(cmd)
+    if result != 0:
+        reject("%s: 'ar t' invocation failed." % (filename))
+        reject(utils.prefix_multi_line_string(output, " [ar output:] "), "")
+
+    # Ugh ... this is ugly ... Code ripped from process_unchecked.py
+    chunks = output.split('\n')
+
+    contents = []
+    try:
+        cmd = "ar x %s %s" % (filename, chunks[2])
+        (result, output) = commands.getstatusoutput(cmd)
+        if result != 0:
+            reject("%s: '%s' invocation failed." % (filename, cmd))
+            reject(utils.prefix_multi_line_string(output, " [ar output:] "), "")
+
+        # Got deb tarballs, now lets go through and determine what bits
+        # and pieces the deb had ...
+        if chunks[2] == "data.tar.gz":
+            data = tarfile.open("data.tar.gz", "r:gz")
+        elif chunks[2] == "data.tar.bz2":
+            data = tarfile.open("data.tar.bz2", "r:bz2")
+        else:
+            os.remove(chunks[2])
+            reject("couldn't find data.tar.*")
+
+        for tarinfo in data:
+            if not tarinfo.isdir():
+                contents.append(tarinfo.name[2:])
+
+    finally:
+        # The extracted ar member is a plain file: os.remove() is the right
+        # cleanup.  (shutil.rmtree() would fail on a non-directory -- and
+        # shutil isn't even imported here -- while removing the same path
+        # twice raised OSError.)
+        if os.path.exists( chunks[2] ):
+            os.remove( chunks[2] )
+
+    return contents
+
+###############################################################################
--- /dev/null
+From: __DAK_ADDRESS__
++To: __TO_ADDRESS__
+X-Debian: DAK
+X-Debian-Package: __PACKAGE__
+MIME-Version: 1.0
+Content-Type: text/plain; charset="utf-8"
+Content-Transfer-Encoding: 8bit
+Subject: Missing contents for __PACKAGE__ in accepted queue
+
+While processing the accepted queue, I didn't have contents in the
+database for __PACKAGE__ version __VERSION__. These contents should
+have been put into the database by process-unchecked when the package
+first arrived.
+
+This is probably stew's fault.