"Generates Maintainers file for BTS etc"),
("make-overrides",
"Generates override files"),
- ("mirror-split",
- "Split the pool/ by architecture groups"),
("poolize",
"Move packages from dists/ to pool/"),
("reject-proposed-updates",
# Read a file, strip the signature and return the modified contents as
# a string.
def strip_pgp_signature (filename):
- file = utils.open_file (filename)
+ inputfile = utils.open_file (filename)
contents = ""
inside_signature = 0
skip_next = 0
- for line in file.readlines():
+ for line in inputfile.readlines():
if line[:-1] == "":
continue
if inside_signature:
inside_signature = 0
continue
contents += line
- file.close()
+ inputfile.close()
return contents
def display_changes(suite, changes_filename):
for arch in AptCnf["tree::%s::Architectures" % (tree)].split():
if arch == "source":
filepath = "%s/%s/Sources" % (sec, arch)
- for file in compressnames("tree::%s" % (tree), "Sources", filepath):
- files.append(file)
+ for cfile in compressnames("tree::%s" % (tree), "Sources", filepath):
+ files.append(cfile)
add_tiffani(files, Cnf["Dir::Root"] + tree, filepath)
else:
disks = "%s/disks-%s" % (sec, arch)
files.append("%s/%s/md5sum.txt" % (disks, dir))
filepath = "%s/binary-%s/Packages" % (sec, arch)
- for file in compressnames("tree::%s" % (tree), "Packages", filepath):
- files.append(file)
+ for cfile in compressnames("tree::%s" % (tree), "Packages", filepath):
+ files.append(cfile)
add_tiffani(files, Cnf["Dir::Root"] + tree, filepath)
if arch == "source":
for arch in AptCnf["tree::%s/%s::Architectures" % (tree,dis)].split():
if arch != "source": # always true
- for file in compressnames("tree::%s/%s" % (tree,dis),
+ for cfile in compressnames("tree::%s/%s" % (tree,dis),
"Packages",
"%s/%s/binary-%s/Packages" % (dis, sec, arch)):
- files.append(file)
+ files.append(cfile)
elif AptCnf.has_key("tree::%s::FakeDI" % (tree)):
usetree = AptCnf["tree::%s::FakeDI" % (tree)]
sec = AptCnf["tree::%s/main::Sections" % (usetree)].split()[0]
for arch in AptCnf["tree::%s/main::Architectures" % (usetree)].split():
if arch != "source": # always true
- for file in compressnames("tree::%s/main" % (usetree), "Packages", "main/%s/binary-%s/Packages" % (sec, arch)):
- files.append(file)
+ for cfile in compressnames("tree::%s/main" % (usetree), "Packages", "main/%s/binary-%s/Packages" % (sec, arch)):
+ files.append(cfile)
elif AptCnf.has_key("bindirectory::%s" % (tree)):
- for file in compressnames("bindirectory::%s" % (tree), "Packages", AptCnf["bindirectory::%s::Packages" % (tree)]):
- files.append(file.replace(tree+"/","",1))
- for file in compressnames("bindirectory::%s" % (tree), "Sources", AptCnf["bindirectory::%s::Sources" % (tree)]):
- files.append(file.replace(tree+"/","",1))
+ for cfile in compressnames("bindirectory::%s" % (tree), "Packages", AptCnf["bindirectory::%s::Packages" % (tree)]):
+ files.append(cfile.replace(tree+"/","",1))
+ for cfile in compressnames("bindirectory::%s" % (tree), "Sources", AptCnf["bindirectory::%s::Sources" % (tree)]):
+ files.append(cfile.replace(tree+"/","",1))
else:
print "ALERT: no tree/bindirectory for %s" % (tree)
byname = {}
byid = {}
q = projectB.query("SELECT id, uid, name FROM uid")
- for (id, uid, name) in q.getresult():
- byname[uid] = (id, name)
- byid[id] = (uid, name)
+ for (keyid, uid, name) in q.getresult():
+ byname[uid] = (keyid, name)
+ byid[keyid] = (uid, name)
return (byname, byid)
def get_fingerprint_info():
uid = entry["uid"][0]
name = get_ldap_name(entry)
fingerprints = entry["keyFingerPrint"]
- id = None
+ keyid = None
for f in fingerprints:
key = fpr_lookup.get(f, None)
if key not in keys: continue
keys[key]["uid"] = uid
- if id != None: continue
- id = database.get_or_set_uid_id(uid)
- byuid[id] = (uid, name)
- byname[uid] = (id, name)
+ if keyid != None: continue
+ keyid = database.get_or_set_uid_id(uid)
+ byuid[keyid] = (uid, name)
+ byname[uid] = (keyid, name)
return (byname, byuid)
keys[x]["uid"] = format % "invalid-uid"
else:
uid = format % keys[x]["email"]
- id = database.get_or_set_uid_id(uid)
- byuid[id] = (uid, keys[x]["name"])
- byname[uid] = (id, keys[x]["name"])
+ keyid = database.get_or_set_uid_id(uid)
+ byuid[keyid] = (uid, keys[x]["name"])
+ byname[uid] = (keyid, keys[x]["name"])
keys[x]["uid"] = uid
if any_invalid:
uid = format % "invalid-uid"
- id = database.get_or_set_uid_id(uid)
- byuid[id] = (uid, "ungeneratable user id")
- byname[uid] = (id, "ungeneratable user id")
+ keyid = database.get_or_set_uid_id(uid)
+ byuid[keyid] = (uid, "ungeneratable user id")
+ byname[uid] = (keyid, "ungeneratable user id")
return (byname, byuid)
################################################################################
(db_uid_byname, db_uid_byid) = get_uid_info()
### Update full names of applicable users
- for id in desuid_byid.keys():
- uid = (id, desuid_byid[id][0])
- name = desuid_byid[id][1]
- oname = db_uid_byid[id][1]
+ for keyid in desuid_byid.keys():
+ uid = (keyid, desuid_byid[keyid][0])
+ name = desuid_byid[keyid][1]
+ oname = db_uid_byid[keyid][1]
if name and oname != name:
changes.append((uid[1], "Full name: %s" % (name)))
projectB.query("UPDATE uid SET name = '%s' WHERE id = %s" %
- (pg.escape_string(name), id))
+ (pg.escape_string(name), keyid))
# The fingerprint table (fpr) points to a uid and a keyring.
# If the uid is being decided here (ldap/generate) we set it to it.
fpr = {}
for z in keyring.keys.keys():
- id = db_uid_byname.get(keyring.keys[z].get("uid", None), [None])[0]
- if id == None:
- id = db_fin_info.get(keyring.keys[z]["fingerprints"][0], [None])[0]
+ keyid = db_uid_byname.get(keyring.keys[z].get("uid", None), [None])[0]
+ if keyid == None:
+ keyid = db_fin_info.get(keyring.keys[z]["fingerprints"][0], [None])[0]
for y in keyring.keys[z]["fingerprints"]:
- fpr[y] = (id,keyring_id)
+ fpr[y] = (keyid,keyring_id)
# For any keys that used to be in this keyring, disassociate them.
# We don't change the uid, leaving that for historical info; if
print "Assigning %s to 0x%s." % (uid, fingerprint)
elif existing_uid == uid:
pass
- elif '@' not in existing_ui:
+ elif '@' not in existing_uid:
q = projectB.query("UPDATE fingerprint SET uid = %s WHERE id = %s" % (uid_id, fingerprint_id))
print "Promoting DM %s to DD %s with keyid 0x%s." % (existing_uid, uid, fingerprint)
else:
# Process any additional Maintainer files (e.g. from pseudo packages)
for filename in extra_files:
- file = utils.open_file(filename)
- for line in file.readlines():
+ extrafile = utils.open_file(filename)
+ for line in extrafile.readlines():
line = re_comments.sub('', line).strip()
if line == "":
continue
if not packages.has_key(package) or version == '*' \
or apt_pkg.VersionCompare(packages[package]["version"], version) < 0:
packages[package] = { "maintainer": maintainer, "version": version }
- file.close()
+ extrafile.close()
package_keys = packages.keys()
package_keys.sort()
override_suite = Cnf["Suite::%s::OverrideCodeName" % (suite)]
for component in Cnf.SubTree("Component").List():
if component == "mixed":
- continue; # Ick
+ continue # Ick
for otype in Cnf.ValueList("OverrideType"):
if otype == "deb":
suffix = ""
elif otype == "udeb":
if component == "contrib":
- continue; # Ick2
+ continue # Ick2
suffix = ".debian-installer"
elif otype == "dsc":
suffix = ".src"
output = utils.open_file(filename, "w")
# Generate the final list of files
files = {}
- for id in list:
- path = packages[id]["path"]
- filename = packages[id]["filename"]
- file_id = packages[id]["file_id"]
+ for fileid in list:
+ path = packages[fileid]["path"]
+ filename = packages[fileid]["filename"]
+ file_id = packages[fileid]["file_id"]
if suite == "stable" and dislocated_files.has_key(file_id):
filename = dislocated_files[file_id]
else:
keys = files.keys()
keys.sort()
# Write the list of files out
- for file in keys:
- output.write(file+'\n')
+ for outfile in keys:
+ output.write(outfile+'\n')
output.close()
############################################################
output = utils.open_file(filename, "w")
# Generate the final list of files
files = {}
- for id in list:
- path = packages[id]["path"]
- filename = packages[id]["filename"]
- file_id = packages[id]["file_id"]
- pkg = packages[id]["pkg"]
+ for fileid in list:
+ path = packages[fileid]["path"]
+ filename = packages[fileid]["filename"]
+ file_id = packages[fileid]["file_id"]
+ pkg = packages[fileid]["pkg"]
if suite == "stable" and dislocated_files.has_key(file_id):
filename = dislocated_files[file_id]
else:
suite = packages[unique_id]["suite"]
component = packages[unique_id]["component"]
arch = packages[unique_id]["arch"]
- type = packages[unique_id]["type"]
+    packagetype = packages[unique_id]["filetype"]
d.setdefault(suite, {})
d[suite].setdefault(component, {})
d[suite][component].setdefault(arch, {})
- d[suite][component][arch].setdefault(type, [])
- d[suite][component][arch][type].append(unique_id)
+ d[suite][component][arch].setdefault(packagetype, [])
+ d[suite][component][arch][packagetype].append(unique_id)
# Flesh out the index
if not Options["Suite"]:
suites = Cnf.SubTree("Suite").List()
else:
components = utils.split_args(Options["Component"])
udeb_components = Cnf.ValueList("Suite::%s::UdebComponents" % (suite))
- udeb_components = udeb_components
for component in components:
d[suite].setdefault(component, {})
if component in udeb_components:
types = [ "dsc" ]
else:
types = binary_types
- for type in types:
- d[suite][component][arch].setdefault(type, [])
+ for packagetype in types:
+ d[suite][component][arch].setdefault(packagetype, [])
# Then walk it
for suite in d.keys():
if Cnf.has_key("Suite::%s::Components" % (suite)):
for arch in d[suite][component].keys():
if arch == "all":
continue
- for type in d[suite][component][arch].keys():
- list = d[suite][component][arch][type]
+ for packagetype in d[suite][component][arch].keys():
+ filelist = d[suite][component][arch][packagetype]
# If it's a binary, we need to add in the arch: all debs too
if arch != "source":
archall_suite = Cnf.get("Make-Suite-File-List::ArchAllMap::%s" % (suite))
if archall_suite:
- list.extend(d[archall_suite][component]["all"][type])
+ filelist.extend(d[archall_suite][component]["all"][packagetype])
elif d[suite][component].has_key("all") and \
- d[suite][component]["all"].has_key(type):
- list.extend(d[suite][component]["all"][type])
- write_filelist(suite, component, arch, type, list,
+ d[suite][component]["all"].has_key(packagetype):
+ filelist.extend(d[suite][component]["all"][packagetype])
+ write_filelist(suite, component, arch, packagetype, filelist,
packages, dislocated_files)
else: # legacy-mixed suite
- list = []
+ filelist = []
for component in d[suite].keys():
for arch in d[suite][component].keys():
- for type in d[suite][component][arch].keys():
- list.extend(d[suite][component][arch][type])
- write_legacy_mixed_filelist(suite, list, packages, dislocated_files)
+ for packagetype in d[suite][component][arch].keys():
+ filelist.extend(d[suite][component][arch][packagetype])
+ write_legacy_mixed_filelist(suite, filelist, packages, dislocated_files)
################################################################################
packages = {}
unique_id = 0
for i in ql:
- (id, pkg, arch, version, path, filename, component, file_id, suite, type) = i
+ (sourceid, pkg, arch, version, path, filename, component, file_id, suite, filetype) = i
# 'id' comes from either 'binaries' or 'source', so it's not unique
unique_id += 1
- packages[unique_id] = Dict(id=id, pkg=pkg, arch=arch, version=version,
+ packages[unique_id] = Dict(sourceid=sourceid, pkg=pkg, arch=arch, version=version,
path=path, filename=filename,
component=component, file_id=file_id,
- suite=suite, type = type)
+                               suite=suite, filetype=filetype)
cleanup(packages)
write_filelists(packages, dislocated_files)
+++ /dev/null
-#!/usr/bin/env python
-
-# Prepare and maintain partial trees by architecture
-# Copyright (C) 2004, 2006 Daniel Silverstone <dsilvers@digital-scurf.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-
-###############################################################################
-## <kinnison> So Martin, do you have a quote for me yet?
-## <tbm> Make something damned stupid up and attribute it to me, that's okay
-###############################################################################
-
-import sys
-import apt_pkg
-
-from stat import S_ISDIR, S_ISLNK, S_ISREG
-import os
-import cPickle
-
-import daklib.utils
-
-## Master path is the main repository
-#MASTER_PATH = "/org/ftp.debian.org/scratch/dsilvers/master"
-
-MASTER_PATH = "***Configure Mirror-Split::FTPPath Please***"
-TREE_ROOT = "***Configure Mirror-Split::TreeRootPath Please***"
-TREE_DB_ROOT = "***Configure Mirror-Split::TreeDatabasePath Please***"
-trees = []
-
-Cnf = None
-
-###############################################################################
-# A MirrorSplitTarget is a representation of a target. It is a set of archs, a path
-# and whether or not the target includes source.
-##################
-
-class MirrorSplitTarget:
- def __init__(self, name, archs, source):
- self.name = name
- self.root = "%s/%s" % (TREE_ROOT,name)
- self.archs = archs.split(",")
- self.source = source
- self.dbpath = "%s/%s.db" % (TREE_DB_ROOT,name)
- self.db = MirrorSplitDB()
- if os.path.exists( self.dbpath ):
- self.db.load_from_file( self.dbpath )
-
- ## Save the db back to disk
- def save_db(self):
- self.db.save_to_file( self.dbpath )
-
- ## Returns true if it's a poolish match
- def poolish_match(self, path):
- for a in self.archs:
- if path.endswith( "_%s.deb" % (a) ):
- return 1
- if path.endswith( "_%s.udeb" % (a) ):
- return 1
- if self.source:
- if (path.endswith( ".tar.gz" ) or
- path.endswith( ".diff.gz" ) or
- path.endswith( ".dsc" )):
- return 1
- return 0
-
- ## Returns false if it's a badmatch distswise
- def distish_match(self,path):
- for a in self.archs:
- if path.endswith("/Contents-%s.gz" % (a)):
- return 1
- if path.find("/binary-%s/" % (a)) != -1:
- return 1
- if path.find("/installer-%s/" % (a)) != -1:
- return 1
- if path.find("/source/") != -1:
- if self.source:
- return 1
- else:
- return 0
- if path.find("/Contents-") != -1:
- return 0
- if path.find("/binary-") != -1:
- return 0
- if path.find("/installer-") != -1:
- return 0
- return 1
-
-##############################################################################
-# The applicable function is basically a predicate. Given a path and a
-# target object its job is to decide if the path conforms for the
-# target and thus is wanted.
-#
-# 'verbatim' is a list of files which are copied regardless
-# it should be loaded from a config file eventually
-##################
-
-verbatim = [
- ]
-
-verbprefix = [
- "/tools/",
- "/README",
- "/doc/"
- ]
-
-def applicable(path, target):
- if path.startswith("/pool/"):
- return target.poolish_match(path)
- if (path.startswith("/dists/") or
- path.startswith("/project/experimental/")):
- return target.distish_match(path)
- if path in verbatim:
- return 1
- for prefix in verbprefix:
- if path.startswith(prefix):
- return 1
- return 0
-
-
-##############################################################################
-# A MirrorSplitDir is a representation of a tree.
-# It distinguishes files dirs and links
-# Dirs are dicts of (name, MirrorSplitDir)
-# Files are dicts of (name, inode)
-# Links are dicts of (name, target)
-##############
-
-class MirrorSplitDir:
- def __init__(self):
- self.dirs = {}
- self.files = {}
- self.links = {}
-
-##############################################################################
-# A MirrorSplitDB is a container for a MirrorSplitDir...
-##############
-
-class MirrorSplitDB:
- ## Initialise a MirrorSplitDB as containing nothing
- def __init__(self):
- self.root = MirrorSplitDir()
-
- def _internal_recurse(self, path):
- bdir = MirrorSplitDir()
- dl = os.listdir( path )
- dl.sort()
- dirs = []
- for ln in dl:
- lnl = os.lstat( "%s/%s" % (path, ln) )
- if S_ISDIR(lnl[0]):
- dirs.append(ln)
- elif S_ISLNK(lnl[0]):
- bdir.links[ln] = os.readlink( "%s/%s" % (path, ln) )
- elif S_ISREG(lnl[0]):
- bdir.files[ln] = lnl[1]
- else:
- daklib.utils.fubar( "Confused by %s/%s -- not a dir, link or file" %
- ( path, ln ) )
- for d in dirs:
- bdir.dirs[d] = self._internal_recurse( "%s/%s" % (path,d) )
-
- return bdir
-
- ## Recurse through a given path, setting the sequence accordingly
- def init_from_dir(self, dirp):
- self.root = self._internal_recurse( dirp )
-
- ## Load this MirrorSplitDB from file
- def load_from_file(self, fname):
- f = open(fname, "r")
- self.root = cPickle.load(f)
- f.close()
-
- ## Save this MirrorSplitDB to a file
- def save_to_file(self, fname):
- f = open(fname, "w")
- cPickle.dump( self.root, f, 1 )
- f.close()
-
-
-##############################################################################
-# Helper functions for the tree syncing...
-##################
-
-def _pth(a,b):
- return "%s/%s" % (a,b)
-
-def do_mkdir(targ,path):
- if not os.path.exists( _pth(targ.root, path) ):
- os.makedirs( _pth(targ.root, path) )
-
-def do_mkdir_f(targ,path):
- do_mkdir(targ, os.path.dirname(path))
-
-def do_link(targ,path):
- do_mkdir_f(targ,path)
- os.link( _pth(MASTER_PATH, path),
- _pth(targ.root, path))
-
-def do_symlink(targ,path,link):
- do_mkdir_f(targ,path)
- os.symlink( link, _pth(targ.root, path) )
-
-def do_unlink(targ,path):
- os.unlink( _pth(targ.root, path) )
-
-def do_unlink_dir(targ,path):
- os.system( "rm -Rf '%s'" % _pth(targ.root, path) )
-
-##############################################################################
-# Reconciling a target with the sourcedb
-################
-
-def _internal_reconcile( path, srcdir, targdir, targ ):
- # Remove any links in targdir which aren't in srcdir
- # Or which aren't applicable
- rm = []
- for k in targdir.links.keys():
- if applicable( _pth(path, k), targ ):
- if not srcdir.links.has_key(k):
- rm.append(k)
- else:
- rm.append(k)
- for k in rm:
- #print "-L-", _pth(path,k)
- do_unlink(targ, _pth(path,k))
- del targdir.links[k]
-
- # Remove any files in targdir which aren't in srcdir
- # Or which aren't applicable
- rm = []
- for k in targdir.files.keys():
- if applicable( _pth(path, k), targ ):
- if not srcdir.files.has_key(k):
- rm.append(k)
- else:
- rm.append(k)
- for k in rm:
- #print "-F-", _pth(path,k)
- do_unlink(targ, _pth(path,k))
- del targdir.files[k]
-
- # Remove any dirs in targdir which aren't in srcdir
- rm = []
- for k in targdir.dirs.keys():
- if not srcdir.dirs.has_key(k):
- rm.append(k)
- for k in rm:
- #print "-D-", _pth(path,k)
- do_unlink_dir(targ, _pth(path,k))
- del targdir.dirs[k]
-
- # Add/update files
- for k in srcdir.files.keys():
- if applicable( _pth(path,k), targ ):
- if not targdir.files.has_key(k):
- #print "+F+", _pth(path,k)
- do_link( targ, _pth(path,k) )
- targdir.files[k] = srcdir.files[k]
- else:
- if targdir.files[k] != srcdir.files[k]:
- #print "*F*", _pth(path,k)
- do_unlink( targ, _pth(path,k) )
- do_link( targ, _pth(path,k) )
- targdir.files[k] = srcdir.files[k]
-
- # Add/update links
- for k in srcdir.links.keys():
- if applicable( _pth(path,k), targ ):
- if not targdir.links.has_key(k):
- targdir.links[k] = srcdir.links[k];
- #print "+L+",_pth(path,k), "->", srcdir.links[k]
- do_symlink( targ, _pth(path,k), targdir.links[k] )
- else:
- if targdir.links[k] != srcdir.links[k]:
- do_unlink( targ, _pth(path,k) )
- targdir.links[k] = srcdir.links[k]
- #print "*L*", _pth(path,k), "to ->", srcdir.links[k]
- do_symlink( targ, _pth(path,k), targdir.links[k] )
-
- # Do dirs
- for k in srcdir.dirs.keys():
- if not targdir.dirs.has_key(k):
- targdir.dirs[k] = MirrorSplitDir()
- #print "+D+", _pth(path,k)
- _internal_reconcile( _pth(path,k), srcdir.dirs[k],
- targdir.dirs[k], targ )
-
-
-def reconcile_target_db( src, targ ):
- _internal_reconcile( "", src.root, targ.db.root, targ )
-
-###############################################################################
-
-def load_config():
- global MASTER_PATH
- global TREE_ROOT
- global TREE_DB_ROOT
- global trees
-
- MASTER_PATH = Cnf["Mirror-Split::FTPPath"]
- TREE_ROOT = Cnf["Mirror-Split::TreeRootPath"]
- TREE_DB_ROOT = Cnf["Mirror-Split::TreeDatabasePath"]
-
- for a in Cnf.ValueList("Mirror-Split::BasicTrees"):
- trees.append( MirrorSplitTarget( a, "%s,all" % a, 1 ) )
-
- for n in Cnf.SubTree("Mirror-Split::CombinationTrees").List():
- archs = Cnf.ValueList("Mirror-Split::CombinationTrees::%s" % n)
- source = 0
- if "source" in archs:
- source = 1
- archs.remove("source")
- archs = ",".join(archs)
- trees.append( MirrorSplitTarget( n, archs, source ) )
-
-def do_list ():
- print "Master path",MASTER_PATH
- print "Trees at",TREE_ROOT
- print "DBs at",TREE_DB_ROOT
-
- for tree in trees:
- print tree.name,"contains",", ".join(tree.archs),
- if tree.source:
- print " [source]"
- else:
- print ""
-
-def do_help ():
- print """Usage: dak mirror-split [OPTIONS]
-Generate hardlink trees of certain architectures
-
- -h, --help show this help and exit
- -l, --list list the configuration and exit
-"""
-
-
-def main ():
- global Cnf
-
- Cnf = daklib.utils.get_conf()
-
- Arguments = [('h',"help","Mirror-Split::Options::Help"),
- ('l',"list","Mirror-Split::Options::List"),
- ]
-
- arguments = apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv)
- Cnf["Mirror-Split::Options::cake"] = ""
- Options = Cnf.SubTree("Mirror-Split::Options")
-
- print "Loading configuration..."
- load_config()
- print "Loaded."
-
- if Options.has_key("Help"):
- do_help()
- return
- if Options.has_key("List"):
- do_list()
- return
-
-
- src = MirrorSplitDB()
- print "Scanning", MASTER_PATH
- src.init_from_dir(MASTER_PATH)
- print "Scanned"
-
- for tree in trees:
- print "Reconciling tree:",tree.name
- reconcile_target_db( src, tree )
- print "Saving updated DB...",
- tree.save_db()
- print "Done"
-
-##############################################################################
-
-if __name__ == '__main__':
- main()
# Retrieve current section/priority...
oldsection, oldsourcesection, oldpriority = None, None, None
- for type in ['source', 'binary']:
+ for packagetype in ['source', 'binary']:
eqdsc = '!='
- if type == 'source':
+ if packagetype == 'source':
eqdsc = '='
q = projectB.query("""
SELECT priority.priority AS prio, section.section AS sect, override_type.type AS type
utils.fubar("%s is ambiguous. Matches %d packages" % (package,q.ntuples()))
r = q.getresult()
- if type == 'binary':
+ if packagetype == 'binary':
oldsection = r[0][1]
oldpriority = r[0][0]
else:
def check():
propogate={}
nopropogate={}
- for file in files.keys():
+ for checkfile in files.keys():
# The .orig.tar.gz can disappear out from under us is it's a
# duplicate of one in the archive.
- if not files.has_key(file):
+ if not files.has_key(checkfile):
continue
# Check that the source still exists
- if files[file]["type"] == "deb":
- source_version = files[file]["source version"]
- source_package = files[file]["source package"]
+ if files[checkfile]["type"] == "deb":
+ source_version = files[checkfile]["source version"]
+ source_package = files[checkfile]["source package"]
if not changes["architecture"].has_key("source") \
and not Upload.source_exists(source_package, source_version, changes["distribution"].keys()):
- reject("no source found for %s %s (%s)." % (source_package, source_version, file))
+ reject("no source found for %s %s (%s)." % (source_package, source_version, checkfile))
# Version and file overwrite checks
if not installing_to_stable:
- if files[file]["type"] == "deb":
- reject(Upload.check_binary_against_db(file), "")
- elif files[file]["type"] == "dsc":
- reject(Upload.check_source_against_db(file), "")
- (reject_msg, is_in_incoming) = Upload.check_dsc_against_db(file)
+ if files[checkfile]["type"] == "deb":
+ reject(Upload.check_binary_against_db(checkfile), "")
+ elif files[checkfile]["type"] == "dsc":
+ reject(Upload.check_source_against_db(checkfile), "")
+ (reject_msg, is_in_incoming) = Upload.check_dsc_against_db(checkfile)
reject(reject_msg, "")
# propogate in the case it is in the override tables:
if changes.has_key("propdistribution"):
for suite in changes["propdistribution"].keys():
- if Upload.in_override_p(files[file]["package"], files[file]["component"], suite, files[file].get("dbtype",""), file):
+ if Upload.in_override_p(files[checkfile]["package"], files[checkfile]["component"], suite, files[checkfile].get("dbtype",""), checkfile):
propogate[suite] = 1
else:
nopropogate[suite] = 1
continue
changes["distribution"][suite] = 1
- for file in files.keys():
+ for checkfile in files.keys():
# Check the package is still in the override tables
for suite in changes["distribution"].keys():
- if not Upload.in_override_p(files[file]["package"], files[file]["component"], suite, files[file].get("dbtype",""), file):
- reject("%s is NEW for %s." % (file, suite))
+ if not Upload.in_override_p(files[checkfile]["package"], files[checkfile]["component"], suite, files[checkfile].get("dbtype",""), checkfile):
+ reject("%s is NEW for %s." % (checkfile, suite))
###############################################################################
return
# Add the .dsc file to the DB
- for file in files.keys():
- if files[file]["type"] == "dsc":
+ for newfile in files.keys():
+ if files[newfile]["type"] == "dsc":
package = dsc["source"]
version = dsc["version"] # NB: not files[file]["version"], that has no epoch
maintainer = dsc["maintainer"]
changedby_id = database.get_or_set_maintainer_id(changedby)
fingerprint_id = database.get_or_set_fingerprint_id(dsc["fingerprint"])
install_date = time.strftime("%Y-%m-%d")
- filename = files[file]["pool name"] + file
- dsc_component = files[file]["component"]
- dsc_location_id = files[file]["location id"]
+ filename = files[newfile]["pool name"] + newfile
+ dsc_component = files[newfile]["component"]
+ dsc_location_id = files[newfile]["location id"]
if dsc.has_key("dm-upload-allowed") and dsc["dm-upload-allowed"] == "yes":
dm_upload_allowed = "true"
else:
dm_upload_allowed = "false"
- if not files[file].has_key("files id") or not files[file]["files id"]:
- files[file]["files id"] = database.set_files_id (filename, files[file]["size"], files[file]["md5sum"], files[file]["sha1sum"], files[file]["sha256sum"], dsc_location_id)
+ if not files[newfile].has_key("files id") or not files[newfile]["files id"]:
+ files[newfile]["files id"] = database.set_files_id (filename, files[newfile]["size"], files[newfile]["md5sum"], files[newfile]["sha1sum"], files[newfile]["sha256sum"], dsc_location_id)
projectB.query("INSERT INTO source (source, version, maintainer, changedby, file, install_date, sig_fpr, dm_upload_allowed) VALUES ('%s', '%s', %d, %d, %d, '%s', %s, %s)"
- % (package, version, maintainer_id, changedby_id, files[file]["files id"], install_date, fingerprint_id, dm_upload_allowed))
+ % (package, version, maintainer_id, changedby_id, files[newfile]["files id"], install_date, fingerprint_id, dm_upload_allowed))
for suite in changes["distribution"].keys():
suite_id = database.get_suite_id(suite)
projectB.query("INSERT INTO src_associations (suite, source) VALUES (%d, currval('source_id_seq'))" % (suite_id))
# Add the source files to the DB (files and dsc_files)
- projectB.query("INSERT INTO dsc_files (source, file) VALUES (currval('source_id_seq'), %d)" % (files[file]["files id"]))
+ projectB.query("INSERT INTO dsc_files (source, file) VALUES (currval('source_id_seq'), %d)" % (files[newfile]["files id"]))
for dsc_file in dsc_files.keys():
- filename = files[file]["pool name"] + dsc_file
+ filename = files[newfile]["pool name"] + dsc_file
# If the .orig.tar.gz is already in the pool, it's
# files id is stored in dsc_files by check_dsc().
files_id = dsc_files[dsc_file].get("files id", None)
# Add the .deb files to the DB
- for file in files.keys():
- if files[file]["type"] == "deb":
- package = files[file]["package"]
- version = files[file]["version"]
- maintainer = files[file]["maintainer"]
+ for newfile in files.keys():
+ if files[newfile]["type"] == "deb":
+ package = files[newfile]["package"]
+ version = files[newfile]["version"]
+ maintainer = files[newfile]["maintainer"]
maintainer = maintainer.replace("'", "\\'")
maintainer_id = database.get_or_set_maintainer_id(maintainer)
fingerprint_id = database.get_or_set_fingerprint_id(changes["fingerprint"])
- architecture = files[file]["architecture"]
+ architecture = files[newfile]["architecture"]
architecture_id = database.get_architecture_id (architecture)
- type = files[file]["dbtype"]
- source = files[file]["source package"]
- source_version = files[file]["source version"]
- filename = files[file]["pool name"] + file
- if not files[file].has_key("location id") or not files[file]["location id"]:
- files[file]["location id"] = database.get_location_id(Cnf["Dir::Pool"],files[file]["component"],utils.where_am_i())
- if not files[file].has_key("files id") or not files[file]["files id"]:
- files[file]["files id"] = database.set_files_id (filename, files[file]["size"], files[file]["md5sum"], files[file]["sha1sum"], files[file]["sha256sum"], files[file]["location id"])
+ filetype = files[newfile]["dbtype"]
+ source = files[newfile]["source package"]
+ source_version = files[newfile]["source version"]
+ filename = files[newfile]["pool name"] + newfile
+ if not files[newfile].has_key("location id") or not files[newfile]["location id"]:
+ files[newfile]["location id"] = database.get_location_id(Cnf["Dir::Pool"],files[newfile]["component"],utils.where_am_i())
+ if not files[newfile].has_key("files id") or not files[newfile]["files id"]:
+ files[newfile]["files id"] = database.set_files_id (filename, files[newfile]["size"], files[newfile]["md5sum"], files[newfile]["sha1sum"], files[newfile]["sha256sum"], files[newfile]["location id"])
source_id = database.get_source_id (source, source_version)
if source_id:
projectB.query("INSERT INTO binaries (package, version, maintainer, source, architecture, file, type, sig_fpr) VALUES ('%s', '%s', %d, %d, %d, %d, '%s', %d)"
- % (package, version, maintainer_id, source_id, architecture_id, files[file]["files id"], type, fingerprint_id))
+ % (package, version, maintainer_id, source_id, architecture_id, files[newfile]["files id"], filetype, fingerprint_id))
else:
- raise NoSourceFieldError, "Unable to find a source id for %s (%s), %s, file %s, type %s, signed by %s" % (package, version, architecture, file, type, sig_fpr)
+ raise NoSourceFieldError, "Unable to find a source id for %s (%s), %s, file %s, type %s, signed by %s" % (package, version, architecture, newfile, filetype, changes["fingerprint"])
for suite in changes["distribution"].keys():
suite_id = database.get_suite_id(suite)
projectB.query("INSERT INTO bin_associations (suite, bin) VALUES (%d, currval('binaries_id_seq'))" % (suite_id))
continue
# First move the files to the new location
legacy_filename = qid["path"] + qid["filename"]
- pool_location = utils.poolify (changes["source"], files[file]["component"])
+ pool_location = utils.poolify (changes["source"], files[newfile]["component"])
pool_filename = pool_location + os.path.basename(qid["filename"])
destination = Cnf["Dir::Pool"] + pool_location
utils.move(legacy_filename, destination)
projectB.query("UPDATE dsc_files SET file = %s WHERE source = %s AND file = %s" % (new_files_id, database.get_source_id(changes["source"], changes["version"]), orig_tar_id))
# Install the files into the pool
- for file in files.keys():
- destination = Cnf["Dir::Pool"] + files[file]["pool name"] + file
- utils.move(file, destination)
- Logger.log(["installed", file, files[file]["type"], files[file]["size"], files[file]["architecture"]])
- install_bytes += float(files[file]["size"])
+ for newfile in files.keys():
+ destination = Cnf["Dir::Pool"] + files[newfile]["pool name"] + newfile
+ utils.move(newfile, destination)
+ Logger.log(["installed", newfile, files[newfile]["type"], files[newfile]["size"], files[newfile]["architecture"]])
+ install_bytes += float(files[newfile]["size"])
# Copy the .changes file across for suite which need it.
copy_changes = {}
dest_dir = Cnf["Dir::QueueBuild"]
if Cnf.FindB("Dinstall::SecurityQueueBuild"):
dest_dir = os.path.join(dest_dir, suite)
- for file in files.keys():
- dest = os.path.join(dest_dir, file)
+ for newfile in files.keys():
+ dest = os.path.join(dest_dir, newfile)
# Remove it from the list of packages for later processing by apt-ftparchive
projectB.query("UPDATE queue_build SET in_queue = 'f', last_used = '%s' WHERE filename = '%s' AND suite = %s" % (now_date, dest, suite_id))
if not Cnf.FindB("Dinstall::SecurityQueueBuild"):
# Update the symlink to point to the new location in the pool
- pool_location = utils.poolify (changes["source"], files[file]["component"])
- src = os.path.join(Cnf["Dir::Pool"], pool_location, os.path.basename(file))
+ pool_location = utils.poolify (changes["source"], files[newfile]["component"])
+ src = os.path.join(Cnf["Dir::Pool"], pool_location, os.path.basename(newfile))
if os.path.islink(dest):
os.unlink(dest)
os.symlink(src, dest)
projectB.query("BEGIN WORK")
# Add the source to stable (and remove it from proposed-updates)
- for file in files.keys():
- if files[file]["type"] == "dsc":
+ for newfile in files.keys():
+ if files[newfile]["type"] == "dsc":
package = dsc["source"]
version = dsc["version"]; # NB: not files[file]["version"], that has no epoch
q = projectB.query("SELECT id FROM source WHERE source = '%s' AND version = '%s'" % (package, version))
projectB.query("INSERT INTO src_associations (suite, source) VALUES ('%s', '%s')" % (suite_id, source_id))
# Add the binaries to stable (and remove it/them from proposed-updates)
- for file in files.keys():
- if files[file]["type"] == "deb":
- package = files[file]["package"]
- version = files[file]["version"]
- architecture = files[file]["architecture"]
+ for newfile in files.keys():
+ if files[newfile]["type"] == "deb":
+ package = files[newfile]["package"]
+ version = files[newfile]["version"]
+ architecture = files[newfile]["architecture"]
q = projectB.query("SELECT b.id FROM binaries b, architecture a WHERE b.package = '%s' AND b.version = '%s' AND (a.arch_string = '%s' OR a.arch_string = 'all') AND b.architecture = a.id" % (package, version, architecture))
ql = q.getresult()
if not ql:
os.unlink (new_changelog_filename)
new_changelog = utils.open_file(new_changelog_filename, 'w')
- for file in files.keys():
- if files[file]["type"] == "deb":
- new_changelog.write("stable/%s/binary-%s/%s\n" % (files[file]["component"], files[file]["architecture"], file))
- elif re_issource.match(file):
- new_changelog.write("stable/%s/source/%s\n" % (files[file]["component"], file))
+ for newfile in files.keys():
+ if files[newfile]["type"] == "deb":
+ new_changelog.write("stable/%s/binary-%s/%s\n" % (files[newfile]["component"], files[newfile]["architecture"], newfile))
+ elif re_issource.match(newfile):
+ new_changelog.write("stable/%s/source/%s\n" % (files[newfile]["component"], newfile))
else:
- new_changelog.write("%s\n" % (file))
+ new_changelog.write("%s\n" % (newfile))
chop_changes = re_fdnic.sub("\n", changes["changes"])
new_changelog.write(chop_changes + '\n\n')
if os.access(changelog_filename, os.R_OK) != 0:
class Section_Completer:
def __init__ (self):
self.sections = []
+ self.matches = []
q = projectB.query("SELECT section FROM section")
for i in q.getresult():
self.sections.append(i[0])
class Priority_Completer:
def __init__ (self):
self.priorities = []
+ self.matches = []
q = projectB.query("SELECT priority FROM priority")
for i in q.getresult():
self.priorities.append(i[0])
def edit_note(note):
# Write the current data to a temporary file
(fd, temp_filename) = utils.temp_filename()
- temp_file = os.fdopen(temp_filename, 'w')
+ temp_file = os.fdopen(fd, 'w')
temp_file.write(note)
temp_file.close()
editor = os.environ.get("EDITOR","vi")
def do_stableupdate (summary, short_summary):
print "Moving to PROPOSED-UPDATES holding area."
- Logger.log(["Moving to proposed-updates", pkg.changes_file]);
+ Logger.log(["Moving to proposed-updates", pkg.changes_file])
- Upload.dump_vars(Cnf["Dir::Queue::ProposedUpdates"]);
+ Upload.dump_vars(Cnf["Dir::Queue::ProposedUpdates"])
move_to_dir(Cnf["Dir::Queue::ProposedUpdates"], perms=0664)
# Check for override disparities
- Upload.Subst["__SUMMARY__"] = summary;
- Upload.check_override();
+ Upload.Subst["__SUMMARY__"] = summary
+ Upload.check_override()
################################################################################
def do_oldstableupdate (summary, short_summary):
print "Moving to OLDSTABLE-PROPOSED-UPDATES holding area."
- Logger.log(["Moving to oldstable-proposed-updates", pkg.changes_file]);
+ Logger.log(["Moving to oldstable-proposed-updates", pkg.changes_file])
- Upload.dump_vars(Cnf["Dir::Queue::OldProposedUpdates"]);
+ Upload.dump_vars(Cnf["Dir::Queue::OldProposedUpdates"])
move_to_dir(Cnf["Dir::Queue::OldProposedUpdates"], perms=0664)
# Check for override disparities
- Upload.Subst["__SUMMARY__"] = summary;
- Upload.check_override();
+ Upload.Subst["__SUMMARY__"] = summary
+ Upload.check_override()
################################################################################
import psycopg2, sys, fcntl, os
import apt_pkg
import time
+import errno
from daklib import database
from daklib import utils
name TEXT UNIQUE NOT NULL,
value TEXT
);""")
- c.execute("INSERT INTO config VALUES ( nextval('config_id_seq'), 'db_revision', '0')");
+ c.execute("INSERT INTO config VALUES ( nextval('config_id_seq'), 'db_revision', '0')")
self.db.commit()
except psycopg2.ProgrammingError:
try:
c = self.db.cursor()
- q = c.execute("SELECT value FROM config WHERE name = 'db_revision';");
+ q = c.execute("SELECT value FROM config WHERE name = 'db_revision';")
return c.fetchone()[0]
except psycopg2.ProgrammingError:
"""
def __init__(self, message=""):
+ Exception.__init__(self)
self.args = str(message)
self.message = str(message)
################################################################################
-import sys, time, types
+import sys
+import time
+import types
################################################################################
################################################################################
def init (config, sql):
+    """ Database module init. Just sets the two global variables Cnf and projectB. """
global Cnf, projectB
Cnf = config
def do_query(q):
+ """
+ Executes a database query q. Writes statistics to stderr and returns
+ the result.
+
+ """
sys.stderr.write("query: \"%s\" ... " % (q))
before = time.time()
r = projectB.query(q)
################################################################################
def get_suite_id (suite):
+ """ Returns database suite_id for given suite, caches result. """
global suite_id_cache
if suite_id_cache.has_key(suite):
return suite_id
def get_section_id (section):
+ """ Returns database section_id for given section, caches result. """
global section_id_cache
if section_id_cache.has_key(section):
return section_id
def get_priority_id (priority):
+ """ Returns database priority_id for given priority, caches result. """
global priority_id_cache
if priority_id_cache.has_key(priority):
return priority_id
def get_override_type_id (type):
+    """ Returns database override_type_id for given override type, caches result. """
global override_type_id_cache
if override_type_id_cache.has_key(type):
return override_type_id
def get_architecture_id (architecture):
+ """ Returns database architecture_id for given architecture, caches result. """
global architecture_id_cache
if architecture_id_cache.has_key(architecture):
return architecture_id
def get_archive_id (archive):
+ """ Returns database archive_id for given archive, caches result. """
global archive_id_cache
archive = archive.lower()
return archive_id
def get_component_id (component):
+ """ Returns database component_id for given component, caches result. """
global component_id_cache
component = component.lower()
return component_id
def get_location_id (location, component, archive):
+ """
+ Returns database location_id for given combination of
+ location
+ component
+ archive.
+
+ The 3 parameters are the database ids returned by the respective
+ "get_foo_id" functions.
+
+ The result will be cached.
+
+ """
global location_id_cache
cache_key = location + '_' + component + '_' + location
return location_id
def get_source_id (source, version):
+ """ Returns database source_id for given combination of source and version, caches result. """
global source_id_cache
cache_key = source + '_' + version + '_'
return source_id
def get_suite_version(source, suite):
+ """ Returns database version for a given source in a given suite, caches result. """
global suite_version_cache
cache_key = "%s_%s" % (source, suite)
################################################################################
def get_or_set_maintainer_id (maintainer):
+ """
+ If maintainer does not have an entry in the maintainer table yet, create one
+ and return its id.
+ If maintainer already has an entry, simply return its id.
+
+ Result is cached.
+
+ """
global maintainer_id_cache
if maintainer_id_cache.has_key(maintainer):
################################################################################
def get_or_set_keyring_id (keyring):
+ """
+ If keyring does not have an entry in the keyring table yet, create one
+ and return its id.
+ If keyring already has an entry, simply return its id.
+
+ Result is cached.
+
+ """
global keyring_id_cache
if keyring_id_cache.has_key(keyring):
################################################################################
def get_or_set_uid_id (uid):
+ """
+ If uid does not have an entry in the uid table yet, create one
+ and return its id.
+ If uid already has an entry, simply return its id.
+
+ Result is cached.
+
+ """
global uid_id_cache
if uid_id_cache.has_key(uid):
################################################################################
def get_or_set_fingerprint_id (fingerprint):
+ """
+    If fingerprint does not have an entry in the fingerprint table yet, create one
+ and return its id.
+ If fingerprint already has an entry, simply return its id.
+
+ Result is cached.
+
+ """
global fingerprint_id_cache
if fingerprint_id_cache.has_key(fingerprint):
################################################################################
def get_files_id (filename, size, md5sum, location_id):
+ """
+ Returns -1, -2 or the file_id for a given combination of
+ filename
+ size
+ md5sum
+ location_id.
+
+ The database is queried using filename and location_id, size and md5sum are for
+ extra checks.
+
+ Return values:
+      -1 - The given combination of arguments results in more (or less) than
+ one result from the database
+ -2 - The given size and md5sum do not match the values in the database
+ anything else is a file_id
+
+ Result is cached.
+
+ """
global files_id_cache
cache_key = "%s_%d" % (filename, location_id)
################################################################################
def get_or_set_queue_id (queue):
+ """
+ If queue does not have an entry in the queue_name table yet, create one
+ and return its id.
+ If queue already has an entry, simply return its id.
+
+ Result is cached.
+
+ """
global queue_id_cache
if queue_id_cache.has_key(queue):
################################################################################
def set_files_id (filename, size, md5sum, sha1sum, sha256sum, location_id):
+ """
+ Insert a new entry into the files table.
+
+ Returns the new file_id
+
+ """
global files_id_cache
projectB.query("INSERT INTO files (filename, size, md5sum, sha1sum, sha256sum, location) VALUES ('%s', %d, '%s', '%s', '%s', %d)" % (filename, long(size), md5sum, sha1sum, sha256sum, location_id))
################################################################################
def get_maintainer (maintainer_id):
+ """ Return the name of the maintainer behind maintainer_id """
global maintainer_cache
if not maintainer_cache.has_key(maintainer_id):
################################################################################
def get_suites(pkgname, src=False):
+    """ Return the suites that contain pkgname. If src is True, query source packages, else binaries. """
if src:
- sql = "select suite_name from source, src_associations,suite where source.id=src_associations.source and source.source='%s' and src_associations.suite = suite.id"%pkgname
+ sql = """
+ SELECT suite_name
+ FROM source,
+ src_associations,
+ suite
+ WHERE source.id = src_associations.source
+ AND source.source = '%s'
+ AND src_associations.suite = suite.id
+ """ % (pkgname)
else:
- sql = "select suite_name from binaries, bin_associations,suite where binaries.id=bin_associations.bin and package='%s' and bin_associations.suite = suite.id"%pkgname
+ sql = """
+ SELECT suite_name
+ FROM binaries,
+ bin_associations,
+ suite
+ WHERE binaries.id = bin_associations.bin
+ AND package = '%s'
+ AND bin_associations.suite = suite.id
+ """ % (pkgname)
+
q = projectB.query(sql)
return map(lambda x: x[0], q.getresult())
self.Cnf = Cnf
self.accept_count = 0
self.accept_bytes = 0L
+ self.reject_message = ""
self.pkg = Pkg(changes = {}, dsc = {}, dsc_files = {}, files = {},
legacy_source_untouchable = {})
if not changes.has_key("distribution") or not isinstance(changes["distribution"], DictType):
changes["distribution"] = {}
- override_summary ="";
+ override_summary =""
file_keys = files.keys()
file_keys.sort()
for file_entry in file_keys:
dsc.has_key("bts changelog"):
(fd, temp_filename) = utils.temp_filename(Cnf["Dir::Queue::BTSVersionTrack"], prefix=".")
- version_history = os.fdopen(temp_filename, 'w')
+ version_history = os.fdopen(fd, 'w')
version_history.write(dsc["bts changelog"])
version_history.close()
filename = "%s/%s" % (Cnf["Dir::Queue::BTSVersionTrack"],
changes_file[:-8]+".versions")
os.rename(temp_filename, filename)
- os.chmod(filename, "0644")
+ os.chmod(filename, 0644)
# Write out the binary -> source mapping.
(fd, temp_filename) = utils.temp_filename(Cnf["Dir::Queue::BTSVersionTrack"], prefix=".")
- debinfo = os.fdopen(temp_filename, 'w')
+ debinfo = os.fdopen(fd, 'w')
for file_entry in file_keys:
f = files[file_entry]
if f["type"] == "deb":
filename = "%s/%s" % (Cnf["Dir::Queue::BTSVersionTrack"],
changes_file[:-8]+".debinfo")
os.rename(temp_filename, filename)
- os.chmod(filename, "0644")
+ os.chmod(filename, 0644)
self.queue_build("accepted", Cnf["Dir::Queue::Accepted"])
# for example, the package was in potato but had an -sa
# upload in woody. So we need to choose the right one.
- x = ql[0]; # default to something sane in case we don't match any or have only one
+ # default to something sane in case we don't match any or have only one
+ x = ql[0]
if len(ql) > 1:
for i in ql:
actual_size = os.stat(old_file)[stat.ST_SIZE]
found = old_file
suite_type = x[2]
- dsc_files[dsc_file]["files id"] = x[3]; # need this for updating dsc_files in install()
+ # need this for updating dsc_files in install()
+ dsc_files[dsc_file]["files id"] = x[3]
# See install() in process-accepted...
self.pkg.orig_tar_id = x[3]
self.pkg.orig_tar_gz = old_file
# Perform a substition of template
def TemplateSubst(map, filename):
- file = open_file(filename)
- template = file.read()
+ templatefile = open_file(filename)
+ template = templatefile.read()
for x in map.keys():
template = template.replace(x,map[x])
- file.close()
+ templatefile.close()
return template
################################################################################
################################################################################
def result_join (original, sep = '\t'):
- list = []
+ resultlist = []
for i in xrange(len(original)):
if original[i] == None:
- list.append("")
+ resultlist.append("")
else:
- list.append(original[i])
- return sep.join(list)
+ resultlist.append(original[i])
+ return sep.join(resultlist)
################################################################################
return "%s: tainted filename" % (filename)
# Invoke gpgv on the file
- status_read, status_write = os.pipe();
+ status_read, status_write = os.pipe()
cmd = "gpgv --status-fd %s --keyring /dev/null %s" % (status_write, filename)
(_, status, _) = gpgv_get_status_output(cmd, status_read, status_write)
return None
# Build the command line
- status_read, status_write = os.pipe();
+ status_read, status_write = os.pipe()
cmd = "gpgv --status-fd %s %s %s %s" % (
status_write, gpg_keyring_args(keyrings), sig_filename, data_filename)