FileList "/srv/ftp.debian.org/database/dists/testing_$(SECTION)_binary-$(ARCH).list";
SourceFileList "/srv/ftp.debian.org/database/dists/testing_$(SECTION)_source.list";
Sections "main contrib non-free";
- Architectures "alpha amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source";
+ Architectures "amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source";
BinOverride "override.squeeze.$(SECTION)";
ExtraOverride "override.squeeze.extra.$(SECTION)";
SrcOverride "override.squeeze.$(SECTION).src";
FileList "/srv/ftp.debian.org/database/dists/testing-proposed-updates_$(SECTION)_binary-$(ARCH).list";
SourceFileList "/srv/ftp.debian.org/database/dists/testing-proposed-updates_$(SECTION)_source.list";
Sections "main contrib non-free";
- Architectures "alpha amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source";
+ Architectures "amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source";
BinOverride "override.squeeze.$(SECTION)";
ExtraOverride "override.squeeze.extra.$(SECTION)";
SrcOverride "override.squeeze.$(SECTION).src";
{
FileList "/srv/ftp.debian.org/database/dists/testing_main_$(SECTION)_binary-$(ARCH).list";
Sections "debian-installer";
- Architectures "alpha amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64";
+ Architectures "amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64";
BinOverride "override.squeeze.main.$(SECTION)";
SrcOverride "override.squeeze.main.src";
BinCacheDB "packages-debian-installer-$(ARCH).db";
{
FileList "/srv/ftp.debian.org/database/dists/testing_non-free_$(SECTION)_binary-$(ARCH).list";
Sections "debian-installer";
- Architectures "alpha amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64";
+ Architectures "amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64";
BinOverride "override.squeeze.main.$(SECTION)";
SrcOverride "override.squeeze.main.src";
BinCacheDB "packages-debian-installer-$(ARCH).db";
{
FileList "/srv/ftp.debian.org/database/dists/testing-proposed-updates_main_$(SECTION)_binary-$(ARCH).list";
Sections "debian-installer";
- Architectures "alpha amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64";
+ Architectures "amd64 armel hppa i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64";
BinOverride "override.squeeze.main.$(SECTION)";
SrcOverride "override.squeeze.main.src";
BinCacheDB "packages-debian-installer-$(ARCH).db";
ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_ddaccess dak@merkel.debian.org sleep 1
}
-function runparts() {
- log "Using run-parts to run scripts in $base/scripts/distmnt"
- run-parts --report $base/scripts/distmnt
+function mirrorpush() {
+ log "Starting the mirrorpush"
+ date -u > /srv/ftp.debian.org/web/mirrorstart
+ echo "Using dak v1" >> /srv/ftp.debian.org/web/mirrorstart
+ echo "Running on host $(hostname -f)" >> /srv/ftp.debian.org/web/mirrorstart
+ sudo -H -u archvsync /home/archvsync/runmirrors > ~dak/runmirrors.log 2>&1 &
}
function i18n2() {
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
GO=(
FUNC="punew"
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
lockfile "$LOCK_ACCEPTED"
lockfile "$LOCK_NEW"
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
GO=(
FUNC="overrides"
ARGS=""
ERR=""
)
-stage $GO
+stage $GO &
rm -f "${NOTICE}"
rm -f "${LOCK_DAILY}"
ARGS=""
ERR=""
)
-stage $GO
+stage $GO &
GO=(
FUNC="expire"
ARGS=""
ERR=""
)
-stage $GO
+stage $GO &
GO=(
FUNC="transitionsclean"
ARGS=""
ERR=""
)
-stage $GO
+stage $GO &
GO=(
FUNC="reports"
ARGS=""
ERR=""
)
-stage $GO
+stage $GO &
GO=(
FUNC="dm"
ARGS=""
ERR=""
)
-stage $GO
+stage $GO &
GO=(
FUNC="bts"
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
GO=(
FUNC="merkel2"
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
GO=(
- FUNC="runparts"
- TIME="run-parts"
+ FUNC="mirrorpush"
+ TIME="mirrorpush"
ARGS=""
ERR="false"
)
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
GO=(
FUNC="testingsourcelist"
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
GO=(
FUNC="merkel3"
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
GO=(
FUNC="compress"
CloseBugs "true";
OverrideDisparityCheck "true";
DefaultSuite "unstable";
+ LintianTags "/srv/ftp.debian.org/dak/config/debian/lintian.tags";
QueueBuildSuites
{
unstable;
--- /dev/null
+lintian:
+ warning:
+ - statically-linked-binary
+ - arch-independent-package-contains-binary-or-object
+ - arch-dependent-file-in-usr-share
+ - missing-build-dependency
+ - missing-dependency-on-libc
+ - usr-share-doc-symlink-without-dependency
+ - binary-with-bad-dynamic-table
+ - mknod-in-maintainer-script
+ error:
+ - binary-in-etc
+ - missing-dependency-on-perlapi
+ - copyright-lists-upstream-authors-with-dh_make-boilerplate
+ - section-is-dh_make-template
+ - package-installs-python-pyc
+ - library-in-debug-or-profile-should-not-be-stripped
+ - binary-file-compressed-with-upx
+ - html-changelog-without-text-version
+ - file-in-usr-marked-as-conffile
+ - build-info-in-binary-control-file-section
+ - debian-control-with-duplicate-fields
+ - not-allowed-control-file
+ - control-file-has-bad-permissions
+ - control-file-has-bad-owner
+ - no-copyright-file
+ - copyright-refers-to-old-directory
+ - copyright-file-compressed
+ - copyright-file-is-symlink
+ - usr-share-doc-symlink-to-foreign-package
+ - old-style-copyright-file
+ - copyright-refers-to-incorrect-directory
+ - package-has-no-description
+ - description-synopsis-is-empty
+ - extended-description-is-empty
+ - description-is-dh_make-template
+ - file-in-etc-not-marked-as-conffile
+ - no-package-name
+ - bad-package-name
+ - package-not-lowercase
+ - no-version-field
+ - bad-version-number
+ - upstream-version-not-numeric
+ - no-architecture-field
+ - magic-arch-in-arch-list
+ - too-many-architectures
+ - arch-any-in-binary-pkg
+ - no-maintainer-field
+ - maintainer-name-missing
+ - maintainer-address-missing
+ - maintainer-address-malformed
+ - maintainer-address-is-on-localhost
+ - uploader-name-missing
+ - uploader-address-malformed
+ - uploader-address-is-on-localhost
+ - no-source-field
+ - source-field-does-not-match-pkg-name
+ - build-depends-on-essential-package-without-using-version
+ - depends-on-build-essential-package-without-using-version
+ - build-depends-on-build-essential
+ - executable-in-usr-share-doc
+ - symlink-has-too-many-up-segments
+ - debian-rules-is-symlink
+ - debian-rules-not-a-makefile
+ - debian-rules-missing-required-target
+ - maintainer-script-removes-device-files
+ - no-standards-version-field
+ - invalid-standards-version
+ - dir-or-file-in-var-www
+ - dir-or-file-in-tmp
+ - dir-or-file-in-mnt
+ - dir-or-file-in-opt
+ - dir-or-file-in-srv
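
For reference, here is a rough sketch of how a tag list like the one above can be matched against lintian output during upload checks. This is an illustration only, not dak's actual check_lintian() implementation; the PyYAML usage and the reject() callback are assumptions, and lintian's output is taken to follow its usual "CODE: package[ type]: tag extra" shape.

    # Sketch only: match configured lintian tags against lintian output.
    import yaml            # assumption: the tags file is parsed with PyYAML
    import commands

    def check_lintian_sketch(changes_file, tagfile, reject):
        # Build severity ("warning"/"error") keyed by tag name
        config = yaml.safe_load(open(tagfile))["lintian"]
        wanted = {}
        for severity, tags in config.items():
            for tag in tags:
                wanted[tag] = severity
        (status, output) = commands.getstatusoutput("lintian %s" % changes_file)
        for line in output.split("\n"):
            parts = line.split(": ", 2)       # e.g. "W: pkg: tag extra"
            if len(parts) < 3:
                continue
            tag = parts[2].split(" ")[0]
            if tag in wanted:
                reject("lintian: %s (configured as %s)" % (line, wanted[tag]))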
# I know what I say. I don't know Python and I wrote it. So go and read some other stuff.
import commands
-import re
import sys
-import time
-import os
import apt_pkg
-from daklib import daklog
from daklib import utils
from daklib.dbconn import DBConn, add_database_user, get_or_set_uid
from daklib.regexes import re_gpg_fingerprint, re_user_address, re_user_mails, re_user_name
from daklib import utils
from daklib.dbconn import *
-from daklib.config import Config
################################################################################
from daklib.dbconn import *
from daklib import utils
-from daklib.regexes import re_issource
from daklib.config import Config
################################################################################
missing-overrides - check for missing overrides
source-in-one-dir - ensure the source for each package is in one directory
timestamps - check for future timestamps in .deb's
- tar-gz-in-dsc - ensure each .dsc lists a .tar.gz file
+ files-in-dsc - ensure each .dsc references appropriate Files
validate-indices - ensure files mentioned in Packages & Sources exist
files-not-symlinks - check files in the database aren't symlinks
validate-builddeps - validate build-dependencies of .dsc files in the archive
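
To illustrate what "appropriate Files" means for files-in-dsc, here is a simplified sketch of a Format-aware check. The real rules live in utils.check_dsc_files(); the patterns below are assumptions covering only the common cases.

    # Simplified sketch; dak's actual logic is in utils.check_dsc_files().
    import re

    def files_match_format(fmt, filenames):
        has = lambda pat: any(re.search(pat, f) for f in filenames)
        if fmt == "1.0":
            # native: a single .tar.gz; non-native: .orig.tar.gz plus .diff.gz
            return has(r"\.tar\.gz$") or \
                   (has(r"\.orig\.tar\.gz$") and has(r"\.diff\.gz$"))
        if fmt == "3.0 (native)":
            return has(r"\.tar\.(gz|bz2)$")
        if fmt == "3.0 (quilt)":
            return has(r"\.orig\.tar\.(gz|bz2)$") and \
                   has(r"\.debian\.tar\.(gz|bz2)$")
        return False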
################################################################################
-def check_missing_tar_gz_in_dsc():
+def check_files_in_dsc():
"""
- Ensure each .dsc lists a .tar.gz file
+ Ensure each .dsc lists appropriate files in its Files field (according
+ to the format announced in its Format field).
"""
count = 0
except:
utils.fubar("error parsing .dsc file '%s'." % (filename))
- dsc_files = utils.build_file_list(dsc, is_a_dsc=1)
- has_tar = 0
+ reasons = utils.check_dsc_files(filename, dsc)
+ for r in reasons:
+ utils.warn(r)
- for f in dsc_files.keys():
- m = re_issource.match(f)
- if not m:
- utils.fubar("%s not recognised as source." % (f))
- ftype = m.group(3)
- if ftype == "orig.tar.gz" or ftype == "tar.gz":
- has_tar = 1
-
- if not has_tar:
- utils.warn("%s has no .tar.gz in the .dsc file." % (f))
+ if len(reasons) > 0:
count += 1
if count:
check_source_in_one_dir()
elif mode == "timestamps":
check_timestamps()
- elif mode == "tar-gz-in-dsc":
- check_missing_tar_gz_in_dsc()
+ elif mode == "files-in-dsc":
+ check_files_in_dsc()
elif mode == "validate-indices":
check_indices_files_exist()
elif mode == "files-not-symlinks":
# we can copy
packages[package] = 1
Logger.log(["copying missing override", osuite, component,
- type, package, priorities[i[1]], sections[i[2]], i[3]])
+ otype, package, priorities[i[1]], sections[i[2]], i[3]])
if not Options["No-Action"]:
session.execute("""INSERT INTO override (package, suite, component,
priority, section, type, maintainer)
################################################################################
-import os, stat, sys, time
+import os, os.path, stat, sys, time
import apt_pkg
from daklib import utils
+from daklib import daklog
+from daklib.config import Config
################################################################################
-Cnf = None
Options = None
+Logger = None
del_dir = None
delete_date = None
################################################################################
-def init ():
+def init (cnf):
global delete_date, del_dir
delete_date = int(time.time())-(int(Options["Days"])*86400)
+ date = time.strftime("%Y-%m-%d")
+ del_dir = os.path.join(cnf["Dir::Morgue"], cnf["Clean-Queues::MorgueSubDir"], date)
# Ensure a directory exists to remove files to
if not Options["No-Action"]:
- date = time.strftime("%Y-%m-%d")
- del_dir = Cnf["Dir::Morgue"] + '/' + Cnf["Clean-Queues::MorgueSubDir"] + '/' + date
if not os.path.exists(del_dir):
os.makedirs(del_dir, 02775)
if not os.path.isdir(del_dir):
# Move to the directory to clean
incoming = Options["Incoming"]
if incoming == "":
- incoming = Cnf["Dir::Queue::Unchecked"]
+ incoming = cnf["Dir::Queue::Unchecked"]
os.chdir(incoming)
# Remove a file to the morgue
def remove (f):
+ fname = os.path.basename(f)
if os.access(f, os.R_OK):
- dest_filename = del_dir + '/' + os.path.basename(f)
+ Logger.log(["move file to morgue", fname, del_dir])
+ if Options["Verbose"]:
+ print "Removing '%s' (to '%s')." % (fname, del_dir)
+ if Options["No-Action"]:
+ return
+
+ dest_filename = os.path.join(del_dir, fname)
# If the destination file exists; try to find another filename to use
if os.path.exists(dest_filename):
dest_filename = utils.find_next_free(dest_filename, 10)
+ Logger.log(["change destination file name", os.path.basename(dest_filename)])
utils.move(f, dest_filename, 0660)
else:
- utils.warn("skipping '%s', permission denied." % (os.path.basename(f)))
+ Logger.log(["skipping file because of permission problem", fname])
+ utils.warn("skipping '%s', permission denied." % fname)
# Removes any old files.
# [Used for Incoming/REJECT]
#
def flush_old ():
+ Logger.log(["check Incoming/REJECT for old files"])
for f in os.listdir('.'):
if os.path.isfile(f):
if os.stat(f)[stat.ST_MTIME] < delete_date:
- if Options["No-Action"]:
- print "I: Would delete '%s'." % (os.path.basename(f))
- else:
- if Options["Verbose"]:
- print "Removing '%s' (to '%s')." % (os.path.basename(f), del_dir)
- remove(f)
+ remove(f)
else:
if Options["Verbose"]:
print "Skipping, too new, '%s'." % (os.path.basename(f))
all_files = {}
changes_files = []
+ Logger.log(["check Incoming for old orphaned files"])
# Build up the list of all files in the directory
for i in os.listdir('.'):
if os.path.isfile(i):
# a .dsc) and should be deleted if old enough.
for f in all_files.keys():
if os.stat(f)[stat.ST_MTIME] < delete_date:
- if Options["No-Action"]:
- print "I: Would delete '%s'." % (os.path.basename(f))
- else:
- if Options["Verbose"]:
- print "Removing '%s' (to '%s')." % (os.path.basename(f), del_dir)
- remove(f)
+ remove(f)
else:
if Options["Verbose"]:
print "Skipping, too new, '%s'." % (os.path.basename(f))
################################################################################
def main ():
- global Cnf, Options
+ global Options, Logger
- Cnf = utils.get_conf()
+ cnf = Config()
for i in ["Help", "Incoming", "No-Action", "Verbose" ]:
- if not Cnf.has_key("Clean-Queues::Options::%s" % (i)):
- Cnf["Clean-Queues::Options::%s" % (i)] = ""
- if not Cnf.has_key("Clean-Queues::Options::Days"):
- Cnf["Clean-Queues::Options::Days"] = "14"
+ if not cnf.has_key("Clean-Queues::Options::%s" % (i)):
+ cnf["Clean-Queues::Options::%s" % (i)] = ""
+ if not cnf.has_key("Clean-Queues::Options::Days"):
+ cnf["Clean-Queues::Options::Days"] = "14"
Arguments = [('h',"help","Clean-Queues::Options::Help"),
('d',"days","Clean-Queues::Options::Days", "IntLevel"),
('n',"no-action","Clean-Queues::Options::No-Action"),
('v',"verbose","Clean-Queues::Options::Verbose")]
- apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv)
- Options = Cnf.SubTree("Clean-Queues::Options")
+ apt_pkg.ParseCommandLine(cnf.Cnf,Arguments,sys.argv)
+ Options = cnf.SubTree("Clean-Queues::Options")
if Options["Help"]:
usage()
- init()
+ Logger = daklog.Logger(cnf, 'clean-queues', Options['No-Action'])
+
+ init(cnf)
if Options["Verbose"]:
print "Processing incoming..."
flush_orphans()
- reject = Cnf["Dir::Queue::Reject"]
+ reject = cnf["Dir::Queue::Reject"]
if os.path.exists(reject) and os.path.isdir(reject):
if Options["Verbose"]:
print "Processing incoming/REJECT..."
os.chdir(reject)
flush_old()
+ Logger.close()
+
#######################################################################################
if __name__ == '__main__':
from daklib.config import Config
from daklib.dbconn import *
from daklib import utils
+from daklib import daklog
################################################################################
Options = None
+Logger = None
################################################################################
# Get the list of binary packages not in a suite and mark them for
# deletion.
- # TODO: This can be a single SQL UPDATE statement
q = session.execute("""
-SELECT b.file FROM binaries b, files f
+SELECT b.file, f.filename FROM binaries b, files f
WHERE f.last_used IS NULL AND b.file = f.id
AND NOT EXISTS (SELECT 1 FROM bin_associations ba WHERE ba.bin = b.id)""")
for i in q.fetchall():
+ Logger.log(["set lastused", i[1]])
session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid AND last_used IS NULL",
{'lastused': now_date, 'fileid': i[0]})
session.commit()
# Check for any binaries which are marked for eventual deletion
# but are now used again.
- # TODO: This can be a single SQL UPDATE statement
q = session.execute("""
-SELECT b.file FROM binaries b, files f
+SELECT b.file, f.filename FROM binaries b, files f
WHERE f.last_used IS NOT NULL AND f.id = b.file
AND EXISTS (SELECT 1 FROM bin_associations ba WHERE ba.bin = b.id)""")
for i in q.fetchall():
+ Logger.log(["unset lastused", i[1]])
session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid", {'fileid': i[0]})
session.commit()
# Get the list of source packages not in a suite and not used by
# any binaries.
q = session.execute("""
-SELECT s.id, s.file FROM source s, files f
+SELECT s.id, s.file, f.filename FROM source s, files f
WHERE f.last_used IS NULL AND s.file = f.id
AND NOT EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id)
AND NOT EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id)""")
for i in q.fetchall():
source_id = i[0]
dsc_file_id = i[1]
+ dsc_fname = i[2]
# Mark the .dsc file for deletion
+ Logger.log(["set lastused", dsc_fname])
session.execute("""UPDATE files SET last_used = :last_used
WHERE id = :dscfileid AND last_used IS NULL""",
{'last_used': now_date, 'dscfileid': dsc_file_id})
# Mark all other files references by .dsc too if they're not used by anyone else
- x = session.execute("""SELECT f.id FROM files f, dsc_files d
+ x = session.execute("""SELECT f.id, f.filename FROM files f, dsc_files d
WHERE d.source = :sourceid AND d.file = f.id""",
{'sourceid': source_id})
for j in x.fetchall():
file_id = j[0]
+ file_name = j[1]
y = session.execute("SELECT id FROM dsc_files d WHERE d.file = :fileid", {'fileid': file_id})
if len(y.fetchall()) == 1:
+ Logger.log(["set lastused", file_name])
session.execute("""UPDATE files SET last_used = :lastused
WHERE id = :fileid AND last_used IS NULL""",
{'lastused': now_date, 'fileid': file_id})
# are now used again.
q = session.execute("""
-SELECT f.id FROM source s, files f, dsc_files df
+SELECT f.id, f.filename FROM source s, files f, dsc_files df
WHERE f.last_used IS NOT NULL AND s.id = df.source AND df.file = f.id
AND ((EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id))
OR (EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id)))""")
#### XXX: this should also handle deleted binaries specially (ie, not
#### reinstate sources because of them
- # Could be done in SQL; but left this way for hysterical raisins
- # [and freedom to innovate don'cha know?]
for i in q.fetchall():
+ Logger.log(["unset lastused", i[1]])
session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid",
{'fileid': i[0]})
SELECT id, filename FROM files f
WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id)
AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id)
+ AND last_used IS NULL
ORDER BY filename""")
ql = q.fetchall()
if len(ql) > 0:
- print "WARNING: check_files found something it shouldn't"
+ utils.warn("check_files found something it shouldn't")
for x in ql:
- print x
+ utils.warn("orphaned file: %s" % x)
+ Logger.log(["set lastused", x[1], "ORPHANED FILE"])
session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid",
{'lastused': now_date, 'fileid': x[0]})
# XXX: why doesn't this remove the files here as well? I don't think it
# buys anything keeping this separate
print "Cleaning binaries from the DB..."
+ print "Deleting from binaries table... "
+ for bin in session.query(DBBinary).join(DBBinary.poolfile).filter(PoolFile.last_used <= delete_date):
+ Logger.log(["delete binary", bin.poolfile.filename])
+ if not Options["No-Action"]:
+ session.delete(bin)
if not Options["No-Action"]:
- print "Deleting from binaries table... "
- session.execute("""DELETE FROM binaries WHERE EXISTS
- (SELECT 1 FROM files WHERE binaries.file = files.id
- AND files.last_used <= :deldate)""",
- {'deldate': delete_date})
+ session.commit()
########################################
os.mkdir(dest)
# Delete from source
- if not Options["No-Action"]:
- print "Deleting from source table... "
- session.execute("""DELETE FROM dsc_files
- WHERE EXISTS
- (SELECT 1 FROM source s, files f, dsc_files df
- WHERE f.last_used <= :deletedate
- AND s.file = f.id AND s.id = df.source
- AND df.id = dsc_files.id)""", {'deletedate': delete_date})
- session.execute("""DELETE FROM source
- WHERE EXISTS
- (SELECT 1 FROM files
- WHERE source.file = files.id
- AND files.last_used <= :deletedate)""", {'deletedate': delete_date})
+ print "Deleting from source table... "
+ q = session.execute("""
+SELECT s.id, f.filename FROM source s, files f
+ WHERE f.last_used <= :deletedate
+ AND s.file = f.id""", {'deletedate': delete_date})
+ for s in q.fetchall():
+ Logger.log(["delete source", s[1], s[0]])
+ if not Options["No-Action"]:
+ session.execute("DELETE FROM dsc_files WHERE source = :s_id", {"s_id":s[0]})
+ session.execute("DELETE FROM source WHERE id = :s_id", {"s_id":s[0]})
+ if not Options["No-Action"]:
session.commit()
# Delete files from the pool
- query = """SELECT l.path, f.filename FROM location l, files f
- WHERE f.last_used <= :deletedate AND l.id = f.location"""
+ old_files = session.query(PoolFile).filter(PoolFile.last_used <= delete_date)
if max_delete is not None:
- query += " LIMIT %d" % max_delete
+ old_files = old_files.limit(max_delete)
print "Limiting removals to %d" % max_delete
- q = session.execute(query, {'deletedate': delete_date})
- for i in q.fetchall():
- filename = i[0] + i[1]
+ for pf in old_files:
+ filename = os.path.join(pf.location.path, pf.filename)
if not os.path.exists(filename):
utils.warn("can not find '%s'." % (filename))
continue
+ Logger.log(["delete pool file", filename])
if os.path.isfile(filename):
if os.path.islink(filename):
count += 1
- if Options["No-Action"]:
- print "Removing symlink %s..." % (filename)
- else:
+ Logger.log(["delete symlink", filename])
+ if not Options["No-Action"]:
os.unlink(filename)
else:
size += os.stat(filename)[stat.ST_SIZE]
if os.path.exists(dest_filename):
dest_filename = utils.find_next_free(dest_filename)
- if Options["No-Action"]:
- print "Cleaning %s -> %s ..." % (filename, dest_filename)
- else:
+ Logger.log(["move to morgue", filename, dest_filename])
+ if not Options["No-Action"]:
utils.move(filename, dest_filename)
+
+ if not Options["No-Action"]:
+ session.delete(pf)
+
else:
utils.fubar("%s is neither symlink nor file?!" % (filename))
- # Delete from the 'files' table
- # XXX: I've a horrible feeling that the max_delete stuff breaks here - mhy
- # TODO: Change it so we do the DELETEs as we go; it'll be slower but
- # more reliable
if not Options["No-Action"]:
- print "Deleting from files table... "
- session.execute("DELETE FROM files WHERE last_used <= :deletedate", {'deletedate': delete_date})
session.commit()
if count > 0:
+ Logger.log(["total", count, utils.size_type(size)])
print "Cleaned %d files, %s." % (count, utils.size_type(size))
################################################################################
# TODO Replace this whole thing with one SQL statement
q = session.execute("""
-SELECT m.id FROM maintainer m
+SELECT m.id, m.name FROM maintainer m
WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.maintainer = m.id)
AND NOT EXISTS (SELECT 1 FROM source s WHERE s.maintainer = m.id OR s.changedby = m.id)
AND NOT EXISTS (SELECT 1 FROM src_uploaders u WHERE u.maintainer = m.id)""")
for i in q.fetchall():
maintainer_id = i[0]
+ Logger.log(["delete maintainer", i[1]])
if not Options["No-Action"]:
session.execute("DELETE FROM maintainer WHERE id = :maint", {'maint': maintainer_id})
- count += 1
+ count += 1
if not Options["No-Action"]:
session.commit()
if count > 0:
+ Logger.log(["total", count])
print "Cleared out %d maintainer entries." % (count)
################################################################################
# TODO Replace this whole thing with one SQL statement
q = session.execute("""
-SELECT f.id FROM fingerprint f
+SELECT f.id, f.fingerprint FROM fingerprint f
WHERE f.keyring IS NULL
AND NOT EXISTS (SELECT 1 FROM binaries b WHERE b.sig_fpr = f.id)
AND NOT EXISTS (SELECT 1 FROM source s WHERE s.sig_fpr = f.id)""")
for i in q.fetchall():
fingerprint_id = i[0]
+ Logger.log(["delete fingerprint", i[1]])
if not Options["No-Action"]:
session.execute("DELETE FROM fingerprint WHERE id = :fpr", {'fpr': fingerprint_id})
- count += 1
+ count += 1
if not Options["No-Action"]:
session.commit()
if count > 0:
+ Logger.log(["total", count])
print "Cleared out %d fingerprint entries." % (count)
################################################################################
our_delete_date = now_date - timedelta(seconds = int(cnf["Clean-Suites::QueueBuildStayOfExecution"]))
count = 0
- q = session.execute("SELECT filename FROM queue_build WHERE last_used <= :deletedate",
- {'deletedate': our_delete_date})
- for i in q.fetchall():
- filename = i[0]
- if not os.path.exists(filename):
- utils.warn("%s (from queue_build) doesn't exist." % (filename))
+ for qf in session.query(QueueBuild).filter(QueueBuild.last_used <= our_delete_date):
+ if not os.path.exists(qf.filename):
+ utils.warn("%s (from queue_build) doesn't exist." % (qf.filename))
continue
- if not cnf.FindB("Dinstall::SecurityQueueBuild") and not os.path.islink(filename):
- utils.fubar("%s (from queue_build) should be a symlink but isn't." % (filename))
+ if not cnf.FindB("Dinstall::SecurityQueueBuild") and not os.path.islink(qf.filename):
+ utils.fubar("%s (from queue_build) should be a symlink but isn't." % (qf.filename))
- os.unlink(filename)
+ Logger.log(["delete queue build", qf.filename])
+ if not Options["No-Action"]:
+ os.unlink(qf.filename)
+ session.delete(qf)
count += 1
- session.execute("DELETE FROM queue_build WHERE last_used <= :deletedate",
- {'deletedate': our_delete_date})
-
- session.commit()
+ if not Options["No-Action"]:
+ session.commit()
if count:
+ Logger.log(["total", count])
print "Cleaned %d queue_build files." % (count)
################################################################################
+def clean_empty_directories(session):
+ """
+ Removes empty directories from pool directories.
+ """
+
+ count = 0
+
+ cursor = session.execute(
+ "SELECT DISTINCT(path) FROM location WHERE type = :type",
+ {'type': 'pool'},
+ )
+ bases = [x[0] for x in cursor.fetchall()]
+
+ for base in bases:
+ for dirpath, dirnames, filenames in os.walk(base, topdown=False):
+ if not filenames and not dirnames:
+ to_remove = dirpath # os.walk already yields the full path
+ Logger.log(["removing directory", to_remove])
+ if not Options["No-Action"]:
+ os.removedirs(to_remove)
+ count += 1
+
+ if count:
+ Logger.log(["total removed directories", count])
+
+################################################################################
+
def main():
- global Options
+ global Options, Logger
cnf = Config()
if Options["Help"]:
usage()
+ Logger = daklog.Logger(cnf, "clean-suites", debug=Options["No-Action"])
+
session = DBConn().session()
now_date = datetime.now()
clean_maintainers(now_date, delete_date, max_delete, session)
clean_fingerprints(now_date, delete_date, max_delete, session)
clean_queue_build(now_date, delete_date, max_delete, session)
+ clean_empty_directories(session)
+
+ Logger.close()
################################################################################
import sys
import os
import logging
-import math
import gzip
import threading
import Queue
################################################################################
-import commands, os, sys, time, re
+import commands, os, sys, re
import apt_pkg
from daklib.config import Config
################################################################################
import sys
-import imp
import daklib.utils
################################################################################
--- /dev/null
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Adding table for allowed source formats
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Raphael Hertzog <hertzog@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+
+################################################################################
+
+import psycopg2
+import time
+from daklib.dak_exceptions import DBUpdateError
+
+################################################################################
+
+def do_update(self):
+ print "Adding tables listing allowed source formats"
+
+ try:
+ c = self.db.cursor()
+ c.execute("""
+ CREATE TABLE src_format (
+ id SERIAL PRIMARY KEY,
+ format_name TEXT NOT NULL,
+ UNIQUE (format_name)
+ )
+ """)
+ c.execute("INSERT INTO src_format (format_name) VALUES('1.0')")
+ c.execute("INSERT INTO src_format (format_name) VALUES('3.0 (quilt)')")
+ c.execute("INSERT INTO src_format (format_name) VALUES('3.0 (native)')")
+
+ c.execute("""
+ CREATE TABLE suite_src_formats (
+ suite INT4 NOT NULL REFERENCES suite(id),
+ src_format INT4 NOT NULL REFERENCES src_format(id),
+ PRIMARY KEY (suite, src_format)
+ )
+ """)
+
+ print "Authorize format 1.0 on all suites by default"
+ c.execute("SELECT id FROM suite")
+ suites = c.fetchall()
+ c.execute("SELECT id FROM src_format WHERE format_name = '1.0'")
+ formats = c.fetchall()
+ for s in suites:
+ for f in formats:
+ c.execute("INSERT INTO suite_src_formats (suite, src_format) VALUES(%s, %s)", (s[0], f[0]))
+
+ print "Authorize all other formats on tpu, unstable & experimental by default"
+ c.execute("SELECT id FROM suite WHERE suite_name IN ('testing-proposed-updates', 'unstable', 'experimental')")
+ suites = c.fetchall()
+ c.execute("SELECT id FROM src_format WHERE format_name != '1.0'")
+ formats = c.fetchall()
+ for s in suites:
+ for f in formats:
+ c.execute("INSERT INTO suite_src_formats (suite, src_format) VALUES(%s, %s)", (s[0], f[0]))
+
+ c.execute("UPDATE config SET value = '15' WHERE name = 'db_revision'")
+ self.db.commit()
+
+ except psycopg2.ProgrammingError, msg:
+ self.db.rollback()
+ raise DBUpdateError, "Unable to apply source format update 15, rollback issued. Error message: %s" % (str(msg))
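
A usage note: once the migration has run, a query along these lines decides whether a suite accepts a given source format. The helper is hypothetical (not part of update15), but it is built directly on the tables created above.

    # Hypothetical helper querying the new src_format/suite_src_formats tables.
    def format_allowed(cursor, suite_name, format_name):
        cursor.execute("""
            SELECT 1 FROM suite_src_formats ssf
              JOIN suite s ON s.id = ssf.suite
              JOIN src_format f ON f.id = ssf.src_format
             WHERE s.suite_name = %s AND f.format_name = %s""",
            (suite_name, format_name))
        return cursor.fetchone() is not None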
import sys
import os
import tempfile
-import subprocess
import time
import apt_pkg
from daklib.config import Config
from daklib.dbconn import *
-from daklib import utils
# Globals
################################################################################
-import commands, ldap, re, sys
+import commands, ldap, sys
import apt_pkg
from daklib.config import Config
################################################################################
-import os
-import sys
-
from daklib.dbconn import *
################################################################################
from daklib import queue
from daklib import daklog
from daklib import utils
-from daklib import database
+from daklib.dbconn import DBConn, get_or_set_queue, get_suite_architectures
from daklib.regexes import re_taint_free
Cnf = None
ver, suite)
adv += "%s\n%s\n\n" % (suite_header, "-"*len(suite_header))
- arches = database.get_suite_architectures(suite)
+ arches = [x.arch_name for x in get_suite_architectures(suite)]
if "source" in arches:
arches.remove("source")
if "all" in arches:
if os.getcwd() != Cnf["Dir::Queue::Embargoed"].rstrip("/"):
utils.fubar("Can only disembargo from %s" % Cnf["Dir::Queue::Embargoed"])
+ session = DBConn().session()
+
dest = Cnf["Dir::Queue::Unembargoed"]
- emb_q = database.get_or_set_queue_id("embargoed")
- une_q = database.get_or_set_queue_id("unembargoed")
+ emb_q = get_or_set_queue("embargoed", session)
+ une_q = get_or_set_queue("unembargoed", session)
for c in changes:
print "Disembargoing %s" % (c)
if "source" in Upload.pkg.changes["architecture"].keys():
print "Adding %s %s to disembargo table" % (Upload.pkg.changes["source"], Upload.pkg.changes["version"])
- Upload.projectB.query("INSERT INTO disembargo (package, version) VALUES ('%s', '%s')" % (Upload.pkg.changes["source"], Upload.pkg.changes["version"]))
+ session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)",
+ {'package': Upload.pkg.changes["source"], 'version': Upload.pkg.changes["version"]})
files = {}
for suite in Upload.pkg.changes["distribution"].keys():
files[os.path.join(dest_dir, file)] = 1
files = files.keys()
- Upload.projectB.query("BEGIN WORK")
for f in files:
- Upload.projectB.query("UPDATE queue_build SET queue = %s WHERE filename = '%s' AND queue = %s" % (une_q, f, emb_q))
- Upload.projectB.query("COMMIT WORK")
+ session.execute("UPDATE queue_build SET queue = :unembargoed WHERE filename = :filename AND queue = :embargoed",
+ {'unembargoed': une_q.queue_id, 'filename': f, 'embargoed': emb_q.queue_id})
+ session.commit()
for file in Upload.pkg.files.keys():
utils.copy(file, os.path.join(dest, file))
utils.copy(k, os.path.join(dest, k))
os.unlink(k)
+ session.commit()
+
def do_Reject(): sudo("R", _do_Reject, True)
def _do_Reject():
global changes
+
+ session = DBConn().session()
+
for c in changes:
print "Rejecting %s..." % (c)
Upload.init_vars()
if not aborted:
os.unlink(c[:-8]+".dak")
for f in files:
- Upload.projectB.query(
- "DELETE FROM queue_build WHERE filename = '%s'" % (f))
+ session.execute("DELETE FROM queue_build WHERE filename = :filename",
+ {'filename': f})
os.unlink(f)
print "Updating buildd information..."
if os.path.exists(adv_file):
os.unlink(adv_file)
+ session.commit()
+
def do_DropAdvisory():
for c in changes:
Upload.init_vars()
import os
import sys
from datetime import datetime
-import re
-import apt_pkg, commands
+import apt_pkg
from daklib import daklog
from daklib.queue import *
from daklib import utils
from daklib.dbconn import *
-from daklib.binary import copy_temporary_contents
from daklib.dak_exceptions import *
from daklib.regexes import re_default_answer, re_issource, re_fdnic
from daklib.urgencylog import UrgencyLog
df = DSCFile()
df.source_id = source.source_id
- # If the .orig.tar.gz is already in the pool, it's
+ # If the .orig tarball is already in the pool, its
# files id is stored in dsc_files by check_dsc().
files_id = dentry.get("files id", None)
add_deb_to_db(u, newfile, session)
# If this is a sourceful diff only upload that is moving
- # cross-component we need to copy the .orig.tar.gz into the new
+ # cross-component we need to copy the .orig files into the new
# component too for the same reasons as above.
- #
- if u.pkg.changes["architecture"].has_key("source") and u.pkg.orig_tar_id and \
- u.pkg.orig_tar_location != dsc_location_id:
-
- oldf = get_poolfile_by_id(u.pkg.orig_tar_id, session)
- old_filename = os.path.join(oldf.location.path, oldf.filename)
- old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum,
- 'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
-
- new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
-
- # TODO: Care about size/md5sum collisions etc
- (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session)
-
- if newf is None:
- utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
- newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
-
- # TODO: Check that there's only 1 here
- source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0]
- dscf = get_dscfiles(source_id = source.source_id, poolfile_id=u.pkg.orig_tar_id, session=session)[0]
- dscf.poolfile_id = newf.file_id
- session.add(dscf)
- session.flush()
+ if u.pkg.changes["architecture"].has_key("source"):
+ for orig_file in u.pkg.orig_files.keys():
+ if not u.pkg.orig_files[orig_file].has_key("id"):
+ continue # Skip if it's not in the pool
+ orig_file_id = u.pkg.orig_files[orig_file]["id"]
+ if u.pkg.orig_files[orig_file]["location"] == dsc_location_id:
+ continue # Skip if the location didn't change
+
+ # Do the move
+ oldf = get_poolfile_by_id(orig_file_id, session)
+ old_filename = os.path.join(oldf.location.path, oldf.filename)
+ old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum,
+ 'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
+
+ new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
+
+ # TODO: Care about size/md5sum collisions etc
+ (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session)
+
+ if newf is None:
+ utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
+ newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
+
+ # TODO: Check that there's only 1 here
+ source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0]
+ dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
+ dscf.poolfile_id = newf.file_id
+ session.add(dscf)
+ session.flush()
# Install the files into the pool
for newfile, entry in u.pkg.files.items():
os.unlink(dest)
os.symlink(src, dest)
- # Update last_used on any non-upload .orig.tar.gz symlink
- if u.pkg.orig_tar_id:
+ # Update last_used on any non-uploaded .orig symlink
+ for orig_file in u.pkg.orig_files.keys():
# Determine the .orig.tar.gz file name
- for dsc_file in u.pkg.dsc_files.keys():
- if dsc_file.endswith(".orig.tar.gz"):
- u.pkg.orig_tar_gz = os.path.join(dest_dir, dsc_file)
+ if not u.pkg.orig_files[orig_file].has_key("id"):
+ continue # Skip files not in the pool
+ # XXX: do we really want to update the orig_files dict here
+ # instead of using a temporary variable?
+ u.pkg.orig_files[orig_file]["path"] = os.path.join(dest_dir, orig_file)
# Remove it from the list of packages for later processing by apt-ftparchive
- qb = get_queue_build(u.pkg.orig_tar_gz, suite.suite_id, session)
+ qb = get_queue_build(u.pkg.orig_files[orig_file]["path"], suite.suite_id, session)
if qb:
qb.in_queue = False
qb.last_used = now_date
try:
check_daily_lock()
done = add_overrides (new, upload, session)
- Logger.log([utils.getusername(), "NEW ACCEPT: %s" % (upload.pkg.changes_file)])
+ Logger.log(["NEW ACCEPT: %s" % (upload.pkg.changes_file)])
except CantGetLockError:
print "Hello? Operator! Give me the number for 911!"
print "Dinstall in the locked area, cant process packages, come back later"
reject_message=Options["Manual-Reject"],
note=get_new_comments(changes.get("source", ""), session=session))
if not aborted:
- Logger.log([utils.getusername(), "NEW REJECT: %s" % (upload.pkg.changes_file)])
+ Logger.log(["NEW REJECT: %s" % (upload.pkg.changes_file)])
os.unlink(upload.pkg.changes_file[:-8]+".dak")
done = 1
elif answer == 'N':
elif answer == 'P' and not Options["Trainee"]:
prod_maintainer(get_new_comments(changes.get("source", ""), session=session),
upload)
- Logger.log([utils.getusername(), "NEW PROD: %s" % (upload.pkg.changes_file)])
+ Logger.log(["NEW PROD: %s" % (upload.pkg.changes_file)])
elif answer == 'R' and not Options["Trainee"]:
confirm = utils.our_raw_input("Really clear note (y/N)? ").lower()
if confirm == "y":
done = 1
for f in byhand:
del files[f]
- Logger.log([utils.getusername(), "BYHAND ACCEPT: %s" % (upload.pkg.changes_file)])
+ Logger.log(["BYHAND ACCEPT: %s" % (upload.pkg.changes_file)])
except CantGetLockError:
print "Hello? Operator! Give me the number for 911!"
print "Dinstall in the locked area, cant process packages, come back later"
elif answer == 'M':
- Logger.log([utils.getusername(), "BYHAND REJECT: %s" % (upload.pkg.changes_file)])
+ Logger.log(["BYHAND REJECT: %s" % (upload.pkg.changes_file)])
upload.do_reject(manual=1, reject_message=Options["Manual-Reject"])
os.unlink(upload.pkg.changes_file[:-8]+".dak")
done = 1
if accept_count > 1:
sets = "sets"
sys.stderr.write("Accepted %d package %s, %s.\n" % (accept_count, sets, utils.size_type(int(accept_bytes))))
- Logger.log([utils.getusername(), "total",accept_count,accept_bytes])
+ Logger.log(["total",accept_count,accept_bytes])
if not Options["No-Action"] and not Options["Trainee"]:
Logger.close()
################################################################################
-import commands
import errno
import fcntl
import os
-import re
-import shutil
-import stat
import sys
-import time
import traceback
-import tarfile
-import apt_inst
import apt_pkg
-from debian_bundle import deb822
from daklib.dbconn import *
-from daklib.binary import Binary
from daklib import daklog
from daklib.queue import *
from daklib import utils
u.pkg.write_dot_dak(dir)
u.move_to_dir(dir, perms=perms)
if build:
- get_queue(queue.lower()).autobuild_upload(u.pkg, dir)
+ get_or_set_queue(queue.lower()).autobuild_upload(u.pkg, dir)
# Check for override disparities
u.check_override()
valid_dsc_p = u.check_dsc(not Options["No-Action"])
if valid_dsc_p:
u.check_source()
+ u.check_lintian()
u.check_hashes()
u.check_urgency()
u.check_timestamps()
from copy import copy
import glob, os, stat, sys, time
import apt_pkg
-import cgi
from daklib import utils
from daklib.changes import Changes
import commands
import os
-import re
import sys
import apt_pkg
import apt_inst
logfile.write("----------------------------------------------\n")
logfile.flush()
- dsc_type_id = get_override_type('dsc', session)
- deb_type_id = get_override_type('deb', session)
+ dsc_type_id = get_override_type('dsc', session).overridetype_id
+ deb_type_id = get_override_type('deb', session).overridetype_id
# Do the actual deletion
print "Deleting...",
import sys, os, re, time
import apt_pkg
-import tempfile
from debian_bundle import deb822
from daklib.dbconn import *
from daklib import utils
from daklib.queue import determine_new, check_valid
from daklib import utils
+from daklib.regexes import re_source_ext
# Globals
Cnf = None
filestoexamine = []
for pkg in new.keys():
for fn in new[pkg]["files"]:
- if ( c.files[fn].has_key("new") and not
- c.files[fn]["type"] in [ "orig.tar.gz", "orig.tar.bz2", "tar.gz", "tar.bz2", "diff.gz", "diff.bz2"] ):
+ if (c.files[fn].has_key("new") and
+ (c.files[fn]["type"] == "dsc" or
+ not re_source_ext.match(c.files[fn]["type"]))):
filestoexamine.append(fn)
html_header(c.changes["source"], filestoexamine)
import apt_pkg
from daklib import utils
-from daklib.dbconn import DBConn, get_suite_architectures, Suite, Architecture, \
- BinAssociation
+from daklib.dbconn import DBConn, get_suite_architectures, Suite, Architecture
################################################################################
import errno
import fcntl
import tempfile
-import pwd
import apt_pkg
from daklib.dbconn import *
################################################################################
Cnf = None
-required_database_schema = 14
+required_database_schema = 15
################################################################################
__all__.append('CHANGESFIELDS_DSCFILES_OPTIONAL')
+CHANGESFIELDS_ORIGFILES = [ "id", "location" ]
+
+__all__.append('CHANGESFIELDS_ORIGFILES')
+
###############################################################################
class Changes(object):
self.dsc = {}
self.files = {}
self.dsc_files = {}
-
- self.orig_tar_id = None
- self.orig_tar_location = ""
- self.orig_tar_gz = None
+ self.orig_files = {}
def file_summary(self):
# changes["distribution"] may not exist in corner cases
self.files.update(p.load())
self.dsc_files.update(p.load())
- self.orig_tar_id = p.load()
- self.orig_tar_location = p.load()
+ next_obj = p.load()
+ if isinstance(next_obj, dict):
+ self.orig_files.update(next_obj)
+ else:
+ # Auto-convert old dak files to new format supporting
+ # multiple tarballs
+ orig_tar_gz = None
+ for dsc_file in self.dsc_files.keys():
+ if dsc_file.endswith(".orig.tar.gz"):
+ orig_tar_gz = dsc_file
+ self.orig_files[orig_tar_gz] = {}
+ if next_obj != None:
+ self.orig_files[orig_tar_gz]["id"] = next_obj
+ next_obj = p.load()
+ if next_obj != None and next_obj != "":
+ self.orig_files[orig_tar_gz]["location"] = next_obj
+ if len(self.orig_files[orig_tar_gz]) == 0:
+ del self.orig_files[orig_tar_gz]
dump_file.close()
return ret
+ def sanitised_orig_files(self):
+ ret = {}
+ for name, entry in self.orig_files.items():
+ ret[name] = {}
+ # Optional orig_files fields
+ for i in CHANGESFIELDS_ORIGFILES:
+ if entry.has_key(i):
+ ret[name][i] = entry[i]
+
+ return ret
+
def write_dot_dak(self, dest_dir):
"""
Dump ourself into a cPickle file.
p.dump(self.sanitised_dsc())
p.dump(self.sanitised_files())
p.dump(self.sanitised_dsc_files())
- p.dump(self.orig_tar_id)
- p.dump(self.orig_tar_location)
+ p.dump(self.sanitised_orig_files())
dump_file.close()
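
For clarity, the orig_files structure written and read above has this shape: one entry per upstream tarball, each optionally carrying the "id" and "location" fields from CHANGESFIELDS_ORIGFILES. The values here are illustrative only.

    # Illustrative only; names and values are made up.
    orig_files = {
        "foo_1.0.orig.tar.gz": {"id": 4711, "location": 1},
        "foo_1.0.orig-docs.tar.gz": {},  # known, but not (yet) in the pool
    }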
logfile = utils.open_file(logfilename, 'a')
os.umask(umask)
self.logfile = logfile
- # Log the start of the program
- user = pwd.getpwuid(os.getuid())[0]
- self.log(["program start", user])
+ self.log(["program start"])
def log (self, details):
"Log an event"
- # Prepend the timestamp and program name
+ # Prepend timestamp, program name, and user name
+ details.insert(0, utils.getusername())
details.insert(0, self.program)
timestamp = time.strftime("%Y%m%d%H%M%S")
details.insert(0, timestamp)
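
With the user name now added by log() itself, callers no longer pass utils.getusername() (hence the Logger.log() simplifications earlier in this series). Assuming dak's log lines are pipe-separated, the result is shaped roughly like this sketch; timestamp and user values are illustrative.

    # Sketch of the resulting line layout (the separator is an assumption).
    import time

    def format_log_line(program, user, details):
        timestamp = time.strftime("%Y%m%d%H%M%S")
        return "|".join([timestamp, program, user] + details)

    # format_log_line("process-new", "joerg", ["NEW ACCEPT: foo.changes"])
    # -> "20091101120000|process-new|joerg|NEW ACCEPT: foo.changes"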
+++ /dev/null
-#!/usr/bin/env python
-
-""" DB access functions
-@group readonly: get_suite_id, get_section_id, get_priority_id, get_override_type_id,
- get_architecture_id, get_archive_id, get_component_id, get_location_id,
- get_source_id, get_suite_version, get_files_id, get_maintainer, get_suites,
- get_suite_architectures, get_new_comments, has_new_comment
-@group read/write: get_or_set*, set_files_id
-@group writeonly: add_new_comment, delete_new_comments
-
-@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
-@copyright: 2009 Joerg Jaspert <joerg@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-################################################################################
-
-import sys
-import time
-import types
-import utils
-import pg
-
-################################################################################
-
-Cnf = None #: Configuration, apt_pkg.Configuration
-projectB = None #: database connection, pgobject
-suite_id_cache = {} #: cache for suites
-section_id_cache = {} #: cache for sections
-priority_id_cache = {} #: cache for priorities
-override_type_id_cache = {} #: cache for overrides
-architecture_id_cache = {} #: cache for architectures
-archive_id_cache = {} #: cache for archives
-component_id_cache = {} #: cache for components
-location_id_cache = {} #: cache for locations
-maintainer_id_cache = {} #: cache for maintainers
-keyring_id_cache = {} #: cache for keyrings
-source_id_cache = {} #: cache for sources
-
-files_id_cache = {} #: cache for files
-maintainer_cache = {} #: cache for maintainer names
-fingerprint_id_cache = {} #: cache for fingerprints
-queue_id_cache = {} #: cache for queues
-uid_id_cache = {} #: cache for uids
-suite_version_cache = {} #: cache for suite_versions (packages)
-suite_bin_version_cache = {}
-cache_preloaded = False
-
-################################################################################
-
-def init (config, sql):
- """
- database module init.
-
- @type config: apt_pkg.Configuration
- @param config: apt config, see U{http://apt.alioth.debian.org/python-apt-doc/apt_pkg/cache.html#Configuration}
-
- @type sql: pgobject
- @param sql: database connection
-
- """
- global Cnf, projectB
-
- Cnf = config
- projectB = sql
-
-################################################################################
-
-def get_suite_id (suite):
- """
- Returns database id for given C{suite}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type suite: string
- @param suite: The name of the suite
-
- @rtype: int
- @return: the database id for the given suite
-
- """
- global suite_id_cache
-
- if suite_id_cache.has_key(suite):
- return suite_id_cache[suite]
-
- q = projectB.query("SELECT id FROM suite WHERE suite_name = '%s'" % (suite))
- ql = q.getresult()
- if not ql:
- return -1
-
- suite_id = ql[0][0]
- suite_id_cache[suite] = suite_id
-
- return suite_id
-
-def get_section_id (section):
- """
- Returns database id for given C{section}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type section: string
- @param section: The name of the section
-
- @rtype: int
- @return: the database id for the given section
-
- """
- global section_id_cache
-
- if section_id_cache.has_key(section):
- return section_id_cache[section]
-
- q = projectB.query("SELECT id FROM section WHERE section = '%s'" % (section))
- ql = q.getresult()
- if not ql:
- return -1
-
- section_id = ql[0][0]
- section_id_cache[section] = section_id
-
- return section_id
-
-def get_priority_id (priority):
- """
- Returns database id for given C{priority}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type priority: string
- @param priority: The name of the priority
-
- @rtype: int
- @return: the database id for the given priority
-
- """
- global priority_id_cache
-
- if priority_id_cache.has_key(priority):
- return priority_id_cache[priority]
-
- q = projectB.query("SELECT id FROM priority WHERE priority = '%s'" % (priority))
- ql = q.getresult()
- if not ql:
- return -1
-
- priority_id = ql[0][0]
- priority_id_cache[priority] = priority_id
-
- return priority_id
-
-def get_override_type_id (type):
- """
- Returns database id for given override C{type}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type type: string
- @param type: The name of the override type
-
- @rtype: int
- @return: the database id for the given override type
-
- """
- global override_type_id_cache
-
- if override_type_id_cache.has_key(type):
- return override_type_id_cache[type]
-
- q = projectB.query("SELECT id FROM override_type WHERE type = '%s'" % (type))
- ql = q.getresult()
- if not ql:
- return -1
-
- override_type_id = ql[0][0]
- override_type_id_cache[type] = override_type_id
-
- return override_type_id
-
-def get_architecture_id (architecture):
- """
- Returns database id for given C{architecture}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type architecture: string
- @param architecture: The name of the override type
-
- @rtype: int
- @return: the database id for the given architecture
-
- """
- global architecture_id_cache
-
- if architecture_id_cache.has_key(architecture):
- return architecture_id_cache[architecture]
-
- q = projectB.query("SELECT id FROM architecture WHERE arch_string = '%s'" % (architecture))
- ql = q.getresult()
- if not ql:
- return -1
-
- architecture_id = ql[0][0]
- architecture_id_cache[architecture] = architecture_id
-
- return architecture_id
-
-def get_archive_id (archive):
- """
- Returns database id for given C{archive}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type archive: string
- @param archive: The name of the override type
-
- @rtype: int
- @return: the database id for the given archive
-
- """
- global archive_id_cache
-
- archive = archive.lower()
-
- if archive_id_cache.has_key(archive):
- return archive_id_cache[archive]
-
- q = projectB.query("SELECT id FROM archive WHERE lower(name) = '%s'" % (archive))
- ql = q.getresult()
- if not ql:
- return -1
-
- archive_id = ql[0][0]
- archive_id_cache[archive] = archive_id
-
- return archive_id
-
-def get_component_id (component):
- """
- Returns database id for given C{component}.
- Results are kept in a cache during runtime to minimize database queries.
-
- @type component: string
- @param component: The name of the component
-
- @rtype: int
- @return: the database id for the given component
-
- """
- global component_id_cache
-
- component = component.lower()
-
- if component_id_cache.has_key(component):
- return component_id_cache[component]
-
- q = projectB.query("SELECT id FROM component WHERE lower(name) = '%s'" % (component))
- ql = q.getresult()
- if not ql:
- return -1
-
- component_id = ql[0][0]
- component_id_cache[component] = component_id
-
- return component_id
-
-def get_location_id (location, component, archive):
- """
- Returns database id for the location behind the given combination of
- - B{location} - the path of the location, eg. I{/srv/ftp.debian.org/ftp/pool/}
- - B{component} - the id of the component as returned by L{get_component_id}
- - B{archive} - the id of the archive as returned by L{get_archive_id}
- Results are kept in a cache during runtime to minimize database queries.
-
- @type location: string
- @param location: the path of the location
-
- @type component: int
- @param component: the id of the component
-
- @type archive: int
- @param archive: the id of the archive
-
- @rtype: int
- @return: the database id for the location
-
- """
- global location_id_cache
-
- cache_key = location + '_' + component + '_' + location
- if location_id_cache.has_key(cache_key):
- return location_id_cache[cache_key]
-
- archive_id = get_archive_id (archive)
- if component != "":
- component_id = get_component_id (component)
- if component_id != -1:
- q = projectB.query("SELECT id FROM location WHERE path = '%s' AND component = %d AND archive = %d" % (location, component_id, archive_id))
- else:
- q = projectB.query("SELECT id FROM location WHERE path = '%s' AND archive = %d" % (location, archive_id))
- ql = q.getresult()
- if not ql:
- return -1
-
- location_id = ql[0][0]
- location_id_cache[cache_key] = location_id
-
- return location_id
-
-def get_source_id (source, version):
- """
- Returns database id for the combination of C{source} and C{version}
- - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
- - B{version}
- Results are kept in a cache during runtime to minimize database queries.
-
- @type source: string
- @param source: source package name
-
- @type version: string
- @param version: the source version
-
- @rtype: int
- @return: the database id for the source
-
- """
- global source_id_cache
-
- cache_key = source + '_' + version + '_'
- if source_id_cache.has_key(cache_key):
- return source_id_cache[cache_key]
-
- q = projectB.query("SELECT id FROM source s WHERE s.source = '%s' AND s.version = '%s'" % (source, version))
-
- if not q.getresult():
- return None
-
- source_id = q.getresult()[0][0]
- source_id_cache[cache_key] = source_id
-
- return source_id
-
-def get_suite_version(source, suite):
- """
- Returns database id for a combination of C{source} and C{suite}.
-
- - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
- - B{suite} - a suite name, eg. I{unstable}
-
- Results are kept in a cache during runtime to minimize database queries.
-
- @type source: string
- @param source: source package name
-
- @type suite: string
- @param suite: the suite name
-
- @rtype: string
- @return: the version for I{source} in I{suite}
-
- """
-
- global suite_version_cache
- cache_key = "%s_%s" % (source, suite)
-
- if suite_version_cache.has_key(cache_key):
- return suite_version_cache[cache_key]
-
- q = projectB.query("""
- SELECT s.version FROM source s, suite su, src_associations sa
- WHERE sa.source=s.id
- AND sa.suite=su.id
- AND su.suite_name='%s'
- AND s.source='%s'"""
- % (suite, source))
-
- if not q.getresult():
- return None
-
- version = q.getresult()[0][0]
- suite_version_cache[cache_key] = version
-
- return version
-
-def get_latest_binary_version_id(binary, section, suite, arch):
- global suite_bin_version_cache
- cache_key = "%s_%s_%s_%s" % (binary, section, suite, arch)
- cache_key_all = "%s_%s_%s_%s" % (binary, section, suite, get_architecture_id("all"))
-
- # Check for the cache hit for its arch, then arch all
- if suite_bin_version_cache.has_key(cache_key):
- return suite_bin_version_cache[cache_key]
- if suite_bin_version_cache.has_key(cache_key_all):
- return suite_bin_version_cache[cache_key_all]
- if cache_preloaded == True:
- return # package does not exist
-
- q = projectB.query("SELECT DISTINCT b.id FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.package = '%s' AND b.architecture = '%d' AND ba.suite = '%d' AND o.section = '%d'" % (binary, int(arch), int(suite), int(section)))
-
- if not q.getresult():
- return False
-
- highest_bid = q.getresult()[0][0]
-
- suite_bin_version_cache[cache_key] = highest_bid
- return highest_bid
-
-def preload_binary_id_cache():
- global suite_bin_version_cache, cache_preloaded
-
- # Get suite info
- q = projectB.query("SELECT id FROM suite")
- suites = q.getresult()
-
- # Get arch mappings
- q = projectB.query("SELECT id FROM architecture")
- arches = q.getresult()
-
- for suite in suites:
- for arch in arches:
- q = projectB.query("SELECT DISTINCT b.id, b.package, o.section FROM binaries b JOIN bin_associations ba ON (b.id = ba.bin) JOIN override o ON (o.package=b.package) WHERE b.architecture = '%d' AND ba.suite = '%d'" % (int(arch[0]), int(suite[0])))
-
- for bi in q.getresult():
- cache_key = "%s_%s_%s_%s" % (bi[1], bi[2], suite[0], arch[0])
- suite_bin_version_cache[cache_key] = int(bi[0])
-
- cache_preloaded = True
-
-def get_suite_architectures(suite):
- """
- Returns list of architectures for C{suite}.
-
- @type suite: string, int
- @param suite: the suite name or the suite_id
-
- @rtype: list
- @return: the list of architectures for I{suite}
- """
-
- suite_id = None
- if type(suite) == str:
- suite_id = get_suite_id(suite)
- elif type(suite) == int:
- suite_id = suite
- else:
- return None
-
- sql = """ SELECT a.arch_string FROM suite_architectures sa
- JOIN architecture a ON (a.id = sa.architecture)
- WHERE suite='%s' """ % (suite_id)
-
- q = projectB.query(sql)
- return map(lambda x: x[0], q.getresult())
-
-def get_suite_untouchable(suite):
- """
- Returns true if the C{suite} is untouchable, otherwise false.
-
- @type suite: string, int
- @param suite: the suite name or the suite_id
-
- @rtype: boolean
- @return: status of suite
- """
-
- suite_id = None
- if type(suite) == str:
- suite_id = get_suite_id(suite.lower())
- elif type(suite) == int:
- suite_id = suite
- else:
- return None
-
- sql = """ SELECT untouchable FROM suite WHERE id='%s' """ % (suite_id)
-
- q = projectB.query(sql)
- if q.getresult()[0][0] == "f":
- return False
- else:
- return True
-
-################################################################################
-
-def get_or_set_maintainer_id (maintainer):
- """
- If C{maintainer} does not have an entry in the maintainer table yet, create one
- and return the new id.
- If C{maintainer} already has an entry, simply return the existing id.
-
- Results are kept in a cache during runtime to minimize database queries.
-
- @type maintainer: string
- @param maintainer: the maintainer name
-
- @rtype: int
- @return: the database id for the maintainer
-
- """
- global maintainer_id_cache
-
- if maintainer_id_cache.has_key(maintainer):
- return maintainer_id_cache[maintainer]
-
- q = projectB.query("SELECT id FROM maintainer WHERE name = '%s'" % (maintainer))
- if not q.getresult():
- projectB.query("INSERT INTO maintainer (name) VALUES ('%s')" % (maintainer))
- q = projectB.query("SELECT id FROM maintainer WHERE name = '%s'" % (maintainer))
- maintainer_id = q.getresult()[0][0]
- maintainer_id_cache[maintainer] = maintainer_id
-
- return maintainer_id
-
-################################################################################
-
-def get_or_set_keyring_id (keyring):
- """
- If C{keyring} does not have an entry in the C{keyrings} table yet, create one
- and return the new id.
- If C{keyring} already has an entry, simply return the existing id.
-
- Results are kept in a cache during runtime to minimize database queries.
-
- @type keyring: string
- @param keyring: the keyring name
-
- @rtype: int
- @return: the database id for the keyring
-
- """
- global keyring_id_cache
-
- if keyring_id_cache.has_key(keyring):
- return keyring_id_cache[keyring]
-
- q = projectB.query("SELECT id FROM keyrings WHERE name = '%s'" % (keyring))
- if not q.getresult():
- projectB.query("INSERT INTO keyrings (name) VALUES ('%s')" % (keyring))
- q = projectB.query("SELECT id FROM keyrings WHERE name = '%s'" % (keyring))
- keyring_id = q.getresult()[0][0]
- keyring_id_cache[keyring] = keyring_id
-
- return keyring_id
-
-################################################################################
-
-def get_or_set_uid_id (uid):
- """
- If C{uid} does not have an entry in the uid table yet, create one
- and return the new id.
- If C{uid} already has an entry, simply return the existing id.
-
- Results are kept in a cache during runtime to minimize database queries.
-
- @type uid: string
- @param uid: the uid.
-
- @rtype: int
- @return: the database id for the uid
-
- """
-
- global uid_id_cache
-
- if uid_id_cache.has_key(uid):
- return uid_id_cache[uid]
-
- q = projectB.query("SELECT id FROM uid WHERE uid = '%s'" % (uid))
- if not q.getresult():
- projectB.query("INSERT INTO uid (uid) VALUES ('%s')" % (uid))
- q = projectB.query("SELECT id FROM uid WHERE uid = '%s'" % (uid))
- uid_id = q.getresult()[0][0]
- uid_id_cache[uid] = uid_id
-
- return uid_id
-
-################################################################################
-
-def get_or_set_fingerprint_id (fingerprint):
- """
- If C{fingerprint} does not have an entry in the fingerprint table yet, create one
- and return the new id.
- If C{fingerprint} already has an entry, simply return the existing id.
-
- Results are kept in a cache during runtime to minimize database queries.
-
- @type fingerprint: string
- @param fingerprint: the fingerprint
-
- @rtype: int
- @return: the database id for the fingerprint
-
- """
- global fingerprint_id_cache
-
- if fingerprint_id_cache.has_key(fingerprint):
- return fingerprint_id_cache[fingerprint]
-
- q = projectB.query("SELECT id FROM fingerprint WHERE fingerprint = '%s'" % (fingerprint))
- if not q.getresult():
- projectB.query("INSERT INTO fingerprint (fingerprint) VALUES ('%s')" % (fingerprint))
- q = projectB.query("SELECT id FROM fingerprint WHERE fingerprint = '%s'" % (fingerprint))
- fingerprint_id = q.getresult()[0][0]
- fingerprint_id_cache[fingerprint] = fingerprint_id
-
- return fingerprint_id
-
-################################################################################
-
-def get_files_id (filename, size, md5sum, location_id):
- """
- Returns -1, -2 or the file_id for filename, if its C{size} and C{md5sum} match an
- existing copy.
-
- The database is queried using the C{filename} and C{location_id}. If a file does exist
- at that location, the existing size and md5sum are checked against the provided
- parameters. A size or checksum mismatch returns -2. If more than one entry is
- found within the database, a -1 is returned, no result returns None, otherwise
- the file id.
-
- Results are kept in a cache during runtime to minimize database queries.
-
- @type filename: string
- @param filename: the filename of the file to check against the DB
-
- @type size: int
- @param size: the size of the file to check against the DB
-
- @type md5sum: string
- @param md5sum: the md5sum of the file to check against the DB
-
- @type location_id: int
- @param location_id: the id of the location as returned by L{get_location_id}
-
- @rtype: int / None
- @return: Various return values are possible:
- - -2: size/checksum error
- - -1: more than one file found in database
- - None: no file found in database
- - int: file id
-
- """
- global files_id_cache
-
- cache_key = "%s_%d" % (filename, location_id)
-
- if files_id_cache.has_key(cache_key):
- return files_id_cache[cache_key]
-
- size = int(size)
- q = projectB.query("SELECT id, size, md5sum FROM files WHERE filename = '%s' AND location = %d" % (filename, location_id))
- ql = q.getresult()
- if ql:
- if len(ql) != 1:
- return -1
- ql = ql[0]
- orig_size = int(ql[1])
- orig_md5sum = ql[2]
- if orig_size != size or orig_md5sum != md5sum:
- return -2
- files_id_cache[cache_key] = ql[0]
- return files_id_cache[cache_key]
- else:
- return None
-
-################################################################################
-
-def get_or_set_queue_id (queue):
- """
- If C{queue} does not have an entry in the queue table yet, create one
- and return the new id.
- If C{queue} already has an entry, simply return the existing id.
-
- Results are kept in a cache during runtime to minimize database queries.
-
- @type queue: string
- @param queue: the queue name (no full path)
-
- @rtype: int
- @return: the database id for the queue
-
- """
- global queue_id_cache
-
- if queue_id_cache.has_key(queue):
- return queue_id_cache[queue]
-
- q = projectB.query("SELECT id FROM queue WHERE queue_name = '%s'" % (queue))
- if not q.getresult():
- projectB.query("INSERT INTO queue (queue_name) VALUES ('%s')" % (queue))
- q = projectB.query("SELECT id FROM queue WHERE queue_name = '%s'" % (queue))
- queue_id = q.getresult()[0][0]
- queue_id_cache[queue] = queue_id
-
- return queue_id
-
-################################################################################
-
-def set_files_id (filename, size, md5sum, sha1sum, sha256sum, location_id):
- """
- Insert a new entry into the files table and return its id.
-
- @type filename: string
- @param filename: the filename
-
- @type size: int
- @param size: the size in bytes
-
- @type md5sum: string
- @param md5sum: md5sum of the file
-
- @type sha1sum: string
- @param sha1sum: sha1sum of the file
-
- @type sha256sum: string
- @param sha256sum: sha256sum of the file
-
- @type location_id: int
- @param location_id: the id of the location as returned by L{get_location_id}
-
- @rtype: int
- @return: the database id for the new file
-
- """
- global files_id_cache
-
- projectB.query("INSERT INTO files (filename, size, md5sum, sha1sum, sha256sum, location) VALUES ('%s', %d, '%s', '%s', '%s', %d)" % (filename, long(size), md5sum, sha1sum, sha256sum, location_id))
-
- return get_files_id (filename, size, md5sum, location_id)
-
- ### currval has issues with postgresql 7.1.3 when the table is big
- ### it was taking ~3 seconds to return on auric which is very Not
- ### Cool(tm).
- ##
- ##q = projectB.query("SELECT id FROM files WHERE id = currval('files_id_seq')")
- ##ql = q.getresult()[0]
- ##cache_key = "%s_%d" % (filename, location_id)
- ##files_id_cache[cache_key] = ql[0]
- ##return files_id_cache[cache_key]
-
-################################################################################
-
-def get_maintainer (maintainer_id):
- """
- Return the name of the maintainer behind C{maintainer_id}.
-
- Results are kept in a cache during runtime to minimize database queries.
-
- @type maintainer_id: int
- @param maintainer_id: the id of the maintainer, eg. from L{get_or_set_maintainer_id}
-
- @rtype: string
- @return: the name of the maintainer
-
- """
- global maintainer_cache
-
- if not maintainer_cache.has_key(maintainer_id):
- q = projectB.query("SELECT name FROM maintainer WHERE id = %s" % (maintainer_id))
- maintainer_cache[maintainer_id] = q.getresult()[0][0]
-
- return maintainer_cache[maintainer_id]
-
-################################################################################
-
-def get_suites(pkgname, src=False):
- """
- Return the suites in which C{pkgname} can be found. If C{src} is True query for source
- package, else binary package.
-
- @type pkgname: string
- @param pkgname: name of the package
-
- @type src: bool
- @param src: if True look for source packages, false (default) looks for binary.
-
- @rtype: list
- @return: list of suites, or empty list if no match
-
- """
- if src:
- sql = """
- SELECT suite_name
- FROM source,
- src_associations,
- suite
- WHERE source.id = src_associations.source
- AND source.source = '%s'
- AND src_associations.suite = suite.id
- """ % (pkgname)
- else:
- sql = """
- SELECT suite_name
- FROM binaries,
- bin_associations,
- suite
- WHERE binaries.id = bin_associations.bin
- AND package = '%s'
- AND bin_associations.suite = suite.id
- """ % (pkgname)
-
- q = projectB.query(sql)
- return map(lambda x: x[0], q.getresult())
-
-
-################################################################################
-
-def get_new_comments(package):
- """
- Returns all the possible comments attached to C{package} in NEW. All versions.
-
- @type package: string
- @param package: name of the package
-
- @rtype: list
- @return: list of strings containing comments for all versions from all authors for package
- """
-
- comments = []
- query = projectB.query(""" SELECT version, comment, author, notedate
- FROM new_comments
- WHERE package = '%s'
- ORDER BY notedate
- """ % (package))
-
- for row in query.getresult():
- comments.append("\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s\n" % (row[2], row[0], row[3], row[1]))
- comments.append("-"*72)
-
- return comments
-
-def has_new_comment(package, version, ignore_trainee=False):
- """
- Returns true if the given combination of C{package}, C{version} has a comment.
- If C{ignore_trainee} is true, comments from a trainee are ignored.
-
- @type package: string
- @param package: name of the package
-
- @type version: string
- @param version: package version
-
- @type ignore_trainee: boolean
- @param ignore_trainee: ignore trainee comments
-
- @rtype: boolean
- @return: true/false
- """
-
- trainee=""
- if ignore_trainee:
- trainee='AND trainee=false'
-
- exists = projectB.query("""SELECT 1 FROM new_comments
- WHERE package='%s'
- AND version='%s'
- %s
- LIMIT 1"""
- % (package, version, trainee) ).getresult()
-
- if not exists:
- return False
- else:
- return True
-
-def add_new_comment(package, version, comment, author, trainee=False):
- """
- Add a new comment for C{package}, C{version} written by C{author}
-
- @type package: string
- @param package: name of the package
-
- @type version: string
- @param version: package version
-
- @type comment: string
- @param comment: the comment
-
- @type author: string
- @param author: the authorname
-
- @type trainee: boolean
- @param trainee: trainee comment
- """
-
- projectB.query(""" INSERT INTO new_comments (package, version, comment, author, trainee)
- VALUES ('%s', '%s', '%s', '%s', '%s')
- """ % (package, version, pg.escape_string(comment), pg.escape_string(author), trainee))
-
- return
-
-def delete_new_comments(package, version):
- """
- Delete a comment for C{package}, C{version}, if one exists
- """
-
- projectB.query(""" DELETE FROM new_comments
- WHERE package = '%s' AND version = '%s'
- """ % (package, version))
- return
-
-def delete_all_new_comments(package):
- """
- Delete all comments for C{package}, if they exist
- """
-
- projectB.query(""" DELETE FROM new_comments
- WHERE package = '%s'
- """ % (package))
- return
-
-################################################################################
-def copy_temporary_contents(package, version, arch, deb, reject):
- """
- copy the previously stored contents from the temp table to the permanant one
-
- during process-unchecked, the deb should have been scanned and the
- contents stored in pending_content_associations
- """
-
- # first see if contents exist:
-
- arch_id = get_architecture_id (arch)
-
- exists = projectB.query("""SELECT 1 FROM pending_content_associations
- WHERE package='%s'
- AND version='%s'
- AND architecture=%d LIMIT 1"""
- % (package, version, arch_id) ).getresult()
-
- if not exists:
- # This should NOT happen. We should have added contents
- # during process-unchecked. if it did, log an error, and send
- # an email.
- subst = {
- "__PACKAGE__": package,
- "__VERSION__": version,
- "__ARCH__": arch,
- "__TO_ADDRESS__": Cnf["Dinstall::MyAdminAddress"],
- "__DAK_ADDRESS__": Cnf["Dinstall::MyEmailAddress"] }
-
- message = utils.TemplateSubst(subst, Cnf["Dir::Templates"]+"/missing-contents")
- utils.send_mail( message )
-
- exists = Binary(deb, reject).scan_package()
-
- if exists:
- sql = """INSERT INTO content_associations(binary_pkg,filepath,filename)
- SELECT currval('binaries_id_seq'), filepath, filename FROM pending_content_associations
- WHERE package='%s'
- AND version='%s'
- AND architecture=%d""" % (package, version, arch_id)
- projectB.query(sql)
- projectB.query("""DELETE from pending_content_associations
- WHERE package='%s'
- AND version='%s'
- AND architecture=%d""" % (package, version, arch_id))
-
- return exists
from inspect import getargspec
-from sqlalchemy import create_engine, Table, MetaData, select
+from sqlalchemy import create_engine, Table, MetaData
from sqlalchemy.orm import sessionmaker, mapper, relation
# Don't remove this, we re-export the exceptions to scripts which import us
################################################################################
def session_wrapper(fn):
+ """
+    Wrapper around the common "..., session=None" argument handling. If the
+    wrapped function is called without passing 'session', we create a local
+    session and destroy it when the function ends.
+
+ Also attaches a commit_or_flush method to the session; if we created a
+ local session, this is a synonym for session.commit(), otherwise it is a
+ synonym for session.flush().
+ """
+
def wrapped(*args, **kwargs):
private_transaction = False
+
+ # Find the session object
session = kwargs.get('session')
- # No session specified as last argument or in kwargs, create one.
- if session is None and len(args) <= len(getargspec(fn)[0]) - 1:
- private_transaction = True
- kwargs['session'] = DBConn().session()
+ if session is None:
+ if len(args) <= len(getargspec(fn)[0]) - 1:
+ # No session specified as last argument or in kwargs
+ private_transaction = True
+ session = kwargs['session'] = DBConn().session()
+ else:
+ # Session is last argument in args
+ session = args[-1]
+ if session is None:
+ args = list(args)
+ session = args[-1] = DBConn().session()
+ private_transaction = True
+
+ if private_transaction:
+ session.commit_or_flush = session.commit
+ else:
+ session.commit_or_flush = session.flush
try:
return fn(*args, **kwargs)
finally:
if private_transaction:
# We created a session; close it.
- kwargs['session'].close()
+ session.close()
wrapped.__doc__ = fn.__doc__
wrapped.func_name = fn.func_name
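As an illustration of the contract the decorator establishes, a hypothetical function wrapped with it could look like the sketch below; get_thing and the Thing model are invented for the example:

@session_wrapper
def get_thing(name, session=None):
    # 'session' is always usable here: either the caller's own session or a
    # private one the wrapper created (and will close again afterwards).
    thing = Thing()   # hypothetical mapped class
    thing.name = name
    session.add(thing)
    # commits if the wrapper owns the session, flushes otherwise
    session.commit_or_flush()
    return thing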
__all__.append('ContentFilename')
+@session_wrapper
def get_or_set_contents_file_id(filename, session=None):
"""
Returns database id for given filename.
@rtype: int
@return: the database id for the given component
"""
- privatetrans = False
- if session is None:
- session = DBConn().session()
- privatetrans = True
q = session.query(ContentFilename).filter_by(filename=filename)
cf = ContentFilename()
cf.filename = filename
session.add(cf)
- if privatetrans:
- session.commit()
- else:
- session.flush()
+ session.commit_or_flush()
ret = cf.cafilename_id
- if privatetrans:
- session.close()
-
return ret
__all__.append('get_or_set_contents_file_id')
__all__.append('ContentFilepath')
+@session_wrapper
def get_or_set_contents_path_id(filepath, session=None):
"""
Returns database id for given path.
@rtype: int
@return: the database id for the given path
"""
- privatetrans = False
- if session is None:
- session = DBConn().session()
- privatetrans = True
q = session.query(ContentFilepath).filter_by(filepath=filepath)
cf = ContentFilepath()
cf.filepath = filepath
session.add(cf)
- if privatetrans:
- session.commit()
- else:
- session.flush()
+ session.commit_or_flush()
ret = cf.cafilepath_id
- if privatetrans:
- session.close()
-
return ret
__all__.append('get_or_set_contents_path_id')
__all__.append('Fingerprint')
+@session_wrapper
def get_or_set_fingerprint(fpr, session=None):
"""
Returns Fingerprint object for given fpr.
@rtype: Fingerprint
@return: the Fingerprint object for the given fpr
"""
- privatetrans = False
- if session is None:
- session = DBConn().session()
- privatetrans = True
q = session.query(Fingerprint).filter_by(fingerprint=fpr)
fingerprint = Fingerprint()
fingerprint.fingerprint = fpr
session.add(fingerprint)
- if privatetrans:
- session.commit()
- else:
- session.flush()
+ session.commit_or_flush()
ret = fingerprint
- if privatetrans:
- session.close()
-
return ret
__all__.append('get_or_set_fingerprint')
__all__.append('Keyring')
+@session_wrapper
def get_or_set_keyring(keyring, session=None):
"""
If C{keyring} does not have an entry in the C{keyrings} table yet, create one
@rtype: Keyring
@return: the Keyring object for this keyring
-
"""
- privatetrans = False
- if session is None:
- session = DBConn().session()
- privatetrans = True
-
- try:
- obj = session.query(Keyring).filter_by(keyring_name=keyring).first()
- if obj is None:
- obj = Keyring(keyring_name=keyring)
- session.add(obj)
- if privatetrans:
- session.commit()
- else:
- session.flush()
+ q = session.query(Keyring).filter_by(keyring_name=keyring)
+ try:
+ return q.one()
+ except NoResultFound:
+ obj = Keyring(keyring_name=keyring)
+ session.add(obj)
+ session.commit_or_flush()
return obj
- finally:
- if privatetrans:
- session.close()
__all__.append('get_or_set_keyring')
__all__.append('Maintainer')
+@session_wrapper
def get_or_set_maintainer(name, session=None):
"""
Returns Maintainer object for given maintainer name.
@rtype: Maintainer
@return: the Maintainer object for the given maintainer
"""
- privatetrans = False
- if session is None:
- session = DBConn().session()
- privatetrans = True
q = session.query(Maintainer).filter_by(name=name)
try:
maintainer = Maintainer()
maintainer.name = name
session.add(maintainer)
- if privatetrans:
- session.commit()
- else:
- session.flush()
+ session.commit_or_flush()
ret = maintainer
- if privatetrans:
- session.close()
-
return ret
__all__.append('get_or_set_maintainer')
+@session_wrapper
def get_maintainer(maintainer_id, session=None):
"""
Return the name of the maintainer behind C{maintainer_id} or None if that
@return: the Maintainer with this C{maintainer_id}
"""
- privatetrans = False
- if session is None:
- session = DBConn().session()
- privatetrans = True
-
- try:
- return session.query(Maintainer).get(maintainer_id)
- finally:
- if privatetrans:
- session.close()
+ return session.query(Maintainer).get(maintainer_id)
__all__.append('get_maintainer')
session.add(qb)
- # If the .orig.tar.gz is in the pool, create a symlink to
- # it (if one doesn't already exist)
- if changes.orig_tar_id:
- # Determine the .orig.tar.gz file name
- for dsc_file in changes.dsc_files.keys():
- if dsc_file.endswith(".orig.tar.gz"):
- filename = dsc_file
-
- dest = os.path.join(dest_dir, filename)
+ # If the .orig tarballs are in the pool, create a symlink to
+ # them (if one doesn't already exist)
+ from daklib.regexes import re_is_orig_source
+ for dsc_file in changes.dsc_files.keys():
+ # Skip all files except orig tarballs
+ if not re_is_orig_source.match(dsc_file):
+ continue
+ # Skip orig files not identified in the pool
+ if not (changes.orig_files.has_key(dsc_file) and
+ changes.orig_files[dsc_file].has_key("id")):
+ continue
+ orig_file_id = changes.orig_files[dsc_file]["id"]
+ dest = os.path.join(dest_dir, dsc_file)
# If it doesn't exist, create a symlink
if not os.path.exists(dest):
q = session.execute("SELECT l.path, f.filename FROM location l, files f WHERE f.id = :id and f.location = l.id",
- {'id': changes.orig_tar_id})
+ {'id': orig_file_id})
res = q.fetchone()
if not res:
- return "[INTERNAL ERROR] Couldn't find id %s in files table." % (changes.orig_tar_id)
+ return "[INTERNAL ERROR] Couldn't find id %s in files table." % (orig_file_id)
src = os.path.join(res[0], res[1])
os.symlink(src, dest)
__all__.append('Queue')
@session_wrapper
-def get_queue(queuename, session=None):
+def get_or_set_queue(queuename, session=None):
"""
- Returns Queue object for given C{queue name}.
+ Returns Queue object for given C{queue name}, creating it if it does not
+ exist.
@type queuename: string
@param queuename: The name of the queue
q = session.query(Queue).filter_by(queue_name=queuename)
try:
- return q.one()
+ ret = q.one()
except NoResultFound:
- return None
+ queue = Queue()
+ queue.queue_name = queuename
+ session.add(queue)
+ session.commit_or_flush()
+ ret = queue
-__all__.append('get_queue')
+ return ret
+
+__all__.append('get_or_set_queue')
################################################################################
################################################################################
+class SrcFormat(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<SrcFormat %s>' % (self.format_name)
+
+__all__.append('SrcFormat')
+
+################################################################################
+
class SrcUploader(object):
def __init__(self, *args, **kwargs):
pass
################################################################################
+class SuiteSrcFormat(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<SuiteSrcFormat (%s, %s)>' % (self.suite_id, self.src_format_id)
+
+__all__.append('SuiteSrcFormat')
+
+@session_wrapper
+def get_suite_src_formats(suite, session=None):
+ """
+ Returns list of allowed SrcFormat for C{suite}.
+
+ @type suite: str
+ @param suite: Suite name to search for
+
+ @type session: Session
+ @param session: Optional SQL session object (a temporary one will be
+ generated if not supplied)
+
+ @rtype: list
+ @return: the list of allowed source formats for I{suite}
+ """
+
+ q = session.query(SrcFormat)
+ q = q.join(SuiteSrcFormat)
+ q = q.join(Suite).filter_by(suite_name=suite)
+ q = q.order_by('format_name')
+
+ return q.all()
+
+__all__.append('get_suite_src_formats')
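For illustration, a caller could use the helper like this (assumes a populated database; the suite name and output are examples):

formats = get_suite_src_formats('unstable')
print [f.format_name for f in formats]   # e.g. ['1.0', '3.0 (native)', '3.0 (quilt)']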
+
+################################################################################
+
class Uid(object):
def __init__(self, *args, **kwargs):
pass
__all__.append('Uid')
+@session_wrapper
def add_database_user(uidname, session=None):
"""
Adds a database user
@return: the uid object for the given uidname
"""
- privatetrans = False
- if session is None:
- session = DBConn().session()
- privatetrans = True
-
session.execute("CREATE USER :uid", {'uid': uidname})
-
- if privatetrans:
- session.commit()
- session.close()
+ session.commit_or_flush()
__all__.append('add_database_user')
+@session_wrapper
def get_or_set_uid(uidname, session=None):
"""
Returns uid object for given uidname.
@return: the uid object for the given uidname
"""
- privatetrans = False
- if session is None:
- session = DBConn().session()
- privatetrans = True
-
q = session.query(Uid).filter_by(uid=uidname)
try:
uid = Uid()
uid.uid = uidname
session.add(uid)
- if privatetrans:
- session.commit()
- else:
- session.flush()
+ session.commit_or_flush()
ret = uid
- if privatetrans:
- session.close()
-
return ret
__all__.append('get_or_set_uid')
self.tbl_section = Table('section', self.db_meta, autoload=True)
self.tbl_source = Table('source', self.db_meta, autoload=True)
self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True)
+ self.tbl_src_format = Table('src_format', self.db_meta, autoload=True)
self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True)
self.tbl_suite = Table('suite', self.db_meta, autoload=True)
self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True)
+ self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True)
self.tbl_uid = Table('uid', self.db_meta, autoload=True)
def __setupmappers(self):
source_id = self.tbl_src_associations.c.source,
source = relation(DBSource)))
+ mapper(SrcFormat, self.tbl_src_format,
+ properties = dict(src_format_id = self.tbl_src_format.c.id,
+ format_name = self.tbl_src_format.c.format_name))
+
mapper(SrcUploader, self.tbl_src_uploaders,
properties = dict(uploader_id = self.tbl_src_uploaders.c.id,
source_id = self.tbl_src_uploaders.c.source,
arch_id = self.tbl_suite_architectures.c.architecture,
architecture = relation(Architecture)))
+ mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
+ properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
+ suite = relation(Suite, backref='suitesrcformats'),
+ src_format_id = self.tbl_suite_src_formats.c.src_format,
+ src_format = relation(SrcFormat)))
+
mapper(Uid, self.tbl_uid,
properties = dict(uid_id = self.tbl_uid.c.id,
fingerprint = relation(Fingerprint)))
###############################################################################
-import cPickle
import errno
import os
import pg
import commands
import shutil
import textwrap
+import tempfile
from types import *
import yaml
from holding import Holding
from dbconn import *
from summarystats import SummaryStats
-from utils import parse_changes
+from utils import parse_changes, check_dsc_files
from textutils import fix_maintainer
from binary import Binary
# Determine the type
if f.has_key("dbtype"):
file_type = f["dbtype"]
- elif f["type"] in [ "orig.tar.gz", "orig.tar.bz2", "tar.gz", "tar.bz2", "diff.gz", "diff.bz2", "dsc" ]:
+ elif re_source_ext.match(f["type"]):
file_type = "dsc"
else:
 utils.fubar("invalid type (%s) for new. Dazed, confused and sure as heck not continuing." % (f["type"]))
self.rejects.append("%s: changes file doesn't say %s for Source" % (f, entry["package"]))
# Ensure the source version matches the version in the .changes file
- if entry["type"] == "orig.tar.gz":
+ if re_is_orig_source.match(f):
changes_version = self.pkg.changes["chopversion2"]
else:
changes_version = self.pkg.changes["chopversion"]
self.rejects.append("source only uploads are not supported.")
###########################################################################
- def check_dsc(self, action=True):
+ def check_dsc(self, action=True, session=None):
"""Returns bool indicating whether or not the source changes are valid"""
# Ensure there is source to check
if not self.pkg.changes["architecture"].has_key("source"):
if not re_valid_version.match(self.pkg.dsc["version"]):
self.rejects.append("%s: invalid version number '%s'." % (dsc_filename, self.pkg.dsc["version"]))
- # Bumping the version number of the .dsc breaks extraction by stable's
- # dpkg-source. So let's not do that...
- if self.pkg.dsc["format"] != "1.0":
- self.rejects.append("%s: incompatible 'Format' version produced by a broken version of dpkg-dev 1.9.1{3,4}." % (dsc_filename))
+ # Only a limited list of source formats are allowed in each suite
+ for dist in self.pkg.changes["distribution"].keys():
+ allowed = [ x.format_name for x in get_suite_src_formats(dist, session) ]
+ if self.pkg.dsc["format"] not in allowed:
+ self.rejects.append("%s: source format '%s' not allowed in %s (accepted: %s)" % (dsc_filename, self.pkg.dsc["format"], dist, ", ".join(allowed)))
# Validate the Maintainer field
try:
for field_name in [ "build-depends", "build-depends-indep" ]:
field = self.pkg.dsc.get(field_name)
if field:
- # Check for broken dpkg-dev lossage...
- if field.startswith("ARRAY"):
- self.rejects.append("%s: invalid %s field produced by a broken version of dpkg-dev (1.10.11)" % \
- (dsc_filename, field_name.title()))
-
# Have apt try to parse them...
try:
apt_pkg.ParseSrcDepends(field)
if epochless_dsc_version != self.pkg.files[dsc_filename]["version"]:
self.rejects.append("version ('%s') in .dsc does not match version ('%s') in .changes." % (epochless_dsc_version, changes_version))
- # Ensure there is a .tar.gz in the .dsc file
- has_tar = False
- for f in self.pkg.dsc_files.keys():
- m = re_issource.match(f)
- if not m:
- self.rejects.append("%s: %s in Files field not recognised as source." % (dsc_filename, f))
- continue
- ftype = m.group(3)
- if ftype == "orig.tar.gz" or ftype == "tar.gz":
- has_tar = True
-
- if not has_tar:
- self.rejects.append("%s: no .tar.gz or .orig.tar.gz in 'Files' field." % (dsc_filename))
+ # Ensure the Files field contain only what's expected
+ self.rejects.extend(check_dsc_files(dsc_filename, self.pkg.dsc, self.pkg.dsc_files))
# Ensure source is newer than existing source in target suites
session = DBConn().session()
if not os.path.exists(src):
return
ftype = m.group(3)
- if ftype == "orig.tar.gz" and self.pkg.orig_tar_gz:
+ if re_is_orig_source.match(f) and self.pkg.orig_files.has_key(f) and \
+ self.pkg.orig_files[f].has_key("path"):
continue
dest = os.path.join(os.getcwd(), f)
os.symlink(src, dest)
- # If the orig.tar.gz is not a part of the upload, create a symlink to the
- # existing copy.
- if self.pkg.orig_tar_gz:
- dest = os.path.join(os.getcwd(), os.path.basename(self.pkg.orig_tar_gz))
- os.symlink(self.pkg.orig_tar_gz, dest)
+ # If the orig files are not a part of the upload, create symlinks to the
+ # existing copies.
+ for orig_file in self.pkg.orig_files.keys():
+ if not self.pkg.orig_files[orig_file].has_key("path"):
+ continue
+ dest = os.path.join(os.getcwd(), os.path.basename(orig_file))
+ os.symlink(self.pkg.orig_files[orig_file]["path"], dest)
# Extract the source
cmd = "dpkg-source -sn -x %s" % (dsc_filename)
(result, output) = commands.getstatusoutput(cmd)
if (result != 0):
self.rejects.append("'dpkg-source -x' failed for %s [return code: %s]." % (dsc_filename, result))
- self.rejects.append(utils.prefix_multi_line_string(output, " [dpkg-source output:] "), "")
+ self.rejects.append(utils.prefix_multi_line_string(output, " [dpkg-source output:] "))
return
if not cnf.Find("Dir::Queue::BTSVersionTrack"):
# We should probably scrap or rethink the whole reprocess thing
# Bail out if:
# a) there's no source
- # or b) reprocess is 2 - we will do this check next time when orig.tar.gz is in 'files'
- # or c) the orig.tar.gz is MIA
+ # or b) reprocess is 2 - we will do this check next time when orig
+ # tarball is in 'files'
+ # or c) the orig files are MIA
if not self.pkg.changes["architecture"].has_key("source") or self.reprocess == 2 \
- or self.pkg.orig_tar_gz == -1:
+ or len(self.pkg.orig_files) == 0:
return
tmpdir = utils.temp_dirname()
self.ensure_hashes()
+ ###########################################################################
+ def check_lintian(self):
+ # Only check some distributions
+ valid_dist = False
+ for dist in ('unstable', 'experimental'):
+ if dist in self.pkg.changes['distribution']:
+ valid_dist = True
+ break
+
+ if not valid_dist:
+ return
+
+ cnf = Config()
+ tagfile = cnf.get("Dinstall::LintianTags")
+ if tagfile is None:
+ # We don't have a tagfile, so just don't do anything.
+ return
+ # Parse the yaml file
+ sourcefile = file(tagfile, 'r')
+ sourcecontent = sourcefile.read()
+ sourcefile.close()
+ try:
+ lintiantags = yaml.load(sourcecontent)['lintian']
+ except yaml.YAMLError, msg:
+ utils.fubar("Can not read the lintian tags file %s, YAML error: %s." % (tagfile, msg))
+ return
+
+ # Now set up the input file for lintian, which wants exactly one tag per line.
+ # We put all types of tags into one file and sort through lintian's output
+ # later to see whether a detected tag is fatal or not. That way lintian only
+ # runs once over all tags, even if we reject on some of them but not on others.
+ # Additionally build up a set of tags
+ tags = set()
+ (fd, temp_filename) = utils.temp_filename()
+ temptagfile = os.fdopen(fd, 'w')
+ for tagtype in lintiantags:
+ for tag in lintiantags[tagtype]:
+ temptagfile.write("%s\n" % tag)
+ tags.add(tag)
+ temptagfile.close()
+
+ # Now run lintian on the .changes file, capturing the output so we can
+ # parse it afterwards.
+ command = "lintian --show-overrides --tags-from-file %s %s" % (temp_filename, self.pkg.changes_file)
+ (result, output) = commands.getstatusoutput(command)
+ # We are done with lintian, remove our tempfile
+ os.unlink(temp_filename)
+ if (result == 2):
+ utils.warn("lintian failed for %s [return code: %s]." % (self.pkg.changes_file, result))
+ utils.warn(utils.prefix_multi_line_string(output, " [possible output:] "))
+
+ if len(output) == 0:
+ return
+
+ # Lintian produced output, so this package isn't clean. Parse the output
+ # and see whether anything in it warrants a reject. Lines look like:
+ # W: tzdata: binary-without-manpage usr/sbin/tzconfig
+ for line in output.split('\n'):
+ m = re_parse_lintian.match(line)
+ if m is None:
+ continue
+
+ etype = m.group(1)
+ epackage = m.group(2)
+ etag = m.group(3)
+ etext = m.group(4)
+
+ # So let's check whether we know the tag at all.
+ if etag not in tags:
+ continue
+
+ if etype == 'O':
+ # We know the tag and it is overridden. Check whether the override is allowed.
+ if etag in lintiantags['warning']:
+ # The tag is overridden, and it is allowed to be overridden.
+ # Don't add a reject message.
+ pass
+ elif etag in lintiantags['error']:
+ # The tag is overridden - but is not allowed to be
+ self.rejects.append("%s: Overridden tag %s found, but this tag may not be overridden." % (epackage, etag))
+ else:
+ # Tag is known and not overridden; reject directly.
+ self.rejects.append("%s: Found lintian output: '%s %s', automatically rejected package." % (epackage, etag, etext))
+ # Now tell them whether they *might* override it.
+ if etag in lintiantags['warning']:
+ self.rejects.append("%s: If you have a good reason, you may override this lintian tag." % (epackage))
+
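For reference, after yaml.load() the tag file is expected to yield a mapping along these lines; the tag names here are invented for illustration:

lintiantags = {
    'error':   ['some-fatal-tag'],         # may never be overridden
    'warning': ['some-overridable-tag'],   # an override is accepted
}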
###########################################################################
def check_urgency(self):
cnf = Config()
# <Ganneff> yes
# This routine returns None on success or an error on failure
- res = get_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"])
+ res = get_or_set_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"])
if res:
utils.fubar(res)
"""
@warning: NB: this function can remove entries from the 'files' index [if
- the .orig.tar.gz is a duplicate of the one in the archive]; if
+ the orig tarball is a duplicate of the one in the archive]; if
you're iterating over 'files' and call this function as part of
the loop, be sure to add a check to the top of the loop to
ensure you haven't just tried to dereference the deleted entry.
"""
Cnf = Config()
- self.pkg.orig_tar_gz = None
+ self.pkg.orig_files = {} # XXX: do we need to clear it?
+ orig_files = self.pkg.orig_files
# Try and find all files mentioned in the .dsc. This has
# to work harder to cope with the multiple possible
if len(ql) > 0:
# Ignore exact matches for .orig.tar.gz
match = 0
- if dsc_name.endswith(".orig.tar.gz"):
+ if re_is_orig_source.match(dsc_name):
for i in ql:
if self.pkg.files.has_key(dsc_name) and \
int(self.pkg.files[dsc_name]["size"]) == int(i.filesize) and \
# This would fix the stupidity of changing something we often iterate over
# whilst we're doing it
del self.pkg.files[dsc_name]
- self.pkg.orig_tar_gz = os.path.join(i.location.path, i.filename)
+ if not orig_files.has_key(dsc_name):
+ orig_files[dsc_name] = {}
+ orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
match = 1
if not match:
self.rejects.append("can not overwrite existing copy of '%s' already in the archive." % (dsc_name))
- elif dsc_name.endswith(".orig.tar.gz"):
+ elif re_is_orig_source.match(dsc_name):
# Check in the pool
ql = get_poolfile_like_name(dsc_name, session)
# need this for updating dsc_files in install()
dsc_entry["files id"] = x.file_id
# See install() in process-accepted...
- self.pkg.orig_tar_id = x.file_id
- self.pkg.orig_tar_gz = old_file
- self.pkg.orig_tar_location = x.location.location_id
+ if not orig_files.has_key(dsc_name):
+ orig_files[dsc_name] = {}
+ orig_files[dsc_name]["id"] = x.file_id
+ orig_files[dsc_name]["path"] = old_file
+ orig_files[dsc_name]["location"] = x.location.location_id
else:
# TODO: Record the queues and info in the DB so we don't hardcode all this crap
# Not there? Check the queue directories...
in_otherdir_fh.close()
actual_size = os.stat(in_otherdir)[stat.ST_SIZE]
found = in_otherdir
- self.pkg.orig_tar_gz = in_otherdir
+ if not orig_files.has_key(dsc_name):
+ orig_files[dsc_name] = {}
+ orig_files[dsc_name]["path"] = in_otherdir
if not found:
self.rejects.append("%s refers to %s, but I can't find it in the queue or in the pool." % (file, dsc_name))
- self.pkg.orig_tar_gz = -1
continue
else:
self.rejects.append("%s refers to %s, but I can't find it in the queue." % (file, dsc_name))
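To make the new bookkeeping concrete, self.pkg.orig_files ends up shaped roughly as sketched below (all values invented); the "id" and "location" keys are only present when the tarball was found in the pool:

self.pkg.orig_files = {
    'foo_1.0.orig.tar.gz': {
        'path': '/srv/ftp.debian.org/ftp/pool/main/f/foo/foo_1.0.orig.tar.gz',
        'id': 12345,        # files table id, if found in the pool
        'location': 42,     # location table id, if found in the pool
    },
}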
re_extract_src_version = re.compile (r"(\S+)\s*\((.*)\)")
re_isadeb = re.compile (r"(.+?)_(.+?)_(.+)\.u?deb$")
-re_issource = re.compile (r"(.+)_(.+?)\.(orig\.tar\.gz|diff\.gz|tar\.gz|dsc)$")
+orig_source_ext_re = r"orig(?:-.+)?\.tar\.(?:gz|bz2)"
+re_orig_source_ext = re.compile(orig_source_ext_re + "$")
+re_source_ext = re.compile("(" + orig_source_ext_re + r"|debian\.tar\.(?:gz|bz2)|diff\.gz|tar\.(?:gz|bz2)|dsc)$")
+re_issource = re.compile(r"(.+)_(.+?)\." + re_source_ext.pattern)
+re_is_orig_source = re.compile (r"(.+)_(.+?)\.orig(?:-.+)?\.tar\.(?:gz|bz2)$")
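A few illustrative matches for the new patterns (filenames invented):

assert re_is_orig_source.match('foo_1.0.orig.tar.gz')
assert re_is_orig_source.match('foo_1.0.orig-component.tar.bz2')
assert not re_is_orig_source.match('foo_1.0.tar.gz')      # native tarball
assert re_issource.match('foo_1.0-1.debian.tar.gz')       # 3.0 (quilt) packaging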
re_single_line_field = re.compile(r"^(\S*?)\s*:\s*(.*)")
re_multi_line_field = re.compile(r"^\s(.*)")
re_user_name = re.compile(r"^pub:.*:(.*)<.*$", re.MULTILINE);
re_re_mark = re.compile(r'^RE:')
+re_parse_lintian = re.compile(r"^(W|E|O): (.*?): ([^ ]*) ?(.*)$")
--- /dev/null
+import re
+
+srcformats = []
+
+class SourceFormat(type):
+ def __new__(cls, name, bases, attrs):
+ klass = super(SourceFormat, cls).__new__(cls, name, bases, attrs)
+ srcformats.append(klass)
+
+ assert str(klass.name)
+ assert iter(klass.requires)
+ assert iter(klass.disallowed)
+
+ klass.re_format = re.compile(klass.format)
+
+ return klass
+
+ @classmethod
+ def reject_msgs(cls, has):
+ if len(cls.requires) != len([x for x in cls.requires if has[x]]):
+ yield "lack of required files for format %s" % cls.name
+
+ for key in cls.disallowed:
+ if has[key]:
+ yield "contains source files not allowed in format %s" % cls.name
+
+class FormatOne(SourceFormat):
+ __metaclass__ = SourceFormat
+
+ name = '1.0'
+ format = r'1.0'
+
+ requires = ()
+ disallowed = ('debian_tar', 'more_orig_tar')
+
+ @classmethod
+ def reject_msgs(cls, has):
+ if not (has['native_tar_gz'] or (has['orig_tar_gz'] and has['debian_diff'])):
+ yield "no .tar.gz or .orig.tar.gz+.diff.gz in 'Files' field."
+ if has['native_tar_gz'] and has['debian_diff']:
+ yield "native package with diff makes no sense"
+ if (has['orig_tar_gz'] != has['orig_tar']) or \
+ (has['native_tar_gz'] != has['native_tar']):
+ yield "contains source files not allowed in format %s" % cls.name
+
+ for msg in super(FormatOne, cls).reject_msgs(has):
+ yield msg
+
+class FormatThree(SourceFormat):
+ __metaclass__ = SourceFormat
+
+ name = '3.x (native)'
+ format = r'3\.\d+ \(native\)'
+
+ requires = ('native_tar',)
+ disallowed = ('orig_tar', 'debian_diff', 'debian_tar', 'more_orig_tar')
+
+class FormatThreeQuilt(SourceFormat):
+ __metaclass__ = SourceFormat
+
+ name = '3.x (quilt)'
+ format = r'3\.\d+ \(quilt\)'
+
+ requires = ('orig_tar', 'debian_tar')
+ disallowed = ('debian_diff', 'native_tar')
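To illustrate how these classes are driven (mirroring the unit tests further down), a 3.0 (quilt) upload that also ships a .diff.gz yields a reject message:

from collections import defaultdict

has = defaultdict(lambda: 0)
has.update({'orig_tar': 1, 'debian_tar': 1, 'debian_diff': 1})
print list(FormatThreeQuilt.reject_msgs(has))
# -> ['contains source files not allowed in format 3.x (quilt)']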
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-import codecs
import commands
import email.Header
import os
import re
import string
import email as modemail
+import subprocess
from dbconn import DBConn, get_architecture, get_component, get_suite
from dak_exceptions import *
from textutils import fix_maintainer
from regexes import re_html_escaping, html_escaping, re_single_line_field, \
re_multi_line_field, re_srchasver, re_verwithext, \
- re_parse_maintainer, re_taint_free, re_gpg_uid, re_re_mark, \
- re_whitespace_comment
+ re_taint_free, re_gpg_uid, re_re_mark, \
+ re_whitespace_comment, re_issource
+
+from srcformats import srcformats
+from collections import defaultdict
################################################################################
known_hashes = [("sha1", apt_pkg.sha1sum, (1, 8)),
("sha256", apt_pkg.sha256sum, (1, 8))] #: hashes we accept for entries in .changes/.dsc
+# Monkeypatch commands.getstatusoutput as it returns a "0" exit code in
+# all situations under lenny's Python ('commands' is already imported above,
+# so we do not import it a second time here).
+def dak_getstatusoutput(cmd):
+ pipe = subprocess.Popen(cmd, shell=True, universal_newlines=True,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+ output = "".join(pipe.stdout.readlines())
+
+ ret = pipe.wait()
+ if ret is None:
+ ret = 0
+
+ return ret, output
+commands.getstatusoutput = dak_getstatusoutput
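A minimal check of the patched behaviour (assumes a POSIX shell, so that 'false' exits non-zero):

status, output = commands.getstatusoutput('false')
assert status != 0   # per the comment above, lenny's stock version reports 0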
+
################################################################################
def html_escape(s):
################################################################################
+def check_dsc_files(dsc_filename, dsc=None, dsc_files=None):
+ """
+ Verify that the files listed in the Files field of the .dsc are
+ those expected given the announced Format.
+
+ @type dsc_filename: string
+ @param dsc_filename: path of .dsc file
+
+ @type dsc: dict
+ @param dsc: the content of the .dsc parsed by C{parse_changes()}
+
+ @type dsc_files: dict
+ @param dsc_files: the file list returned by C{build_file_list()}
+
+ @rtype: list
+ @return: all errors detected
+ """
+ rejmsg = []
+
+ # Parse the file if needed
+ if dsc is None:
+ dsc = parse_changes(dsc_filename, signing_rules=1)
+
+ if dsc_files is None:
+ dsc_files = build_file_list(dsc, is_a_dsc=1)
+
+ # Ensure .dsc lists proper set of source files according to the format
+ # announced
+ has = defaultdict(lambda: 0)
+
+ ftype_lookup = (
+ (r'orig\.tar\.gz', ('orig_tar_gz', 'orig_tar')),
+ (r'diff\.gz', ('debian_diff',)),
+ (r'tar\.gz', ('native_tar_gz', 'native_tar')),
+ (r'debian\.tar\.(gz|bz2)', ('debian_tar',)),
+ (r'orig\.tar\.(gz|bz2)', ('orig_tar',)),
+ (r'tar\.(gz|bz2)', ('native_tar',)),
+ (r'orig-.+\.tar\.(gz|bz2)', ('more_orig_tar',)),
+ )
+
+ for f in dsc_files.keys():
+ m = re_issource.match(f)
+ if not m:
+ rejmsg.append("%s: %s in Files field not recognised as source."
+ % (dsc_filename, f))
+ continue
+
+ # Populate 'has' dictionary by resolving keys in lookup table
+ matched = False
+ for regex, keys in ftype_lookup:
+ if re.match(regex, m.group(3)):
+ matched = True
+ for key in keys:
+ has[key] += 1
+ break
+
+ # File does not match anything in lookup table; reject
+ if not matched:
+ rejmsg.append("%s: unexpected source file '%s'" % (dsc_filename, f))
+
+ # Check for multiple files
+ for file_type in ('orig_tar', 'native_tar', 'debian_tar', 'debian_diff'):
+ if has[file_type] > 1:
+ rejmsg.append("%s: lists multiple %s" % (dsc_filename, file_type))
+
+ # Source format specific tests
+ for format in srcformats:
+ if format.re_format.match(dsc['format']):
+ rejmsg.extend([
+ '%s: %s' % (dsc_filename, x) for x in format.reject_msgs(has)
+ ])
+ break
+
+ return rejmsg
+
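A sketch of the intended use with invented filenames; passing dsc and dsc_files directly means nothing is read from disk:

errors = check_dsc_files('bar_1.0-1.dsc',
                         dsc={'format': '3.0 (quilt)'},
                         dsc_files={'bar_1.0.orig.tar.gz': {},
                                    'bar_1.0-1.debian.tar.gz': {}})
assert errors == []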
+################################################################################
+
def check_hash_fields(what, manifest):
"""
check_hash_fields ensures that there are no checksum fields in the
format = format[:2]
if is_a_dsc:
- # format = (1,0) are the only formats we currently accept,
# format = (0,0) are missing format headers of which we still
# have some in the archive.
- if format != (1,0) and format != (0,0):
+ if format != (1,0) and format != (0,0) and \
+ format != (3,0,"quilt") and format != (3,0,"native"):
raise UnknownFormatError, "%s" % (changes.get("format","0.0"))
else:
if (format < (1,5) or format > (1,8)):
Various
-------
-* Lintian based automated rejects
- - Have a set of lintian tags each package *must* not have. If it does
- -> reject.
- - If a tag is overriden by the maintainer, do not reject, but put it
- into NEW. If the override is ok note that in a table and dont act on
- it for any future uploads of this package anymore.
- - possibly have two classes of tags. one for "shouldnt happen by
- accident" and one "shouldnt happen". the first gets ignored from us
- if overwritten in the package, the second only us can overwrite.
- - its a suite option in dak, not active for all at once.
+* Implement autosigning, see ftpmaster_autosigning on ftp-master host in text/.
* Throw away all DD uploaded .debs. (Depend on "Lintian based automated
rejects")
- its a suite option, not active for all at once.
- should have all buildd machines under dsa control
-* Implement autosigning, see ftpmaster_autosigning on ftp-master host in text/.
-
* Check TODO.old and move still-valid/useful entries over here.
* need a testsuite _badly_
- needs updateX.py written and then the rest of the code changed to deal
with it.
-* Checkout SQL Alchemy and probably use that for our database layer.
-
-* reject on > or < in a version constraint
-
* use pythonX.Y-tarfile to check orig.tar.gz timestamps too.
* the .dak stuff is fundamentally braindamaged for various reasons, it
+#!/usr/bin/env python
+
import unittest
import os, sys
self.assertEqual(self.MATCH(': ::').groups(), ('', '::'))
self.assertEqual(self.MATCH('Foo::bar').groups(), ('Foo', ':bar'))
self.assertEqual(self.MATCH('Foo: :bar').groups(), ('Foo', ':bar'))
+
+class re_parse_lintian(unittest.TestCase):
+ MATCH = regexes.re_parse_lintian.match
+
+ def testBinary(self):
+ self.assertEqual(
+ self.MATCH('W: pkgname: some-tag path/to/file').groups(),
+ ('W', 'pkgname', 'some-tag', 'path/to/file')
+ )
+
+ def testBinaryNoDescription(self):
+ self.assertEqual(
+ self.MATCH('W: pkgname: some-tag').groups(),
+ ('W', 'pkgname', 'some-tag', '')
+ )
+
+ def testSource(self):
+ self.assertEqual(
+ self.MATCH('W: pkgname source: some-tag path/to/file').groups(),
+ ('W', 'pkgname source', 'some-tag', 'path/to/file')
+ )
+
+ def testSourceNoDescription(self):
+ self.assertEqual(
+ self.MATCH('W: pkgname source: some-tag').groups(),
+ ('W', 'pkgname source', 'some-tag', '')
+ )
+
+if __name__ == '__main__':
+ unittest.main()
--- /dev/null
+#!/usr/bin/env python
+
+import unittest
+
+import os, sys
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from collections import defaultdict
+
+from daklib import srcformats
+
+class SourceFormatTestCase(unittest.TestCase):
+ def get_rejects(self, has_vars):
+ has = defaultdict(lambda: 0)
+ has.update(has_vars)
+ return list(self.fmt.reject_msgs(has))
+
+ def assertAccepted(self, has):
+ self.assertEqual(self.get_rejects(has), [])
+
+ def assertRejected(self, has):
+ self.assertNotEqual(self.get_rejects(has), [])
+
+class FormatOneTestCase(SourceFormatTestCase):
+ fmt = srcformats.FormatOne
+
+ def testEmpty(self):
+ self.assertRejected({})
+
+ def testNative(self):
+ self.assertAccepted({'native_tar': 1, 'native_tar_gz': 1})
+
+ def testStandard(self):
+ self.assertAccepted({
+ 'orig_tar': 1,
+ 'orig_tar_gz': 1,
+ 'debian_diff': 1,
+ })
+
+ def testDisallowed(self):
+ self.assertRejected({
+ 'native_tar': 1,
+ 'native_tar_gz': 1,
+ 'debian_tar': 1,
+ })
+ self.assertRejected({
+ 'orig_tar': 1,
+ 'orig_tar_gz': 1,
+ 'debian_diff': 0,
+ })
+ self.assertRejected({
+ 'native_tar': 1,
+ 'native_tar_gz': 1,
+ 'more_orig_tar': 1,
+ })
+ self.assertRejected({
+ 'native_tar': 1,
+ 'native_tar_gz': 1,
+ 'debian_diff': 1,
+ })
+
+class FormatThreeTestCase(SourceFormatTestCase):
+ fmt = srcformats.FormatThree
+
+ def testEmpty(self):
+ self.assertRejected({})
+
+ def testSimple(self):
+ self.assertAccepted({'native_tar': 1})
+
+ def testDisallowed(self):
+ self.assertRejected({'native_tar': 1, 'orig_tar': 1})
+ self.assertRejected({'native_tar': 1, 'debian_diff': 1})
+ self.assertRejected({'native_tar': 1, 'debian_tar': 1})
+ self.assertRejected({'native_tar': 1, 'more_orig_tar': 1})
+
+class FormatThreeQuiltTestCase(SourceFormatTestCase):
+ fmt = srcformats.FormatThreeQuilt
+
+ def testEmpty(self):
+ self.assertRejected({})
+
+ def testSimple(self):
+ self.assertAccepted({'orig_tar': 1, 'debian_tar': 1})
+
+ def testMultipleTarballs(self):
+ self.assertAccepted({
+ 'orig_tar': 1,
+ 'debian_tar': 1,
+ 'more_orig_tar': 42,
+ })
+
+ def testDisallowed(self):
+ self.assertRejected({
+ 'orig_tar': 1,
+ 'debian_tar': 1,
+ 'debian_diff': 1
+ })
+ self.assertRejected({
+ 'orig_tar': 1,
+ 'debian_tar': 1,
+ 'native_tar': 1,
+ })
+
+if __name__ == '__main__':
+ unittest.main()