Dir
{
+ Base "/srv/backports-master.debian.org/";
Root "/srv/backports-master.debian.org/ftp/";
Pool "/srv/backports-master.debian.org/ftp/pool/";
Export "/srv/backports-master.debian.org/export/";
ARCHLIST=$(tempfile)
log "Querying postgres"
- local query='SELECT l.path, f.filename, a.arch_string FROM location l JOIN files f ON (f.location = l.id) LEFT OUTER JOIN (binaries b JOIN architecture a ON (b.architecture = a.id)) ON (f.id = b.file)'
- psql -At -c "$query" | sed 's/|//;s,^/srv/ftp-master.debian.org/ftp,.,' | sort >$ARCHLIST
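+ # List every pool file together with the architectures of the binaries
+ # built from it; files without binaries (e.g. source-only files) get a
+ # NULL arch via the outer join.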
+ local query="
+ SELECT CONCAT('./pool/', c.name, '/', f.filename) AS path, a.arch_string AS arch_string
+ FROM files f
+ JOIN files_archive_map af ON f.id = af.file_id
+ JOIN component c ON af.component_id = c.id
+ JOIN archive ON af.archive_id = archive.id
+ LEFT OUTER JOIN
+ (binaries b
+ JOIN architecture a ON b.architecture = a.id)
+ ON f.id = b.file
+ WHERE archive.name = 'ftp-master'
+ ORDER BY path, arch_string
+ "
+ psql -At -c "$query" >$ARCHLIST
includedirs () {
perl -ne 'print; while (m,/[^/]+$,) { $_=$`; print $_ . "\n" unless $d{$_}++; }'
log "Generating suite lists"
suite_list () {
- local query
- query="$(printf 'SELECT DISTINCT l.path, f.filename FROM (SELECT sa.source AS source FROM src_associations sa WHERE sa.suite = %d UNION SELECT b.source AS source FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) WHERE ba.suite = %d) s JOIN dsc_files df ON (s.source = df.source) JOIN files f ON (df.file = f.id) JOIN location l ON (f.location = l.id)' $1 $1)"
+ local suite_id="$(printf %d $1)"
+ local query
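+ # Files of the suite's sources: direct src_associations, sources kept
+ # alive via Built-Using (extra_src_references), and the sources of all
+ # binaries in the suite.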
+ query="
+ SELECT DISTINCT CONCAT('./pool/', c.name, '/', f.filename)
+ FROM
+ (SELECT sa.source AS source
+ FROM src_associations sa
+ WHERE sa.suite = $suite_id
+ UNION
+ SELECT esr.src_id
+ FROM extra_src_references esr
+ JOIN bin_associations ba ON esr.bin_id = ba.bin
+ WHERE ba.suite = $suite_id
+ UNION
+ SELECT b.source AS source
+ FROM bin_associations ba
+ JOIN binaries b ON ba.bin = b.id WHERE ba.suite = $suite_id) s
+ JOIN dsc_files df ON s.source = df.source
+ JOIN files f ON df.file = f.id
+ JOIN files_archive_map af ON f.id = af.file_id
+ JOIN component c ON af.component_id = c.id
+ JOIN archive ON af.archive_id = archive.id
+ WHERE archive.name = 'ftp-master'
+ "
psql -F' ' -A -t -c "$query"
- query="$(printf 'SELECT l.path, f.filename FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) JOIN files f ON (b.file = f.id) JOIN location l ON (f.location = l.id) WHERE ba.suite = %d' $1)"
+ query="
+ SELECT CONCAT('./pool/', c.name, '/', f.filename)
+ FROM bin_associations ba
+ JOIN binaries b ON ba.bin = b.id
+ JOIN files f ON b.file = f.id
+ JOIN files_archive_map af ON f.id = af.file_id
+ JOIN component c ON af.component_id = c.id
+ JOIN archive ON af.archive_id = archive.id
+ WHERE ba.suite = $suite_id AND archive.name = 'ftp-master'
+ "
psql -F' ' -A -t -c "$query"
}
[ "$(readlink $distdir)" != "$distname" ] || echo $distdir
done
)
- suite_list $id | tr -d ' ' | sed 's,^/srv/ftp-master.debian.org/ftp,.,'
+ suite_list $id
) | sort -u | gzip -9 > suite-${suite}.list.gz
done
Dir
{
+ Base "/srv/security-master.debian.org/";
Root "/srv/security-master.debian.org/ftp/";
Pool "/srv/security-master.debian.org/ftp/pool/";
Export "/srv/security-master.debian.org/export/";
# used by cron.dinstall *and* cron.unchecked.
function make_buildd_dir () {
dak manage-build-queues -a
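+ # Build queues are now regular suites in the 'build-queues' archive, so
+ # the standard index and Release generation works for them.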
+ dak generate-packages-sources2 -a build-queues
+ dak generate-releases -a build-queues
+
+ for suite in unstable experimental; do
+ rm -rf "$incoming/dists/$suite/buildd"
+ dak export-suite -s "buildd-$suite" -d "$incoming/dists/$suite/buildd"
+ done
+
+ # export to old build queue directories
+ # XXX: Remove once the buildds use the version generated above.
+ for suite in $(ls -1 $incoming/dists/); do
+ # Skip project trace directory
+ if [ "${suite}x" = "projectx" ]; then continue; fi
+ cd ${incoming}/dists/${suite}/buildd
+
+ dpkg-scanpackages . $overridedir/override.sid.all3 >Packages
+ gzip -9c --rsyncable <Packages >Packages.gz
+ dpkg-scansources . $overridedir/override.sid.all3.src >Sources
+ gzip -9c --rsyncable <Sources >Sources.gz
+
+ cd ..
+ rm -f buildd/Release
+ apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="Debian" -o APT::FTPArchive::Release::Label="Debian" -o APT::FTPArchive::Release::Description="buildd $suite incoming" -o APT::FTPArchive::Release::Architectures="$archs" release buildd >Release
+ if [ "$suite" = "experimental" ]; then
+ echo "NotAutomatic: yes" >>Release
+ fi
+
+ gpg --secret-keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 473041FA --detach-sign -o Release.gpg Release
+
+ mv Release Release.gpg buildd/
+ done
for dist in $(ls -1 ${incoming}/dists/); do
# Skip project trace directory
ln -sfT tree/${STAMP} ${incoming}/dists/${dist}/current
find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
done
-
}
# Process (oldstable)-proposed-updates "NEW" queue
function punew_do() {
+ local queue="$1"
date -u -R >> REPORT
- dak process-policy $1 | tee -a REPORT | mail -a "X-Debian: DAK" -e -s "NEW changes in $1" debian-release@lists.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
+ dak process-policy "${queue}" | tee -a REPORT | mail -a "X-Debian: DAK" -e -s "NEW changes in ${queue}" debian-release@lists.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
echo >> REPORT
+
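+ # Regenerate indices for the policy queue and re-export its contents.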
+ dak generate-packages-sources2 -s "${queue}"
+
+ local exportdir="${queuedir}/${queue}/export"
+ rm -rf "${exportdir}"
+ dak export -q "${queue}" -d "${exportdir}" --all
}
# These versions used in dinstall
dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$unchecked" >> $report
}
-# Do the newstage processing, in case we have files.
-function do_newstage () {
- cd $newstage
-
- changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs)
- report=$queuedir/REPORT
- timestamp=$(date "+%Y-%m-%d %H:%M")
- UNCHECKED_WITHOUT_LOCK=${UNCHECKED_WITHOUT_LOCK:-""}
-
- echo "$timestamp": ${changes:-"Nothing to do in newstage"} >> $report
- dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$newstage" >> $report
+# process NEW policy queue
+function do_new () {
+ if [ "${PROGRAM}" = "dinstall" ]; then
+ log "Doing NEW processing"
+ fi
+ dak process-policy new | mail -a "X-Debian: DAK" -e -s "NEW processing" ftpmaster@ftp-master.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
+ dak clean-suites -a new
}
function sync_debbugs () {
# do not run show-new and other stuff in parallel
LOCKFILE="$lockdir/unchecked.lock"
if lockfile -r16 $LOCKFILE 2> /dev/null; then
+ do_new
dak show-new > /dev/null || true
rm -f $LOCKFILE
fi
punew proposedupdates
opunew oldproposedupdates
-# Now process the NEW staging area
-do_newstage
-
# Finally deal with unchecked
do_unchecked
Dir
{
+ Base "/srv/ftp-master.debian.org/";
Root "/srv/ftp-master.debian.org/ftp/";
Pool "/srv/ftp-master.debian.org/ftp/pool/";
Templates "/srv/ftp-master.debian.org/dak/templates/";
function packages() {
log "Generating Packages and Sources files"
- dak generate-packages-sources2
- dak contents generate
+ dak generate-packages-sources2 -a ftp-master
+ dak contents generate -a ftp-master
}
function pdiff() {
done
)
log "Generating Release files"
- dak generate-releases
+ dak generate-releases -a ftp-master
}
function dakcleanup() {
log "Cleanup old packages/files"
dak clean-suites -m 10000
- dak clean-queues
+ # XXX: reactivate once clean-queues is fixed
+ #dak clean-queues
}
function buildd_dir() {
ARCHLIST=$(tempfile)
log "Querying postgres"
- local query='SELECT l.path, f.filename, a.arch_string FROM location l JOIN files f ON (f.location = l.id) LEFT OUTER JOIN (binaries b JOIN architecture a ON (b.architecture = a.id)) ON (f.id = b.file)'
- psql -At -c "$query" | sed 's/|//;s,^/srv/ftp-master.debian.org/ftp,.,' | sort >$ARCHLIST
+ local query="
+ SELECT CONCAT('./pool/', c.name, '/', f.filename) AS path, a.arch_string AS arch_string
+ FROM files f
+ JOIN files_archive_map af ON f.id = af.file_id
+ JOIN component c ON af.component_id = c.id
+ JOIN archive ON af.archive_id = archive.id
+ LEFT OUTER JOIN
+ (binaries b
+ JOIN architecture a ON b.architecture = a.id)
+ ON f.id = b.file
+ WHERE archive.name = 'ftp-master'
+ ORDER BY path, arch_string
+ "
+ psql -At -c "$query" >$ARCHLIST
includedirs () {
perl -ne 'print; while (m,/[^/]+$,) { $_=$`; print $_ . "\n" unless $d{$_}++; }'
log "Generating suite lists"
suite_list () {
+ local suite_id="$(printf %d $1)"
local query
- query="$(printf 'SELECT DISTINCT l.path, f.filename FROM (SELECT sa.source AS source FROM src_associations sa WHERE sa.suite = %d UNION SELECT b.source AS source FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) WHERE ba.suite = %d) s JOIN dsc_files df ON (s.source = df.source) JOIN files f ON (df.file = f.id) JOIN location l ON (f.location = l.id)' $1 $1)"
+ query="
+ SELECT DISTINCT CONCAT('./pool/', c.name, '/', f.filename)
+ FROM
+ (SELECT sa.source AS source
+ FROM src_associations sa
+ WHERE sa.suite = $suite_id
+ UNION
+ SELECT esr.src_id
+ FROM extra_src_references esr
+ JOIN bin_associations ba ON esr.bin_id = ba.bin
+ WHERE ba.suite = $suite_id
+ UNION
+ SELECT b.source AS source
+ FROM bin_associations ba
+ JOIN binaries b ON ba.bin = b.id WHERE ba.suite = $suite_id) s
+ JOIN dsc_files df ON s.source = df.source
+ JOIN files f ON df.file = f.id
+ JOIN files_archive_map af ON f.id = af.file_id
+ JOIN component c ON af.component_id = c.id
+ JOIN archive ON af.archive_id = archive.id
+ WHERE archive.name = 'ftp-master'
+ "
psql -F' ' -A -t -c "$query"
- query="$(printf 'SELECT l.path, f.filename FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) JOIN files f ON (b.file = f.id) JOIN location l ON (f.location = l.id) WHERE ba.suite = %d' $1)"
+ query="
+ SELECT CONCAT('./pool/', c.name, '/', f.filename)
+ FROM bin_associations ba
+ JOIN binaries b ON ba.bin = b.id
+ JOIN files f ON b.file = f.id
+ JOIN files_archive_map af ON f.id = af.file_id
+ JOIN component c ON af.component_id = c.id
+ JOIN archive ON af.archive_id = archive.id
+ WHERE ba.suite = $suite_id AND archive.name = 'ftp-master'
+ "
psql -F' ' -A -t -c "$query"
}
[ "$(readlink $distdir)" != "$distname" ] || echo $distdir
done
)
- suite_list $id | tr -d ' ' | sed 's,^/srv/ftp-master.debian.org/ftp,.,'
+ suite_list $id
) | sort -u | gzip -9 > suite-${suite}.list.gz
done
s-a rm SUITE ARCH remove ARCH from suite (will only work if
no packages remain for the arch in the suite)
+ archive:
+ archive list list all archives
+ archive add NAME ROOT DESCRIPTION [primary-mirror=MIRROR] [tainted=1]
+ add archive NAME with path ROOT,
+ primary mirror MIRROR.
+ archive rm NAME remove archive NAME (will only work if there are
+ no files and no suites in the archive)
+
version-check / v-c:
v-c list show version checks for all suites
v-c list-suite SUITE show version checks for suite SUITE
signingkey = get_field('signingkey')
if signingkey is not None:
suite.signingkeys = [signingkey.upper()]
+ archive_name = get_field('archive')
+ if archive_name is not None:
+ suite.archive = get_archive(archive_name, s)
+ else:
+ suite.archive = s.query(Archive).filter(~Archive.archive_name.in_(['build-queues', 'new', 'policy'])).one()
suite.srcformats = s.query(SrcFormat).all()
s.add(suite)
s.flush()
################################################################################
+def archive_list():
+ session = DBConn().session()
+ for archive in session.query(Archive).order_by(Archive.archive_name):
+ print "{0} path={1} description={2} tainted={3}".format(archive.archive_name, archive.path, archive.description, archive.tainted)
+
+def archive_add(args):
+ (name, path, description) = args[0:3]
+
+ attributes = dict(
+ archive_name=name,
+ path=path,
+ description=description,
+ )
+
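+ # Any further arguments are key=value pairs that are set verbatim as
+ # attributes on the new Archive row (e.g. tainted=1).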
+ for option in args[3:]:
+ (key, value) = option.split('=')
+ attributes[key] = value
+
+ session = DBConn().session()
+
+ archive = Archive()
+ for key, value in attributes.iteritems():
+ setattr(archive, key, value)
+
+ session.add(archive)
+ session.flush()
+
+ if dryrun:
+ session.rollback()
+ else:
+ session.commit()
+
+def archive_rm(name):
+ session = DBConn().session()
+ archive = session.query(Archive).filter_by(archive_name=name).one()
+ session.delete(archive)
+ session.flush()
+
+ if dryrun:
+ session.rollback()
+ else:
+ session.commit()
+
+def archive(command):
+ mode = command[1]
+ if mode == 'list':
+ archive_list()
+ elif mode == 'add':
+ archive_add(command[2:])
+ elif mode == 'rm':
+ archive_rm(command[2])
+ else:
+ die("E: archive command unknown")
+
+dispatch['archive'] = archive
+
+################################################################################
+
def __version_check_list(d):
session = d.session()
for s in session.query(Suite).order_by(Suite.suite_name):
Prepare the list of expected files, then walk through each archive's
pool/ directory to compare it.
"""
- global db_files
-
cnf = Config()
+ session = DBConn().session()
- print "Building list of database files..."
- q = DBConn().session().query(PoolFile).join(Location).order_by('path', 'location')
-
- print "Missing files:"
- db_files.clear()
-
- for f in q.all():
- filename = os.path.abspath(os.path.join(f.location.path, f.filename))
- db_files[filename] = ""
- if os.access(filename, os.R_OK) == 0:
- if f.last_used:
- print "(last used: %s) %s" % (f.last_used, filename)
- else:
- print "%s" % (filename)
-
-
- filename = os.path.join(cnf["Dir::Override"], 'override.unreferenced')
- if os.path.exists(filename):
- f = utils.open_file(filename)
- for filename in f.readlines():
- filename = filename[:-1]
- excluded[filename] = ""
-
- print "Existent files not in db:"
-
- os.path.walk(os.path.join(cnf["Dir::Root"], 'pool/'), process_dir, None)
-
- print
- print "%s wasted..." % (utils.size_type(waste))
+ query = """
+ SELECT archive.name, suite.suite_name, f.filename
+ FROM binaries b
+ JOIN bin_associations ba ON b.id = ba.bin
+ JOIN suite ON ba.suite = suite.id
+ JOIN archive ON suite.archive_id = archive.id
+ JOIN files f ON b.file = f.id
+ WHERE NOT EXISTS (SELECT 1 FROM files_archive_map af
+ WHERE af.archive_id = suite.archive_id
+ AND af.file_id = b.file)
+ ORDER BY archive.name, suite.suite_name, f.filename
+ """
+ for row in session.execute(query):
+ print "MISSING-ARCHIVE-FILE {0} {1} {2}".format(*row)
+
+ query = """
+ SELECT archive.name, suite.suite_name, f.filename
+ FROM source s
+ JOIN src_associations sa ON s.id = sa.source
+ JOIN suite ON sa.suite = suite.id
+ JOIN archive ON suite.archive_id = archive.id
+ JOIN dsc_files df ON s.id = df.source
+ JOIN files f ON df.file = f.id
+ WHERE NOT EXISTS (SELECT 1 FROM files_archive_map af
+ WHERE af.archive_id = suite.archive_id
+ AND af.file_id = df.file)
+ ORDER BY archive.name, suite.suite_name, f.filename
+ """
+ for row in session.execute(query):
+ print "MISSING-ARCHIVE-FILE {0} {1} {2}".format(*row)
+
+ archive_files = session.query(ArchiveFile) \
+ .join(ArchiveFile.archive).join(ArchiveFile.file) \
+ .order_by(Archive.archive_name, PoolFile.filename)
+
+ expected_files = set()
+ for af in archive_files:
+ path = af.path
+ expected_files.add(path)
+ if not os.path.exists(path):
+ print "MISSING-FILE {0} {1} {2}".format(af.archive.archive_name, af.file.filename, path)
+
+ archives = session.query(Archive).order_by(Archive.archive_name)
+
+ for a in archives:
+ top = os.path.join(a.path, 'pool')
+ for dirpath, dirnames, filenames in os.walk(top):
+ for fn in filenames:
+ path = os.path.join(dirpath, fn)
+ if path in expected_files:
+ continue
+ print "UNEXPECTED-FILE {0} {1}".format(a.archive_name, path)
################################################################################
packages = {}
# TODO: Fix to use placeholders (check how to with arrays)
q = session.execute("""
-SELECT b.package FROM binaries b, bin_associations ba, files f,
- location l, component c
- WHERE b.type = :otype AND b.id = ba.bin AND f.id = b.file AND l.id = f.location
- AND c.id = l.component AND ba.suite IN (%s) AND c.id = :component_id
+SELECT b.package
+ FROM binaries b
+ JOIN bin_associations ba ON b.id = ba.bin
+ JOIN suite ON ba.suite = suite.id
+ JOIN files_archive_map af ON b.file = af.file_id AND suite.archive_id = af.archive_id
+ WHERE b.type = :otype AND ba.suite IN (%s) AND af.component_id = :component_id
""" % (",".join([ str(i) for i in affected_suites ])), {'otype': otype, 'component_id': component_id})
for i in q.fetchall():
packages[i[0]] = 0
src_packages = {}
q = session.execute("""
-SELECT s.source FROM source s, src_associations sa, files f, location l,
- component c
- WHERE s.id = sa.source AND f.id = s.file AND l.id = f.location
- AND c.id = l.component AND sa.suite IN (%s) AND c.id = :component_id
+SELECT s.source FROM source s
+ JOIN src_associations sa ON s.id = sa.source
+ JOIN suite ON sa.suite = suite.id
+ JOIN files_archive_map af ON s.file = af.file_id AND suite.archive_id = af.archive_id
+ WHERE sa.suite IN (%s) AND af.component_id = :component_id
""" % (",".join([ str(i) for i in affected_suites])), {'component_id': component_id})
for i in q.fetchall():
src_packages[i[0]] = 0
################################################################################
-def check_binaries(now_date, delete_date, max_delete, session):
+def check_binaries(now_date, session):
print "Checking for orphaned binary packages..."
# Get the list of binary packages not in a suite and mark them for
# deletion.
-
- q = session.execute("""
-SELECT b.file, f.filename
- FROM binaries b
- LEFT JOIN files f
- ON (b.file = f.id)
- WHERE f.last_used IS NULL
- AND b.id NOT IN
- (SELECT ba.bin FROM bin_associations ba)
- AND f.id NOT IN
- (SELECT bqf.fileid FROM build_queue_files bqf)""")
- for i in q.fetchall():
- Logger.log(["set lastused", i[1]])
- if not Options["No-Action"]:
- session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid AND last_used IS NULL",
- {'lastused': now_date, 'fileid': i[0]})
-
- if not Options["No-Action"]:
- session.commit()
-
# Check for any binaries which are marked for eventual deletion
# but are now used again.
- q = session.execute("""
-SELECT b.file, f.filename
- FROM binaries b
- LEFT JOIN files f
- ON (b.file = f.id)
- WHERE f.last_used IS NOT NULL
- AND (b.id IN
- (SELECT ba.bin FROM bin_associations ba)
- OR f.id IN
- (SELECT bqf.fileid FROM build_queue_files bqf))""")
-
- for i in q.fetchall():
- Logger.log(["unset lastused", i[1]])
- if not Options["No-Action"]:
- session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid", {'fileid': i[0]})
-
- if not Options["No-Action"]:
- session.commit()
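+ # One statement: for each (archive, file, component) mapping of a
+ # binary, check whether any suite in that archive still references the
+ # binary; mark unused mappings with last_used and clear the mark on
+ # mappings that are in use again.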
+ query = """
+ WITH usage AS (
+ SELECT
+ af.archive_id AS archive_id,
+ af.file_id AS file_id,
+ af.component_id AS component_id,
+ BOOL_OR(EXISTS (SELECT 1 FROM bin_associations ba
+ JOIN suite s ON ba.suite = s.id
+ WHERE ba.bin = b.id
+ AND s.archive_id = af.archive_id))
+ AS in_use
+ FROM files_archive_map af
+ JOIN binaries b ON af.file_id = b.file
+ GROUP BY af.archive_id, af.file_id, af.component_id
+ )
+
+ UPDATE files_archive_map af
+ SET last_used = CASE WHEN usage.in_use THEN NULL ELSE :last_used END
+ FROM usage, files f, archive
+ WHERE af.archive_id = usage.archive_id AND af.file_id = usage.file_id AND af.component_id = usage.component_id
+ AND ((af.last_used IS NULL AND NOT usage.in_use) OR (af.last_used IS NOT NULL AND usage.in_use))
+ AND af.file_id = f.id
+ AND af.archive_id = archive.id
+ RETURNING archive.name, f.filename, af.last_used IS NULL"""
+
+ res = session.execute(query, {'last_used': now_date})
+ for i in res:
+ op = "set lastused"
+ if i[2]:
+ op = "unset lastused"
+ Logger.log([op, i[0], i[1]])
########################################
-def check_sources(now_date, delete_date, max_delete, session):
+def check_sources(now_date, session):
print "Checking for orphaned source packages..."
# Get the list of source packages not in a suite and not used by
# any binaries.
- q = session.execute("""
-SELECT s.id, s.file, f.filename
- FROM source s
- LEFT JOIN files f
- ON (s.file = f.id)
- WHERE f.last_used IS NULL
- AND s.id NOT IN
- (SELECT sa.source FROM src_associations sa)
- AND s.id NOT IN
- (SELECT b.source FROM binaries b)
- AND s.id NOT IN (SELECT esr.src_id FROM extra_src_references esr)
- AND f.id NOT IN
- (SELECT bqf.fileid FROM build_queue_files bqf)""")
#### XXX: this should ignore cases where the files for the binary b
#### have been marked for deletion (so the delay between bins go
#### byebye and sources go byebye is 0 instead of StayOfExecution)
- for i in q.fetchall():
- source_id = i[0]
- dsc_file_id = i[1]
- dsc_fname = i[2]
-
- # Mark the .dsc file for deletion
- Logger.log(["set lastused", dsc_fname])
- if not Options["No-Action"]:
- session.execute("""UPDATE files SET last_used = :last_used
- WHERE id = :dscfileid AND last_used IS NULL""",
- {'last_used': now_date, 'dscfileid': dsc_file_id})
-
- # Mark all other files references by .dsc too if they're not used by anyone else
- x = session.execute("""SELECT f.id, f.filename FROM files f, dsc_files d
- WHERE d.source = :sourceid AND d.file = f.id""",
- {'sourceid': source_id})
- for j in x.fetchall():
- file_id = j[0]
- file_name = j[1]
- y = session.execute("SELECT id FROM dsc_files d WHERE d.file = :fileid", {'fileid': file_id})
- if len(y.fetchall()) == 1:
- Logger.log(["set lastused", file_name])
- if not Options["No-Action"]:
- session.execute("""UPDATE files SET last_used = :lastused
- WHERE id = :fileid AND last_used IS NULL""",
- {'lastused': now_date, 'fileid': file_id})
-
- if not Options["No-Action"]:
- session.commit()
-
# Check for any sources which are marked for deletion but which
# are now used again.
- q = session.execute("""
-SELECT f.id, f.filename FROM source s, files f, dsc_files df
- WHERE f.last_used IS NOT NULL AND s.id = df.source AND df.file = f.id
- AND ((EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id))
- OR (EXISTS (SELECT 1 FROM extra_src_references esr WHERE esr.src_id = s.id))
- OR (EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id))
- OR (EXISTS (SELECT 1 FROM build_queue_files bqf WHERE bqf.fileid = s.file)))""")
#### XXX: this should also handle deleted binaries specially (ie, not
#### reinstate sources because of them
- for i in q.fetchall():
- Logger.log(["unset lastused", i[1]])
- if not Options["No-Action"]:
- session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid",
- {'fileid': i[0]})
+ # TODO: the UPDATE part is the same as in check_binaries. Merge?
+
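+ # A source's files stay in use while the source is in a suite of the
+ # archive, while a binary built from it still has an unexpired file in
+ # the archive, or while it is referenced via Built-Using
+ # (extra_src_references).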
+ query = """
+ WITH usage AS (
+ SELECT
+ af.archive_id AS archive_id,
+ af.file_id AS file_id,
+ af.component_id AS component_id,
+ BOOL_OR(EXISTS (SELECT 1 FROM src_associations sa
+ JOIN suite s ON sa.suite = s.id
+ WHERE sa.source = df.source
+ AND s.archive_id = af.archive_id)
+ OR EXISTS (SELECT 1 FROM files_archive_map af_bin
+ JOIN binaries b ON af_bin.file_id = b.file
+ WHERE b.source = df.source
+ AND af_bin.archive_id = af.archive_id
+ AND (af_bin.last_used IS NULL OR af_bin.last_used > ad.delete_date))
+ OR EXISTS (SELECT 1 FROM extra_src_references esr
+ JOIN bin_associations ba ON esr.bin_id = ba.bin
+ JOIN binaries b ON ba.bin = b.id
+ JOIN suite s ON ba.suite = s.id
+ WHERE esr.src_id = df.source
+ AND s.archive_id = af.archive_id))
+ AS in_use
+ FROM files_archive_map af
+ JOIN dsc_files df ON af.file_id = df.file
+ JOIN archive_delete_date ad ON af.archive_id = ad.archive_id
+ GROUP BY af.archive_id, af.file_id, af.component_id
+ )
- if not Options["No-Action"]:
- session.commit()
+ UPDATE files_archive_map af
+ SET last_used = CASE WHEN usage.in_use THEN NULL ELSE :last_used END
+ FROM usage, files f, archive
+ WHERE af.archive_id = usage.archive_id AND af.file_id = usage.file_id AND af.component_id = usage.component_id
+ AND ((af.last_used IS NULL AND NOT usage.in_use) OR (af.last_used IS NOT NULL AND usage.in_use))
+ AND af.file_id = f.id
+ AND af.archive_id = archive.id
+
+ RETURNING archive.name, f.filename, af.last_used IS NULL
+ """
+
+ res = session.execute(query, {'last_used': now_date})
+ for i in res:
+ op = "set lastused"
+ if i[2]:
+ op = "unset lastused"
+ Logger.log([op, i[0], i[1]])
########################################
-def check_files(now_date, delete_date, max_delete, session):
+def check_files(now_date, session):
# FIXME: this is evil; nothing should ever be in this state. if
# they are, it's a bug.
print "Checking for unused files..."
q = session.execute("""
-SELECT id, filename FROM files f
- WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id)
- AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id)
- AND NOT EXISTS (SELECT 1 FROM changes_pool_files cpf WHERE cpf.fileid = f.id)
- AND NOT EXISTS (SELECT 1 FROM build_queue_files qf WHERE qf.fileid = f.id)
- AND last_used IS NULL
- ORDER BY filename""")
-
- ql = q.fetchall()
- if len(ql) > 0:
- utils.warn("check_files found something it shouldn't")
- for x in ql:
- utils.warn("orphaned file: %s" % x)
- Logger.log(["set lastused", x[1], "ORPHANED FILE"])
- if not Options["No-Action"]:
- session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid",
- {'lastused': now_date, 'fileid': x[0]})
+ UPDATE files_archive_map af
+ SET last_used = :last_used
+ FROM files f, archive
+ WHERE af.file_id = f.id
+ AND af.archive_id = archive.id
+ AND NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = af.file_id)
+ AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = af.file_id)
+ AND af.last_used IS NULL
+ RETURNING archive.name, f.filename""", {'last_used': now_date})
+
+ for x in q:
+ utils.warn("orphaned file: {0}".format(x))
+ Logger.log(["set lastused", x[0], x[1], "ORPHANED FILE"])
- if not Options["No-Action"]:
- session.commit()
+ if not Options["No-Action"]:
+ session.commit()
-def clean_binaries(now_date, delete_date, max_delete, session):
+def clean_binaries(now_date, session):
# We do this here so that the binaries we remove will have their
# source also removed (if possible).
# XXX: why doesn't this remove the files here as well? I don't think it
# buys anything keeping this separate
- print "Cleaning binaries from the DB..."
+
print "Deleting from binaries table... "
- for bin in session.query(DBBinary).join(DBBinary.poolfile).filter(PoolFile.last_used <= delete_date):
- Logger.log(["delete binary", bin.poolfile.filename])
- if not Options["No-Action"]:
- session.delete(bin)
- if not Options["No-Action"]:
- session.commit()
+ q = session.execute("""
+ DELETE FROM binaries b
+ USING files f
+ WHERE f.id = b.file
+ AND NOT EXISTS (SELECT 1 FROM files_archive_map af
+ JOIN archive_delete_date ad ON af.archive_id = ad.archive_id
+ WHERE af.file_id = b.file
+ AND (af.last_used IS NULL OR af.last_used >= ad.delete_date))
+ RETURNING f.filename
+ """)
+ for b in q:
+ Logger.log(["delete binary", b[0]])
########################################
-def clean(now_date, delete_date, max_delete, session):
+def clean(now_date, archives, max_delete, session):
cnf = Config()
count = 0
# Delete from source
print "Deleting from source table... "
q = session.execute("""
-SELECT s.id, f.filename FROM source s, files f
- WHERE f.last_used <= :deletedate
- AND s.file = f.id
- AND s.id NOT IN (SELECT src_id FROM extra_src_references)""", {'deletedate': delete_date})
- for s in q.fetchall():
- Logger.log(["delete source", s[1], s[0]])
- if not Options["No-Action"]:
- session.execute("DELETE FROM dsc_files WHERE source = :s_id", {"s_id":s[0]})
- session.execute("DELETE FROM source WHERE id = :s_id", {"s_id":s[0]})
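+ # Delete sources whose files have expired in every archive, drop their
+ # dsc_files rows, and immediately expire files that were referenced
+ # only by the deleted .dsc entries.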
+ WITH
+ deleted_sources AS (
+ DELETE FROM source
+ USING files f
+ WHERE source.file = f.id
+ AND NOT EXISTS (SELECT 1 FROM files_archive_map af
+ JOIN archive_delete_date ad ON af.archive_id = ad.archive_id
+ WHERE af.file_id = source.file
+ AND (af.last_used IS NULL OR af.last_used >= ad.delete_date))
+ RETURNING source.id AS id, f.filename AS filename
+ ),
+ deleted_dsc_files AS (
+ DELETE FROM dsc_files df WHERE df.source IN (SELECT id FROM deleted_sources)
+ RETURNING df.file AS file_id
+ ),
+ now_unused_source_files AS (
+ UPDATE files_archive_map af
+ SET last_used = '1977-03-13 13:37:42' -- Kill it now. We waited long enough before removing the .dsc.
+ WHERE af.file_id IN (SELECT file_id FROM deleted_dsc_files)
+ AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = af.file_id)
+ )
+ SELECT filename FROM deleted_sources""")
+ for s in q:
+ Logger.log(["delete source", s[0]])
if not Options["No-Action"]:
session.commit()
# Delete files from the pool
- old_files = session.query(PoolFile).filter(PoolFile.last_used <= delete_date)
+ old_files = session.query(ArchiveFile).filter('files_archive_map.last_used <= (SELECT delete_date FROM archive_delete_date ad WHERE ad.archive_id = files_archive_map.archive_id)').join(Archive)
if max_delete is not None:
old_files = old_files.limit(max_delete)
print "Limiting removals to %d" % max_delete
- for pf in old_files:
- filename = os.path.join(pf.location.path, pf.filename)
+ if archives is not None:
+ archive_ids = [ a.archive_id for a in archives ]
+ old_files = old_files.filter(ArchiveFile.archive_id.in_(archive_ids))
+
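+ # Remove expired files per archive; each archive's cutoff comes from
+ # archive_delete_date (now - stayofexecution).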
+ for af in old_files:
+ filename = af.path
if not os.path.exists(filename):
- utils.warn("can not find '%s'." % (filename))
+ Logger.log(["database referred to non-existing file", af.path])
+ session.delete(af)
continue
- Logger.log(["delete pool file", filename])
+ Logger.log(["delete archive file", filename])
if os.path.isfile(filename):
if os.path.islink(filename):
count += 1
if os.path.exists(dest_filename):
dest_filename = utils.find_next_free(dest_filename)
- Logger.log(["move to morgue", filename, dest_filename])
if not Options["No-Action"]:
- utils.move(filename, dest_filename)
+ if af.archive.use_morgue:
+ Logger.log(["move to morgue", filename, dest_filename])
+ utils.move(filename, dest_filename)
+ else:
+ Logger.log(["removed file", filename])
+ os.unlink(filename)
if not Options["No-Action"]:
- session.delete(pf)
+ session.delete(af)
session.commit()
else:
Logger.log(["total", count, utils.size_type(size)])
print "Cleaned %d files, %s." % (count, utils.size_type(size))
+ # Delete entries in files no longer referenced by any archive
+ query = """
+ DELETE FROM files f
+ WHERE NOT EXISTS (SELECT 1 FROM files_archive_map af WHERE af.file_id = f.id)
+ """
+ session.execute(query)
+
+ if not Options["No-Action"]:
+ session.commit()
+
################################################################################
-def clean_maintainers(now_date, delete_date, max_delete, session):
+def clean_maintainers(now_date, session):
print "Cleaning out unused Maintainer entries..."
# TODO Replace this whole thing with one SQL statement
################################################################################
-def clean_fingerprints(now_date, delete_date, max_delete, session):
+def clean_fingerprints(now_date, session):
print "Cleaning out unused fingerprint entries..."
# TODO Replace this whole thing with one SQL statement
count = 0
cursor = session.execute(
- "SELECT DISTINCT(path) FROM location WHERE type = :type",
- {'type': 'pool'},
+ """SELECT DISTINCT(path) FROM archive"""
)
bases = [x[0] for x in cursor.fetchall()]
################################################################################
+def set_archive_delete_dates(now_date, session):
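+ # Build a per-archive cutoff: files unused since before
+ # now - archive.stayofexecution may be removed.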
+ session.execute("""
+ CREATE TEMPORARY TABLE archive_delete_date (
+ archive_id INT NOT NULL,
+ delete_date TIMESTAMP NOT NULL
+ )""")
+
+ session.execute("""
+ INSERT INTO archive_delete_date
+ (archive_id, delete_date)
+ SELECT
+ archive.id, :now_date - archive.stayofexecution
+ FROM archive""", {'now_date': now_date})
+
+ session.flush()
+
+################################################################################
+
def main():
global Options, Logger
cnf["Clean-Suites::Options::%s" % (i)] = ""
Arguments = [('h',"help","Clean-Suites::Options::Help"),
+ ('a','archive','Clean-Suites::Options::Archive','HasArg'),
('n',"no-action","Clean-Suites::Options::No-Action"),
('m',"maximum","Clean-Suites::Options::Maximum", "HasArg")]
session = DBConn().session()
- now_date = datetime.now()
+ archives = None
+ if 'Archive' in Options:
+ archive_names = Options['Archive'].split(',')
+ archives = session.query(Archive).filter(Archive.archive_name.in_(archive_names)).all()
+ if len(archives) == 0:
+ utils.fubar('Unknown archive.')
- # Stay of execution; default to 1.5 days
- soe = int(cnf.get('Clean-Suites::StayOfExecution', '129600'))
+ now_date = datetime.now()
- delete_date = now_date - timedelta(seconds=soe)
+ set_archive_delete_dates(now_date, session)
- check_binaries(now_date, delete_date, max_delete, session)
- clean_binaries(now_date, delete_date, max_delete, session)
- check_sources(now_date, delete_date, max_delete, session)
- check_files(now_date, delete_date, max_delete, session)
- clean(now_date, delete_date, max_delete, session)
- clean_maintainers(now_date, delete_date, max_delete, session)
- clean_fingerprints(now_date, delete_date, max_delete, session)
+ check_binaries(now_date, session)
+ clean_binaries(now_date, session)
+ check_sources(now_date, session)
+ check_files(now_date, session)
+ clean(now_date, archives, max_delete, session)
+ clean_maintainers(now_date, session)
+ clean_fingerprints(now_date, session)
clean_empty_directories(session)
+ session.rollback()
+
Logger.close()
################################################################################
show this help and exit
OPTIONS for generate
+ -a, --archive=ARCHIVE
+ only operate on suites in the specified archive
+
-s, --suite={stable,testing,unstable,...}
only operate on specified suite names
################################################################################
-def write_all(cnf, suite_names = [], component_names = [], force = None):
+def write_all(cnf, archive_names = [], suite_names = [], component_names = [], force = None):
Logger = daklog.Logger('contents generate')
- ContentsWriter.write_all(Logger, suite_names, component_names, force)
+ ContentsWriter.write_all(Logger, archive_names, suite_names, component_names, force)
Logger.close()
################################################################################
cnf['Contents::Options::Limit'] = ''
cnf['Contents::Options::Force'] = ''
arguments = [('h', "help", 'Contents::Options::Help'),
+ ('a', 'archive', 'Contents::Options::Archive', 'HasArg'),
('s', "suite", 'Contents::Options::Suite', "HasArg"),
('c', "component", 'Contents::Options::Component', "HasArg"),
('l', "limit", 'Contents::Options::Limit', "HasArg"),
binary_scan_all(cnf, limit)
return
+ archive_names = utils.split_args(options['Archive'])
suite_names = utils.split_args(options['Suite'])
component_names = utils.split_args(options['Component'])
force = bool(options['Force'])
if args[0] == 'generate':
- write_all(cnf, suite_names, component_names, force)
+ write_all(cnf, archive_names, suite_names, component_names, force)
return
usage()
import apt_pkg
import os
+from daklib.archive import ArchiveTransaction
from daklib.config import Config
from daklib.dbconn import *
from daklib import daklog
#######################################################################################
-def get_id(package, version, architecture, session):
- if architecture == "source":
- q = session.execute("SELECT id FROM source WHERE source = :package AND version = :version",
- {'package': package, 'version': version})
+def get_pkg(package, version, architecture, session):
+ if architecture == 'source':
+ q = session.query(DBSource).filter_by(source=package, version=version) \
+ .join(DBSource.poolfile)
else:
- q = session.execute("""SELECT b.id FROM binaries b, architecture a
- WHERE b.package = :package AND b.version = :version
- AND (a.arch_string = :arch OR a.arch_string = 'all')
- AND b.architecture = a.id""",
- {'package': package, 'version': version, 'arch': architecture})
-
- ql = q.fetchall()
- if len(ql) < 1:
- utils.warn("Couldn't find '%s_%s_%s'." % (package, version, architecture))
- return None
-
- if len(ql) > 1:
- utils.warn("Found more than one match for '%s_%s_%s'." % (package, version, architecture))
- return None
+ q = session.query(DBBinary).filter_by(package=package, version=version) \
+ .join(DBBinary.architecture).filter(Architecture.arch_string.in_([architecture, 'all'])) \
+ .join(DBBinary.poolfile)
- return ql[0][0]
+ pkg = q.first()
+ if pkg is None:
+ utils.warn("Could not find {0}_{1}_{2}.".format(package, version, architecture))
+ return pkg
#######################################################################################
def cmp_package_version(a, b):
"""
- comparison function for tuples of the form (package-name, version ...)
+ comparison function for tuples of the form (package-name, version, arch, ...)
"""
- cmp_package = cmp(a[0], b[0])
- if cmp_package != 0:
- return cmp_package
- return apt_pkg.version_compare(a[1], b[1])
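+ # 'source' entries sort first so sources are processed before the
+ # binaries built from them.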
+ res = 0
+ if a[2] == 'source' and b[2] != 'source':
+ res = -1
+ elif a[2] != 'source' and b[2] == 'source':
+ res = 1
+ if res == 0:
+ res = cmp(a[0], b[0])
+ if res == 0:
+ res = apt_pkg.version_compare(a[1], b[1])
+ return res
#######################################################################################
-def set_suite(file, suite, session, britney=False, force=False):
+def set_suite(file, suite, transaction, britney=False, force=False):
+ session = transaction.session
suite_id = suite.suite_id
lines = file.readlines()
if key not in current:
(package, version, architecture) = key
version_checks(package, architecture, suite.suite_name, version, session, force)
- pkid = get_id (package, version, architecture, session)
- if not pkid:
+ pkg = get_pkg(package, version, architecture, session)
+ if pkg is None:
continue
+
+ component = pkg.poolfile.component
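+ # copy_source/copy_binary add the package to the suite and its files to
+ # the suite's archive.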
if architecture == "source":
- session.execute("""INSERT INTO src_associations (suite, source)
- VALUES (:suiteid, :pkid)""", {'suiteid': suite_id, 'pkid': pkid})
+ transaction.copy_source(pkg, suite, component)
else:
- session.execute("""INSERT INTO bin_associations (suite, bin)
- VALUES (:suiteid, :pkid)""", {'suiteid': suite_id, 'pkid': pkid})
- Logger.log(["added", " ".join(key), pkid])
+ transaction.copy_binary(pkg, suite, component)
+
+ Logger.log(["added", " ".join(key)])
# Check to see which packages need removed and remove them
for key, pkid in current.iteritems():
#######################################################################################
-def process_file(file, suite, action, session, britney=False, force=False):
+def process_file(file, suite, action, transaction, britney=False, force=False):
+ session = transaction.session
+
if action == "set":
- set_suite(file, suite, session, britney, force)
+ set_suite(file, suite, transaction, britney, force)
return
suite_id = suite.suite_id
request.sort(cmp=cmp_package_version)
for package, version, architecture in request:
- pkid = get_id(package, version, architecture, session)
- if not pkid:
+ pkg = get_pkg(package, version, architecture, session)
+ if pkg is None:
continue
+ if architecture == 'source':
+ pkid = pkg.source_id
+ else:
+ pkid = pkg.binary_id
+
+ component = pkg.poolfile.component
# Do version checks when adding packages
if action == "add":
utils.warn("'%s_%s_%s' already exists in suite %s." % (package, version, architecture, suite))
continue
else:
- session.execute("""INSERT INTO src_associations (suite, source)
- VALUES (:suiteid, :pkid)""",
- {'suiteid': suite_id, 'pkid': pkid})
+ transaction.copy_source(pkg, suite, component)
Logger.log(["added", package, version, architecture, suite.suite_name, pkid])
elif action == "remove":
utils.warn("'%s_%s_%s' already exists in suite %s." % (package, version, architecture, suite))
continue
else:
- session.execute("""INSERT INTO bin_associations (suite, bin)
- VALUES (:suiteid, :pkid)""",
- {'suiteid': suite_id, 'pkid': pkid})
+ transaction.copy_binary(pkg, suite, component)
Logger.log(["added", package, version, architecture, suite.suite_name, pkid])
elif action == "remove":
if association_id == None:
if Options["Help"]:
usage()
- session = DBConn().session()
-
force = Options.has_key("Force") and Options["Force"]
action = None
for i in ("add", "list", "remove", "set"):
if cnf["Control-Suite::Options::%s" % (i)] != "":
suite_name = cnf["Control-Suite::Options::%s" % (i)]
- suite = get_suite(suite_name, session=session)
- if suite is None:
- utils.fubar("Unknown suite '%s'." % (suite_name))
- else:
- if action:
- utils.fubar("Can only perform one action at a time.")
- action = i
- # Safety/Sanity check
- if action == "set" and (not suite.allowcsset):
- if force:
- utils.warn("Would not normally allow setting suite %s (allowsetcs is FALSE), but --force used" % (suite_name))
- else:
- utils.fubar("Will not reset suite %s due to its database configuration (allowsetcs is FALSE)" % (suite_name))
+ if action:
+ utils.fubar("Can only perform one action at a time.")
+
+ action = i
# Need an action...
- if action == None:
+ if action is None:
utils.fubar("No action specified.")
britney = False
britney = True
if action == "list":
+ session = DBConn().session()
+ suite = session.query(Suite).filter_by(suite_name=suite_name).one()
get_list(suite, session)
else:
Logger = daklog.Logger("control-suite")
- if file_list:
- for f in file_list:
- process_file(utils.open_file(f), suite, action, session, britney, force)
- else:
- process_file(sys.stdin, suite, action, session, britney, force)
+
+ with ArchiveTransaction() as transaction:
+ session = transaction.session
+ suite = session.query(Suite).filter_by(suite_name=suite_name).one()
+
+ if action == "set" and not suite.allowcsset:
+ if force:
+ utils.warn("Would not normally allow setting suite {0} (allowsetcs is FALSE), but --force used".format(suite_name))
+ else:
+ utils.fubar("Will not reset suite {0} due to its database configuration (allowsetcs is FALSE)".format(suite_name))
+
+ if file_list:
+ for f in file_list:
+ process_file(utils.open_file(f), suite, action, transaction, britney, force)
+ else:
+ process_file(sys.stdin, suite, action, transaction, britney, force)
+
Logger.close()
#######################################################################################
# Checks based on the Sources files
components = get_component_names(session)
for component in components:
- filename = "%s/dists/%s/%s/source/Sources.gz" % (cnf["Dir::Root"], suite_name, component)
+ filename = "%s/dists/%s/%s/source/Sources.gz" % (suite.archive.path, suite_name, component)
# apt_pkg.ParseTagFile needs a real file handle and can't handle a GzipFile instance...
(fd, temp_filename) = utils.temp_filename()
(result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
for architecture in architectures:
if component == 'main/debian-installer' and re.match("kfreebsd", architecture):
continue
- filename = "%s/dists/%s/%s/binary-%s/Packages.gz" % (cnf["Dir::Root"], suite_name, component, architecture)
+ filename = "%s/dists/%s/%s/binary-%s/Packages.gz" % (suite.archive.path, suite_name, component, architecture)
# apt_pkg.ParseTagFile needs a real file handle
(fd, temp_filename) = utils.temp_filename()
(result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
"Remove obsolete source and binary associations from suites"),
("export",
"Export uploads from policy queues"),
+ ("export-suite",
+ "Export a suite to a flat directory structure"),
("make-pkg-file-mapping",
"Generate package <-> file mapping"),
("generate-filelist",
--- /dev/null
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Multi-archive support; convert policy and build queues to regular suites
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+import os
+
+################################################################################
+
+def _track_files_per_archive(cnf, c):
+ c.execute("SELECT id FROM archive")
+ (archive_id,) = c.fetchone()
+
+ if c.fetchone() is not None:
+ raise DBUpdateError("Cannot automatically upgrade from installation with multiple archives.")
+
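+ # files_archive_map replaces the location table: a pool file can now
+ # belong to several archives, each mapping with its own component and
+ # last_used timestamp.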
+ c.execute("""CREATE TABLE files_archive_map (
+ file_id INT NOT NULL REFERENCES files(id),
+ archive_id INT NOT NULL REFERENCES archive(id),
+ component_id INT NOT NULL REFERENCES component(id),
+ last_used TIMESTAMP DEFAULT NULL,
+ created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY (file_id, archive_id, component_id)
+ )""")
+
+ c.execute("""INSERT INTO files_archive_map (file_id, archive_id, component_id)
+ SELECT f.id, %s, l.component
+ FROM files f
+ JOIN location l ON f.location = l.id""", (archive_id,))
+
+ c.execute("""UPDATE files f SET filename = substring(f.filename FROM '/(.*)')
+ FROM location l, component c
+ WHERE f.location = l.id AND l.component = c.id
+ AND f.filename LIKE c.name || '/%'""")
+
+ # NOTE: The location table would need these changes, but we drop it later
+ # anyway.
+ #c.execute("""UPDATE location l SET path = path || c.name || '/'
+ # FROM component c
+ # WHERE l.component = c.id
+ # AND l.path NOT LIKE '%/' || c.name || '/'""")
+
+ c.execute("DROP VIEW IF EXISTS binfiles_suite_component_arch")
+ c.execute("ALTER TABLE files DROP COLUMN location")
+ c.execute("DROP TABLE location")
+
+def _convert_policy_queues(cnf, c):
+ base = cnf['Dir::Base']
+ new_path = os.path.join(base, 'new')
+ policy_path = os.path.join(base, 'policy')
+
+ # Forget changes in (old) policy queues so they can be processed again.
+ c.execute("DROP TABLE IF EXISTS build_queue_policy_files")
+ c.execute("DROP TABLE IF EXISTS build_queue_files")
+ c.execute("DROP TABLE IF EXISTS changes_pending_binaries")
+ c.execute("DROP TABLE IF EXISTS changes_pending_source_files")
+ c.execute("DROP TABLE IF EXISTS changes_pending_source")
+ c.execute("DROP TABLE IF EXISTS changes_pending_files_map")
+ c.execute("DROP TABLE IF EXISTS changes_pending_files")
+ c.execute("DROP TABLE IF EXISTS changes_pool_files")
+ c.execute("DELETE FROM changes WHERE in_queue IS NOT NULL")
+
+ # newstage and unchecked are no longer queues
+ c.execute("""
+ DELETE FROM policy_queue
+ WHERE queue_name IN ('newstage', 'unchecked')
+ """)
+
+ # Create archive for NEW
+ c.execute("INSERT INTO archive (name, description, path, tainted, use_morgue, mode) VALUES ('new', 'new queue', %s, 't', 'f', '0640') RETURNING (id)", (new_path,))
+ (new_archive_id,) = c.fetchone()
+
+ # Create archive for policy queues
+ c.execute("INSERT INTO archive (name, description, path, use_morgue) VALUES ('policy', 'policy queues', %s, 'f') RETURNING (id)", (policy_path,))
+ (archive_id,) = c.fetchone()
+
+ # Add suites for policy queues
+ c.execute("""
+ INSERT INTO suite
+ (archive_id, suite_name, origin, label, description, signingkeys)
+ SELECT
+ %s, queue_name, origin, label, releasedescription, NULLIF(ARRAY[signingkey], ARRAY[NULL])
+ FROM policy_queue
+ WHERE queue_name NOT IN ('unchecked')
+ """, (archive_id,))
+
+ # move NEW to its own archive
+ c.execute("UPDATE suite SET archive_id=%s WHERE suite_name IN ('byhand', 'new')", (new_archive_id,))
+
+ c.execute("""ALTER TABLE policy_queue
+ DROP COLUMN origin,
+ DROP COLUMN label,
+ DROP COLUMN releasedescription,
+ DROP COLUMN signingkey,
+ DROP COLUMN stay_of_execution,
+ DROP COLUMN perms,
+ ADD COLUMN suite_id INT REFERENCES suite(id)
+ """)
+
+ c.execute("UPDATE policy_queue pq SET suite_id=s.id FROM suite s WHERE s.suite_name = pq.queue_name")
+ c.execute("ALTER TABLE policy_queue ALTER COLUMN suite_id SET NOT NULL")
+
+ c.execute("""INSERT INTO suite_architectures (suite, architecture)
+ SELECT pq.suite_id, sa.architecture
+ FROM policy_queue pq
+ JOIN suite ON pq.id = suite.policy_queue_id
+ JOIN suite_architectures sa ON suite.id = sa.suite
+ WHERE pq.queue_name NOT IN ('byhand', 'new')
+ GROUP BY pq.suite_id, sa.architecture""")
+
+ # We only take architectures that appear in suite_architectures, so we
+ # only add arches actually in use. The exact set of arches for policy
+ # queues is not too important anyway, unless you want to generate
+ # Packages indices.
+ c.execute("""INSERT INTO suite_architectures (suite, architecture)
+ SELECT DISTINCT pq.suite_id, sa.architecture
+ FROM policy_queue pq, suite_architectures sa
+ WHERE pq.queue_name IN ('byhand', 'new')""")
+
+ c.execute("""CREATE TABLE policy_queue_upload (
+ id SERIAL NOT NULL PRIMARY KEY,
+ policy_queue_id INT NOT NULL REFERENCES policy_queue(id),
+ target_suite_id INT NOT NULL REFERENCES suite(id),
+ changes_id INT NOT NULL REFERENCES changes(id),
+ source_id INT REFERENCES source(id),
+ UNIQUE (policy_queue_id, target_suite_id, changes_id)
+ )""")
+
+ c.execute("""CREATE TABLE policy_queue_upload_binaries_map (
+ policy_queue_upload_id INT REFERENCES policy_queue_upload(id) ON DELETE CASCADE,
+ binary_id INT REFERENCES binaries(id),
+ PRIMARY KEY (policy_queue_upload_id, binary_id)
+ )""")
+
+ c.execute("""
+ CREATE TABLE policy_queue_byhand_file (
+ id SERIAL NOT NULL PRIMARY KEY,
+ upload_id INT NOT NULL REFERENCES policy_queue_upload(id),
+ filename TEXT NOT NULL,
+ processed BOOLEAN NOT NULL DEFAULT 'f'
+ )""")
+
+ c.execute("""ALTER TABLE changes
+ DROP COLUMN in_queue,
+ DROP COLUMN approved_for
+ """)
+
+def _convert_build_queues(cnf, c):
+ base = cnf['Dir::Base']
+ build_queue_path = os.path.join(base, 'build-queues')
+
+ c.execute("INSERT INTO archive (name, description, path, tainted, use_morgue) VALUES ('build-queues', 'build queues', %s, 't', 'f') RETURNING id", [build_queue_path])
+ archive_id, = c.fetchone()
+
+ c.execute("ALTER TABLE build_queue ADD COLUMN suite_id INT REFERENCES suite(id)")
+
+ c.execute("""
+ INSERT INTO suite
+ (archive_id, suite_name, origin, label, description, signingkeys, notautomatic)
+ SELECT
+ %s, queue_name, origin, label, releasedescription, NULLIF(ARRAY[signingkey], ARRAY[NULL]), notautomatic
+ FROM build_queue
+ """, [archive_id])
+ c.execute("UPDATE build_queue bq SET suite_id=(SELECT id FROM suite s WHERE s.suite_name = bq.queue_name)")
+ c.execute("ALTER TABLE build_queue ALTER COLUMN suite_id SET NOT NULL")
+
+ c.execute("""INSERT INTO suite_architectures (suite, architecture)
+ SELECT bq.suite_id, sa.architecture
+ FROM build_queue bq
+ JOIN suite_build_queue_copy sbqc ON bq.id = sbqc.build_queue_id
+ JOIN suite ON sbqc.suite = suite.id
+ JOIN suite_architectures sa ON suite.id = sa.suite
+ GROUP BY bq.suite_id, sa.architecture""")
+
+ c.execute("""ALTER TABLE build_queue
+ DROP COLUMN path,
+ DROP COLUMN copy_files,
+ DROP COLUMN origin,
+ DROP COLUMN label,
+ DROP COLUMN releasedescription,
+ DROP COLUMN signingkey,
+ DROP COLUMN notautomatic""")
+
+def do_update(self):
+ print __doc__
+ try:
+ cnf = Config()
+ if 'Dir::Base' not in cnf:
+ print """
+MANUAL UPGRADE INSTRUCTIONS
+===========================
+
+This database update will convert policy and build queues to regular suites.
+The following archives will be created under Dir::Base:
+
+ NEW: <base>/new
+ policy queues: <base>/policy
+ build queues: <base>/build-queues
+
+Please add Dir::Base to dak.conf and try the update again. Once the database
+upgrade is finished, you will have to reprocess all uploads currently in
+policy queues: just move them back to unchecked manually.
+"""
+ raise DBUpdateError("Please update dak.conf and try again.")
+
+ c = self.db.cursor()
+
+ _track_files_per_archive(cnf, c)
+ _convert_policy_queues(cnf, c)
+ _convert_build_queues(cnf, c)
+
+ c.execute("UPDATE config SET value = '75' WHERE name = 'db_revision'")
+ self.db.commit()
+
+ except psycopg2.ProgrammingError as msg:
+ self.db.rollback()
+ raise DBUpdateError('Unable to apply sick update 75, rollback issued. Error message : %s' % (str(msg)))
--- /dev/null
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Add list of closed bugs to changes table
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+ print __doc__
+ try:
+ cnf = Config()
+
+ c = self.db.cursor()
+
+ c.execute("ALTER TABLE changes ADD COLUMN closes TEXT[]")
+
+ c.execute("UPDATE config SET value = '76' WHERE name = 'db_revision'")
+ self.db.commit()
+
+ except psycopg2.ProgrammingError as msg:
+ self.db.rollback()
+ raise DBUpdateError('Unable to apply sick update 76, rollback issued. Error message: {0}'.format(msg))
--- /dev/null
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Move stayofexecution to the database
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+ print __doc__
+ try:
+ cnf = Config()
+
+ c = self.db.cursor()
+
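+ # The old global value is in seconds; PostgreSQL casts the bare number
+ # to an interval in seconds. Queue archives get no grace period.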
+ stayofexecution = cnf.get('Clean-Suites::StayOfExecution', '129600')
+ c.execute("ALTER TABLE archive ADD COLUMN stayofexecution INTERVAL NOT NULL DEFAULT %s", (stayofexecution,))
+ c.execute("UPDATE archive SET stayofexecution='0' WHERE name IN ('new', 'policy', 'build-queues')")
+
+ c.execute("UPDATE config SET value = '77' WHERE name = 'db_revision'")
+ self.db.commit()
+
+ except psycopg2.ProgrammingError as msg:
+ self.db.rollback()
+ raise DBUpdateError('Unable to apply sick update 77, rollback issued. Error message: {0}'.format(msg))
session = DBConn().session()
for suite_name in utils.split_args(Options['Suite']):
suite = session.query(Suite).filter_by(suite_name = suite_name).one()
+
+ # Skip policy queues. We don't want to remove obsolete packages from those.
+ policy_queue = session.query(PolicyQueue).filter_by(suite=suite).first()
+ if policy_queue is not None:
+ continue
+
if not suite.untouchable or Options['Force']:
doDaDoDa(suite.suite_id, session)
if Options['No-Action']:
for u in uploads:
print "Processing {0}...".format(u.changes.changesname)
- UploadCopy(u).export(directory, symlink=symlink)
+ UploadCopy(u).export(directory, symlink=symlink, ignore_existing=True)
if __name__ == '__main__':
main()
--- /dev/null
+#! /usr/bin/env python
+#
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import apt_pkg
+import os
+import sys
+
+from daklib.config import Config
+from daklib.dbconn import *
+from daklib.fstransactions import FilesystemTransaction
+
+def usage():
+ print """Usage: dak export-suite -s <suite> [options]
+
+Export binaries and sources from a suite to a flat directory structure.
+
+ -c --copy copy files instead of symlinking them
+ -d <directory> target directory to export packages to
+ default: current directory
+ -s <suite> suite to export packages from
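+
+Example (target directory is illustrative):
+  dak export-suite -s unstable -d /srv/export/unstable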
+"""
+
+def main(argv=None):
+ if argv is None:
+ argv = sys.argv
+
+ arguments = [('h', 'help', 'Export::Options::Help'),
+ ('c', 'copy', 'Export::Options::Copy'),
+ ('d', 'directory', 'Export::Options::Directory', 'HasArg'),
+ ('s', 'suite', 'Export::Options::Suite', 'HasArg')]
+
+ cnf = Config()
+ apt_pkg.parse_commandline(cnf.Cnf, arguments, argv)
+ options = cnf.subtree('Export::Options')
+
+ if 'Help' in options or 'Suite' not in options:
+ usage()
+ sys.exit(0)
+
+ session = DBConn().session()
+
+ suite = session.query(Suite).filter_by(suite_name=options['Suite']).first()
+ if suite is None:
+ print "Unknown suite '{0}'".format(options['Suite'])
+ sys.exit(1)
+
+ directory = options.get('Directory')
+ if not directory:
+ print "No target directory."
+ sys.exit(1)
+
+ symlink = 'Copy' not in options
+
+ binaries = suite.binaries
+ sources = suite.sources
+
+ files = []
+ files.extend([ b.poolfile for b in binaries ])
+ for s in sources:
+ files.extend([ ds.poolfile for ds in s.srcfiles ])
+
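+ # FilesystemTransaction rolls the copies back if an error occurs before
+ # commit() is reached.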
+ with FilesystemTransaction() as fs:
+ for f in files:
+ dst = os.path.join(directory, f.basename)
+ fs.copy(f.fullpath, dst, symlink=symlink)
+ fs.commit()
+
+if __name__ == '__main__':
+ main()
print """Usage: dak generate-packages-sources2 [OPTIONS]
Generate the Packages/Sources files
+ -a, --archive=ARCHIVE process suites in ARCHIVE
-s, --suite=SUITE process this suite
Default: All suites not marked 'untouchable'
-f, --force Allow processing of untouchable suites
WHERE s.id=sm.src_id
)
||
- E'\nDirectory\: pool/' || SUBSTRING(f.filename FROM E'\\A(.*)/[^/]*\\Z')
+ E'\nDirectory\: pool/' || :component_name || '/' || SUBSTRING(f.filename FROM E'\\A(.*)/[^/]*\\Z')
||
E'\nPriority\: ' || pri.priority
||
overridesuite_id = suite.get_overridesuite().suite_id
- writer = SourcesFileWriter(suite=suite.suite_name, component=component.component_name)
+ writer = SourcesFileWriter(archive=suite.archive.path, suite=suite.suite_name, component=component.component_name)
output = writer.open()
# run query and write Sources
- r = session.execute(_sources_query, {"suite": suite_id, "component": component_id, "dsc_type": dsc_type, "overridesuite": overridesuite_id})
+ r = session.execute(_sources_query, {"suite": suite_id, "component": component_id, "component_name": component.component_name, "dsc_type": dsc_type, "overridesuite": overridesuite_id})
for (stanza,) in r:
print >>output, stanza
print >>output, ""
binaries b
JOIN bin_associations ba ON b.id = ba.bin
JOIN files f ON f.id = b.file
- JOIN location l ON l.id = f.location
+ JOIN files_archive_map fam ON f.id = fam.file_id AND fam.archive_id = :archive_id
JOIN source s ON b.source = s.id
WHERE
(b.architecture = :arch_all OR b.architecture = :arch) AND b.type = :type_name
AND ba.suite = :suite
- AND l.component = :component
+ AND fam.component_id = :component
)
SELECT
), '')
|| E'\nSection\: ' || sec.section
|| E'\nPriority\: ' || pri.priority
- || E'\nFilename\: pool/' || tmp.filename
+ || E'\nFilename\: pool/' || :component_name || '/' || tmp.filename
|| E'\nSize\: ' || tmp.size
|| E'\nMD5sum\: ' || tmp.md5sum
|| E'\nSHA1\: ' || tmp.sha1sum
if include_long_description:
metadata_skip.append("Description-md5")
- writer = PackagesFileWriter(suite=suite.suite_name, component=component.component_name,
+ writer = PackagesFileWriter(archive=suite.archive.path, suite=suite.suite_name,
+ component=component.component_name,
architecture=architecture.arch_string, debtype=type_name)
output = writer.open()
- r = session.execute(_packages_query, {"suite": suite_id, "component": component_id,
+ r = session.execute(_packages_query, {"archive_id": suite.archive.archive_id,
+ "suite": suite_id, "component": component_id, 'component_name': component.component_name,
"arch": architecture_id, "type_id": type_id, "type_name": type_name, "arch_all": arch_all_id,
"overridesuite": overridesuite_id, "metadata_skip": metadata_skip,
"include_long_description": 'true' if include_long_description else 'false'})
suite = session.query(Suite).get(suite_id)
component = session.query(Component).get(component_id)
- writer = TranslationFileWriter(suite=suite.suite_name, component=component.component_name, language="en")
+ writer = TranslationFileWriter(archive=suite.archive.path, suite=suite.suite_name, component=component.component_name, language="en")
output = writer.open()
r = session.execute(_translations_query, {"suite": suite_id, "component": component_id})
cnf = Config()
Arguments = [('h',"help","Generate-Packages-Sources::Options::Help"),
+ ('a','archive','Generate-Packages-Sources::Options::Archive','HasArg'),
('s',"suite","Generate-Packages-Sources::Options::Suite"),
('f',"force","Generate-Packages-Sources::Options::Force"),
('o','option','','ArbItem')]
print "I: Cannot find suite %s" % s
logger.log(['Cannot find suite %s' % s])
else:
- suites = session.query(Suite).filter(Suite.untouchable == False).all()
+ query = session.query(Suite).filter(Suite.untouchable == False)
+ if 'Archive' in Options:
+ query = query.join(Suite.archive).filter(Archive.archive_name==Options['Archive'])
+ suites = query.all()
force = Options.has_key("Force") and Options["Force"]
print """Usage: dak generate-releases [OPTIONS]
Generate the Release files
+ -a, --archive=ARCHIVE process suites in ARCHIVE
-s, --suite=SUITE(s) process this suite
Default: All suites not marked 'untouchable'
-f, --force Allow processing of untouchable suites
cnf = Config()
- suite_suffix = "%s" % (cnf.find("Dinstall::SuiteSuffix"))
+ suite_suffix = cnf.find("Dinstall::SuiteSuffix", "")
- outfile = os.path.join(cnf["Dir::Root"], 'dists', "%s/%s" % (suite.suite_name, suite_suffix), "Release")
+ outfile = os.path.join(suite.archive.path, 'dists', suite.suite_name, suite_suffix, "Release")
out = open(outfile + ".new", "w")
for key, dbfield in attribs:
out.write("Description: %s\n" % suite.description)
for comp in components:
- for dirpath, dirnames, filenames in os.walk("%sdists/%s/%s%s" % (cnf["Dir::Root"], suite.suite_name, suite_suffix, comp), topdown=True):
+ for dirpath, dirnames, filenames in os.walk(os.path.join(suite.archive.path, "dists", suite.suite_name, suite_suffix, comp), topdown=True):
if not re_gensubrelease.match(dirpath):
continue
# their checksums to the main Release file
oldcwd = os.getcwd()
- os.chdir("%sdists/%s/%s" % (cnf["Dir::Root"], suite.suite_name, suite_suffix))
+ os.chdir(os.path.join(suite.archive.path, "dists", suite.suite_name, suite_suffix))
hashfuncs = { 'MD5Sum' : apt_pkg.md5sum,
'SHA1' : apt_pkg.sha1sum,
cnf["Generate-Releases::Options::%s" % (i)] = ""
Arguments = [('h',"help","Generate-Releases::Options::Help"),
+ ('a','archive','Generate-Releases::Options::Archive','HasArg'),
('s',"suite","Generate-Releases::Options::Suite"),
('f',"force","Generate-Releases::Options::Force"),
('o','option','','ArbItem')]
print "cannot find suite %s" % s
Logger.log(['cannot find suite %s' % s])
else:
- suites = session.query(Suite).filter(Suite.untouchable == False).all()
+ query = session.query(Suite).filter(Suite.untouchable == False)
+ if 'Archive' in Options:
+ query = query.join(Suite.archive).filter(Archive.archive_name==Options['Archive'])
+ suites = query.all()
broken=[]
# Process dists directories
# TODO: Store location of each suite in database
for suite in session.query(Suite):
- suite_dir = os.path.join( Cnf['Dir::Root'], 'dists', "%s/%s" % (suite.suite_name, suite_suffix) )
+ suite_dir = os.path.join(suite.archive.path, 'dists', suite.suite_name, suite_suffix)
# TODO: Store valid suite/component mappings in database
for component in session.query(Component):
q = session.execute("""
SELECT b.package, b.version, a.arch_string, su.suite_name, c.name, m.name
FROM binaries b, architecture a, suite su, bin_associations ba,
- files f, location l, component c, maintainer m
+ files f, files_archive_map af, component c, maintainer m
WHERE b.package %s :package AND a.id = b.architecture AND su.id = ba.suite
- AND b.id = ba.bin AND b.file = f.id AND f.location = l.id
- AND l.component = c.id AND b.maintainer = m.id %s %s %s
+ AND b.id = ba.bin AND b.file = f.id AND af.file_id = f.id AND su.archive_id = af.archive_id
+ AND af.component_id = c.id AND b.maintainer = m.id %s %s %s
""" % (comparison_operator, con_suites, con_architectures, con_bintype), {'package': package})
ql = q.fetchall()
if check_source:
q = session.execute("""
SELECT s.source, s.version, 'source', su.suite_name, c.name, m.name
- FROM source s, suite su, src_associations sa, files f, location l,
+ FROM source s, suite su, src_associations sa, files f, files_archive_map af,
component c, maintainer m
WHERE s.source %s :package AND su.id = sa.suite AND s.id = sa.source
- AND s.file = f.id AND f.location = l.id AND l.component = c.id
+ AND s.file = f.id AND af.file_id = f.id AND af.archive_id = su.archive_id AND af.component_id = c.id
AND s.maintainer = m.id %s
""" % (comparison_operator, con_suites), {'package': package})
if not Options["Architecture"] or con_architectures:
@contact: Debian FTPMaster <ftpmaster@debian.org>
@copyright: 2000, 2001, 2002, 2006 James Troup <james@nocrew.org>
@copyright: 2009 Mark Hymers <mhy@debian.org>
+@copyright: 2012, Ansgar Burchardt <ansgar@debian.org>
"""
################################################################################
-import os
-import os.path
-import stat
-import sys
-from datetime import datetime
import apt_pkg
+from datetime import datetime, timedelta
+import sys
from daklib import daklog
+from daklib.archive import ArchiveTransaction
from daklib.dbconn import *
from daklib.config import Config
################################################################################
+def clean(build_queue, transaction, now=None):
+ session = transaction.session
+ if now is None:
+ now = datetime.now()
+
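+ # Only queue entries older than the queue's stay of execution are expired.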
+ delete_before = now - timedelta(seconds=build_queue.stay_of_execution)
+ suite = build_queue.suite
+
+ # Remove binaries
+ query = """
+ SELECT b.*
+ FROM binaries b
+ JOIN bin_associations ba ON b.id = ba.bin
+ WHERE ba.suite = :suite_id
+ AND ba.created < :delete_before"""
+ binaries = session.query(DBBinary).from_statement(query) \
+ .params({'suite_id': suite.suite_id, 'delete_before': delete_before})
+ for binary in binaries:
+ Logger.log(["removed binary from build queue", build_queue.queue_name, binary.package, binary.version])
+ transaction.remove_binary(binary, suite)
+
+ # Remove sources
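+ # (a source is kept as long as a binary built from it remains in the queue)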
+ query = """
+ SELECT s.*
+ FROM source s
+ JOIN src_associations sa ON s.id = sa.source
+ WHERE sa.suite = :suite_id
+ AND sa.created < :delete_before
+ AND NOT EXISTS (SELECT 1 FROM bin_associations ba
+ JOIN binaries b ON ba.bin = b.id
+ WHERE ba.suite = :suite_id
+ AND b.source = s.id)"""
+ sources = session.query(DBSource).from_statement(query) \
+ .params({'suite_id': suite.suite_id, 'delete_before': delete_before})
+ for source in sources:
+ Logger.log(["removed source from build queue", build_queue.queue_name, source.source, source.version])
+ transaction.remove_source(source, suite)
+
def main ():
global Options, Logger
session = DBConn().session()
- if Options["All"]:
- if len(queue_names) != 0:
- print "E: Cannot use both -a and a queue_name"
- sys.exit(1)
- queues = session.query(BuildQueue).all()
-
- else:
- queues = []
- for q in queue_names:
- queue = get_build_queue(q.lower(), session)
- if queue:
- queues.append(queue)
- else:
- Logger.log(['cannot find queue %s' % q])
-
- # For each given queue, look up object and call manage_queue
- for q in queues:
- Logger.log(['cleaning queue %s using datetime %s' % (q.queue_name, starttime)])
- q.clean_and_update(starttime, Logger, dryrun=Options["No-Action"])
+ with ArchiveTransaction() as transaction:
+ session = transaction.session
+ if Options['All']:
+ if len(queue_names) != 0:
+ print "E: Cannot use both -a and a queue name"
+ sys.exit(1)
+ queues = session.query(BuildQueue)
+ else:
+ queues = session.query(BuildQueue).filter(BuildQueue.queue_name.in_(queue_names))
+
+ for q in queues:
+ Logger.log(['cleaning queue %s using datetime %s' % (q.queue_name, starttime)])
+ clean(q, transaction, now=starttime)
+ if not Options['No-Action']:
+ transaction.commit()
+ else:
+ transaction.rollback()
Logger.close()
"""
sys.exit(exit_code)
-def check_override_compliance(package, priority, suite_name, cnf, session):
+def check_override_compliance(package, priority, archive_path, suite_name, cnf, session):
print "Checking compliance with related overrides..."
depends = set()
arches -= set(["source", "all"])
for arch in arches:
for component in components:
- Packages = utils.get_packages_from_ftp(cnf['Dir::Root'], suite_name, component, arch)
+ Packages = utils.get_packages_from_ftp(archive_path, suite_name, component, arch)
while Packages.Step():
package_name = Packages.Section.Find("Package")
dep_list = Packages.Section.Find("Depends")
if arguments and len(arguments) > 2:
utils.fubar("Too many arguments")
+ suite = get_suite(suite_name, session)
+ if suite is None:
+ utils.fubar("Unknown suite '{0}'".format(suite_name))
+
if arguments and len(arguments) == 1:
# Determine if the argument is a priority or a section...
arg = arguments.pop()
utils.fubar("Trying to change priority of a source-only package")
if Options["Check"] and newpriority != oldpriority:
- check_override_compliance(package, p, suite_name, cnf, session)
+ check_override_compliance(package, p, suite.archive.path, suite_name, cnf, session)
# If we're in no-action mode
if Options["No-Action"]:
depends = {}
session = DBConn().session()
suite_name = Options['suite']
+ suite = get_suite(suite_name, session)
+ if suite is None:
+ utils.fubar("Unknown suite '{0}'".format(suite_name))
components = get_component_names(session)
arches = set([x.arch_string for x in get_suite_architectures(suite_name)])
arches -= set(['source', 'all'])
for arch in arches:
for component in components:
- Packages = utils.get_packages_from_ftp(cnf['Dir::Root'], suite_name, component, arch)
+ Packages = utils.get_packages_from_ftp(suite.archive.path, suite_name, component, arch)
while Packages.Step():
package = Packages.Section.Find('Package')
dep_list = Packages.Section.Find('Depends')
import pwd
import apt_pkg, apt_inst
import examine_package
+import subprocess
from daklib.dbconn import *
from daklib.queue import *
from daklib.dak_exceptions import CantOpenError, AlreadyLockedError, CantGetLockError
from daklib.summarystats import SummaryStats
from daklib.config import Config
-from daklib.changesutils import *
+from daklib.policy import UploadCopy, PolicyQueueUploadHandler
# Globals
Options = None
################################################################################
################################################################################
-def recheck(upload, session):
-# STU: I'm not sure, but I don't thin kthis is necessary any longer: upload.recheck(session)
- if len(upload.rejects) > 0:
- answer = "XXX"
- if Options["No-Action"] or Options["Automatic"] or Options["Trainee"]:
- answer = 'S'
-
- print "REJECT\n%s" % '\n'.join(upload.rejects)
- prompt = "[R]eject, Skip, Quit ?"
-
- while prompt.find(answer) == -1:
- answer = utils.our_raw_input(prompt)
- m = re_default_answer.match(prompt)
- if answer == "":
- answer = m.group(1)
- answer = answer[:1].upper()
-
- if answer == 'R':
- upload.do_reject(manual=0, reject_message='\n'.join(upload.rejects))
- upload.pkg.remove_known_changes(session=session)
- session.commit()
- return 0
- elif answer == 'S':
- return 0
- elif answer == 'Q':
- end()
- sys.exit(0)
-
- return 1
-
-################################################################################
-
class Section_Completer:
def __init__ (self, session):
self.sections = []
################################################################################
-def print_new (new, upload, indexed, file=sys.stdout):
- check_valid(new)
- broken = False
+def print_new (upload, missing, indexed, session, file=sys.stdout):
+ check_valid(missing, session)
index = 0
- for pkg in new.keys():
+ for m in missing:
index += 1
- section = new[pkg]["section"]
- priority = new[pkg]["priority"]
- if new[pkg]["section id"] == -1:
- section += "[!]"
- broken = True
- if new[pkg]["priority id"] == -1:
- priority += "[!]"
- broken = True
+ if m['type'] != 'deb':
+ package = '{0}:{1}'.format(m['type'], m['package'])
+ else:
+ package = m['package']
+ section = m['section']
+ priority = m['priority']
if indexed:
- line = "(%s): %-20s %-20s %-20s" % (index, pkg, priority, section)
+ line = "(%s): %-20s %-20s %-20s" % (index, package, priority, section)
else:
- line = "%-20s %-20s %-20s" % (pkg, priority, section)
- line = line.strip()+'\n'
- file.write(line)
- notes = get_new_comments(upload.pkg.changes.get("source"))
+ line = "%-20s %-20s %-20s" % (package, priority, section)
+ line = line.strip()
+ if not m['valid']:
+ line = line + ' [!]'
+ print >>file, line
- notes = get_new_comments(upload.changes.source, session=session)
for note in notes:
print "\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s" \
% (note.author, note.version, note.notedate, note.comment)
print "-" * 72
- return broken, len(notes) > 0
+ return len(notes) > 0
################################################################################
################################################################################
################################################################################
-def edit_new (new, upload):
+def edit_new (overrides, upload, session):
# Write the current data to a temporary file
(fd, temp_filename) = utils.temp_filename()
temp_file = os.fdopen(fd, 'w')
- print_new (new, upload, indexed=0, file=temp_file)
+ print_new (upload, overrides, indexed=0, session=session, file=temp_file)
temp_file.close()
# Spawn an editor on that file
editor = os.environ.get("EDITOR","vi")
lines = temp_file.readlines()
temp_file.close()
os.unlink(temp_filename)
+
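+ # Key the known overrides by (type, package) so edited lines can be matched up.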
+ overrides_map = dict([ ((o['type'], o['package']), o) for o in overrides ])
+ new_overrides = []
# Parse the new data
for line in lines:
line = line.strip()
- if line == "":
+ if line == "" or line[0] == '#':
continue
s = line.split()
# Pad the list if necessary
s[len(s):3] = [None] * (3-len(s))
(pkg, priority, section) = s[:3]
- if not new.has_key(pkg):
+ if pkg.find(':') != -1:
+ type, pkg = pkg.split(':', 1)
+ else:
+ type = 'deb'
+ if (type, pkg) not in overrides_map:
utils.warn("Ignoring unknown package '%s'" % (pkg))
else:
- # Strip off any invalid markers, print_new will readd them.
- if section.endswith("[!]"):
- section = section[:-3]
- if priority.endswith("[!]"):
- priority = priority[:-3]
- for f in new[pkg]["files"]:
- upload.pkg.files[f]["section"] = section
- upload.pkg.files[f]["priority"] = priority
- new[pkg]["section"] = section
- new[pkg]["priority"] = priority
+ if section.find('/') != -1:
+ component = section.split('/', 1)[0]
+ else:
+ component = 'main'
+ new_overrides.append(dict(
+ package=pkg,
+ type=type,
+ section=section,
+ component=component,
+ priority=priority,
+ ))
+ return new_overrides
################################################################################
def edit_index (new, upload, index):
+ package = new[index]['package']
priority = new[index]["priority"]
section = new[index]["section"]
ftype = new[index]["type"]
done = 0
while not done:
- print "\t".join([index, priority, section])
+ print "\t".join([package, priority, section])
answer = "XXX"
if ftype != "dsc":
# Reset the readline completer
readline.set_completer(None)
- for f in new[index]["files"]:
- upload.pkg.files[f]["section"] = section
- upload.pkg.files[f]["priority"] = priority
new[index]["priority"] = priority
new[index]["section"] = section
+ if section.find('/') != -1:
+ component = section.split('/', 1)[0]
+ else:
+ component = 'main'
+ new[index]['component'] = component
+
return new
################################################################################
print
done = 0
while not done:
- print_new (new, upload, indexed=1)
- new_index = {}
- index = 0
- for i in new.keys():
- index += 1
- new_index[index] = i
-
- prompt = "(%s) edit override <n>, Editor, Done ? " % (index_range(index))
+ print_new (upload, new, indexed=1, session=session)
+ prompt = "edit override <n>, Editor, Done ? "
got_answer = 0
while not got_answer:
got_answer = 1
elif re_isanum.match (answer):
answer = int(answer)
- if (answer < 1) or (answer > index):
- print "%s is not a valid index (%s). Please retry." % (answer, index_range(index))
+ if answer < 1 or answer > len(new):
+ print "{0} is not a valid index. Please retry.".format(answer)
else:
got_answer = 1
if answer == 'E':
- edit_new(new, upload)
+ new = edit_new(new, upload, session)
elif answer == 'D':
done = 1
else:
- edit_index (new, upload, new_index[answer])
+ edit_index (new, upload, answer - 1)
return new
################################################################################
-def check_pkg (upload):
+def check_pkg (upload, upload_copy):
save_stdout = sys.stdout
+ changes = os.path.join(upload_copy.directory, upload.changes.changesname)
+ suite_name = upload.target_suite.suite_name
try:
sys.stdout = os.popen("less -R -", 'w', 0)
- changes = utils.parse_changes (upload.pkg.changes_file)
- print examine_package.display_changes(changes['distribution'], upload.pkg.changes_file)
- files = upload.pkg.files
- for f in files.keys():
- if files[f].has_key("new"):
- ftype = files[f]["type"]
- if ftype == "deb":
- print examine_package.check_deb(changes['distribution'], f)
- elif ftype == "dsc":
- print examine_package.check_dsc(changes['distribution'], f)
+ print examine_package.display_changes(suite_name, changes)
+
+ source = upload.source
+ if source is not None:
+ source_file = os.path.join(upload_copy.directory, os.path.basename(source.poolfile.filename))
+ print examine_package.check_dsc(suite_name, source_file)
+
+ for binary in upload.binaries:
+ binary_file = os.path.join(upload_copy.directory, os.path.basename(binary.poolfile.filename))
+ print examine_package.check_deb(suite_name, binary_file)
+
print examine_package.output_package_relations()
except IOError as e:
if e.errno == errno.EPIPE:
utils.warn("[examine_package] Caught EPIPE; skipping.")
else:
- sys.stdout = save_stdout
raise
except KeyboardInterrupt:
utils.warn("[examine_package] Caught C-c; skipping.")
- sys.stdout = save_stdout
+ finally:
+ sys.stdout = save_stdout
################################################################################
## FIXME: horribly Debian specific
-def do_bxa_notification(upload):
- files = upload.pkg.files
+def do_bxa_notification(new, upload, session):
+ cnf = Config()
+
+ new = set([ o['package'] for o in new if o['type'] == 'deb' ])
+ if len(new) == 0:
+ return
+
+ key = session.query(MetadataKey).filter_by(key='Description').one()
summary = ""
- for f in files.keys():
- if files[f]["type"] == "deb":
- control = apt_pkg.TagSection(utils.deb_extract_control(utils.open_file(f)))
- summary += "\n"
- summary += "Package: %s\n" % (control.find("Package"))
- summary += "Description: %s\n" % (control.find("Description"))
- upload.Subst["__BINARY_DESCRIPTIONS__"] = summary
- bxa_mail = utils.TemplateSubst(upload.Subst,Config()["Dir::Templates"]+"/process-new.bxa_notification")
+ for binary in upload.binaries:
+ if binary.package not in new:
+ continue
+ description = session.query(BinaryMetadata).filter_by(binary=binary, key=key).one().value
+ summary += "\n"
+ summary += "Package: {0}\n".format(binary.package)
+ summary += "Description: {0}\n".format(description)
+
+ subst = {
+ '__DISTRO__': cnf['Dinstall::MyDistribution'],
+ '__BCC__': 'X-DAK: dak process-new',
+ '__BINARY_DESCRIPTIONS__': summary,
+ }
+
+ bxa_mail = utils.TemplateSubst(subst, os.path.join(cnf["Dir::Templates"], "process-new.bxa_notification"))
utils.send_mail(bxa_mail)
################################################################################
-def add_overrides (new, upload, session):
- changes = upload.pkg.changes
- files = upload.pkg.files
- srcpkg = changes.get("source")
-
- for suite in changes["suite"].keys():
- suite_id = get_suite(suite).suite_id
- for pkg in new.keys():
- component_id = get_component(new[pkg]["component"]).component_id
- type_id = get_override_type(new[pkg]["type"]).overridetype_id
- priority_id = new[pkg]["priority id"]
- section_id = new[pkg]["section id"]
- Logger.log(["%s (%s) overrides" % (pkg, srcpkg), suite, new[pkg]["component"], new[pkg]["type"], new[pkg]["priority"], new[pkg]["section"]])
- session.execute("INSERT INTO override (suite, component, type, package, priority, section, maintainer) VALUES (:sid, :cid, :tid, :pkg, :pid, :sectid, '')",
- { 'sid': suite_id, 'cid': component_id, 'tid':type_id, 'pkg': pkg, 'pid': priority_id, 'sectid': section_id})
- for f in new[pkg]["files"]:
- if files[f].has_key("new"):
- del files[f]["new"]
- del new[pkg]
+def add_overrides (new_overrides, suite, session):
+ if suite.overridesuite is not None:
+ suite = session.query(Suite).filter_by(suite_name=suite.overridesuite).one()
+
+ for override in new_overrides:
+ package = override['package']
+ priority = session.query(Priority).filter_by(priority=override['priority']).first()
+ section = session.query(Section).filter_by(section=override['section']).first()
+ component = get_mapped_component(override['component'], session)
+ overridetype = session.query(OverrideType).filter_by(overridetype=override['type']).one()
+
+ if priority is None:
+ raise Exception('Invalid priority {0} for package {1}'.format(override['priority'], package))
+ if section is None:
+ raise Exception('Invalid section {0} for package {1}'.format(override['section'], package))
+ if component is None:
+ raise Exception('Invalid component {0} for package {1}'.format(override['component'], package))
+
+ o = Override(package=package, suite=suite, component=component, priority=priority, section=section, overridetype=overridetype)
+ session.add(o)
session.commit()
- if Config().find_b("Dinstall::BXANotify"):
- do_bxa_notification(upload)
+################################################################################
+
+def run_user_inspect_command(upload, upload_copy):
+ command = os.environ.get('DAK_INSPECT_UPLOAD')
+ if command is None:
+ return
+
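+ # The command template may refer to {directory}, {dsc} and {changes}; see usage().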
+ directory = upload_copy.directory
+ if upload.source:
+ dsc = os.path.basename(upload.source.poolfile.filename)
+ else:
+ dsc = ''
+ changes = upload.changes.changesname
+
+ shell_command = command.format(
+ directory=directory,
+ dsc=dsc,
+ changes=changes,
+ )
+
+ subprocess.check_call(shell_command, shell=True)
################################################################################
-def do_new(upload, session):
- print "NEW\n"
- files = upload.pkg.files
- upload.check_files(not Options["No-Action"])
- changes = upload.pkg.changes
- cnf = Config()
+def get_reject_reason(reason=''):
+ """get reason for rejection
- # Check for a valid distribution
- upload.check_distributions()
+ @rtype: str
+ @return: string giving the reason for the rejection or C{None} if the
+ rejection should be cancelled
+ """
+ answer = 'E'
+ if Options['Automatic']:
+ answer = 'R'
+
+ while answer == 'E':
+ reason = utils.call_editor(reason)
+ print "Reject message:"
+ print utils.prefix_multi_line_string(reason, " ", include_blank_lines=1)
+ prompt = "[R]eject, Edit, Abandon, Quit ?"
+ answer = "XXX"
+ while prompt.find(answer) == -1:
+ answer = utils.our_raw_input(prompt)
+ m = re_default_answer.search(prompt)
+ if answer == "":
+ answer = m.group(1)
+ answer = answer[:1].upper()
- # Make a copy of distribution we can happily trample on
- changes["suite"] = copy.copy(changes["distribution"])
+ if answer == 'Q':
+ sys.exit(0)
- # Try to get an included dsc
- dsc = None
- (status, _) = upload.load_dsc()
- if status:
- dsc = upload.pkg.dsc
+ if answer == 'R':
+ return reason
+ return None
+
+################################################################################
+
+def do_new(upload, upload_copy, handler, session):
+ print "NEW\n"
+ cnf = Config()
+
+ run_user_inspect_command(upload, upload_copy)
# The main NEW processing loop
- done = 0
- new = {}
+ done = False
+ missing = []
while not done:
- # Find out what's new
- new, byhand = determine_new(upload.pkg.changes_file, changes, files, dsc=dsc, session=session, new=new)
+ queuedir = upload.policy_queue.path
+ byhand = upload.byhand
+
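+ # Pass the previous round's override edits back in as hints so they survive the loop.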
+ missing = handler.missing_overrides(hints=missing)
+ broken = not check_valid(missing, session)
- if not new:
- break
+ #if len(byhand) == 0 and len(missing) == 0:
+ # break
answer = "XXX"
if Options["No-Action"] or Options["Automatic"]:
answer = 'S'
- (broken, note) = print_new(new, upload, indexed=0)
+ note = print_new(upload, missing, indexed=0, session=session)
prompt = ""
- if not broken and not note:
- prompt = "Add overrides, "
+ has_unprocessed_byhand = False
+ for f in byhand:
+ path = os.path.join(queuedir, f.filename)
+ if not f.processed and os.path.exists(path):
+ print "W: {0} still present; please process byhand components and try again".format(f.filename)
+ has_unprocessed_byhand = True
+
+ if not has_unprocessed_byhand and not broken and not note:
+ if len(missing) == 0:
+ prompt = "Accept, "
+ else:
+ prompt = "Add overrides, "
if broken:
print "W: [!] marked entries must be fixed before package can be processed."
if note:
if answer == 'A' and not Options["Trainee"]:
try:
check_daily_lock()
- done = add_overrides (new, upload, session)
- new_accept(upload, Options["No-Action"], session)
- Logger.log(["NEW ACCEPT: %s" % (upload.pkg.changes_file)])
+ add_overrides(missing, upload.target_suite, session)
+ if Config().find_b("Dinstall::BXANotify"):
+ do_bxa_notification(missing, upload, session)
+ handler.accept()
+ done = True
+ Logger.log(["NEW ACCEPT", upload.changes.changesname])
except CantGetLockError:
print "Hello? Operator! Give me the number for 911!"
print "Dinstall in the locked area, cant process packages, come back later"
elif answer == 'C':
- check_pkg(upload)
+ check_pkg(upload, upload_copy)
elif answer == 'E' and not Options["Trainee"]:
- new = edit_overrides (new, upload, session)
+ missing = edit_overrides (missing, upload, session)
elif answer == 'M' and not Options["Trainee"]:
- aborted = upload.do_reject(manual=1,
- reject_message=Options["Manual-Reject"],
- notes=get_new_comments(changes.get("source", ""), session=session))
- if not aborted:
- upload.pkg.remove_known_changes(session=session)
- session.commit()
- Logger.log(["NEW REJECT: %s" % (upload.pkg.changes_file)])
- done = 1
+ reason = Options.get('Manual-Reject', '') + "\n"
+ reason = reason + "\n".join(get_new_comments(upload.changes.source, session=session))
+ reason = get_reject_reason(reason)
+ if reason is not None:
+ Logger.log(["NEW REJECT", upload.changes.changesname])
+ handler.reject(reason)
+ done = True
elif answer == 'N':
- edit_note(get_new_comments(changes.get("source", ""), session=session),
+ edit_note(get_new_comments(upload.changes.source, session=session),
upload, session, bool(Options["Trainee"]))
elif answer == 'P' and not Options["Trainee"]:
- prod_maintainer(get_new_comments(changes.get("source", ""), session=session),
+ prod_maintainer(get_new_comments(upload.changes.source, session=session),
upload)
- Logger.log(["NEW PROD: %s" % (upload.pkg.changes_file)])
+ Logger.log(["NEW PROD", upload.changes.changesname])
elif answer == 'R' and not Options["Trainee"]:
confirm = utils.our_raw_input("Really clear note (y/N)? ").lower()
if confirm == "y":
- for c in get_new_comments(changes.get("source", ""), changes.get("version", ""), session=session):
+ for c in get_new_comments(upload.changes.source, upload.changes.version, session=session):
session.delete(c)
session.commit()
elif answer == 'O' and not Options["Trainee"]:
confirm = utils.our_raw_input("Really clear all notes (y/N)? ").lower()
if confirm == "y":
- for c in get_new_comments(changes.get("source", ""), session=session):
+ for c in get_new_comments(upload.changes.source, session=session):
session.delete(c)
session.commit()
elif answer == 'S':
- done = 1
+ done = True
elif answer == 'Q':
end()
sys.exit(0)
-m, --manual-reject=MSG manual reject with `msg'
-n, --no-action don't do anything
-t, --trainee FTP Trainee mode
- -V, --version display the version number and exit"""
- sys.exit(exit_code)
+ -V, --version display the version number and exit
-################################################################################
+ENVIRONMENT VARIABLES
-def do_byhand(upload, session):
- done = 0
- while not done:
- files = upload.pkg.files
- will_install = True
- byhand = []
-
- for f in files.keys():
- if files[f]["section"] == "byhand":
- if os.path.exists(f):
- print "W: %s still present; please process byhand components and try again." % (f)
- will_install = False
- else:
- byhand.append(f)
-
- answer = "XXXX"
- if Options["No-Action"]:
- answer = "S"
- if will_install:
- if Options["Automatic"] and not Options["No-Action"]:
- answer = 'A'
- prompt = "[A]ccept, Manual reject, Skip, Quit ?"
- else:
- prompt = "Manual reject, [S]kip, Quit ?"
+ DAK_INSPECT_UPLOAD: shell command to run to inspect a package
+ The command is automatically run in a shell when an upload
+ is processed. The following substitutions are available:
- while prompt.find(answer) == -1:
- answer = utils.our_raw_input(prompt)
- m = re_default_answer.search(prompt)
- if answer == "":
- answer = m.group(1)
- answer = answer[:1].upper()
+ {directory}: directory the upload is contained in
+ {dsc}: name of the included dsc or the empty string
+ {changes}: name of the changes file
- if answer == 'A':
- dbchg = get_dbchange(upload.pkg.changes_file, session)
- if dbchg is None:
- print "Warning: cannot find changes file in database; can't process BYHAND"
- else:
- try:
- check_daily_lock()
- done = 1
- for b in byhand:
- # Find the file entry in the database
- found = False
- for f in dbchg.files:
- if f.filename == b:
- found = True
- f.processed = True
- break
-
- if not found:
- print "Warning: Couldn't find BYHAND item %s in the database to mark it processed" % b
-
- session.commit()
- Logger.log(["BYHAND ACCEPT: %s" % (upload.pkg.changes_file)])
- except CantGetLockError:
- print "Hello? Operator! Give me the number for 911!"
- print "Dinstall in the locked area, cant process packages, come back later"
- elif answer == 'M':
- aborted = upload.do_reject(manual=1,
- reject_message=Options["Manual-Reject"],
- notes=get_new_comments(changes.get("source", ""), session=session))
- if not aborted:
- upload.pkg.remove_known_changes(session=session)
- session.commit()
- Logger.log(["BYHAND REJECT: %s" % (upload.pkg.changes_file)])
- done = 1
- elif answer == 'S':
- done = 1
- elif answer == 'Q':
- end()
- sys.exit(0)
+ Note that Python's 'format' method is used to format the command.
+
+ Example: run mc in a tmux session to inspect the upload
+
+ export DAK_INSPECT_UPLOAD='tmux new-session -d -s process-new 2>/dev/null; tmux new-window -t process-new:0 -k "cd {directory}; mc"'
+
+ and run
+
+ tmux attach -t process-new
+
+ in a separate terminal session.
+"""
+ sys.exit(exit_code)
################################################################################
os.unlink(lockfile)
-
@contextlib.contextmanager
def lock_package(package):
"""
finally:
os.unlink(path)
-class clean_holding(object):
- def __init__(self,pkg):
- self.pkg = pkg
-
- def __enter__(self):
- pass
-
- def __exit__(self, type, value, traceback):
- h = Holding()
-
- for f in self.pkg.files.keys():
- if os.path.exists(os.path.join(h.holding_dir, f)):
- os.unlink(os.path.join(h.holding_dir, f))
-
-
-def do_pkg(changes_full_path, session):
- changes_dir = os.path.dirname(changes_full_path)
- changes_file = os.path.basename(changes_full_path)
-
- u = Upload()
- u.pkg.changes_file = changes_file
- (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changes_file)
- u.load_changes(changes_file)
- u.pkg.directory = changes_dir
- u.update_subst()
- u.logger = Logger
- origchanges = os.path.abspath(u.pkg.changes_file)
-
+def do_pkg(upload, session):
# Try to get an included dsc
- dsc = None
- (status, _) = u.load_dsc()
- if status:
- dsc = u.pkg.dsc
+ dsc = upload.source
cnf = Config()
- bcc = "X-DAK: dak process-new"
- if cnf.has_key("Dinstall::Bcc"):
- u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
- else:
- u.Subst["__BCC__"] = bcc
-
- files = u.pkg.files
- u.check_distributions()
- for deb_filename, f in files.items():
- if deb_filename.endswith(".udeb") or deb_filename.endswith(".deb"):
- u.binary_file_checks(deb_filename, session)
- u.check_binary_against_db(deb_filename, session)
- else:
- u.source_file_checks(deb_filename, session)
- u.check_source_against_db(deb_filename, session)
-
- u.pkg.changes["suite"] = copy.copy(u.pkg.changes["distribution"])
+ #bcc = "X-DAK: dak process-new"
+ #if cnf.has_key("Dinstall::Bcc"):
+ # u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
+ #else:
+ # u.Subst["__BCC__"] = bcc
try:
- with lock_package(u.pkg.changes["source"]):
- with clean_holding(u.pkg):
- if not recheck(u, session):
- return
-
- new, byhand = determine_new(u.pkg.changes_file, u.pkg.changes, files, dsc=dsc, session=session)
- if byhand:
- do_byhand(u, session)
- elif new:
- do_new(u, session)
- else:
- try:
- check_daily_lock()
- new_accept(u, Options["No-Action"], session)
- except CantGetLockError:
- print "Hello? Operator! Give me the number for 911!"
- print "Dinstall in the locked area, cant process packages, come back later"
+ with lock_package(upload.changes.source):
+ with UploadCopy(upload) as upload_copy:
+ handler = PolicyQueueUploadHandler(upload, session)
+ if handler.get_action() is not None:
+ return
+ do_new(upload, upload_copy, handler, session)
except AlreadyLockedError as e:
print "Seems to be locked by %s already, skipping..." % (e)
-def show_new_comments(changes_files, session):
- sources = set()
+def show_new_comments(uploads, session):
+ sources = [ upload.changes.source for upload in uploads ]
+ if len(sources) == 0:
+ return
+
query = """SELECT package, version, comment, author
FROM new_comments
- WHERE package IN ('"""
-
- for changes in changes_files:
- sources.add(os.path.basename(changes).split("_")[0])
+ WHERE package IN :sources
+ ORDER BY package, version"""
- query += "%s') ORDER BY package, version" % "', '".join(sources)
- r = session.execute(query)
+ # IN needs a tuple here; a plain list would be adapted to an SQL ARRAY.
+ r = session.execute(query, params=dict(sources=tuple(sources)))
for i in r:
print "%s_%s\n%s\n(%s)\n\n\n" % (i[0], i[1], i[2], i[3])
- session.commit()
+ session.rollback()
################################################################################
('h',"help","Process-New::Options::Help"),
('m',"manual-reject","Process-New::Options::Manual-Reject", "HasArg"),
('t',"trainee","Process-New::Options::Trainee"),
+ ('q','queue','Process-New::Options::Queue', 'HasArg'),
('n',"no-action","Process-New::Options::No-Action")]
+ changes_files = apt_pkg.parse_commandline(cnf.Cnf,Arguments,sys.argv)
+
for i in ["automatic", "no-binaries", "comments", "help", "manual-reject", "no-action", "version", "trainee"]:
if not cnf.has_key("Process-New::Options::%s" % (i)):
cnf["Process-New::Options::%s" % (i)] = ""
- changes_files = apt_pkg.parse_commandline(cnf.Cnf,Arguments,sys.argv)
+ queue_name = cnf.get('Process-New::Options::Queue', 'new')
+ new_queue = session.query(PolicyQueue).filter_by(queue_name=queue_name).one()
if len(changes_files) == 0:
- new_queue = get_policy_queue('new', session );
- changes_paths = [ os.path.join(new_queue.path, j) for j in utils.get_changes_files(new_queue.path) ]
+ uploads = new_queue.uploads
else:
- changes_paths = [ os.path.abspath(j) for j in changes_files ]
+ uploads = session.query(PolicyQueueUpload).filter_by(policy_queue=new_queue) \
+ .join(DBChange).filter(DBChange.changesname.in_(changes_files)).all()
Options = cnf.subtree("Process-New::Options")
Priorities = Priority_Completer(session)
readline.parse_and_bind("tab: complete")
- if len(changes_paths) > 1:
+ if len(uploads) > 1:
sys.stderr.write("Sorting changes...\n")
- changes_files = sort_changes(changes_paths, session, Options["No-Binaries"])
+ uploads.sort()
if Options["Comments"]:
- show_new_comments(changes_files, session)
+ show_new_comments(uploads, session)
else:
- for changes_file in changes_files:
- changes_file = utils.validate_changes_file_arg(changes_file, 0)
- if not changes_file:
- continue
- print "\n" + os.path.basename(changes_file)
+ for upload in uploads:
+ print "\n" + os.path.basename(upload.changes.changesname)
- do_pkg (changes_file, session)
+ do_pkg (upload, session)
end()
################################################################################
import os
-import copy
+import datetime
+import re
import sys
+import traceback
import apt_pkg
from daklib.dbconn import *
-from daklib.queue import *
from daklib import daklog
from daklib import utils
from daklib.dak_exceptions import CantOpenError, AlreadyLockedError, CantGetLockError
from daklib.config import Config
-from daklib.changesutils import *
+from daklib.archive import ArchiveTransaction
+from daklib.urgencylog import UrgencyLog
+from daklib.textutils import fix_maintainer
# Globals
Options = None
################################################################################
-def do_comments(dir, srcqueue, opref, npref, line, fn, session):
+def do_comments(dir, srcqueue, opref, npref, line, fn, transaction):
+ session = transaction.session
for comm in [ x for x in os.listdir(dir) if x.startswith(opref) ]:
- lines = open("%s/%s" % (dir, comm)).readlines()
+ lines = open(os.path.join(dir, comm)).readlines()
if len(lines) == 0 or lines[0] != line + "\n": continue
- changes_files = [ x for x in os.listdir(".") if x.startswith(comm[len(opref):]+"_")
- and x.endswith(".changes") ]
- changes_files = sort_changes(changes_files, session)
- for f in changes_files:
- print "Processing changes file: %s" % f
- f = utils.validate_changes_file_arg(f, 0)
- if not f:
- print "Couldn't validate changes file %s" % f
- continue
- fn(f, srcqueue, "".join(lines[1:]), session)
-
- if opref != npref and not Options["No-Action"]:
+
+ # If the ACCEPT includes a _<arch> we only accept that .changes.
+ # Otherwise we accept all .changes that start with the given prefix
+ changes_prefix = comm[len(opref):]
+ if changes_prefix.count('_') < 2:
+ changes_prefix = changes_prefix + '_'
+ else:
+ changes_prefix = changes_prefix + '.changes'
+
+ uploads = session.query(PolicyQueueUpload).filter_by(policy_queue=srcqueue) \
+ .join(PolicyQueueUpload.changes).filter(DBChange.changesname.startswith(changes_prefix)) \
+ .order_by(PolicyQueueUpload.source_id)
+ for u in uploads:
+ print "Processing changes file: %s" % u.changes.changesname
+ fn(u, srcqueue, "".join(lines[1:]), transaction)
+
+ if opref != npref:
newcomm = npref + comm[len(opref):]
- os.rename("%s/%s" % (dir, comm), "%s/%s" % (dir, newcomm))
+ transaction.fs.move(os.path.join(dir, comm), os.path.join(dir, newcomm))
+
+################################################################################
+
+def try_or_reject(function):
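+ """Decorator: on an exception, roll back and reject the upload instead of crashing."""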
+ def wrapper(upload, srcqueue, comments, transaction):
+ try:
+ function(upload, srcqueue, comments, transaction)
+ except Exception as e:
+ comments = 'An exception was raised while processing the package:\n{0}\nOriginal comments:\n{1}'.format(traceback.format_exc(), comments)
+ try:
+ transaction.rollback()
+ real_comment_reject(upload, srcqueue, comments, transaction)
+ except Exception as e:
+ comments = 'In addition an exception was raised while trying to reject the upload:\n{0}\nOriginal rejection:\n{1}'.format(traceback.format_exc(), comments)
+ transaction.rollback()
+ real_comment_reject(upload, srcqueue, comments, transaction, notify=False)
+ if not Options['No-Action']:
+ transaction.commit()
+ return wrapper
################################################################################
-def comment_accept(changes_file, srcqueue, comments, session):
- u = Upload()
- u.pkg.changes_file = changes_file
- u.load_changes(changes_file)
- u.update_subst()
+@try_or_reject
+def comment_accept(upload, srcqueue, comments, transaction):
+ for byhand in upload.byhand:
+ path = os.path.join(srcqueue.path, byhand.filename)
+ if os.path.exists(path):
+ raise Exception('E: cannot ACCEPT upload with unprocessed byhand file {0}'.format(byhand.filename))
+ cnf = Config()
+
+ fs = transaction.fs
+ session = transaction.session
+ changesname = upload.changes.changesname
+ allow_tainted = srcqueue.suite.archive.tainted
+
+ # We need overrides to get the target component
+ overridesuite = upload.target_suite
+ if overridesuite.overridesuite is not None:
+ overridesuite = session.query(Suite).filter_by(suite_name=overridesuite.overridesuite).one()
+
+ def binary_component_func(db_binary):
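+ # The override entry, not the upload itself, decides the target component.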
+ override = session.query(Override).filter_by(suite=overridesuite, package=db_binary.package) \
+ .join(OverrideType).filter(OverrideType.overridetype == db_binary.binarytype) \
+ .join(Component).one()
+ return override.component
+
+ def source_component_func(db_source):
+ override = session.query(Override).filter_by(suite=overridesuite, package=db_source.source) \
+ .join(OverrideType).filter(OverrideType.overridetype == 'dsc') \
+ .join(Component).one()
+ return override.component
+
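+ # Copy into the target suite and into the suite of each attached copy queue.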
+ all_target_suites = [upload.target_suite]
+ all_target_suites.extend([q.suite for q in upload.target_suite.copy_queues])
+
+ for suite in all_target_suites:
+ if upload.source is not None:
+ transaction.copy_source(upload.source, suite, source_component_func(upload.source), allow_tainted=allow_tainted)
+ for db_binary in upload.binaries:
+ transaction.copy_binary(db_binary, suite, binary_component_func(db_binary), allow_tainted=allow_tainted, extra_archives=[upload.target_suite.archive])
+
+ # Copy .changes if needed
+ if upload.target_suite.copychanges:
+ src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
+ dst = os.path.join(upload.target_suite.path, upload.changes.changesname)
+ fs.copy(src, dst, mode=upload.target_suite.archive.mode)
+
+ if upload.source is not None and not Options['No-Action']:
+ urgency = upload.changes.urgency
+ if urgency not in cnf.value_list('Urgency::Valid'):
+ urgency = cnf['Urgency::Default']
+ UrgencyLog().log(upload.source.source, upload.source.version, urgency)
+
+ print " ACCEPT"
+ if not Options['No-Action']:
+ Logger.log(["Policy Queue ACCEPT", srcqueue.queue_name, changesname])
+
+ # Send announcement
+ subst = subst_for_upload(upload)
+ announce = ", ".join(upload.target_suite.announce or [])
+ tracking = cnf.get('Dinstall::TrackingServer')
+ if tracking and upload.source is not None:
+ announce = '{0}\nBcc: {1}@{2}'.format(announce, upload.changes.source, tracking)
+ subst['__ANNOUNCE_LIST_ADDRESS__'] = announce
+ message = utils.TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'process-unchecked.announce'))
+ utils.send_mail(message)
+
+ # TODO: code duplication. Similar code is in process-upload.
+ if cnf.find_b('Dinstall::CloseBugs') and upload.changes.closes is not None and upload.source is not None:
+ for bugnum in upload.changes.closes:
+ subst['__BUG_NUMBER__'] = bugnum
+ message = utils.TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'process-unchecked.bug-close'))
+ utils.send_mail(message)
+
+ del subst['__BUG_NUMBER__']
+
+ # TODO: code duplication. Similar code is in process-upload.
+ # Move .changes to done
+ src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
+ now = datetime.datetime.now()
+ donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
+ dst = os.path.join(donedir, upload.changes.changesname)
+ dst = utils.find_next_free(dst)
+ fs.copy(src, dst, mode=0o644)
+
+ remove_upload(upload, transaction)
+
+################################################################################
+
+@try_or_reject
+def comment_reject(*args):
+ real_comment_reject(*args)
+
+def real_comment_reject(upload, srcqueue, comments, transaction, notify=True):
+ cnf = Config()
+
+ fs = transaction.fs
+ session = transaction.session
+ changesname = upload.changes.changesname
+ queuedir = upload.policy_queue.path
+ rejectdir = cnf['Dir::Reject']
+
+ ### Copy files to reject/
+
+ poolfiles = [b.poolfile for b in upload.binaries]
+ if upload.source is not None:
+ poolfiles.extend([df.poolfile for df in upload.source.srcfiles])
+ # Not beautiful...
+ files = [ af.path for af in session.query(ArchiveFile) \
+ .filter_by(archive=upload.policy_queue.suite.archive) \
+ .join(ArchiveFile.file) \
+ .filter(PoolFile.file_id.in_([ f.file_id for f in poolfiles ])) ]
+ for byhand in upload.byhand:
+ path = os.path.join(queuedir, byhand.filename)
+ if os.path.exists(path):
+ files.append(path)
+ files.append(os.path.join(queuedir, changesname))
+
+ for fn in files:
+ dst = utils.find_next_free(os.path.join(rejectdir, os.path.basename(fn)))
+ fs.copy(fn, dst, link=True)
+
+ ### Write reason
+
+ dst = utils.find_next_free(os.path.join(rejectdir, '{0}.reason'.format(changesname)))
+ fh = fs.create(dst)
+ fh.write(comments)
+ fh.close()
+
+ ### Send mail notification
+
+ if notify:
+ subst = subst_for_upload(upload)
+ subst['__MANUAL_REJECT_MESSAGE__'] = ''
+ subst['__REJECT_MESSAGE__'] = comments
+
+ # Try to use From: from comment file if there is one.
+ # This is not very elegant...
+ match = re.match(r"\AFrom: ([^\n]+)\n\n", comments)
+ if match:
+ subst['__REJECTOR_ADDRESS__'] = match.group(1)
+ subst['__REJECT_MESSAGE__'] = '\n'.join(comments.splitlines()[2:])
+
+ message = utils.TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'queue.rejected'))
+ utils.send_mail(message)
+
+ print " REJECT"
if not Options["No-Action"]:
- destqueue = get_policy_queue('newstage', session)
- if changes_to_queue(u, srcqueue, destqueue, session):
- print " ACCEPT"
- Logger.log(["Policy Queue ACCEPT: %s: %s" % (srcqueue.queue_name, u.pkg.changes_file)])
- else:
- print "E: Failed to migrate %s" % u.pkg.changes_file
+ Logger.log(["Policy Queue REJECT", srcqueue.queue_name, upload.changes.changesname])
+
+ remove_upload(upload, transaction)
################################################################################
-def comment_reject(changes_file, srcqueue, comments, session):
- u = Upload()
- u.pkg.changes_file = changes_file
- u.load_changes(changes_file)
- u.update_subst()
+def remove_upload(upload, transaction):
+ fs = transaction.fs
+ session = transaction.session
+ changes = upload.changes
+
+ # Remove byhand and changes files. Binary and source packages will be
+ # removed from {bin,src}_associations and eventually removed by clean-suites automatically.
+ queuedir = upload.policy_queue.path
+ for byhand in upload.byhand:
+ path = os.path.join(queuedir, byhand.filename)
+ if os.path.exists(path):
+ fs.unlink(path)
+ session.delete(byhand)
+ fs.unlink(os.path.join(queuedir, upload.changes.changesname))
+
+ session.delete(upload)
+ session.delete(changes)
+ session.flush()
- u.rejects.append(comments)
+################################################################################
+def subst_for_upload(upload):
+ # TODO: similar code in process-upload
cnf = Config()
- bcc = "X-DAK: dak process-policy"
- if cnf.has_key("Dinstall::Bcc"):
- u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
- else:
- u.Subst["__BCC__"] = bcc
- if not Options["No-Action"]:
- u.do_reject(manual=0, reject_message='\n'.join(u.rejects))
- u.pkg.remove_known_changes(session=session)
- session.commit()
+ maintainer_field = upload.changes.changedby or upload.changes.maintainer
+ addresses = utils.mail_addresses_for_upload(upload.changes.maintainer, maintainer_field, upload.changes.fingerprint)
+
+ changes_path = os.path.join(upload.policy_queue.path, upload.changes.changesname)
+ changes_contents = open(changes_path, 'r').read()
+
+ bcc = 'X-DAK: dak process-policy'
+ if 'Dinstall::Bcc' in cnf:
+ bcc = '{0}\nBcc: {1}'.format(bcc, cnf['Dinstall::Bcc'])
- print " REJECT"
- Logger.log(["Policy Queue REJECT: %s: %s" % (srcqueue.queue_name, u.pkg.changes_file)])
+ subst = {
+ '__DISTRO__': cnf['Dinstall::MyDistribution'],
+ '__ADMIN_ADDRESS__': cnf['Dinstall::MyAdminAddress'],
+ '__CHANGES_FILENAME__': upload.changes.changesname,
+ '__SOURCE__': upload.changes.source,
+ '__VERSION__': upload.changes.version,
+ '__ARCHITECTURE__': upload.changes.architecture,
+ '__MAINTAINER__': maintainer_field,
+ '__MAINTAINER_FROM__': fix_maintainer(maintainer_field)[1],
+ '__MAINTAINER_TO__': ", ".join(addresses),
+ '__CC__': 'X-DAK-Rejection: manual or automatic',
+ '__REJECTOR_ADDRESS__': cnf['Dinstall::MyEmailAddress'],
+ '__BCC__': bcc,
+ '__BUG_SERVER__': cnf.get('Dinstall::BugServer'),
+ '__FILE_CONTENTS__': changes_contents,
+ }
+
+ override_maintainer = cnf.get('Dinstall::OverrideMaintainer')
+ if override_maintainer:
+ subst['__MAINTAINER_TO__'] = override_maintainer
+
+ return subst
+
+################################################################################
+
+def remove_unreferenced_binaries(policy_queue, transaction):
+ """Remove binaries that are no longer referenced by an upload
+
+ @type policy_queue: L{daklib.dbconn.PolicyQueue}
+
+ @type transaction: L{daklib.archive.ArchiveTransaction}
+ """
+ session = transaction.session
+ suite = policy_queue.suite
+
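+ # Find binaries in the queue's suite that no pending upload references anymore.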
+ query = """
+ SELECT b.*
+ FROM binaries b
+ JOIN bin_associations ba ON b.id = ba.bin
+ WHERE ba.suite = :suite_id
+ AND NOT EXISTS (SELECT 1 FROM policy_queue_upload_binaries_map pqubm
+ JOIN policy_queue_upload pqu ON pqubm.policy_queue_upload_id = pqu.id
+ WHERE pqu.policy_queue_id = :policy_queue_id
+ AND pqubm.binary_id = b.id)"""
+ binaries = session.query(DBBinary).from_statement(query) \
+ .params({'suite_id': policy_queue.suite_id, 'policy_queue_id': policy_queue.policy_queue_id})
+
+ for binary in binaries:
+ Logger.log(["removed binary from policy queue", policy_queue.queue_name, binary.package, binary.version])
+ transaction.remove_binary(binary, suite)
+
+def remove_unreferenced_sources(policy_queue, transaction):
+ """Remove sources that are no longer referenced by an upload or a binary
+
+ @type policy_queue: L{daklib.dbconn.PolicyQueue}
+
+ @type transaction: L{daklib.archive.ArchiveTransaction}
+ """
+ session = transaction.session
+ suite = policy_queue.suite
+
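+ # Find sources that neither a pending upload nor a remaining binary still references.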
+ query = """
+ SELECT s.*
+ FROM source s
+ JOIN src_associations sa ON s.id = sa.source
+ WHERE sa.suite = :suite_id
+ AND NOT EXISTS (SELECT 1 FROM policy_queue_upload pqu
+ WHERE pqu.policy_queue_id = :policy_queue_id
+ AND pqu.source_id = s.id)
+ AND NOT EXISTS (SELECT 1 FROM binaries b
+ JOIN bin_associations ba ON b.id = ba.bin
+ WHERE b.source = s.id
+ AND ba.suite = :suite_id)"""
+ sources = session.query(DBSource).from_statement(query) \
+ .params({'suite_id': policy_queue.suite_id, 'policy_queue_id': policy_queue.policy_queue_id})
+
+ for source in sources:
+ Logger.log(["removed source from policy queue", policy_queue.queue_name, source.source, source.version])
+ transaction.remove_source(source, suite)
################################################################################
if Options["Help"]:
usage()
+ Logger = daklog.Logger("process-policy")
if not Options["No-Action"]:
+ urgencylog = UrgencyLog()
+
+ with ArchiveTransaction() as transaction:
+ session = transaction.session
try:
- Logger = daklog.Logger("process-policy")
- except CantOpenError as e:
- Logger = None
+ pq = session.query(PolicyQueue).filter_by(queue_name=queue_name).one()
+ except NoResultFound:
+ print "E: Cannot find policy queue %s" % queue_name
+ sys.exit(1)
- # Find policy queue
- session.query(PolicyQueue)
+ commentsdir = os.path.join(pq.path, 'COMMENTS')
+ # The comments stuff relies on being in the right directory
+ os.chdir(pq.path)
- try:
- pq = session.query(PolicyQueue).filter_by(queue_name=queue_name).one()
- except NoResultFound:
- print "E: Cannot find policy queue %s" % queue_name
- sys.exit(1)
+ do_comments(commentsdir, pq, "ACCEPT.", "ACCEPTED.", "OK", comment_accept, transaction)
+ do_comments(commentsdir, pq, "ACCEPTED.", "ACCEPTED.", "OK", comment_accept, transaction)
+ do_comments(commentsdir, pq, "REJECT.", "REJECTED.", "NOTOK", comment_reject, transaction)
- commentsdir = os.path.join(pq.path, 'COMMENTS')
- # The comments stuff relies on being in the right directory
- os.chdir(pq.path)
- do_comments(commentsdir, pq, "ACCEPT.", "ACCEPTED.", "OK", comment_accept, session)
- do_comments(commentsdir, pq, "ACCEPTED.", "ACCEPTED.", "OK", comment_accept, session)
- do_comments(commentsdir, pq, "REJECT.", "REJECTED.", "NOTOK", comment_reject, session)
+ remove_unreferenced_binaries(pq, transaction)
+ remove_unreferenced_sources(pq, transaction)
+ if not Options['No-Action']:
+ urgencylog.close()
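
For context on the do_comments calls above: the "ACCEPT."/"ACCEPTED."/"REJECT." arguments are filename prefixes in the queue's COMMENTS directory. Ftpmasters drop ACCEPT.<changes> or REJECT.<changes> marker files there, and processed markers are renamed to the second prefix so they are not handled twice. A rough sketch of that convention (hypothetical handler; dak's real do_comments takes more parameters):

    import os

    def process_comment_files(commentsdir, prefix, done_prefix, handler):
        # Hypothetical: for each marker like 'ACCEPT.foo_1.0_amd64.changes',
        # hand the changes name and the comment text to the handler, then
        # rename the marker so the next run skips it.
        for name in os.listdir(commentsdir):
            if not name.startswith(prefix):
                continue
            changes_name = name[len(prefix):]
            path = os.path.join(commentsdir, name)
            with open(path) as fh:
                comment = fh.read()
            handler(changes_name, comment)
            os.rename(path, os.path.join(commentsdir, done_prefix + changes_name))
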
################################################################################
## Queue builds
+import datetime
+import errno
from errno import EACCES, EAGAIN
import fcntl
import os
import sys
import traceback
import apt_pkg
+import time
from sqlalchemy.orm.exc import NoResultFound
from daklib import daklog
-from daklib.queue import *
-from daklib.queue_install import *
-from daklib import utils
from daklib.dbconn import *
from daklib.urgencylog import UrgencyLog
from daklib.summarystats import SummaryStats
-from daklib.holding import Holding
from daklib.config import Config
-from daklib.regexes import re_match_expired
+import daklib.utils as utils
+from daklib.textutils import fix_maintainer
+from daklib.regexes import *
+
+import daklib.archive
+import daklib.upload
###############################################################################
###############################################################################
-def byebye():
- if not Options["No-Action"]:
- # Clean out the queue files
- session = DBConn().session()
- session.execute("DELETE FROM changes_pending_files WHERE id NOT IN (SELECT file_id FROM changes_pending_files_map )")
- session.commit()
+def try_or_reject(function):
+ """Try to call function or reject the upload if that fails
+ """
+ def wrapper(directory, upload, *args, **kwargs):
+ try:
+ return function(directory, upload, *args, **kwargs)
+ except Exception as e:
+ try:
+ reason = "There was an uncaught exception when processing your upload:\n{0}\nAny original reject reason follows below.".format(traceback.format_exc())
+ upload.rollback()
+ return real_reject(directory, upload, reason=reason)
+ except Exception as e:
+ reason = "In addition there was an exception when rejecting the package:\n{0}\nPrevious reasons:\n{1}".format(traceback.format_exc(), reason)
+ upload.rollback()
+ return real_reject(directory, upload, reason=reason, notify=False)
+
+ return wrapper
+
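The decorator implements a two-stage fallback: run the wrapped action; on failure, roll back and send a normal rejection; if even the rejection raises, reject again with notification disabled. The same shape in isolation, with stand-in callables rather than dak's API:

    import traceback

    def with_fallback(action, reject):
        def wrapper(*args, **kwargs):
            try:
                return action(*args, **kwargs)
            except Exception:
                reason = "Uncaught exception:\n{0}".format(traceback.format_exc())
                try:
                    return reject(reason, notify=True)
                except Exception:
                    # Even the rejection failed; record it without mailing anyone.
                    return reject(reason, notify=False)
        return wrapper
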
+def subst_for_upload(upload):
+ cnf = Config()
+
+ changes = upload.changes
+ control = upload.changes.changes
+
+ if upload.final_suites is None or len(upload.final_suites) == 0:
+ suite_name = '(unknown)'
+ else:
+ suite_names = []
+ for suite in upload.final_suites:
+ if suite.policy_queue:
+ suite_names.append("{0}->{1}".format(suite.suite_name, suite.policy_queue.queue_name))
+ else:
+ suite_names.append(suite.suite_name)
+ suite_name = ','.join(suite_names)
+
+ maintainer_field = control.get('Changed-By', control.get('Maintainer', cnf['Dinstall::MyEmailAddress']))
+ maintainer = fix_maintainer(maintainer_field)
+ addresses = utils.mail_addresses_for_upload(control.get('Maintainer', cnf['Dinstall::MyEmailAddress']), maintainer_field, changes.primary_fingerprint)
+
+ bcc = 'X-DAK: dak process-upload'
+ if 'Dinstall::Bcc' in cnf:
+ bcc = '{0}\nBcc: {1}'.format(bcc, cnf['Dinstall::Bcc'])
+
+ subst = {
+ '__DISTRO__': cnf['Dinstall::MyDistribution'],
+ '__ADMIN_ADDRESS__': cnf['Dinstall::MyAdminAddress'],
+
+ '__CHANGES_FILENAME__': upload.changes.filename,
+
+ '__SOURCE__': control.get('Source', '(unknown)'),
+ '__ARCHITECTURE__': control.get('Architecture', '(unknown)'),
+ '__VERSION__': control.get('Version', '(unknown)'),
+
+ '__SUITE__': suite_name,
+
+ '__DAK_ADDRESS__': cnf['Dinstall::MyEmailAddress'],
+ '__MAINTAINER_FROM__': maintainer[1],
+ '__MAINTAINER_TO__': ", ".join(addresses),
+ '__MAINTAINER__': maintainer_field,
+ '__BCC__': bcc,
+
+ '__BUG_SERVER__': cnf.get('Dinstall::BugServer'),
+
+ # TODO: don't use private member
+ '__FILE_CONTENTS__': upload.changes._signed_file.contents,
+
+        # __REJECT_MESSAGE__ is filled in by real_reject() before the mail is sent
+ }
+
+ override_maintainer = cnf.get('Dinstall::OverrideMaintainer')
+ if override_maintainer:
+ subst['__MAINTAINER_TO__'] = subst['__MAINTAINER_FROM__'] = override_maintainer
+
+ return subst
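
subst_for_upload only builds the dictionary; utils.TemplateSubst later replaces each __KEY__ placeholder in a mail template with the corresponding value. A minimal stand-in for that substitution step (plain literal replacement is assumed here; the real helper lives in daklib/utils.py):

    def template_subst(subst, template):
        # Replace every __KEY__ placeholder with its value; None becomes the
        # empty string so optional headers simply drop out.
        for key, value in subst.items():
            template = template.replace(key, value if value is not None else '')
        return template

    print template_subst({'__SOURCE__': 'hello', '__VERSION__': '1.0-1'},
                         'Accepted __SOURCE__ __VERSION__')
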
+
+@try_or_reject
+def accept(directory, upload):
+ cnf = Config()
+
+ Logger.log(['ACCEPT', upload.changes.filename])
+
+ upload.install()
+
+ accepted_to_real_suite = False
+ for suite in upload.final_suites:
+ accepted_to_real_suite = accepted_to_real_suite or suite.policy_queue is None
+
+ control = upload.changes.changes
+ if 'source' in upload.changes.architectures and not Options['No-Action']:
+ urgency = control.get('Urgency')
+ if urgency not in cnf.value_list('Urgency::Valid'):
+ urgency = cnf['Urgency::Default']
+ UrgencyLog().log(control['Source'], control['Version'], urgency)
+
+ # send mail to maintainer
+ subst = subst_for_upload(upload)
+ message = utils.TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'process-unchecked.accepted'))
+ utils.send_mail(message)
+
+ # send mail to announce lists and tracking server
+ subst = subst_for_upload(upload)
+ announce = set()
+ for suite in upload.final_suites:
+        if suite.policy_queue is not None:
+            continue
+        announce.update(suite.announce or [])
+ announce_address = ", ".join(announce)
+ tracking = cnf.get('Dinstall::TrackingServer')
+ if tracking and 'source' in upload.changes.architectures:
+        announce_address = '{0}\nBcc: {1}@{2}'.format(announce_address, control['Source'], tracking)
+    subst['__ANNOUNCE_LIST_ADDRESS__'] = announce_address
+ message = utils.TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'process-unchecked.announce'))
+ utils.send_mail(message)
+
+ # Only close bugs for uploads that were not redirected to a policy queue.
+ # process-policy will close bugs for those once they are accepted.
+ subst = subst_for_upload(upload)
+ if accepted_to_real_suite and cnf.find_b('Dinstall::CloseBugs') and upload.changes.source is not None:
+ for bugnum in upload.changes.closed_bugs:
+ subst['__BUG_NUMBER__'] = str(bugnum)
+
+ message = utils.TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'process-unchecked.bug-close'))
+ utils.send_mail(message)
+
+ del subst['__BUG_NUMBER__']
+
+ # Move .changes to done, but only for uploads that were accepted to a
+ # real suite. process-policy will handle this for uploads to queues.
+ if accepted_to_real_suite:
+ src = os.path.join(upload.directory, upload.changes.filename)
+
+ now = datetime.datetime.now()
+ donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
+ dst = os.path.join(donedir, upload.changes.filename)
+ dst = utils.find_next_free(dst)
+
+ upload.transaction.fs.copy(src, dst, mode=0o644)
+
+ SummaryStats().accept_count += 1
+ SummaryStats().accept_bytes += upload.changes.bytes
+
+@try_or_reject
+def accept_to_new(directory, upload):
+ cnf = Config()
+
+ Logger.log(['ACCEPT-TO-NEW', upload.changes.filename])
+
+ upload.install_to_new()
+ # TODO: tag bugs pending, send announcement
+
+ subst = subst_for_upload(upload)
+ message = utils.TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'process-unchecked.new'))
+ utils.send_mail(message)
+
+ SummaryStats().accept_count += 1
+ SummaryStats().accept_bytes += upload.changes.bytes
+
+@try_or_reject
+def reject(directory, upload, reason=None, notify=True):
+ real_reject(directory, upload, reason, notify)
+
+def real_reject(directory, upload, reason=None, notify=True):
+ # XXX: rejection itself should go to daklib.archive.ArchiveUpload
+ cnf = Config()
+
+ Logger.log(['REJECT', upload.changes.filename])
+
+ fs = upload.transaction.fs
+ rejectdir = cnf['Dir::Reject']
+ files = [ f.filename for f in upload.changes.files.itervalues() ]
+ files.append(upload.changes.filename)
+ for fn in files:
+ src = os.path.join(upload.directory, fn)
+ dst = utils.find_next_free(os.path.join(rejectdir, fn))
+ if not os.path.exists(src):
+ continue
+ fs.copy(src, dst)
+
+ if upload.reject_reasons is not None:
+ if reason is None:
+ reason = ''
+ reason = reason + '\n' + '\n'.join(upload.reject_reasons)
+
+ if reason is None:
+ reason = '(Unknown reason. Please check logs.)'
+
+ dst = utils.find_next_free(os.path.join(rejectdir, '{0}.reason'.format(upload.changes.filename)))
+ fh = fs.create(dst)
+ fh.write(reason)
+ fh.close()
+
+ # TODO: fix
+ if notify:
+ subst = subst_for_upload(upload)
+ subst['__REJECTOR_ADDRESS__'] = cnf['Dinstall::MyEmailAddress']
+ subst['__MANUAL_REJECT_MESSAGE__'] = ''
+ subst['__REJECT_MESSAGE__'] = reason
+ subst['__CC__'] = 'X-DAK-Rejection: automatic (moo)'
+
+ message = utils.TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'queue.rejected'))
+ utils.send_mail(message)
+
+ SummaryStats().reject_count += 1
+
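Rejections never overwrite earlier ones because every target name goes through utils.find_next_free. A sketch of the naming scheme this relies on (hypothetical reimplementation; dak's version in daklib/utils.py also errors out once too many candidates are taken):

    import os

    def find_next_free_sketch(dest, too_many=100):
        # Try 'name', then 'name.0', 'name.1', ... until one is unused.
        orig_dest = dest
        extra = 0
        while os.path.exists(dest) and extra < too_many:
            dest = '{0}.{1}'.format(orig_dest, extra)
            extra += 1
        return dest
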
+###############################################################################
+
+def action(directory, upload):
+ changes = upload.changes
+ processed = True
-def action(u, session):
global Logger
cnf = Config()
- holding = Holding()
- # changes["distribution"] may not exist in corner cases
- # (e.g. unreadable changes files)
- if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], dict):
- u.pkg.changes["distribution"] = {}
+ okay = upload.check()
- (summary, short_summary) = u.build_summaries()
+ summary = changes.changes.get('Changes', '')
+
+ package_info = []
+ if okay:
+ if changes.source is not None:
+ package_info.append("source:{0}".format(changes.source.dsc['Source']))
+ for binary in changes.binaries:
+ package_info.append("binary:{0}".format(binary.control['Package']))
(prompt, answer) = ("", "XXX")
if Options["No-Action"] or Options["Automatic"]:
queuekey = ''
- pi = u.package_info()
+ print summary
+ print
+ print "\n".join(package_info)
+ print
- try:
- chg = session.query(DBChange).filter_by(changesname=os.path.basename(u.pkg.changes_file)).one()
- except NoResultFound as e:
- chg = None
+ if len(upload.reject_reasons) > 0:
+ print "Reason:"
+ print "\n".join(upload.reject_reasons)
+ print
- if len(u.rejects) > 0:
- if u.upload_too_new():
- print "SKIP (too new)\n" + pi,
+ path = os.path.join(directory, changes.filename)
+ created = os.stat(path).st_mtime
+ now = time.time()
+ too_new = (now - created < int(cnf['Dinstall::SkipTime']))
+
+ if too_new:
+ print "SKIP (too new)"
prompt = "[S]kip, Quit ?"
else:
- print "REJECT\n" + pi
prompt = "[R]eject, Skip, Quit ?"
if Options["Automatic"]:
answer = 'R'
+ elif upload.new:
+ prompt = "[N]ew, Skip, Quit ?"
+ if Options['Automatic']:
+ answer = 'N'
else:
- # Are we headed for NEW / BYHAND / AUTOBYHAND?
- # Note that policy queues are no longer handled here
- qu = determine_target(u)
- if qu:
- print "%s for %s\n%s%s" % ( qu.upper(), ", ".join(u.pkg.changes["distribution"].keys()), pi, summary)
- queuekey = qu[0].upper()
- if queuekey in "RQSA":
- queuekey = "D"
- prompt = "[D]ivert, Skip, Quit ?"
- else:
- prompt = "[%s]%s, Skip, Quit ?" % (queuekey, qu[1:].lower())
- if Options["Automatic"]:
- answer = queuekey
- else:
- # Does suite have a policy_queue configured
- divert = False
- for s in u.pkg.changes["distribution"].keys():
- suite = get_suite(s, session)
- if suite.policy_queue:
- if not chg or chg.approved_for_id != suite.policy_queue.policy_queue_id:
- # This routine will check whether the upload is a binary
- # upload when the source is already in the target suite. If
- # so, we skip the policy queue, otherwise we go there.
- divert = package_to_suite(u, suite.suite_name, session=session)
- if divert:
- print "%s for %s\n%s%s" % ( suite.policy_queue.queue_name.upper(),
- ", ".join(u.pkg.changes["distribution"].keys()),
- pi, summary)
- queuekey = "P"
- prompt = "[P]olicy, Skip, Quit ?"
- policyqueue = suite.policy_queue
- if Options["Automatic"]:
- answer = 'P'
- break
-
- if not divert:
- print "ACCEPT\n" + pi + summary,
- prompt = "[A]ccept, Skip, Quit ?"
- if Options["Automatic"]:
- answer = 'A'
+ prompt = "[A]ccept, Skip, Quit ?"
+ if Options['Automatic']:
+ answer = 'A'
while prompt.find(answer) == -1:
answer = utils.our_raw_input(prompt)
answer = answer[:1].upper()
if answer == 'R':
- os.chdir(u.pkg.directory)
- u.do_reject(0, pi)
+ reject(directory, upload)
elif answer == 'A':
- if not chg:
- chg = u.pkg.add_known_changes(holding.holding_dir, session=session, logger=Logger)
- session.commit()
- u.accept(summary, short_summary, session)
- u.check_override()
- chg.clean_from_queue()
- session.commit()
- u.remove()
- elif answer == 'P':
- if not chg:
- chg = u.pkg.add_known_changes(holding.holding_dir, session=session, logger=Logger)
- package_to_queue(u, summary, short_summary, policyqueue, chg, session)
- session.commit()
- u.remove()
- elif answer == queuekey:
- if not chg:
- chg = u.pkg.add_known_changes(holding.holding_dir, session=session, logger=Logger)
- QueueInfo[qu]["process"](u, summary, short_summary, chg, session)
- session.commit()
- u.remove()
+ # upload.try_autobyhand must not be run with No-Action.
+ if Options['No-Action']:
+ accept(directory, upload)
+ elif upload.try_autobyhand():
+ accept(directory, upload)
+ else:
+ print "W: redirecting to BYHAND as automatic processing failed."
+ accept_to_new(directory, upload)
+ elif answer == 'N':
+ accept_to_new(directory, upload)
elif answer == 'Q':
- byebye()
sys.exit(0)
+ elif answer == 'S':
+ processed = False
+
+ if not Options['No-Action']:
+ upload.commit()
- session.commit()
+ return processed
###############################################################################
-def cleanup():
- h = Holding()
- if not Options["No-Action"]:
- h.clean()
+def unlink_if_exists(path):
+ try:
+ os.unlink(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
-def process_it(changes_file, session):
+def process_it(directory, changes, keyrings, session):
global Logger
- Logger.log(["Processing changes file", changes_file])
+ print "\n{0}\n".format(changes.filename)
+ Logger.log(["Processing changes file", changes.filename])
cnf = Config()
- holding = Holding()
-
- # TODO: Actually implement using pending* tables so that we don't lose track
- # of what is where
-
- u = Upload()
- u.pkg.changes_file = changes_file
- u.pkg.directory = os.getcwd()
- u.logger = Logger
- origchanges = os.path.abspath(u.pkg.changes_file)
-
# Some defaults in case we can't fully process the .changes file
- u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"]
- u.pkg.changes["changedby2047"] = cnf["Dinstall::MyEmailAddress"]
+ #u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"]
+ #u.pkg.changes["changedby2047"] = cnf["Dinstall::MyEmailAddress"]
# debian-{devel-,}-changes@lists.debian.org toggles write access based on this header
bcc = "X-DAK: dak process-upload"
- if cnf.has_key("Dinstall::Bcc"):
- u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
- else:
- u.Subst["__BCC__"] = bcc
+ #if cnf.has_key("Dinstall::Bcc"):
+ # u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
+ #else:
+ # u.Subst["__BCC__"] = bcc
+
+ with daklib.archive.ArchiveUpload(directory, changes, keyrings) as upload:
+ processed = action(directory, upload)
+ if processed and not Options['No-Action']:
+ unlink_if_exists(os.path.join(directory, changes.filename))
+ for fn in changes.files:
+ unlink_if_exists(os.path.join(directory, fn))
- # Remember where we are so we can come back after cd-ing into the
- # holding directory. TODO: Fix this stupid hack
- u.prevdir = os.getcwd()
+###############################################################################
- try:
- # If this is the Real Thing(tm), copy things into a private
- # holding directory first to avoid replacable file races.
- if not Options["No-Action"]:
- holding.chdir_to_holding()
-
- # Absolutize the filename to avoid the requirement of being in the
- # same directory as the .changes file.
- holding.copy_to_holding(origchanges)
-
- # Relativize the filename so we use the copy in holding
- # rather than the original...
- changespath = os.path.basename(u.pkg.changes_file)
- else:
- changespath = origchanges
+def process_changes(changes_filenames):
+ session = DBConn().session()
+ keyrings = session.query(Keyring).filter_by(active=True).order_by(Keyring.priority)
+ keyring_files = [ k.keyring_name for k in keyrings ]
+
+ changes = []
+ for fn in changes_filenames:
+ try:
+ directory, filename = os.path.split(fn)
+ c = daklib.upload.Changes(directory, filename, keyring_files)
+ changes.append([directory, c])
+ except Exception as e:
+            Logger.log([fn, "Error while loading changes: {0}".format(e)])
- (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changespath)
+ changes.sort(key=lambda x: x[1])
- if u.pkg.changes["fingerprint"]:
- valid_changes_p = u.load_changes(changespath)
- else:
- for reason in rejects:
- if re_match_expired.match(reason):
- # Hrm, key expired. Lets see if we can still parse the .changes before
- # we reject. Then we would be able to mail the maintainer, instead of
- # just silently dropping the upload.
- u.load_changes(changespath)
- valid_changes_p = False
- u.rejects.extend(rejects)
-
- if valid_changes_p:
- u.check_distributions()
- u.check_files(not Options["No-Action"])
- valid_dsc_p = u.check_dsc(not Options["No-Action"])
- if valid_dsc_p and not Options["No-Action"]:
- u.check_source()
- u.check_hashes()
- if valid_dsc_p and not Options["No-Action"] and not len(u.rejects):
- u.check_lintian()
- u.check_urgency()
- u.check_timestamps()
- u.check_signed_by_key()
-
- action(u, session)
-
- except (SystemExit, KeyboardInterrupt):
- cleanup()
- raise
-
- except:
- print "ERROR"
- traceback.print_exc(file=sys.stderr)
-
- cleanup()
- # Restore previous WD
- os.chdir(u.prevdir)
+ for directory, c in changes:
+ process_it(directory, c, keyring_files, session)
+
+ session.rollback()
###############################################################################
cnf = Config()
summarystats = SummaryStats()
- DBConn()
-
Arguments = [('a',"automatic","Dinstall::Options::Automatic"),
('h',"help","Dinstall::Options::Help"),
('n',"no-action","Dinstall::Options::No-Action"),
else:
Logger.log(["Using changes files from command-line", len(changes_files)])
- # Sort the .changes files so that we process sourceful ones first
- changes_files.sort(utils.changes_compare)
-
- # Process the changes files
- for changes_file in changes_files:
- print "\n" + changes_file
- session = DBConn().session()
- process_it(changes_file, session)
- session.close()
+ process_changes(changes_files)
if summarystats.accept_count:
sets = "set"
print "Rejected %d package %s." % (summarystats.reject_count, sets)
Logger.log(["rejected", summarystats.reject_count])
- byebye()
-
if not Options["No-Action"]:
urgencylog.close()
pass
from daklib import utils
-from daklib.queue import Upload
-from daklib.dbconn import DBConn, has_new_comment, DBChange, DBSource, \
- get_uid_from_fingerprint, get_policy_queue
+from daklib.dbconn import DBConn, DBSource, has_new_comment, PolicyQueue, \
+ get_uid_from_fingerprint
from daklib.textutils import fix_maintainer
from daklib.dak_exceptions import *
############################################################
-def process_changes_files(changes_files, type, log, rrd_dir):
+def process_queue(queue, log, rrd_dir):
msg = ""
- cache = {}
- unprocessed = []
- # Read in all the .changes files
- for filename in changes_files:
- try:
- u = Upload()
- u.load_changes(filename)
- cache[filename] = copy(u.pkg.changes)
- cache[filename]["filename"] = filename
- except Exception as e:
- print "WARNING: Exception %s" % e
- continue
+ type = queue.queue_name
+
# Divide the .changes into per-source groups
per_source = {}
- for filename in cache.keys():
- if not cache[filename].has_key("source"):
- unprocessed.append(filename)
- continue
- source = cache[filename]["source"]
- if not per_source.has_key(source):
+ for upload in queue.uploads:
+ source = upload.changes.source
+ if source not in per_source:
per_source[source] = {}
per_source[source]["list"] = []
- per_source[source]["list"].append(cache[filename])
+ per_source[source]["list"].append(upload)
# Determine oldest time and have note status for each source group
for source in per_source.keys():
source_list = per_source[source]["list"]
first = source_list[0]
- oldest = os.stat(first["filename"])[stat.ST_MTIME]
+ oldest = time.mktime(first.changes.created.timetuple())
have_note = 0
for d in per_source[source]["list"]:
- mtime = os.stat(d["filename"])[stat.ST_MTIME]
+ mtime = time.mktime(d.changes.created.timetuple())
if Cnf.has_key("Queue-Report::Options::New"):
if mtime > oldest:
oldest = mtime
else:
if mtime < oldest:
oldest = mtime
- have_note += has_new_comment(d["source"], d["version"])
+ have_note += has_new_comment(d.changes.source, d.changes.version)
per_source[source]["oldest"] = oldest
if not have_note:
per_source[source]["note_state"] = 0; # none
per_source_items = per_source.items()
per_source_items.sort(sg_compare)
- update_graph_database(rrd_dir, type, len(per_source_items), len(changes_files))
+ update_graph_database(rrd_dir, type, len(per_source_items), len(queue.uploads))
entries = []
max_source_len = 0
changeby = {}
changedby=""
sponsor=""
- filename=i[1]["list"][0]["filename"]
+ filename=i[1]["list"][0].changes.changesname
last_modified = time.time()-i[1]["oldest"]
- source = i[1]["list"][0]["source"]
+ source = i[1]["list"][0].changes.source
if len(source) > max_source_len:
max_source_len = len(source)
- binary_list = i[1]["list"][0]["binary"].keys()
- binary = ', '.join(binary_list)
- arches = {}
- versions = {}
+ binary_list = i[1]["list"][0].binaries
+ binary = ', '.join([ b.package for b in binary_list ])
+ arches = set()
+ versions = set()
for j in i[1]["list"]:
- changesbase = os.path.basename(j["filename"])
- try:
- session = DBConn().session()
- dbc = session.query(DBChange).filter_by(changesname=changesbase).one()
- session.close()
- except Exception as e:
- print "Can't find changes file in NEW for %s (%s)" % (changesbase, e)
- dbc = None
+ dbc = j.changes
+ changesbase = dbc.changesname
if Cnf.has_key("Queue-Report::Options::New") or Cnf.has_key("Queue-Report::Options::822"):
try:
(maintainer["maintainer822"], maintainer["maintainer2047"],
maintainer["maintainername"], maintainer["maintaineremail"]) = \
- fix_maintainer (j["maintainer"])
+ fix_maintainer (dbc.maintainer)
except ParseMaintError as msg:
print "Problems while parsing maintainer address\n"
maintainer["maintainername"] = "Unknown"
try:
(changeby["changedby822"], changeby["changedby2047"],
changeby["changedbyname"], changeby["changedbyemail"]) = \
- fix_maintainer (j["changed-by"])
+ fix_maintainer (dbc.changedby)
except ParseMaintError as msg:
(changeby["changedby822"], changeby["changedby2047"],
changeby["changedbyname"], changeby["changedbyemail"]) = \
("", "", "", "")
changedby="%s:%s" % (changeby["changedbyname"], changeby["changedbyemail"])
- distribution=j["distribution"].keys()
- closes=j["closes"].keys()
- if dbc:
- fingerprint = dbc.fingerprint
- sponsor_name = get_uid_from_fingerprint(fingerprint).name
- sponsor_email = get_uid_from_fingerprint(fingerprint).uid + "@debian.org"
- if sponsor_name != maintainer["maintainername"] and sponsor_name != changeby["changedbyname"] and \
- sponsor_email != maintainer["maintaineremail"] and sponsor_name != changeby["changedbyemail"]:
- sponsor = sponsor_email
-
- for arch in j["architecture"].keys():
- arches[arch] = ""
- version = j["version"]
- versions[version] = ""
- arches_list = arches.keys()
+ distribution=dbc.distribution.split()
+ closes=dbc.closes
+
+ fingerprint = dbc.fingerprint
+ sponsor_name = get_uid_from_fingerprint(fingerprint).name
+ sponsor_email = get_uid_from_fingerprint(fingerprint).uid + "@debian.org"
+ if sponsor_name != maintainer["maintainername"] and sponsor_name != changeby["changedbyname"] and \
+ sponsor_email != maintainer["maintaineremail"] and sponsor_name != changeby["changedbyemail"]:
+ sponsor = sponsor_email
+
+ for arch in dbc.architecture.split():
+ arches.add(arch)
+ versions.add(dbc.version)
+ arches_list = list(arches)
arches_list.sort(utils.arch_compare_sw)
arch_list = " ".join(arches_list)
- version_list = " ".join(versions.keys())
+ version_list = " ".join(versions)
if len(version_list) > max_version_len:
max_version_len = len(version_list)
if len(arch_list) > max_arch_len:
msg += format % (source, version_list, arch_list, note, time_pp(last_modified))
if msg:
- total_count = len(changes_files)
+ total_count = len(queue.uploads)
source_count = len(per_source_items)
print type.upper()
print "-"*len(type)
print "%s %s source package%s / %s %s package%s in total." % (source_count, type, plural(source_count), total_count, type, plural(total_count))
print
- if len(unprocessed):
- print "UNPROCESSED"
- print "-----------"
- for u in unprocessed:
- print u
- print
-
################################################################################
def main():
if Cnf.has_key("Queue-Report::Options::New"):
header()
- # Initialize db so we can get the NEW comments
- dbconn = DBConn()
-
- queue_names = [ ]
+ queue_names = []
if Cnf.has_key("Queue-Report::Options::Directories"):
for i in Cnf["Queue-Report::Options::Directories"].split(","):
# Open the report file
f = open(Cnf["Queue-Report::ReportLocations::822Location"], "w")
- session = dbconn.session()
+ session = DBConn().session()
for queue_name in queue_names:
- queue = get_policy_queue(queue_name, session)
- if queue:
- directory = os.path.abspath(queue.path)
- changes_files = glob.glob("%s/*.changes" % (directory))
- process_changes_files(changes_files, os.path.basename(directory), f, rrd_dir)
+ queue = session.query(PolicyQueue).filter_by(queue_name=queue_name).first()
+ if queue is not None:
+ process_queue(queue, f, rrd_dir)
else:
utils.warn("Cannot find queue %s" % queue_name)
if Options["Binary-Only"]:
# Binary-only
- q = session.execute("SELECT b.package, b.version, a.arch_string, b.id, b.maintainer FROM binaries b, bin_associations ba, architecture a, suite su, files f, location l, component c WHERE ba.bin = b.id AND ba.suite = su.id AND b.architecture = a.id AND b.file = f.id AND f.location = l.id AND l.component = c.id %s %s %s %s" % (con_packages, con_suites, con_components, con_architectures))
+ q = session.execute("SELECT b.package, b.version, a.arch_string, b.id, b.maintainer FROM binaries b, bin_associations ba, architecture a, suite su, files f, files_archive_map af, component c WHERE ba.bin = b.id AND ba.suite = su.id AND b.architecture = a.id AND b.file = f.id AND af.file_id = f.id AND af.archive_id = su.archive_id AND af.component_id = c.id %s %s %s %s" % (con_packages, con_suites, con_components, con_architectures))
for i in q.fetchall():
to_remove.append(i)
else:
# Source-only
source_packages = {}
- q = session.execute("SELECT l.path, f.filename, s.source, s.version, 'source', s.id, s.maintainer FROM source s, src_associations sa, suite su, files f, location l, component c WHERE sa.source = s.id AND sa.suite = su.id AND s.file = f.id AND f.location = l.id AND l.component = c.id %s %s %s" % (con_packages, con_suites, con_components))
+ q = session.execute("SELECT archive.path || '/pool/' || c.name || '/', f.filename, s.source, s.version, 'source', s.id, s.maintainer FROM source s, src_associations sa, suite su, archive, files f, files_archive_map af, component c WHERE sa.source = s.id AND sa.suite = su.id AND archive.id = su.archive_id AND s.file = f.id AND af.file_id = f.id AND af.archive_id = su.archive_id AND af.component_id = c.id %s %s %s" % (con_packages, con_suites, con_components))
for i in q.fetchall():
source_packages[i[2]] = i[:2]
to_remove.append(i[2:])
# Source + Binary
binary_packages = {}
# First get a list of binary package names we suspect are linked to the source
- q = session.execute("SELECT DISTINCT b.package FROM binaries b, source s, src_associations sa, suite su, files f, location l, component c WHERE b.source = s.id AND sa.source = s.id AND sa.suite = su.id AND s.file = f.id AND f.location = l.id AND l.component = c.id %s %s %s" % (con_packages, con_suites, con_components))
+ q = session.execute("SELECT DISTINCT b.package FROM binaries b, source s, src_associations sa, suite su, archive, files f, files_archive_map af, component c WHERE b.source = s.id AND sa.source = s.id AND sa.suite = su.id AND su.archive_id = archive.id AND s.file = f.id AND f.id = af.file_id AND af.archive_id = su.archive_id AND af.component_id = c.id %s %s %s" % (con_packages, con_suites, con_components))
for i in q.fetchall():
binary_packages[i[0]] = ""
# Then parse each .dsc that we found earlier to see what binary packages it thinks it produces
# source package and if so add it to the list of packages
# to be removed.
for package in binary_packages.keys():
- q = session.execute("SELECT l.path, f.filename, b.package, b.version, a.arch_string, b.id, b.maintainer FROM binaries b, bin_associations ba, architecture a, suite su, files f, location l, component c WHERE ba.bin = b.id AND ba.suite = su.id AND b.architecture = a.id AND b.file = f.id AND f.location = l.id AND l.component = c.id %s %s %s AND b.package = '%s'" % (con_suites, con_components, con_architectures, package))
+ q = session.execute("SELECT archive.path || '/pool/' || c.name || '/', f.filename, b.package, b.version, a.arch_string, b.id, b.maintainer FROM binaries b, bin_associations ba, architecture a, suite su, archive, files f, files_archive_map af, component c WHERE ba.bin = b.id AND ba.suite = su.id AND archive.id = su.archive_id AND b.architecture = a.id AND b.file = f.id AND f.id = af.file_id AND af.archive_id = su.archive_id AND af.component_id = c.id %s %s %s AND b.package = '%s'" % (con_suites, con_components, con_architectures, package))
for i in q.fetchall():
filename = "/".join(i[:2])
control = apt_pkg.TagSection(utils.deb_extract_control(utils.open_file(filename)))
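
These queries follow the schema migration running through the whole patch: the old files -> location -> component chain is replaced by the files_archive_map association table, which records the archive and component a pool file belongs to, so one file row can be published in several archives. The new join shape, reduced to a runnable sqlite3 toy:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.executescript("""
        CREATE TABLE files (id INTEGER PRIMARY KEY, filename TEXT);
        CREATE TABLE archive (id INTEGER PRIMARY KEY, name TEXT, path TEXT);
        CREATE TABLE component (id INTEGER PRIMARY KEY, name TEXT);
        CREATE TABLE files_archive_map (file_id INTEGER, archive_id INTEGER, component_id INTEGER);
        INSERT INTO files VALUES (1, 'h/hello/hello_1.0-1.dsc');
        INSERT INTO archive VALUES (1, 'ftp-master', '/srv/ftp-master.debian.org/ftp');
        INSERT INTO component VALUES (1, 'main');
        INSERT INTO files_archive_map VALUES (1, 1, 1);
    """)
    row = conn.execute("""
        SELECT archive.path || '/pool/' || c.name || '/' || f.filename
        FROM files f
        JOIN files_archive_map af ON af.file_id = f.id
        JOIN archive ON af.archive_id = archive.id
        JOIN component c ON af.component_id = c.id
        WHERE archive.name = 'ftp-master'
    """).fetchone()
    print row[0]  # /srv/ftp-master.debian.org/ftp/pool/main/h/hello/hello_1.0-1.dsc
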
import apt_pkg
import examine_package
+from daklib import policy
from daklib.dbconn import *
-from daklib.queue import determine_new, check_valid, Upload, get_policy_queue
from daklib import utils
from daklib.regexes import re_source_ext
from daklib.config import Config
from daklib import daklog
-from daklib.changesutils import *
from daklib.dakmultiprocessing import DakProcessPool, PROC_STATUS_SUCCESS, PROC_STATUS_SIGNALRAISED
from multiprocessing import Manager, TimeoutError
################################################################################
################################################################################
-def html_header(name, filestoexamine):
+def html_header(name, missing):
if name.endswith('.changes'):
name = ' '.join(name.split('_')[:2])
result = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<p><a href="#source-lintian" onclick="show('source-lintian-body')">source lintian</a></p>
"""
- for fn in filter(lambda x: x.endswith('.deb') or x.endswith('.udeb'),filestoexamine):
- packagename = fn.split('_')[0]
+ for binarytype, packagename in filter(lambda m: m[0] in ('deb', 'udeb'), missing):
result += """
<p class="subtitle">%(pkg)s</p>
<p><a href="#binary-%(pkg)s-control" onclick="show('binary-%(pkg)s-control-body')">control file</a></p>
################################################################################
-def do_pkg(changes_file):
- changes_file = utils.validate_changes_file_arg(changes_file, 0)
- if not changes_file:
- return
- print "\n" + changes_file
+def do_pkg(upload_id):
+ session = DBConn().session()
+ upload = session.query(PolicyQueueUpload).filter_by(id=upload_id).one()
- u = Upload()
- u.pkg.changes_file = changes_file
- # We can afoord not to check the signature before loading the changes file
- # as we've validated it already (otherwise it couldn't be in new)
- # and we can more quickly skip over already processed files this way
- u.load_changes(changes_file)
+ queue = upload.policy_queue
+ changes = upload.changes
- origchanges = os.path.abspath(u.pkg.changes_file)
+ origchanges = os.path.join(queue.path, changes.changesname)
+ print origchanges
- # Still be cautious in case paring the changes file went badly
- if u.pkg.changes.has_key('source') and u.pkg.changes.has_key('version'):
- htmlname = u.pkg.changes["source"] + "_" + u.pkg.changes["version"] + ".html"
- htmlfile = os.path.join(cnf["Show-New::HTMLPath"], htmlname)
- else:
- # Changes file was bad
- print "Changes file %s missing source or version field" % changes_file
- return
+ htmlname = "{0}_{1}.html".format(changes.source, changes.version)
+ htmlfile = os.path.join(cnf['Show-New::HTMLPath'], htmlname)
# Have we already processed this?
- if os.path.exists(htmlfile) and \
- os.stat(htmlfile).st_mtime > os.stat(origchanges).st_mtime:
+    if os.path.exists(htmlfile) and \
+ os.stat(htmlfile).st_mtime > time.mktime(changes.created.timetuple()):
with open(htmlfile, "r") as fd:
if fd.read() != timeout_str:
sources.append(htmlname)
return (PROC_STATUS_SUCCESS,
'%s already up-to-date' % htmlfile)
- # Now we'll load the fingerprint
- session = DBConn().session()
+ # Go, process it... Now!
htmlfiles_to_process.append(htmlfile)
- (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changes_file, session=session)
- new_queue = get_policy_queue('new', session );
- u.pkg.directory = new_queue.path
- u.update_subst()
- files = u.pkg.files
- changes = u.pkg.changes
sources.append(htmlname)
- for deb_filename, f in files.items():
- if deb_filename.endswith(".udeb") or deb_filename.endswith(".deb"):
- u.binary_file_checks(deb_filename, session)
- u.check_binary_against_db(deb_filename, session)
- else:
- u.source_file_checks(deb_filename, session)
- u.check_source_against_db(deb_filename, session)
- u.pkg.changes["suite"] = u.pkg.changes["distribution"]
-
- new, byhand = determine_new(u.pkg.changes_file, u.pkg.changes, files, 0, dsc=u.pkg.dsc, session=session)
-
- outfile = open(os.path.join(cnf["Show-New::HTMLPath"],htmlname),"w")
-
- filestoexamine = []
- for pkg in new.keys():
- for fn in new[pkg]["files"]:
- filestoexamine.append(fn)
-
- print >> outfile, html_header(changes["source"], filestoexamine)
+ with open(htmlfile, 'w') as outfile:
+ with policy.UploadCopy(upload) as upload_copy:
+ handler = policy.PolicyQueueUploadHandler(upload, session)
+ missing = [ (o['type'], o['package']) for o in handler.missing_overrides() ]
+ distribution = changes.distribution
- check_valid(new, session)
- distribution = changes["distribution"].keys()[0]
- print >> outfile, examine_package.display_changes(distribution, changes_file)
+ print >>outfile, html_header(changes.source, missing)
+ print >>outfile, examine_package.display_changes(distribution, origchanges)
- for fn in filter(lambda fn: fn.endswith(".dsc"), filestoexamine):
- print >> outfile, examine_package.check_dsc(distribution, fn, session)
- for fn in filter(lambda fn: fn.endswith(".deb") or fn.endswith(".udeb"), filestoexamine):
- print >> outfile, examine_package.check_deb(distribution, fn, session)
+ if upload.source is not None and ('dsc', upload.source.source) in missing:
+ fn = os.path.join(upload_copy.directory, upload.source.poolfile.basename)
+ print >>outfile, examine_package.check_dsc(distribution, fn, session)
+ for binary in upload.binaries:
+ if (binary.binarytype, binary.package) not in missing:
+ continue
+ fn = os.path.join(upload_copy.directory, binary.poolfile.basename)
+ print >>outfile, examine_package.check_deb(distribution, fn, session)
- print >> outfile, html_footer()
+ print >>outfile, html_footer()
- outfile.close()
session.close()
-
htmlfiles_to_process.remove(htmlfile)
- return (PROC_STATUS_SUCCESS, '%s already updated' % htmlfile)
+    return (PROC_STATUS_SUCCESS, '{0} updated'.format(htmlfile))
################################################################################
if not cnf.has_key("Show-New::Options::%s" % (i)):
cnf["Show-New::Options::%s" % (i)] = ""
- changes_files = apt_pkg.parse_commandline(cnf.Cnf,Arguments,sys.argv)
- if len(changes_files) == 0:
- new_queue = get_policy_queue('new', session );
- changes_files = utils.get_changes_files(new_queue.path)
-
+ changesnames = apt_pkg.parse_commandline(cnf.Cnf,Arguments,sys.argv)
Options = cnf.subtree("Show-New::Options")
if Options["help"]:
usage()
- return changes_files
+ uploads = session.query(PolicyQueueUpload) \
+ .join(PolicyQueueUpload.policy_queue).filter(PolicyQueue.queue_name == 'new') \
+ .join(PolicyQueueUpload.changes).order_by(DBChange.source)
+
+ if len(changesnames) > 0:
+ uploads = uploads.filter(DBChange.changesname.in_(changesnames))
+
+ return uploads
################################################################################
def main():
session = DBConn().session()
- changes_files = init(session)
+ upload_ids = [ u.id for u in init(session) ]
+ session.close()
examine_package.use_html=1
pool = DakProcessPool(processes=5)
- p = pool.map_async(do_pkg, changes_files)
+ p = pool.map_async(do_pkg, upload_ids)
pool.close()
+
p.wait(timeout=600)
for htmlfile in htmlfiles_to_process:
with open(htmlfile, "w") as fd:
################################################################################
Cnf = None
-required_database_schema = 74
+required_database_schema = 77
################################################################################
for f in self.changes.files.itervalues():
src = os.path.join(self.original_directory, f.filename)
dst = os.path.join(self.directory, f.filename)
+ if not os.path.exists(src):
+ continue
fs.copy(src, dst)
source = self.changes.source
for f in source.files.itervalues():
src = os.path.join(self.original_directory, f.filename)
dst = os.path.join(self.directory, f.filename)
- if f.filename not in self.changes.files:
- db_file = self.transaction.get_file(f, source.dsc['Source'])
- db_archive_file = session.query(ArchiveFile).filter_by(file=db_file).first()
- fs.copy(db_archive_file.path, dst, symlink=True)
+ if not os.path.exists(dst):
+ try:
+ db_file = self.transaction.get_file(f, source.dsc['Source'])
+ db_archive_file = session.query(ArchiveFile).filter_by(file=db_file).first()
+ fs.copy(db_archive_file.path, dst, symlink=True)
+ except KeyError:
+ # Ignore if get_file could not find it. Upload will
+ # probably be rejected later.
+ pass
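
The hunk above makes UploadCopy tolerant of partial uploads: files shipped with the upload are copied if present, and source files already known to the archive are symlinked out of the pool, with lookup failures left for the later checks to reject. The materialisation logic in isolation (a sketch; the dictionaries stand in for the transaction and ArchiveFile machinery):

    import os
    import shutil

    def materialize(directory, upload_files, pool_paths):
        # upload_files: {filename: path shipped with the upload, or None}
        # pool_paths:   {filename: path of an existing pool copy, if any}
        for filename, src in upload_files.items():
            dst = os.path.join(directory, filename)
            if src is not None and os.path.exists(src):
                shutil.copy(src, dst)                   # came with the upload
            elif filename in pool_paths:
                os.symlink(pool_paths[filename], dst)   # reuse the pool copy
            # else: leave the gap; validation rejects the upload later
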
def unpacked_source(self):
"""Path to unpacked source
suites = session.query(Suite).filter(Suite.suite_name.in_(suite_names))
return suites
- def _mapped_component(self, component_name):
- """get component after mappings
-
- Evaluate component mappings from ComponentMappings in dak.conf for the
- given component name.
-
- @todo: ansgar wants to get rid of this. It's currently only used for
- the security archive
-
- @type component_name: str
- @param component_name: component name
-
- @rtype: L{daklib.dbconn.Component}
- @return: component after applying maps
- """
- cnf = Config()
- for m in cnf.value_list("ComponentMappings"):
- (src, dst) = m.split()
- if component_name == src:
- component_name = dst
- component = self.session.query(Component).filter_by(component_name=component_name).one()
- return component
-
def _check_new(self, suite):
"""Check if upload is NEW
return override.component
if only_overrides:
return None
- return self._mapped_component(binary.component)
+ return get_mapped_component(binary.component, self.session)
def check(self, force=False):
"""run checks against the upload
+++ /dev/null
-#!/usr/bin/env python
-# vim:set et ts=4 sw=4:
-
-"""Utilities for handling changes files
-
-@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2001, 2002, 2003, 2004, 2005, 2006 James Troup <james@nocrew.org>
-@copyright: 2009 Joerg Jaspert <joerg@debian.org>
-@copyright: 2009 Frank Lichtenheld <djpig@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-################################################################################
-
-import copy
-import os
-import stat
-import apt_pkg
-
-from daklib.dbconn import *
-from daklib.queue import *
-from daklib import utils
-from daklib.config import Config
-
-################################################################################
-
-__all__ = []
-
-################################################################################
-
-def indiv_sg_compare (a, b):
- """Sort by source name, source, version, 'have source', and
- finally by filename."""
- # Sort by source version
- q = apt_pkg.version_compare(a["version"], b["version"])
- if q:
- return -q
-
- # Sort by 'have source'
- a_has_source = a["architecture"].get("source")
- b_has_source = b["architecture"].get("source")
- if a_has_source and not b_has_source:
- return -1
- elif b_has_source and not a_has_source:
- return 1
-
- return cmp(a["filename"], b["filename"])
-
-__all__.append('indiv_sg_compare')
-
-############################################################
-
-def sg_compare (a, b):
- a = a[1]
- b = b[1]
- """Sort by have note, source already in database and time of oldest upload."""
- # Sort by have note
- a_note_state = a["note_state"]
- b_note_state = b["note_state"]
- if a_note_state < b_note_state:
- return -1
- elif a_note_state > b_note_state:
- return 1
- # Sort by source already in database (descending)
- source_in_database = cmp(a["source_in_database"], b["source_in_database"])
- if source_in_database:
- return -source_in_database
-
- # Sort by time of oldest upload
- return cmp(a["oldest"], b["oldest"])
-
-__all__.append('sg_compare')
-
-def sort_changes(changes_files, session, binaries = None):
- """Sort into source groups, then sort each source group by version,
- have source, filename. Finally, sort the source groups by have
- note, time of oldest upload of each source upload."""
- if len(changes_files) == 1:
- return changes_files
-
- sorted_list = []
- cache = {}
- # Read in all the .changes files
- for filename in changes_files:
- u = Upload()
- try:
- u.pkg.changes_file = filename
- u.load_changes(filename)
- u.update_subst()
- cache[filename] = copy.copy(u.pkg.changes)
- cache[filename]["filename"] = filename
- except:
- sorted_list.append(filename)
- break
- # Divide the .changes into per-source groups
- per_source = {}
- for filename in cache.keys():
- source = cache[filename]["source"]
- if not per_source.has_key(source):
- per_source[source] = {}
- per_source[source]["list"] = []
- per_source[source]["list"].append(cache[filename])
- # Determine oldest time and have note status for each source group
- for source in per_source.keys():
- q = session.query(DBSource).filter_by(source = source).all()
- per_source[source]["source_in_database"] = binaries and -(len(q)>0) or len(q)>0
- source_list = per_source[source]["list"]
- first = source_list[0]
- oldest = os.stat(first["filename"])[stat.ST_MTIME]
- have_note = 0
- for d in per_source[source]["list"]:
- mtime = os.stat(d["filename"])[stat.ST_MTIME]
- if mtime < oldest:
- oldest = mtime
- have_note += has_new_comment(d["source"], d["version"], session)
- per_source[source]["oldest"] = oldest
- if not have_note:
- per_source[source]["note_state"] = 0; # none
- elif have_note < len(source_list):
- per_source[source]["note_state"] = 1; # some
- else:
- per_source[source]["note_state"] = 2; # all
- per_source[source]["list"].sort(indiv_sg_compare)
- per_source_items = per_source.items()
- per_source_items.sort(sg_compare)
- for i in per_source_items:
- for j in i[1]["list"]:
- sorted_list.append(j["filename"])
- return sorted_list
-
-__all__.append('sort_changes')
-
-################################################################################
-
-def changes_to_queue(upload, srcqueue, destqueue, session):
- """Move a changes file to a different queue and mark as approved for the
- source queue"""
-
- try:
- chg = session.query(DBChange).filter_by(changesname=os.path.basename(upload.pkg.changes_file)).one()
- except NoResultFound:
- return False
-
- chg.approved_for_id = srcqueue.policy_queue_id
-
- for f in chg.files:
- # update the changes_pending_files row
- f.queue = destqueue
- # Only worry about unprocessed files
- if not f.processed:
- utils.move(os.path.join(srcqueue.path, f.filename), destqueue.path, perms=int(destqueue.perms, 8))
-
- utils.move(os.path.join(srcqueue.path, upload.pkg.changes_file), destqueue.path, perms=int(destqueue.perms, 8))
- chg.in_queue = destqueue
- session.commit()
-
- return True
-
-__all__.append('changes_to_queue')
-
-def new_accept(upload, dry_run, session):
- print "ACCEPT"
-
- if not dry_run:
- cnf = Config()
-
- (summary, short_summary) = upload.build_summaries()
- destqueue = get_policy_queue('newstage', session)
-
- srcqueue = get_policy_queue_from_path(upload.pkg.directory, session)
-
- if not srcqueue:
- # Assume NEW and hope for the best
- srcqueue = get_policy_queue('new', session)
-
- changes_to_queue(upload, srcqueue, destqueue, session)
-
-__all__.append('new_accept')
except Exception as e:
raise Reject('{0}: APT could not parse {1} field: {2}'.format(dsc_fn, field, e))
- # TODO: check all expected files for given source format are included
+ rejects = utils.check_dsc_files(dsc_fn, control, source.files.keys())
+ if len(rejects) > 0:
+ raise Reject("\n".join(rejects))
+
+ return True
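
check_dsc_files replaces the old TODO by validating that the .dsc's Files section contains exactly what the source format allows (at most one .orig tarball, no mixing of native and non-native tarballs, and so on). A much-reduced sketch of that bookkeeping (dak's real implementation in daklib/utils.py is format-aware and covers many more cases):

    import re

    def check_dsc_files_sketch(dsc_fn, declared_files):
        # declared_files: filenames listed in the .dsc Files section.
        rejects = []
        counts = {'orig': 0, 'native': 0}
        for fn in declared_files:
            if re.search(r'\.orig\.tar\.(gz|bz2|xz)$', fn):
                counts['orig'] += 1
            elif re.search(r'\.(debian\.tar\.(gz|bz2|xz)|diff\.gz)$', fn):
                pass                                    # packaging changes
            elif re.search(r'\.tar\.(gz|bz2|xz)$', fn):
                counts['native'] += 1
        if counts['orig'] > 1:
            rejects.append('{0}: more than one upstream tarball'.format(dsc_fn))
        if counts['orig'] and counts['native']:
            rejects.append('{0}: native and upstream tarballs mixed'.format(dsc_fn))
        return rejects
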
class SingleDistributionCheck(Check):
"""Check that the .changes targets only a single distribution."""
Returns a writer object.
'''
values = {
+ 'archive': self.suite.archive.path,
'suite': self.suite.suite_name,
'component': self.component.component_name,
'debtype': self.overridetype.overridetype,
insert into newest_sources (id, source)
select distinct on (source) s.id, s.source from source s
- join files f on f.id = s.file
- join location l on l.id = f.location
+ join files_archive_map af on s.file = af.file_id
where s.id in (select source from src_associations where suite = :suite_id)
- and l.component = :component_id
+ and af.component_id = :component_id
order by source, version desc;'''
self.session.execute(sql_create_temp, params=params)
Returns a writer object.
'''
values = {
+ 'archive': self.suite.archive.path,
'suite': self.suite.suite_name,
'component': self.component.component_name
}
class_.logger.log(result)
@classmethod
- def write_all(class_, logger, suite_names = [], component_names = [], force = False):
+ def write_all(class_, logger, archive_names = [], suite_names = [], component_names = [], force = False):
'''
Writes all Contents files for suites in list suite_names which defaults
to all 'touchable' suites if not specified explicitly. Untouchable
class_.logger = logger
session = DBConn().session()
suite_query = session.query(Suite)
+ if len(archive_names) > 0:
+ suite_query = suite_query.join(Suite.archive).filter(Archive.archive_name.in_(archive_names))
if len(suite_names) > 0:
suite_query = suite_query.filter(Suite.suite_name.in_(suite_names))
component_query = session.query(Component)
################################################################################
+class ArchiveFile(object):
+ def __init__(self, archive=None, component=None, file=None):
+ self.archive = archive
+ self.component = component
+ self.file = file
+
+    @property
+ def path(self):
+ return os.path.join(self.archive.path, 'pool', self.component.component_name, self.file.filename)
+
+__all__.append('ArchiveFile')
+
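ArchiveFile mirrors the files_archive_map rows used in the SQL earlier: the (archive, component, file) triple, with the pool path derived rather than stored. A quick usage sketch with stub objects in place of the mapped classes (assuming the patched daklib is importable):

    from daklib.dbconn import ArchiveFile  # added by this patch

    class _Stub(object):
        def __init__(self, **kw):
            self.__dict__.update(kw)

    af = ArchiveFile(archive=_Stub(path='/srv/ftp-master.debian.org/ftp'),
                     component=_Stub(component_name='main'),
                     file=_Stub(filename='h/hello/hello_1.0-1.dsc'))
    print af.path
    # -> /srv/ftp-master.debian.org/ftp/pool/main/h/hello/hello_1.0-1.dsc
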
+################################################################################
+
class BinContents(ORMObject):
def __init__(self, file = None, binary = None):
self.file = file
metadata = association_proxy('key', 'value')
- def get_component_name(self):
- return self.poolfile.location.component.component_name
-
def scan_contents(self):
'''
Yields the contents of the package. Only regular files are yielded and
################################################################################
-MINIMAL_APT_CONF="""
-Dir
-{
- ArchiveDir "%(archivepath)s";
- OverrideDir "%(overridedir)s";
- CacheDir "%(cachedir)s";
-};
-
-Default
-{
- Packages::Compress ". bzip2 gzip";
- Sources::Compress ". bzip2 gzip";
- DeLinkLimit 0;
- FileMode 0664;
-}
-
-bindirectory "incoming"
-{
- Packages "Packages";
- Contents " ";
-
- BinOverride "override.sid.all3";
- BinCacheDB "packages-accepted.db";
-
- FileList "%(filelist)s";
-
- PathPrefix "";
- Packages::Extensions ".deb .udeb";
-};
-
-bindirectory "incoming/"
-{
- Sources "Sources";
- BinOverride "override.sid.all3";
- SrcOverride "override.sid.all3.src";
- FileList "%(filelist)s";
-};
-"""
-
class BuildQueue(object):
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
return '<BuildQueue %s>' % self.queue_name
- def write_metadata(self, starttime, force=False):
- # Do we write out metafiles?
- if not (force or self.generate_metadata):
- return
-
- session = DBConn().session().object_session(self)
-
- fl_fd = fl_name = ac_fd = ac_name = None
- tempdir = None
- arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
- startdir = os.getcwd()
-
- try:
- # Grab files we want to include
- newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
- newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
- # Write file list with newer files
- (fl_fd, fl_name) = mkstemp()
- for n in newer:
- os.write(fl_fd, '%s\n' % n.fullpath)
- os.close(fl_fd)
-
- cnf = Config()
-
- # Write minimal apt.conf
- # TODO: Remove hardcoding from template
- (ac_fd, ac_name) = mkstemp()
- os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
- 'filelist': fl_name,
- 'cachedir': cnf["Dir::Cache"],
- 'overridedir': cnf["Dir::Override"],
- })
- os.close(ac_fd)
-
- # Run apt-ftparchive generate
- os.chdir(os.path.dirname(ac_name))
- os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
-
- # Run apt-ftparchive release
- # TODO: Eww - fix this
- bname = os.path.basename(self.path)
- os.chdir(self.path)
- os.chdir('..')
-
- # We have to remove the Release file otherwise it'll be included in the
- # new one
- try:
- os.unlink(os.path.join(bname, 'Release'))
- except OSError:
- pass
-
- os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
-
- # Crude hack with open and append, but this whole section is and should be redone.
- if self.notautomatic:
- release=open("Release", "a")
- release.write("NotAutomatic: yes\n")
- release.close()
-
- # Sign if necessary
- if self.signingkey:
- keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
- if cnf.has_key("Dinstall::SigningPubKeyring"):
- keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
-
- os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
-
- # Move the files if we got this far
- os.rename('Release', os.path.join(bname, 'Release'))
- if self.signingkey:
- os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
-
- # Clean up any left behind files
- finally:
- os.chdir(startdir)
- if fl_fd:
- try:
- os.close(fl_fd)
- except OSError:
- pass
-
- if fl_name:
- try:
- os.unlink(fl_name)
- except OSError:
- pass
-
- if ac_fd:
- try:
- os.close(ac_fd)
- except OSError:
- pass
-
- if ac_name:
- try:
- os.unlink(ac_name)
- except OSError:
- pass
-
- def clean_and_update(self, starttime, Logger, dryrun=False):
- """WARNING: This routine commits for you"""
- session = DBConn().session().object_session(self)
-
- if self.generate_metadata and not dryrun:
- self.write_metadata(starttime)
-
- # Grab files older than our execution time
- older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
- older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
-
- for o in older:
- killdb = False
- try:
- if dryrun:
- Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
- else:
- Logger.log(["I: Removing %s from the queue" % o.fullpath])
- os.unlink(o.fullpath)
- killdb = True
- except OSError as e:
- # If it wasn't there, don't worry
- if e.errno == ENOENT:
- killdb = True
- else:
- # TODO: Replace with proper logging call
- Logger.log(["E: Could not remove %s" % o.fullpath])
-
- if killdb:
- session.delete(o)
-
- session.commit()
-
- for f in os.listdir(self.path):
- if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
- continue
-
- if not self.contains_filename(f):
- fp = os.path.join(self.path, f)
- if dryrun:
- Logger.log(["I: Would remove unused link %s" % fp])
- else:
- Logger.log(["I: Removing unused link %s" % fp])
- try:
- os.unlink(fp)
- except OSError:
- Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
-
- def contains_filename(self, filename):
- """
- @rtype Boolean
- @returns True if filename is supposed to be in the queue; False otherwise
- """
- session = DBConn().session().object_session(self)
- if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
- return True
- elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
- return True
- return False
-
- def add_file_from_pool(self, poolfile):
- """Copies a file into the pool. Assumes that the PoolFile object is
- attached to the same SQLAlchemy session as the Queue object is.
-
- The caller is responsible for committing after calling this function."""
- poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-
- # Check if we have a file of this name or this ID already
- for f in self.queuefiles:
- if (f.fileid is not None and f.fileid == poolfile.file_id) or \
- (f.poolfile is not None and f.poolfile.filename == poolfile_basename):
- # In this case, update the BuildQueueFile entry so we
- # don't remove it too early
- f.lastused = datetime.now()
- DBConn().session().object_session(poolfile).add(f)
- return f
-
- # Prepare BuildQueueFile object
- qf = BuildQueueFile()
- qf.build_queue_id = self.queue_id
- qf.filename = poolfile_basename
-
- targetpath = poolfile.fullpath
- queuepath = os.path.join(self.path, poolfile_basename)
-
- try:
- if self.copy_files:
- # We need to copy instead of symlink
- import utils
- utils.copy(targetpath, queuepath)
- # NULL in the fileid field implies a copy
- qf.fileid = None
- else:
- os.symlink(targetpath, queuepath)
- qf.fileid = poolfile.file_id
- except FileExistsError:
- if not poolfile.identical_to(queuepath):
- raise
- except OSError:
- return None
-
- # Get the same session as the PoolFile is using and add the qf to it
- DBConn().session().object_session(poolfile).add(qf)
-
- return qf
-
- def add_changes_from_policy_queue(self, policyqueue, changes):
- """
- Copies a changes from a policy queue together with its poolfiles.
-
- @type policyqueue: PolicyQueue
- @param policyqueue: policy queue to copy the changes from
-
- @type changes: DBChange
- @param changes: changes to copy to this build queue
- """
- for policyqueuefile in changes.files:
- self.add_file_from_policy_queue(policyqueue, policyqueuefile)
- for poolfile in changes.poolfiles:
- self.add_file_from_pool(poolfile)
-
- def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
- """
- Copies a file from a policy queue.
- Assumes that the policyqueuefile is attached to the same SQLAlchemy
- session as the Queue object is. The caller is responsible for
- committing after calling this function.
-
- @type policyqueue: PolicyQueue
- @param policyqueue: policy queue to copy the file from
-
- @type policyqueuefile: ChangePendingFile
- @param policyqueuefile: file to be added to the build queue
- """
- session = DBConn().session().object_session(policyqueuefile)
-
- # Is the file already there?
- try:
- f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
- f.lastused = datetime.now()
- return f
- except NoResultFound:
- pass # continue below
-
- # We have to add the file.
- f = BuildQueuePolicyFile()
- f.build_queue = self
- f.file = policyqueuefile
- f.filename = policyqueuefile.filename
-
- source = os.path.join(policyqueue.path, policyqueuefile.filename)
- target = f.fullpath
- try:
- # Always copy files from policy queues as they might move around.
- import utils
- utils.copy(source, target)
- except FileExistsError:
- if not policyqueuefile.identical_to(target):
- raise
- except OSError:
- return None
-
- session.add(f)
- return f
-
__all__.append('BuildQueue')
-@session_wrapper
-def get_build_queue(queuename, session=None):
- """
-    Returns BuildQueue object for given C{queue name}.
-
- @type queuename: string
- @param queuename: The name of the queue
-
- @type session: Session
- @param session: Optional SQLA session object (a temporary one will be
- generated if not supplied)
-
- @rtype: BuildQueue
-    @return: BuildQueue object for the given queue, or None if no such queue exists
- """
-
- q = session.query(BuildQueue).filter_by(queue_name=queuename)
-
- try:
- return q.one()
- except NoResultFound:
- return None
-
-__all__.append('get_build_queue')
-
-################################################################################
-
-class BuildQueueFile(object):
- """
- BuildQueueFile represents a file in a build queue coming from a pool.
- """
-
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
-
- @property
- def fullpath(self):
- return os.path.join(self.buildqueue.path, self.filename)
-
-
-__all__.append('BuildQueueFile')
-
-################################################################################
-
-class BuildQueuePolicyFile(object):
- """
- BuildQueuePolicyFile represents a file in a build queue that comes from a
- policy queue (and not a pool).
- """
-
- def __init__(self, *args, **kwargs):
- pass
-
- #@property
- #def filename(self):
- # return self.file.filename
-
- @property
- def fullpath(self):
- return os.path.join(self.build_queue.path, self.filename)
-
-__all__.append('BuildQueuePolicyFile')
-
-################################################################################
-
-class ChangePendingBinary(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<ChangePendingBinary %s>' % self.change_pending_binary_id
-
-__all__.append('ChangePendingBinary')
-
-################################################################################
-
-class ChangePendingFile(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<ChangePendingFile %s>' % self.change_pending_file_id
-
- def identical_to(self, filename):
- """
- compare size and hash with the given file
-
- @rtype: bool
-        @return: C{True} if the given file has the same size and hash as this object; C{False} otherwise
- """
- st = os.stat(filename)
- if self.size != st.st_size:
- return False
-
- f = open(filename, "r")
- sha256sum = apt_pkg.sha256sum(f)
- if sha256sum != self.sha256sum:
- return False
-
- return True
-
-__all__.append('ChangePendingFile')
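The check compares the cheap size first and only hashes the file when the sizes agree. An equivalent, self-contained version using hashlib instead of apt_pkg, for illustration:

    import hashlib
    import os

    def identical_to(expected_size, expected_sha256, filename):
        # A size mismatch is cheap to detect, so test it before hashing.
        if os.stat(filename).st_size != expected_size:
            return False
        with open(filename, 'rb') as f:
            return hashlib.sha256(f.read()).hexdigest() == expected_sha256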
-
-################################################################################
-
-class ChangePendingSource(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<ChangePendingSource %s>' % self.change_pending_source_id
-
-__all__.append('ChangePendingSource')
-
################################################################################
class Component(ORMObject):
def properties(self):
return ['component_name', 'component_id', 'description', \
- 'location_count', 'meets_dfsg', 'overrides_count']
+ 'meets_dfsg', 'overrides_count']
def not_null_constraints(self):
return ['component_name']
__all__.append('get_component')
+@session_wrapper
+def get_mapped_component(component_name, session=None):
+ """get component after mappings
+
+ Evaluate component mappings from ComponentMappings in dak.conf for the
+ given component name.
+
+ @todo: ansgar wants to get rid of this. It's currently only used for
+ the security archive
+
+ @type component_name: str
+ @param component_name: component name
+
+ @param session: database session
+
+ @rtype: L{daklib.dbconn.Component} or C{None}
+ @return: component after applying maps or C{None}
+ """
+ cnf = Config()
+ for m in cnf.value_list("ComponentMappings"):
+ (src, dst) = m.split()
+ if component_name == src:
+ component_name = dst
+ component = session.query(Component).filter_by(component_name=component_name).first()
+ return component
+
+__all__.append('get_mapped_component')
+
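For illustration, a minimal, self-contained sketch of the mapping step in isolation (no database lookup); each mapping string follows the "source destination" format that get_mapped_component() reads from the ComponentMappings list in dak.conf, and the example values are hypothetical:

    def apply_component_mappings(component_name, mappings):
        # A later entry may rewrite the result of an earlier one, exactly
        # like the loop in get_mapped_component() above.
        for m in mappings:
            (src, dst) = m.split()
            if component_name == src:
                component_name = dst
        return component_name

    # Hypothetical mappings as they might appear in ComponentMappings:
    assert apply_component_mappings('contrib', ['contrib main']) == 'main'
    assert apply_component_mappings('non-free', []) == 'non-free'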
@session_wrapper
def get_component_names(session=None):
"""
################################################################################
class PoolFile(ORMObject):
- def __init__(self, filename = None, location = None, filesize = -1, \
+ def __init__(self, filename = None, filesize = -1, \
md5sum = None):
self.filename = filename
- self.location = location
self.filesize = filesize
self.md5sum = md5sum
@property
def fullpath(self):
- return os.path.join(self.location.path, self.filename)
+ session = DBConn().session().object_session(self)
+ af = session.query(ArchiveFile).join(Archive).filter(ArchiveFile.file == self).first()
+ return af.path
+
+ @property
+ def component(self):
+ session = DBConn().session().object_session(self)
+ component_id = session.query(ArchiveFile.component_id).filter(ArchiveFile.file == self) \
+ .group_by(ArchiveFile.component_id).one()
+ return session.query(Component).get(component_id)
@property
def basename(self):
def properties(self):
return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
- 'sha256sum', 'location', 'source', 'binary', 'last_used']
+ 'sha256sum', 'source', 'binary', 'last_used']
def not_null_constraints(self):
- return ['filename', 'md5sum', 'location']
+ return ['filename', 'md5sum']
def identical_to(self, filename):
"""
__all__.append('PoolFile')
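With the location table gone, a pool file's path is resolved through its files_archive_map rows, as the fullpath property above shows. A hedged sketch of the same lookup done explicitly, using the ORM names mapped later in this change; session stands for an open daklib session and poolfile for a PoolFile instance:

    # Pick the copy of the file that lives in a particular archive.
    # af.path is assumed to expand to <archive root>/pool/<component>/<filename>,
    # matching what PoolFile.fullpath returns above.
    af = session.query(ArchiveFile).join(Archive) \
                .filter(ArchiveFile.file == poolfile) \
                .filter(Archive.archive_name == 'ftp-master') \
                .first()
    if af is not None:
        print af.path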
-@session_wrapper
-def check_poolfile(filename, filesize, md5sum, location_id, session=None):
- """
- Returns a tuple:
- (ValidFileFound [boolean], PoolFile object or None)
-
- @type filename: string
- @param filename: the filename of the file to check against the DB
-
- @type filesize: int
- @param filesize: the size of the file to check against the DB
-
- @type md5sum: string
- @param md5sum: the md5sum of the file to check against the DB
-
- @type location_id: int
- @param location_id: the id of the location to look in
-
- @rtype: tuple
- @return: Tuple of length 2.
- - If valid pool file found: (C{True}, C{PoolFile object})
- - If valid pool file not found:
- - (C{False}, C{None}) if no file found
- - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
- """
-
- poolfile = session.query(Location).get(location_id). \
- files.filter_by(filename=filename).first()
- valid = False
- if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
- valid = True
-
- return (valid, poolfile)
-
-__all__.append('check_poolfile')
-
-# TODO: the implementation can trivially be inlined at the place where the
-# function is called
-@session_wrapper
-def get_poolfile_by_id(file_id, session=None):
- """
- Returns a PoolFile objects or None for the given id
-
- @type file_id: int
- @param file_id: the id of the file to look for
-
- @rtype: PoolFile or None
- @return: either the PoolFile object or None
- """
-
- return session.query(PoolFile).get(file_id)
-
-__all__.append('get_poolfile_by_id')
-
@session_wrapper
def get_poolfile_like_name(filename, session=None):
"""
__all__.append('get_poolfile_like_name')
-@session_wrapper
-def add_poolfile(filename, datadict, location_id, session=None):
- """
- Add a new file to the pool
-
- @type filename: string
- @param filename: filename
-
- @type datadict: dict
- @param datadict: dict with needed data
-
- @type location_id: int
- @param location_id: database id of the location
-
- @rtype: PoolFile
- @return: the PoolFile object created
- """
- poolfile = PoolFile()
- poolfile.filename = filename
- poolfile.filesize = datadict["size"]
- poolfile.md5sum = datadict["md5sum"]
- poolfile.sha1sum = datadict["sha1sum"]
- poolfile.sha256sum = datadict["sha256sum"]
- poolfile.location_id = location_id
-
- session.add(poolfile)
- # Flush to get a file id (NB: This is not a commit)
- session.flush()
-
- return poolfile
-
-__all__.append('add_poolfile')
-
################################################################################
class Fingerprint(ORMObject):
def __repr__(self):
return '<DBChange %s>' % self.changesname
- def clean_from_queue(self):
- session = DBConn().session().object_session(self)
-
- # Remove changes_pool_files entries
- self.poolfiles = []
-
- # Remove changes_pending_files references
- self.files = []
-
- # Clear out of queue
- self.in_queue = None
- self.approved_for_id = None
-
__all__.append('DBChange')
@session_wrapper
################################################################################
-class Location(ORMObject):
- def __init__(self, path = None, component = None):
- self.path = path
- self.component = component
- # the column 'type' should go away, see comment at mapper
- self.archive_type = 'pool'
-
- def properties(self):
- return ['path', 'location_id', 'archive_type', 'component', \
- 'files_count']
-
- def not_null_constraints(self):
- return ['path', 'archive_type']
-
-__all__.append('Location')
-
-@session_wrapper
-def get_location(location, component=None, archive=None, session=None):
- """
- Returns Location object for the given combination of location, component
- and archive
-
- @type location: string
- @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
-
- @type component: string
- @param component: the component name (if None, no restriction applied)
-
- @type archive: string
- @param archive: the archive name (if None, no restriction applied)
-
- @rtype: Location / None
- @return: Either a Location object or None if one can't be found
- """
-
- q = session.query(Location).filter_by(path=location)
-
- if archive is not None:
- q = q.join(Archive).filter_by(archive_name=archive)
-
- if component is not None:
- q = q.join(Component).filter_by(component_name=component)
-
- try:
- return q.one()
- except NoResultFound:
- return None
-
-__all__.append('get_location')
-
-################################################################################
-
class Maintainer(ORMObject):
def __init__(self, name = None):
self.name = name
__all__.append('get_policy_queue')
-@session_wrapper
-def get_policy_queue_from_path(pathname, session=None):
- """
- Returns PolicyQueue object for given C{path name}
-
-    @type pathname: string
-    @param pathname: the path of the policy queue
-
- @type session: Session
- @param session: Optional SQLA session object (a temporary one will be
- generated if not supplied)
+################################################################################
- @rtype: PolicyQueue
-    @return: PolicyQueue object for the given path
- """
+class PolicyQueueUpload(object):
+ def __cmp__(self, other):
+ ret = cmp(self.changes.source, other.changes.source)
+ if ret == 0:
+ ret = apt_pkg.version_compare(self.changes.version, other.changes.version)
+ if ret == 0:
+ if self.source is not None and other.source is None:
+ ret = -1
+ elif self.source is None and other.source is not None:
+ ret = 1
+ if ret == 0:
+ ret = cmp(self.changes.changesname, other.changes.changesname)
+ return ret
+
+__all__.append('PolicyQueueUpload')
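The comparator above orders uploads by source package, then by version (using apt's version comparison), puts uploads with source ahead of binary-only ones, and finally falls back to the .changes file name. A self-contained illustration of the same ordering, with plain dicts standing in for PolicyQueueUpload objects:

    import apt_pkg
    apt_pkg.init_system()

    def upload_cmp(a, b):
        ret = cmp(a['source'], b['source'])
        if ret == 0:
            ret = apt_pkg.version_compare(a['version'], b['version'])
        if ret == 0:
            # an upload with source sorts before a binary-only upload
            ret = cmp(not a['has_source'], not b['has_source'])
        if ret == 0:
            ret = cmp(a['changesname'], b['changesname'])
        return ret

    uploads = [
        dict(source='dak', version='1.0-2', has_source=False,
             changesname='dak_1.0-2_amd64.changes'),
        dict(source='dak', version='1.0-1', has_source=True,
             changesname='dak_1.0-1_source.changes'),
    ]
    uploads.sort(cmp=upload_cmp)  # dak 1.0-1 now sorts before 1.0-2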
- q = session.query(PolicyQueue).filter_by(path=pathname)
+################################################################################
- try:
- return q.one()
- except NoResultFound:
- return None
+class PolicyQueueByhandFile(object):
+ pass
-__all__.append('get_policy_queue_from_path')
+__all__.append('PolicyQueueByhandFile')
################################################################################
metadata = association_proxy('key', 'value')
- def get_component_name(self):
- return self.poolfile.location.component.component_name
-
def scan_contents(self):
'''
        Returns a set of names for non-directories. The path names are
__all__.append('import_metadata_into_db')
-
-################################################################################
-
-def split_uploaders(uploaders_list):
- '''
- Split the Uploaders field into the individual uploaders and yield each of
- them. Beware: email addresses might contain commas.
- '''
- import re
- for uploader in re.sub(">[ ]*,", ">\t", uploaders_list).split("\t"):
- yield uploader.strip()
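A worked example of the comma-aware split: the regex only breaks on commas that follow a closing angle bracket, so a comma inside a display name survives intact:

    list(split_uploaders('A Dev <a@example.org>, Smith, Jane <jane@example.org>'))
    # -> ['A Dev <a@example.org>', 'Smith, Jane <jane@example.org>']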
-
-@session_wrapper
-def add_dsc_to_db(u, filename, session=None):
- entry = u.pkg.files[filename]
- source = DBSource()
- pfs = []
-
- source.source = u.pkg.dsc["source"]
- source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
- source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
- # If Changed-By isn't available, fall back to maintainer
- if u.pkg.changes.has_key("changed-by"):
- source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
- else:
- source.changedby_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
- source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
- source.install_date = datetime.now().date()
-
- dsc_component = entry["component"]
- dsc_location_id = entry["location id"]
-
- source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
-
- # Set up a new poolfile if necessary
- if not entry.has_key("files id") or not entry["files id"]:
- filename = entry["pool name"] + filename
- poolfile = add_poolfile(filename, entry, dsc_location_id, session)
- session.flush()
- pfs.append(poolfile)
- entry["files id"] = poolfile.file_id
-
- source.poolfile_id = entry["files id"]
- session.add(source)
-
- suite_names = u.pkg.changes["distribution"].keys()
- source.suites = session.query(Suite). \
- filter(Suite.suite_name.in_(suite_names)).all()
-
- # Add the source files to the DB (files and dsc_files)
- dscfile = DSCFile()
- dscfile.source_id = source.source_id
- dscfile.poolfile_id = entry["files id"]
- session.add(dscfile)
-
- for dsc_file, dentry in u.pkg.dsc_files.items():
- df = DSCFile()
- df.source_id = source.source_id
-
-        # If the .orig tarball is already in the pool, its
- # files id is stored in dsc_files by check_dsc().
- files_id = dentry.get("files id", None)
-
- # Find the entry in the files hash
- # TODO: Bail out here properly
- dfentry = None
- for f, e in u.pkg.files.items():
- if f == dsc_file:
- dfentry = e
- break
-
- if files_id is None:
- filename = dfentry["pool name"] + dsc_file
-
- (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
-            # FIXME: needs to check for -1/-2 and/or handle exception
- if found and obj is not None:
- files_id = obj.file_id
- pfs.append(obj)
-
- # If still not found, add it
- if files_id is None:
- # HACK: Force sha1sum etc into dentry
- dentry["sha1sum"] = dfentry["sha1sum"]
- dentry["sha256sum"] = dfentry["sha256sum"]
- poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
- pfs.append(poolfile)
- files_id = poolfile.file_id
- else:
- poolfile = get_poolfile_by_id(files_id, session)
- if poolfile is None:
- utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
- pfs.append(poolfile)
-
- df.poolfile_id = files_id
- session.add(df)
-
- # Add the src_uploaders to the DB
- session.flush()
- session.refresh(source)
- source.uploaders = [source.maintainer]
- if u.pkg.dsc.has_key("uploaders"):
- for up in split_uploaders(u.pkg.dsc["uploaders"]):
- source.uploaders.append(get_or_set_maintainer(up, session))
-
- session.flush()
-
- return source, dsc_component, dsc_location_id, pfs
-
-__all__.append('add_dsc_to_db')
-
-@session_wrapper
-def add_deb_to_db(u, filename, session=None):
- """
- Contrary to what you might expect, this routine deals with both
- debs and udebs. That info is in 'dbtype', whilst 'type' is
- 'deb' for both of them
- """
- cnf = Config()
- entry = u.pkg.files[filename]
-
- bin = DBBinary()
- bin.package = entry["package"]
- bin.version = entry["version"]
- bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
- bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
- bin.arch_id = get_architecture(entry["architecture"], session).arch_id
- bin.binarytype = entry["dbtype"]
-
- # Find poolfile id
- filename = entry["pool name"] + filename
- fullpath = os.path.join(cnf["Dir::Pool"], filename)
- if not entry.get("location id", None):
- entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
-
- if entry.get("files id", None):
-        bin.poolfile_id = entry["files id"]
-        poolfile = get_poolfile_by_id(bin.poolfile_id)
- else:
- poolfile = add_poolfile(filename, entry, entry["location id"], session)
- bin.poolfile_id = entry["files id"] = poolfile.file_id
-
- # Find source id
- bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
-
- # If we couldn't find anything and the upload contains Arch: source,
- # fall back to trying the source package, source version uploaded
- # This maintains backwards compatibility with previous dak behaviour
- # and deals with slightly broken binary debs which don't properly
- # declare their source package name
- if len(bin_sources) == 0:
- if u.pkg.changes["architecture"].has_key("source") \
- and u.pkg.dsc.has_key("source") and u.pkg.dsc.has_key("version"):
- bin_sources = get_sources_from_name(u.pkg.dsc["source"], u.pkg.dsc["version"], session=session)
-
- # If we couldn't find a source here, we reject
- # TODO: Fix this so that it doesn't kill process-upload and instead just
- # performs a reject. To be honest, we should probably spot this
- # *much* earlier than here
- if len(bin_sources) != 1:
- raise NoSourceFieldError("Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
- (bin.package, bin.version, entry["architecture"],
- filename, bin.binarytype, u.pkg.changes["fingerprint"]))
-
- bin.source_id = bin_sources[0].source_id
-
- if entry.has_key("built-using"):
- for srcname, version in entry["built-using"]:
- exsources = get_sources_from_name(srcname, version, session=session)
- if len(exsources) != 1:
- raise NoSourceFieldError("Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
- (srcname, version, bin.package, bin.version, entry["architecture"],
- filename, bin.binarytype, u.pkg.changes["fingerprint"]))
-
- bin.extra_sources.append(exsources[0])
-
- # Add and flush object so it has an ID
- session.add(bin)
-
- suite_names = u.pkg.changes["distribution"].keys()
- bin.suites = session.query(Suite). \
- filter(Suite.suite_name.in_(suite_names)).all()
-
- session.flush()
-
- # Deal with contents - disabled for now
- #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
- #if not contents:
- # print "REJECT\nCould not determine contents of package %s" % bin.package
- # session.rollback()
- # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
-
- return bin, poolfile
-
-__all__.append('add_deb_to_db')
-
################################################################################
class SourceACL(object):
'binary_acl',
'binary_acl_map',
'build_queue',
- 'build_queue_files',
- 'build_queue_policy_files',
'changelogs_text',
'changes',
'component',
'config',
- 'changes_pending_binaries',
- 'changes_pending_files',
- 'changes_pending_source',
- 'changes_pending_files_map',
- 'changes_pending_source_files',
- 'changes_pool_files',
'dsc_files',
'external_overrides',
'extra_src_references',
'files',
+ 'files_archive_map',
'fingerprint',
'keyrings',
'keyring_acl_map',
- 'location',
'maintainer',
'metadata_keys',
'new_comments',
'override',
'override_type',
'policy_queue',
+ 'policy_queue_upload',
+ 'policy_queue_upload_binaries_map',
+ 'policy_queue_byhand_file',
'priority',
'section',
'source',
'any_associations_source',
'bin_associations_binaries',
'binaries_suite_arch',
- 'binfiles_suite_component_arch',
'changelogs',
'file_arch_suite',
'newest_all_associations',
properties = dict(archive_id = self.tbl_archive.c.id,
archive_name = self.tbl_archive.c.name))
- mapper(BuildQueue, self.tbl_build_queue,
- properties = dict(queue_id = self.tbl_build_queue.c.id))
-
- mapper(BuildQueueFile, self.tbl_build_queue_files,
- properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
- poolfile = relation(PoolFile, backref='buildqueueinstances')))
+ mapper(ArchiveFile, self.tbl_files_archive_map,
+ properties = dict(archive = relation(Archive, backref='files'),
+ component = relation(Component),
+ file = relation(PoolFile, backref='archives')))
- mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
- properties = dict(
- build_queue = relation(BuildQueue, backref='policy_queue_files'),
- file = relation(ChangePendingFile, lazy='joined')))
+ mapper(BuildQueue, self.tbl_build_queue,
+ properties = dict(queue_id = self.tbl_build_queue.c.id,
+ suite = relation(Suite, primaryjoin=(self.tbl_build_queue.c.suite_id==self.tbl_suite.c.id))))
mapper(DBBinary, self.tbl_binaries,
properties = dict(binary_id = self.tbl_binaries.c.id,
arch_id = self.tbl_binaries.c.architecture,
architecture = relation(Architecture),
poolfile_id = self.tbl_binaries.c.file,
- poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
+ poolfile = relation(PoolFile),
binarytype = self.tbl_binaries.c.type,
fingerprint_id = self.tbl_binaries.c.sig_fpr,
fingerprint = relation(Fingerprint),
mapper(PoolFile, self.tbl_files,
properties = dict(file_id = self.tbl_files.c.id,
- filesize = self.tbl_files.c.size,
- location_id = self.tbl_files.c.location,
- location = relation(Location,
- # using lazy='dynamic' in the back
- # reference because we have A LOT of
- # files in one location
- backref=backref('files', lazy='dynamic'))),
+ filesize = self.tbl_files.c.size),
extension = validator)
mapper(Fingerprint, self.tbl_fingerprint,
mapper(DBChange, self.tbl_changes,
properties = dict(change_id = self.tbl_changes.c.id,
- poolfiles = relation(PoolFile,
- secondary=self.tbl_changes_pool_files,
- backref="changeslinks"),
seen = self.tbl_changes.c.seen,
source = self.tbl_changes.c.source,
binaries = self.tbl_changes.c.binaries,
maintainer = self.tbl_changes.c.maintainer,
changedby = self.tbl_changes.c.changedby,
date = self.tbl_changes.c.date,
- version = self.tbl_changes.c.version,
- files = relation(ChangePendingFile,
- secondary=self.tbl_changes_pending_files_map,
- backref="changesfile"),
- in_queue_id = self.tbl_changes.c.in_queue,
- in_queue = relation(PolicyQueue,
- primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
- approved_for_id = self.tbl_changes.c.approved_for))
-
- mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
- properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
-
- mapper(ChangePendingFile, self.tbl_changes_pending_files,
- properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
- filename = self.tbl_changes_pending_files.c.filename,
- size = self.tbl_changes_pending_files.c.size,
- md5sum = self.tbl_changes_pending_files.c.md5sum,
- sha1sum = self.tbl_changes_pending_files.c.sha1sum,
- sha256sum = self.tbl_changes_pending_files.c.sha256sum))
-
- mapper(ChangePendingSource, self.tbl_changes_pending_source,
- properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
- change = relation(DBChange),
- maintainer = relation(Maintainer,
- primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
- changedby = relation(Maintainer,
- primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
- fingerprint = relation(Fingerprint),
- source_files = relation(ChangePendingFile,
- secondary=self.tbl_changes_pending_source_files,
- backref="pending_sources")))
-
+ version = self.tbl_changes.c.version))
mapper(KeyringACLMap, self.tbl_keyring_acl_map,
properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
keyring = relation(Keyring, backref="keyring_acl_map"),
architecture = relation(Architecture)))
- mapper(Location, self.tbl_location,
- properties = dict(location_id = self.tbl_location.c.id,
- component_id = self.tbl_location.c.component,
- component = relation(Component, backref='location'),
- archive_id = self.tbl_location.c.archive,
- archive = relation(Archive),
- # FIXME: the 'type' column is old cruft and
- # should be removed in the future.
- archive_type = self.tbl_location.c.type),
- extension = validator)
-
mapper(Maintainer, self.tbl_maintainer,
properties = dict(maintainer_id = self.tbl_maintainer.c.id,
maintains_sources = relation(DBSource, backref='maintainer',
overridetype_id = self.tbl_override_type.c.id))
mapper(PolicyQueue, self.tbl_policy_queue,
- properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+ properties = dict(policy_queue_id = self.tbl_policy_queue.c.id,
+ suite = relation(Suite, primaryjoin=(self.tbl_policy_queue.c.suite_id == self.tbl_suite.c.id))))
+
+ mapper(PolicyQueueUpload, self.tbl_policy_queue_upload,
+ properties = dict(
+ changes = relation(DBChange),
+ policy_queue = relation(PolicyQueue, backref='uploads'),
+ target_suite = relation(Suite),
+ source = relation(DBSource),
+ binaries = relation(DBBinary, secondary=self.tbl_policy_queue_upload_binaries_map),
+ ))
+
+ mapper(PolicyQueueByhandFile, self.tbl_policy_queue_byhand_file,
+ properties = dict(
+ upload = relation(PolicyQueueUpload, backref='byhand'),
+ )
+ )
mapper(Priority, self.tbl_priority,
properties = dict(priority_id = self.tbl_priority.c.id))
version = self.tbl_source.c.version,
maintainer_id = self.tbl_source.c.maintainer,
poolfile_id = self.tbl_source.c.file,
- poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
+ poolfile = relation(PoolFile),
fingerprint_id = self.tbl_source.c.sig_fpr,
fingerprint = relation(Fingerprint),
changedby_id = self.tbl_source.c.changedby,
mapper(Suite, self.tbl_suite,
properties = dict(suite_id = self.tbl_suite.c.id,
- policy_queue = relation(PolicyQueue),
+ policy_queue = relation(PolicyQueue, primaryjoin=(self.tbl_suite.c.policy_queue_id == self.tbl_policy_queue.c.id)),
copy_queues = relation(BuildQueue,
secondary=self.tbl_suite_build_queue_copy),
srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
self.uncompressed = 'none' in compression
self.gzip = 'gzip' in compression
self.bzip2 = 'bzip2' in compression
- root_dir = Config()['Dir::Root']
- relative_dir = template % keywords
- self.path = os.path.join(root_dir, relative_dir)
+ self.path = template % keywords
def open(self):
'''
}
flags.update(keywords)
if flags['debtype'] == 'deb':
- template = "dists/%(suite)s/%(component)s/Contents-%(architecture)s"
+ template = "%(archive)s/dists/%(suite)s/%(component)s/Contents-%(architecture)s"
else: # udeb
- template = "dists/%(suite)s/%(component)s/Contents-udeb-%(architecture)s"
+ template = "%(archive)s/dists/%(suite)s/%(component)s/Contents-udeb-%(architecture)s"
BaseFileWriter.__init__(self, template, **flags)
class SourceContentsFileWriter(BaseFileWriter):
'compression': ['gzip'],
}
flags.update(keywords)
- template = "dists/%(suite)s/%(component)s/Contents-source"
+ template = "%(archive)s/dists/%(suite)s/%(component)s/Contents-source"
BaseFileWriter.__init__(self, template, **flags)
class PackagesFileWriter(BaseFileWriter):
}
flags.update(keywords)
if flags['debtype'] == 'deb':
- template = "dists/%(suite)s/%(component)s/binary-%(architecture)s/Packages"
+ template = "%(archive)s/dists/%(suite)s/%(component)s/binary-%(architecture)s/Packages"
else: # udeb
- template = "dists/%(suite)s/%(component)s/debian-installer/binary-%(architecture)s/Packages"
+ template = "%(archive)s/dists/%(suite)s/%(component)s/debian-installer/binary-%(architecture)s/Packages"
BaseFileWriter.__init__(self, template, **flags)
class SourcesFileWriter(BaseFileWriter):
'compression': ['gzip', 'bzip2'],
}
flags.update(keywords)
- template = "dists/%(suite)s/%(component)s/source/Sources"
+ template = "%(archive)s/dists/%(suite)s/%(component)s/source/Sources"
BaseFileWriter.__init__(self, template, **flags)
class TranslationFileWriter(BaseFileWriter):
'language': 'en',
}
flags.update(keywords)
- template = "dists/%(suite)s/%(component)s/i18n/Translation-%(language)s"
+ template = "%(archive)s/dists/%(suite)s/%(component)s/i18n/Translation-%(language)s"
super(TranslationFileWriter, self).__init__(template, **flags)
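The writers now receive the archive root through the new %(archive)s keyword instead of prepending Dir::Root, so one writer class can serve several archives. A quick sketch of how such a template expands; the keyword values are examples:

    template = "%(archive)s/dists/%(suite)s/%(component)s/binary-%(architecture)s/Packages"
    keywords = {
        'archive': '/srv/ftp-master.debian.org/ftp',
        'suite': 'unstable',
        'component': 'main',
        'architecture': 'amd64',
    }
    # self.path = template % keywords, as in BaseFileWriter above:
    print template % keywords
    # /srv/ftp-master.debian.org/ftp/dists/unstable/main/binary-amd64/Packages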
"""module to process policy queue uploads"""
from .config import Config
-from .dbconn import BinaryMetadata, Component, MetadataKey, Override, OverrideType
+from .dbconn import BinaryMetadata, Component, MetadataKey, Override, OverrideType, get_mapped_component
from .fstransactions import FilesystemTransaction
from .regexes import re_file_changes, re_file_safe
+import daklib.utils as utils
import errno
import os
self.directory = None
self.upload = upload
- def export(self, directory, mode=None, symlink=True):
+ def export(self, directory, mode=None, symlink=True, ignore_existing=False):
"""export a copy of the upload
@type directory: str
@type symlink: bool
@param symlink: use symlinks instead of copying the files
+
+ @type ignore_existing: bool
+ @param ignore_existing: ignore already existing files
"""
with FilesystemTransaction() as fs:
source = self.upload.source
for dsc_file in source.srcfiles:
f = dsc_file.poolfile
dst = os.path.join(directory, os.path.basename(f.filename))
- fs.copy(f.fullpath, dst, mode=mode, symlink=symlink)
+ if not os.path.exists(dst) or not ignore_existing:
+ fs.copy(f.fullpath, dst, mode=mode, symlink=symlink)
+
for binary in self.upload.binaries:
f = binary.poolfile
dst = os.path.join(directory, os.path.basename(f.filename))
- fs.copy(f.fullpath, dst, mode=mode, symlink=symlink)
+ if not os.path.exists(dst) or not ignore_existing:
+ fs.copy(f.fullpath, dst, mode=mode, symlink=symlink)
# copy byhand files
for byhand in self.upload.byhand:
src = os.path.join(queue.path, byhand.filename)
dst = os.path.join(directory, byhand.filename)
- fs.copy(src, dst, mode=mode, symlink=symlink)
+ if not os.path.exists(dst) or not ignore_existing:
+ fs.copy(src, dst, mode=mode, symlink=symlink)
# copy .changes
src = os.path.join(queue.path, self.upload.changes.changesname)
dst = os.path.join(directory, self.upload.changes.changesname)
- fs.copy(src, dst, mode=mode, symlink=symlink)
+ if not os.path.exists(dst) or not ignore_existing:
+ fs.copy(src, dst, mode=mode, symlink=symlink)
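A hedged usage sketch for the new flag: re-exporting an upload into a directory that may already hold some of its files. The class name UploadCopy and the upload object are assumptions; the target path is an example:

    copy = UploadCopy(upload)  # upload: a PolicyQueueUpload instance
    copy.export('/srv/queue/export', symlink=False, ignore_existing=True)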
def __enter__(self):
assert self.directory is None
def _source_override(self, component_name):
package = self.upload.source.source
suite = self._overridesuite
+ component = get_mapped_component(component_name, self.session)
query = self.session.query(Override).filter_by(package=package, suite=suite) \
.join(OverrideType).filter(OverrideType.overridetype == 'dsc') \
- .join(Component).filter(Component.component_name == component_name)
+ .filter(Override.component == component)
return query.first()
def _binary_override(self, binary, component_name):
package = binary.package
suite = self._overridesuite
overridetype = binary.binarytype
+ component = get_mapped_component(component_name, self.session)
query = self.session.query(Override).filter_by(package=package, suite=suite) \
.join(OverrideType).filter(OverrideType.overridetype == overridetype) \
- .join(Component).filter(Component.component_name == component_name)
+ .filter(Override.component == component)
return query.first()
def _binary_metadata(self, binary, key):
@type reason: str
@param reason: reason for the rejection
"""
+ cnf = Config()
+
fn1 = 'REJECT.{0}'.format(self._changes_prefix)
assert re_file_safe.match(fn1)
try:
fh = os.open(fn, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
os.write(fh, 'NOTOK\n')
+ os.write(fh, 'From: {0} <{1}>\n\n'.format(utils.whoami(), cnf['Dinstall::MyAdminAddress']))
os.write(fh, reason)
os.close(fh)
except OSError as e:
from lintian import parse_lintian_output, generate_reject_messages
from contents import UnpackedSource
-###############################################################################
-
-def get_type(f, session):
- """
- Get the file type of C{f}
-
- @type f: dict
- @param f: file entry from Changes object
-
- @type session: SQLA Session
- @param session: SQL Alchemy session object
-
- @rtype: string
- @return: filetype
-
- """
- # Determine the type
- if f.has_key("dbtype"):
- file_type = f["dbtype"]
- elif re_source_ext.match(f["type"]):
- file_type = "dsc"
- elif f['architecture'] == 'source' and f["type"] == 'unreadable':
- utils.warn('unreadable source file (will continue and hope for the best)')
- return f["type"]
- else:
- file_type = f["type"]
- utils.fubar("invalid type (%s) for new. Dazed, confused and sure as heck not continuing." % (file_type))
-
- # Validate the override type
- type_id = get_override_type(file_type, session)
- if type_id is None:
- utils.fubar("invalid type (%s) for new. Say wha?" % (file_type))
-
- return file_type
-
################################################################################
-# Determine what parts in a .changes are NEW
-
-def determine_new(filename, changes, files, warn=1, session = None, dsc = None, new = None):
- """
- Determine what parts in a C{changes} file are NEW.
-
- @type filename: str
- @param filename: changes filename
-
- @type changes: Upload.Pkg.changes dict
- @param changes: Changes dictionary
-
- @type files: Upload.Pkg.files dict
- @param files: Files dictionary
-
- @type warn: bool
- @param warn: Warn if overrides are added for (old)stable
-
- @type dsc: Upload.Pkg.dsc dict
- @param dsc: (optional); Dsc dictionary
-
- @type new: dict
- @param new: new packages as returned by a previous call to this function, but override information may have changed
-
- @rtype: dict
- @return: dictionary of NEW components.
-
- """
- # TODO: This should all use the database instead of parsing the changes
- # file again
- byhand = {}
- if new is None:
- new = {}
-
- dbchg = get_dbchange(filename, session)
- if dbchg is None:
- print "Warning: cannot find changes file in database; won't check byhand"
-
- # Try to get the Package-Set field from an included .dsc file (if possible).
- if dsc:
- for package, entry in build_package_list(dsc, session).items():
- if package not in new:
- new[package] = entry
-
- # Build up a list of potentially new things
- for name, f in files.items():
- # Keep a record of byhand elements
- if f["section"] == "byhand":
- byhand[name] = 1
- continue
-
- pkg = f["package"]
- priority = f["priority"]
- section = f["section"]
- file_type = get_type(f, session)
- component = f["component"]
-
- if file_type == "dsc":
- priority = "source"
-
- if not new.has_key(pkg):
- new[pkg] = {}
- new[pkg]["priority"] = priority
- new[pkg]["section"] = section
- new[pkg]["type"] = file_type
- new[pkg]["component"] = component
- new[pkg]["files"] = []
- else:
- old_type = new[pkg]["type"]
- if old_type != file_type:
- # source gets trumped by deb or udeb
- if old_type == "dsc":
- new[pkg]["priority"] = priority
- new[pkg]["section"] = section
- new[pkg]["type"] = file_type
- new[pkg]["component"] = component
-
- new[pkg]["files"].append(name)
-
- if f.has_key("othercomponents"):
- new[pkg]["othercomponents"] = f["othercomponents"]
-
- # Fix up the list of target suites
- cnf = Config()
- for suite in changes["suite"].keys():
- oldsuite = get_suite(suite, session)
- if not oldsuite:
- print "WARNING: Invalid suite %s found" % suite
- continue
-
- if oldsuite.overridesuite:
- newsuite = get_suite(oldsuite.overridesuite, session)
-
- if newsuite:
- print "INFORMATION: Using overrides from suite %s instead of suite %s" % (
- oldsuite.overridesuite, suite)
- del changes["suite"][suite]
- changes["suite"][oldsuite.overridesuite] = 1
- else:
- print "WARNING: Told to use overridesuite %s for %s but it doesn't exist. Bugger" % (
- oldsuite.overridesuite, suite)
-
- # Check for unprocessed byhand files
- if dbchg is not None:
- for b in byhand.keys():
- # Find the file entry in the database
- found = False
- for f in dbchg.files:
- if f.filename == b:
- found = True
- # If it's processed, we can ignore it
- if f.processed:
- del byhand[b]
- break
-
- if not found:
-            print "Warning: Couldn't find BYHAND item %s in the database; assuming unprocessed" % (b)
-
- # Check for new stuff
- for suite in changes["suite"].keys():
- for pkg in new.keys():
- ql = get_override(pkg, suite, new[pkg]["component"], new[pkg]["type"], session)
- if len(ql) > 0:
- for file_entry in new[pkg]["files"]:
- if files[file_entry].has_key("new"):
- del files[file_entry]["new"]
- del new[pkg]
-
- if warn:
- for s in ['stable', 'oldstable']:
- if changes["suite"].has_key(s):
- print "WARNING: overrides will be added for %s!" % s
- for pkg in new.keys():
- if new[pkg].has_key("othercomponents"):
- print "WARNING: %s already present in %s distribution." % (pkg, new[pkg]["othercomponents"])
-
- return new, byhand
-
-################################################################################
+def check_valid(overrides, session):
+    """Check if the section and priority for new overrides exist in the database.
-def check_valid(new, session = None):
- """
- Check if section and priority for NEW packages exist in database.
Additionally does sanity checks:
- debian-installer packages have to be udeb (or source)
- - non debian-installer packages can not be udeb
- - source priority can only be assigned to dsc file types
+ - non debian-installer packages cannot be udeb
- @type new: dict
- @param new: Dict of new packages with their section, priority and type.
-
- """
- for pkg in new.keys():
- section_name = new[pkg]["section"]
- priority_name = new[pkg]["priority"]
- file_type = new[pkg]["type"]
-
- section = get_section(section_name, session)
- if section is None:
- new[pkg]["section id"] = -1
- else:
- new[pkg]["section id"] = section.section_id
-
- priority = get_priority(priority_name, session)
- if priority is None:
- new[pkg]["priority id"] = -1
- else:
- new[pkg]["priority id"] = priority.priority_id
+ @type overrides: list of dict
+ @param overrides: list of overrides to check. The overrides need
+ to be given in form of a dict with the following keys:
- # Sanity checks
- di = section_name.find("debian-installer") != -1
-
- # If d-i, we must be udeb and vice-versa
- if (di and file_type not in ("udeb", "dsc")) or \
- (not di and file_type == "udeb"):
- new[pkg]["section id"] = -1
-
- # If dsc we need to be source and vice-versa
-        if (priority_name == "source" and file_type != "dsc") or \
-           (priority_name != "source" and file_type == "dsc"):
- new[pkg]["priority id"] = -1
-
-###############################################################################
-
-# Used by Upload.check_timestamps
-class TarTime(object):
- def __init__(self, future_cutoff, past_cutoff):
- self.reset()
- self.future_cutoff = future_cutoff
- self.past_cutoff = past_cutoff
+ - package: package name
+ - priority
+ - section
+ - component
+ - type: type of requested override ('dsc', 'deb' or 'udeb')
- def reset(self):
- self.future_files = {}
- self.ancient_files = {}
+ All values are strings.
- def callback(self, member, data):
- if member.mtime > self.future_cutoff:
-            self.future_files[member.name] = member.mtime
-        if member.mtime < self.past_cutoff:
-            self.ancient_files[member.name] = member.mtime
+ @rtype: bool
+ @return: C{True} if all overrides are valid, C{False} if there is any
+ invalid override.
+ """
+ all_valid = True
+ for o in overrides:
+ o['valid'] = True
+ if session.query(Priority).filter_by(priority=o['priority']).first() is None:
+ o['valid'] = False
+ if session.query(Section).filter_by(section=o['section']).first() is None:
+ o['valid'] = False
+ if get_mapped_component(o['component'], session) is None:
+ o['valid'] = False
+ if o['type'] not in ('dsc', 'deb', 'udeb'):
+ raise Exception('Unknown override type {0}'.format(o['type']))
+ if o['type'] == 'udeb' and o['section'] != 'debian-installer':
+ o['valid'] = False
+ if o['section'] == 'debian-installer' and o['type'] not in ('dsc', 'udeb'):
+ o['valid'] = False
+ all_valid = all_valid and o['valid']
+ return all_valid
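A hypothetical call, showing the dict shape the docstring describes; check_valid() marks each override with a 'valid' key as a side effect, and session stands for an open daklib database session:

    overrides = [
        {'package': 'dak', 'priority': 'optional', 'section': 'admin',
         'component': 'main', 'type': 'deb'},
        {'package': 'dak-installer', 'priority': 'optional', 'section': 'admin',
         'component': 'main', 'type': 'udeb'},  # invalid: udeb outside debian-installer
    ]
    if not check_valid(overrides, session):
        rejected = [o['package'] for o in overrides if not o['valid']]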
###############################################################################
def prod_maintainer(notes, upload):
cnf = Config()
+ changes = upload.changes
# Here we prepare an editor and get them ready to prod...
(fd, temp_filename) = utils.temp_filename()
user_email_address = utils.whoami() + " <%s>" % (
cnf["Dinstall::MyAdminAddress"])
- Subst = upload.Subst
+ changed_by = changes.changedby or changes.maintainer
+ maintainer = changes.maintainer
+ maintainer_to = utils.mail_addresses_for_upload(maintainer, changed_by, changes.fingerprint)
+
+ Subst = {
+ '__SOURCE__': upload.changes.source,
+ '__CHANGES_FILENAME__': upload.changes.changesname,
+ '__MAINTAINER_TO__': ", ".join(maintainer_to),
+ }
Subst["__FROM_ADDRESS__"] = user_email_address
Subst["__PROD_MESSAGE__"] = prod_message
sys.exit(0)
comment = NewComment()
- comment.package = upload.pkg.changes["source"]
- comment.version = upload.pkg.changes["version"]
+ comment.package = upload.changes.source
+ comment.version = upload.changes.version
comment.comment = newnote
comment.author = utils.whoami()
comment.trainee = trainee
###########################################################################
- def reset (self):
- """ Reset a number of internal variables."""
-
- # Initialize the substitution template map
- cnf = Config()
- self.Subst = {}
- self.Subst["__ADMIN_ADDRESS__"] = cnf["Dinstall::MyAdminAddress"]
- if cnf.has_key("Dinstall::BugServer"):
- self.Subst["__BUG_SERVER__"] = cnf["Dinstall::BugServer"]
- self.Subst["__DISTRO__"] = cnf["Dinstall::MyDistribution"]
- self.Subst["__DAK_ADDRESS__"] = cnf["Dinstall::MyEmailAddress"]
-
- self.rejects = []
- self.warnings = []
- self.notes = []
-
- self.later_check_files = []
-
- self.pkg.reset()
-
- def package_info(self):
- """
- Format various messages from this Upload to send to the maintainer.
- """
-
- msgs = (
- ('Reject Reasons', self.rejects),
- ('Warnings', self.warnings),
- ('Notes', self.notes),
- )
-
- msg = ''
- for title, messages in msgs:
- if messages:
- msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
- msg += '\n\n'
-
- return msg
-
- ###########################################################################
def update_subst(self):
""" Set up the per-package template substitution mappings """
+ raise Exception('to be removed')
cnf = Config()
self.Subst["__VERSION__"] = self.pkg.changes.get("version", "Unknown")
self.Subst["__SUITE__"] = ", ".join(self.pkg.changes["distribution"])
- ###########################################################################
- def load_changes(self, filename):
- """
-        Load a changes file and set up a dictionary around it. Also checks for mandatory
- fields within.
-
- @type filename: string
- @param filename: Changes filename, full path.
-
- @rtype: boolean
- @return: whether the changes file was valid or not. We may want to
- reject even if this is True (see what gets put in self.rejects).
- This is simply to prevent us even trying things later which will
- fail because we couldn't properly parse the file.
- """
- Cnf = Config()
- self.pkg.changes_file = filename
-
- # Parse the .changes field into a dictionary
- try:
- self.pkg.changes.update(parse_changes(filename))
- except CantOpenError:
- self.rejects.append("%s: can't read file." % (filename))
- return False
- except ParseChangesError as line:
- self.rejects.append("%s: parse error, can't grok: %s." % (filename, line))
- return False
- except ChangesUnicodeError:
- self.rejects.append("%s: changes file not proper utf-8" % (filename))
- return False
-
- # Parse the Files field from the .changes into another dictionary
- try:
- self.pkg.files.update(utils.build_file_list(self.pkg.changes))
- except ParseChangesError as line:
- self.rejects.append("%s: parse error, can't grok: %s." % (filename, line))
- return False
- except UnknownFormatError as format:
- self.rejects.append("%s: unknown format '%s'." % (filename, format))
- return False
-
- # Check for mandatory fields
- for i in ("distribution", "source", "binary", "architecture",
- "version", "maintainer", "files", "changes", "description"):
- if not self.pkg.changes.has_key(i):
- # Avoid undefined errors later
- self.rejects.append("%s: Missing mandatory field `%s'." % (filename, i))
- return False
-
- # Strip a source version in brackets from the source field
- if re_strip_srcver.search(self.pkg.changes["source"]):
- self.pkg.changes["source"] = re_strip_srcver.sub('', self.pkg.changes["source"])
-
- # Ensure the source field is a valid package name.
- if not re_valid_pkg_name.match(self.pkg.changes["source"]):
- self.rejects.append("%s: invalid source name '%s'." % (filename, self.pkg.changes["source"]))
-
- # Split multi-value fields into a lower-level dictionary
- for i in ("architecture", "distribution", "binary", "closes"):
- o = self.pkg.changes.get(i, "")
- if o != "":
- del self.pkg.changes[i]
-
- self.pkg.changes[i] = {}
-
- for j in o.split():
- self.pkg.changes[i][j] = 1
-
- # Fix the Maintainer: field to be RFC822/2047 compatible
- try:
- (self.pkg.changes["maintainer822"],
- self.pkg.changes["maintainer2047"],
- self.pkg.changes["maintainername"],
- self.pkg.changes["maintaineremail"]) = \
- fix_maintainer (self.pkg.changes["maintainer"])
- except ParseMaintError as msg:
- self.rejects.append("%s: Maintainer field ('%s') failed to parse: %s" \
- % (filename, self.pkg.changes["maintainer"], msg))
-
- # ...likewise for the Changed-By: field if it exists.
- try:
- (self.pkg.changes["changedby822"],
- self.pkg.changes["changedby2047"],
- self.pkg.changes["changedbyname"],
- self.pkg.changes["changedbyemail"]) = \
- fix_maintainer (self.pkg.changes.get("changed-by", ""))
- except ParseMaintError as msg:
- self.pkg.changes["changedby822"] = ""
- self.pkg.changes["changedby2047"] = ""
- self.pkg.changes["changedbyname"] = ""
- self.pkg.changes["changedbyemail"] = ""
-
- self.rejects.append("%s: Changed-By field ('%s') failed to parse: %s" \
- % (filename, self.pkg.changes["changed-by"], msg))
-
- # Ensure all the values in Closes: are numbers
- if self.pkg.changes.has_key("closes"):
- for i in self.pkg.changes["closes"].keys():
- if re_isanum.match (i) == None:
- self.rejects.append(("%s: `%s' from Closes field isn't a number." % (filename, i)))
-
- # chopversion = no epoch; chopversion2 = no epoch and no revision (e.g. for .orig.tar.gz comparison)
- self.pkg.changes["chopversion"] = re_no_epoch.sub('', self.pkg.changes["version"])
- self.pkg.changes["chopversion2"] = re_no_revision.sub('', self.pkg.changes["chopversion"])
-
- # Check the .changes is non-empty
- if not self.pkg.files:
- self.rejects.append("%s: nothing to do (Files field is empty)." % (os.path.basename(self.pkg.changes_file)))
- return False
-
- # Changes was syntactically valid even if we'll reject
- return True
-
###########################################################################
def check_distributions(self):
###########################################################################
- def binary_file_checks(self, f, session):
- cnf = Config()
- entry = self.pkg.files[f]
-
- # Extract package control information
- deb_file = utils.open_file(f)
- try:
- control = apt_pkg.TagSection(utils.deb_extract_control(deb_file))
- except:
- self.rejects.append("%s: deb_extract_control() raised %s." % (f, sys.exc_info()[0]))
- deb_file.close()
- # Can't continue, none of the checks on control would work.
- return
-
- deb_file.close()
-
- # Check for mandatory fields
- for field in [ "Package", "Architecture", "Version", "Description" ]:
- if field not in control:
- # Can't continue
- self.rejects.append("%s: No %s field in control." % (f, field))
- return
-
-        # Ensure the package name matches the one given in the .changes
- if not self.pkg.changes["binary"].has_key(control.find("Package", "")):
- self.rejects.append("%s: control file lists name as `%s', which isn't in changes file." % (f, control.find("Package", "")))
-
- # Validate the package field
- package = control["Package"]
- if not re_valid_pkg_name.match(package):
- self.rejects.append("%s: invalid package name '%s'." % (f, package))
-
- # Validate the version field
- version = control["Version"]
- if not re_valid_version.match(version):
- self.rejects.append("%s: invalid version number '%s'." % (f, version))
-
- # Ensure the architecture of the .deb is one we know about.
- default_suite = cnf.get("Dinstall::DefaultSuite", "unstable")
- architecture = control["Architecture"]
- upload_suite = self.pkg.changes["distribution"].keys()[0]
-
- if architecture not in [a.arch_string for a in get_suite_architectures(default_suite, session = session)] \
- and architecture not in [a.arch_string for a in get_suite_architectures(upload_suite, session = session)]:
- self.rejects.append("Unknown architecture '%s'." % (architecture))
-
- # Ensure the architecture of the .deb is one of the ones
- # listed in the .changes.
- if not self.pkg.changes["architecture"].has_key(architecture):
- self.rejects.append("%s: control file lists arch as `%s', which isn't in changes file." % (f, architecture))
-
- # Sanity-check the Depends field
- depends = control.find("Depends")
- if depends == '':
- self.rejects.append("%s: Depends field is empty." % (f))
-
- # Sanity-check the Provides field
- provides = control.find("Provides")
- if provides is not None:
- provide = re_spacestrip.sub('', provides)
- if provide == '':
- self.rejects.append("%s: Provides field is empty." % (f))
- prov_list = provide.split(",")
- for prov in prov_list:
- if not re_valid_pkg_name.match(prov):
- self.rejects.append("%s: Invalid Provides field content %s." % (f, prov))
-
- # If there is a Built-Using field, we need to check we can find the
- # exact source version
- built_using = control.find("Built-Using")
- if built_using is not None:
- try:
- entry["built-using"] = []
- for dep in apt_pkg.parse_depends(built_using):
- bu_s, bu_v, bu_e = dep[0]
- # Check that it's an exact match dependency and we have
- # some form of version
- if bu_e != "=" or len(bu_v) < 1:
- self.rejects.append("%s: Built-Using contains non strict dependency (%s %s %s)" % (f, bu_s, bu_e, bu_v))
- else:
- # Find the source id for this version
- bu_so = get_sources_from_name(bu_s, version=bu_v, session = session)
- if len(bu_so) != 1:
- self.rejects.append("%s: Built-Using (%s = %s): Cannot find source package" % (f, bu_s, bu_v))
- else:
- entry["built-using"].append( (bu_so[0].source, bu_so[0].version, ) )
-
- except ValueError as e:
- self.rejects.append("%s: Cannot parse Built-Using field: %s" % (f, str(e)))
-
-
- # Check the section & priority match those given in the .changes (non-fatal)
- if control.find("Section") and entry["section"] != "" \
- and entry["section"] != control.find("Section"):
- self.warnings.append("%s control file lists section as `%s', but changes file has `%s'." % \
- (f, control.find("Section", ""), entry["section"]))
- if control.find("Priority") and entry["priority"] != "" \
- and entry["priority"] != control.find("Priority"):
- self.warnings.append("%s control file lists priority as `%s', but changes file has `%s'." % \
- (f, control.find("Priority", ""), entry["priority"]))
-
- entry["package"] = package
- entry["architecture"] = architecture
- entry["version"] = version
- entry["maintainer"] = control.find("Maintainer", "")
-
- if f.endswith(".udeb"):
- self.pkg.files[f]["dbtype"] = "udeb"
- elif f.endswith(".deb"):
- self.pkg.files[f]["dbtype"] = "deb"
- else:
-            self.rejects.append("%s is neither a .deb nor a .udeb." % (f))
-
- entry["source"] = control.find("Source", entry["package"])
-
- # Get the source version
- source = entry["source"]
- source_version = ""
-
- if source.find("(") != -1:
- m = re_extract_src_version.match(source)
- source = m.group(1)
- source_version = m.group(2)
-
- if not source_version:
- source_version = self.pkg.files[f]["version"]
-
- entry["source package"] = source
- entry["source version"] = source_version
-
- # Ensure the filename matches the contents of the .deb
- m = re_isadeb.match(f)
-
- # package name
- file_package = m.group(1)
- if entry["package"] != file_package:
- self.rejects.append("%s: package part of filename (%s) does not match package name in the %s (%s)." % \
- (f, file_package, entry["dbtype"], entry["package"]))
- epochless_version = re_no_epoch.sub('', control.find("Version"))
-
- # version
- file_version = m.group(2)
- if epochless_version != file_version:
- self.rejects.append("%s: version part of filename (%s) does not match package version in the %s (%s)." % \
- (f, file_version, entry["dbtype"], epochless_version))
-
- # architecture
- file_architecture = m.group(3)
- if entry["architecture"] != file_architecture:
- self.rejects.append("%s: architecture part of filename (%s) does not match package architecture in the %s (%s)." % \
- (f, file_architecture, entry["dbtype"], entry["architecture"]))
-
- # Check for existent source
- source_version = entry["source version"]
- source_package = entry["source package"]
- if self.pkg.changes["architecture"].has_key("source"):
- if source_version != self.pkg.changes["version"]:
- self.rejects.append("source version (%s) for %s doesn't match changes version %s." % \
- (source_version, f, self.pkg.changes["version"]))
- else:
- # Check in the SQL database
- if not source_exists(source_package, source_version, suites = \
- self.pkg.changes["distribution"].keys(), session = session):
- # Check in one of the other directories
- source_epochless_version = re_no_epoch.sub('', source_version)
- dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
-
- byhand_dir = get_policy_queue('byhand', session).path
- new_dir = get_policy_queue('new', session).path
-
- if os.path.exists(os.path.join(byhand_dir, dsc_filename)):
- entry["byhand"] = 1
- elif os.path.exists(os.path.join(new_dir, dsc_filename)):
- entry["new"] = 1
- else:
- dsc_file_exists = False
- # TODO: Don't hardcode this list: use all relevant queues
- # The question is how to determine what is relevant
- for queue_name in ["embargoed", "unembargoed", "proposedupdates", "oldproposedupdates"]:
- queue = get_policy_queue(queue_name, session)
- if queue:
- if os.path.exists(os.path.join(queue.path, dsc_filename)):
- dsc_file_exists = True
- break
-
- if not dsc_file_exists:
- self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, f))
-
- # Check the version and for file overwrites
- self.check_binary_against_db(f, session)
-
- def source_file_checks(self, f, session):
- entry = self.pkg.files[f]
-
- m = re_issource.match(f)
- if not m:
- return
-
- entry["package"] = m.group(1)
- entry["version"] = m.group(2)
- entry["type"] = m.group(3)
-
-        # Ensure the source package name matches the Source field in the .changes
- if self.pkg.changes["source"] != entry["package"]:
- self.rejects.append("%s: changes file doesn't say %s for Source" % (f, entry["package"]))
-
- # Ensure the source version matches the version in the .changes file
- if re_is_orig_source.match(f):
- changes_version = self.pkg.changes["chopversion2"]
- else:
- changes_version = self.pkg.changes["chopversion"]
-
- if changes_version != entry["version"]:
- self.rejects.append("%s: should be %s according to changes file." % (f, changes_version))
-
- # Ensure the .changes lists source in the Architecture field
- if not self.pkg.changes["architecture"].has_key("source"):
- self.rejects.append("%s: changes file doesn't list `source' in Architecture field." % (f))
-
- # Check the signature of a .dsc file
- if entry["type"] == "dsc":
- # check_signature returns either:
- # (None, [list, of, rejects]) or (signature, [])
- (self.pkg.dsc["fingerprint"], rejects) = utils.check_signature(f)
- for j in rejects:
- self.rejects.append(j)
-
- entry["architecture"] = "source"
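
The three entry fields above come straight from re_issource's groups: (package, version, type). A sketch of the expected grouping, assuming the usual dak pattern (filenames invented):

    from daklib.regexes import re_issource

    m = re_issource.match("hello_1.0-1.dsc")
    assert (m.group(1), m.group(2), m.group(3)) == ("hello", "1.0-1", "dsc")
    m = re_issource.match("hello_1.0.orig.tar.gz")
    assert (m.group(1), m.group(2), m.group(3)) == ("hello", "1.0", "orig.tar.gz")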
-
def per_suite_file_checks(self, f, suite, session):
- cnf = Config()
- entry = self.pkg.files[f]
-
- # Skip byhand
- if entry.has_key("byhand"):
- return
-
- # Check we have fields we need to do these checks
- oktogo = True
- for m in ['component', 'package', 'priority', 'size', 'md5sum']:
- if not entry.has_key(m):
- self.rejects.append("file '%s' does not have field %s set" % (f, m))
- oktogo = False
-
- if not oktogo:
- return
+ raise Exception('removed')
# Handle component mappings
for m in cnf.value_list("ComponentMappings"):
entry["original component"] = source
entry["component"] = dest
- # Ensure the component is valid for the target suite
- if entry["component"] not in get_component_names(session):
- self.rejects.append("unknown component `%s' for suite `%s'." % (entry["component"], suite))
- return
-
- # Validate the component
- if not get_component(entry["component"], session):
- self.rejects.append("file '%s' has unknown component '%s'." % (f, entry["component"]))
- return
-
- # See if the package is NEW
- if not self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), f, session):
- entry["new"] = 1
-
- # Validate the priority
- if entry["priority"].find('/') != -1:
- self.rejects.append("file '%s' has invalid priority '%s' [contains '/']." % (f, entry["priority"]))
-
- # Determine the location
- location = cnf["Dir::Pool"]
- l = get_location(location, entry["component"], session=session)
- if l is None:
- self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s)" % entry["component"])
- entry["location id"] = -1
- else:
- entry["location id"] = l.location_id
-
- # Check the md5sum & size against existing files (if any)
- entry["pool name"] = utils.poolify(self.pkg.changes["source"], entry["component"])
-
- found, poolfile = check_poolfile(os.path.join(entry["pool name"], f),
- entry["size"], entry["md5sum"], entry["location id"])
-
- if found is None:
-            self.rejects.append("INTERNAL ERROR, check_poolfile() returned multiple matches for %s." % (f))
- elif found is False and poolfile is not None:
- self.rejects.append("md5sum and/or size mismatch on existing copy of %s." % (f))
- else:
- if poolfile is None:
- entry["files id"] = None
- else:
- entry["files id"] = poolfile.file_id
-
- # Check for packages that have moved from one component to another
- entry['suite'] = suite
- arch_list = [entry["architecture"], 'all']
- component = get_component_by_package_suite(self.pkg.files[f]['package'], \
- [suite], arch_list = arch_list, session = session)
- if component is not None:
- entry["othercomponents"] = component
-
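
The "pool name" computed above is the per-source pool subdirectory; a sketch of the expected shape, assuming the historical utils.poolify behaviour (lib* sources get a four-character prefix):

    from daklib import utils

    print utils.poolify("hello", "main")    # "main/h/hello/"
    print utils.poolify("libxml2", "main")  # "main/libx/libxml2/"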
- def check_files(self, action=True):
- file_keys = self.pkg.files.keys()
- holding = Holding()
- cnf = Config()
-
- if action:
- cwd = os.getcwd()
- os.chdir(self.pkg.directory)
- for f in file_keys:
- ret = holding.copy_to_holding(f)
- if ret is not None:
- self.warnings.append('Could not copy %s to holding; will attempt to find in DB later' % f)
-
- os.chdir(cwd)
-
- # check we already know the changes file
- # [NB: this check must be done post-suite mapping]
- base_filename = os.path.basename(self.pkg.changes_file)
-
- session = DBConn().session()
-
- try:
- dbc = session.query(DBChange).filter_by(changesname=base_filename).one()
- # if in the pool or in a queue other than unchecked, reject
- if (dbc.in_queue is None) \
- or (dbc.in_queue is not None
- and dbc.in_queue.queue_name not in ["unchecked", "newstage"]):
- self.rejects.append("%s file already known to dak" % base_filename)
- except NoResultFound as e:
- # not known, good
- pass
-
- has_binaries = False
- has_source = False
-
- for f, entry in self.pkg.files.items():
- # Ensure the file does not already exist in one of the accepted directories
- # TODO: Dynamically generate this list
- for queue_name in [ "byhand", "new", "proposedupdates", "oldproposedupdates", "embargoed", "unembargoed" ]:
- queue = get_policy_queue(queue_name, session)
- if queue and os.path.exists(os.path.join(queue.path, f)):
- self.rejects.append("%s file already exists in the %s queue." % (f, queue_name))
-
- if not re_taint_free.match(f):
- self.rejects.append("!!WARNING!! tainted filename: '%s'." % (f))
-
- # Check the file is readable
- if os.access(f, os.R_OK) == 0:
- # When running in -n, copy_to_holding() won't have
- # generated the reject_message, so we need to.
- if action:
- if os.path.exists(f):
- self.rejects.append("Can't read `%s'. [permission denied]" % (f))
- else:
- # Don't directly reject, mark to check later to deal with orig's
- # we can find in the pool
- self.later_check_files.append(f)
- entry["type"] = "unreadable"
- continue
-
- # If it's byhand skip remaining checks
- if entry["section"] == "byhand" or entry["section"][:4] == "raw-":
- entry["byhand"] = 1
- entry["type"] = "byhand"
-
- # Checks for a binary package...
- elif re_isadeb.match(f):
- has_binaries = True
- entry["type"] = "deb"
-
- # This routine appends to self.rejects/warnings as appropriate
- self.binary_file_checks(f, session)
-
- # Checks for a source package...
- elif re_issource.match(f):
- has_source = True
-
- # This routine appends to self.rejects/warnings as appropriate
- self.source_file_checks(f, session)
-
- # Not a binary or source package? Assume byhand...
- else:
- entry["byhand"] = 1
- entry["type"] = "byhand"
-
- # Per-suite file checks
- entry["oldfiles"] = {}
- for suite in self.pkg.changes["distribution"].keys():
- self.per_suite_file_checks(f, suite, session)
-
- session.close()
-
- # If the .changes file says it has source, it must have source.
- if self.pkg.changes["architecture"].has_key("source"):
- if not has_source:
-                self.rejects.append("no source found and Architecture line in changes mentions source.")
-
- if (not has_binaries) and (not cnf.find_b("Dinstall::AllowSourceOnlyUploads")):
-            self.rejects.append("source-only uploads are not supported.")
-
- ###########################################################################
-
- def __dsc_filename(self):
- """
- Returns: (Status, Dsc_Filename)
- where
- Status: Boolean; True when there was no error, False otherwise
- Dsc_Filename: String; name of the dsc file if Status is True, reason for the error otherwise
- """
- dsc_filename = None
-
- # find the dsc
- for name, entry in self.pkg.files.items():
- if entry.has_key("type") and entry["type"] == "dsc":
- if dsc_filename:
- return False, "cannot process a .changes file with multiple .dsc's."
- else:
- dsc_filename = name
-
- if not dsc_filename:
- return False, "source uploads must contain a dsc file"
-
- return True, dsc_filename
-
- def load_dsc(self, action=True, signing_rules=1):
- """
- Find and load the dsc from self.pkg.files into self.dsc
-
- Returns: (Status, Reason)
- where
- Status: Boolean; True when there was no error, False otherwise
- Reason: String; When Status is False this describes the error
- """
-
- # find the dsc
- (status, dsc_filename) = self.__dsc_filename()
- if not status:
- # If status is false, dsc_filename has the reason
- return False, dsc_filename
-
- try:
- self.pkg.dsc.update(utils.parse_changes(dsc_filename, signing_rules=signing_rules, dsc_file=1))
- except CantOpenError:
- if not action:
- return False, "%s: can't read file." % (dsc_filename)
- except ParseChangesError as line:
- return False, "%s: parse error, can't grok: %s." % (dsc_filename, line)
- except InvalidDscError as line:
- return False, "%s: syntax error on line %s." % (dsc_filename, line)
- except ChangesUnicodeError:
- return False, "%s: dsc file not proper utf-8." % (dsc_filename)
-
- return True, None
-
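
load_dsc (and __dsc_filename, which it wraps) returns the (Status, Reason) pair described in the docstrings, so callers act on the second element when the first is False. A minimal usage sketch, assuming an Upload instance named upload:

    (status, reason) = upload.load_dsc(action=False)
    if not status:
        # on failure the second element carries the human-readable reason
        upload.rejects.append(reason)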
- ###########################################################################
-
- def check_dsc(self, action=True, session=None):
- """Returns bool indicating whether or not the source changes are valid"""
- # Ensure there is source to check
- if not self.pkg.changes["architecture"].has_key("source"):
- return True
-
- if session is None:
- session = DBConn().session()
-
- (status, reason) = self.load_dsc(action=action)
- if not status:
- self.rejects.append(reason)
- return False
- (status, dsc_filename) = self.__dsc_filename()
- if not status:
- # If status is false, dsc_filename has the reason
- self.rejects.append(dsc_filename)
- return False
-
- # Build up the file list of files mentioned by the .dsc
- try:
- self.pkg.dsc_files.update(utils.build_file_list(self.pkg.dsc, is_a_dsc=1))
- except NoFilesFieldError:
- self.rejects.append("%s: no Files: field." % (dsc_filename))
- return False
- except UnknownFormatError as format:
- self.rejects.append("%s: unknown format '%s'." % (dsc_filename, format))
- return False
- except ParseChangesError as line:
- self.rejects.append("%s: parse error, can't grok: %s." % (dsc_filename, line))
- return False
-
- # Enforce mandatory fields
- for i in ("format", "source", "version", "binary", "maintainer", "architecture", "files"):
- if not self.pkg.dsc.has_key(i):
- self.rejects.append("%s: missing mandatory field `%s'." % (dsc_filename, i))
- return False
-
- # Validate the source and version fields
- if not re_valid_pkg_name.match(self.pkg.dsc["source"]):
- self.rejects.append("%s: invalid source name '%s'." % (dsc_filename, self.pkg.dsc["source"]))
- if not re_valid_version.match(self.pkg.dsc["version"]):
- self.rejects.append("%s: invalid version number '%s'." % (dsc_filename, self.pkg.dsc["version"]))
-
-        # Only a limited list of source formats is allowed in each suite
- for dist in self.pkg.changes["distribution"].keys():
- suite = get_suite(dist, session=session)
- if not suite:
- self.rejects.append("%s: cannot find suite %s when checking source formats" % (dsc_filename, dist))
- continue
- allowed = [ x.format_name for x in suite.srcformats ]
- if self.pkg.dsc["format"] not in allowed:
- self.rejects.append("%s: source format '%s' not allowed in %s (accepted: %s) " % (dsc_filename, self.pkg.dsc["format"], dist, ", ".join(allowed)))
-
- # Validate the Maintainer field
- try:
- # We ignore the return value
- fix_maintainer(self.pkg.dsc["maintainer"])
- except ParseMaintError as msg:
- self.rejects.append("%s: Maintainer field ('%s') failed to parse: %s" \
- % (dsc_filename, self.pkg.dsc["maintainer"], msg))
-
- # Validate the build-depends field(s)
- for field_name in [ "build-depends", "build-depends-indep" ]:
- field = self.pkg.dsc.get(field_name)
- if field:
- # Have apt try to parse them...
- try:
- apt_pkg.parse_src_depends(field)
- except:
- self.rejects.append("%s: invalid %s field (can not be parsed by apt)." % (dsc_filename, field_name.title()))
-
- # Ensure the version number in the .dsc matches the version number in the .changes
- epochless_dsc_version = re_no_epoch.sub('', self.pkg.dsc["version"])
- changes_version = self.pkg.files[dsc_filename]["version"]
-
-        if epochless_dsc_version != changes_version:
- self.rejects.append("version ('%s') in .dsc does not match version ('%s') in .changes." % (epochless_dsc_version, changes_version))
-
-        # Ensure the Files field contains only what's expected
- self.rejects.extend(check_dsc_files(dsc_filename, self.pkg.dsc, self.pkg.dsc_files))
-
- # Ensure source is newer than existing source in target suites
- session = DBConn().session()
- self.check_source_against_db(dsc_filename, session)
- self.check_dsc_against_db(dsc_filename, session)
-
- dbchg = get_dbchange(self.pkg.changes_file, session)
-
- # Finally, check if we're missing any files
- for f in self.later_check_files:
- print 'XXX: %s' % f
- # Check if we've already processed this file if we have a dbchg object
- ok = False
- if dbchg:
- for pf in dbchg.files:
- if pf.filename == f and pf.processed:
- self.notes.append('%s was already processed so we can go ahead' % f)
- ok = True
- del self.pkg.files[f]
- if not ok:
-                self.rejects.append("Could not find file %s referenced in changes" % f)
-
- session.close()
-
- return (len(self.rejects) == 0)
-
- ###########################################################################
-
- def get_changelog_versions(self, source_dir):
-        """Extracts the source package and (optionally) grabs the
- version history out of debian/changelog for the BTS."""
-
- cnf = Config()
-
- # Find the .dsc (again)
- dsc_filename = None
- for f in self.pkg.files.keys():
- if self.pkg.files[f]["type"] == "dsc":
- dsc_filename = f
-
- # If there isn't one, we have nothing to do. (We have reject()ed the upload already)
- if not dsc_filename:
- return
-
- # Create a symlink mirror of the source files in our temporary directory
- for f in self.pkg.files.keys():
- m = re_issource.match(f)
- if m:
- src = os.path.join(source_dir, f)
- # If a file is missing for whatever reason, give up.
- if not os.path.exists(src):
- return
- ftype = m.group(3)
- if re_is_orig_source.match(f) and self.pkg.orig_files.has_key(f) and \
- self.pkg.orig_files[f].has_key("path"):
- continue
- dest = os.path.join(os.getcwd(), f)
- os.symlink(src, dest)
-
- # If the orig files are not a part of the upload, create symlinks to the
- # existing copies.
- for orig_file in self.pkg.orig_files.keys():
- if not self.pkg.orig_files[orig_file].has_key("path"):
- continue
- dest = os.path.join(os.getcwd(), os.path.basename(orig_file))
- os.symlink(self.pkg.orig_files[orig_file]["path"], dest)
-
- # Extract the source
- try:
- unpacked = UnpackedSource(dsc_filename)
- except Exception as e:
- self.rejects.append("'dpkg-source -x' failed for %s. (%s)" % (dsc_filename, str(e)))
- return
-
- if not cnf.find("Dir::BTSVersionTrack"):
- return
-
- # Get the upstream version
- upstr_version = re_no_epoch.sub('', self.pkg.dsc["version"])
- if re_strip_revision.search(upstr_version):
- upstr_version = re_strip_revision.sub('', upstr_version)
-
- # Ensure the changelog file exists
- changelog_file = unpacked.get_changelog_file()
- if changelog_file is None:
- self.rejects.append("%s: debian/changelog not found in extracted source." % (dsc_filename))
- return
-
- # Parse the changelog
- self.pkg.dsc["bts changelog"] = ""
- for line in changelog_file.readlines():
- m = re_changelog_versions.match(line)
- if m:
- self.pkg.dsc["bts changelog"] += line
- changelog_file.close()
- unpacked.cleanup()
-
- # Check we found at least one revision in the changelog
- if not self.pkg.dsc["bts changelog"]:
- self.rejects.append("%s: changelog format not recognised (empty version tree)." % (dsc_filename))
-
- def check_source(self):
- # Bail out if:
- # a) there's no source
- if not self.pkg.changes["architecture"].has_key("source"):
- return
-
- tmpdir = utils.temp_dirname()
-
- # Move into the temporary directory
- cwd = os.getcwd()
- os.chdir(tmpdir)
-
- # Get the changelog version history
- self.get_changelog_versions(cwd)
-
- # Move back and cleanup the temporary tree
- os.chdir(cwd)
-
- try:
- shutil.rmtree(tmpdir)
- except OSError as e:
- if e.errno != errno.EACCES:
- print "foobar"
- utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
-
- self.rejects.append("%s: source tree could not be cleanly removed." % (self.pkg.dsc["source"]))
- # We probably have u-r or u-w directories so chmod everything
- # and try again.
- cmd = "chmod -R u+rwx %s" % (tmpdir)
- result = os.system(cmd)
- if result != 0:
- utils.fubar("'%s' failed with result %s." % (cmd, result))
- shutil.rmtree(tmpdir)
- except Exception as e:
- print "foobar2 (%s)" % e
- utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
-
- ###########################################################################
- def ensure_hashes(self):
- # Make sure we recognise the format of the Files: field in the .changes
- format = self.pkg.changes.get("format", "0.0").split(".", 1)
- if len(format) == 2:
- format = int(format[0]), int(format[1])
- else:
- format = int(float(format[0])), 0
-
- # We need to deal with the original changes blob, as the fields we need
- # might not be in the changes dict serialised into the .dak anymore.
- orig_changes = utils.parse_deb822(self.pkg.changes['filecontents'])
-
- # Copy the checksums over to the current changes dict. This will keep
- # the existing modifications to it intact.
- for field in orig_changes:
- if field.startswith('checksums-'):
- self.pkg.changes[field] = orig_changes[field]
-
- # Check for unsupported hashes
- for j in utils.check_hash_fields(".changes", self.pkg.changes):
- self.rejects.append(j)
-
- for j in utils.check_hash_fields(".dsc", self.pkg.dsc):
- self.rejects.append(j)
-
-        # We have to calculate the hash ourselves if the changes format is older
-        # than the one the hash first appeared in, rather than require it to
-        # exist in the changes file
- for hashname, hashfunc, version in utils.known_hashes:
- # TODO: Move _ensure_changes_hash into this class
- for j in utils._ensure_changes_hash(self.pkg.changes, format, version, self.pkg.files, hashname, hashfunc):
- self.rejects.append(j)
- if "source" in self.pkg.changes["architecture"]:
- # TODO: Move _ensure_dsc_hash into this class
- for j in utils._ensure_dsc_hash(self.pkg.dsc, self.pkg.dsc_files, hashname, hashfunc):
- self.rejects.append(j)
-
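
The Format handling at the top of ensure_hashes normalises the .changes Format field into a comparable (major, minor) tuple. The same logic as a standalone sketch (hypothetical helper, not part of dak):

    def parse_changes_format(value):
        parts = value.split(".", 1)
        if len(parts) == 2:
            return int(parts[0]), int(parts[1])
        # a bare major version such as "1" becomes (1, 0)
        return int(float(parts[0])), 0

    assert parse_changes_format("1.8") == (1, 8)
    assert parse_changes_format("0.0") == (0, 0)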
- def check_hashes(self):
- for m in utils.check_hash(".changes", self.pkg.files, "md5", apt_pkg.md5sum):
- self.rejects.append(m)
-
- for m in utils.check_size(".changes", self.pkg.files):
- self.rejects.append(m)
-
- for m in utils.check_hash(".dsc", self.pkg.dsc_files, "md5", apt_pkg.md5sum):
- self.rejects.append(m)
-
- for m in utils.check_size(".dsc", self.pkg.dsc_files):
- self.rejects.append(m)
-
- self.ensure_hashes()
-
- ###########################################################################
-
- def ensure_orig(self, target_dir='.', session=None):
- """
- Ensures that all orig files mentioned in the changes file are present
- in target_dir. If they do not exist, they are symlinked into place.
-
-        A list containing the symlinks that were created is returned (so they
- can be removed).
- """
-
- symlinked = []
- cnf = Config()
-
- for filename, entry in self.pkg.dsc_files.iteritems():
- if not re_is_orig_source.match(filename):
- # File is not an orig; ignore
- continue
-
- if os.path.exists(filename):
- # File exists, no need to continue
- continue
-
- def symlink_if_valid(path):
- f = utils.open_file(path)
- md5sum = apt_pkg.md5sum(f)
- f.close()
-
- fingerprint = (os.stat(path)[stat.ST_SIZE], md5sum)
- expected = (int(entry['size']), entry['md5sum'])
-
- if fingerprint != expected:
- return False
-
- dest = os.path.join(target_dir, filename)
-
- os.symlink(path, dest)
- symlinked.append(dest)
-
- return True
-
- session_ = session
- if session is None:
- session_ = DBConn().session()
-
- found = False
-
- # Look in the pool
-            for poolfile in get_poolfile_like_name(filename, session_):
- poolfile_path = os.path.join(
- poolfile.location.path, poolfile.filename
- )
-
- if symlink_if_valid(poolfile_path):
- found = True
- break
-
- if session is None:
- session_.close()
-
- if found:
- continue
-
- # Look in some other queues for the file
- queue_names = ['new', 'byhand',
- 'proposedupdates', 'oldproposedupdates',
- 'embargoed', 'unembargoed']
-
- for queue_name in queue_names:
- queue = get_policy_queue(queue_name, session)
- if not queue:
- continue
-
- queuefile_path = os.path.join(queue.path, filename)
-
- if not os.path.exists(queuefile_path):
- # Does not exist in this queue
- continue
-
- if symlink_if_valid(queuefile_path):
- break
-
- return symlinked
-
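
check_lintian below shows the intended calling pattern for ensure_orig: collect the symlinks, run whatever needs the complete source next to the .dsc, then unlink them again. A condensed sketch, assuming an Upload instance named upload:

    import os

    symlinked = upload.ensure_orig(target_dir='.')
    try:
        pass  # run tools that need the orig tarballs in place here
    finally:
        for link in symlinked:
            os.unlink(link)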
- ###########################################################################
-
- def check_lintian(self):
- """
- Extends self.rejects by checking the output of lintian against tags
- specified in Dinstall::LintianTags.
- """
-
- cnf = Config()
-
- # Don't reject binary uploads
- if not self.pkg.changes['architecture'].has_key('source'):
- return
-
- # Only check some distributions
- for dist in ('unstable', 'experimental'):
- if dist in self.pkg.changes['distribution']:
- break
- else:
- return
-
- # If we do not have a tagfile, don't do anything
- tagfile = cnf.get("Dinstall::LintianTags")
- if not tagfile:
- return
-
- # Parse the yaml file
- sourcefile = file(tagfile, 'r')
- sourcecontent = sourcefile.read()
- sourcefile.close()
-
- try:
- lintiantags = yaml.load(sourcecontent)['lintian']
- except yaml.YAMLError as msg:
- utils.fubar("Can not read the lintian tags file %s, YAML error: %s." % (tagfile, msg))
- return
-
-        # Try and find all origs mentioned in the .dsc
- symlinked = self.ensure_orig()
-
- # Setup the input file for lintian
- fd, temp_filename = utils.temp_filename()
- temptagfile = os.fdopen(fd, 'w')
- for tags in lintiantags.values():
- temptagfile.writelines(['%s\n' % x for x in tags])
- temptagfile.close()
-
- try:
- cmd = "lintian --show-overrides --tags-from-file %s %s" % \
- (temp_filename, self.pkg.changes_file)
-
- result, output = commands.getstatusoutput(cmd)
- finally:
- # Remove our tempfile and any symlinks we created
- os.unlink(temp_filename)
-
- for symlink in symlinked:
- os.unlink(symlink)
-
- if result == 2:
- utils.warn("lintian failed for %s [return code: %s]." % \
- (self.pkg.changes_file, result))
- utils.warn(utils.prefix_multi_line_string(output, \
- " [possible output:] "))
-
- def log(*txt):
- if self.logger:
- self.logger.log(
- [self.pkg.changes_file, "check_lintian"] + list(txt)
- )
-
- # Generate messages
- parsed_tags = parse_lintian_output(output)
- self.rejects.extend(
- generate_reject_messages(parsed_tags, lintiantags, log=log)
- )
-
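
The tag file loaded by check_lintian is YAML with a single top-level 'lintian' key mapping tag classes to lists of tag names. A hypothetical minimal value for yaml.load(...)['lintian'] (class and tag names illustrative):

    lintiantags = {
        'fatal': ['debian-changelog-file-missing'],
        'nonfatal': ['bad-distribution-in-changes-file'],
    }
    # every tag, across all classes, ends up in the file handed to
    # lintian via --tags-from-file
    for tags in lintiantags.values():
        for tag in tags:
            print tag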
- ###########################################################################
- def check_urgency(self):
- cnf = Config()
- if self.pkg.changes["architecture"].has_key("source"):
- if not self.pkg.changes.has_key("urgency"):
- self.pkg.changes["urgency"] = cnf["Urgency::Default"]
- self.pkg.changes["urgency"] = self.pkg.changes["urgency"].lower()
- if self.pkg.changes["urgency"] not in cnf.value_list("Urgency::Valid"):
- self.warnings.append("%s is not a valid urgency; it will be treated as %s by testing." % \
- (self.pkg.changes["urgency"], cnf["Urgency::Default"]))
- self.pkg.changes["urgency"] = cnf["Urgency::Default"]
-
###########################################################################
# Sanity check the time stamps of files inside debs.
# [Files in the near future cause ugly warnings and extreme time
# travel can cause errors on extraction]
- def check_timestamps(self):
- Cnf = Config()
-
- future_cutoff = time.time() + int(Cnf["Dinstall::FutureTimeTravelGrace"])
- past_cutoff = time.mktime(time.strptime(Cnf["Dinstall::PastCutoffYear"],"%Y"))
- tar = TarTime(future_cutoff, past_cutoff)
-
- for filename, entry in self.pkg.files.items():
- if entry["type"] == "deb":
- tar.reset()
- try:
- deb = apt_inst.DebFile(filename)
- deb.control.go(tar.callback)
-
- future_files = tar.future_files.keys()
- if future_files:
- num_future_files = len(future_files)
- future_file = future_files[0]
- future_date = tar.future_files[future_file]
- self.rejects.append("%s: has %s file(s) with a time stamp too far into the future (e.g. %s [%s])."
- % (filename, num_future_files, future_file, time.ctime(future_date)))
-
- ancient_files = tar.ancient_files.keys()
- if ancient_files:
- num_ancient_files = len(ancient_files)
- ancient_file = ancient_files[0]
- ancient_date = tar.ancient_files[ancient_file]
- self.rejects.append("%s: has %s file(s) with a time stamp too ancient (e.g. %s [%s])."
- % (filename, num_ancient_files, ancient_file, time.ctime(ancient_date)))
- except:
- self.rejects.append("%s: deb contents timestamp check failed [%s: %s]" % (filename, sys.exc_info()[0], sys.exc_info()[1]))
-
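
Both cutoffs in check_timestamps are plain epoch values: the future bound is now plus the configured grace period, the past bound is midnight on January 1st of the configured cutoff year. A sketch with hypothetical config values:

    import time

    future_grace = 24 * 60 * 60   # hypothetical Dinstall::FutureTimeTravelGrace
    future_cutoff = time.time() + future_grace
    # hypothetical Dinstall::PastCutoffYear of "1975"
    past_cutoff = time.mktime(time.strptime("1975", "%Y"))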
def check_if_upload_is_sponsored(self, uid_email, uid_name):
for key in "maintaineremail", "changedbyemail", "maintainername", "changedbyname":
if not self.pkg.changes.has_key(key):
return sponsored
-
- ###########################################################################
- # check_signed_by_key checks
- ###########################################################################
-
- def check_signed_by_key(self):
- """Ensure the .changes is signed by an authorized uploader."""
- session = DBConn().session()
-
- # First of all we check that the person has proper upload permissions
- # and that this upload isn't blocked
- fpr = get_fingerprint(self.pkg.changes['fingerprint'], session=session)
-
- if fpr is None:
- self.rejects.append("Cannot find fingerprint %s" % self.pkg.changes["fingerprint"])
- return
-
- # TODO: Check that import-keyring adds UIDs properly
- if not fpr.uid:
- self.rejects.append("Cannot find uid for fingerprint %s. Please contact ftpmaster@debian.org" % fpr.fingerprint)
- return
-
- # Check that the fingerprint which uploaded has permission to do so
- self.check_upload_permissions(fpr, session)
-
- # Check that this package is not in a transition
- self.check_transition(session)
-
- session.close()
-
-
- def check_upload_permissions(self, fpr, session):
- # Check any one-off upload blocks
- self.check_upload_blocks(fpr, session)
-
- # If the source_acl is None, source is never allowed
- if fpr.source_acl is None:
- if self.pkg.changes["architecture"].has_key("source"):
- rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
- rej += '\nPlease contact ftpmaster if you think this is incorrect'
- self.rejects.append(rej)
- return
-        # DM is a special case unfortunately, so we check it first
- # (keys with no source access get more access than DMs in one
- # way; DMs can only upload for their packages whether source
- # or binary, whereas keys with no access might be able to
- # upload some binaries)
- elif fpr.source_acl.access_level == 'dm':
- self.check_dm_upload(fpr, session)
- else:
- # If not a DM, we allow full upload rights
- uid_email = "%s@debian.org" % (fpr.uid.uid)
- self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
-
-
- # Check binary upload permissions
- # By this point we know that DMs can't have got here unless they
- # are allowed to deal with the package concerned so just apply
- # normal checks
- if fpr.binary_acl.access_level == 'full':
- return
-
- # Otherwise we're in the map case
- tmparches = self.pkg.changes["architecture"].copy()
- tmparches.pop('source', None)
-
- for bam in fpr.binary_acl_map:
- tmparches.pop(bam.architecture.arch_string, None)
-
- if len(tmparches.keys()) > 0:
- if fpr.binary_reject:
- rej = "changes file contains files of architectures not permitted for fingerprint %s" % fpr.fingerprint
- if len(tmparches.keys()) == 1:
- rej += "\n\narchitecture involved is: %s" % ",".join(tmparches.keys())
- else:
- rej += "\n\narchitectures involved are: %s" % ",".join(tmparches.keys())
- self.rejects.append(rej)
- else:
- # TODO: This is where we'll implement reject vs throw away binaries later
- rej = "Uhm. I'm meant to throw away the binaries now but that's not implemented yet"
- rej += "\nPlease complain to ftpmaster@debian.org as this shouldn't have been turned on"
-                rej += "\nFingerprint: %s" % (fpr.fingerprint)
- self.rejects.append(rej)
-
-
- def check_upload_blocks(self, fpr, session):
- """Check whether any upload blocks apply to this source, source
- version, uid / fpr combination"""
-
- def block_rej_template(fb):
- rej = 'Manual upload block in place for package %s' % fb.source
- if fb.version is not None:
- rej += ', version %s' % fb.version
- return rej
-
- for fb in session.query(UploadBlock).filter_by(source = self.pkg.changes['source']).all():
- # version is None if the block applies to all versions
- if fb.version is None or fb.version == self.pkg.changes['version']:
- # Check both fpr and uid - either is enough to cause a reject
- if fb.fpr is not None:
- if fb.fpr.fingerprint == fpr.fingerprint:
- self.rejects.append(block_rej_template(fb) + ' for fingerprint %s\nReason: %s' % (fpr.fingerprint, fb.reason))
- if fb.uid is not None:
- if fb.uid == fpr.uid:
- self.rejects.append(block_rej_template(fb) + ' for uid %s\nReason: %s' % (fb.uid.uid, fb.reason))
-
-
def check_dm_upload(self, fpr, session):
# Quoth the GR (http://www.debian.org/vote/2007/vote_003):
## none of the uploaded packages are NEW
- rej = False
- for f in self.pkg.files.keys():
- if self.pkg.files[f].has_key("byhand"):
- self.rejects.append("%s may not upload BYHAND file %s" % (fpr.uid.uid, f))
- rej = True
- if self.pkg.files[f].has_key("new"):
- self.rejects.append("%s may not upload NEW file %s" % (fpr.uid.uid, f))
- rej = True
-
- if rej:
- return
-
- r = get_newest_source(self.pkg.changes["source"], session)
-
- if r is None:
- rej = "Could not find existing source package %s in the DM allowed suites and this is a DM upload" % self.pkg.changes["source"]
- self.rejects.append(rej)
- return
-
- if not r.dm_upload_allowed:
- rej = "Source package %s does not have 'DM-Upload-Allowed: yes' in its most recent version (%s)" % (self.pkg.changes["source"], r.version)
- self.rejects.append(rej)
- return
-
- ## the Maintainer: field of the uploaded .changes file corresponds with
- ## the owner of the key used (ie, non-developer maintainers may not sponsor
- ## uploads)
- if self.check_if_upload_is_sponsored(fpr.uid.uid, fpr.uid.name):
- self.rejects.append("%s (%s) is not authorised to sponsor uploads" % (fpr.uid.uid, fpr.fingerprint))
-
- ## the most recent version of the package uploaded to unstable or
- ## experimental lists the uploader in the Maintainer: or Uploaders: fields (ie,
- ## non-developer maintainers cannot NMU or hijack packages)
-
- # uploader includes the maintainer
- accept = False
- for uploader in r.uploaders:
- (rfc822, rfc2047, name, email) = uploader.get_split_maintainer()
- # Eww - I hope we never have two people with the same name in Debian
- if email == fpr.uid.uid or name == fpr.uid.name:
- accept = True
- break
-
- if not accept:
- self.rejects.append("%s is not in Maintainer or Uploaders of source package %s" % (fpr.uid.uid, self.pkg.changes["source"]))
- return
-
## none of the packages are being taken over from other source packages
for b in self.pkg.changes["binary"].keys():
for suite in self.pkg.changes["distribution"].keys():
if s.source != self.pkg.changes["source"]:
self.rejects.append("%s may not hijack %s from source package %s in suite %s" % (fpr.uid.uid, b, s, suite))
-
-
- def check_transition(self, session):
- cnf = Config()
-
- sourcepkg = self.pkg.changes["source"]
-
- # No sourceful upload -> no need to do anything else, direct return
-        # We also only act on uploads to unstable, not experimental or those
-        # going to some proposed-updates queue
- if "source" not in self.pkg.changes["architecture"] or \
- "unstable" not in self.pkg.changes["distribution"]:
- return
-
-        # Also only check if a transitions file is defined (and exists).
- transpath = cnf.get("Dinstall::ReleaseTransitions", "")
- if transpath == "" or not os.path.exists(transpath):
- return
-
- # Parse the yaml file
- sourcefile = file(transpath, 'r')
- sourcecontent = sourcefile.read()
- try:
- transitions = yaml.load(sourcecontent)
- except yaml.YAMLError as msg:
- # This shouldn't happen, there is a wrapper to edit the file which
-            # checks it, but we prefer to be safe rather than end up rejecting
- # everything.
- utils.warn("Not checking transitions, the transitions file is broken: %s." % (msg))
- return
-
- # Now look through all defined transitions
- for trans in transitions:
- t = transitions[trans]
- source = t["source"]
- expected = t["new"]
-
- # Will be None if nothing is in testing.
- current = get_source_in_suite(source, "testing", session)
- if current is not None:
- compare = apt_pkg.version_compare(current.version, expected)
-
- if current is None or compare < 0:
- # This is still valid, the current version in testing is older than
- # the new version we wait for, or there is none in testing yet
-
- # Check if the source we look at is affected by this.
- if sourcepkg in t['packages']:
-                    # The source is affected, let's reject it.
-
- rejectmsg = "%s: part of the %s transition.\n\n" % (
- sourcepkg, trans)
-
- if current is not None:
- currentlymsg = "at version %s" % (current.version)
- else:
- currentlymsg = "not present in testing"
-
- rejectmsg += "Transition description: %s\n\n" % (t["reason"])
-
- rejectmsg += "\n".join(textwrap.wrap("""Your package
-is part of a testing transition designed to get %s migrated (it is
-currently %s, we need version %s). This transition is managed by the
-Release Team, and %s is the Release-Team member responsible for it.
-Please mail debian-release@lists.debian.org or contact %s directly if you
-need further assistance. You might want to upload to experimental until this
-transition is done."""
-                     % (source, currentlymsg, expected, t["rm"], t["rm"])))
-
- self.rejects.append(rejectmsg)
- return
-
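
After yaml.load the transitions file is a mapping from transition name to a dict carrying the keys read above. A hypothetical entry with the shape this code expects:

    transitions = {
        'perl-5.38': {                  # transition name (invented)
            'source': 'perl',           # package whose migration is awaited
            'new': '5.38.0-1',          # version that must reach testing
            'rm': 'Some Releaser',      # responsible Release-Team member
            'reason': 'perl 5.38 rebuild transition',
            'packages': ['libfoo-perl', 'libbar-perl'],  # affected sources
        },
    }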
###########################################################################
# End check_signed_by_key checks
###########################################################################
###########################################################################
- def close_bugs(self, summary, action):
- """
- Send mail to close bugs as instructed by the closes field in the changes file.
- Also add a line to summary if any work was done.
-
- @type summary: string
- @param summary: summary text, as given by L{build_summaries}
-
- @type action: bool
-        @param action: If set to false, no real action will be done.
-
- @rtype: string
- @return: summary. If action was taken, extended by the list of closed bugs.
-
- """
-
- template = os.path.join(Config()["Dir::Templates"], 'process-unchecked.bug-close')
-
- bugs = self.pkg.changes["closes"].keys()
-
- if not bugs:
- return summary
-
- bugs.sort()
- summary += "Closing bugs: "
- for bug in bugs:
- summary += "%s " % (bug)
- if action:
- self.update_subst()
- self.Subst["__BUG_NUMBER__"] = bug
- if self.pkg.changes["distribution"].has_key("stable"):
- self.Subst["__STABLE_WARNING__"] = """
-Note that this package is not part of the released stable Debian
-distribution. It may have dependencies on other unreleased software,
-or other instabilities. Please take care if you wish to install it.
-The update will eventually make its way into the next released Debian
-distribution."""
- else:
- self.Subst["__STABLE_WARNING__"] = ""
- mail_message = utils.TemplateSubst(self.Subst, template)
- utils.send_mail(mail_message)
-
- # Clear up after ourselves
- del self.Subst["__BUG_NUMBER__"]
- del self.Subst["__STABLE_WARNING__"]
-
- if action and self.logger:
- self.logger.log(["closing bugs"] + bugs)
-
- summary += "\n"
-
- return summary
-
- ###########################################################################
-
def announce(self, short_summary, action):
"""
Send an announce mail about a new upload.
return summary
###########################################################################
- @session_wrapper
- def accept (self, summary, short_summary, session=None):
- """
- Accept an upload.
-
- This moves all files referenced from the .changes into the pool,
- sends the accepted mail, announces to lists, closes bugs and
- also checks for override disparities. If enabled it will write out
- the version history for the BTS Version Tracking and will finally call
- L{queue_build}.
-
- @type summary: string
- @param summary: Summary text
-
- @type short_summary: string
- @param short_summary: Short summary
- """
-
- cnf = Config()
- stats = SummaryStats()
-
- print "Installing."
- self.logger.log(["installing changes", self.pkg.changes_file])
-
- binaries = []
- poolfiles = []
-
- # Add the .dsc file to the DB first
- for newfile, entry in self.pkg.files.items():
- if entry["type"] == "dsc":
- source, dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
- for j in pfs:
- poolfiles.append(j)
-
- # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
- for newfile, entry in self.pkg.files.items():
- if entry["type"] == "deb":
- b, pf = add_deb_to_db(self, newfile, session)
- binaries.append(b)
- poolfiles.append(pf)
-
- # If this is a sourceful diff only upload that is moving
- # cross-component we need to copy the .orig files into the new
- # component too for the same reasons as above.
- # XXX: mhy: I think this should be in add_dsc_to_db
- if self.pkg.changes["architecture"].has_key("source"):
- for orig_file in self.pkg.orig_files.keys():
- if not self.pkg.orig_files[orig_file].has_key("id"):
- continue # Skip if it's not in the pool
- orig_file_id = self.pkg.orig_files[orig_file]["id"]
- if self.pkg.orig_files[orig_file]["location"] == dsc_location_id:
- continue # Skip if the location didn't change
-
- # Do the move
- oldf = get_poolfile_by_id(orig_file_id, session)
- old_filename = os.path.join(oldf.location.path, oldf.filename)
- old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum,
- 'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
-
- new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
-
- # TODO: Care about size/md5sum collisions etc
- (found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
-
- # TODO: Uhm, what happens if newf isn't None - something has gone badly and we should cope
- if newf is None:
- utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
- newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
-
- session.flush()
-
-                # Don't reference the old file from this changes
-                # (rebuild the list rather than removing while iterating)
-                poolfiles = [p for p in poolfiles if p.file_id != oldf.file_id]
-
- poolfiles.append(newf)
-
- # Fix up the DSC references
- toremove = []
-
- for df in source.srcfiles:
- if df.poolfile.file_id == oldf.file_id:
- # Add a new DSC entry and mark the old one for deletion
- # Don't do it in the loop so we don't change the thing we're iterating over
- newdscf = DSCFile()
- newdscf.source_id = source.source_id
- newdscf.poolfile_id = newf.file_id
- session.add(newdscf)
-
- toremove.append(df)
-
- for df in toremove:
- session.delete(df)
-
- # Flush our changes
- session.flush()
-
- # Make sure that our source object is up-to-date
- session.expire(source)
-
- # Add changelog information to the database
- self.store_changelog()
-
- # Install the files into the pool
- for newfile, entry in self.pkg.files.items():
- destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
- utils.move(newfile, destination)
- self.logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
- stats.accept_bytes += float(entry["size"])
-
-        # Copy the .changes file across for suites which need it.
- copy_changes = dict([(x.copychanges, '')
- for x in session.query(Suite).filter(Suite.suite_name.in_(self.pkg.changes["distribution"].keys())).all()
- if x.copychanges is not None])
-
- for dest in copy_changes.keys():
- utils.copy(self.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
-
- # We're done - commit the database changes
- session.commit()
- # Our SQL session will automatically start a new transaction after
- # the last commit
-
- # Now ensure that the metadata has been added
- # This has to be done after we copy the files into the pool
- # For source if we have it:
- if self.pkg.changes["architecture"].has_key("source"):
- import_metadata_into_db(source, session)
-
- # Now for any of our binaries
- for b in binaries:
- import_metadata_into_db(b, session)
-
- session.commit()
-
- # Move the .changes into the 'done' directory
- ye, mo, da = time.gmtime()[0:3]
- donedir = os.path.join(cnf["Dir::Done"], str(ye), "%0.2d" % mo, "%0.2d" % da)
- if not os.path.isdir(donedir):
- os.makedirs(donedir)
-
- utils.move(self.pkg.changes_file,
- os.path.join(donedir, os.path.basename(self.pkg.changes_file)))
-
- if self.pkg.changes["architecture"].has_key("source"):
- UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
-
- self.update_subst()
- self.Subst["__SUMMARY__"] = summary
- mail_message = utils.TemplateSubst(self.Subst,
- os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
- utils.send_mail(mail_message)
- self.announce(short_summary, 1)
-
- ## Helper stuff for DebBugs Version Tracking
- if cnf.find("Dir::BTSVersionTrack"):
- if self.pkg.changes["architecture"].has_key("source"):
- (fd, temp_filename) = utils.temp_filename(cnf["Dir::BTSVersionTrack"], prefix=".")
- version_history = os.fdopen(fd, 'w')
- version_history.write(self.pkg.dsc["bts changelog"])
- version_history.close()
- filename = "%s/%s" % (cnf["Dir::BTSVersionTrack"],
- self.pkg.changes_file[:-8]+".versions")
- os.rename(temp_filename, filename)
- os.chmod(filename, 0o644)
-
- # Write out the binary -> source mapping.
- (fd, temp_filename) = utils.temp_filename(cnf["Dir::BTSVersionTrack"], prefix=".")
- debinfo = os.fdopen(fd, 'w')
- for name, entry in sorted(self.pkg.files.items()):
- if entry["type"] == "deb":
- line = " ".join([entry["package"], entry["version"],
- entry["architecture"], entry["source package"],
- entry["source version"]])
- debinfo.write(line+"\n")
- debinfo.close()
- filename = "%s/%s" % (cnf["Dir::BTSVersionTrack"],
- self.pkg.changes_file[:-8]+".debinfo")
- os.rename(temp_filename, filename)
- os.chmod(filename, 0o644)
-
- session.commit()
-
- # Set up our copy queues (e.g. buildd queues)
- for suite_name in self.pkg.changes["distribution"].keys():
- suite = get_suite(suite_name, session)
- for q in suite.copy_queues:
- for f in poolfiles:
- q.add_file_from_pool(f)
-
- session.commit()
-
- # Finally...
- stats.accept_count += 1
def check_override(self):
"""
utils.send_mail(mail_message)
del self.Subst["__SUMMARY__"]
- ###########################################################################
-
- def remove(self, from_dir=None):
- """
- Used (for instance) in p-u to remove the package from unchecked
-
- Also removes the package from holding area.
- """
- if from_dir is None:
- from_dir = self.pkg.directory
- h = Holding()
-
- for f in self.pkg.files.keys():
- os.unlink(os.path.join(from_dir, f))
- if os.path.exists(os.path.join(h.holding_dir, f)):
- os.unlink(os.path.join(h.holding_dir, f))
-
- os.unlink(os.path.join(from_dir, self.pkg.changes_file))
- if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
- os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
-
- ###########################################################################
-
- def move_to_queue (self, queue):
- """
- Move files to a destination queue using the permissions in the table
- """
- h = Holding()
- utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
- queue.path, perms=int(queue.change_perms, 8))
- for f in self.pkg.files.keys():
- utils.move(os.path.join(h.holding_dir, f), queue.path, perms=int(queue.perms, 8))
-
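
queue.perms and queue.change_perms are stored as octal permission strings, hence the int(..., 8) conversions above; for example:

    assert int("0660", 8) == 0o660   # octal string -> numeric mode (432 decimal)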
- ###########################################################################
-
- def force_reject(self, reject_files):
- """
- Forcefully move files from the current directory to the
- reject directory. If any file already exists in the reject
- directory it will be moved to the morgue to make way for
- the new file.
-
- @type reject_files: dict
- @param reject_files: file dictionary
-
- """
-
- cnf = Config()
-
- for file_entry in reject_files:
- # Skip any files which don't exist or which we don't have permission to copy.
- if os.access(file_entry, os.R_OK) == 0:
- continue
-
- dest_file = os.path.join(cnf["Dir::Reject"], file_entry)
-
- try:
- dest_fd = os.open(dest_file, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o644)
- except OSError as e:
- # File exists? Let's find a new name by adding a number
- if e.errno == errno.EEXIST:
- try:
- dest_file = utils.find_next_free(dest_file, 255)
- except NoFreeFilenameError:
- # Something's either gone badly Pete Tong, or
- # someone is trying to exploit us.
- utils.warn("**WARNING** failed to find a free filename for %s in %s." % (file_entry, cnf["Dir::Reject"]))
- return
-
- # Make sure we really got it
- try:
- dest_fd = os.open(dest_file, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0o644)
- except OSError as e:
- # Likewise
- utils.warn("**WARNING** failed to claim %s in the reject directory." % (file_entry))
- return
- else:
- raise
- # If we got here, we own the destination file, so we can
- # safely overwrite it.
- utils.move(file_entry, dest_file, 1, perms=0o660)
- os.close(dest_fd)
-
- ###########################################################################
- def do_reject (self, manual=0, reject_message="", notes=""):
- """
- Reject an upload. If called without a reject message or C{manual} is
- true, spawn an editor so the user can write one.
-
- @type manual: bool
- @param manual: manual or automated rejection
-
- @type reject_message: string
- @param reject_message: A reject message
-
- @return: 0
-
- """
- # If we weren't given a manual rejection message, spawn an
- # editor so the user can add one in...
- if manual and not reject_message:
- (fd, temp_filename) = utils.temp_filename()
- temp_file = os.fdopen(fd, 'w')
- if len(notes) > 0:
- for note in notes:
- temp_file.write("\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s" \
- % (note.author, note.version, note.notedate, note.comment))
- temp_file.close()
- editor = os.environ.get("EDITOR","vi")
- answer = 'E'
- while answer == 'E':
- os.system("%s %s" % (editor, temp_filename))
- temp_fh = utils.open_file(temp_filename)
- reject_message = "".join(temp_fh.readlines())
- temp_fh.close()
- print "Reject message:"
-            print utils.prefix_multi_line_string(reject_message, " ", include_blank_lines=1)
- prompt = "[R]eject, Edit, Abandon, Quit ?"
- answer = "XXX"
- while prompt.find(answer) == -1:
- answer = utils.our_raw_input(prompt)
- m = re_default_answer.search(prompt)
- if answer == "":
- answer = m.group(1)
- answer = answer[:1].upper()
- os.unlink(temp_filename)
- if answer == 'A':
- return 1
- elif answer == 'Q':
- sys.exit(0)
-
- print "Rejecting.\n"
-
- cnf = Config()
-
- reason_filename = self.pkg.changes_file[:-8] + ".reason"
- reason_filename = os.path.join(cnf["Dir::Reject"], reason_filename)
- changesfile = os.path.join(cnf["Dir::Reject"], self.pkg.changes_file)
-
- # Move all the files into the reject directory
- reject_files = self.pkg.files.keys() + [self.pkg.changes_file]
- self.force_reject(reject_files)
-
- # Change permissions of the .changes file to be world readable
- try:
- os.chmod(changesfile, os.stat(changesfile).st_mode | stat.S_IROTH)
-        except OSError as e:
-            # Ignore 'Operation not permitted' (EPERM); don't shadow the errno module.
-            if e.errno != errno.EPERM:
-                raise
-
- # If we fail here someone is probably trying to exploit the race
- # so let's just raise an exception ...
- if os.path.exists(reason_filename):
- os.unlink(reason_filename)
- reason_fd = os.open(reason_filename, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0o644)
-
- rej_template = os.path.join(cnf["Dir::Templates"], "queue.rejected")
-
- self.update_subst()
- if not manual:
- self.Subst["__REJECTOR_ADDRESS__"] = cnf["Dinstall::MyEmailAddress"]
- self.Subst["__MANUAL_REJECT_MESSAGE__"] = ""
- self.Subst["__CC__"] = "X-DAK-Rejection: automatic (moo)"
- os.write(reason_fd, reject_message)
- reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
- else:
- # Build up the rejection email
- user_email_address = utils.whoami() + " <%s>" % (cnf["Dinstall::MyAdminAddress"])
- self.Subst["__REJECTOR_ADDRESS__"] = user_email_address
- self.Subst["__MANUAL_REJECT_MESSAGE__"] = reject_message
- self.Subst["__REJECT_MESSAGE__"] = ""
- self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]
- reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
- # Write the rejection email out as the <foo>.reason file
- os.write(reason_fd, reject_mail_message)
-
- del self.Subst["__REJECTOR_ADDRESS__"]
- del self.Subst["__MANUAL_REJECT_MESSAGE__"]
- del self.Subst["__CC__"]
-
- os.close(reason_fd)
-
- # Send the rejection mail
- utils.send_mail(reject_mail_message)
-
- if self.logger:
- self.logger.log(["rejected", self.pkg.changes_file])
-
- stats = SummaryStats()
- stats.reject_count += 1
- return 0
-
- ################################################################################
- def in_override_p(self, package, component, suite, binary_type, filename, session):
- """
- Check if a package already has override entries in the DB
-
- @type package: string
- @param package: package name
-
-        @type component: string
-        @param component: component name
-
-        @type suite: string
-        @param suite: suite name
-
- @type binary_type: string
- @param binary_type: type of the package
-
- @type filename: string
- @param filename: filename we check
-
-        @return: the database result. But no one cares anyway.
-
- """
-
- cnf = Config()
-
- if binary_type == "": # must be source
- file_type = "dsc"
- else:
- file_type = binary_type
-
- # Override suite name; used for example with proposed-updates
- oldsuite = get_suite(suite, session)
-        if oldsuite is not None and oldsuite.overridesuite:
- suite = oldsuite.overridesuite
-
- result = get_override(package, suite, component, file_type, session)
-
- # If checking for a source package fall back on the binary override type
- if file_type == "dsc" and len(result) < 1:
- result = get_override(package, suite, component, ['deb', 'udeb'], session)
-
- # Remember the section and priority so we can check them later if appropriate
- if len(result) > 0:
- result = result[0]
- self.pkg.files[filename]["override section"] = result.section.section
- self.pkg.files[filename]["override priority"] = result.priority.priority
- return result
-
- return None
-
################################################################################
def get_anyversion(self, sv_list, suite):
"""
self.rejects.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
################################################################################
- def check_binary_against_db(self, filename, session):
- # Ensure version is sane
- self.cross_suite_version_check( \
- get_suite_version_by_package(self.pkg.files[filename]["package"], \
- self.pkg.files[filename]["architecture"], session),
- filename, self.pkg.files[filename]["version"], sourceful=False)
-
- # Check for any existing copies of the file
- q = session.query(DBBinary).filter_by(package=self.pkg.files[filename]["package"])
- q = q.filter_by(version=self.pkg.files[filename]["version"])
- q = q.join(Architecture).filter_by(arch_string=self.pkg.files[filename]["architecture"])
-
- if q.count() > 0:
- self.rejects.append("%s: can not overwrite existing copy already in the archive." % filename)
-
- ################################################################################
-
- def check_source_against_db(self, filename, session):
- source = self.pkg.dsc.get("source")
- version = self.pkg.dsc.get("version")
-
- # Ensure version is sane
- self.cross_suite_version_check( \
- get_suite_version_by_source(source, session), filename, version,
- sourceful=True)
-
- ################################################################################
- def check_dsc_against_db(self, filename, session):
- """
-
- @warning: NB: this function can remove entries from the 'files' index [if
- the orig tarball is a duplicate of the one in the archive]; if
- you're iterating over 'files' and call this function as part of
- the loop, be sure to add a check to the top of the loop to
- ensure you haven't just tried to dereference the deleted entry.
- """
-
- Cnf = Config()
- self.pkg.orig_files = {} # XXX: do we need to clear it?
- orig_files = self.pkg.orig_files
-
- # Try and find all files mentioned in the .dsc. This has
- # to work harder to cope with the multiple possible
- # locations of an .orig.tar.gz.
- # The ordering on the select is needed to pick the newest orig
- # when it exists in multiple places.
- for dsc_name, dsc_entry in self.pkg.dsc_files.items():
- found = None
- if self.pkg.files.has_key(dsc_name):
- actual_md5 = self.pkg.files[dsc_name]["md5sum"]
- actual_size = int(self.pkg.files[dsc_name]["size"])
- found = "%s in incoming" % (dsc_name)
-
- # Check the file does not already exist in the archive
- ql = get_poolfile_like_name(dsc_name, session)
-
-                # Strip out anything that doesn't end in the filename
-                # (rebuild the list rather than removing while iterating)
-                ql = [i for i in ql if i.filename.endswith(dsc_name)]
-
- # "[dak] has not broken them. [dak] has fixed a
- # brokenness. Your crappy hack exploited a bug in
- # the old dinstall.
- #
- # "(Come on! I thought it was always obvious that
- # one just doesn't release different files with
- # the same name and version.)"
- # -- ajk@ on d-devel@l.d.o
-
- if len(ql) > 0:
- # Ignore exact matches for .orig.tar.gz
- match = 0
- if re_is_orig_source.match(dsc_name):
- for i in ql:
- if self.pkg.files.has_key(dsc_name) and \
- int(self.pkg.files[dsc_name]["size"]) == int(i.filesize) and \
- self.pkg.files[dsc_name]["md5sum"] == i.md5sum:
- self.warnings.append("ignoring %s, since it's already in the archive." % (dsc_name))
- # TODO: Don't delete the entry, just mark it as not needed
- # This would fix the stupidity of changing something we often iterate over
- # whilst we're doing it
- del self.pkg.files[dsc_name]
- dsc_entry["files id"] = i.file_id
- if not orig_files.has_key(dsc_name):
- orig_files[dsc_name] = {}
- orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
- match = 1
-
- # Don't bitch that we couldn't find this file later
- try:
- self.later_check_files.remove(dsc_name)
- except ValueError:
- pass
-
-
- if not match:
- self.rejects.append("can not overwrite existing copy of '%s' already in the archive." % (dsc_name))
-
- elif re_is_orig_source.match(dsc_name):
- # Check in the pool
- ql = get_poolfile_like_name(dsc_name, session)
-
-                # Strip out anything that doesn't end in the filename
-                # TODO: Shouldn't we just search for things which end with our string explicitly in the SQL?
-                ql = [i for i in ql if i.filename.endswith(dsc_name)]
-
- if len(ql) > 0:
- # Unfortunately, we may get more than one match here if,
- # for example, the package was in potato but had an -sa
- # upload in woody. So we need to choose the right one.
-
- # default to something sane in case we don't match any or have only one
- x = ql[0]
-
- if len(ql) > 1:
- for i in ql:
- old_file = os.path.join(i.location.path, i.filename)
- old_file_fh = utils.open_file(old_file)
- actual_md5 = apt_pkg.md5sum(old_file_fh)
- old_file_fh.close()
- actual_size = os.stat(old_file)[stat.ST_SIZE]
- if actual_md5 == dsc_entry["md5sum"] and actual_size == int(dsc_entry["size"]):
- x = i
-
-                    old_file = os.path.join(x.location.path, x.filename)
- old_file_fh = utils.open_file(old_file)
- actual_md5 = apt_pkg.md5sum(old_file_fh)
- old_file_fh.close()
- actual_size = os.stat(old_file)[stat.ST_SIZE]
- found = old_file
- suite_type = x.location.archive_type
- # need this for updating dsc_files in install()
- dsc_entry["files id"] = x.file_id
- # See install() in process-accepted...
- if not orig_files.has_key(dsc_name):
- orig_files[dsc_name] = {}
- orig_files[dsc_name]["id"] = x.file_id
- orig_files[dsc_name]["path"] = old_file
- orig_files[dsc_name]["location"] = x.location.location_id
- else:
- # TODO: Determine queue list dynamically
- # Not there? Check the queue directories...
- for queue_name in [ "byhand", "new", "proposedupdates", "oldproposedupdates", "embargoed", "unembargoed" ]:
- queue = get_policy_queue(queue_name, session)
- if not queue:
- continue
-
- in_otherdir = os.path.join(queue.path, dsc_name)
-
- if os.path.exists(in_otherdir):
- in_otherdir_fh = utils.open_file(in_otherdir)
- actual_md5 = apt_pkg.md5sum(in_otherdir_fh)
- in_otherdir_fh.close()
- actual_size = os.stat(in_otherdir)[stat.ST_SIZE]
- found = in_otherdir
- if not orig_files.has_key(dsc_name):
- orig_files[dsc_name] = {}
- orig_files[dsc_name]["path"] = in_otherdir
-
- if not found:
- self.rejects.append("%s refers to %s, but I can't find it in the queue or in the pool." % (filename, dsc_name))
- continue
- else:
- self.rejects.append("%s refers to %s, but I can't find it in the queue." % (filename, dsc_name))
- continue
- if actual_md5 != dsc_entry["md5sum"]:
- self.rejects.append("md5sum for %s doesn't match %s." % (found, filename))
- if actual_size != int(dsc_entry["size"]):
- self.rejects.append("size for %s doesn't match %s." % (found, filename))
-
- ################################################################################
- # This is used by process-new and process-holding to recheck a changes file
- # at the time we're running. It mainly wraps various other internal functions
- # and is similar to accepted_checks - these should probably be tidied up
- # and combined
- def recheck(self, session):
- cnf = Config()
- for f in self.pkg.files.keys():
-            # The .orig.tar.gz can disappear out from under us if it's a
- # duplicate of one in the archive.
- if not self.pkg.files.has_key(f):
- continue
-
- entry = self.pkg.files[f]
-
- # Check that the source still exists
- if entry["type"] == "deb":
- source_version = entry["source version"]
- source_package = entry["source package"]
- if not self.pkg.changes["architecture"].has_key("source") \
- and not source_exists(source_package, source_version, \
- suites = self.pkg.changes["distribution"].keys(), session = session):
- source_epochless_version = re_no_epoch.sub('', source_version)
- dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
- found = False
- for queue_name in ["embargoed", "unembargoed", "newstage"]:
- queue = get_policy_queue(queue_name, session)
- if queue and os.path.exists(os.path.join(queue.path, dsc_filename)):
- found = True
- if not found:
- self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, f))
-
- # Version and file overwrite checks
- if entry["type"] == "deb":
- self.check_binary_against_db(f, session)
- elif entry["type"] == "dsc":
- self.check_source_against_db(f, session)
- self.check_dsc_against_db(f, session)
-
- ################################################################################
def accepted_checks(self, overwrite_checks, session):
        # Recheck anything that relies on the database, since that's not
        # frozen between accept and our run time when called from p-a.
propogate={}
nopropogate={}
- # Find the .dsc (again)
- dsc_filename = None
- for f in self.pkg.files.keys():
- if self.pkg.files[f]["type"] == "dsc":
- dsc_filename = f
-
for checkfile in self.pkg.files.keys():
            # The .orig.tar.gz can disappear out from under us if it's a
# duplicate of one in the archive.
entry = self.pkg.files[checkfile]
- # Check that the source still exists
- if entry["type"] == "deb":
- source_version = entry["source version"]
- source_package = entry["source package"]
- if not self.pkg.changes["architecture"].has_key("source") \
- and not source_exists(source_package, source_version, \
- suites = self.pkg.changes["distribution"].keys(), \
- session = session):
- self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, checkfile))
-
- # Version and file overwrite checks
- if overwrite_checks:
- if entry["type"] == "deb":
- self.check_binary_against_db(checkfile, session)
- elif entry["type"] == "dsc":
- self.check_source_against_db(checkfile, session)
- self.check_dsc_against_db(dsc_filename, session)
-
            # propagate if the package is in the override tables:
for suite in self.pkg.changes.get("propdistribution", {}).keys():
if self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile, session):
for suite in self.pkg.changes["distribution"].keys():
if not self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile, session):
self.rejects.append("%s is NEW for %s." % (checkfile, suite))
-
- ################################################################################
- # If any file of an upload has a recent mtime then chances are good
- # the file is still being uploaded.
-
- def upload_too_new(self):
- cnf = Config()
- too_new = False
- # Move back to the original directory to get accurate time stamps
- cwd = os.getcwd()
- os.chdir(self.pkg.directory)
- file_list = self.pkg.files.keys()
- file_list.extend(self.pkg.dsc_files.keys())
- file_list.append(self.pkg.changes_file)
- for f in file_list:
- try:
- last_modified = time.time()-os.path.getmtime(f)
- if last_modified < int(cnf["Dinstall::SkipTime"]):
- too_new = True
- break
- except:
- pass
-
- os.chdir(cwd)
- return too_new
-
- def store_changelog(self):
-
- # Skip binary-only upload if it is not a bin-NMU
- if not self.pkg.changes['architecture'].has_key('source'):
- from daklib.regexes import re_bin_only_nmu
- if not re_bin_only_nmu.search(self.pkg.changes['version']):
- return
-
- session = DBConn().session()
-
- # Check if upload already has a changelog entry
- query = """SELECT changelog_id FROM changes WHERE source = :source
- AND version = :version AND architecture = :architecture AND changelog_id != 0"""
- if session.execute(query, {'source': self.pkg.changes['source'], \
- 'version': self.pkg.changes['version'], \
- 'architecture': " ".join(self.pkg.changes['architecture'].keys())}).rowcount:
- session.commit()
- return
-
- # Add current changelog text into changelogs_text table, return created ID
- query = "INSERT INTO changelogs_text (changelog) VALUES (:changelog) RETURNING id"
- ID = session.execute(query, {'changelog': self.pkg.changes['changes']}).fetchone()[0]
-
- # Link ID to the upload available in changes table
- query = """UPDATE changes SET changelog_id = :id WHERE source = :source
- AND version = :version AND architecture = :architecture"""
- session.execute(query, {'id': ID, 'source': self.pkg.changes['source'], \
- 'version': self.pkg.changes['version'], \
- 'architecture': " ".join(self.pkg.changes['architecture'].keys())})
-
- session.commit()
from dbconn import DBConn, get_architecture, get_component, get_suite, \
get_override_type, Keyring, session_wrapper, \
get_active_keyring_paths, get_primary_keyring_path, \
- get_suite_architectures, get_or_set_metadatakey, DBSource
+ get_suite_architectures, get_or_set_metadatakey, DBSource, \
+ Component, Override, OverrideType
from sqlalchemy import desc
from dak_exceptions import *
from gpg import SignedFile
################################################################################
-def check_dsc_files(dsc_filename, dsc=None, dsc_files=None):
+def check_dsc_files(dsc_filename, dsc, dsc_files):
"""
Verify that the files listed in the Files field of the .dsc are
those expected given the announced Format.
"""
rejmsg = []
- # Parse the file if needed
- if dsc is None:
- dsc = parse_changes(dsc_filename, signing_rules=1, dsc_file=1);
-
- if dsc_files is None:
- dsc_files = build_file_list(dsc, is_a_dsc=1)
-
# Ensure .dsc lists proper set of source files according to the format
# announced
has = defaultdict(lambda: 0)
(r'orig-.+\.tar\.(gz|bz2|xz)', ('more_orig_tar',)),
)
- for f in dsc_files.keys():
+ for f in dsc_files:
m = re_issource.match(f)
if not m:
rejmsg.append("%s: %s in Files field not recognised as source."
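
Since check_dsc_files no longer parses on demand, callers must supply a
parsed dsc and its file list up front. A sketch of the new calling
convention, reusing parse_changes and build_file_list exactly as the removed
fallback did (error handling elided):

    # Parse first, then check; check_dsc_files returns a list of reject messages.
    dsc = parse_changes(dsc_filename, signing_rules=1, dsc_file=1)
    dsc_files = build_file_list(dsc, is_a_dsc=1)
    rejmsg = check_dsc_files(dsc_filename, dsc, dsc_files)
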
################################################################################
-def poolify (source, component):
- if component:
- component += '/'
+def poolify (source, component=None):
if source[:3] == "lib":
- return component + source[:4] + '/' + source + '/'
+ return source[:4] + '/' + source + '/'
else:
- return component + source[:1] + '/' + source + '/'
+ return source[:1] + '/' + source + '/'
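
With the component parameter now ignored, poolify returns only the
name-derived pool subdirectory and callers prepend the component themselves,
matching the CONCAT('./pool/', c.name, '/', f.filename) pattern in the
updated queries. For illustration:

    poolify("bash")    # -> "b/bash/"
    poolify("libfoo")  # -> "libf/libfoo/"  (lib* sources keep a four-char dir)
    # Callers now add the component prefix themselves, e.g.:
    # "pool/" + "main/" + poolify("bash")  -> "pool/main/b/bash/"
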
################################################################################
def check_reverse_depends(removals, suite, arches=None, session=None, cruft=False):
dbsuite = get_suite(suite, session)
+ overridesuite = dbsuite
+ if dbsuite.overridesuite is not None:
+ overridesuite = get_suite(dbsuite.overridesuite, session)
dep_problem = 0
p2c = {}
all_broken = {}
FROM binaries b
JOIN bin_associations ba ON b.id = ba.bin AND ba.suite = :suite_id
JOIN source s ON b.source = s.id
- JOIN files f ON b.file = f.id
- JOIN location l ON f.location = l.id
- JOIN component c ON l.component = c.id
+ JOIN files_archive_map af ON b.file = af.file_id
+ JOIN component c ON af.component_id = c.id
WHERE b.architecture = :arch_id'''
query = session.query('id', 'package', 'source', 'component', 'depends', 'provides'). \
from_statement(statement).params(params)
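
This is the pattern the multi-archive schema imposes throughout: a file's
component is reached via files_archive_map (one row per archive a file is
mapped into) instead of the dropped location table. A minimal standalone
sketch of the same join, with an explicit archive filter as the other
updated queries use (illustrative only):

    # Look up filename/component pairs for one archive; uses the same
    # files_archive_map -> component -> archive joins as the query above.
    statement = '''
        SELECT f.filename, c.name AS component
        FROM files f
        JOIN files_archive_map af ON f.id = af.file_id
        JOIN component c ON af.component_id = c.id
        JOIN archive ON af.archive_id = archive.id
        WHERE archive.name = :archive_name'''
    rows = session.execute(statement, {'archive_name': 'ftp-master'}).fetchall()
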
if dep_package in removals:
unsat += 1
if unsat == len(dep):
- component = DBSource.get(source_id, session).get_component_name()
+ component, = session.query(Component.component_name) \
+ .join(Component.overrides) \
+ .filter(Override.suite == overridesuite) \
+ .filter(Override.package == source) \
+ .join(Override.overridetype).filter(OverrideType.overridetype == 'dsc') \
+ .first()
if component != "main":
source = "%s/%s" % (source, component)
all_broken.setdefault(source, set()).add(pp_deps(dep))
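
The component of a broken source is likewise no longer read off DBSource; it
comes from the override table of the (override)suite. A hedged helper form
of the same query (the name and the None handling are illustrative; unlike
the tuple-unpacking above, it does not assume a 'dsc' override row always
exists):

    def source_component(session, overridesuite, source):
        # Returns the component name recorded in the suite's 'dsc' overrides,
        # or None when the source has no override entry there.
        row = session.query(Component.component_name) \
            .join(Component.overrides) \
            .filter(Override.suite == overridesuite) \
            .filter(Override.package == source) \
            .join(Override.overridetype) \
            .filter(OverrideType.overridetype == 'dsc') \
            .first()
        return row[0] if row is not None else None
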