git.decadent.org.uk Git - dak.git/commitdiff
Merge branch 'pu/xz-for-indices'
author Ansgar Burchardt <ansgar@debian.org>
Sun, 27 Oct 2013 15:20:41 +0000 (16:20 +0100)
committer Ansgar Burchardt <ansgar@debian.org>
Sun, 27 Oct 2013 15:20:41 +0000 (16:20 +0100)
Conflicts:
dak/generate_packages_sources2.py
daklib/filewriter.py
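
Going by its name and by where the conflicts landed (generate_packages_sources2.py and daklib/filewriter.py), the merged 'pu/xz-for-indices' branch adds xz-compressed variants of the generated index files (Packages, Sources and the like) next to the existing gzip and bzip2 ones. Reduced to a minimal shell sketch, this is the idea (illustrative only, not dak's actual filewriter code):

# Illustrative only: write .gz, .bz2 and .xz variants of an index file
# next to each other using the standard command-line tools.
for index in Packages Sources; do
    [ -f "$index" ] || continue           # nothing to do without the plain index
    gzip  -9cn "$index" > "$index.gz"     # existing gzip variant
    bzip2 -9c  "$index" > "$index.bz2"    # existing bzip2 variant
    xz    -9c  "$index" > "$index.xz"     # the xz variant this branch adds
done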

298 files changed:
config/backports/Contents.top [deleted file]
config/backports/apt.conf [deleted file]
config/backports/common [deleted file]
config/backports/cron.daily [deleted file]
config/backports/cron.dinstall [deleted file]
config/backports/cron.hourly [deleted file]
config/backports/cron.monthly [deleted file]
config/backports/cron.reboot [deleted file]
config/backports/cron.unchecked [deleted file]
config/backports/cron.weekly [deleted file]
config/backports/dak.conf [deleted file]
config/backports/dak.conf-etc [deleted file]
config/backports/dinstall.functions [deleted file]
config/backports/dinstall.variables [deleted file]
config/backports/removalsrss.rc [deleted file]
config/backports/vars [deleted file]
config/debian-security/apache.conf
config/debian-security/apt.conf [deleted file]
config/debian-security/apt.conf.buildd [deleted file]
config/debian-security/cron.buildd
config/debian-security/cron.daily
config/debian-security/cron.hourly
config/debian-security/cron.unchecked
config/debian-security/cron.weekly
config/debian-security/dak.conf
config/debian-security/dak.conf-etc
config/debian-security/export.sh
config/debian-security/map.sh
config/debian-security/vars
config/debian/apache.conf-ftp
config/debian/apache.conf-incoming
config/debian/apt.conf [deleted file]
config/debian/apt.conf.buildd [deleted file]
config/debian/common
config/debian/cron.daily
config/debian/cron.dinstall
config/debian/cron.hourly
config/debian/cron.monthly
config/debian/cron.reboot
config/debian/cron.unchecked
config/debian/cron.weekly
config/debian/crontab
config/debian/dak.conf
config/debian/dak.conf-backports [new file with mode: 0644]
config/debian/dak.conf-dak [new file with mode: 0644]
config/debian/dak.conf-etc
config/debian/dinstall.functions
config/debian/dinstall.variables
config/debian/lintian.tags
config/debian/robots.txt-incoming [new file with mode: 0644]
config/debian/vars
config/examples/dak.conf
config/homedir/.bashrc
config/homedir/ssh/ftpmaster-authorized_keys [new file with mode: 0644]
config/homedir/ssh/ftpmaster-config [new file with mode: 0644]
config/homedir/ssh/ftpmaster-mirror-authorized_keys [new file with mode: 0644]
config/homedir/syncdd.sh [deleted file]
dak/acl.py [new file with mode: 0644]
dak/admin.py
dak/check_archive.py
dak/check_overrides.py
dak/clean_queues.py
dak/clean_suites.py
dak/contents.py
dak/control_suite.py
dak/cruft_report.py
dak/dak.py
dak/dakdb/update1.py [changed mode: 0755->0644]
dak/dakdb/update10.py [changed mode: 0755->0644]
dak/dakdb/update100.py [new file with mode: 0644]
dak/dakdb/update11.py [changed mode: 0755->0644]
dak/dakdb/update12.py [changed mode: 0755->0644]
dak/dakdb/update13.py [changed mode: 0755->0644]
dak/dakdb/update14.py [changed mode: 0755->0644]
dak/dakdb/update15.py [changed mode: 0755->0644]
dak/dakdb/update16.py [changed mode: 0755->0644]
dak/dakdb/update17.py [changed mode: 0755->0644]
dak/dakdb/update18.py [changed mode: 0755->0644]
dak/dakdb/update19.py [changed mode: 0755->0644]
dak/dakdb/update2.py [changed mode: 0755->0644]
dak/dakdb/update20.py [changed mode: 0755->0644]
dak/dakdb/update21.py [changed mode: 0755->0644]
dak/dakdb/update22.py [changed mode: 0755->0644]
dak/dakdb/update23.py [changed mode: 0755->0644]
dak/dakdb/update24.py [changed mode: 0755->0644]
dak/dakdb/update25.py [changed mode: 0755->0644]
dak/dakdb/update26.py [changed mode: 0755->0644]
dak/dakdb/update27.py [changed mode: 0755->0644]
dak/dakdb/update28.py [changed mode: 0755->0644]
dak/dakdb/update3.py [changed mode: 0755->0644]
dak/dakdb/update32.py [changed mode: 0755->0644]
dak/dakdb/update37.py [changed mode: 0755->0644]
dak/dakdb/update38.py [changed mode: 0755->0644]
dak/dakdb/update4.py [changed mode: 0755->0644]
dak/dakdb/update40.py [changed mode: 0755->0644]
dak/dakdb/update41.py [changed mode: 0755->0644]
dak/dakdb/update42.py [changed mode: 0755->0644]
dak/dakdb/update43.py [changed mode: 0755->0644]
dak/dakdb/update44.py [changed mode: 0755->0644]
dak/dakdb/update45.py [changed mode: 0755->0644]
dak/dakdb/update46.py [changed mode: 0755->0644]
dak/dakdb/update47.py [changed mode: 0755->0644]
dak/dakdb/update48.py [changed mode: 0755->0644]
dak/dakdb/update49.py [changed mode: 0755->0644]
dak/dakdb/update5.py [changed mode: 0755->0644]
dak/dakdb/update50.py [changed mode: 0755->0644]
dak/dakdb/update51.py [changed mode: 0755->0644]
dak/dakdb/update52.py [changed mode: 0755->0644]
dak/dakdb/update53.py [changed mode: 0755->0644]
dak/dakdb/update54.py [changed mode: 0755->0644]
dak/dakdb/update55.py [changed mode: 0755->0644]
dak/dakdb/update56.py [changed mode: 0755->0644]
dak/dakdb/update57.py [changed mode: 0755->0644]
dak/dakdb/update58.py [changed mode: 0755->0644]
dak/dakdb/update59.py [changed mode: 0755->0644]
dak/dakdb/update6.py [changed mode: 0755->0644]
dak/dakdb/update60.py [changed mode: 0755->0644]
dak/dakdb/update61.py [changed mode: 0755->0644]
dak/dakdb/update62.py [changed mode: 0755->0644]
dak/dakdb/update63.py [changed mode: 0755->0644]
dak/dakdb/update64.py [changed mode: 0755->0644]
dak/dakdb/update65.py [changed mode: 0755->0644]
dak/dakdb/update66.py [changed mode: 0755->0644]
dak/dakdb/update67.py [changed mode: 0755->0644]
dak/dakdb/update68.py [changed mode: 0755->0644]
dak/dakdb/update69.py [changed mode: 0755->0644]
dak/dakdb/update7.py [changed mode: 0755->0644]
dak/dakdb/update70.py [changed mode: 0755->0644]
dak/dakdb/update71.py [changed mode: 0755->0644]
dak/dakdb/update72.py [changed mode: 0755->0644]
dak/dakdb/update73.py [new file with mode: 0644]
dak/dakdb/update74.py [new file with mode: 0644]
dak/dakdb/update75.py [new file with mode: 0644]
dak/dakdb/update76.py [new file with mode: 0644]
dak/dakdb/update77.py [new file with mode: 0644]
dak/dakdb/update78.py [new file with mode: 0644]
dak/dakdb/update79.py [new file with mode: 0755]
dak/dakdb/update8.py [changed mode: 0755->0644]
dak/dakdb/update80.py [new file with mode: 0755]
dak/dakdb/update81.py [new file with mode: 0755]
dak/dakdb/update82.py [new file with mode: 0644]
dak/dakdb/update83.py [new file with mode: 0644]
dak/dakdb/update84.py [new file with mode: 0644]
dak/dakdb/update85.py [new file with mode: 0644]
dak/dakdb/update86.py [new file with mode: 0755]
dak/dakdb/update87.py [new file with mode: 0644]
dak/dakdb/update88.py [new file with mode: 0644]
dak/dakdb/update89.py [new file with mode: 0644]
dak/dakdb/update9.py [changed mode: 0755->0644]
dak/dakdb/update90.py [new file with mode: 0644]
dak/dakdb/update91.py [new file with mode: 0644]
dak/dakdb/update92.py [new file with mode: 0644]
dak/dakdb/update93.py [new file with mode: 0644]
dak/dakdb/update94.py [new file with mode: 0644]
dak/dakdb/update95.py [new file with mode: 0644]
dak/dakdb/update96.py [new file with mode: 0644]
dak/dakdb/update97.py [new file with mode: 0644]
dak/dakdb/update98.py [new file with mode: 0644]
dak/dakdb/update99.py [new file with mode: 0644]
dak/dominate.py
dak/examine_package.py
dak/export.py [new file with mode: 0644]
dak/export_suite.py [new file with mode: 0644]
dak/find_null_maintainers.py [deleted file]
dak/generate_index_diffs.py
dak/generate_packages_sources.py
dak/generate_packages_sources2.py
dak/generate_releases.py
dak/import.py [new file with mode: 0644]
dak/import_keyring.py
dak/import_known_changes.py [deleted file]
dak/import_ldap_fingerprints.py [deleted file]
dak/import_new_files.py [deleted file]
dak/init_dirs.py
dak/ls.py
dak/make_changelog.py
dak/make_maintainers.py
dak/make_pkg_file_mapping.py
dak/manage_build_queues.py
dak/metadata.py [deleted file]
dak/new_security_install.py
dak/override.py
dak/override_disparity.py
dak/process_commands.py [new file with mode: 0644]
dak/process_new.py
dak/process_policy.py
dak/process_upload.py
dak/queue_report.py
dak/rm.py
dak/show_deferred.py
dak/show_new.py
dak/split_done.py [deleted file]
dak/stats.py
dak/transitions.py
dak/update_db.py
daklib/announce.py [new file with mode: 0644]
daklib/archive.py [new file with mode: 0644]
daklib/changesutils.py [deleted file]
daklib/checks.py [new file with mode: 0644]
daklib/command.py [new file with mode: 0644]
daklib/config.py
daklib/contents.py [changed mode: 0755->0644]
daklib/daksubprocess.py [new file with mode: 0644]
daklib/dbconn.py [changed mode: 0755->0644]
daklib/filewriter.py [changed mode: 0755->0644]
daklib/formats.py [changed mode: 0755->0644]
daklib/fstransactions.py [new file with mode: 0644]
daklib/gpg.py
daklib/lintian.py [changed mode: 0755->0644]
daklib/lists.py
daklib/metadata.py [deleted file]
daklib/policy.py [new file with mode: 0644]
daklib/queue.py
daklib/queue_install.py [changed mode: 0755->0644]
daklib/regexes.py [changed mode: 0755->0644]
daklib/srcformats.py [changed mode: 0755->0644]
daklib/textutils.py
daklib/upload.py [new file with mode: 0644]
daklib/utils.py
debian/postinst
docs/README.config
docs/README.first
docs/README.quotes
docs/README.stable-point-release
docs/talks/DebConf9/Makefile [deleted file]
docs/talks/DebConf9/background.jpg [deleted file]
docs/talks/DebConf9/ftpmaster.pdf [deleted file]
docs/talks/DebConf9/ftpmaster.tex [deleted file]
scripts/debian/byhand-debian-faq [new file with mode: 0755]
scripts/debian/byhand-di
scripts/debian/byhand-win32-loader
scripts/debian/ddtp-i18n-check.sh
scripts/debian/dm-monitor [deleted file]
scripts/debian/generate-d-i
scripts/debian/import_dataset.sh
scripts/debian/link_morgue.sh
scripts/debian/rrd-release-freeze-dates
scripts/debian/sync-dd [new file with mode: 0755]
scripts/debian/update-bugdoctxt
scripts/debian/update-buildd-archive [new file with mode: 0755]
scripts/debian/update-mailingliststxt
scripts/debian/update-mirrorlists
scripts/debian/update-pseudopackages.sh
setup/README
setup/core-init.d/020_sections
setup/dak-minimal.conf.template
templates/README
templates/process-accepted.install [deleted file]
templates/process-command.processed [new file with mode: 0644]
templates/process-new.bxa_notification
templates/process-unchecked.accepted
templates/process-unchecked.announce
templates/process-unchecked.bug-close
templates/process-unchecked.bug-experimental-fixed [deleted file]
templates/process-unchecked.bug-nmu-fixed [deleted file]
templates/process-unchecked.new
templates/process-unchecked.override-disparity
templates/queue.rejected
templates/reject-proposed-updates.rejected [deleted file]
templates/rm.bug-close
templates/rm.bug-close-with-related
templates/security-install.advisory [deleted file]
templates/transition.removed
tests/dbtest_packages.py
tests/fixtures/packages/.gitignore [new file with mode: 0644]
tests/fixtures/packages/Makefile [new file with mode: 0644]
tests/fixtures/packages/gpg/pubring.gpg [new file with mode: 0644]
tests/fixtures/packages/gpg/random_seed [new file with mode: 0644]
tests/fixtures/packages/gpg/secring.gpg [new file with mode: 0644]
tests/fixtures/packages/gpg/trustdb.gpg [new file with mode: 0644]
tests/fixtures/packages/nonfree-package-0.1/debian/changelog [new file with mode: 0644]
tests/fixtures/packages/nonfree-package-0.1/debian/compat [new file with mode: 0644]
tests/fixtures/packages/nonfree-package-0.1/debian/control [new file with mode: 0644]
tests/fixtures/packages/nonfree-package-0.1/debian/nonfree-package.install [new file with mode: 0644]
tests/fixtures/packages/nonfree-package-0.1/debian/rules [new file with mode: 0755]
tests/fixtures/packages/nonfree-package-0.1/some-file [new file with mode: 0644]
tests/fixtures/packages/package-0.1/debian/changelog [new file with mode: 0644]
tests/fixtures/packages/package-0.1/debian/compat [new file with mode: 0644]
tests/fixtures/packages/package-0.1/debian/control [new file with mode: 0644]
tests/fixtures/packages/package-0.1/debian/package.install [new file with mode: 0644]
tests/fixtures/packages/package-0.1/debian/rules [new file with mode: 0755]
tests/fixtures/packages/package-0.1/some-file [new file with mode: 0644]
tests/fixtures/packages/package-built-using-0.1/debian/changelog [new file with mode: 0644]
tests/fixtures/packages/package-built-using-0.1/debian/compat [new file with mode: 0644]
tests/fixtures/packages/package-built-using-0.1/debian/control [new file with mode: 0644]
tests/fixtures/packages/package-built-using-0.1/debian/package-built-using.install [new file with mode: 0644]
tests/fixtures/packages/package-built-using-0.1/debian/rules [new file with mode: 0755]
tests/fixtures/packages/package-built-using-0.1/some-file [new file with mode: 0644]
tests/test_daklib_fstransactions.py [new file with mode: 0755]
tests/test_split_uploaders.py
tools/debianqueued-0.9/config
tools/debianqueued-0.9/config-backports
tools/debianqueued-0.9/config-security
tools/debianqueued-0.9/config-security-disembargo [new file with mode: 0644]
tools/debianqueued-0.9/config-security-embargoed [new file with mode: 0644]
tools/debianqueued-0.9/config-upload
tools/debianqueued-0.9/debianqueued
tools/obsolete_lintian_tags.pl [new file with mode: 0755]

diff --git a/config/backports/Contents.top b/config/backports/Contents.top
deleted file mode 100644 (file)
index e03f7a6..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-This file maps each file available in the backports.debian.org archive
-system to the package from which it originates.  It includes packages
-from the DIST distribution for the ARCH architecture.
-
-You can use this list to determine which package contains a specific
-file, or whether or not a specific file is available.  The list is
-updated weekly, each architecture on a different day.
-
-When a file is contained in more than one package, all packages are
-listed.  When a directory is contained in more than one package, only
-the first is listed.
-
-The best way to search quickly for a file is with the Unix `grep'
-utility, as in `grep <regular expression> CONTENTS':
-
- $ grep nose Contents
- etc/nosendfile                                          net/sendfile
- usr/X11R6/bin/noseguy                                   x11/xscreensaver
- usr/X11R6/man/man1/noseguy.1x.gz                        x11/xscreensaver
- usr/doc/examples/ucbmpeg/mpeg_encode/nosearch.param     graphics/ucbmpeg
- usr/lib/cfengine/bin/noseyparker                        admin/cfengine
-
-This list contains files in all packages, even though not all of the
-packages are installed on an actual system at once.  If you want to
-find out which packages on an installed Debian system provide a
-particular file, you can use `dpkg --search <filename>':
-
- $ dpkg --search /usr/bin/dselect
- dpkg: /usr/bin/dselect
-
-
-FILE                                                    LOCATION
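
A small aside on the search recipe above: the apt.conf removed in the next hunk generates these Contents files gzip-compressed (Contents::Compress "gzip"), so on a mirror the same lookup is usually done through zgrep. A minimal illustration (the architecture in the file name is only an example):

# Illustrative only: the grep recipe from the header, applied to the
# gzip-compressed Contents file as found on a mirror.
zgrep nose Contents-amd64.gz
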
diff --git a/config/backports/apt.conf b/config/backports/apt.conf
deleted file mode 100644 (file)
index e9a1e9f..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-Dir
-{
-   ArchiveDir "/srv/backports-master.debian.org/ftp/";
-   OverrideDir "/srv/backports-master.debian.org/scripts/override/";
-   CacheDir "/srv/backports-master.debian.org/database/";
-};
-
-Default
-{
-   Packages::Compress "gzip bzip2";
-   Sources::Compress "gzip bzip2";
-   DeLinkLimit 0;
-   FileMode 0664;
-   Contents::Compress "gzip";
-   MaxContentsChange 12000;
-};
-
-TreeDefault
-{
-   Contents::Header "/srv/backports-master.debian.org/dak/config/backports/Contents.top";
-};
-
-tree "dists/squeeze-backports"
-{
-   FileList "/srv/backports-master.debian.org/database/dists/squeeze-backports_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/backports-master.debian.org/database/dists/squeeze-backports_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "amd64 armel i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 sparc source";
-   BinOverride "override.squeeze-backports.$(SECTION)";
-   ExtraOverride "override.squeeze-backports.extra.$(SECTION)";
-   SrcOverride "override.squeeze-backports.$(SECTION).src";
-};
-
-tree "dists/squeeze-backports/main"
-{
-   FileList "/srv/backports-master.debian.org/database/dists/squeeze-backports_main_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "amd64 armel i386 ia64 kfreebsd-amd64 kfreebsd-i386 mips mipsel powerpc s390 sparc source";
-   BinOverride "override.squeeze-backports.main.$(SECTION)";
-   SrcOverride "override.squeeze-backports.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   Contents "$(DIST)/../Contents-udeb";
-};
diff --git a/config/backports/common b/config/backports/common
deleted file mode 100644 (file)
index 050f506..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-# -*- mode:sh -*-
-# log something (basically echo it together with a timestamp)
-#
-# Set $PROGRAM to a string to have it added to the output.
-function log () {
-        if [ -z "${PROGRAM}" ]; then
-                echo "$(date +"%b %d %H:%M:%S") $(hostname -s) [$$] $@"
-        else
-                echo "$(date +"%b %d %H:%M:%S") $(hostname -s) ${PROGRAM}[$$]: $@"
-        fi
-}
-
-# log the message using log() but then also send a mail
-# to the address configured in MAILTO (if non-empty)
-function log_error () {
-        log "$@"
-        if [ -z "${MAILTO}" ]; then
-                echo "$@" | mail -e -s "[$PROGRAM@$(hostname -s)] ERROR [$$]" ${MAILTO}
-        fi
-}
-
-# debug log, only output when DEBUG=1
-function debug () {
-    if [ $DEBUG -eq 1 ]; then
-        log "$*"
-    fi
-}
-
-function wbtrigger() {
-    SSHOPT="-o BatchMode=yes -o ConnectTimeout=30 -o SetupTimeout=240"
-    if lockfile -r 3 -l 3600 "${LOCK_BUILDD}"; then
-        ssh -q -q ${SSHOPT} wbadm@buildd /org/wanna-build/trigger.often
-    fi
-    rm -f "${LOCK_BUILDD}"
-}
-
-# used by cron.dinstall *and* cron.unchecked.
-function make_buildd_dir () {
-    dak manage-build-queues -a
-
-    for dist in $(ls -1 ${incoming}/dists/); do
-        cd ${incoming}/dists/${dist}
-        mkdir -p tree/${STAMP}
-        cp -al ${incoming}/dists/${dist}/buildd/. tree/${STAMP}/
-        ln -sfT tree/${STAMP} ${incoming}/dists/${dist}/current
-        find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
-    done
-
-}
-
-# Do the unchecked processing, in case we have files.
-function do_unchecked () {
-    cd $unchecked
-
-    changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs)
-    report=$queuedir/REPORT
-    timestamp=$(date "+%Y-%m-%d %H:%M")
-    UNCHECKED_WITHOUT_LOCK=${UNCHECKED_WITHOUT_LOCK:-""}
-
-    echo "$timestamp": ${changes:-"Nothing to do"}  >> $report
-    dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$unchecked" >> $report
-}
-
-# Do the newstage processing, in case we have files.
-function do_newstage () {
-    cd $newstage
-
-    changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs)
-    report=$queuedir/REPORT
-    timestamp=$(date "+%Y-%m-%d %H:%M")
-    UNCHECKED_WITHOUT_LOCK=${UNCHECKED_WITHOUT_LOCK:-""}
-
-    echo "$timestamp": ${changes:-"Nothing to do in newstage"}  >> $report
-    dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$newstage" >> $report
-}
-
-function sync_debbugs () {
-    # sync with debbugs
-    echo "--" >> $report
-    timestamp=$(date "+%Y-%m-%d-%H:%M")
-    mkdir -p $queuedir/bts_version_track_archive/${timestamp}
-    rsync -aq --remove-source-files $queuedir/bts_version_track/ $queuedir/bts_version_track_archive/${timestamp}
-    rmdir --ignore-fail-on-non-empty $queuedir/bts_version_track_archive/${timestamp} # remove if empty.
-    return 0
-    rsync -aq -e "ssh -o Batchmode=yes -o ConnectTimeout=30 -o SetupTimeout=30" --remove-source-files  $queuedir/bts_version_track/ bugs-sync:/org/bugs.debian.org/versions/queue/ftp-master/ 2>/dev/null && touch $lockdir/synced_bts_version || true
-    NOW=$(date +%s)
-    TSTAMP=$(stat -c %Y $lockdir/synced_bts_version)
-    DIFF=$(( NOW - TSTAMP ))
-    if [ $DIFF -ge 259200 ]; then
-        log "Kids, you tried your best and you failed miserably. The lesson is, never try. (Homer Simpson)"
-    fi
-}
-
-function reports() {
-    # Send a report on NEW/BYHAND packages
-    log "Nagging ftpteam about NEW/BYHAND packages"
-    dak queue-report | mail -e -s "NEW and BYHAND on $(date +%D)" team@backports.debian.org
-    # and one on crufty packages
-    log "Sending information about crufty packages"
-    dak cruft-report > $webdir/cruft-report-daily.txt
-#    dak cruft-report -s experimental >> $webdir/cruft-report-daily.txt
-    cat $webdir/cruft-report-daily.txt | mail -e -s "Debian archive cruft report for $(date +%D)" team@backports.debian.org
-}
-
-function pg_timestamp() {
-    tsname=${1:-"unknown"}
-    log "Saving postgres transaction id for ${tsname}"
-    psql -tAc 'select txid_current();' > $base/backup/txid_${tsname}_$(date +%Y.%m.%d-%H:%M:%S)
-}
diff --git a/config/backports/cron.daily b/config/backports/cron.daily
deleted file mode 100755 (executable)
index 147210c..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#! /bin/bash
-#
-
-set -e
-set -u
-
-export SCRIPTVARS=/srv/backports-master.debian.org/dak/config/backports/vars
-. $SCRIPTVARS
-
-################################################################################
-# Clean out old packages
-dak clean-suites -m 10000
-dak clean-queues
-
-# Send a report on NEW/BYHAND packages
-dak queue-report -d new,proposedupdates | mail -e -s "NEW and BYHAND on $(date +%D)" team@backports.debian.org
-# and one on crufty packages
-
-dak cruft-report -m bdo -s squeeze-backports > $webdir/cruft-report-daily.txt
-cat $webdir/cruft-report-daily.txt | mail -e -s "Debian backports archive cruft report for $(date +%D)" team@backports.debian.org
-
-echo Daily cron scripts successful.
diff --git a/config/backports/cron.dinstall b/config/backports/cron.dinstall
deleted file mode 100755 (executable)
index 2c94296..0000000
+++ /dev/null
@@ -1,436 +0,0 @@
-#!/bin/bash
-# No way I try to deal with a crippled sh just for POSIX foo.
-
-# Copyright (C) 2009, 2010 Joerg Jaspert <joerg@debian.org>
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-# Homer: Are you saying you're never going to eat any animal again? What
-#        about bacon?
-# Lisa: No.
-# Homer: Ham?
-# Lisa: No.
-# Homer: Pork chops?
-# Lisa: Dad, those all come from the same animal.
-# Homer: Heh heh heh. Ooh, yeah, right, Lisa. A wonderful, magical animal.
-
-# exit on errors
-set -e
-# make sure to only use defined variables
-set -u
-# ERR traps should be inherited from functions too. (And command
-# substitutions and subshells and whatnot, but for us the functions is
-# the important part here)
-set -E
-
-# import the general variable set.
-export SCRIPTVARS=/srv/backports-master.debian.org/dak/config/backports/vars
-. $SCRIPTVARS
-
-########################################################################
-# Functions                                                            #
-########################################################################
-# common functions are "outsourced"
-. "${configdir}/common"
-
-# source the dinstall functions
-. "${configdir}/dinstall.functions"
-
-########################################################################
-########################################################################
-
-# Function to save which stage we are in, so we can restart an interrupted
-# dinstall. Or even run actions in parallel, if we dare to, by simply
-# backgrounding the call to this function. But that should only really be
-# done for things we don't care much about.
-#
-# This should be called with the first argument being an array, with the
-# members
-#  - FUNC - the function name to call
-#  - ARGS - Possible arguments to hand to the function. Can be the empty string
-#  - TIME - The timestamp name. Can be the empty string
-#  - ERR  - if this is the string false, then the call will be surrounded by
-#           set +e ... set -e calls, so errors in the function do not exit
-#           dinstall. Can be the empty string, meaning true.
-#
-# MAKE SURE TO KEEP THIS THE LAST FUNCTION, AFTER ALL THE VARIOUS ONES
-# ADDED FOR DINSTALL FEATURES!
-function stage() {
-    ARGS='GO[@]'
-    local "${!ARGS}"
-
-    error=${ERR:-"true"}
-
-    STAGEFILE="${stagedir}/${FUNC}"
-    if [ -f "${STAGEFILE}" ]; then
-        stamptime=$(/usr/bin/stat -c %Z "${STAGEFILE}")
-        unixtime=$(date +%s)
-        difference=$(( $unixtime - $stamptime ))
-        if [ ${difference} -ge 14400 ]; then
-            log_error "Did already run ${FUNC}, stagefile exists, but that was ${difference} seconds ago. Please check."
-        else
-            log "Did already run ${FUNC}, not calling again..."
-        fi
-        return
-    fi
-
-    debug "Now calling function ${FUNC}. Arguments: ${ARGS}. Timestamp: ${TIME}"
-
-    # Make sure we are always at the same place. If a function wants to be elsewhere,
-    # it has to cd first!
-    cd ${configdir}
-
-    # Now redirect the output into $STAGEFILE.log. In case it errors out somewhere our
-    # errorhandler trap can then mail the contents of $STAGEFILE.log only, instead of a whole
-    # dinstall logfile. Short error mails ftw!
-    exec >> "${STAGEFILE}.log" 2>&1
-
-    if [ -f "${LOCK_STOP}" ]; then
-        log "${LOCK_STOP} exists, exiting immediately"
-        exit 42
-    fi
-
-    if [ "${error}" = "false" ]; then
-        set +e
-    fi
-    ${FUNC} ${ARGS}
-
-    # No matter what happened in the function, we make sure we have set -e default state back
-    set -e
-
-    # Make sure we are always at the same place.
-    cd ${configdir}
-
-    # We always use the same umask. If a function wants to do different, fine, but we reset.
-    umask 022
-
-    touch "${STAGEFILE}"
-
-    if [ -n "${TIME}" ]; then
-        ts "${TIME}"
-    fi
-
-    # And the output goes back to the normal logfile
-    exec >> "$LOGFILE" 2>&1
-
-    # Now we should make sure that we have a usable dinstall.log, so append the $STAGEFILE.log
-    # to it.
-    cat "${STAGEFILE}.log" >> "${LOGFILE}"
-    rm -f "${STAGEFILE}.log"
-
-    if [ -f "${LOCK_STOP}" ]; then
-        log "${LOCK_STOP} exists, exiting immediately"
-        exit 42
-    fi
-}
-
-########################################################################
-
-# We need logs.
-LOGFILE="$logdir/dinstall.log"
-
-exec >> "$LOGFILE" 2>&1
-
-# And now source our default config
-. "${configdir}/dinstall.variables"
-
-# Make sure we start out with a sane umask setting
-umask 022
-
-# And use one locale, no matter what the caller has set
-export LANG=C
-export LC_ALL=C
-
-# If we did not install new packages, we dont want to run.
-if ! [ -f "${DINSTALLPACKAGES}" ]; then
-    log "nothing to do"
-    exit 0
-fi
-rm -f "${DINSTALLPACKAGES}"
-
-touch "${DINSTALLSTART}"
-ts "startup"
-DINSTALLBEGIN="$(date -u +"%a %b %d %T %Z %Y (%s)")"
-state "Startup"
-
-lockfile -l 3600 "${LOCK_DAILY}"
-trap onerror ERR
-trap cleanup EXIT TERM HUP INT QUIT
-
-touch "${LOCK_BRITNEY}"
-
-GO=(
-    FUNC="savetimestamp"
-    TIME=""
-    ARGS=""
-    ERR="false"
-)
-stage $GO
-
-GO=(
-    FUNC="pg_timestamp"
-    TIME="pg_dump1"
-    ARGS="predinstall"
-    ERR=""
-)
-stage $GO
-
-lockfile "$LOCK_ACCEPTED"
-lockfile "$LOCK_NEW"
-
-GO=(
-    FUNC="punew"
-    TIME="p-u-new"
-    ARGS="proposedupdates"
-    ERR="false"
-)
-stage $GO
-
-GO=(
-    FUNC="newstage"
-    TIME="newstage"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-GO=(
-    FUNC="cruft"
-    TIME="cruft"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-state "indices"
-
-GO=(
-    FUNC="dominate"
-    TIME="dominate"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-GO=(
-    FUNC="filelist"
-    TIME="generate-filelist"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-GO=(
-    FUNC="fingerprints"
-    TIME="import-keyring"
-    ARGS=""
-    ERR="false"
-)
-stage $GO
-
-GO=(
-    FUNC="overrides"
-    TIME="overrides"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-GO=(
-    FUNC="mpfm"
-    TIME="pkg-file-mapping"
-    ARGS=""
-    ERR="false"
-)
-stage $GO
-
-state "packages/contents"
-GO=(
-    FUNC="packages"
-    TIME="apt-ftparchive"
-    ARGS=""
-    ERR=""
-)
-# Careful: When we ever go and remove this monster-long thing, we have to check the backgrounded
-# functions before it. We no longer have a 1.5hour sync point then.
-stage $GO
-
-state "dists/"
-GO=(
-    FUNC="pdiff"
-    TIME="pdiff"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-GO=(
-    FUNC="release"
-    TIME="release files"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-GO=(
-    FUNC="dakcleanup"
-    TIME="cleanup"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-GO=(
-    FUNC="buildd_dir"
-    TIME="buildd_dir"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-state "scripts"
-GO=(
-    FUNC="mkmaintainers"
-    TIME="mkmaintainers"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-GO=(
-    FUNC="copyoverrides"
-    TIME="copyoverrides"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-GO=(
-    FUNC="mklslar"
-    TIME="mklslar"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-GO=(
-    FUNC="mkchecksums"
-    TIME="mkchecksums"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-GO=(
-    FUNC="mirror"
-    TIME="mirror hardlinks"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-rm -f "$LOCK_ACCEPTED"
-rm -f "$LOCK_NEW"
-rm -f "${LOCK_DAILY}"
-
-ts "locked part finished"
-state "postlock"
-
-GO=(
-    FUNC="changelogs"
-    TIME="changelogs"
-    ARGS=""
-    ERR="false"
-)
-stage $GO &
-
-GO=(
-    FUNC="pg_timestamp"
-    TIME="pg_dump2"
-    ARGS="postdinstall"
-    ERR=""
-)
-stage $GO &
-
-GO=(
-    FUNC="expire"
-    TIME="expire_dumps"
-    ARGS=""
-    ERR=""
-)
-#stage $GO &
-
-# GO=(
-#     FUNC="dm"
-#     TIME=""
-#     ARGS=""
-#     ERR=""
-# )
-# stage $GO &
-
-GO=(
-    FUNC="mirrorpush"
-    TIME="mirrorpush"
-    ARGS=""
-    ERR="false"
-)
-stage $GO &
-
-GO=(
-    FUNC="stats"
-    TIME="stats"
-    ARGS=""
-    ERR="false"
-)
-stage $GO &
-
-rm -f "${LOCK_BRITNEY}"
-
-GO=(
-    FUNC="cleantransactions"
-    TIME=""
-    ARGS=""
-    ERR=""
-)
-stage $GO &
-
-# GO=(
-#     FUNC="aptftpcleanup"
-#     TIME="apt-ftparchive cleanup"
-#     ARGS=""
-#     ERR="false"
-# )
-# stage $GO
-
-# we need to wait for the background processes before the end of dinstall
-wait
-
-log "Daily cron scripts successful, all done"
-
-exec > "$logdir/afterdinstall.log" 2>&1
-
-GO=(
-    FUNC="renamelogfile"
-    TIME=""
-    ARGS=""
-    ERR="false"
-)
-stage $GO
-state "all done"
-
-
-# Now, at the very (successful) end of dinstall, make sure we remove
-# our stage files, so the next dinstall run will do it all again.
-rm -f ${stagedir}/*
-touch "${DINSTALLEND}"
diff --git a/config/backports/cron.hourly b/config/backports/cron.hourly
deleted file mode 100755 (executable)
index f973606..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-#! /bin/bash
-#
-# Executed hourly via cron, out of dak's crontab.
-
-set -e
-set -u
-
-export SCRIPTVARS=/srv/backports-master.debian.org/dak/config/backports/vars
-. $SCRIPTVARS
-
-dak import-users-from-passwd
-dak queue-report -n > $webdir/new.html
-dak queue-report -8 -d new,byhand,proposedupdates,oldproposedupdates -r $webdir/stat
-#dak show-deferred -r $webdir/stat > ${webdir}/deferred.html
-dak graph -n new,byhand,proposedupdates,oldproposedupdates,deferred -r $webdir/stat -i $webdir/stat -x $scriptsdir/rrd-release-freeze-dates
-dak show-new > /dev/null
-
-# cd $webdir
-# cat removals-20*.txt > removals-full.txt
-# cat removals.txt >> removals-full.txt
-# cat removals-20*.822 > removals-full.822
-# cat removals.822 >> removals-full.822
-
-#$base/dak/tools/queue_rss.py -q $queuedir/new -o $webdir/rss/ -d $base/misc -l $base/log/
-$base/dak/tools/removals.pl $configdir/removalsrss.rc > $webdir/removals.rss
-
-#$scriptsdir/generate-di
-
-# do the buildd key updates
-BUILDDFUN=$(mktemp -p "${TMPDIR}" BUILDDFUN.XXXXXX)
-exec >> "${BUILDDFUN}" 2>&1
-#${scriptsdir}/buildd-remove-keys
-#${scriptsdir}/buildd-add-keys
-#${scriptsdir}/buildd-prepare-dir
-for keyring in $(dak admin k list-binary); do
-    dak import-keyring --generate-users "%s" ${keyring}
-done
-exec >>/dev/null 2>&1
-
-DATE=$(date -Is)
-cat "${BUILDDFUN}" | mail -a "X-Debian: DAK" -e -s "[$(hostname -s)] Buildd key changes ${DATE}" buildd-keys@ftp-master.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
-
-rm -f "${BUILDDFUN}"
diff --git a/config/backports/cron.monthly b/config/backports/cron.monthly
deleted file mode 100755 (executable)
index 38a57fd..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-#
-
-set -e
-set -u
-
-export SCRIPTVARS=/srv/backports-master.debian.org/dak/config/backports/vars
-. $SCRIPTVARS
-
-################################################################################
-
-DATE=`date -d yesterday +%y%m`
-
-cd ${base}/mail/archive
-for m in mail import; do
-    if [ -f $m ]; then
-        mv $m ${m}-$DATE
-        sleep 20
-        xz -9 ${m}-$DATE
-        chgrp backports ${m}-$DATE.xz
-        chmod 660 ${m}-$DATE.xz
-    fi;
-done
-
-DATE=`date +%Y-%m`
-cd ${base}/log
-touch $DATE
-ln -sf $DATE current
-chmod g+w $DATE
-chown dak:backports $DATE
-
-################################################################################
diff --git a/config/backports/cron.reboot b/config/backports/cron.reboot
deleted file mode 100755 (executable)
index a5a132e..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/bin/bash
-# No way I try to deal with a crippled sh just for POSIX foo.
-
-# Copyright (C) 2009 Joerg Jaspert <joerg@debian.org>
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-# exit on errors
-set -e
-# make sure to only use defined variables
-set -u
-# ERR traps should be inherited from functions too. (And command
-# substitutions and subshells and whatnot, but for us the functions is
-# the important part here)
-set -E
-
-# import the general variable set.
-export SCRIPTVARS=/srv/backports-master.debian.org/dak/config/backports/vars
-. $SCRIPTVARS
-
-# common functions are "outsourced"
-. "${configdir}/common"
-
-# usually we are not using debug logs. Set to 1 if you want them.
-DEBUG=0
-
-# our name
-PROGRAM="dinstall_reboot"
-
-# where do we want mails to go? For example log entries made with error()
-if [ "x$(hostname -s)x" != "xfranckx" ]; then
-    # Not our ftpmaster host
-    MAILTO=${MAILTO:-"root"}
-else
-    # Yay, ftpmaster
-    MAILTO=${MAILTO:-"ftpmaster@backports.debian.org"}
-fi
-
-# Marker for dinstall start
-DINSTALLSTART="${lockdir}/dinstallstart"
-# Marker for dinstall end
-DINSTALLEND="${lockdir}/dinstallend"
-
-set +e
-starttime=$(/usr/bin/stat -c %Z "${DINSTALLSTART}")
-endtime=$(/usr/bin/stat -c %Z "${DINSTALLEND}")
-set -e
-
-if [ ${endtime} -gt ${starttime} ]; then
-       # Great, last dinstall run did seem to end without trouble, no need to rerun
-       log "Last dinstall run did end without trouble, not rerunning"
-       exit 0
-else
-       # Hrm, it looks like we did not successfully end the last run.
-       # This either means dinstall did abort due to an error, or we had a reboot
-       # No way to tell, so lets restart and see what happens.
-
-       # Make sure we are not fooled by some random touching of the files, only
-       # really restart if we have the first stage stampfile there, indicating that
-       # dinstall got started
-       if [ -f "${stagedir}/savetimestamp" ]; then
-               log "Seems we have to restart a dinstall run after reboot"
-               ${configdir}/cron.dinstall
-       fi
-fi
diff --git a/config/backports/cron.unchecked b/config/backports/cron.unchecked
deleted file mode 100755 (executable)
index 423ce77..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-#! /bin/bash
-
-# Copyright (C) 2009 Joerg Jaspert <joerg@debian.org>
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-# exit on errors
-set -e
-# make sure to only use defined variables
-set -u
-# ERR traps should be inherited from functions too. (And command
-# substitutions and subshells and whatnot, but for us the functions is
-# the important part here)
-set -E
-
-export SCRIPTVARS=/srv/backports-master.debian.org/dak/config/backports/vars
-. $SCRIPTVARS
-
-
-LOCKDAILY=""
-LOCKFILE="$lockdir/unchecked.lock"
-LOCK_NEW="$lockdir/processnew.lock"
-NOTICE="$lockdir/daily.lock"
-LOCK_BUILDD="$lockdir/buildd.lock"
-# The state file telling us we have something new to do
-DINSTALLPACKAGES="${lockdir}/dinstall.packages"
-
-# our name
-PROGRAM="unchecked"
-
-if [ -e $NOTICE ]; then
-    exit 0;
-fi
-
-########################################################################
-# Functions                                                            #
-########################################################################
-# common functions are "outsourced"
-. "${configdir}/common"
-
-STAMP=$(date "+%Y%m%d%H%M")
-
-cleanup() {
-    rm -f "$LOCKFILE"
-    if [ ! -z "$LOCKDAILY" ]; then
-        rm -f "$NOTICE"
-    fi
-}
-
-function do_buildd () {
-    if lockfile -r3 $NOTICE; then
-        LOCKDAILY="YES"
-        cd $overridedir
-        dak make-overrides &>/dev/null
-        rm -f override.sid.all3 override.sid.all3.src
-        for i in main contrib non-free main.debian-installer; do
-            cat override.squeeze-backports.$i >> override.sid.all3
-            if [ "$i" != "main.debian-installer" ]; then
-                cat override.squeeze-backports.$i.src >> override.sid.all3.src
-            fi
-        done
-        make_buildd_dir
-        wbtrigger
-    fi
-}
-
-function do_dists () {
-    cd $configdir
-    dak generate-filelist
-    dak generate-packages-sources
-}
-
-########################################################################
-# the actual unchecked functions follow                                #
-########################################################################
-
-# And use one locale, no matter what the caller has set
-export LANG=C
-export LC_ALL=C
-
-# only run one cron.unchecked
-lockfile -r3 $LOCKFILE || exit 0
-trap cleanup 0
-
-do_newstage
-do_unchecked
-
-if [ ! -z "$changes" ]; then
-    touch ${DINSTALLPACKAGES}
-    sync_debbugs
-    do_buildd
-
-#    echo "Starting g-p-s: $(date +%H:%M:%S)"
-#    do_dists
-#    echo "Done with g-p-s: $(date +%H:%M:%S)"
-fi
diff --git a/config/backports/cron.weekly b/config/backports/cron.weekly
deleted file mode 100755 (executable)
index 097aa63..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-#
-
-set -e
-set -u
-# ERR traps should be inherited from functions too. (And command
-# substitutions and subshells and whatnot, but for us the functions is
-# the important part here)
-set -E
-export SCRIPTVARS=/srv/backports-master.debian.org/dak/config/backports/vars
-. $SCRIPTVARS
-
-# Start logging
-NOW=`date "+%Y.%m.%d-%H:%M:%S"`
-LOGFILE="$logdir/weekly_${NOW}.log"
-exec > "$LOGFILE" 2>&1
-
-cleanup() {
-  echo "Cleanup"
-  rm -f "$LOGFILE"
-}
-trap cleanup 0
-
-################################################################################
-
-# Purge empty directories
-echo "Purging empty directories in $ftpdir/pool/"
-if [ ! -z "$(find $ftpdir/pool/ -type d -empty)" ]; then
-   find $ftpdir/pool/ -type d -empty | xargs rmdir;
-fi
-
-# Clean up apt-ftparchive's databases
-
-echo "Splitting queue/done"
-dak split-done
-
-# Clean up apt-ftparchive's databases
-cd $configdir
-#echo "Cleanup apt-ftparchive's database"
-#apt-ftparchive -q clean apt.conf
-
-echo "Fixing symlinks in $ftpdir"
-symlinks -d -r $ftpdir
-
-echo "Finally, all is done, compressing logfile"
-exec > /dev/null 2>&1
-
-bzip2 -9 "$LOGFILE"
-
-
-################################################################################
diff --git a/config/backports/dak.conf b/config/backports/dak.conf
deleted file mode 100644 (file)
index ec7ff5f..0000000
+++ /dev/null
@@ -1,365 +0,0 @@
-Dinstall
-{
-   // To sign the release files. Adjust the keyid!
-   // Note: Key must be without a passphrase or it wont work automagically!
-   SigningKeyring "/srv/backports-master.debian.org/s3kr1t/dot-gnupg/secring.gpg";
-   SigningPubKeyring "/srv/backports-master.debian.org/s3kr1t/dot-gnupg/pubring.gpg";
-   SendmailCommand "/usr/sbin/sendmail -oi -t -f envelope@backports.debian.org";
-   MyEmailAddress "Backports Debian FTP Masters <ftpmaster@backports.debian.org>";
-   MyAdminAddress "ftpmaster@backports.debian.org";
-   MyHost "backports.debian.org";  // used for generating user@my_host addresses in e.g. manual_reject()
-   MyDistribution "backports.debian.org archive"; // Used in emails
-   // Alicia and melanie can use it
-   BugServer "bugs.debian.org";
-   // melanie uses the packages server.
-   // PackagesServer "packages.test.backports.org";
-   // If defined then the package@this.server gets a copy of most of the
-   // actions related to the package. For an example look at
-   // packages.qa.debian.org
-   // TrackingServer "packages.qa.test.backports.org";
-   // If defined this address gets a bcc of all mails.
-   // FIXME: Einrichten wenn das hier produktiv geht!
-   Bcc "debian-backports-changes@lists.debian.org";
-   GroupOverrideFilename "override.group-maint";
-   FutureTimeTravelGrace 28800; // 8 hours
-   PastCutoffYear "1984";
-   SkipTime 300;
-   // If defined then mails to close bugs are sent to the bugserver.
-   CloseBugs "false";
-   OverrideDisparityCheck "false";
-   DefaultSuite "squeeze-backports";
-   ReleaseTransitions "/srv/backports-master.debian.org/hints/transitions.yaml";
-   // If set, only send mails to addresses listed there.
-   // format of entries: one entry per line. Either an email address directly, or a regular expression,
-   // prefixed by "RE:". Examples: "jane.doe@domain.com" or "RE:jane[^@]@domain.com", where the first will
-   // only allow to mail jane.doe@domain.com while the second will mail all of jane*@domain.com
-   MailWhiteList "/srv/backports-master.debian.org/dak/config/backports/mail-whitelist";
-};
-
-Generate-Index-Diffs
-{
-   Options
-   {
-     TempDir "/srv/backports-master.debian.org/tiffani";
-     MaxDiffs { Default 50; };
-   };
-};
-
-Add-User
-{
-// Should we sent a mail to newly added users?
-  SendEmail "true";
-
-// Should we create an account so they can login?
-// Account will be created with the defaults from adduser, so adjust
-// it's configuration to fit your needs.
-// NOTE: This requires that your dak user has a sudo entry, allowing
-// to run /usr/sbin/useradd!
-  CreateAccount "false";
-
-// Note: This is a comma separated list of additional groupnames to
-// which uma should add the user. NO spaces between the groupnames or
-// useradd will die.
-// Disable it if you dont want or need that feature.
-//  GID "debuser";
-
-};
-
-Show-New
-{
-  HTMLPath "/srv/backports-web.debian.org/underlay/new/";
-}
-
-Check-Overrides
-{
-  OverrideSuites
-  {
-    squeeze-backports
-    {
-      Process "1";
-//      OriginSuite "Unstable";
-    };
-
-  };
-};
-
-
-Import-Users-From-Passwd
-{
-  // The Primary GID of your users. Using uma it is the gid from group users.
-  ValidGID "800";
-  // Comma separated list of users who are in Postgres but not the passwd file
-  KnownPostgres "postgres,katie,dak,www-data,qa,guest,repuser";
-};
-
-Queue-Report
-{
-  ReportLocations
-  {
-    822Location "/srv/backports-web.debian.org/underlay/new.822";
-  };
-};
-
-Clean-Queues
-{
-  Options
-  {
-    Days 14;
-   };
- MorgueSubDir "queues";
-};
-
-Control-Overrides
-{
-  Options
-  {
-    Component "main";
-    Suite "squeeze-backports";
-    Type "deb";
-   };
-};
-
-Rm
-{
-  Options
-  {
-    Suite "squeeze-backports";
-   };
-
-   LogFile "/srv/backports-web.debian.org/underlay/removals.txt";
-   LogFile822 "/srv/backports-web.debian.org/underlay/removals.822";
-};
-
-Import-Archive
-{
-  ExportDir "/srv/backports-master.debian.org/dak/import-archive-files/";
-};
-
-Clean-Suites
-{
-  // How long (in seconds) dead packages are left before being killed
-  StayOfExecution 1209600; // 14 days
-  AcceptedAutoBuildStayOfExecution 86400; // 24 hours
-  MorgueSubDir "pool";
-};
-
-Process-New
-{
-  DinstallLockFile "/srv/backports-master.debian.org/lock/processnew.lock";
-  LockDir "/srv/backports-master.debian.org/lock/new/";
-};
-
-Suite
-{
-  squeeze-backports
-  {
-       Components
-       {
-         main;
-         contrib;
-         non-free;
-       };
-  };
-
-};
-
-Dir
-{
-  Root "/srv/backports-master.debian.org/ftp/";
-  Pool "/srv/backports-master.debian.org/ftp/pool/";
-  Export "/srv/backports-master.debian.org/export/";
-  Templates "/srv/backports-master.debian.org/dak/templates/";
-  Lists "/srv/backports-master.debian.org/database/dists/";
-  Cache "/srv/backports-master.debian.org/database/";
-  Log "/srv/backports-master.debian.org/log/";
-  Lock "/srv/backports-master.debian.org/lock";
-  Morgue "/srv/backports-master.debian.org/morgue/";
-  Override "/srv/backports-master.debian.org/scripts/override/";
-  UrgencyLog "/srv/backports-master.debian.org/testing/urgencies/";
-  TempPath "/srv/backports-master.debian.org/tmp";
-  BTSVersionTrack "/srv/backports-master.debian.org/queue/bts_version_track/";
-  Holding "/srv/backports-master.debian.org/queue/holding/";
-  Done "/srv/backports-master.debian.org/queue/done/";
-  Reject "/srv/backports-master.debian.org/queue/reject/";
-
-  Queue
-  {
-    Byhand "/srv/backports-master.debian.org/queue/byhand/";
-    New "/srv/backports-master.debian.org/queue/new/";
-    Unchecked "/srv/backports-master.debian.org/queue/unchecked/";
-    Newstage "/srv/backports-master.debian.org/queue/newstage/";
-    Embargoed "/srv/backports-master.debian.org/queue/Embargoed/";
-    Unembargoed "/srv/backports-master.debian.org/queue/Unembargoed/";
-    ProposedUpdates "/srv/backports-master.debian.org/queue/p-u-new/";
-    OldProposedUpdates "/srv/backports-master.debian.org/queue/Unembargoed/";
-  };
-};
-
-DB
-{
-  Service "backports";
-  // PoolSize should be at least ThreadCount + 1
-  PoolSize 5;
-  // MaxOverflow shouldn't exceed postgresql.conf's max_connections - PoolSize
-  MaxOverflow 13;
-  // should be false for encoding == SQL_ASCII
-  Unicode "false"
-};
-
-SuiteMappings
-{
- "map squeeze squeeze-backports";
- "map squeeze-bpo squeeze-backports";
-};
-
-Architectures
-{
-  source "Source";
-  all "Architecture Independent";
-  alpha "DEC Alpha";
-  amd64 "AMD x86_64 (AMD64)";
-  hurd-i386 "Intel ia32 running the HURD";
-  hppa "HP PA RISC";
-  arm "ARM";
-  armel "ARM EABI";
-  i386 "Intel ia32";
-  ia64 "Intel ia64";
-  m68k "Motorola Mc680x0";
-  mips "MIPS (Big Endian)";
-  mipsel "MIPS (Little Endian)";
-  powerpc "PowerPC";
-  s390 "IBM S/390";
-  sh "Hitatchi SuperH";
-  sparc "Sun SPARC/UltraSPARC";
-  kfreebsd-i386 "GNU/kFreeBSD i386";
-  kfreebsd-amd64 "GNU/kFreeBSD amd64";
-};
-
-Archive
-{
-  backports
-  {
-    OriginServer "backports.debian.org";
-    PrimaryMirror "backports.debian.org";
-    Description "Master Archive for backports.debian.org archive";
-  };
-};
-
-Section
-{
-  admin;
-  cli-mono;
-  comm;
-  database;
-  debian-installer;
-  debug;
-  devel;
-  doc;
-  editors;
-  embedded;
-  electronics;
-  fonts;
-  games;
-  gnome;
-  graphics;
-  gnu-r;
-  gnustep;
-  hamradio;
-  haskell;
-  httpd;
-  interpreters;
-  java;
-  kde;
-  kernel;
-  libdevel;
-  libs;
-  lisp;
-  localization;
-  mail;
-  math;
-  misc;
-  net;
-  news;
-  ocaml;
-  oldlibs;
-  otherosfs;
-  perl;
-  php;
-  python;
-  ruby;
-  science;
-  shells;
-  sound;
-  tex;
-  text;
-  utils;
-  web;
-  vcs;
-  video;
-  x11;
-  xfce;
-  zope;
-};
-
-Priority
-{
-  required 1;
-  important 2;
-  standard 3;
-  optional 4;
-  extra 5;
-  source 0; // i.e. unused
-};
-
-Urgency
-{
-  Default "low";
-  Valid
-  {
-    low;
-    medium;
-    high;
-    emergency;
-    critical;
-  };
-};
-
-Common
-{
-  // The default number of threads for multithreading parts of dak:
-  ThreadCount 16;
-}
-
-Import-LDAP-Fingerprints
-{
-  LDAPDn "ou=users,dc=debian,dc=org";
-  LDAPServer "db.debian.org";
-  ExtraKeyrings
-  {
-    "/srv/keyring.debian.org/keyrings/removed-keys.pgp";
-    "/srv/keyring.debian.org/keyrings/removed-keys.gpg";
-    "/srv/keyring.debian.org/keyrings/extra-keys.pgp";
-  };
-  KeyServer "wwwkeys.eu.pgp.net";
-};
-
-Changelogs
-{
-  Export "/srv/backports-master.debian.org/export/changelogs";
-}
-
-Generate-Releases
-{
-  MD5Sum
-  {
-    squeeze-backports;
-  };
-  SHA1
-  {
-    squeeze-backports;
-  };
-  SHA256
-  {
-    squeeze-backports;
-  };
-}
diff --git a/config/backports/dak.conf-etc b/config/backports/dak.conf-etc
deleted file mode 100644 (file)
index bf7d26c..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-Config
-{
-  // FQDN hostname
-  morricone.debian.org
-  {
-
-    // Optional hostname as it appears in the database (if it differs
-    // from the FQDN hostname).
-    DatabaseHostname     "backports";
-
-    // Optional filename of katie's config file; if not present, this
-    // file is assumed to contain katie config info.
-    DakConfig          "/srv/backports-master.debian.org/dak/config/backports/dak.conf";
-
-    // Optional filename of apt-ftparchive's config file; if not
-    // present, the file is assumed to be 'apt.conf' in the same
-    // directory as this file.
-    AptConfig          "/srv/backports-master.debian.org/dak/config/backports/apt.conf";
-  }
-
-}
-
diff --git a/config/backports/dinstall.functions b/config/backports/dinstall.functions
deleted file mode 100644 (file)
index 8709e6e..0000000
+++ /dev/null
@@ -1,524 +0,0 @@
-# -*- mode:sh -*-
-# Timestamp. Used for dinstall stat graphs
-function ts() {
-        echo "Archive maintenance timestamp ($1): $(date +%H:%M:%S)"
-}
-
-# Cleanup actions
-function cleanup() {
-       rm -f ${LOCK_DAILY}
-       rm -f ${LOCK_ACCEPTED}
-}
-
-# If we error out this one is called, *FOLLOWED* by cleanup above
-function onerror() {
-    ERRDATE=$(date "+%Y.%m.%d-%H:%M:%S")
-
-    subject="ATTENTION ATTENTION!"
-    if [ "${error}" = "false" ]; then
-        subject="${subject} (continued)"
-    else
-        subject="${subject} (interrupted)"
-    fi
-    subject="${subject} dinstall error at ${ERRDATE} in ${STAGEFILE} - (Be quiet, Brain, or I'll stab you with a Q-tip)"
-
-    if [ -r "${STAGEFILE}.log" ]; then
-        cat "${STAGEFILE}.log"
-    else
-        echo  "file ${STAGEFILE}.log does not exist, sorry"
-    fi | mail -s "${subject}" -a "X-Debian: DAK" cron@backports.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
-}
-
-########################################################################
-# the actual dinstall functions follow                                 #
-########################################################################
-
-function qa1() {
-    log "Telling QA user that we start dinstall"
-    ssh -2 -i ~dak/.ssh/push_merkel_qa  -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 qa@qa.debian.org sleep 1
- }
-
-# Updating various files
-function updates() {
-    log "Updating Bugs docu, Mirror list and mailing-lists.txt"
-    cd $configdir
-    $scriptsdir/update-bugdoctxt
-    $scriptsdir/update-mirrorlists
-    $scriptsdir/update-mailingliststxt
-    $scriptsdir/update-pseudopackages.sh
-}
-
-# Process (oldstable)-proposed-updates "NEW" queue
-function punew_do() {
-    date -u -R >> REPORT
-    dak process-policy $1 | tee -a REPORT | mail -a "X-Debian: DAK" -e -s "NEW changes in $1" team@backports.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
-    echo >> REPORT
-}
-function punew() {
-    log "Doing automated p-u-new processing"
-    cd "${queuedir}/p-u-new"
-    punew_do "$1"
-}
-function opunew() {
-    log "Doing automated o-p-u-new processing"
-    cd "${queuedir}/o-p-u-new"
-    punew_do "$1"
-}
-
-# The first i18n one, syncing new descriptions
-function i18n1() {
-    log "Synchronizing i18n package descriptions"
-    # First sync their newest data
-    cd ${scriptdir}/i18nsync
-    rsync -aq --delete --delete-after ddtp-sync:/does/not/matter . || true
-
-    # Now check if we still know about the packages for which they created the files
-    # is the timestamp signed by us?
-    if $(gpgv --keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/pubring.gpg timestamp.gpg timestamp); then
-        # now read it. As its signed by us we are sure the content is what we expect, no need
-        # to do more here. And we only test -d a directory on it anyway.
-        TSTAMP=$(cat timestamp)
-        # do we have the dir still?
-        if [ -d ${scriptdir}/i18n/${TSTAMP} ]; then
-            # Lets check!
-            if ${scriptsdir}/ddtp-i18n-check.sh . ${scriptdir}/i18n/${TSTAMP}; then
-                # Yay, worked, lets copy around
-                for dir in wheezy sid; do
-                    if [ -d dists/${dir}/ ]; then
-                        cd dists/${dir}/main/i18n
-                        rsync -aq --delete --delete-after  . ${ftpdir}/dists/${dir}/main/i18n/.
-                    fi
-                    cd ${scriptdir}/i18nsync
-                done
-            else
-                echo "ARRRR, bad guys, wrong files, ARRR"
-                echo "Arf, Arf, Arf, bad guys, wrong files, arf, arf, arf" | mail -a "X-Debian: DAK" -s "Don't you kids take anything. I'm watching you. I've got eye implants in the back of my head." debian-l10n-devel@lists.alioth.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
-            fi
-        else
-            echo "ARRRR, missing the timestamp ${TSTAMP} directory, not updating i18n, ARRR"
-            echo "Arf, Arf, Arf, missing the timestamp ${TSTAMP} directory, not updating i18n, arf, arf, arf" | mail -a "X-Debian: DAK" -s "Lisa, if you don't like your job you don't strike. You just go in every day and do it really half-assed. That's the American way." debian-l10n-devel@lists.alioth.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
-        fi
-    else
-        echo "ARRRRRRR, could not verify our timestamp signature, ARRR. Don't mess with our files, i18n guys, ARRRRR."
-        echo "Arf, Arf, Arf, could not verify our timestamp signature, arf. Don't mess with our files, i18n guys, arf, arf, arf" | mail -a "X-Debian: DAK" -s "You can't keep blaming yourself. Just blame yourself once, and move on." debian-l10n-devel@lists.alioth.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
-    fi
-}
-
-function cruft() {
-    log "Checking for cruft in overrides"
-    dak check-overrides
-}
-
-function dominate() {
-    log "Removing obsolete source and binary associations"
-    dak dominate
-}
-
-function filelist() {
-    log "Generating file lists for apt-ftparchive"
-    dak generate-filelist
-}
-
-function fingerprints() {
-    log "Updating fingerprints"
-    dak import-keyring -L /srv/backports-master.debian.org/keyrings/keyring.gpg
-
-    OUTFILE=$(mktemp)
-    dak import-keyring --generate-users "%s" /srv/keyring.debian.org/keyrings/debian-maintainers.gpg >"${OUTFILE}"
-
-    if [ -s "${OUTFILE}" ]; then
-        /usr/sbin/sendmail -odq -oi -t -f envelope@ftp-master.debian.org <<EOF
-From: Debian FTP Masters <ftpmaster@ftp-master.debian.org>
-To: <debian-backports@lists.debian.org>
-Subject: Debian Maintainers Keyring changes
-Content-Type: text/plain; charset=utf-8
-X-Debian: DAK
-MIME-Version: 1.0
-
-The following changes to the debian-maintainers keyring have just been activated:
-
-$(cat $OUTFILE)
-
-Debian distribution maintenance software,
-on behalf of the Keyring maintainers
-
-EOF
-    fi
-    rm -f "$OUTFILE"
-}
-
-function overrides() {
-    log "Writing overrides into text files"
-    cd $overridedir
-    dak make-overrides
-
-    # FIXME
-    rm -f override.sid.all3
-    for i in main contrib non-free main.debian-installer; do cat override.squeeze-backports.$i >> override.sid.all3; done
-}
-
-function mpfm() {
-    log "Generating package / file mapping"
-    dak make-pkg-file-mapping | bzip2 -9 > $base/ftp/indices/package-file.map.bz2
-}
-
-function packages() {
-    log "Generating Packages and Sources files"
-    cd $configdir
-    apt-ftparchive generate apt.conf
-    #dak generate-packages-sources
-}
-
-function pdiff() {
-    log "Generating pdiff files"
-    dak generate-index-diffs
-}
-
-function release() {
-    log "Generating Release files"
-    dak generate-releases
-}
-
-function dakcleanup() {
-    log "Cleanup old packages/files"
-    dak clean-suites -m 10000
-    dak clean-queues
-}
-
-function buildd_dir() {
-    # Rebuild the buildd dir to avoid long periods of 403s
-    log "Regenerating the buildd incoming dir"
-    STAMP=$(date "+%Y%m%d%H%M")
-    make_buildd_dir
-}
-
-function mklslar() {
-    cd $ftpdir
-
-    FILENAME=ls-lR
-
-    log "Removing any core files ..."
-    find -type f -name core -print0 | xargs -0r rm -v
-
-    log "Checking permissions on files in the FTP tree ..."
-    find -type f \( \! -perm -444 -o -perm +002 \) -ls
-    find -type d \( \! -perm -555 -o -perm +002 \) -ls
-
-    log "Checking symlinks ..."
-    symlinks -rd .
-
-    log "Creating recursive directory listing ... "
-    rm -f .${FILENAME}.new
-    TZ=UTC ls -lR > .${FILENAME}.new
-
-    if [ -r ${FILENAME}.gz ] ; then
-        mv -f ${FILENAME}.gz ${FILENAME}.old.gz
-        mv -f .${FILENAME}.new ${FILENAME}
-        rm -f ${FILENAME}.patch.gz
-        zcat ${FILENAME}.old.gz | diff -u - ${FILENAME} | gzip -9cfn - >${FILENAME}.patch.gz
-        rm -f ${FILENAME}.old.gz
-    else
-        mv -f .${FILENAME}.new ${FILENAME}
-    fi
-
-    gzip -9cfN ${FILENAME} >${FILENAME}.gz
-    rm -f ${FILENAME}
-}
-
-function mkmaintainers() {
-    log 'Creating Maintainers index ... '
-
-    cd $indices
-    dak make-maintainers ${scriptdir}/masterfiles/pseudo-packages.maintainers
-    gzip -9v --rsyncable <Maintainers >Maintainers.gz
-    gzip -9v --rsyncable <Uploaders >Uploaders.gz
-}
-
-function copyoverrides() {
-    log 'Copying override files into public view ...'
-
-    for ofile in $copyoverrides ; do
-           cd $overridedir
-           chmod g+w override.$ofile
-
-           cd $indices
-
-           newofile=override.$ofile.gz
-           rm -f .newover-$ofile.gz
-           pc="`gzip 2>&1 -9nv <$overridedir/override.$ofile >.newover-$ofile.gz`"
-        if ! cmp -s .newover-$ofile.gz $newofile || [ ! -f $newofile ]; then
-                   log "   installing new $newofile $pc"
-                   mv -f .newover-$ofile.gz $newofile
-                   chmod g+w $newofile
-        else
-                   rm -f .newover-$ofile.gz
-           fi
-    done
-}
-
-function mkfilesindices() {
-    umask 002
-    cd $base/ftp/indices/files/components
-
-    ARCHLIST=$(tempfile)
-
-    log "Querying postgres"
-    echo 'SELECT l.path, f.filename, a.arch_string FROM location l JOIN files f ON (f.location = l.id) LEFT OUTER JOIN (binaries b JOIN architecture a ON (b.architecture = a.id)) ON (f.id = b.file)' | psql -At | sed 's/|//;s,^/srv/ftp-master.debian.org/ftp,.,' | sort >$ARCHLIST
-
-    includedirs () {
-        perl -ne 'print; while (m,/[^/]+$,) { $_=$`; print $_ . "\n" unless $d{$_}++; }'
-    }
-    poolfirst () {
-        perl -e '@nonpool=(); while (<>) { if (m,^\./pool/,) { print; } else { push @nonpool, $_; } } print for (@nonpool);'
-    }
-
-    log "Generating sources list"
-    (
-        sed -n 's/|$//p' $ARCHLIST
-        cd $base/ftp
-        find ./dists -maxdepth 1 \! -type d
-        find ./dists \! -type d | grep "/source/"
-    ) | sort -u | gzip -9 > source.list.gz
-
-    log "Generating arch lists"
-
-    ARCHES=$( (<$ARCHLIST sed -n 's/^.*|//p'; echo amd64) | grep . | grep -v all | sort -u)
-    for a in $ARCHES; do
-        (sed -n "s/|$a$//p" $ARCHLIST
-            sed -n 's/|all$//p' $ARCHLIST
-
-            cd $base/ftp
-            find ./dists -maxdepth 1 \! -type d
-            find ./dists \! -type d | grep -E "(proposed-updates.*_$a.changes$|/main/disks-$a/|/main/installer-$a/|/Contents-$a|/binary-$a/)"
-        ) | sort -u | gzip -9 > arch-$a.list.gz
-    done
-
-    log "Generating suite lists"
-
-    suite_list () {
-        printf 'SELECT DISTINCT l.path, f.filename FROM (SELECT sa.source AS source FROM src_associations sa WHERE sa.suite = %d UNION SELECT b.source AS source FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) WHERE ba.suite = %d) s JOIN dsc_files df ON (s.source = df.source) JOIN files f ON (df.file = f.id) JOIN location l ON (f.location = l.id)\n' $1 $1 | psql -F' ' -A -t
-
-        printf 'SELECT l.path, f.filename FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) JOIN files f ON (b.file = f.id) JOIN location l ON (f.location = l.id) WHERE ba.suite = %d\n' $1 | psql -F' ' -A -t
-    }
-
-    printf 'SELECT id, suite_name FROM suite\n' | psql -F' ' -At |
-    while read id suite; do
-        [ -e $base/ftp/dists/$suite ] || continue
-        (
-            (cd $base/ftp
-                distname=$(cd dists; readlink $suite || echo $suite)
-                find ./dists/$distname \! -type d
-                for distdir in ./dists/*; do
-                    [ "$(readlink $distdir)" != "$distname" ] || echo $distdir
-                done
-            )
-            suite_list $id | tr -d ' ' | sed 's,^/srv/ftp-master.debian.org/ftp,.,'
-        ) | sort -u | gzip -9 > suite-${suite}.list.gz
-    done
-
-    log "Finding everything on the ftp site to generate sundries"
-    (cd $base/ftp; find . \! -type d \! -name 'Archive_Maintenance_In_Progress' | sort) >$ARCHLIST
-
-    rm -f sundries.list
-    zcat *.list.gz | cat - *.list | sort -u |
-    diff - $ARCHLIST | sed -n 's/^> //p' > sundries.list
-
-    log "Generating files list"
-
-    for a in $ARCHES; do
-        (echo ./project/trace; zcat arch-$a.list.gz source.list.gz) |
-        cat - sundries.list dists.list project.list docs.list indices.list |
-        sort -u | poolfirst > ../arch-$a.files
-    done
-
-    (cd $base/ftp/
-           for dist in sid wheezy; do
-                   find ./dists/$dist/main/i18n/ \! -type d | sort -u | gzip -9 > $base/ftp/indices/files/components/translation-$dist.list.gz
-           done
-    )
-
-    (cat ../arch-i386.files ../arch-amd64.files; zcat suite-proposed-updates.list.gz ; zcat translation-sid.list.gz ; zcat translation-wheezy.list.gz) |
-    sort -u | poolfirst > ../typical.files
-
-    rm -f $ARCHLIST
-    log "Done!"
-}
-
-function mkchecksums() {
-    dsynclist=$dbdir/dsync.list
-    md5list=$indices/md5sums
-
-    log -n "Creating md5 / dsync index file ... "
-
-    cd "$ftpdir"
-    ${bindir}/dsync-flist -q generate $dsynclist --exclude $dsynclist --md5
-    ${bindir}/dsync-flist -q md5sums $dsynclist | gzip -9n > ${md5list}.gz
-    ${bindir}/dsync-flist -q link-dups $dsynclist || true
-}
-
-function mirror() {
-    log "Regenerating \"public\" mirror/ hardlink fun"
-    DATE_SERIAL=$(date +"%Y%m%d01")
-    FILESOAPLUS1=$(awk '/serial/ { print $3+1 }' ${TRACEFILE} )
-    if [ "$DATE_SERIAL" -gt "$FILESOAPLUS1" ]; then
-        SERIAL="$DATE_SERIAL"
-    else
-        SERIAL="$FILESOAPLUS1"
-    fi
-    date -u > ${TRACEFILE}
-    echo "Using dak v1" >> ${TRACEFILE}
-    echo "Running on host: $(hostname -f)" >> ${TRACEFILE}
-    echo "Archive serial: ${SERIAL}" >> ${TRACEFILE}
-    cd ${mirrordir}
-    rsync -aH --link-dest ${ftpdir} --delete --delete-after --delete-excluded --exclude Packages.*.new --exclude Sources.*.new  --ignore-errors ${ftpdir}/. .
-}
-
-function expire() {
-    log "Expiring old database dumps..."
-    cd $base/backup
-    $scriptsdir/expire_dumps -d . -p -f "dump_*"
-}
-
-function transitionsclean() {
-    log "Removing out of date transitions..."
-    cd $base
-    dak transitions -c -a
-}
-
-function dm() {
-    log "Updating DM html page"
-    $scriptsdir/dm-monitor >$webdir/dm-uploaders.html
-}
-
-function bts() {
-    log "Categorizing uncategorized bugs filed against ftp.debian.org"
-    dak bts-categorize
-}
-
-function ddaccess() {
-    # Tell our dd accessible mirror to sync itself up. Including ftp dir.
-    log "Trigger dd accessible parts sync including ftp dir"
-    ssh -o Batchmode=yes -o ConnectTimeout=30 -o SetupTimeout=30 -2 -i ${base}/s3kr1t/pushddmirror dak@ries.debian.org pool
-}
-
-function mirrorpush() {
-    log "Starting the mirrorpush"
-    date -u > /srv/backports-web.debian.org/underlay/mirrorstart
-    echo "Using dak v1" >> /srv/backports-web.debian.org/underlay/mirrorstart
-    echo "Running on host $(hostname -f)" >> /srv/backports-web.debian.org/underlay/mirrorstart
-    sudo -H -u archvsync /home/archvsync/runmirrors > ~dak/runmirrors.log 2>&1 &
-}
-
-function i18n2() {
-    log "Exporting package data foo for i18n project"
-    STAMP=$(date "+%Y%m%d%H%M")
-    mkdir -p ${scriptdir}/i18n/${STAMP}
-    cd ${scriptdir}/i18n/${STAMP}
-    dak control-suite -l stable > squeeze
-    dak control-suite -l testing > wheezy
-    dak control-suite -l unstable > sid
-    echo "${STAMP}" > timestamp
-    gpg --secret-keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 55BE302B --detach-sign -o timestamp.gpg timestamp
-    rm -f md5sum
-    md5sum * > md5sum
-    cd ${webdir}/
-    ln -sfT ${scriptdir}/i18n/${STAMP} i18n
-
-    cd ${scriptdir}
-    find ./i18n -mindepth 1 -maxdepth 1 -mtime +2 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
-}
-
-function stats() {
-    log "Updating stats data"
-    cd $configdir
-    #$scriptsdir/update-ftpstats $base/log/* > $base/misc/ftpstats.data
-    #R --slave --vanilla < $base/misc/ftpstats.R
-    dak stats arch-space > $webdir/arch-space
-    dak stats pkg-nums > $webdir/pkg-nums
-}
-
-function aptftpcleanup() {
-    log "Clean up apt-ftparchive's databases"
-    cd $configdir
-    apt-ftparchive -q clean apt.conf
-}
-
-function cleantransactions() {
-    log "Cleanup transaction ids older than 3 months"
-    cd $base/backup/
-    find -maxdepth 1 -mindepth 1 -type f -name 'txid_*' -mtime +90 -print0 | xargs -0 --no-run-if-empty rm
-}
-
-function logstats() {
-    $masterdir/tools/logs.py "$1"
-}
-
-# save timestamp when we start
-function savetimestamp() {
-       NOW=`date "+%Y.%m.%d-%H:%M:%S"`
-       echo ${NOW} > "${dbdir}/dinstallstart"
-}
-
-function maillogfile() {
-    cat "$LOGFILE" | mail -a "X-Debian: DAK" -s "Log for dinstall run of ${NOW}" cron@ftp-master.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
-}
-
-function renamelogfile() {
-    if [ -f "${dbdir}/dinstallstart" ]; then
-        NOW=$(cat "${dbdir}/dinstallstart")
-#        maillogfile
-        mv "$LOGFILE" "$logdir/dinstall_${NOW}.log"
-#        logstats "$logdir/dinstall_${NOW}.log"
-        bzip2 -9 "$logdir/dinstall_${NOW}.log"
-    else
-        error "Problem, I don't know when dinstall started, unable to do log statistics."
-        NOW=`date "+%Y.%m.%d-%H:%M:%S"`
-#        maillogfile
-        mv "$LOGFILE" "$logdir/dinstall_${NOW}.log"
-        bzip2 -9 "$logdir/dinstall_${NOW}.log"
-    fi
-}
-
-function testingsourcelist() {
-    dak ls -s testing -f heidi -r .| egrep 'source$' > ${webdir}/testing.list
-}
-
-# do a last run of process-unchecked before dinstall is on.
-function process_unchecked() {
-    log "Processing the unchecked queue"
-    UNCHECKED_WITHOUT_LOCK="-p"
-    do_unchecked
-    sync_debbugs
-}
-
-# do a run of newstage only before dinstall is on.
-function newstage() {
-    log "Processing the newstage queue"
-    UNCHECKED_WITHOUT_LOCK="-p"
-    do_newstage
-}
-
-# Function to update a "statefile" telling people what we are doing
-# (more or less).
-#
-# This should be called with the argument(s)
-#  - Status name we want to show.
-#
-function state() {
-    RIGHTNOW="$(date -u +"%a %b %d %T %Z %Y (%s)")"
-    cat >"${DINSTALLSTATE}" <<EOF
-Dinstall start: ${DINSTALLBEGIN}
-Current action: ${1}
-Action start: ${RIGHTNOW}
-EOF
-}
-
-# extract changelogs and stuff
-function changelogs() {
-    log "Extracting changelogs"
-    dak make-changelog -e
-    mkdir -p ${exportpublic}/changelogs
-    cd ${exportpublic}/changelogs
-    rsync -aHW --delete --delete-after --ignore-errors ${exportdir}/changelogs/. .
-    sudo -H -u archvsync /home/archvsync/runmirrors metabdo > ~dak/runmirrors-metadata.log 2>&1 &
-}
diff --git a/config/backports/dinstall.variables b/config/backports/dinstall.variables
deleted file mode 100644 (file)
index d6d7bb3..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- mode:sh -*-
-# usually we are not using debug logs. Set to 1 if you want them.
-DEBUG=0
-
-# our name
-PROGRAM="dinstall"
-
-# where do we want mails to go? For example log entries made with error()
-if [ "x$(hostname -s)x" != "xmorriconex" ]; then
-    # Not our ftpmaster host
-    MAILTO=${MAILTO:-"root"}
-else
-    # Yay, ftpmaster
-    MAILTO=${MAILTO:-"ftpmaster@backports.debian.org"}
-fi
-
-# How many logfiles to keep
-LOGROTATE=${LOGROTATE:-400}
-
-# Marker for dinstall start
-DINSTALLSTART="${lockdir}/dinstallstart"
-# Marker for dinstall end
-DINSTALLEND="${lockdir}/dinstallend"
-
-# lock cron.unchecked (it immediately exits when this exists)
-LOCK_DAILY="$lockdir/daily.lock"
-
-# Lock cron.unchecked from doing work
-LOCK_ACCEPTED="$lockdir/unchecked.lock"
-
-# Lock process-new from doing work
-LOCK_NEW="$lockdir/processnew.lock"
-
-# This file is simply used to indicate to britney whether or not
-# the Packages file updates completed successfully.  It's not a lock
-# from our point of view
-LOCK_BRITNEY="$lockdir/britney.lock"
-
-# If this file exists we exit immediately after the currently running
-# function is done
-LOCK_STOP="$lockdir/archive.stop"
-
-# Lock buildd updates
-LOCK_BUILDD="$lockdir/buildd.lock"
-
-# Statefile for the users
-DINSTALLSTATE="${webdir}/dinstall.status"
-
-# The mirror trace file
-TRACEFILE="${ftpdir}/project/trace/backports-master.debian.org"
-
-# The state file telling us we have something new to do
-DINSTALLPACKAGES="${lockdir}/dinstall.packages"
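These markers are plain files checked by the surrounding scripts rather than kernel-level locks. A minimal sketch of how a caller would honour LOCK_STOP, assuming a log helper like the one used by dinstall.functions above:

    # stop cleanly once the currently running function has finished
    if [ -f "${LOCK_STOP}" ]; then
        log "${LOCK_STOP} exists, stopping after the current stage"
        exit 0
    fi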
diff --git a/config/backports/removalsrss.rc b/config/backports/removalsrss.rc
deleted file mode 100644 (file)
index 92395b2..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-input: /srv/backports-web.debian.org/underlay/removals.txt
-items: 16
-titlelength: 80
-
-title: Removals from Debian Backports
-link: http://backports-master.debian.org/dak/removals.txt
-description: List of all the removals from Debian's Backport archives
-subject: Removals from Debian Backports
-creator: tfheen@debian.org
-publisher: joerg@debian.org
-rights: Copyright 2005, Tollef Fog Heen
-language: en-us
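This rc file is only read by the removals RSS generator; the Debian cron.hourly further down in this diff renders the feed roughly like this:

    # turn removals.txt into an RSS feed using the settings above
    $base/dak/tools/removals.pl $configdir/removalsrss.rc > $webdir/rss/removals.rss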
diff --git a/config/backports/vars b/config/backports/vars
deleted file mode 100644 (file)
index 5a1ca15..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-# locations used by many scripts
-
-base=/srv/backports-master.debian.org
-public=/srv/backports-web.debian.org
-bindir=$base/bin
-ftpdir=$base/ftp/
-indices=$ftpdir/indices
-webdir=$public/underlay/
-scriptdir=$base/scripts
-
-archs="alpha amd64 arm armel hppa hurd-i386 i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 "
-
-masterdir=$base/dak/
-overridedir=$base/scripts/override
-extoverridedir=$scriptdir/external-overrides
-configdir=$base/dak/config/backports/
-scriptsdir=$base/dak/scripts/debian/
-dbdir=$base/database/
-
-queuedir=$base/queue
-unchecked=$queuedir/unchecked/
-accepted=$queuedir/accepted/
-done=$queuedir/done/
-over=$base/over/
-lockdir=$base/lock/
-stagedir=$lockdir/stages
-incoming=$base/incoming
-logdir=$base/log/cron/
-mirrordir=$base/mirror/
-newstage=$queuedir/newstage/
-exportdir=$base/export/
-exportpublic=$public/export/
-
-
-ftpgroup=debadmin
-
-copyoverrides="squeeze-backports.contrib squeeze-backports.contrib.src squeeze-backports.main squeeze-backports.main.debian-installer squeeze-backports.main.src squeeze-backports.extra.contrib squeeze-backports.extra.main"
-
-# Change this to your hostname
-uploadhost=localhost
-uploaddir=/pub/UploadQueue/
-
-# What components to support
-components="main contrib non-free"
-suites="squeeze-backports"
-override_types="deb dsc udeb"
-
-# export TMP=/srv/backports-master.debian.org/tmp
-# export TEMP=/srv/backports-master.debian.org/tmp
-
-TMPDIR=$base/tmp
-
-PATH=$masterdir:$PATH
-umask 022
-unset CDPATH
-GZIP='--rsyncable' ; export GZIP
-
-# Set the database variables
-eval $(dak admin config db-shell)
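As with its debian and debian-security counterparts, every backports cron script sourced this file at the top; the usual pattern, with the path assembled here from the base and configdir settings above:

    export SCRIPTVARS=/srv/backports-master.debian.org/dak/config/backports/vars
    . $SCRIPTVARS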
diff --git a/config/debian-security/apache.conf b/config/debian-security/apache.conf
index c022c8bd7ec07e48d6c6362c099111226f2ff6d4..172cbcee0a9dfb7a15ebde29f420b2c3b0ae9053 100644 (file)
@@ -9,17 +9,14 @@
 
 
     Alias /debian-security /org/security.debian.org/archive/debian-security/
+    Alias /debian-security-buildd /srv/security-master.debian.org/buildd/debian-security-buildd/
     Alias /buildd/ /org/security-master.debian.org/buildd/
 
     #RewriteEngine on
     #RewriteRule ^/$    http://www.debian.org/security/
 
-    # New suite aliases
-    Alias /buildd-squeeze /srv/security-master.debian.org/buildd/squeeze/
-    Alias /buildd-wheezy /srv/security-master.debian.org/buildd/wheezy/
-
     # BuildD access list
-    <LocationMatch "^/(buildd|buildd-squeeze|buildd-wheezy|debian-security)/">
+    <LocationMatch "^/(buildd|buildd-squeeze|buildd-wheezy|debian-security|debian-security-buildd)/">
         order deny,allow
         deny from all
 
diff --git a/config/debian-security/apt.conf b/config/debian-security/apt.conf
deleted file mode 100644 (file)
index aba3409..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-APT::FTPArchive::Contents off;
-
-Dir 
-{
-   ArchiveDir "/srv/security-master.debian.org/ftp/";
-   OverrideDir "/srv/security-master.debian.org/override/";
-   CacheDir "/srv/security-master.debian.org/dak-database/";
-};
-
-Default
-{
-   Packages::Compress "gzip bzip2";
-   Sources::Compress "gzip bzip2";
-   DeLinkLimit 0;
-   FileMode 0664;
-}
-
-tree "dists/stable/updates"
-{
-   FileList "/srv/security-master.debian.org/dak-database/dists/stable_updates/$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/security-master.debian.org/dak-database/dists/stable_updates/$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "amd64 armel i386 ia64 kfreebsd-i386 kfreebsd-amd64 mips mipsel powerpc s390 sparc source";
-   BinOverride "override.squeeze.$(SECTION)";
-   ExtraOverride "override.squeeze.extra.$(SECTION)";
-   SrcOverride "override.squeeze.$(SECTION).src";
-   Contents " ";
-   Packages::Compress "gzip bzip2";
-   Sources::Compress "gzip bzip2";
-};
-
-tree "dists/testing/updates"
-{
-   FileList "/srv/security-master.debian.org/dak-database/dists/testing_updates/$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/security-master.debian.org/dak-database/dists/testing_updates/$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "amd64 armel i386 ia64 kfreebsd-i386 kfreebsd-amd64 mips mipsel powerpc s390 sparc source";
-   BinOverride "override.wheezy.$(SECTION)";
-   ExtraOverride "override.wheezy.extra.$(SECTION)";
-   SrcOverride "override.wheezy.$(SECTION).src";
-   Contents " ";
-   Packages::Compress "gzip bzip2";
-   Sources::Compress "gzip bzip2";
-};
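Configs like this one were consumed by apt-ftparchive rather than by dak itself; the matching calls, now commented out or dropped elsewhere in this diff, looked roughly like:

    cd $configdir
    apt-ftparchive generate apt.conf   # build Packages/Sources from the tree stanzas
    apt-ftparchive -q clean apt.conf   # expire stale entries from the cache databases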
diff --git a/config/debian-security/apt.conf.buildd b/config/debian-security/apt.conf.buildd
deleted file mode 100644 (file)
index 9647c63..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-APT::FTPArchive::Contents off;
-
-Dir 
-{
-   ArchiveDir "/srv/security-master.debian.org/buildd/";
-   OverrideDir "/srv/security-master.debian.org/override/";
-   CacheDir "/srv/security-master.debian.org/dak-database/";
-};
-
-Default
-{
-   Packages::Compress ". gzip bzip2";
-   Sources::Compress ". gzip bzip2";
-   DeLinkLimit 0;
-   FileMode 0664;
-}
-
-bindirectory "squeeze"
-{
-   Packages "squeeze/Packages";
-   Sources "squeeze/Sources";
-   Contents " ";
-
-   BinOverride "override.squeeze.all3";
-   SrcOverride "override.squeeze.all3.src";
-   BinCacheDB "packages-accepted-squeeze.db";
-   PathPrefix "";
-   Packages::Extensions ".deb .udeb";
-};
-
-bindirectory "wheezy"
-{
-   Packages "wheezy/Packages";
-   Sources "wheezy/Sources";
-   Contents " ";
-
-   BinOverride "override.wheezy.all3";
-   SrcOverride "override.wheezy.all3.src";
-   BinCacheDB "packages-accepted-squeeze.db";
-   PathPrefix "";
-   Packages::Extensions ".deb .udeb";
-};
diff --git a/config/debian-security/cron.buildd b/config/debian-security/cron.buildd
index 970a232263f3e9a8d9307db7ff1c6dccda37bd33..1f1d4c6556a9756d2bb636162f4673a2d5641e32 100755 (executable)
@@ -3,12 +3,13 @@
 # Executed after cron.unchecked
 
 set -e
+set -o pipefail
 set -u
 
 export SCRIPTVARS=/srv/security-master.debian.org/dak/config/debian-security/vars
 . $SCRIPTVARS
 SSH_SOCKET=~/.ssh/buildd.debian.org.socket
-DISTS=$(dak admin s list)
+DISTS="oldstable stable"
 
 if [ -e $ftpdir/Archive_Maintenance_In_Progress ]; then
     exit 0
@@ -19,8 +20,8 @@ now=$(date +%s)
 check=$(( now - 3*60 ))
 
 for dist in $DISTS; do
-    smodtime=$(stat -c "%Y" $base/buildd/$dist/Sources.gz)
-    pmodtime=$(stat -c "%Y" $base/buildd/$dist/Packages.gz)
+    smodtime=$(stat -c "%Y" $base/build-queues/dists/buildd-$dist/updates/*/source/Sources.gz | sort -n | tail -1)
+    pmodtime=$(stat -c "%Y" $base/build-queues/dists/buildd-$dist/updates/*/binary-*/Packages.gz | sort -n | tail -1)
 
     if [ ${smodtime} -gt ${check} ] || [ ${pmodtime} -gt ${check} ]; then
         # Packages/Sources changed in the last minutes
@@ -40,12 +41,15 @@ if [ ! -z "${dists}" ]; then
     trap 'kill -TERM $SSH_PID' 0
     for d in ${dists}; do
         case ${d} in
-            stable)
+            oldstable)
                 send=squeeze
                 ;;
-            testing)
+            stable)
                 send=wheezy
                 ;;
+            testing)
+                send=jessie
+                ;;
             *)
                 send=unknown
                 ;;
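The freshness test near the top of this script is plain epoch arithmetic: a suite is only queued for the buildd trigger when one of its Packages or Sources indices changed within the last three minutes. For example:

    now=$(date +%s)           # e.g. 1382887200
    check=$(( now - 3*60 ))   # 1382887020, i.e. three minutes ago
    # the suite is picked up only if the newest index mtime is greater than ${check}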
diff --git a/config/debian-security/cron.daily b/config/debian-security/cron.daily
index 3c34d769ec2bea6d1abccf73910ef2aff5aae791..ba4aaa6235b44edd048f54bb97e4c6a8198410cf 100755 (executable)
@@ -3,6 +3,7 @@
 # Executed daily via cron, out of dak's crontab.
 
 set -e
+set -o pipefail
 export SCRIPTVARS=/srv/security-master.debian.org/dak/config/debian-security/vars
 . $SCRIPTVARS
 
@@ -20,10 +21,11 @@ for file in override*.gz; do
 done
 find . -maxdepth 1 -mindepth 1 -type l | xargs --no-run-if-empty rm
 
-for suite in $suites; do
+for suite in stable testing; do
     case $suite in
-        stable) override_suite=squeeze;;
-        testing) override_suite=wheezy;;
+        oldstable) override_suite=squeeze;;
+        stable) override_suite=wheezy;;
+        testing) override_suite=jessie;;
         *) echo "Unknown suite type ($suite)"; exit 1;;
     esac
     for component in $components; do
@@ -49,7 +51,7 @@ for suite in $suites; do
 done
 
 # Generate .all3 overrides for the buildd support
-for dist in squeeze wheezy; do
+for dist in squeeze wheezy jessie; do
     rm -f override.$dist.all3
     components="main contrib non-free";
     if [ -f override.$dist.main.debian-installer.gz ]; then
@@ -78,7 +80,7 @@ if ! lockfile -r100 "$LOCKFILE"; then
 fi
 trap cleanup EXIT
 
-dak clean-queues
+dak clean-queues -i ${unchecked}
 dak clean-queues -i $disembargo
 dak clean-suites
 
@@ -93,7 +95,4 @@ while read dumpname; do
     bzip2 -9fv "$dumpname"
 done
 
-#apt-ftparchive -q clean apt.conf
-#apt-ftparchive -q clean apt.conf.buildd
-
 ################################################################################
diff --git a/config/debian-security/cron.hourly b/config/debian-security/cron.hourly
index ddbf09fcf4f3ccebe575d7aaf8ececcfd630cf84..6a6980579e1204adc6ec566a2d3d1342438d57ca 100755 (executable)
@@ -3,6 +3,7 @@
 # Executed hourly via cron, out of dak's crontab.
 
 set -e
+set -o pipefail
 set -u
 
 export SCRIPTVARS=/srv/security-master.debian.org/dak/config/debian-security/vars
diff --git a/config/debian-security/cron.unchecked b/config/debian-security/cron.unchecked
index d8ae45342bd82971a3c95d4e0198b3801d06b2a6..3b03a0ddfbecca826a396fa8ba72e20b29aa203c 100755 (executable)
@@ -1,6 +1,7 @@
 #! /bin/bash
 
 set -e
+set -o pipefail
 set -u
 
 export SCRIPTVARS=/srv/security-master.debian.org/dak/config/debian-security/vars
@@ -17,14 +18,6 @@ doanything=false
 dopolicy=false
 LOCKFILE="$lockdir/unchecked.lock"
 
-# So first we should go and see if any process-policy action is done
-dak process-policy embargoed | mail -a "X-Debian: DAK" -e -s "Automatically accepted from embargoed" team@security.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
-dak process-policy unembargoed | mail -a "X-Debian: DAK" -e -s "Automatically accepted from unembargoed" team@security.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
-
-# Now, if this really did anything, we can then sync it over. Files
-# in newstage mean they are (late) accepts of security stuff, need
-# to sync to ftp-master
-
 cleanup() {
     rm -f "$LOCKFILE"
 }
@@ -35,15 +28,6 @@ if ! lockfile -r8 "$LOCKFILE"; then
 fi
 trap cleanup EXIT
 
-cd $newstage
-changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs)
-if [ -n "$changes" ]; then
-    dopolicy=true
-    echo "$timestamp": ${changes:-"Nothing to do in newstage"}  >> $report
-    rsync -a -q $newstage/. /srv/queued/ftpmaster/.
-    dak process-upload -a -d "$newstage" >> $report
-fi
-
 cd $unchecked
 changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs)
 if [ -n "$changes" ]; then
@@ -54,19 +38,46 @@ fi
 
 cd $disembargo
 changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs)
-
 if [ -n "$changes" ]; then
     doanything=true
     echo "$timestamp": ${changes:-"Nothing to do in disembargo"}  >> $reportdis
     dak process-upload -a -d "$disembargo" >> $reportdis
 fi
 
+for queue in embargoed unembargoed; do
+    echo "$timestamp: processing ${queue}" >> ${report}
+    dak process-policy ${queue} | mail -a "X-Debian: DAK" -e -s "Automatically accepted from ${queue}" team@security.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
+done
+accepted=$(find ${queuedir}/accepted -type f -name "*.changes")
+if [ -n "${accepted}" ]; then
+    dopolicy=true
+fi
+
+# sync accepted files to ftpmaster
+cd ${base}
+find ${queuedir}/accepted -type f -exec mv -t /srv/queued/ftpmaster '{}' +
+
+# export policy queues
+for queue in embargoed; do
+    cd ${queuedir}/${queue}
+    rm -rf export.new
+    mkdir export.new
+    dak export -q ${queue} -d export.new --all
+    rsync -a --delete export.new/. export/.
+    rm -rf export.new
+    cd ${base}
+done
+
 if [ "${doanything}" = "false" ] && [ "${dopolicy}" = "false" ]; then
     echo "$timestamp": Nothing to do >> $report
     exit 0
 fi
 
+# manage build queues
 dak manage-build-queues -a
+dak generate-packages-sources2 -a build-queues
+dak generate-releases -a build-queues >/dev/null
+${scriptsdir}/update-buildd-archive ${base}/build-queues ${incoming}/debian-security-buildd
 
 if [ "x${dopolicy}x" = "xtruex" ]; then
     # We had something approved from a policy queue, push out new archive
@@ -74,9 +85,8 @@ if [ "x${dopolicy}x" = "xtruex" ]; then
     #dak generate-filelist
     cd $configdir
     $configdir/map.sh
-    #apt-ftparchive generate apt.conf
-    dak generate-packages-sources2
-    dak generate-releases
+    dak generate-packages-sources2 -a security
+    dak generate-releases -a security
     /srv/security-master.debian.org/dak/config/debian-security/make-mirror.sh
     sudo -u archvsync -H /home/archvsync/signal_security
 fi
diff --git a/config/debian-security/cron.weekly b/config/debian-security/cron.weekly
index 15c9d16fe2fa6db2c2265aae7b97570381d13f1d..5cfdf42c551d3fd2ef16ad44c0ee5c88192f1b08 100755 (executable)
@@ -3,6 +3,7 @@
 # Executed weekly via cron, out of dak's crontab.
 
 set -e
+set -o pipefail
 export SCRIPTVARS=/srv/security-master.debian.org/dak/config/debian-security/vars
 . $SCRIPTVARS
 
@@ -25,7 +26,7 @@ if ! lockfile -r100 "$LOCKFILE"; then
 fi
 trap cleanup EXIT
 
-dak generate-releases
+dak generate-releases -a security
 
 cleanup
 trap - EXIT
diff --git a/config/debian-security/dak.conf b/config/debian-security/dak.conf
index c5ad1ca1ca4bcb2cb36a3096a353c10e831a6ce6..363d8c79196eaa17b2be9634ef122a7297bb58a7 100644 (file)
@@ -3,7 +3,6 @@ Dinstall
    // was non-us.d.o path before
    SigningKeyring "/srv/security-master.debian.org/s3kr1t/dot-gnupg/secring.gpg";
    SigningPubKeyring "/srv/security-master.debian.org/s3kr1t/dot-gnupg/pubring.gpg";
-   SigningKeyIds "55BE302B";
    SendmailCommand "/usr/sbin/sendmail -odq -oi -t";
    MyEmailAddress "Debian FTP Masters <ftpmaster@ftp-master.debian.org>";
    MyAdminAddress "ftpmaster@debian.org";
@@ -13,8 +12,6 @@ Dinstall
    PackagesServer "packages.debian.org";
    Bcc "archive@ftp-master.debian.org";
    // GroupOverrideFilename "override.group-maint";
-   FutureTimeTravelGrace 28800; // 8 hours
-   PastCutoffYear "1984";
    SkipTime 300;
    CloseBugs "false";
    OverrideDisparityCheck "false";
@@ -32,6 +29,11 @@ Process-New
   LockDir "/srv/security-master.debian.org/lock/new/";
 };
 
+Process-Policy
+{
+  CopyDir "/srv/security-master.debian.org/queue/accepted";
+};
+
 Import-Users-From-Passwd
 {
   ValidGID "800";
@@ -54,6 +56,7 @@ Import-LDAP-Fingerprints
 {
   LDAPDn "ou=users,dc=debian,dc=org";
   LDAPServer "db.debian.org";
+  CACertFile "/etc/ssl/certs/spi-cacert-2008.pem";
   ExtraKeyrings
   {
     "/srv/keyring.debian.org/keyrings/removed-keys.pgp";
@@ -121,62 +124,20 @@ Security-Install
   };
 };
 
-Suite
-{
-  // Priority determines which suite is used for the Maintainers file
-  // as generated by 'dak make-maintainers' (highest wins).
-
-  Stable
-  {
-       Components
-       {
-         updates/main;
-         updates/contrib;
-         updates/non-free;
-       };
-       Announce "dak@security.debian.org";
-       Version "";
-       Origin "Debian";
-       Label "Debian-Security";
-       Description "Debian 6.0 Security Updates";
-       ValidTime 864000; // 10 days
-       CodeName "squeeze";
-       OverrideCodeName "squeeze";
-       CopyDotDak "/srv/security-master.debian.org/queue/done/";
-  };
-
-  Testing
-  {
-       Components
-       {
-         updates/main;
-         updates/contrib;
-         updates/non-free;
-       };
-       Announce "dak@security.debian.org";
-       Version "";
-       Origin "Debian";
-       Label "Debian-Security";
-       Description "Debian testing Security Updates";
-       ValidTime 864000; // 10 days
-       CodeName "wheezy";
-       OverrideCodeName "wheezy";
-       CopyDotDak "/srv/security-master.debian.org/queue/done/";
-  };
-};
-
 SuiteMappings
 {
  "silent-map stable-security stable";
  "silent-map oldstable-security oldstable";
   "silent-map etch-secure oldstable";
   "silent-map testing-security testing";
-  "silent-map squeeze-security stable";
-  "silent-map wheezy-security testing";
+  "silent-map squeeze-security oldstable";
+  "silent-map wheezy-security stable";
+  "silent-map jessie-security testing";
 };
 
 Dir
 {
+  Base "/srv/security-master.debian.org/";
   Root "/srv/security-master.debian.org/ftp/";
   Pool "/srv/security-master.debian.org/ftp/pool/";
   Export "/srv/security-master.debian.org/export/";
@@ -197,14 +158,6 @@ Dir
 
   Queue
   {
-    Byhand "/srv/security-master.debian.org/queue/byhand/";
-    New "/srv/security-master.debian.org/queue/new/";
-    Unchecked "/srv/security-master.debian.org/queue/unchecked/";
-    Newstage "/srv/security-master.debian.org/queue/newstage/";
-
-    ProposedUpdates "/srv/security-master.debian.org/does/not/exist/"; // XXX fixme
-    OldProposedUpdates "/srv/security-master.debian.org/does/not/exist/"; // XXX fixme
-
     Embargoed "/srv/security-master.debian.org/queue/embargoed/";
     Unembargoed "/srv/security-master.debian.org/queue/unembargoed/";
     Disembargo "/srv/security-master.debian.org/queue/unchecked-disembargo/";
@@ -222,40 +175,6 @@ DB
   Unicode "false"
 };
 
-Architectures
-{
-
-  source "Source";
-  all "Architecture Independent";
-  alpha "DEC Alpha";
-  hppa "HP PA RISC";
-  arm "ARM";
-  armel "ARM EABI";
-  i386 "Intel ia32";
-  ia64 "Intel ia64";
-  mips "MIPS (Big Endian)";
-  mipsel "MIPS (Little Endian)";
-  powerpc "PowerPC";
-  s390 "IBM S/390";
-  sparc "Sun SPARC/UltraSPARC";
-  amd64 "AMD x86_64 (AMD64)";
-  kfreebsd-i386 "GNU/kFreeBSD i386";
-  kfreebsd-amd64 "GNU/kFreeBSD amd64";
-
-};
-
-Archive
-{
-
-  security
-  {
-    OriginServer "security.debian.org";
-    PrimaryMirror "security.debian.org";
-    Description "Security Updates for the Debian project";
-  };
-
-};
-
 ComponentMappings
 {
  "main updates/main";
@@ -266,72 +185,6 @@ ComponentMappings
  "non-US/non-free updates/non-free";
 };
 
-Section
-{
-  admin;
-  cli-mono;
-  comm;
-  database;
-  debian-installer;
-  debug;
-  devel;
-  doc;
-  editors;
-  embedded;
-  electronics;
-  fonts;
-  games;
-  gnome;
-  graphics;
-  gnu-r;
-  gnustep;
-  hamradio;
-  haskell;
-  httpd;
-  interpreters;
-  java;
-  kde;
-  kernel;
-  libdevel;
-  libs;
-  lisp;
-  localization;
-  mail;
-  math;
-  misc;
-  net;
-  news;
-  ocaml;
-  oldlibs;
-  otherosfs;
-  perl;
-  php;
-  python;
-  ruby;
-  science;
-  shells;
-  sound;
-  tex;
-  text;
-  utils;
-  web;
-  vcs;
-  video;
-  x11;
-  xfce;
-  zope;
-};
-
-Priority
-{
-  required 1;
-  important 2;
-  standard 3;
-  optional 4;
-  extra 5;
-  source 0; // i.e. unused
-};
-
 Urgency
 {
   Default "low";
@@ -344,30 +197,3 @@ Urgency
     critical;
   };
 };
-
-Changelogs
-{
-  Export "/srv/security-master.debian.org/export/changelogs";
-}
-
-Generate-Releases
-{
-  MD5Sum
-  {
-    oldstable;
-    stable;
-    testing;
-  };
-  SHA1
-  {
-    oldstable;
-    stable;
-    testing;
-  };
-  SHA256
-  {
-    oldstable;
-    stable;
-    testing;
-  };
-}
diff --git a/config/debian-security/dak.conf-etc b/config/debian-security/dak.conf-etc
index e8af8d981b3d39c7932931893052d14c5b2a475a..a817013b37176663d9882ea16c0081babee7cffa 100644 (file)
@@ -4,6 +4,5 @@ Config
   {
     DatabaseHostname    "security";
     DakConfig           "/org/security-master.debian.org/dak/config/debian-security/dak.conf";
-    AptConfig           "/org/security-master.debian.org/dak/config/debian-security/apt.conf";
   }
-}
\ No newline at end of file
+}
diff --git a/config/debian-security/export.sh b/config/debian-security/export.sh
index 81d5ee085b8268e8d2b802c84b7d1adfa6b9d1f2..f3520e82edaebc3cf65ec592afbebb4c1f74174c 100755 (executable)
@@ -19,7 +19,7 @@ export LC_ALL=C
 # extract changelogs and stuff
 function changelogs() {
     log "Extracting changelogs"
-    dak make-changelog -e
+    dak make-changelog -e -a security
     mkdir -p ${exportpublic}/changelogs
     cd ${exportpublic}/changelogs
     rsync -aHW --delete --delete-after --ignore-errors ${exportdir}/changelogs/. .
diff --git a/config/debian-security/map.sh b/config/debian-security/map.sh
index 06cd5384720a222e806142e3bcc22c96c68a2e6d..caa80b9c407ea3cdcf3d848a469ef52dafe821c8 100755 (executable)
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-dak make-pkg-file-mapping | bzip2 -9 > /srv/security-master.debian.org/ftp/indices/package-file.map.bz2
+dak make-pkg-file-mapping security | bzip2 -9 > /srv/security-master.debian.org/ftp/indices/package-file.map.bz2
diff --git a/config/debian-security/vars b/config/debian-security/vars
index 2d040a910fd4eba7f0a3b8673127db4d58cfa1bf..2a94fcd12cef772c07102d7a9196c5d0d4e77a2e 100644 (file)
@@ -26,6 +26,7 @@ disembargo=$queuedir/unchecked-disembargo/
 done=$queuedir/done/
 
 mirrordir=$base/mirror/
+incoming=${base}/buildd
 exportdir=$base/export/
 exportpublic=$public/rsync/export/
 
diff --git a/config/debian/apache.conf-ftp b/config/debian/apache.conf-ftp
index 52a85528a8d67d84130973a6623e709e75ccdf47..a326bb27630de461ed89866d9acdf9386fee1595 100644 (file)
@@ -18,5 +18,9 @@
     Options MultiViews Indexes SymLinksIfOwnerMatch
   </Directory>
 
+  <Directory /srv/ftp.debian.org/web>
+    AddCharset utf-8 .txt
+  </Directory>
+
   RewriteEngine On
   RewriteRule ^/~(.+) /users/$1 [R=permanent,L]
diff --git a/config/debian/apache.conf-incoming b/config/debian/apache.conf-incoming
index 0b6c4db287d0eaa3bd6e73b4471298eb4c2ec287..7055f777e0108fb1b8dfd5819228924e9b63dcfb 100644 (file)
     Order deny,allow
   </Directory>
 
+  <Directory /srv/incoming.debian.org>
+    <Files robots.txt>
+      Order deny,allow
+    </Files>
+  </Directory>
+  Alias /robots.txt /srv/incoming.debian.org/robots.txt
+
   # buildd direct access -- buildds only
   Alias /debian /srv/ftp.debian.org/mirror
+  Alias /debian-backports /srv/backports-master.debian.org/mirror
+  Alias /debian-buildd /srv/incoming.debian.org/debian-buildd
 
-  # New suite aliases
-  Alias /buildd-unstable /srv/incoming.debian.org/dists/unstable/current/
-  Alias /buildd-experimental /srv/incoming.debian.org/dists/experimental/current/
-
-  <DirectoryMatch ~ "^/srv/(incoming\.debian\.org/(dists/|builddweb)|ftp\.debian\.org/mirror)">
+  <DirectoryMatch ~ "^/srv/(incoming\.debian\.org/(builddweb|debian-buildd/)|ftp\.debian\.org/mirror|backports-master\.debian\.org/mirror)">
     Order allow,deny
 
     Use DebianBuilddHostList
     allow from 206.12.19.8
 
     # franck.d.o
-    allow from 128.148.34.3
+    allow from 138.16.160.12
 
     # test access to check functionality, ganneff
     allow from 213.146.108.162
 
+    # for testing sbuild-db; rleigh@codelibre.net
+    # nagini.codelibre.net
+    allow from 80.68.93.164
+    allow from 2001:41c8:1:5750::2
+
     # Should be in DSA list
     # amd64
     # vitry (archive rebuild)
diff --git a/config/debian/apt.conf b/config/debian/apt.conf
deleted file mode 100644 (file)
index eff5239..0000000
+++ /dev/null
@@ -1,197 +0,0 @@
-// The only use of this file nowadays is to serve as something for "apt-ftparchive clean"
-// and for the "python apt read an apt.conf" thingie. Otherwise it's not really in use,
-// all the generation stuff is done in generate-packages-sources
-//
-// Well, except for that generate-releases and generate-index-diffs codebase, which
-// REALLY wants a cleanup.
-
-Dir 
-{
-   ArchiveDir "/srv/ftp-master.debian.org/ftp/";
-   OverrideDir "/srv/ftp-master.debian.org/scripts/override/";
-   CacheDir "/srv/ftp-master.debian.org/database/";
-};
-
-Default
-{
-   Packages::Compress "gzip bzip2";
-   Sources::Compress "gzip bzip2";
-   Contents::Compress "gzip";
-   DeLinkLimit 0;
-   MaxContentsChange 25000;
-   FileMode 0664;
-}
-
-TreeDefault
-{
-   Contents::Header "/srv/ftp-master.debian.org/dak/config/debian/Contents.top";
-};
-
-tree "dists/proposed-updates"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/proposed-updates_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/proposed-updates_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "amd64 armel i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source";
-   BinOverride "override.squeeze.$(SECTION)";
-   ExtraOverride "override.squeeze.extra.$(SECTION)";
-   SrcOverride "override.squeeze.$(SECTION).src";
-   Contents " ";
-};
-
-tree "dists/squeeze-updates"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/squeeze-updates_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/squeeze-updates_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "amd64 armel i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source";
-   BinOverride "override.squeeze.$(SECTION)";
-   ExtraOverride "override.squeeze.extra.$(SECTION)";
-   SrcOverride "override.squeeze.$(SECTION).src";
-   Contents " ";
-};
-
-tree "dists/testing"
-{
-   FakeDI "dists/unstable";
-   FileList "/srv/ftp-master.debian.org/database/dists/testing_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/testing_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "amd64 armel i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source";
-   BinOverride "override.wheezy.$(SECTION)";
-   ExtraOverride "override.wheezy.extra.$(SECTION)";
-   SrcOverride "override.wheezy.$(SECTION).src";
-};
-
-tree "dists/testing-proposed-updates"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/testing-proposed-updates_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/testing-proposed-updates_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "amd64 armel i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64 source";
-   BinOverride "override.wheezy.$(SECTION)";
-   ExtraOverride "override.wheezy.extra.$(SECTION)";
-   SrcOverride "override.wheezy.$(SECTION).src";
-   Contents " ";
-};
-
-tree "dists/unstable"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/unstable_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/unstable_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "alpha amd64 armel armhf hppa hurd-i386 i386 ia64 mips mipsel powerpc s390 s390x sparc kfreebsd-i386 kfreebsd-amd64 source";
-   BinOverride "override.sid.$(SECTION)";
-   ExtraOverride "override.sid.extra.$(SECTION)";
-   SrcOverride "override.sid.$(SECTION).src";
-};
-
-// debian-installer
-
-tree "dists/proposed-updates/main"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/proposed-updates_main_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "amd64 armel i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64";
-   BinOverride "override.squeeze.main.$(SECTION)";
-   SrcOverride "override.squeeze.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   Contents " ";
-};
-
-tree "dists/testing/main"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/testing_main_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "amd64 armel i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64";
-   BinOverride "override.wheezy.main.$(SECTION)";
-   SrcOverride "override.wheezy.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   Contents "$(DIST)/../Contents-udeb";
-};
-
-tree "dists/testing/non-free"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/testing_non-free_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "amd64 armel i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64";
-   BinOverride "override.wheezy.main.$(SECTION)";
-   SrcOverride "override.wheezy.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   Contents "$(DIST)/../Contents-udeb-nf";
-};
-
-tree "dists/testing-proposed-updates/main"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/testing-proposed-updates_main_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "amd64 armel i386 ia64 mips mipsel powerpc s390 sparc kfreebsd-i386 kfreebsd-amd64";
-   BinOverride "override.wheezy.main.$(SECTION)";
-   SrcOverride "override.wheezy.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   Contents " ";
-};
-
-tree "dists/unstable/main"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/unstable_main_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "alpha amd64 armel armhf hppa hurd-i386 i386 ia64 mips mipsel powerpc s390 s390x sparc kfreebsd-i386 kfreebsd-amd64";
-   BinOverride "override.sid.main.$(SECTION)";
-   SrcOverride "override.sid.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   Contents "$(DIST)/../Contents-udeb";
-};
-
-tree "dists/unstable/non-free"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/unstable_non-free_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "alpha amd64 armel armhf hppa hurd-i386 i386 ia64 mips mipsel powerpc s390 s390x sparc kfreebsd-i386 kfreebsd-amd64";
-   BinOverride "override.sid.main.$(SECTION)";
-   SrcOverride "override.sid.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   Contents "$(DIST)/../Contents-udeb-nf";
-};
-
-tree "dists/experimental/main"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/experimental_main_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "alpha amd64 armel armhf hppa i386 ia64 mips mipsel powerpc s390 s390x sparc kfreebsd-i386 kfreebsd-amd64";
-   BinOverride "override.sid.main.$(SECTION)";
-   SrcOverride "override.sid.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   Contents "$(DIST)/../Contents-udeb";
-};
-
-tree "dists/experimental/non-free"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/experimental_non-free_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "alpha amd64 armel armhf hppa hurd-i386 i386 ia64 mips mipsel powerpc s390 s390x sparc kfreebsd-i386 kfreebsd-amd64";
-   BinOverride "override.sid.main.$(SECTION)";
-   SrcOverride "override.sid.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   Contents "$(DIST)/../Contents-udeb-nf";
-};
-
-// Experimental
-
-tree "dists/experimental"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/experimental_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/experimental_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "alpha amd64 armel armhf hppa hurd-i386 i386 ia64 mips mipsel powerpc s390 s390x sparc kfreebsd-i386 kfreebsd-amd64 source";
-   BinOverride "override.sid.$(SECTION)";
-   SrcOverride "override.sid.$(SECTION).src";
-};
diff --git a/config/debian/apt.conf.buildd b/config/debian/apt.conf.buildd
deleted file mode 100644 (file)
index 88ebf4a..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-Dir 
-{
-   ArchiveDir "/srv/incoming.debian.org/buildd/";
-   OverrideDir "/srv/ftp-master.debian.org/scripts/override/";
-   CacheDir "/srv/ftp-master.debian.org/database/";
-};
-
-Default
-{
-   Packages::Compress ". bzip2 gzip";
-   Sources::Compress ". bzip2 gzip";
-   DeLinkLimit 0;
-   FileMode 0664;
-}
-
-bindirectory "incoming"
-{
-   Packages "Packages";
-   Contents " ";
-
-   BinOverride "override.sid.all3";
-   BinCacheDB "packages-accepted.db";
-   
-   FileList "/srv/ftp-master.debian.org/database/dists/unstable_accepted.list";
-
-   PathPrefix "";
-   Packages::Extensions ".deb .udeb";
-};
-
-bindirectory "incoming/"
-{
-   Sources "Sources";
-   BinOverride "override.sid.all3";
-   SrcOverride "override.sid.all3.src";
-   FileList "/srv/ftp-master.debian.org/database/dists/unstable_accepted.list";
-};
-
diff --git a/config/debian/common b/config/debian/common
index 3a45b45649469ce5bd1f73d42a856dca71e40bb0..606f614a400afba90e29e7e4a87fbdfeb214b334 100644 (file)
@@ -33,25 +33,37 @@ function wbtrigger() {
 
 # used by cron.dinstall *and* cron.unchecked.
 function make_buildd_dir () {
-    dak manage-build-queues -a
+    local overridesuite
+    local suite
 
-    for dist in $(ls -1 ${incoming}/dists/); do
-        # Skip project trace directory
-        if [ "${dist}x" = "projectx" ]; then continue; fi
-        cd ${incoming}/dists/${dist}
-        mkdir -p tree/${STAMP}
-        cp -al ${incoming}/dists/${dist}/buildd/. tree/${STAMP}/
-        ln -sfT tree/${STAMP} ${incoming}/dists/${dist}/current
-        find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
-    done
+    dak manage-build-queues -a
+    dak generate-packages-sources2 -a build-queues
+    dak generate-releases -a build-queues >/dev/null
+    ${scriptsdir}/update-buildd-archive ${base}/build-queues ${incoming}/debian-buildd
 
+    rm -f ${incoming}/public/*
+    dak export-suite -s "accepted" -d "$incoming/public"
 }
 
 # Process (oldstable)-proposed-updates "NEW" queue
 function punew_do() {
+    local queue="$1"
+    local qdir="$2"
+    local to="${3}"
+
     date -u -R >> REPORT
-    dak process-policy $1 | tee -a REPORT | mail -a "X-Debian: DAK" -e -s "NEW changes in $1" debian-release@lists.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
+    dak process-policy "${queue}" | tee -a REPORT | mail -a "X-Debian: DAK" -e -s "NEW changes in ${queue}" "${to}" -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
     echo >> REPORT
+
+    dak generate-packages-sources2 -s "${queue}"
+
+    STAMP=${STAMP:-$(date "+%Y%m%d%H%M")}
+
+    local exportdir="${qdir}/tree/${STAMP}"
+    local targetdir="${qdir}/export"
+    dak export -q "${queue}" -d "${exportdir}" --all
+    ln -sfT ${exportdir} ${targetdir}
+    find "${qdir}/tree" -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
 }
 
 # These versions used in dinstall
@@ -60,7 +72,7 @@ function punew() {
         log "Doing automated p-u-new processing"
     fi
     cd "${queuedir}/p-u-new"
-    punew_do "$1"
+    punew_do "$1" "${queuedir}/p-u-new" "debian-release@lists.debian.org"
 }
 
 function opunew() {
@@ -68,7 +80,20 @@ function opunew() {
         log "Doing automated o-p-u-new processing"
     fi
     cd "${queuedir}/o-p-u-new"
-    punew_do "$1"
+    punew_do "$1" "${queuedir}/o-p-u-new" "debian-release@lists.debian.org"
+}
+
+function backports_policy() {
+    local queue="backports-policy"
+    local qdir="/srv/backports-master.debian.org/queue/policy"
+    local to="backports-team@debian.org"
+
+    if [ "${PROGRAM}" = "dinstall" ]; then
+        log "Doing automated ${queue} processing"
+    fi
+
+    cd "${qdir}"
+    punew_do "${queue}" "${qdir}" "${to}"
 }
 
 # Do the unchecked processing, in case we have files.
@@ -82,19 +107,19 @@ function do_unchecked () {
 
     echo "$timestamp": ${changes:-"Nothing to do"}  >> $report
     dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$unchecked" >> $report
+    dak process-commands -d "$unchecked" >> $report
 }
 
-# Do the newstage processing, in case we have files.
-function do_newstage () {
-    cd $newstage
+# process NEW policy queue
+function do_new () {
+    if [ "${PROGRAM}" = "dinstall" ]; then
+       log "Doing NEW processing"
+    fi
+    (dak process-policy new; dak process-policy byhand) | mail -a "X-Debian: DAK" -e -s "NEW and BYHAND processing" ftpmaster@ftp-master.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
 
-    changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs)
-    report=$queuedir/REPORT
-    timestamp=$(date "+%Y-%m-%d %H:%M")
-    UNCHECKED_WITHOUT_LOCK=${UNCHECKED_WITHOUT_LOCK:-""}
+    dak process-policy backports-new | mail -a "X-Debian: DAK" -e -s "NEW processing for backports-new" backports-team@debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
 
-    echo "$timestamp": ${changes:-"Nothing to do in newstage"}  >> $report
-    dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$newstage" >> $report
+    dak clean-suites -a new,backports-new
 }
 
 function sync_debbugs () {
@@ -124,10 +149,11 @@ function reports() {
     # Send a report on NEW/BYHAND packages
     log "Nagging ftpteam about NEW/BYHAND packages"
     dak queue-report | mail -a "X-Debian: DAK" -e -s "NEW and BYHAND on $(date +%D)" ftpmaster@ftp-master.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
+    dak queue-report -d backports-new,backports-policy | mail -a "X-Debian: DAK" -e -s "NEW and POLICY on $(date +%D)" backports-team@debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
     # and one on crufty packages
     log "Sending information about crufty packages"
-    dak cruft-report > $webdir/cruft-report-daily.txt
-    dak cruft-report -s experimental >> $webdir/cruft-report-daily.txt
+    dak cruft-report -R > $webdir/cruft-report-daily.txt
+    dak cruft-report -R -s experimental >> $webdir/cruft-report-daily.txt
     cat $webdir/cruft-report-daily.txt | mail -a "X-Debian: DAK" -e -s "Debian archive cruft report for $(date +%D)" ftpmaster@ftp-master.debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
 }
 
@@ -136,3 +162,14 @@ function pg_timestamp() {
     log "Saving postgres transaction id for ${tsname}"
     psql -tAc 'select txid_current();' > $base/backup/txid_${tsname}_$(date +%Y.%m.%d-%H:%M:%S)
 }
+
+function get_archiveroot() {
+    local archivename="$1"
+    local query="SELECT path FROM archive WHERE name='${archivename}'"
+    local archiveroot="$(psql -tAc "${query}")"
+    if [ -z "${archiveroot}" ]; then
+        echo "get_archiveroot: couldn't get archiveroot for '${archivename}'" >&2
+        return 1
+    fi
+    echo "${archiveroot}"
+}
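The new get_archiveroot helper resolves an archive name to its on-disk root straight from the database; a hypothetical caller, with the archive name borrowed from the build-queue invocations elsewhere in this diff:

    archiveroot="$(get_archiveroot "build-queues")" || exit 1
    cd "${archiveroot}"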
diff --git a/config/debian/cron.daily b/config/debian/cron.daily
index 172b5bba57c44f842e202067ad7b16afc9583ab3..d24c7f9a7e8baf55d88170ccbf865e695e41aa3d 100755 (executable)
@@ -3,6 +3,7 @@
 # Run daily via cron, out of dak's crontab.
 
 set -e
+set -o pipefail
 set -u
 export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
 . $SCRIPTVARS
@@ -42,6 +43,9 @@ clean_debbugs
 # Generate list of override disparities
 dak override-disparity | gzip -9 > ${webdir}/override-disparity.gz
 
+# Generate stats about the new queue
+dak stats new ${webdir}/NEW-stats.yaml 2> /dev/null
+
 ${scriptsdir}/link_morgue.sh
 
 ################################################################################
diff --git a/config/debian/cron.dinstall b/config/debian/cron.dinstall
index d2443f79cea4768d95112d8e9d449e0423c7a68c..701754caf722f02355d0deea8555d9df96cf89c6 100755 (executable)
@@ -27,6 +27,7 @@
 
 # exit on errors
 set -e
+set -o pipefail
 # make sure to only use defined variables
 set -u
 # ERR traps should be inherited from functions too. (And command
@@ -206,13 +207,12 @@ GO=(
 stage $GO
 
 lockfile "$LOCK_ACCEPTED"
-lockfile "$LOCK_NEW"
 trap remove_all_locks EXIT TERM HUP INT QUIT
 
 GO=(
     FUNC="punew"
     TIME="p-u-new"
-    ARGS="proposedupdates"
+    ARGS="stable-new"
     ERR="false"
 )
 stage $GO
@@ -220,16 +220,16 @@ stage $GO
 GO=(
     FUNC="opunew"
     TIME="o-p-u-new"
-    ARGS="oldproposedupdates"
+    ARGS="oldstable-new"
     ERR="false"
 )
 stage $GO
 
 GO=(
-    FUNC="newstage"
-    TIME="newstage"
+    FUNC="backports_policy"
+    TIME="backports-policy"
     ARGS=""
-    ERR=""
+    ERR="false"
 )
 stage $GO
 
@@ -325,14 +325,6 @@ GO=(
 )
 stage $GO
 
-GO=(
-    FUNC="buildd_dir"
-    TIME="buildd_dir"
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
 state "scripts"
 GO=(
     FUNC="mkmaintainers"
@@ -452,6 +444,14 @@ GO=(
 )
 stage $GO &
 
+GO=(
+    FUNC="mirrorpush-backports"
+    TIME="mirrorpush-backports"
+    ARGS=""
+    ERR="false"
+)
+stage $GO &
+
 GO=(
     FUNC="i18n2"
     TIME="i18n 2"
@@ -486,14 +486,6 @@ GO=(
 )
 stage $GO
 
-GO=(
-    FUNC="aptftpcleanup"
-    TIME="apt-ftparchive cleanup"
-    ARGS=""
-    ERR="false"
-)
-#stage $GO
-
 # we need to wait for the background processes before the end of dinstall
 wait
 
diff --git a/config/debian/cron.hourly b/config/debian/cron.hourly
index 39a4d0d8214f298e16f3748ae1b57fff502dd82c..0c460a6bd8b1e7146c5242f1fc6e91a5982cdf50 100755 (executable)
@@ -3,24 +3,41 @@
 # Executed hourly via cron, out of dak's crontab.
 
 set -e
+set -o pipefail
 set -u
 
 export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
 . $SCRIPTVARS
 
+PROGRAM="Hourly"
+########################################################################
+# Functions                                                            #
+########################################################################
+# common functions are "outsourced"
+. "${configdir}/common"
+
 dak import-users-from-passwd
-dak queue-report -n > $webdir/new.html
-dak queue-report -8 -d new,byhand,proposedupdates,oldproposedupdates -r $webdir/stat
-dak show-deferred -r $webdir/stat > ${webdir}/deferred.html
-dak graph -n new,byhand,p-u-new,o-p-u-new,deferred -r $webdir/stat -i $webdir/stat -x $scriptsdir/rrd-release-freeze-dates
 
 # do not run show-new and other stuff in parallel
 LOCKFILE="$lockdir/unchecked.lock"
+cleanup() {
+    rm -f "${LOCKFILE}"
+}
+
 if lockfile -r16 $LOCKFILE 2> /dev/null; then
+    trap cleanup EXIT
+    do_new
     dak show-new > /dev/null || true
-    rm -f $LOCKFILE
+    cleanup
+    trap - EXIT
 fi
 
+dak queue-report -n > $webdir/new.html
+dak queue-report -n -d backports-new > ${webdir}/backports-new.html
+dak queue-report -8 -d new,byhand,stable-new,oldstable-new -r $webdir/stat
+sudo -u dak-unpriv dak show-deferred -r $webdir/stat > ${webdir}/deferred.html
+dak graph -n new,byhand,stable-new,oldstable-new,deferred -r $webdir/stat -i $webdir/stat -x $scriptsdir/rrd-release-freeze-dates
+
 cd $webdir
 cat removals-20*.txt > removals-full.txt
 cat removals.txt >> removals-full.txt
@@ -32,10 +49,21 @@ $base/dak/tools/removals.pl $configdir/removalsrss.rc > $webdir/rss/removals.rss
 
 
 # Tell ries to sync its tree
-ssh -o Batchmode=yes -o ConnectTimeout=30 -o SetupTimeout=30 -2 -i ${base}/s3kr1t/pushddmirror dak@ries.debian.org sync
+${scriptsdir}/sync-dd ries-sync ries-sync1 ries-sync2 sync
 
 $scriptsdir/generate-di
 
+# Push files over to security
+#pg_dump -a -F p -t files | sed -e "s,^COPY files (,DELETE FROM external_files; COPY external_files (," | xz -3 | \
+#        ssh -o BatchMode=yes -o ConnectTimeout=30 -o SetupTimeout=30 -2 -i ${base}/s3kr1t/push-external_files dak@wherever sync
+#
+# The key should run the following command:
+#        'xzcat | pg_restore -1 -a'
+
+# Update backports ACL
+sudo -u dak-unpriv cat /srv/backports-master.debian.org/etc/acl \
+  | dak acl set-fingerprints backports \
+  | mail -a "X-Debian: DAK" -e -s "backports: acl changes" backports-team@debian.org -- -F "Debian FTP Masters" -f ftpmaster@ftp-master.debian.org
 
 # do the buildd key updates
 BUILDDFUN=$(mktemp -p "${TMPDIR}" BUILDDFUN.XXXXXX)
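
The commented-out block further up sketches the planned push of the files table to the security host; the receiving end would pair the push key with a forced command, per the 'xzcat | pg_restore -1 -a' note. A hypothetical authorized_keys entry on that host (key material, comment and restrictions are placeholders; only the forced command comes from the note above):

    command="xzcat | pg_restore -1 -a",no-port-forwarding,no-X11-forwarding,no-agent-forwarding ssh-rsa AAAA... dak@ftp-master push-external_files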
index 980cb6d6905d63651932c50e6d516d80f804e876..d525534effc06ac898fec804c1de239e98d006a7 100755 (executable)
@@ -3,6 +3,7 @@
 # Run at the beginning of the month via cron, out of dak's crontab.
 
 set -e
+set -o pipefail
 set -u
 export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
 . $SCRIPTVARS
@@ -25,11 +26,19 @@ for m in mail bxamail; do
     fi;
 done
 
-DATE=`date +%Y-%m`
+DATE=$(date +%Y-%m)
 cd ${base}/log
 touch $DATE
 ln -sf $DATE current
 chmod g+w $DATE
 chown dak:ftpteam $DATE
 
+DATE=$(date -d yesterday +%Y-%m)
+cd /srv/upload.debian.org/queued
+sudo -u dak-unpriv /srv/upload.debian.org/queued/debianqueued -k || true
+mv run/log log.${DATE}
+sudo -u dak-unpriv touch run/log
+xz -9 log.${DATE}
+sudo -u dak-unpriv /srv/upload.debian.org/queued/debianqueued 2>/dev/null
+
 ################################################################################
index b0129733422ee1ff4e8e4ec40e13f356a4cd1654..78c0e9ba62e3cfbbb0386e7f6541474aae8fe63b 100755 (executable)
@@ -18,6 +18,7 @@
 
 # exit on errors
 set -e
+set -o pipefail
 # make sure to only use defined variables
 set -u
 # ERR traps should be inherited from functions too. (And command
@@ -25,6 +26,11 @@ set -u
 # the important part here)
 set -E
 
+# We sleep a while here, as cron - and with it @reboot jobs like this
+# one - starts pretty early during boot, while parts of the system are
+# still missing; most noticeable for us is a not-yet-started postgres.
+sleep 75
+
 # import the general variable set.
 export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
 . $SCRIPTVARS
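
The fixed 75 second sleep above is a pragmatic guess at how long boot takes. If one wanted to wait for postgres explicitly instead, a polling loop along these lines would do; this is a sketch only, not part of this commit, and assumes pg_isready from postgresql-client is available:

    # wait up to five minutes for postgres, checking every five seconds
    for try in $(seq 1 60); do
        pg_isready -q && break
        sleep 5
    done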
index a534ba06cf2cd159045f1c009c818fe8581fad47..34571185fa69f0fb2d85ef6b303872c9166202b8 100755 (executable)
@@ -18,6 +18,7 @@
 
 # exit on errors
 set -e
+set -o pipefail
 # make sure to only use defined variables
 set -u
 # ERR traps should be inherited from functions too. (And command
@@ -31,7 +32,6 @@ export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
 
 LOCKDAILY=""
 LOCKFILE="$lockdir/unchecked.lock"
-LOCK_NEW="$lockdir/processnew.lock"
 NOTICE="$lockdir/daily.lock"
 LOCK_BUILDD="$lockdir/buildd.lock"
 
@@ -62,11 +62,13 @@ function do_buildd () {
         LOCKDAILY="YES"
         cd $overridedir
         dak make-overrides &>/dev/null
-        rm -f override.sid.all3 override.sid.all3.src
+        rm -f override.sid.all3 override.sid.all3.src override.squeeze-backports.all3 override.squeeze-backports.all3.src
         for i in main contrib non-free main.debian-installer; do
             cat override.sid.$i >> override.sid.all3
+            cat override.squeeze-backports.$i >> override.squeeze-backports.all3
             if [ "$i" != "main.debian-installer" ]; then
                 cat override.sid.$i.src >> override.sid.all3.src
+                cat override.squeeze-backports.$i.src >> override.squeeze-backports.all3.src
             fi
         done
         make_buildd_dir
@@ -99,11 +101,10 @@ trap cleanup 0
 pg_timestamp preunchecked >/dev/null
 
 # Process policy queues
-punew proposedupdates
-opunew oldproposedupdates
-
-# Now process the NEW staging area
-do_newstage
+punew stable-new
+opunew oldstable-new
+backports_policy
+dak clean-suites -a backports-policy,policy
 
 # Finally deal with unchecked
 do_unchecked
index 06d3f90a93d6e3a54e81a2afd6f571b93127237a..7d884b6a1e707a07f9afb1eb5378b1cc8f0db933 100755 (executable)
@@ -1,8 +1,9 @@
-#!/bin/sh
+#!/bin/bash
 #
 # Run once a week via cron, out of dak's crontab.
 
 set -e
+set -o pipefail
 set -u
 # ERR traps should be inherited from functions too. (And command
 # substitutions and subshells and whatnot, but for us the functions is
@@ -31,10 +32,6 @@ if [ ! -z "$(find $ftpdir/pool/ -type d -empty)" ]; then
    find $ftpdir/pool/ -type d -empty | xargs rmdir;
 fi
 
-# Split queue/done
-echo "Splitting queue/done"
-dak split-done > /dev/null
-
 # Do git cleanup stuff
 echo "Doing git stuff"
 cd /srv/ftp.debian.org/git/dak.git
index f1bc068513951cb23d9c8ade53fb1657e5a88613..eb861b45a8d96decf230edb90f3318d97c9bef9e 100644 (file)
@@ -15,5 +15,5 @@ PATH=/usr/local/bin:/usr/bin:/bin
 
 # We do like our queued and make sure it will always be there.
 # Luckily it checks itself and doesn't start if it already runs
-@reboot /srv/queued/debianqueued-0.9/debianqueued
-*/10        *          *   *   *   /srv/queued/debianqueued-0.9/debianqueued 2>/dev/null
+@reboot sudo -u dak-unpriv /srv/queued/debianqueued-0.9/debianqueued
+*/10        *          *   *   *   sudo -u dak-unpriv /srv/queued/debianqueued-0.9/debianqueued 2>/dev/null
index 532d53a7eef44b982f613c8516513a74b8da722b..26e4616752cf1206f788df172c0823385c669422 100644 (file)
@@ -5,15 +5,13 @@ Dinstall
 //   SendmailCommand "/usr/sbin/sendmail -odq -oi -t -f envelope@ftp-master.debian.org";
    SendmailCommand "/usr/sbin/sendmail -oi -t -f envelope@ftp-master.debian.org";
    MyEmailAddress "Debian FTP Masters <ftpmaster@ftp-master.debian.org>";
-   MyAdminAddress "ftpmaster@debian.org";
+   MyAdminAddress "ftpmaster@ftp-master.debian.org";
    MyHost "debian.org";  // used for generating user@my_host addresses in e.g. manual_reject()
    MyDistribution "Debian"; // Used in emails
    BugServer "bugs.debian.org";
    PackagesServer "packages.debian.org";
    TrackingServer "packages.qa.debian.org";
    Bcc "archive@ftp-master.debian.org";
-   FutureTimeTravelGrace 28800; // 8 hours
-   PastCutoffYear "1984";
    SkipTime 300;
    BXANotify "true";
    CloseBugs "true";
@@ -40,6 +38,7 @@ Generate-Index-Diffs
 {
    Options
    {
+     Archive "ftp-master";
      TempDir "/srv/ftp-master.debian.org/tiffani";
      MaxDiffs { Default 56; };
    };
@@ -48,6 +47,7 @@ Generate-Index-Diffs
 Show-New
 {
   HTMLPath "/srv/ftp.debian.org/web/new/";
+  Options::Queue "new,backports-new";
 }
 
 Show-Deferred
@@ -60,7 +60,7 @@ Import-Users-From-Passwd
 {
   ValidGID "800";
   // Comma separated list of users who are in Postgres but not the passwd file
-  KnownPostgres "postgres,dak,katie,release,qa,www-data,guest,repuser";
+  KnownPostgres "postgres,dak,dak-unpriv,katie,release,qa,www-data,guest,repuser";
 };
 
 Clean-Queues
@@ -98,6 +98,7 @@ Import-LDAP-Fingerprints
 {
   LDAPDn "ou=users,dc=debian,dc=org";
   LDAPServer "db.debian.org";
+  CACertFile "/etc/ssl/certs/spi-cacert-2008.pem";
   ExtraKeyrings
   {
     "/srv/keyring.debian.org/keyrings/removed-keys.pgp";
@@ -116,131 +117,54 @@ Clean-Suites
 
 Process-New
 {
-  DinstallLockFile "/srv/ftp-master.debian.org/lock/processnew.lock";
   LockDir "/srv/ftp-master.debian.org/lock/new/";
 };
 
-Check-Overrides
-{
-  OverrideSuites
-  {
-    Stable
-    {
-      Process "0";
-    };
-
-    Testing
-    {
-      Process "1";
-      OriginSuite "Unstable";
-    };
-
-    Unstable
-    {
-      Process "1";
-    };
-  };
-};
-
-Suite
-{
-  Stable
-  {
-       Components
-       {
-         main;
-         contrib;
-         non-free;
-       };
-  };
-
-  Proposed-Updates
-  {
-       Components
-       {
-         main;
-         contrib;
-         non-free;
-       };
-  };
-
-  Testing
-  {
-       Components
-       {
-         main;
-         contrib;
-         non-free;
-       };
-  };
-
-  Testing-Proposed-Updates
-  {
-       Components
-       {
-         main;
-         contrib;
-         non-free;
-       };
-  };
-
-  Squeeze-Updates
-  {
-       Components
-       {
-         main;
-         contrib;
-         non-free;
-       };
-  };
-
-  Unstable
-  {
-       Components
-       {
-         main;
-         contrib;
-         non-free;
-       };
-  };
-
-  Experimental
-  {
-       Components
-       {
-         main;
-         contrib;
-         non-free;
-       };
-  };
-};
-
 SuiteMappings
 {
- "silent-map squeeze-security stable-security";
- "silent-map wheezy-security testing-security";
+ "silent-map squeeze-security oldstable-security";
+ "silent-map wheezy-security stable-security";
+ "silent-map jessie-security testing-security";
  "propup-version stable-security testing testing-proposed-updates unstable";
  "propup-version testing-security unstable";
- "map squeeze stable";
- "map squeeze-proposed-updates proposed-updates";
- "map squeeze-updates proposed-updates";
- "map wheezy testing";
- "map wheezy-proposed-updates testing-proposed-updates";
+ "map squeeze oldstable";
+ "map squeeze-proposed-updates oldstable-proposed-updates";
+ "map squeeze-updates oldstable-proposed-updates";
+ "map wheezy stable";
+ "map wheezy-proposed-updates proposed-updates";
+ "map wheezy-updates proposed-updates";
+ "map jessie testing";
+ "map jessie-proposed-updates testing-proposed-updates";
+ "map jessie-updates testing-proposed-updates";
  "map sid unstable";
  "map rc-buggy experimental";
+ "map oldstable oldstable-proposed-updates";
+ "map oldstable-security oldstable-proposed-updates";
+ "map oldstable-updates oldstable-proposed-updates";
  "map stable proposed-updates";
  "map stable-security proposed-updates";
  "map stable-proposed-updates proposed-updates";
  "map stable-updates proposed-updates";
+ "map testing-updates testing-proposed-updates";
  "map-unreleased stable unstable";
  "map-unreleased proposed-updates unstable";
  "map testing testing-proposed-updates";
  "map testing-security testing-proposed-updates";
  "map-unreleased testing unstable";
  "map-unreleased testing-proposed-updates unstable";
+
+ "reject jessie-backports";
+ "reject jessie-updates";
 };
 
 AutomaticByHandPackages {
+  "debian-faq" {
+    Source "debian-faq";
+    Section "byhand";
+    Extension "gz";
+    Script "/srv/ftp-master.debian.org/dak/scripts/debian/byhand-debian-faq";
+  };
+
   "debian-installer-images" {
     Source "debian-installer";
     Section "raw-installer";
@@ -271,10 +195,10 @@ AutomaticByHandPackages {
 
 Dir
 {
+  Base "/srv/ftp-master.debian.org/";
   Root "/srv/ftp-master.debian.org/ftp/";
   Pool "/srv/ftp-master.debian.org/ftp/pool/";
   Templates "/srv/ftp-master.debian.org/dak/templates/";
-  Export "/srv/ftp-master.debian.org/export/";
   Lists "/srv/ftp-master.debian.org/database/dists/";
   Cache "/srv/ftp-master.debian.org/database/";
   Log "/srv/ftp-master.debian.org/log/";
@@ -340,36 +264,29 @@ Changelogs
   Export "/srv/ftp-master.debian.org/export/changelogs";
 }
 
-Generate-Releases
-{
-  MD5Sum
-  {
-    proposed-updates;
-    stable;
-    squeeze-updates;
-    testing;
-    testing-proposed-updates;
-    unstable;
-    experimental;
+ByGroup {
+  dak-unpriv "/srv/ftp-master.debian.org/dak/config/debian/dak.conf-dak";
+  ftpteam "";
+  backports "/srv/ftp-master.debian.org/dak/config/debian/dak.conf-backports";
+};
+
+Command::DM {
+  ACL "dm";
+  AdminKeyrings {
+    "/srv/keyring.debian.org/keyrings/debian-keyring.gpg";
   };
-  SHA1
-  {
-    proposed-updates;
-    squeeze-updates;
-    stable;
-    testing;
-    testing-proposed-updates;
-    unstable;
-    experimental;
+  Keyrings {
+    "/srv/keyring.debian.org/keyrings/debian-maintainers.gpg";
   };
-  SHA256
-  {
-    proposed-updates;
-    squeeze-updates;
-    stable;
-    testing;
-    testing-proposed-updates;
-    unstable;
-    experimental;
+};
+
+Command::DM-Admin {
+  AdminFingerprints {
+    "AB41C1C68AFD668CA045EBF8673A03E4C1DB921F"; // gwolf
+    "0E3A94C3E83002DAB88CCA1694FA372B2DA8B985"; // noodles
+
+    "80E976F14A508A48E9CA3FE9BC372252CA1CF964"; // ansgar
+    "FBFABDB541B5DC955BD9BA6EDB16CF5BB12525C4"; // joerg
+    "309911BEA966D0613053045711B4E5FF15B0FD82"; // mhy
   };
-}
+};
diff --git a/config/debian/dak.conf-backports b/config/debian/dak.conf-backports
new file mode 100644 (file)
index 0000000..82fb16f
--- /dev/null
@@ -0,0 +1,24 @@
+Dinstall {
+  MyAdminAddress "debian-backports@lists.debian.org";
+};
+
+Dir {
+  Log "/srv/backports-master.debian.org/log/";
+  Lock "/srv/backports-master.debian.org/lock/";
+};
+
+Rm {
+  LogFile "/srv/backports-master.debian.org/log/removals.txt";
+  LogFile822 "/srv/backports-master.debian.org/log/removals.822";
+};
+
+Process-New
+{
+  DinstallLockFile "/srv/backports-master.debian.org/lock/processnew.lock";
+  LockDir "/srv/backports-master.debian.org/lock/new/";
+  Options::Queue "backports-new";
+};
+
+Queue-Report {
+  Options::Directories "backports-new";
+};
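
This per-group file only carries the settings that differ for backports work; presumably the ByGroup block added to dak.conf above layers it on top of the main configuration for members of the backports group. Illustration only (the plain invocation is an assumption, the explicit one is what cron.hourly uses):

    # a member of the "backports" group would simply run
    #     dak queue-report
    # and get the backports-new queue by default; cron.hourly uses the explicit form:
    dak queue-report -n -d backports-new > ${webdir}/backports-new.html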
diff --git a/config/debian/dak.conf-dak b/config/debian/dak.conf-dak
new file mode 100644 (file)
index 0000000..c46b0a2
--- /dev/null
@@ -0,0 +1,4 @@
+Dinstall {
+  UnprivUser "dak-unpriv";
+  UnprivGroup "dak-unpriv";
+};
index 5336a8df0aaca8774b1a30ff754512c444507cd8..63f42126fb85e414a9d9017e7d5e74bfb330c33d 100644 (file)
@@ -5,14 +5,12 @@ Config
     AllowLocalConfig    "false";
     DatabaseHostname    "ftp-master";
     DakConfig           "/srv/ftp-master.debian.org/dak/config/debian/dak.conf";
-    AptConfig           "/srv/ftp-master.debian.org/dak/config/debian/apt.conf";
   }
-  ries.debian.org
+  coccia.debian.org
   {
     AllowLocalConfig    "false";
     DatabaseHostname    "ftp-master";
     DakConfig           "/srv/ftp-master.debian.org/dak/config/debian/dak.conf";
-    AptConfig           "/srv/ftp-master.debian.org/dak/config/debian/apt.conf";
   }
 }
 
index b7679dcfa50edd7b59abf82813ce9a0499f4410a..4441f4e813aaa2591b05ebf05372485566d81ca2 100644 (file)
@@ -9,9 +9,14 @@ function remove_daily_lock() {
     rm -f $LOCK_DAILY
 }
 
+# Remove changelog lock
+function remove_changelog_lock() {
+    rm -f $LOCK_CHANGELOG
+}
+
 # Remove all locks
 function remove_all_locks() {
-    rm -f $LOCK_DAILY $LOCK_ACCEPTED $LOCK_NEW
+    rm -f $LOCK_DAILY $LOCK_ACCEPTED
 }
 
 # If we error out this one is called, *FOLLOWED* by cleanup above
@@ -71,7 +76,7 @@ function i18n1() {
             # Lets check!
             if ${scriptsdir}/ddtp-i18n-check.sh . ${scriptdir}/i18n/${TSTAMP}; then
                 # Yay, worked, lets copy around
-                for dir in wheezy sid; do
+                for dir in jessie sid; do
                     if [ -d dists/${dir}/ ]; then
                         cd dists/${dir}/main/i18n
                         rsync -aq --delete --delete-after --exclude Translation-en.bz2 --exclude Translation-*.diff/ . ${ftpdir}/dists/${dir}/main/i18n/.
@@ -146,14 +151,23 @@ function overrides() {
 }
 
 function mpfm() {
+    local archiveroot
+
     log "Generating package / file mapping"
-    dak make-pkg-file-mapping | bzip2 -9 > $base/ftp/indices/package-file.map.bz2
+    for archive in "${public_archives[@]}"; do
+        archiveroot="$(get_archiveroot "${archive}")"
+        dak make-pkg-file-mapping "${archive}" | bzip2 -9 > "${archiveroot}/indices/package-file.map.bz2"
+    done
 }
 
 function packages() {
     log "Generating Packages and Sources files"
-    dak generate-packages-sources2
-    dak contents generate
+    for archive in "${public_archives[@]}"; do
+        log "  Generating Packages/Sources for ${archive}"
+        dak generate-packages-sources2 -a "${archive}"
+        log "  Generating Contents for ${archive}"
+        dak contents generate -a "${archive}"
+    done
 }
 
 function pdiff() {
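
These loops, and several below, rely on the public_archives array (added to config/debian/vars later in this commit) and on a get_archiveroot helper that is not part of this diff. Presumably it resolves an archive name to its filesystem root via the new archive table; a minimal sketch, assuming that behaviour:

    # assumed helper, the real one presumably lives in config/debian/common
    function get_archiveroot() {
        local archivename="$1"
        local rootpath
        rootpath="$(psql -tAc "SELECT path FROM archive WHERE name='${archivename}'")"
        if [ -z "${rootpath}" ]; then
            log "get_archiveroot: unknown archive ${archivename}"
            exit 1
        fi
        echo "${rootpath}"
    }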
@@ -162,77 +176,62 @@ function pdiff() {
 }
 
 function release() {
-    # XXX: disable once we can remove i18n/Index (#649314)
-    log "Generating i18n/Index"
-    (
-        cd "$ftpdir/dists";
-        for dist in testing unstable experimental proposed-updates testing-proposed-updates; do
-            $scriptsdir/generate-i18n-Index $dist;
-        done
-    )
     log "Generating Release files"
-    dak generate-releases
+    for archive in "${public_archives[@]}"; do
+        dak generate-releases -a "${archive}"
+    done
 }
 
 function dakcleanup() {
     log "Cleanup old packages/files"
     dak clean-suites -m 10000
-    dak clean-queues
-}
-
-function buildd_dir() {
-    # Rebuilt the buildd dir to avoid long times of 403
-    log "Regenerating the buildd incoming dir"
-    STAMP=$(date "+%Y%m%d%H%M")
-    make_buildd_dir
+    dak clean-queues -i "$unchecked"
 }
 
 function mklslar() {
-    cd $ftpdir
-
-    FILENAME=ls-lR
+    local archiveroot
+    local FILENAME=ls-lR
 
-    log "Removing any core files ..."
-    find -type f -name core -print0 | xargs -0r rm -v
+    for archive in "${public_archives[@]}"; do
+        archiveroot="$(get_archiveroot "${archive}")"
+        cd "${archiveroot}"
 
-    log "Checking permissions on files in the FTP tree ..."
-    find -type f \( \! -perm -444 -o -perm +002 \) -ls
-    find -type d \( \! -perm -555 -o -perm +002 \) -ls
+        log "Removing any core files ..."
+        find -type f -name core -print -delete
 
-    log "Checking symlinks ..."
-    symlinks -rd .
+        log "Checking symlinks ..."
+        symlinks -rd .
 
-    log "Creating recursive directory listing ... "
-    rm -f .${FILENAME}.new
-    TZ=UTC ls -lR > .${FILENAME}.new
-
-    if [ -r ${FILENAME}.gz ] ; then
-        mv -f ${FILENAME}.gz ${FILENAME}.old.gz
-        mv -f .${FILENAME}.new ${FILENAME}
-        rm -f ${FILENAME}.patch.gz
-        zcat ${FILENAME}.old.gz | diff -u - ${FILENAME} | gzip -9cfn - >${FILENAME}.patch.gz
-        rm -f ${FILENAME}.old.gz
-    else
-        mv -f .${FILENAME}.new ${FILENAME}
-    fi
-
-    gzip -9cfN ${FILENAME} >${FILENAME}.gz
-    rm -f ${FILENAME}
+        log "Creating recursive directory listing ... "
+        rm -f ${FILENAME}.gz
+        TZ=UTC ls -lR | gzip -9c --rsyncable > ${FILENAME}.gz
+    done
 }
 
 function mkmaintainers() {
+    local archiveroot
+    local indices
+
     log 'Creating Maintainers index ... '
 
-    cd $indices
-    dak make-maintainers ${scriptdir}/masterfiles/pseudo-packages.maintainers
-    gzip -9v --rsyncable <Maintainers >Maintainers.gz
-    gzip -9v --rsyncable <Uploaders >Uploaders.gz
+    for archive in "${public_archives[@]}"; do
+        archiveroot="$(get_archiveroot "${archive}")"
+       indices="${archiveroot}/indices"
+       if ! [ -d "${indices}" ]; then
+           mkdir "${indices}"
+       fi
+        cd "${indices}"
+
+        dak make-maintainers -a "${archive}" ${scriptdir}/masterfiles/pseudo-packages.maintainers
+        gzip -9v --rsyncable <Maintainers >Maintainers.gz
+        gzip -9v --rsyncable <Uploaders >Uploaders.gz
+    done
 }
 
 function copyoverrides() {
     log 'Copying override files into public view ...'
 
-    for ofile in ${overridedir}/override.{squeeze,wheezy,sid}.{,extra.}{main,contrib,non-free}*; do
+    for ofile in ${overridedir}/override.{squeeze,wheezy,jessie,sid}.{,extra.}{main,contrib,non-free}*; do
         bname=${ofile##*/}
         gzip -9cv --rsyncable ${ofile} > ${indices}/${bname}.gz
         chmod g+w ${indices}/${bname}.gz
@@ -240,13 +239,27 @@ function copyoverrides() {
 }
 
 function mkfilesindices() {
+    set +o pipefail
     umask 002
     cd $base/ftp/indices/files/components
 
     ARCHLIST=$(tempfile)
 
     log "Querying postgres"
-    echo 'SELECT l.path, f.filename, a.arch_string FROM location l JOIN files f ON (f.location = l.id) LEFT OUTER JOIN (binaries b JOIN architecture a ON (b.architecture = a.id)) ON (f.id = b.file)' | psql -At | sed 's/|//;s,^/srv/ftp-master.debian.org/ftp,.,' | sort >$ARCHLIST
+    local query="
+      SELECT './pool/' || c.name || '/' || f.filename AS path, a.arch_string AS arch_string
+      FROM files f
+      JOIN files_archive_map af ON f.id = af.file_id
+      JOIN component c ON af.component_id = c.id
+      JOIN archive ON af.archive_id = archive.id
+      LEFT OUTER JOIN
+        (binaries b
+         JOIN architecture a ON b.architecture = a.id)
+        ON f.id = b.file
+      WHERE archive.name = 'ftp-master'
+      ORDER BY path, arch_string
+    "
+    psql -At -c "$query" >$ARCHLIST
 
     includedirs () {
         perl -ne 'print; while (m,/[^/]+$,) { $_=$`; print $_ . "\n" unless $d{$_}++; }'
@@ -279,12 +292,46 @@ function mkfilesindices() {
     log "Generating suite lists"
 
     suite_list () {
-        printf 'SELECT DISTINCT l.path, f.filename FROM (SELECT sa.source AS source FROM src_associations sa WHERE sa.suite = %d UNION SELECT b.source AS source FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) WHERE ba.suite = %d) s JOIN dsc_files df ON (s.source = df.source) JOIN files f ON (df.file = f.id) JOIN location l ON (f.location = l.id)\n' $1 $1 | psql -F' ' -A -t
-
-        printf 'SELECT l.path, f.filename FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) JOIN files f ON (b.file = f.id) JOIN location l ON (f.location = l.id) WHERE ba.suite = %d\n' $1 | psql -F' ' -A -t
+       local suite_id="$(printf %d $1)"
+       local query
+       query="
+          SELECT DISTINCT './pool/' || c.name || '/' || f.filename
+          FROM
+            (SELECT sa.source AS source
+               FROM src_associations sa
+              WHERE sa.suite = $suite_id
+             UNION
+             SELECT esr.src_id
+               FROM extra_src_references esr
+               JOIN bin_associations ba ON esr.bin_id = ba.bin
+               WHERE ba.suite = $suite_id
+             UNION
+             SELECT b.source AS source
+               FROM bin_associations ba
+               JOIN binaries b ON ba.bin = b.id WHERE ba.suite = $suite_id) s
+            JOIN dsc_files df ON s.source = df.source
+            JOIN files f ON df.file = f.id
+            JOIN files_archive_map af ON f.id = af.file_id
+            JOIN component c ON af.component_id = c.id
+            JOIN archive ON af.archive_id = archive.id
+            WHERE archive.name = 'ftp-master'
+        "
+       psql -F' ' -A -t -c "$query"
+
+       query="
+          SELECT './pool/' || c.name || '/' || f.filename
+          FROM bin_associations ba
+          JOIN binaries b ON ba.bin = b.id
+          JOIN files f ON b.file = f.id
+          JOIN files_archive_map af ON f.id = af.file_id
+          JOIN component c ON af.component_id = c.id
+          JOIN archive ON af.archive_id = archive.id
+          WHERE ba.suite = $suite_id AND archive.name = 'ftp-master'
+        "
+       psql -F' ' -A -t -c "$query"
     }
 
-    printf 'SELECT id, suite_name FROM suite\n' | psql -F' ' -At |
+    psql -F' ' -At -c "SELECT id, suite_name FROM suite" |
     while read id suite; do
         [ -e $base/ftp/dists/$suite ] || continue
         (
@@ -295,7 +342,7 @@ function mkfilesindices() {
                     [ "$(readlink $distdir)" != "$distname" ] || echo $distdir
                 done
             )
-            suite_list $id | tr -d ' ' | sed 's,^/srv/ftp-master.debian.org/ftp,.,'
+            suite_list $id
         ) | sort -u | gzip -9 > suite-${suite}.list.gz
     done
 
@@ -315,31 +362,39 @@ function mkfilesindices() {
     done
 
     (cd $base/ftp/
-           for dist in sid wheezy; do
+           for dist in sid jessie; do
                    find ./dists/$dist/main/i18n/ \! -type d | sort -u | gzip -9 > $base/ftp/indices/files/components/translation-$dist.list.gz
            done
     )
 
-    (cat ../arch-i386.files ../arch-amd64.files; zcat suite-proposed-updates.list.gz ; zcat translation-sid.list.gz ; zcat translation-wheezy.list.gz) |
+    (cat ../arch-i386.files ../arch-amd64.files; zcat suite-proposed-updates.list.gz ; zcat translation-sid.list.gz ; zcat translation-jessie.list.gz) |
     sort -u | poolfirst > ../typical.files
 
     rm -f $ARCHLIST
     log "Done!"
+    set -o pipefail
 }
 
 function mkchecksums() {
-    dsynclist=$dbdir/dsync.list
-    md5list=$indices/md5sums
+    local archiveroot dsynclist md5list
+
+    for archive in "${public_archives[@]}"; do
+        archiveroot="$(get_archiveroot "${archive}")"
+        dsynclist=$dbdir/dsync.${archive}.list
+        md5list=${archiveroot}/indices/md5sums
 
-    log -n "Creating md5 / dsync index file ... "
+        log -n "Creating md5 / dsync index file for ${archive}... "
 
-    cd "$ftpdir"
-    ${bindir}/dsync-flist -q generate $dsynclist --exclude $dsynclist --md5
-    ${bindir}/dsync-flist -q md5sums $dsynclist | gzip -9n > ${md5list}.gz
-    ${bindir}/dsync-flist -q link-dups $dsynclist || true
+        cd "$archiveroot"
+        ${bindir}/dsync-flist -q generate $dsynclist --exclude $dsynclist --md5
+        ${bindir}/dsync-flist -q md5sums $dsynclist | gzip -9n > ${md5list}.gz
+        ${bindir}/dsync-flist -q link-dups $dsynclist || true
+    done
 }
 
 function mirror() {
+    local archiveroot
+
     log "Regenerating \"public\" mirror/ hardlink fun"
     DATE_SERIAL=$(date +"%Y%m%d01")
     FILESOAPLUS1=$(awk '/serial/ { print $3+1 }' ${TRACEFILE} )
@@ -352,8 +407,16 @@ function mirror() {
     echo "Using dak v1" >> ${TRACEFILE}
     echo "Running on host: $(hostname -f)" >> ${TRACEFILE}
     echo "Archive serial: ${SERIAL}" >> ${TRACEFILE}
-    cd ${mirrordir}
-    rsync -aH --link-dest ${ftpdir} --delete --delete-after --delete-excluded --exclude Packages.*.new --exclude Sources.*.new  --ignore-errors ${ftpdir}/. .
+
+    # Ugly "hack", but hey, it does what we want.
+    cp ${TRACEFILE} ${TRACEFILE_BDO}
+
+    for archive in "${public_archives[@]}"; do
+        archiveroot="$(get_archiveroot "${archive}")"
+        mirrordir="${archiveroot}/../mirror"
+        cd ${mirrordir}
+        rsync -aH --link-dest ${archiveroot} --delete --delete-after --delete-excluded --exclude Packages.*.new --exclude Sources.*.new  --ignore-errors ${archiveroot}/. .
+    done
 }
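
The per-archive loop above rebuilds the public mirror tree by hardlinking against the archive root: with --link-dest, any file identical to its counterpart under ${archiveroot} becomes a hardlink instead of a copy, so the mirror/ tree costs almost no extra space. A stripped-down illustration with invented paths:

    rsync -aH --link-dest=/srv/archive/ftp /srv/archive/ftp/. /srv/archive/mirror/.
    # both names now share one inode:
    stat -c '%i %n' /srv/archive/ftp/README /srv/archive/mirror/README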
 
 function expire() {
@@ -369,8 +432,8 @@ function transitionsclean() {
 }
 
 function dm() {
-    log "Updating DM html page"
-    $scriptsdir/dm-monitor >$webdir/dm-uploaders.html
+    log "Updating DM permissions page"
+    dak acl export-per-source dm >$exportdir/dm.txt
 }
 
 function bts() {
@@ -381,7 +444,7 @@ function bts() {
 function ddaccess() {
     # Tell our dd accessible mirror to sync itself up. Including ftp dir.
     log "Trigger dd accessible parts sync including ftp dir"
-    ssh -o Batchmode=yes -o ConnectTimeout=30 -o SetupTimeout=30 -2 -i ${base}/s3kr1t/pushddmirror dak@ries.debian.org pool
+    ${scriptsdir}/sync-dd ries-sync ries-sync1 ries-sync2 pool
 }
 
 function mirrorpush() {
@@ -437,6 +500,12 @@ function mirrorpush() {
     echo "Using dak v1" >> /srv/ftp.debian.org/web/mirrorstart
     echo "Running on host $(hostname -f)" >> /srv/ftp.debian.org/web/mirrorstart
     sudo -H -u archvsync /home/archvsync/runmirrors > ~dak/runmirrors.log 2>&1 &
+    sudo -H -u archvsync /home/archvsync/runmirrors -a backports > ~dak/runmirrorsbpo.log 2>&1 &
+}
+
+function mirrorpush-backports() {
+    log "Syncing backports mirror"
+    sudo -u backports /home/backports/bin/update-archive
 }
 
 function i18n2() {
@@ -451,7 +520,7 @@ function i18n2() {
         dak control-suite -l ${suite} >${codename}
     done
     echo "${STAMP}" > timestamp
-    gpg --secret-keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 55BE302B --detach-sign -o timestamp.gpg timestamp
+    gpg --secret-keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 473041FA --detach-sign -o timestamp.gpg timestamp
     rm -f md5sum
     md5sum * > md5sum
     cd ${webdir}/
@@ -470,12 +539,6 @@ function stats() {
     dak stats pkg-nums > $webdir/pkg-nums
 }
 
-function aptftpcleanup() {
-    log "Clean up apt-ftparchive's databases"
-    cd $configdir
-    apt-ftparchive -q clean apt.conf
-}
-
 function cleantransactions() {
     log "Cleanup transaction ids older than 3 months"
     cd $base/backup/
@@ -524,13 +587,6 @@ function process_unchecked() {
     sync_debbugs
 }
 
-# do a run of newstage only before dinstall is on.
-function newstage() {
-    log "Processing the newstage queue"
-    UNCHECKED_WITHOUT_LOCK="-p"
-    do_newstage
-}
-
 # Function to update a "statefile" telling people what we are doing
 # (more or less).
 #
@@ -548,12 +604,23 @@ EOF
 
 # extract changelogs and stuff
 function changelogs() {
-    log "Extracting changelogs"
-    dak make-changelog -e
-    mkdir -p ${exportpublic}/changelogs
-    cd ${exportpublic}/changelogs
-    rsync -aHW --delete --delete-after --ignore-errors ${exportdir}/changelogs/. .
-    sudo -H -u archvsync /home/archvsync/runmirrors metaftpdo > ~dak/runmirrors-metadata.log 2>&1 &
+    if lockfile -r3 $LOCK_CHANGELOG; then
+        log "Extracting changelogs"
+        dak make-changelog -e -a ftp-master
+        [ -f ${exportdir}/changelogs/filelist.yaml ] && xz -f ${exportdir}/changelogs/filelist.yaml
+        mkdir -p ${exportpublic}/changelogs
+        cd ${exportpublic}/changelogs
+        rsync -aHW --delete --delete-after --ignore-errors ${exportdir}/changelogs/. .
+        sudo -H -u staticsync /usr/local/bin/static-update-component ftp-master.metadata.debian.org >/dev/null 2>&1 &
+
+        dak make-changelog -e -a backports
+        [ -f /srv/backports-master.debian.org/export/changelogs/filelist.yaml ] && xz -f /srv/backports-master.debian.org/export/changelogs/filelist.yaml
+        mkdir -p /srv/backports-master.debian.org/rsync/export/changelogs
+        cd /srv/backports-master.debian.org/rsync/export/changelogs
+        rsync -aHW --delete --delete-after --ignore-errors /srv/backports-master.debian.org/export/changelogs/. .
+        remove_changelog_lock
+        trap remove_changelog_lock EXIT TERM HUP INT QUIT
+    fi
 }
 
 function gitpdiff() {
index c2daeb9c75d97a8a948b79778f792250e71fdab9..f2f88f8d675c62061edf3e86c774fef213d3e2b0 100644 (file)
@@ -28,9 +28,6 @@ LOCK_DAILY="$lockdir/daily.lock"
 # Lock cron.unchecked from doing work
 LOCK_ACCEPTED="$lockdir/unchecked.lock"
 
-# Lock process-new from doing work
-LOCK_NEW="$lockdir/processnew.lock"
-
 # This file is simply used to indicate to britney whether or not
 # the Packages file updates completed successfully.  It's not a lock
 # from our point of view
@@ -43,8 +40,13 @@ LOCK_STOP="$lockdir/archive.stop"
 # Lock buildd updates
 LOCK_BUILDD="$lockdir/buildd.lock"
 
+# Lock changelog updates
+LOCK_CHANGELOG="$lockdir/changelog.lock"
+
 # Statefile for the users
 DINSTALLSTATE="${webdir}/dinstall.status"
 
-# The mirror trace file
 TRACEFILE="${ftpdir}/project/trace/ftp-master.debian.org"
+
+archiveroot="$(get_archiveroot "backports")"
+TRACEFILE_BDO="${archiveroot}/project/trace/ftp-master.debian.org"
index 62126cb5f08aed81274efdb78f2badf0ce1809a5..065bfe3a48f57c3e62a48a5330aabe22ec8730fc 100644 (file)
@@ -19,9 +19,10 @@ lintian:
     - dir-or-file-in-var-www
     - wrong-file-owner-uid-or-gid
     - install-info-used-in-maintainer-script
-    - missing-pre-dependency-on-multiarch-support
     - bad-perm-for-file-in-etc-sudoers.d
     - source-contains-waf-binary
+    - md5sum-mismatch
+    - non-standard-toplevel-dir
   fatal:
     - debian-control-file-uses-obsolete-national-encoding
     - malformed-deb-archive
@@ -42,7 +43,6 @@ lintian:
     - binary-file-compressed-with-upx
     - file-in-usr-marked-as-conffile
     - build-info-in-binary-control-file-section
-    - debian-control-with-duplicate-fields
     - not-allowed-control-file
     - control-file-has-bad-permissions
     - control-file-has-bad-owner
@@ -52,7 +52,6 @@ lintian:
     - copyright-file-compressed
     - copyright-file-is-symlink
     - usr-share-doc-symlink-to-foreign-package
-    - old-style-copyright-file
     - copyright-refers-to-incorrect-directory
     - package-has-no-description
     - description-synopsis-is-empty
@@ -73,7 +72,6 @@ lintian:
     - uploader-name-missing
     - uploader-address-malformed
     - uploader-address-is-on-localhost
-    - no-source-field
     - source-field-does-not-match-pkg-name
     - symlink-has-too-many-up-segments
     - debian-rules-not-a-makefile
@@ -82,5 +80,7 @@ lintian:
     - dir-or-file-in-tmp
     - dir-or-file-in-mnt
     - dir-or-file-in-opt
+    - dir-or-file-in-run
     - dir-or-file-in-srv
-    - udeb-uses-non-gzip-data-tarball
+    - dir-or-file-in-var-lock
+    - dir-or-file-in-var-run
diff --git a/config/debian/robots.txt-incoming b/config/debian/robots.txt-incoming
new file mode 100644 (file)
index 0000000..1f53798
--- /dev/null
@@ -0,0 +1,2 @@
+User-agent: *
+Disallow: /
index cea5344f8e73d423ee56d32ee81cd24e7b702e9f..03a86a78fc2c052157f0fa5c486a66d649dfb311 100644 (file)
@@ -30,6 +30,8 @@ exportpublic=$public/rsync/export/
 
 ftpgroup=debadmin
 
+public_archives=(ftp-master backports)
+
 TMPDIR=${base}/tmp
 
 PATH=$masterdir:$PATH
index a47c5c03c917f417c9b080c7ac4a325f9c4205e7..e9d10c68d4b6a182182fd434b2082a2637f7eda7 100644 (file)
@@ -13,11 +13,6 @@ Config
     // Optional filename of dak's config file; if not present, this
     // file is assumed to contain dak config info.
     DakConfig          "/org/ftp.debian.org/dak/config/debian/dak.conf";
-
-    // Optional filename of apt-ftparchive's config file; if not
-    // present, the file is assumed to be 'apt.conf' in the same
-    // directory as this file.
-    AptConfig          "/org/ftp.debian.org/dak/config/debian/apt.conf";
   }
 
 }
index 2a4fd452be35359feac8efef350fcaf8e87c05ce..881e7303420b099c8846ade804ed87ff6e0aab77 100644 (file)
@@ -54,6 +54,7 @@ export EDITOR=$(which vim)
 export HISTFILESIZE=6000
 export GREP_OPTIONS="--color=auto"
 export CDPATH=".:~:${base}:${public}:${queuedir}"
+export PATH="${HOME}/bin:${PATH}"
 
 alias base='cd ${base}'
 alias config='cd ${configdir}'
diff --git a/config/homedir/ssh/ftpmaster-authorized_keys b/config/homedir/ssh/ftpmaster-authorized_keys
new file mode 100644 (file)
index 0000000..ebde844
--- /dev/null
@@ -0,0 +1,7 @@
+# this runs whenever we have to read a new dataset for testing/squeeze-updates
+command="/srv/ftp-master.debian.org/dak/scripts/debian/import_dataset.sh testing",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,from="138.16.160.12,franck.debian.org" ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAqvcRf4LLH9WLz3YGg/vj62I6aMihd9eF8tEYIMvRUNIqcI95YQP6nPpnIovom30RI9l5vJP+xpd0ABoiVxGDr0fw4hfp137BxpOL2WDHoqYX0KWP5mdWpA2PV2HVOJ4xp0q18pZ0DIdhxAGDd1QRrkR2yD9CH4dhRNcYRN8TA970y5Tweesh19Ba583f25NrSv0+A1200qiSdMbn9KIQYwC0Gc9xcKS1/Tygf2Sz3ekVrODog/nACPLbHRxO+mPcHJVBb9Sf8l393l5eln7ZfmSD0wZD6X/2M9+rRoXtVycLbmISxJV8zdady/3HCX33fcWCI7xCfOsikcVWDzygtQ== release@ries
+command="/srv/ftp-master.debian.org/dak/scripts/debian/import_dataset.sh squeeze-updates",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,from="138.16.160.12,franck.debian.org" ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAs0CFETy4E2rR7sH5kl5tgPVltcimtdmkpWSYLO+AJrrTvN447KjL0GhAc9raWv/wp6UeGw9zhOOxH6UGGD2DKI+lIZKW2PraLnQMs9g67B7Q/7MH7rHIzKue1niOANgPZppQ18rdiexagWyj+E8z/A1cFqpfaIIupi543eXZ4yZV3fjrHIE6zTvIzoTzlAZ5IaCOYyFT8wx6Ql53aEZfMk6S1FvXou24wFBD08CArTjRMf2eYo/aPqWbJs955eZwNqp1kS4jtJKwc7DCKpY7elHCyIqfR7YZxTUOBEGpoaAIfjIitgEedZnuMmBl8IUi1jQ0HvM7HDb4n4NVR/hbew== release@franck
+command="/srv/ftp-master.debian.org/dak/scripts/debian/import_dataset.sh wheezy-updates",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,from="138.16.160.12,franck.debian.org" ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBFRjQe43ruKGoTUFUOD9B2RG6yKfd2H402ywB3m7kEUGgsE0CI6+qGO0J/xiY6P4CBSVH1uwUpxxKvFJbVTI6227O9+1LnpeNliKcHo3H0i//Or18f1+mL1zoHAGimFvDydtnbYiSstYOzzPfvkaf1HPOZtaBUO2HleARbGR2zFtjZ3R/2QKj7a3xNQ/AkFY+N8YquqqRh4gNY8vEPTCVgNl/jy4Kb7lDtHAfS8O6oxDkqVon2nPO4KTgawYXw90CyBZP2/it5JUg5Z19U0mjw0wJ0hP7fU6tuSOm16ClS3w0phXRSsRO+jj7ngdSEhCFklmVSQtbX0+k7hNQKM91 release@franck
+
+# release team tpu removals
+command="/srv/ftp-master.debian.org/dak/scripts/debian/release_team_removals.sh",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,from="138.16.160.12,franck.debian.org" ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCw6DLpbCsiadOqxenRfW5In7UFG5HoIDt0xV/dRDbqNUUihNcDi6SqlREuSBCA75lOqbhL1w2tWsdsTIMnJeq3Fdr3LdFjIKlG6QQZVThaD3SI76EkGtjt0XQDoN2d4hi0Xn2LOPKz8hxaY4jKYzSUN0TVue3C1EHTJD0S8Grkd5tPaDgXt4pJzHmNwT4r2dH5OT3Y3vJL2UGhbY6Y+rNFfmnKzDcBtNdUTLTtrAfCCMkPITTYrMvZevA9u/SzNenN9qwEQicc06FrycSCi6+XSA+t4k1YNf1NTHhTQEncEX4/FRf+jgbkt1lkchiu+eShx3bUZCsKPuoNEsuWUU5v release@franck
diff --git a/config/homedir/ssh/ftpmaster-config b/config/homedir/ssh/ftpmaster-config
new file mode 100644 (file)
index 0000000..d90b5a4
--- /dev/null
@@ -0,0 +1,38 @@
+Protocol 2
+ConnectTimeout 30
+ServerAliveInterval 30
+ForwardX11 no
+ForwardAgent no
+StrictHostKeyChecking yes
+PasswordAuthentication no
+BatchMode yes
+
+Host bugs-sync
+  Hostname bugs-master.debian.org
+  User debbugs
+  IdentityFile /srv/ftp-master.debian.org/s3kr1t/id_debbugs-vt
+
+Host ddtp-sync
+  Hostname i18n.debian.net
+  User ddtp-dak
+  IdentityFile /srv/ftp-master.debian.org/s3kr1t/ddtp-dak.rsa
+
+Host morgue-sync
+  Hostname stabile.debian.org
+  User dak
+  IdentityFile /srv/ftp-master.debian.org/s3kr1t/push_morgue
+
+Host ries-sync1
+  Hostname coccia.debian.org
+  User dak
+  IdentityFile /srv/ftp-master.debian.org/scripts/s3kr1t/ssh/push_dd1
+
+Host ries-sync2
+  Hostname coccia.debian.org
+  User dak
+  IdentityFile /srv/ftp-master.debian.org/scripts/s3kr1t/ssh/push_dd2
+
+Host external-security
+  Hostname chopin.debian.org
+  User dak
+  IdentityFile /srv/ftp-master.debian.org/scripts/s3kr1t/ssh/push_external_files
diff --git a/config/homedir/ssh/ftpmaster-mirror-authorized_keys b/config/homedir/ssh/ftpmaster-mirror-authorized_keys
new file mode 100644 (file)
index 0000000..8636f24
--- /dev/null
@@ -0,0 +1,3 @@
+# keys for scripts/debian/dd-sync
+command="rsync --server -lHogDtpre.iLsf -B8192 --timeout=3600 --delete-after . /srv/ftp-master.debian.org/" ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDbFGM5hCwuWz5kVJUAA0lrDb6xJgiHXsGGSnRHJLePRgq06xJbEhLKUpYYNTj928PIYEEWQJb9nZaoRgTgS59aVeo5zt3qT8fGkuB4/XE+qaRWBlWXXaR/DdHh4gNLIws6D8Duny6M/ewrtaOLNca5i2Il9NBSrBrefocZHyoN6fTb2uwXOx8uJ/klcUeVUt+DZNmkF/j9VymmZ+g2892efGrwkcyY42u90xJK+VSHOQBqkRk5eObup5oUNBZr5+WI+GXop9CKy06o/a0KvgIUIBI03zq9zzgIySii/h1YR6YmVgJUUWFc3efap0oOTVVw56Re0QHRdDx+RZZ/5HSl rsync DD-accessible copy of /srv/ftp-master.debian.org, 2012-10-07
+command="rsync --server -lHogDtpre.iLsf -B8192 --timeout=3600 --delete-after . /srv/ftp.debian.org/" ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSgqmVlgeILSLTE8hHwh5AckbuBf7Dv5n8m5EZFyKzed1Cf7HhOwO/33MNNcR5zl30c3/uZRNXpPLevrGX1vkiEEZ/ddlD/70oEQHS/Ms4pvxikeNO0aVCWQoX2xqR2bZqvRqH5HFr3MUY8PXbD6BLkf2teuc1UNAjsvFQLESKLBp3dONl8jJqdB9QMnmg6rmgeBffN6bogr08TNm+R3t6+6yx7okueh4PZY3eUSBzhelAb/5/3UrSFitM4qFA1Ah/jJe6ZXPF/9QAj97KV2v5iKdpQsFEjXYjtFBn/Yu1kWm6sEH4ki0RZFV7rWidaWu7hW/9emNFVKYj1xXPebk/ rsync DD-accessible copy of /srv/ftp.debian.org/, 2012-10-07
diff --git a/config/homedir/syncdd.sh b/config/homedir/syncdd.sh
deleted file mode 100755 (executable)
index 07c4bc5..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/bin/bash
-
-# Copyright (C) 2011 Joerg Jaspert <joerg@debian.org>
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; version 2.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-
-set -e
-set -u
-set -E
-
-export LANG=C
-export LC_ALL=C
-
-export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
-. $SCRIPTVARS
-
-EXTRA=""
-
-check_commandline() {
-    while [ $# -gt 0 ]; do
-        case "$1" in
-            sync)
-                EXTRA="--exclude ftp/"
-                ;;
-            pool)
-                ;;
-            *)
-                echo "Unknown option ${1} ignored"
-                ;;
-        esac
-        shift  # Check next set of parameters.
-    done
-}
-
-if [ $# -gt 0 ]; then
-    ORIGINAL_COMMAND=$*
-else
-    ORIGINAL_COMMAND=""
-fi
-
-SSH_ORIGINAL_COMMAND=${SSH_ORIGINAL_COMMAND:-""}
-if [ -n "${SSH_ORIGINAL_COMMAND}" ]; then
-    set "nothing" "${SSH_ORIGINAL_COMMAND}"
-    shift
-    check_commandline $*
-fi
-
-if [ -n "${ORIGINAL_COMMAND}" ]; then
-    set ${ORIGINAL_COMMAND}
-    check_commandline $*
-fi
-
-
-cleanup() {
-    rm -f "${HOME}/sync.lock"
-}
-trap cleanup EXIT TERM HUP INT QUIT
-
-# not using $lockdir as thats inside the rsync dir, and --delete would
-# kick the lock away. Yes we could exclude it, but wth bother?
-#
-# Also, NEVER use --delete-excluded!
-if lockfile -r3 ${HOME}/sync.lock; then
-    cd $base/
-    rsync -aH -B8192 \
-        --exclude backup/*.xz \
-        --exclude backup/dump* \
-        --exclude database/\*.db \
-        ${EXTRA} \
-        --exclude mirror \
-        --exclude morgue/ \
-        --exclude=lost+found/ \
-        --exclude .da-backup.trace \
-        --exclude lock/ \
-        --exclude queue/holding/ \
-        --exclude queue/newstage/ \
-        --exclude queue/unchecked/ \
-        --exclude tmp/ \
-        --delete \
-        --delete-after \
-        --timeout 3600 \
-        -e 'ssh -o ConnectTimeout=30 -o SetupTimeout=30' \
-        ftpmaster-sync:/srv/ftp-master.debian.org/ .
-
-    cd $public/
-    rsync -aH -B8192 \
-        --exclude mirror \
-        --exclude rsync/ \
-        --exclude=lost+found/ \
-        --exclude .da-backup.trace \
-        --exclude web-users/ \
-        --delete \
-        --delete-after \
-        --timeout 3600 \
-        -e 'ssh -o ConnectTimeout=30 -o SetupTimeout=30' \
-        ftpmaster-sync2:/srv/ftp.debian.org/ .
-
-else
-    echo "Couldn't get the lock, not syncing"
-    exit 0
-fi
-
-
-## ftpmaster-sync is defined in .ssh/config as:
-# Host ftpmaster-sync
-#   Hostname franck.debian.org
-#   User dak
-#   IdentityFile ~dak/.ssh/syncftpmaster
-#   ForwardX11 no
-#   ForwardAgent no
-#   StrictHostKeyChecking yes
-#   PasswordAuthentication no
-#   BatchMode yes
-
-## ftpmaster-sync2 is the same, just a second ssh key
diff --git a/dak/acl.py b/dak/acl.py
new file mode 100644 (file)
index 0000000..a6fdddd
--- /dev/null
@@ -0,0 +1,137 @@
+#! /usr/bin/env python
+#
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import apt_pkg
+import sys
+
+from daklib.config import Config
+from daklib.dbconn import DBConn, Fingerprint, Keyring, Uid, ACL
+
+def usage():
+    print """Usage:
+  dak acl set-fingerprints <acl-name>
+  dak acl export-per-source <acl-name>
+
+  set-fingerprints:
+    Reads list of fingerprints from stdin and sets the ACL <acl-name> to these.
+    Accepted input formats are "uid:<uid>", "name:<name>" and
+    "fpr:<fingerprint>".
+
+  export-per-source:
+    Export per source upload rights for ACL <acl-name>.
+"""
+
+def get_fingerprint(entry, session):
+    """get fingerprint for given ACL entry
+
+    The entry is a string in one of these formats::
+
+        uid:<uid>
+        name:<name>
+        fpr:<fingerprint>
+        keyring:<keyring-name>
+
+    @type  entry: string
+    @param entry: ACL entry
+
+    @param session: database session
+
+    @rtype:  list of L{daklib.dbconn.Fingerprint}
+    @return: fingerprints matching the entry
+    """
+    field, value = entry.split(":", 1)
+    q = session.query(Fingerprint).join(Fingerprint.keyring).filter(Keyring.active == True)
+
+    if field == 'uid':
+        q = q.join(Fingerprint.uid).filter(Uid.uid == value)
+    elif field == 'name':
+        q = q.join(Fingerprint.uid).filter(Uid.name == value)
+    elif field == 'fpr':
+        q = q.filter(Fingerprint.fingerprint == value)
+    elif field == 'keyring':
+        q = q.filter(Keyring.keyring_name == value)
+    else:
+        raise Exception('Unknown selector "{0}".'.format(field))
+
+    return q.all()
+
+def acl_set_fingerprints(acl_name, entries):
+    session = DBConn().session()
+    acl = session.query(ACL).filter_by(name=acl_name).one()
+
+    acl.fingerprints.clear()
+    for entry in entries:
+        entry = entry.strip()
+        if entry.startswith('#') or len(entry) == 0:
+            continue
+
+        fps = get_fingerprint(entry, session)
+        if len(fps) == 0:
+            print "Unknown key for '{0}'".format(entry)
+        else:
+            acl.fingerprints.update(fps)
+
+    session.commit()
+
+def acl_export_per_source(acl_name):
+    session = DBConn().session()
+    acl = session.query(ACL).filter_by(name=acl_name).one()
+
+    query = r"""
+      SELECT
+        f.fingerprint,
+        (SELECT COALESCE(u.name, '') || ' <' || u.uid || '>'
+           FROM uid u
+           JOIN fingerprint f2 ON u.id = f2.uid
+          WHERE f2.id = f.id) AS name,
+        STRING_AGG(
+          a.source
+          || COALESCE(' (' || (SELECT fingerprint FROM fingerprint WHERE id = a.created_by_id) || ')', ''),
+          E',\n ' ORDER BY a.source)
+      FROM acl_per_source a
+      JOIN fingerprint f ON a.fingerprint_id = f.id
+      LEFT JOIN uid u ON f.uid = u.id
+      WHERE a.acl_id = :acl_id
+      GROUP BY f.id, f.fingerprint
+      ORDER BY name
+      """
+
+    for row in session.execute(query, {'acl_id': acl.id}):
+        print "Fingerprint:", row[0]
+        print "Uid:", row[1]
+        print "Allow:", row[2]
+        print
+
+    session.rollback()
+    session.close()
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+
+    if len(argv) != 3:
+        usage()
+        sys.exit(1)
+
+    if argv[1] == 'set-fingerprints':
+        acl_set_fingerprints(argv[2], sys.stdin)
+    elif argv[1] == 'export-per-source':
+        acl_export_per_source(argv[2])
+    else:
+        usage()
+        sys.exit(1)
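
This is the command cron.hourly above pipes the backports ACL file into. A usage sketch based on the entry formats documented in usage(); the uid and fingerprint are placeholders:

    # set the ACL from a list of entries ("uid:", "name:", "fpr:" or "keyring:")
    printf 'uid:someuid\nfpr:0123456789ABCDEF0123456789ABCDEF01234567\n' \
        | dak acl set-fingerprints backports
    # export per-source upload rights, as dinstall's dm() now does for the dm ACL
    dak acl export-per-source dm > ${exportdir}/dm.txt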
index 23b4c2d8e1a05ff17e1d7f8aecb51dab9e75c24c..106393d6c3446a4e08ed7320038977a23c4a8a22 100755 (executable)
@@ -65,6 +65,8 @@ Perform administrative work on the dak database.
      k list-all             list all keyrings
      k list-binary          list all keyrings with a NULL source acl
      k list-source          list all keyrings with a non NULL source acl
+     k add-buildd NAME ARCH...   add buildd keyring with upload permission
+                                 for the given architectures
 
   architecture / a:
      a list                 show a list of architectures
@@ -75,12 +77,21 @@ Perform administrative work on the dak database.
                             If SUITELIST is given, add to each of the
                             suites at the same time
 
+  component:
+     component list         show a list of components
+     component rm COMPONENT remove a component (will only work if
+                            empty)
+     component add NAME DESCRIPTION ORDERING
+                            add component NAME with DESCRIPTION.
+                            Ordered at ORDERING.
+
   suite / s:
-     s list                 show a list of suites
+     s list [--print-archive]
+                            show a list of suites
      s show SUITE           show config details for a suite
      s add SUITE VERSION [ label=LABEL ] [ description=DESCRIPTION ]
                          [ origin=ORIGIN ] [ codename=CODENAME ]
-                         [ signingkey=SIGNINGKEY ]
+                         [ signingkey=SIGNINGKEY ] [ archive=ARCHIVE ]
                             add suite SUITE, version VERSION.
                             label, description, origin, codename
                             and signingkey are optional.
@@ -96,6 +107,26 @@ Perform administrative work on the dak database.
      s-a rm SUITE ARCH      remove ARCH from suite (will only work if
                             no packages remain for the arch in the suite)
 
+  suite-component / s-c:
+     s-c list               show the components for all suites
+     s-c list-suite COMPONENT
+                            show the suites a COMPONENT is in
+     s-c list-component SUITE
+                            show the components in a SUITE
+     s-c add SUITE COMPONENT
+                            add COMPONENT to suite
+     s-c rm SUITE COMPONENT remove component from suite (will only work if
+                            no packages remain for the component in the suite)
+
+  archive:
+     archive list           list all archives
+     archive add NAME ROOT DESCRIPTION [primary-mirror=MIRROR] [tainted=1]
+                            add archive NAME with path ROOT,
+                            primary mirror MIRROR.
+     archive rm NAME        remove archive NAME (will only work if there are
+                            no files and no suites in the archive)
+     archive rename OLD NEW rename archive OLD to NEW
+
   version-check / v-c:
      v-c list                        show version checks for all suites
      v-c list-suite SUITE            show version checks for suite SUITE
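
Taken together, the new archive, suite-component and extended suite subcommands are what allows the backports archive introduced by this merge to be described in the database. A hypothetical setup session using only the syntax documented above; the paths, version and architectures are placeholders, not taken from this commit:

    dak admin archive add backports /srv/backports-master.debian.org/ftp/ "backports archive"
    dak admin suite add squeeze-backports 6.0 archive=backports
    dak admin s-c add squeeze-backports main contrib non-free
    dak admin s-a add squeeze-backports source amd64 i386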
@@ -182,10 +213,84 @@ dispatch['a'] = architecture
 
 ################################################################################
 
+def component_list():
+    session = DBConn().session()
+    for component in session.query(Component).order_by(Component.component_name):
+        print "{0} ordering={1}".format(component.component_name, component.ordering)
+
+def component_add(args):
+    (name, description, ordering) = args[0:3]
+
+    attributes = dict(
+        component_name=name,
+        description=description,
+        ordering=ordering,
+        )
+
+    for option in args[3:]:
+        (key, value) = option.split('=')
+        attributes[key] = value
+
+    session = DBConn().session()
+
+    component = Component()
+    for key, value in attributes.iteritems():
+        setattr(component, key, value)
+
+    session.add(component)
+    session.flush()
+
+    if dryrun:
+        session.rollback()
+    else:
+        session.commit()
+
+def component_rm(name):
+    session = DBConn().session()
+    component = get_component(name, session)
+    session.delete(component)
+    session.flush()
+
+    if dryrun:
+        session.rollback()
+    else:
+        session.commit()
+
+def component_rename(oldname, newname):
+    session = DBConn().session()
+    component = get_component(oldname, session)
+    component.component_name = newname
+    session.flush()
+
+    if dryrun:
+        session.rollback()
+    else:
+        session.commit()
+
+def component(command):
+    mode = command[1]
+    if mode == 'list':
+        component_list()
+    elif mode == 'rename':
+        component_rename(command[2], command[3])
+    elif mode == 'add':
+        component_add(command[2:])
+    elif mode == 'rm':
+        component_rm(command[2])
+    else:
+        die("E: component command unknown")
+
+dispatch['component'] = component
+
+################################################################################
+
 def __suite_list(d, args):
     s = d.session()
-    for j in s.query(Suite).order_by(Suite.suite_name).all():
-        print j.suite_name
+    for j in s.query(Suite).join(Suite.archive).order_by(Archive.archive_name, Suite.suite_name).all():
+        if len(args) > 2 and args[2] == "--print-archive":
+            print "{0} {1}".format(j.archive.archive_name, j.suite_name)
+        else:
+            print "{0}".format(j.suite_name)
 
 def __suite_show(d, args):
     if len(args) < 2:
@@ -225,6 +330,11 @@ def __suite_add(d, args, addallarches=False):
             signingkey = get_field('signingkey')
             if signingkey is not None:
                 suite.signingkeys = [signingkey.upper()]
+            archive_name = get_field('archive')
+            if archive_name is not None:
+                suite.archive = get_archive(archive_name, s)
+            else:
+                suite.archive = s.query(Archive).filter(~Archive.archive_name.in_(['build-queues', 'new', 'policy'])).one()
             suite.srcformats = s.query(SrcFormat).all()
             s.add(suite)
             s.flush()
@@ -245,6 +355,23 @@ def __suite_add(d, args, addallarches=False):
 
     s.commit()
 
+def __suite_rm(d, args):
+    die_arglen(args, 3, "E: removing a suite requires at least a name")
+    name = args[2]
+    print "Removing suite {0}".format(name)
+    if not dryrun:
+        try:
+            s = d.session()
+            su = get_suite(name.lower(), s)
+            if su is None:
+                die("E: Cannot find suite {0}".format(name))
+            s.delete(su)
+            s.commit()
+        except IntegrityError as e:
+            die("E: Integrity error removing suite {0} (suite-arch entries probably still exist)".format(name))
+        except SQLAlchemyError as e:
+            die("E: Error removing suite {0} ({1})".format(name, e))
+    print "Suite {0} removed".format(name)
 
 def suite(command):
     args = [str(x) for x in command]
@@ -259,6 +386,8 @@ def suite(command):
         __suite_list(d, args)
     elif mode == 'show':
         __suite_show(d, args)
+    elif mode == 'rm':
+        __suite_rm(d, args)
     elif mode == 'add':
         __suite_add(d, args, False)
     elif mode == 'add-all-arches':
@@ -306,20 +435,24 @@ def __suite_architecture_add(d, args):
     suite = get_suite(args[2].lower(), s)
     if suite is None: die("E: Can't find suite %s" % args[2].lower())
 
-    arch = get_architecture(args[3].lower(), s)
-    if arch is None: die("E: Can't find architecture %s" % args[3].lower())
+    for arch_name in args[3:]:
+        arch = get_architecture(arch_name.lower(), s)
+        if arch is None: die("E: Can't find architecture %s" % arch_name)
 
-    if not dryrun:
         try:
             suite.architectures.append(arch)
-            s.commit()
+            s.flush()
         except IntegrityError as e:
-            die("E: Can't add suite-architecture entry (%s, %s) - probably already exists" % (args[2].lower(), args[3].lower()))
+            die("E: Can't add suite-architecture entry (%s, %s) - probably already exists" % (args[2].lower(), arch_name))
         except SQLAlchemyError as e:
-            die("E: Can't add suite-architecture entry (%s, %s) - %s" % (args[2].lower(), args[3].lower(), e))
+            die("E: Can't add suite-architecture entry (%s, %s) - %s" % (args[2].lower(), arch_name, e))
 
-    print "Added suite-architecture entry for %s, %s" % (args[2].lower(), args[3].lower())
+        print "Added suite-architecture entry for %s, %s" % (args[2].lower(), arch_name)
 
+    if not dryrun:
+        s.commit()
+
+    s.close()
 
 def __suite_architecture_rm(d, args):
     if len(args) < 3:
@@ -373,6 +506,182 @@ dispatch['s-a'] = suite_architecture
 
 ################################################################################
 
+def __suite_component_list(d, args):
+    s = d.session()
+    for j in s.query(Suite).order_by(Suite.suite_name):
+        components = j.components
+        print j.suite_name + ': ' + \
+              ', '.join([c.component_name for c in components])
+
+
+def __suite_component_listcomponent(d, args):
+     die_arglen(args, 3, "E: suite-component list-component requires a suite")
+     suite = get_suite(args[2].lower(), d.session())
+     if suite is None:
+         die('E: suite %s is invalid' % args[2].lower())
+     for c in suite.components:
+         print c.component_name
+
+
+def __suite_component_listsuite(d, args):
+     die_arglen(args, 3, "E: suite-component list-suite requires a component")
+     component = get_component(args[2].lower(), d.session())
+     if component is None:
+         die("E: component %s is invalid" % args[2].lower())
+     for s in component.suites:
+         print s.suite_name
+
+
+def __suite_component_add(d, args):
+     if len(args) < 3:
+         die("E: adding a suite-component entry requires a suite and component")
+
+     s = d.session()
+
+     suite = get_suite(args[2].lower(), s)
+     if suite is None: die("E: Can't find suite %s" % args[2].lower())
+
+     for component_name in args[3:]:
+         component = get_component(component_name.lower(), s)
+         if component is None: die("E: Can't find component %s" % component_name)
+
+         try:
+             suite.components.append(component)
+             s.flush()
+         except IntegrityError as e:
+             die("E: Can't add suite-component entry (%s, %s) - probably already exists" % (args[2].lower(), component_name))
+         except SQLAlchemyError as e:
+             die("E: Can't add suite-component entry (%s, %s) - %s" % (args[2].lower(), component_name, e))
+
+         print "Added suite-component entry for %s, %s" % (args[2].lower(), component_name)
+
+     if not dryrun:
+         s.commit()
+     s.close()
+
+def __suite_component_rm(d, args):
+     if len(args) < 3:
+         die("E: removing a suite-component entry requires a suite and component")
+
+     s = d.session()
+     if not dryrun:
+         try:
+             suite_name = args[2].lower()
+             suite = get_suite(suite_name, s)
+             if suite is None:
+                 die('E: no such suite %s' % suite_name)
+             component_string = args[3].lower()
+             component = get_component(component_string, s)
+             if component not in suite.components:
+                 die("E: component %s not found in suite %s" % (component_string, suite_name))
+             suite.components.remove(component)
+             s.commit()
+         except IntegrityError as e:
+             die("E: Can't remove suite-component entry (%s, %s) - it's probably referenced" % (args[2].lower(), args[3].lower()))
+         except SQLAlchemyError as e:
+             die("E: Can't remove suite-component entry (%s, %s) - %s" % (args[2].lower(), args[3].lower(), e))
+
+     print "Removed suite-component entry for %s, %s" % (args[2].lower(), args[3].lower())
+
+
+def suite_component(command):
+    args = [str(x) for x in command]
+    Cnf = utils.get_conf()
+    d = DBConn()
+
+    die_arglen(args, 2, "E: suite-component needs at least a command")
+
+    mode = args[1].lower()
+
+    if mode == 'list':
+        __suite_component_list(d, args)
+    elif mode == 'list-component':
+        __suite_component_listcomponent(d, args)
+    elif mode == 'list-suite':
+        __suite_component_listsuite(d, args)
+    elif mode == 'add':
+        __suite_component_add(d, args)
+    # elif mode == 'rm':
+    #     __suite_component_rm(d, args)
+    else:
+        die("E: suite-component command unknown")
+
+dispatch['suite-component'] = suite_component
+dispatch['s-c'] = suite_component
+
+################################################################################
+
+def archive_list():
+    session = DBConn().session()
+    for archive in session.query(Archive).order_by(Archive.archive_name):
+        print "{0} path={1} description={2} tainted={3}".format(archive.archive_name, archive.path, archive.description, archive.tainted)
+
+def archive_add(args):
+    (name, path, description) = args[0:3]
+
+    attributes = dict(
+        archive_name=name,
+        path=path,
+        description=description,
+        )
+
+    for option in args[3:]:
+        (key, value) = option.split('=')
+        attributes[key] = value
+
+    session = DBConn().session()
+
+    archive = Archive()
+    for key, value in attributes.iteritems():
+        setattr(archive, key, value)
+
+    session.add(archive)
+    session.flush()
+
+    if dryrun:
+        session.rollback()
+    else:
+        session.commit()
+
+def archive_rm(name):
+    session = DBConn().session()
+    archive = get_archive(name, session)
+    session.delete(archive)
+    session.flush()
+
+    if dryrun:
+        session.rollback()
+    else:
+        session.commit()
+
+def archive_rename(oldname, newname):
+    session = DBConn().session()
+    archive = get_archive(oldname, session)
+    archive.archive_name = newname
+    session.flush()
+
+    if dryrun:
+        session.rollback()
+    else:
+        session.commit()
+
+def archive(command):
+    mode = command[1]
+    if mode == 'list':
+        archive_list()
+    elif mode == 'rename':
+        archive_rename(command[2], command[3])
+    elif mode == 'add':
+        archive_add(command[2:])
+    elif mode == 'rm':
+        archive_rm(command[2])
+    else:
+        die("E: archive command unknown")
+
+dispatch['archive'] = archive
+
+################################################################################
+
 def __version_check_list(d):
     session = d.session()
     for s in session.query(Suite).order_by(Suite.suite_name):
@@ -485,6 +794,8 @@ def show_config(command):
             print "PGPORT=%s" % cnf["DB::Port"]
             e.append('PGPORT')
         print "export " + " ".join(e)
+    elif mode == 'get':
+        print cnf.get(args[2])
     else:
         session = DBConn().session()
         try:
@@ -513,17 +824,49 @@ def show_keyring(command):
     if mode == 'list-all':
         pass
     elif mode == 'list-binary':
-        q = q.filter(Keyring.default_source_acl_id == None)
+        q = q.join(Keyring.acl).filter(ACL.allow_source == False)
     elif mode == 'list-source':
-        q = q.filter(Keyring.default_source_acl_id != None)
+        q = q.join(Keyring.acl).filter(ACL.allow_source == True)
     else:
         die("E: keyring command unknown")
 
     for k in q.all():
         print k.keyring_name
 
-dispatch['keyring'] = show_keyring
-dispatch['k'] = show_keyring
+def keyring_add_buildd(command):
+    name = command[2]
+    arch_names = command[3:]
+
+    session = DBConn().session()
+    arches = session.query(Architecture).filter(Architecture.arch_string.in_(arch_names))
+
+    acl = ACL()
+    acl.name = 'buildd-{0}'.format('+'.join(arch_names))
+    acl.architectures.update(arches)
+    acl.allow_new = True
+    acl.allow_binary = True
+    acl.allow_binary_only = True
+    acl.allow_hijack = True
+    session.add(acl)
+
+    k = Keyring()
+    k.keyring_name = name
+    k.acl = acl
+    k.priority = 10
+    session.add(k)
+
+    session.commit()
+
+def keyring(command):
+    if command[1].startswith('list-'):
+        show_keyring(command)
+    elif command[1] == 'add-buildd':
+        keyring_add_buildd(command)
+    else:
+        die("E: keyring command unknown")
+
+dispatch['keyring'] = keyring
+dispatch['k'] = keyring
 
 ################################################################################
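
All of the new admin subcommands above (suite rm, suite-component/s-c, archive and keyring add-buildd) are registered in the same dispatch table. As a minimal sketch of how such a table is typically consumed, the main() below is illustrative only; it is not part of this diff, and only dispatch and die() are taken from the code above:

def main(argv):
    # argv as passed to "dak admin", e.g.
    # ['admin', 'archive', 'add', 'ftp-master', '/srv/archive', 'main archive']
    args = [str(x) for x in argv[1:]]
    if not args:
        die("E: admin needs a command")
    handler = dispatch.get(args[0])
    if handler is None:
        die("E: admin command unknown")
    # every handler registered above receives the argument list with the
    # subcommand name still at index 0
    handler(args)
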
 
index a47febbd229768a952812e5537061f35cc243893..926b4fb5009cf18a36ea34c8bb4f4165a7a58731 100755 (executable)
@@ -109,39 +109,61 @@ def check_files():
     Prepare the dictionary of existing filenames, then walk through the archive
     pool/ directory to compare it.
     """
-    global db_files
-
     cnf = Config()
+    session = DBConn().session()
 
-    print "Building list of database files..."
-    q = DBConn().session().query(PoolFile).join(Location).order_by('path', 'location')
-
-    print "Missing files:"
-    db_files.clear()
-
-    for f in q.all():
-        filename = os.path.abspath(os.path.join(f.location.path, f.filename))
-        db_files[filename] = ""
-        if os.access(filename, os.R_OK) == 0:
-            if f.last_used:
-                print "(last used: %s) %s" % (f.last_used, filename)
-            else:
-                print "%s" % (filename)
-
-
-    filename = os.path.join(cnf["Dir::Override"], 'override.unreferenced')
-    if os.path.exists(filename):
-        f = utils.open_file(filename)
-        for filename in f.readlines():
-            filename = filename[:-1]
-            excluded[filename] = ""
-
-    print "Existent files not in db:"
-
-    os.path.walk(os.path.join(cnf["Dir::Root"], 'pool/'), process_dir, None)
-
-    print
-    print "%s wasted..." % (utils.size_type(waste))
+    query = """
+        SELECT archive.name, suite.suite_name, f.filename
+          FROM binaries b
+          JOIN bin_associations ba ON b.id = ba.bin
+          JOIN suite ON ba.suite = suite.id
+          JOIN archive ON suite.archive_id = archive.id
+          JOIN files f ON b.file = f.id
+         WHERE NOT EXISTS (SELECT 1 FROM files_archive_map af
+                            WHERE af.archive_id = suite.archive_id
+                              AND af.file_id = b.file)
+         ORDER BY archive.name, suite.suite_name, f.filename
+        """
+    for row in session.execute(query):
+        print "MISSING-ARCHIVE-FILE {0} {1} {2}".vformat(row)
+
+    query = """
+        SELECT archive.name, suite.suite_name, f.filename
+          FROM source s
+          JOIN src_associations sa ON s.id = sa.source
+          JOIN suite ON sa.suite = suite.id
+          JOIN archive ON suite.archive_id = archive.id
+          JOIN dsc_files df ON s.id = df.source
+          JOIN files f ON df.file = f.id
+         WHERE NOT EXISTS (SELECT 1 FROM files_archive_map af
+                            WHERE af.archive_id = suite.archive_id
+                              AND af.file_id = df.file)
+         ORDER BY archive.name, suite.suite_name, f.filename
+        """
+    for row in session.execute(query):
+        print "MISSING-ARCHIVE-FILE {0} {1} {2}".vformat(row)
+
+    archive_files = session.query(ArchiveFile) \
+        .join(ArchiveFile.archive).join(ArchiveFile.file) \
+        .order_by(Archive.archive_name, PoolFile.filename)
+
+    expected_files = set()
+    for af in archive_files:
+        path = af.path
+        expected_files.add(af.path)
+        if not os.path.exists(path):
+            print "MISSING-FILE {0} {1} {2}".format(af.archive.archive_name, af.file.filename, path)
+
+    archives = session.query(Archive).order_by(Archive.archive_name)
+
+    for a in archives:
+        top = os.path.join(a.path, 'pool')
+        for dirpath, dirnames, filenames in os.walk(top):
+            for fn in filenames:
+                path = os.path.join(dirpath, fn)
+                if path in expected_files:
+                    continue
+                print "UNEXPECTED-FILE {0} {1}".format(a.archive_name, path)
 
 ################################################################################
 
@@ -252,7 +274,7 @@ def check_checksums():
 
     print "Checking file checksums & sizes..."
     for f in q:
-        filename = os.path.abspath(os.path.join(f.location.path, f.filename))
+        filename = f.fullpath
 
         try:
             fi = utils.open_file(filename)
@@ -361,18 +383,18 @@ def validate_sources(suite, component):
     """
     filename = "%s/dists/%s/%s/source/Sources.gz" % (Cnf["Dir::Root"], suite, component)
     print "Processing %s..." % (filename)
-    # apt_pkg.ParseTagFile needs a real file handle and can't handle a GzipFile instance...
+    # apt_pkg.TagFile needs a real file handle and can't handle a GzipFile instance...
     (fd, temp_filename) = utils.temp_filename()
     (result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
     if (result != 0):
         sys.stderr.write("Gunzip invocation failed!\n%s\n" % (output))
         sys.exit(result)
     sources = utils.open_file(temp_filename)
-    Sources = apt_pkg.ParseTagFile(sources)
-    while Sources.Step():
-        source = Sources.Section.Find('Package')
-        directory = Sources.Section.Find('Directory')
-        files = Sources.Section.Find('Files')
+    Sources = apt_pkg.TagFile(sources)
+    while Sources.step():
+        source = Sources.section.find('Package')
+        directory = Sources.section.find('Directory')
+        files = Sources.section.find('Files')
         for i in files.split('\n'):
             (md5, size, name) = i.split()
             filename = "%s/%s/%s" % (Cnf["Dir::Root"], directory, name)
@@ -403,16 +425,16 @@ def validate_packages(suite, component, architecture):
     filename = "%s/dists/%s/%s/binary-%s/Packages.gz" \
                % (Cnf["Dir::Root"], suite, component, architecture)
     print "Processing %s..." % (filename)
-    # apt_pkg.ParseTagFile needs a real file handle and can't handle a GzipFile instance...
+    # apt_pkg.TagFile needs a real file handle and can't handle a GzipFile instance...
     (fd, temp_filename) = utils.temp_filename()
     (result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
     if (result != 0):
         sys.stderr.write("Gunzip invocation failed!\n%s\n" % (output))
         sys.exit(result)
     packages = utils.open_file(temp_filename)
-    Packages = apt_pkg.ParseTagFile(packages)
-    while Packages.Step():
-        filename = "%s/%s" % (Cnf["Dir::Root"], Packages.Section.Find('Filename'))
+    Packages = apt_pkg.TagFile(packages)
+    while Packages.step():
+        filename = "%s/%s" % (Cnf["Dir::Root"], Packages.section.find('Filename'))
         if not os.path.exists(filename):
             print "W: %s missing." % (filename)
     packages.close()
@@ -465,7 +487,7 @@ def chk_bd_process_dir (unused, dirname, filenames):
             field = dsc.get(field_name)
             if field:
                 try:
-                    apt_pkg.ParseSrcDepends(field)
+                    apt_pkg.parse_src_depends(field)
                 except:
                     print "E: [%s] %s: %s" % (filename, field_name, field)
                     pass
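
The apt_pkg calls above follow the renamed python-apt API (apt_pkg.TagFile, step(), section.find()) rather than the removed ParseTagFile/Step/Section.Find spelling. A small standalone sketch of that API, assuming an uncompressed Packages file on disk; as the comments above note, TagFile still wants a real file object rather than a GzipFile:

import apt_pkg

packages = open('Packages')          # a plain file object, not a GzipFile
tagfile = apt_pkg.TagFile(packages)
while tagfile.step():                # one stanza per iteration
    print tagfile.section.find('Package'), tagfile.section.find('Version')
packages.close()
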
index aed0a6475677e3e03d1a634910a0a0435f3ff0bb..31833198fe88ffeee2e176a068990d6a6ab72fcd 100755 (executable)
@@ -115,20 +115,23 @@ def process(osuite, affected_suites, originosuite, component, otype, session):
         packages = {}
         # TODO: Fix to use placeholders (check how to with arrays)
         q = session.execute("""
-SELECT b.package FROM binaries b, bin_associations ba, files f,
-                              location l, component c
- WHERE b.type = :otype AND b.id = ba.bin AND f.id = b.file AND l.id = f.location
-   AND c.id = l.component AND ba.suite IN (%s) AND c.id = :component_id
+SELECT b.package
+  FROM binaries b
+  JOIN bin_associations ba ON b.id = ba.bin
+  JOIN suite ON ba.suite = suite.id
+  JOIN files_archive_map af ON b.file = af.file_id AND suite.archive_id = af.archive_id
+ WHERE b.type = :otype AND ba.suite IN (%s) AND af.component_id = :component_id
 """ % (",".join([ str(i) for i in affected_suites ])), {'otype': otype, 'component_id': component_id})
         for i in q.fetchall():
             packages[i[0]] = 0
 
     src_packages = {}
     q = session.execute("""
-SELECT s.source FROM source s, src_associations sa, files f, location l,
-                     component c
- WHERE s.id = sa.source AND f.id = s.file AND l.id = f.location
-   AND c.id = l.component AND sa.suite IN (%s) AND c.id = :component_id
+SELECT s.source FROM source s
+  JOIN src_associations sa ON s.id = sa.source
+  JOIN suite ON sa.suite = suite.id
+  JOIN files_archive_map af ON s.file = af.file_id AND suite.archive_id = af.archive_id
+ WHERE sa.suite IN (%s) AND af.component_id = :component_id
 """ % (",".join([ str(i) for i in affected_suites])), {'component_id': component_id})
     for i in q.fetchall():
         src_packages[i[0]] = 0
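
The TODO comment in the hunk above (placeholders for the list of suite ids) could plausibly be addressed by binding the ids as an array instead of interpolating them into the IN clause. A sketch only, assuming PostgreSQL with psycopg2, which adapts a Python list to an SQL array so that IN (...) can become = ANY(:suites); this is not what the commit does, just one possible follow-up. The names otype, affected_suites, component_id and session are the ones used above:

q = session.execute("""
SELECT b.package
  FROM binaries b
  JOIN bin_associations ba ON b.id = ba.bin
  JOIN suite ON ba.suite = suite.id
  JOIN files_archive_map af ON b.file = af.file_id AND suite.archive_id = af.archive_id
 WHERE b.type = :otype AND ba.suite = ANY(:suites) AND af.component_id = :component_id
""", {'otype': otype, 'suites': list(affected_suites), 'component_id': component_id})
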
index d947818c4612fef26f4e5d0c246b1c2cc0d8a66c..88b0bb9334a712228e2b2ae27ca2cf53f5e016f1 100755 (executable)
@@ -91,12 +91,11 @@ def init (cnf):
             utils.fubar("%s must be a directory." % (del_dir))
 
     # Move to the directory to clean
-    incoming = Options["Incoming"]
-    if incoming == "":
-        incoming_queue = get_policy_queue('unchecked')
-        if not incoming_queue:
-            utils.fubar("Cannot find 'unchecked' queue")
-        incoming = incoming_queue.path
+    incoming = Options.get("Incoming")
+    if not incoming:
+        incoming = cnf.get('Dir::Unchecked')
+        if not incoming:
+            utils.fubar("Cannot find 'unchecked' directory")
 
     try:
         os.chdir(incoming)
index 66bda62df6351665934fa7256cc7ea55a4ebc815..4fe2c5fdf12cf9822061f06b8e38a79cff75155a 100755 (executable)
@@ -64,133 +64,108 @@ Clean old packages from suites.
 
 ################################################################################
 
-def check_binaries(now_date, delete_date, max_delete, session):
-    print "Checking for orphaned binary packages..."
+def check_binaries(now_date, session):
+    Logger.log(["Checking for orphaned binary packages..."])
 
     # Get the list of binary packages not in a suite and mark them for
     # deletion.
-
-    q = session.execute("""
-SELECT b.file, f.filename
-         FROM binaries b
-    LEFT JOIN files f
-      ON (b.file = f.id)
-   WHERE f.last_used IS NULL
-     AND b.id NOT IN
-         (SELECT ba.bin FROM bin_associations ba)
-     AND f.id NOT IN
-         (SELECT bqf.fileid FROM build_queue_files bqf)""")
-    for i in q.fetchall():
-        Logger.log(["set lastused", i[1]])
-        if not Options["No-Action"]:
-            session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid AND last_used IS NULL",
-                            {'lastused': now_date, 'fileid': i[0]})
-
-    if not Options["No-Action"]:
-        session.commit()
-
     # Check for any binaries which are marked for eventual deletion
     # but are now used again.
 
-    q = session.execute("""
-SELECT b.file, f.filename
-         FROM binaries b
-    LEFT JOIN files f
-      ON (b.file = f.id)
-   WHERE f.last_used IS NOT NULL
-     AND (b.id IN
-          (SELECT ba.bin FROM bin_associations ba)
-          OR f.id IN
-          (SELECT bqf.fileid FROM build_queue_files bqf))""")
-
-    for i in q.fetchall():
-        Logger.log(["unset lastused", i[1]])
-        if not Options["No-Action"]:
-            session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid", {'fileid': i[0]})
-
-    if not Options["No-Action"]:
-        session.commit()
+    query = """
+       WITH usage AS (
+         SELECT
+           af.archive_id AS archive_id,
+           af.file_id AS file_id,
+           af.component_id AS component_id,
+           BOOL_OR(EXISTS (SELECT 1 FROM bin_associations ba
+                            JOIN suite s ON ba.suite = s.id
+                           WHERE ba.bin = b.id
+                             AND s.archive_id = af.archive_id))
+             AS in_use
+         FROM files_archive_map af
+         JOIN binaries b ON af.file_id = b.file
+         GROUP BY af.archive_id, af.file_id, af.component_id
+       )
+
+       UPDATE files_archive_map af
+          SET last_used = CASE WHEN usage.in_use THEN NULL ELSE :last_used END
+         FROM usage, files f, archive
+        WHERE af.archive_id = usage.archive_id AND af.file_id = usage.file_id AND af.component_id = usage.component_id
+          AND ((af.last_used IS NULL AND NOT usage.in_use) OR (af.last_used IS NOT NULL AND usage.in_use))
+          AND af.file_id = f.id
+          AND af.archive_id = archive.id
+       RETURNING archive.name, f.filename, af.last_used IS NULL"""
+
+    res = session.execute(query, {'last_used': now_date})
+    for i in res:
+        op = "set lastused"
+        if i[2]:
+            op = "unset lastused"
+        Logger.log([op, i[0], i[1]])
 
 ########################################
 
-def check_sources(now_date, delete_date, max_delete, session):
-    print "Checking for orphaned source packages..."
+def check_sources(now_date, session):
+    Logger.log(["Checking for orphaned source packages..."])
 
     # Get the list of source packages not in a suite and not used by
     # any binaries.
-    q = session.execute("""
-SELECT s.id, s.file, f.filename
-       FROM source s
-  LEFT JOIN files f
-    ON (s.file = f.id)
-  WHERE f.last_used IS NULL
-   AND s.id NOT IN
-        (SELECT sa.source FROM src_associations sa)
-   AND s.id NOT IN
-        (SELECT b.source FROM binaries b)
-   AND s.id NOT IN (SELECT esr.src_id FROM extra_src_references esr)
-   AND f.id NOT IN
-        (SELECT bqf.fileid FROM build_queue_files bqf)""")
-
-    #### XXX: this should ignore cases where the files for the binary b
-    ####      have been marked for deletion (so the delay between bins go
-    ####      byebye and sources go byebye is 0 instead of StayOfExecution)
-
-    for i in q.fetchall():
-        source_id = i[0]
-        dsc_file_id = i[1]
-        dsc_fname = i[2]
-
-        # Mark the .dsc file for deletion
-        Logger.log(["set lastused", dsc_fname])
-        if not Options["No-Action"]:
-            session.execute("""UPDATE files SET last_used = :last_used
-                                WHERE id = :dscfileid AND last_used IS NULL""",
-                            {'last_used': now_date, 'dscfileid': dsc_file_id})
-
-        # Mark all other files references by .dsc too if they're not used by anyone else
-        x = session.execute("""SELECT f.id, f.filename FROM files f, dsc_files d
-                              WHERE d.source = :sourceid AND d.file = f.id""",
-                             {'sourceid': source_id})
-        for j in x.fetchall():
-            file_id = j[0]
-            file_name = j[1]
-            y = session.execute("SELECT id FROM dsc_files d WHERE d.file = :fileid", {'fileid': file_id})
-            if len(y.fetchall()) == 1:
-                Logger.log(["set lastused", file_name])
-                if not Options["No-Action"]:
-                    session.execute("""UPDATE files SET last_used = :lastused
-                                       WHERE id = :fileid AND last_used IS NULL""",
-                                    {'lastused': now_date, 'fileid': file_id})
-
-    if not Options["No-Action"]:
-        session.commit()
 
     # Check for any sources which are marked for deletion but which
     # are now used again.
-    q = session.execute("""
-SELECT f.id, f.filename FROM source s, files f, dsc_files df
-  WHERE f.last_used IS NOT NULL AND s.id = df.source AND df.file = f.id
-    AND ((EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id))
-      OR (EXISTS (SELECT 1 FROM extra_src_references esr WHERE esr.src_id = s.id))
-      OR (EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id))
-      OR (EXISTS (SELECT 1 FROM build_queue_files bqf WHERE bqf.fileid = s.file)))""")
 
-    #### XXX: this should also handle deleted binaries specially (ie, not
-    ####      reinstate sources because of them
+    # TODO: the UPDATE part is the same as in check_binaries. Merge?
+
+    query = """
+    WITH usage AS (
+      SELECT
+        af.archive_id AS archive_id,
+        af.file_id AS file_id,
+        af.component_id AS component_id,
+        BOOL_OR(EXISTS (SELECT 1 FROM src_associations sa
+                         JOIN suite s ON sa.suite = s.id
+                        WHERE sa.source = df.source
+                          AND s.archive_id = af.archive_id)
+          OR EXISTS (SELECT 1 FROM files_archive_map af_bin
+                              JOIN binaries b ON af_bin.file_id = b.file
+                             WHERE b.source = df.source
+                               AND af_bin.archive_id = af.archive_id
+                               AND (af_bin.last_used IS NULL OR af_bin.last_used > ad.delete_date))
+          OR EXISTS (SELECT 1 FROM extra_src_references esr
+                         JOIN bin_associations ba ON esr.bin_id = ba.bin
+                         JOIN binaries b ON ba.bin = b.id
+                         JOIN suite s ON ba.suite = s.id
+                        WHERE esr.src_id = df.source
+                          AND s.archive_id = af.archive_id))
+          AS in_use
+      FROM files_archive_map af
+      JOIN dsc_files df ON af.file_id = df.file
+      JOIN archive_delete_date ad ON af.archive_id = ad.archive_id
+      GROUP BY af.archive_id, af.file_id, af.component_id
+    )
 
-    for i in q.fetchall():
-        Logger.log(["unset lastused", i[1]])
-        if not Options["No-Action"]:
-            session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid",
-                            {'fileid': i[0]})
+    UPDATE files_archive_map af
+       SET last_used = CASE WHEN usage.in_use THEN NULL ELSE :last_used END
+      FROM usage, files f, archive
+     WHERE af.archive_id = usage.archive_id AND af.file_id = usage.file_id AND af.component_id = usage.component_id
+       AND ((af.last_used IS NULL AND NOT usage.in_use) OR (af.last_used IS NOT NULL AND usage.in_use))
+       AND af.file_id = f.id
+       AND af.archive_id = archive.id
 
-    if not Options["No-Action"]:
-        session.commit()
+    RETURNING archive.name, f.filename, af.last_used IS NULL
+    """
+
+    res = session.execute(query, {'last_used': now_date})
+    for i in res:
+        op = "set lastused"
+        if i[2]:
+            op = "unset lastused"
+        Logger.log([op, i[0], i[1]])
 
 ########################################
 
-def check_files(now_date, delete_date, max_delete, session):
+def check_files(now_date, session):
     # FIXME: this is evil; nothing should ever be in this state.  if
     # they are, it's a bug.
 
@@ -198,53 +173,55 @@ def check_files(now_date, delete_date, max_delete, session):
     # and then mark the file for deletion.  This probably masks a bug somewhere
     # else but is better than collecting cruft forever
 
-    print "Checking for unused files..."
+    Logger.log(["Checking for unused files..."])
     q = session.execute("""
-SELECT id, filename FROM files f
-  WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id)
-    AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id)
-    AND NOT EXISTS (SELECT 1 FROM changes_pool_files cpf WHERE cpf.fileid = f.id)
-    AND NOT EXISTS (SELECT 1 FROM build_queue_files qf WHERE qf.fileid = f.id)
-    AND last_used IS NULL
-    ORDER BY filename""")
-
-    ql = q.fetchall()
-    if len(ql) > 0:
-        utils.warn("check_files found something it shouldn't")
-        for x in ql:
-            utils.warn("orphaned file: %s" % x)
-            Logger.log(["set lastused", x[1], "ORPHANED FILE"])
-            if not Options["No-Action"]:
-                 session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid",
-                                 {'lastused': now_date, 'fileid': x[0]})
+    UPDATE files_archive_map af
+       SET last_used = :last_used
+      FROM files f, archive
+     WHERE af.file_id = f.id
+       AND af.archive_id = archive.id
+       AND NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = af.file_id)
+       AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = af.file_id)
+       AND af.last_used IS NULL
+    RETURNING archive.name, f.filename""", {'last_used': now_date})
+
+    for x in q:
+        utils.warn("orphaned file: {0}".format(x))
+        Logger.log(["set lastused", x[0], x[1], "ORPHANED FILE"])
 
-        if not Options["No-Action"]:
-            session.commit()
+    if not Options["No-Action"]:
+        session.commit()
 
-def clean_binaries(now_date, delete_date, max_delete, session):
+def clean_binaries(now_date, session):
     # We do this here so that the binaries we remove will have their
     # source also removed (if possible).
 
     # XXX: why doesn't this remove the files here as well? I don't think it
     #      buys anything keeping this separate
-    print "Cleaning binaries from the DB..."
-    print "Deleting from binaries table... "
-    for bin in session.query(DBBinary).join(DBBinary.poolfile).filter(PoolFile.last_used <= delete_date):
-        Logger.log(["delete binary", bin.poolfile.filename])
-        if not Options["No-Action"]:
-            session.delete(bin)
-    if not Options["No-Action"]:
-        session.commit()
+
+    Logger.log(["Deleting from binaries table... "])
+    q = session.execute("""
+      DELETE FROM binaries b
+       USING files f
+       WHERE f.id = b.file
+         AND NOT EXISTS (SELECT 1 FROM files_archive_map af
+                                  JOIN archive_delete_date ad ON af.archive_id = ad.archive_id
+                                 WHERE af.file_id = b.file
+                                   AND (af.last_used IS NULL OR af.last_used > ad.delete_date))
+      RETURNING f.filename
+    """)
+    for b in q:
+        Logger.log(["delete binary", b[0]])
 
 ########################################
 
-def clean(now_date, delete_date, max_delete, session):
+def clean(now_date, archives, max_delete, session):
     cnf = Config()
 
     count = 0
     size = 0
 
-    print "Cleaning out packages..."
+    Logger.log(["Cleaning out packages..."])
 
     morguedir = cnf.get("Dir::Morgue", os.path.join("Dir::Pool", 'morgue'))
     morguesubdir = cnf.get("Clean-Suites::MorgueSubDir", 'pool')
@@ -260,33 +237,53 @@ def clean(now_date, delete_date, max_delete, session):
         os.makedirs(dest)
 
     # Delete from source
-    print "Deleting from source table... "
+    Logger.log(["Deleting from source table..."])
     q = session.execute("""
-SELECT s.id, f.filename FROM source s, files f
-  WHERE f.last_used <= :deletedate
-        AND s.file = f.id
-        AND s.id NOT IN (SELECT src_id FROM extra_src_references)""", {'deletedate': delete_date})
-    for s in q.fetchall():
-        Logger.log(["delete source", s[1], s[0]])
-        if not Options["No-Action"]:
-            session.execute("DELETE FROM dsc_files WHERE source = :s_id", {"s_id":s[0]})
-            session.execute("DELETE FROM source WHERE id = :s_id", {"s_id":s[0]})
+      WITH
+      deleted_sources AS (
+        DELETE FROM source
+         USING files f
+         WHERE source.file = f.id
+           AND NOT EXISTS (SELECT 1 FROM files_archive_map af
+                                    JOIN archive_delete_date ad ON af.archive_id = ad.archive_id
+                                   WHERE af.file_id = source.file
+                                     AND (af.last_used IS NULL OR af.last_used > ad.delete_date))
+        RETURNING source.id AS id, f.filename AS filename
+      ),
+      deleted_dsc_files AS (
+        DELETE FROM dsc_files df WHERE df.source IN (SELECT id FROM deleted_sources)
+        RETURNING df.file AS file_id
+      ),
+      now_unused_source_files AS (
+        UPDATE files_archive_map af
+           SET last_used = '1977-03-13 13:37:42' -- Kill it now. We waited long enough before removing the .dsc.
+         WHERE af.file_id IN (SELECT file_id FROM deleted_dsc_files)
+           AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = af.file_id)
+      )
+      SELECT filename FROM deleted_sources""")
+    for s in q:
+        Logger.log(["delete source", s[0]])
 
     if not Options["No-Action"]:
         session.commit()
 
     # Delete files from the pool
-    old_files = session.query(PoolFile).filter(PoolFile.last_used <= delete_date)
+    old_files = session.query(ArchiveFile).filter('files_archive_map.last_used <= (SELECT delete_date FROM archive_delete_date ad WHERE ad.archive_id = files_archive_map.archive_id)').join(Archive)
     if max_delete is not None:
         old_files = old_files.limit(max_delete)
-        print "Limiting removals to %d" % max_delete
+        Logger.log(["Limiting removals to %d" % max_delete])
 
-    for pf in old_files:
-        filename = os.path.join(pf.location.path, pf.filename)
+    if archives is not None:
+        archive_ids = [ a.archive_id for a in archives ]
+        old_files = old_files.filter(ArchiveFile.archive_id.in_(archive_ids))
+
+    for af in old_files:
+        filename = af.path
         if not os.path.exists(filename):
-            utils.warn("can not find '%s'." % (filename))
+            Logger.log(["database referred to non-existing file", af.path])
+            session.delete(af)
             continue
-        Logger.log(["delete pool file", filename])
+        Logger.log(["delete archive file", filename])
         if os.path.isfile(filename):
             if os.path.islink(filename):
                 count += 1
@@ -302,12 +299,16 @@ SELECT s.id, f.filename FROM source s, files f
                 if os.path.exists(dest_filename):
                     dest_filename = utils.find_next_free(dest_filename)
 
-                Logger.log(["move to morgue", filename, dest_filename])
                 if not Options["No-Action"]:
-                    utils.move(filename, dest_filename)
+                    if af.archive.use_morgue:
+                        Logger.log(["move to morgue", filename, dest_filename])
+                        utils.move(filename, dest_filename)
+                    else:
+                        Logger.log(["removed file", filename])
+                        os.unlink(filename)
 
             if not Options["No-Action"]:
-                session.delete(pf)
+                session.delete(af)
                 session.commit()
 
         else:
@@ -315,12 +316,21 @@ SELECT s.id, f.filename FROM source s, files f
 
     if count > 0:
         Logger.log(["total", count, utils.size_type(size)])
-        print "Cleaned %d files, %s." % (count, utils.size_type(size))
+
+    # Delete entries in files no longer referenced by any archive
+    query = """
+       DELETE FROM files f
+        WHERE NOT EXISTS (SELECT 1 FROM files_archive_map af WHERE af.file_id = f.id)
+    """
+    session.execute(query)
+
+    if not Options["No-Action"]:
+        session.commit()
 
 ################################################################################
 
-def clean_maintainers(now_date, delete_date, max_delete, session):
-    print "Cleaning out unused Maintainer entries..."
+def clean_maintainers(now_date, session):
+    Logger.log(["Cleaning out unused Maintainer entries..."])
 
     # TODO Replace this whole thing with one SQL statement
     q = session.execute("""
@@ -343,19 +353,19 @@ SELECT m.id, m.name FROM maintainer m
 
     if count > 0:
         Logger.log(["total", count])
-        print "Cleared out %d maintainer entries." % (count)
 
 ################################################################################
 
-def clean_fingerprints(now_date, delete_date, max_delete, session):
-    print "Cleaning out unused fingerprint entries..."
+def clean_fingerprints(now_date, session):
+    Logger.log(["Cleaning out unused fingerprint entries..."])
 
     # TODO Replace this whole thing with one SQL statement
     q = session.execute("""
 SELECT f.id, f.fingerprint FROM fingerprint f
   WHERE f.keyring IS NULL
     AND NOT EXISTS (SELECT 1 FROM binaries b WHERE b.sig_fpr = f.id)
-    AND NOT EXISTS (SELECT 1 FROM source s WHERE s.sig_fpr = f.id)""")
+    AND NOT EXISTS (SELECT 1 FROM source s WHERE s.sig_fpr = f.id)
+    AND NOT EXISTS (SELECT 1 FROM acl_per_source aps WHERE aps.created_by_id = f.id)""")
 
     count = 0
 
@@ -371,7 +381,6 @@ SELECT f.id, f.fingerprint FROM fingerprint f
 
     if count > 0:
         Logger.log(["total", count])
-        print "Cleared out %d fingerprint entries." % (count)
 
 ################################################################################
 
@@ -380,13 +389,12 @@ def clean_empty_directories(session):
     Removes empty directories from pool directories.
     """
 
-    print "Cleaning out empty directories..."
+    Logger.log(["Cleaning out empty directories..."])
 
     count = 0
 
     cursor = session.execute(
-        "SELECT DISTINCT(path) FROM location WHERE type = :type",
-        {'type': 'pool'},
+        """SELECT DISTINCT(path) FROM archive"""
     )
     bases = [x[0] for x in cursor.fetchall()]
 
@@ -404,6 +412,24 @@ def clean_empty_directories(session):
 
 ################################################################################
 
+def set_archive_delete_dates(now_date, session):
+    session.execute("""
+        CREATE TEMPORARY TABLE archive_delete_date (
+          archive_id INT NOT NULL,
+          delete_date TIMESTAMP NOT NULL
+        )""")
+
+    session.execute("""
+        INSERT INTO archive_delete_date
+          (archive_id, delete_date)
+        SELECT
+          archive.id, :now_date - archive.stayofexecution
+        FROM archive""", {'now_date': now_date})
+
+    session.flush()
+
+################################################################################
+
 def main():
     global Options, Logger
 
@@ -414,6 +440,7 @@ def main():
             cnf["Clean-Suites::Options::%s" % (i)] = ""
 
     Arguments = [('h',"help","Clean-Suites::Options::Help"),
+                 ('a','archive','Clean-Suites::Options::Archive','HasArg'),
                  ('n',"no-action","Clean-Suites::Options::No-Action"),
                  ('m',"maximum","Clean-Suites::Options::Maximum", "HasArg")]
 
@@ -434,26 +461,35 @@ def main():
     if Options["Help"]:
         usage()
 
-    Logger = daklog.Logger("clean-suites", debug=Options["No-Action"])
+    program = "clean-suites"
+    if Options['No-Action']:
+        program = "clean-suites (no action)"
+    Logger = daklog.Logger(program, debug=Options["No-Action"])
 
     session = DBConn().session()
 
-    now_date = datetime.now()
+    archives = None
+    if 'Archive' in Options:
+        archive_names = Options['Archive'].split(',')
+        archives = session.query(Archive).filter(Archive.archive_name.in_(archive_names)).all()
+        if len(archives) == 0:
+            utils.fubar('Unknown archive.')
 
-    # Stay of execution; default to 1.5 days
-    soe = int(cnf.get('Clean-Suites::StayOfExecution', '129600'))
+    now_date = datetime.now()
 
-    delete_date = now_date - timedelta(seconds=soe)
+    set_archive_delete_dates(now_date, session)
 
-    check_binaries(now_date, delete_date, max_delete, session)
-    clean_binaries(now_date, delete_date, max_delete, session)
-    check_sources(now_date, delete_date, max_delete, session)
-    check_files(now_date, delete_date, max_delete, session)
-    clean(now_date, delete_date, max_delete, session)
-    clean_maintainers(now_date, delete_date, max_delete, session)
-    clean_fingerprints(now_date, delete_date, max_delete, session)
+    check_binaries(now_date, session)
+    clean_binaries(now_date, session)
+    check_sources(now_date, session)
+    check_files(now_date, session)
+    clean(now_date, archives, max_delete, session)
+    clean_maintainers(now_date, session)
+    clean_fingerprints(now_date, session)
     clean_empty_directories(session)
 
+    session.rollback()
+
     Logger.close()
 
 ################################################################################
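
Taken together, the rewritten clean-suites queries all apply the same per-archive criterion: a pool file becomes removable once its files_archive_map entry has been unused for longer than the owning archive's stayofexecution. Restated on its own as a sketch, using only the tables and columns visible above and assuming stayofexecution is an SQL interval (now_date and session as in main()); this is an illustration, not code from the commit:

rows = session.execute("""
    SELECT archive.name, f.filename
      FROM files_archive_map af
      JOIN files f ON f.id = af.file_id
      JOIN archive ON archive.id = af.archive_id
     WHERE af.last_used <= :now - archive.stayofexecution
""", {'now': now_date})
for archive_name, filename in rows:
    print "removable:", archive_name, filename
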
index f36e96dcaf0449749bef146ae19fe5f4995b5e5c..407b3c06475dfae80229d676175bbec415d7fd35 100755 (executable)
@@ -67,6 +67,9 @@ OPTIONS
         show this help and exit
 
 OPTIONS for generate
+     -a, --archive=ARCHIVE
+        only operate on suites in the specified archive
+
      -s, --suite={stable,testing,unstable,...}
         only operate on specified suite names
 
@@ -84,9 +87,9 @@ OPTIONS for scan-source and scan-binary
 
 ################################################################################
 
-def write_all(cnf, suite_names = [], component_names = [], force = None):
+def write_all(cnf, archive_names = [], suite_names = [], component_names = [], force = None):
     Logger = daklog.Logger('contents generate')
-    ContentsWriter.write_all(Logger, suite_names, component_names, force)
+    ContentsWriter.write_all(Logger, archive_names, suite_names, component_names, force)
     Logger.close()
 
 ################################################################################
@@ -119,6 +122,7 @@ def main():
     cnf['Contents::Options::Limit'] = ''
     cnf['Contents::Options::Force'] = ''
     arguments = [('h', "help",      'Contents::Options::Help'),
+                 ('a', 'archive',   'Contents::Options::Archive',   'HasArg'),
                  ('s', "suite",     'Contents::Options::Suite',     "HasArg"),
                  ('c', "component", 'Contents::Options::Component', "HasArg"),
                  ('l', "limit",     'Contents::Options::Limit',     "HasArg"),
@@ -142,13 +146,14 @@ def main():
         binary_scan_all(cnf, limit)
         return
 
+    archive_names   = utils.split_args(options['Archive'])
     suite_names     = utils.split_args(options['Suite'])
     component_names = utils.split_args(options['Component'])
 
     force = bool(options['Force'])
 
     if args[0] == 'generate':
-        write_all(cnf, suite_names, component_names, force)
+        write_all(cnf, archive_names, suite_names, component_names, force)
         return
 
     usage()
index 2d8a295a7fa0f8e3805d8e5fb7b5d54230732d1c..fecd76405148e1c22a2beded10d1f1d267c52bbc 100755 (executable)
@@ -45,6 +45,7 @@ import sys
 import apt_pkg
 import os
 
+from daklib.archive import ArchiveTransaction
 from daklib.config import Config
 from daklib.dbconn import *
 from daklib import daklog
@@ -72,27 +73,19 @@ Display or alter the contents of a suite using FILE(s), or stdin.
 
 #######################################################################################
 
-def get_id(package, version, architecture, session):
-    if architecture == "source":
-        q = session.execute("SELECT id FROM source WHERE source = :package AND version = :version",
-                            {'package': package, 'version': version})
+def get_pkg(package, version, architecture, session):
+    if architecture == 'source':
+        q = session.query(DBSource).filter_by(source=package, version=version) \
+            .join(DBSource.poolfile)
     else:
-        q = session.execute("""SELECT b.id FROM binaries b, architecture a
-                                WHERE b.package = :package AND b.version = :version
-                                  AND (a.arch_string = :arch OR a.arch_string = 'all')
-                                  AND b.architecture = a.id""",
-                               {'package': package, 'version': version, 'arch': architecture})
-
-    ql = q.fetchall()
-    if len(ql) < 1:
-        utils.warn("Couldn't find '%s_%s_%s'." % (package, version, architecture))
-        return None
-
-    if len(ql) > 1:
-        utils.warn("Found more than one match for '%s_%s_%s'." % (package, version, architecture))
-        return None
+        q = session.query(DBBinary).filter_by(package=package, version=version) \
+            .join(DBBinary.architecture).filter(Architecture.arch_string.in_([architecture, 'all'])) \
+            .join(DBBinary.poolfile)
 
-    return ql[0][0]
+    pkg = q.first()
+    if pkg is None:
+        utils.warn("Could not find {0}_{1}_{2}.".format(package, version, architecture))
+    return pkg
 
 #######################################################################################
 
@@ -194,16 +187,23 @@ def version_checks(package, architecture, target_suite, new_version, session, fo
 
 def cmp_package_version(a, b):
     """
-    comparison function for tuples of the form (package-name, version ...)
+    comparison function for tuples of the form (package-name, version, arch, ...)
     """
-    cmp_package = cmp(a[0], b[0])
-    if cmp_package != 0:
-        return cmp_package
-    return apt_pkg.version_compare(a[1], b[1])
+    res = 0
+    if a[2] == 'source' and b[2] != 'source':
+        res = -1
+    elif a[2] != 'source' and b[2] == 'source':
+        res = 1
+    if res == 0:
+        res = cmp(a[0], b[0])
+    if res == 0:
+        res = apt_pkg.version_compare(a[1], b[1])
+    return res
 
 #######################################################################################
 
-def set_suite(file, suite, session, britney=False, force=False):
+def set_suite(file, suite, transaction, britney=False, force=False):
+    session = transaction.session
     suite_id = suite.suite_id
     lines = file.readlines()
 
@@ -241,16 +241,17 @@ def set_suite(file, suite, session, britney=False, force=False):
         if key not in current:
             (package, version, architecture) = key
             version_checks(package, architecture, suite.suite_name, version, session, force)
-            pkid = get_id (package, version, architecture, session)
-            if not pkid:
+            pkg = get_pkg(package, version, architecture, session)
+            if pkg is None:
                 continue
+
+            component = pkg.poolfile.component
             if architecture == "source":
-                session.execute("""INSERT INTO src_associations (suite, source)
-                                        VALUES (:suiteid, :pkid)""", {'suiteid': suite_id, 'pkid': pkid})
+                transaction.copy_source(pkg, suite, component)
             else:
-                session.execute("""INSERT INTO bin_associations (suite, bin)
-                                        VALUES (:suiteid, :pkid)""", {'suiteid': suite_id, 'pkid': pkid})
-            Logger.log(["added", " ".join(key), pkid])
+                transaction.copy_binary(pkg, suite, component)
+
+            Logger.log(["added", " ".join(key)])
 
     # Check to see which packages need removed and remove them
     for key, pkid in current.iteritems():
@@ -269,9 +270,11 @@ def set_suite(file, suite, session, britney=False, force=False):
 
 #######################################################################################
 
-def process_file(file, suite, action, session, britney=False, force=False):
+def process_file(file, suite, action, transaction, britney=False, force=False):
+    session = transaction.session
+
     if action == "set":
-        set_suite(file, suite, session, britney, force)
+        set_suite(file, suite, transaction, britney, force)
         return
 
     suite_id = suite.suite_id
@@ -289,9 +292,15 @@ def process_file(file, suite, action, session, britney=False, force=False):
     request.sort(cmp=cmp_package_version)
 
     for package, version, architecture in request:
-        pkid = get_id(package, version, architecture, session)
-        if not pkid:
+        pkg = get_pkg(package, version, architecture, session)
+        if pkg is None:
             continue
+        if architecture == 'source':
+            pkid = pkg.source_id
+        else:
+            pkid = pkg.binary_id
+
+        component = pkg.poolfile.component
 
         # Do version checks when adding packages
         if action == "add":
@@ -314,9 +323,7 @@ def process_file(file, suite, action, session, britney=False, force=False):
                     utils.warn("'%s_%s_%s' already exists in suite %s." % (package, version, architecture, suite))
                     continue
                 else:
-                    session.execute("""INSERT INTO src_associations (suite, source)
-                                            VALUES (:suiteid, :pkid)""",
-                                       {'suiteid': suite_id, 'pkid': pkid})
+                    transaction.copy_source(pkg, suite, component)
                     Logger.log(["added", package, version, architecture, suite.suite_name, pkid])
 
             elif action == "remove":
@@ -343,9 +350,7 @@ def process_file(file, suite, action, session, britney=False, force=False):
                     utils.warn("'%s_%s_%s' already exists in suite %s." % (package, version, architecture, suite))
                     continue
                 else:
-                    session.execute("""INSERT INTO bin_associations (suite, bin)
-                                            VALUES (:suiteid, :pkid)""",
-                                       {'suiteid': suite_id, 'pkid': pkid})
+                    transaction.copy_binary(pkg, suite, component)
                     Logger.log(["added", package, version, architecture, suite.suite_name, pkid])
             elif action == "remove":
                 if association_id == None:
@@ -406,8 +411,6 @@ def main ():
     if Options["Help"]:
         usage()
 
-    session = DBConn().session()
-
     force = Options.has_key("Force") and Options["Force"]
 
     action = None
@@ -415,23 +418,14 @@ def main ():
     for i in ("add", "list", "remove", "set"):
         if cnf["Control-Suite::Options::%s" % (i)] != "":
             suite_name = cnf["Control-Suite::Options::%s" % (i)]
-            suite = get_suite(suite_name, session=session)
-            if suite is None:
-                utils.fubar("Unknown suite '%s'." % (suite_name))
-            else:
-                if action:
-                    utils.fubar("Can only perform one action at a time.")
-                action = i
 
-                # Safety/Sanity check
-                if action == "set" and (not suite.allowcsset):
-                    if force:
-                        utils.warn("Would not normally allow setting suite %s (allowsetcs is FALSE), but --force used" % (suite_name))
-                    else:
-                        utils.fubar("Will not reset suite %s due to its database configuration (allowsetcs is FALSE)" % (suite_name))
+            if action:
+                utils.fubar("Can only perform one action at a time.")
+
+            action = i
 
     # Need an action...
-    if action == None:
+    if action is None:
         utils.fubar("No action specified.")
 
     britney = False
@@ -439,14 +433,28 @@ def main ():
         britney = True
 
     if action == "list":
+        session = DBConn().session()
+        suite = session.query(Suite).filter_by(suite_name=suite_name).one()
         get_list(suite, session)
     else:
         Logger = daklog.Logger("control-suite")
-        if file_list:
-            for f in file_list:
-                process_file(utils.open_file(f), suite, action, session, britney, force)
-        else:
-            process_file(sys.stdin, suite, action, session, britney, force)
+
+        with ArchiveTransaction() as transaction:
+            session = transaction.session
+            suite = session.query(Suite).filter_by(suite_name=suite_name).one()
+
+            if action == "set" and not suite.allowcsset:
+                if force:
+                    utils.warn("Would not normally allow setting suite {0} (allowsetcs is FALSE), but --force used".format(suite_name))
+                else:
+                    utils.fubar("Will not reset suite {0} due to its database configuration (allowsetcs is FALSE)".format(suite_name))
+
+            if file_list:
+                for f in file_list:
+                    process_file(utils.open_file(f), suite, action, transaction, britney, force)
+            else:
+                process_file(sys.stdin, suite, action, transaction, britney, force)
+
         Logger.close()
 
 #######################################################################################
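
The new comparator in control-suite sorts source entries ahead of binaries before falling back to package name and dpkg version order. A small illustrative example of the resulting ordering, with made-up package tuples (cmp_package_version and apt_pkg as defined and imported in the file above):

request = [('bar', '1.0-2', 'amd64'),
           ('foo', '2.0-1', 'source'),
           ('bar', '1.0-1', 'amd64')]
request.sort(cmp=cmp_package_version)
# -> [('foo', '2.0-1', 'source'),   sources always sort first
#     ('bar', '1.0-1', 'amd64'),    then by package name
#     ('bar', '1.0-2', 'amd64')]    and by dpkg version within a name
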
index 73f51025152d4dc19970402bc54b82553d2aff21..7dedceed886c1c9f4c11c0e3fab4db0389147764 100755 (executable)
@@ -60,6 +60,7 @@ Check for obsolete or duplicated packages.
   -h, --help                show this help and exit.
   -m, --mode=MODE           choose the MODE to run in (full, daily, bdo).
   -s, --suite=SUITE         check suite SUITE.
+  -R, --rdep-check          check reverse dependencies
   -w, --wanna-build-dump    where to find the copies of http://buildd.debian.org/stats/*.txt"""
     sys.exit(exit_code)
 
@@ -229,7 +230,7 @@ def queryWithoutSource(suite_id, session):
         order by ub.package"""
     return session.execute(query, { 'suite_id': suite_id })
 
-def reportWithoutSource(suite_name, suite_id, session):
+def reportWithoutSource(suite_name, suite_id, session, rdeps=False):
     rows = queryWithoutSource(suite_id, session)
     title = 'packages without source in suite %s' % suite_name
     if rows.rowcount > 0:
@@ -240,8 +241,15 @@ def reportWithoutSource(suite_name, suite_id, session):
         print "* package %s in version %s is no longer built from source" % \
             (package, version)
         print "  - suggested command:"
-        print "    dak rm -m %s -s %s -a all -p -R -b %s\n" % \
+        print "    dak rm -m %s -s %s -a all -p -R -b %s" % \
             (message, suite_name, package)
+        if rdeps:
+            if utils.check_reverse_depends([package], suite_name, [], session, True):
+                print
+            else:
+                print "  - No dependency problem found\n"
+        else:
+            print
 
 def queryNewerAll(suite_name, session):
     """searches for arch != all packages that have an arch == all
@@ -361,7 +369,7 @@ with uptodate_arch as
     select * from outdated_packages order by source"""
     return session.execute(query, { 'suite_id': suite_id })
 
-def reportNBS(suite_name, suite_id):
+def reportNBS(suite_name, suite_id, rdeps=False):
     session = DBConn().session()
     nbsRows = queryNBS(suite_id, session)
     title = 'NBS packages in suite %s' % suite_name
@@ -377,14 +385,21 @@ def reportNBS(suite_name, suite_id):
        print "  on %s" % arch_string
        print "  - suggested command:"
        message = '"[auto-cruft] NBS (no longer built by %s)"' % source
-       print "    dak rm -m %s -s %s -a %s -p -R -b %s\n" % \
+       print "    dak rm -m %s -s %s -a %s -p -R -b %s" % \
            (message, suite_name, arch_string, pkg_string)
+       if rdeps:
+           if utils.check_reverse_depends(pkg_list, suite_name, arch_list, session, True):
+               print
+           else:
+               print "  - No dependency problem found\n"
+       else:
+           print
     session.close()
 
-def reportAllNBS(suite_name, suite_id, session):
-    reportWithoutSource(suite_name, suite_id, session)
+def reportAllNBS(suite_name, suite_id, session, rdeps=False):
+    reportWithoutSource(suite_name, suite_id, session, rdeps)
     reportNewerAll(suite_name, session)
-    reportNBS(suite_name, suite_id)
+    reportNBS(suite_name, suite_id, rdeps)
 
 ################################################################################
 
@@ -506,7 +521,7 @@ def get_suite_binaries(suite, session):
 
 ################################################################################
 
-def report_outdated_nonfree(suite, session):
+def report_outdated_nonfree(suite, session, rdeps=False):
 
     packages = {}
     query = """WITH outdated_sources AS (
@@ -572,8 +587,15 @@ def report_outdated_nonfree(suite, session):
             for binary in sorted(packages[source]):
                 binaries.add(binary)
                 archs = archs.union(packages[source][binary])
-            print '    dak rm -m %s -s %s -a %s -p -R -b %s\n' % \
+            print '    dak rm -m %s -s %s -a %s -p -R -b %s' % \
                    (message, suite, ','.join(archs), ' '.join(binaries))
+            if rdeps:
+                if utils.check_reverse_depends(list(binaries), suite, archs, session, True):
+                    print
+                else:
+                    print "  - No dependency problem found\n"
+            else:
+                print
 
 ################################################################################
 
@@ -584,9 +606,10 @@ def main ():
 
     Arguments = [('h',"help","Cruft-Report::Options::Help"),
                  ('m',"mode","Cruft-Report::Options::Mode", "HasArg"),
+                 ('R',"rdep-check", "Cruft-Report::Options::Rdep-Check"),
                  ('s',"suite","Cruft-Report::Options::Suite","HasArg"),
                  ('w',"wanna-build-dump","Cruft-Report::Options::Wanna-Build-Dump","HasArg")]
-    for i in [ "help" ]:
+    for i in [ "help", "Rdep-Check" ]:
         if not cnf.has_key("Cruft-Report::Options::%s" % (i)):
             cnf["Cruft-Report::Options::%s" % (i)] = ""
 
@@ -604,6 +627,11 @@ def main ():
     if Options["Help"]:
         usage()
 
+    if Options["Rdep-Check"]:
+        rdeps = True
+    else:
+        rdeps = False
+
     # Set up checks based on mode
     if Options["Mode"] == "daily":
         checks = [ "nbs", "nviu", "nvit", "obsolete source", "outdated non-free", "nfu" ]
@@ -639,10 +667,10 @@ def main ():
         report_obsolete_source(suite_name, session)
 
     if "nbs" in checks:
-        reportAllNBS(suite_name, suite_id, session)
+        reportAllNBS(suite_name, suite_id, session, rdeps)
 
     if "outdated non-free" in checks:
-        report_outdated_nonfree(suite_name, session)
+        report_outdated_nonfree(suite_name, session, rdeps)
 
     bin_not_built = {}
 
@@ -652,20 +680,20 @@ def main ():
     # Checks based on the Sources files
     components = get_component_names(session)
     for component in components:
-        filename = "%s/dists/%s/%s/source/Sources.gz" % (cnf["Dir::Root"], suite_name, component)
-        # apt_pkg.ParseTagFile needs a real file handle and can't handle a GzipFile instance...
+        filename = "%s/dists/%s/%s/source/Sources.gz" % (suite.archive.path, suite_name, component)
+        # apt_pkg.TagFile needs a real file handle and can't handle a GzipFile instance...
         (fd, temp_filename) = utils.temp_filename()
         (result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
         if (result != 0):
             sys.stderr.write("Gunzip invocation failed!\n%s\n" % (output))
             sys.exit(result)
         sources = utils.open_file(temp_filename)
-        Sources = apt_pkg.ParseTagFile(sources)
-        while Sources.Step():
-            source = Sources.Section.Find('Package')
-            source_version = Sources.Section.Find('Version')
-            architecture = Sources.Section.Find('Architecture')
-            binaries = Sources.Section.Find('Binary')
+        Sources = apt_pkg.TagFile(sources)
+        while Sources.step():
+            source = Sources.section.find('Package')
+            source_version = Sources.section.find('Version')
+            architecture = Sources.section.find('Architecture')
+            binaries = Sources.section.find('Binary')
             binaries_list = [ i.strip() for i in  binaries.split(',') ]
 
             if "bnb" in checks:
@@ -701,8 +729,8 @@ def main ():
         for architecture in architectures:
             if component == 'main/debian-installer' and re.match("kfreebsd", architecture):
                 continue
-            filename = "%s/dists/%s/%s/binary-%s/Packages.gz" % (cnf["Dir::Root"], suite_name, component, architecture)
-            # apt_pkg.ParseTagFile needs a real file handle
+            filename = "%s/dists/%s/%s/binary-%s/Packages.gz" % (suite.archive.path, suite_name, component, architecture)
+            # apt_pkg.TagFile needs a real file handle
             (fd, temp_filename) = utils.temp_filename()
             (result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
             if (result != 0):
@@ -714,11 +742,11 @@ def main ():
                 nfu_entries = parse_nfu(architecture)
 
             packages = utils.open_file(temp_filename)
-            Packages = apt_pkg.ParseTagFile(packages)
-            while Packages.Step():
-                package = Packages.Section.Find('Package')
-                source = Packages.Section.Find('Source', "")
-                version = Packages.Section.Find('Version')
+            Packages = apt_pkg.TagFile(packages)
+            while Packages.step():
+                package = Packages.section.find('Package')
+                source = Packages.section.find('Source', "")
+                version = Packages.section.find('Version')
                 if source == "":
                     source = package
                 if bin2source.has_key(package) and \
index 579548c9470050bc8117f0db94074f4d0451cfbd..7cb80f4f66c544ec126ad52413215c9938af61dd 100755 (executable)
@@ -71,11 +71,17 @@ def init():
          "Process NEW and BYHAND packages"),
         ("process-upload",
          "Process packages in queue/unchecked"),
+        ("process-commands",
+         "Process command files (*.dak-commands)"),
         ("process-policy",
          "Process packages in policy queues from COMMENTS files"),
 
         ("dominate",
          "Remove obsolete source and binary associations from suites"),
+        ("export",
+         "Export uploads from policy queues"),
+        ("export-suite",
+         "export a suite to a flat directory structure"),
         ("make-pkg-file-mapping",
          "Generate package <-> file mapping"),
         ("generate-filelist",
@@ -111,14 +117,14 @@ def init():
          "Check for obsolete or duplicated packages"),
         ("examine-package",
          "Show information useful for NEW processing"),
-        ("find-null-maintainers",
-         "Check for users with no packages in the archive"),
+        ("import",
+         "Import existing source and binary packages"),
         ("import-keyring",
          "Populate fingerprint/uid table based on a new/updated keyring"),
-        ("import-ldap-fingerprints",
-         "Syncs fingerprint and uid tables with Debian LDAP db"),
         ("import-users-from-passwd",
          "Sync PostgreSQL users with passwd file"),
+        ("acl",
+         "Manage upload ACLs"),
         ("admin",
          "Perform administration on the dak database"),
         ("update-db",
@@ -131,14 +137,10 @@ def init():
          "Generates override files"),
         ("new-security-install",
          "New way to install a security upload into the archive"),
-        ("split-done",
-         "Split queue/done into a date-based hierarchy"),
         ("stats",
          "Generate statistics"),
         ("bts-categorize",
          "Categorize uncategorized bugs filed against ftp.debian.org"),
-        ("import-known-changes",
-         "import old changes files into known_changes table"),
         ("add-user",
          "Add a user to the archive"),
         ("make-changelog",
diff --git a/dak/dakdb/update100.py b/dak/dakdb/update100.py
new file mode 100644 (file)
index 0000000..85733cb
--- /dev/null
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Add a component - suite mapping to only expose certain components in certain suites
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Varnish Software AS
+@author: Tollef Fog Heen <tfheen@varnish-software.com>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        c.execute("""
+            CREATE TABLE component_suite (
+                component_id INTEGER NOT NULL REFERENCES component(id) ON DELETE CASCADE,
+                suite_id INTEGER NOT NULL REFERENCES suite(id) ON DELETE CASCADE,
+                PRIMARY KEY (component_id, suite_id)
+            )
+            """)
+        # Set up default mappings for all components to all suites
+        c.execute("INSERT INTO component_suite(component_id, suite_id) SELECT component.id,suite.id from suite, component")
+
+        c.execute("UPDATE config SET value = '100' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 100, rollback issued. Error message: {0}'.format(msg))
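Illustration (not part of the patch): the update above seeds component_suite with every component/suite pair, so restricting what a suite exposes means deleting the unwanted rows. A minimal psycopg2 sketch, assuming the usual projectb database and hypothetical component/suite names:

    import psycopg2

    conn = psycopg2.connect("dbname=projectb")
    c = conn.cursor()
    # Stop exposing "contrib" in a hypothetical suite called "example".
    c.execute("""
        DELETE FROM component_suite
         WHERE component_id = (SELECT id FROM component WHERE name = 'contrib')
           AND suite_id = (SELECT id FROM suite WHERE suite_name = 'example')
    """)
    conn.commit()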
diff --git a/dak/dakdb/update73.py b/dak/dakdb/update73.py
new file mode 100644 (file)
index 0000000..1f309e4
--- /dev/null
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Reference archive table from suite and add path to archive root
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        archive_root = cnf["Dir::Root"]
+        c.execute("ALTER TABLE archive ADD COLUMN path TEXT NOT NULL DEFAULT %s", (archive_root,))
+        c.execute("ALTER TABLE archive ALTER COLUMN path DROP DEFAULT")
+
+        c.execute("ALTER TABLE archive ADD COLUMN mode CHAR(4) NOT NULL DEFAULT '0644' CHECK (mode SIMILAR TO '[0-7]{4}')")
+        c.execute("ALTER TABLE archive ADD COLUMN tainted BOOLEAN NOT NULL DEFAULT 'f'")
+        c.execute("ALTER TABLE archive ADD COLUMN use_morgue BOOLEAN NOT NULL DEFAULT 't'")
+
+        c.execute("SELECT id FROM archive")
+        (archive_id,) = c.fetchone()
+
+        if c.fetchone() is not None:
+            raise DBUpdateError("Cannot automatically upgrade form installation with multiple archives.")
+
+        c.execute("ALTER TABLE suite ADD COLUMN archive_id INT REFERENCES archive(id) NOT NULL DEFAULT %s", (archive_id,))
+        c.execute("ALTER TABLE suite ALTER COLUMN archive_id DROP DEFAULT")
+
+        c.execute("UPDATE config SET value = '73' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 73, rollback issued. Error message : %s' % (str(msg)))
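Illustration (not part of the patch): the new suite.archive_id and archive.path columns are what lets code such as the cruft-report hunks above use suite.archive.path instead of the global Dir::Root. A lookup sketch, assuming the usual projectb database:

    import psycopg2

    c = psycopg2.connect("dbname=projectb").cursor()
    # Resolve the on-disk archive root for a suite via its archive row.
    c.execute("""
        SELECT a.path
          FROM suite s
          JOIN archive a ON a.id = s.archive_id
         WHERE s.suite_name = %s
    """, ('unstable',))
    (archive_root,) = c.fetchone()
    print archive_root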
diff --git a/dak/dakdb/update74.py b/dak/dakdb/update74.py
new file mode 100644 (file)
index 0000000..89810f4
--- /dev/null
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Drop origin_server column from archive table
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        c.execute("ALTER TABLE archive DROP COLUMN origin_server")
+
+        c.execute("UPDATE config SET value = '74' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 74, rollback issued. Error message : %s' % (str(msg)))
diff --git a/dak/dakdb/update75.py b/dak/dakdb/update75.py
new file mode 100644 (file)
index 0000000..d152d64
--- /dev/null
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Multi-archive support; convert policy and build queues to regular suites
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+import os
+
+################################################################################
+
+def _track_files_per_archive(cnf, c):
+    c.execute("SELECT id FROM archive")
+    (archive_id,) = c.fetchone()
+
+    if c.fetchone() is not None:
+        raise DBUpdateError("Cannot automatically upgrade from installation with multiple archives.")
+
+    c.execute("""CREATE TABLE files_archive_map (
+      file_id INT NOT NULL REFERENCES files(id),
+      archive_id INT NOT NULL REFERENCES archive(id),
+      component_id INT NOT NULL REFERENCES component(id),
+      last_used TIMESTAMP DEFAULT NULL,
+      created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+      PRIMARY KEY (file_id, archive_id, component_id)
+    )""")
+
+    c.execute("""INSERT INTO files_archive_map (file_id, archive_id, component_id)
+       SELECT f.id, %s, l.component
+       FROM files f
+       JOIN location l ON f.location = l.id""", (archive_id,))
+
+    c.execute("""UPDATE files f SET filename = substring(f.filename FROM c.name || '/(.*)')
+      FROM location l, component c
+      WHERE f.location = l.id AND l.component = c.id
+        AND f.filename LIKE c.name || '/%'""")
+
+    # NOTE: The location table would need these changes, but we drop it later
+    #       anyway.
+    #c.execute("""UPDATE location l SET path = path || c.name || '/'
+    #  FROM component c
+    #  WHERE l.component = c.id
+    #    AND l.path NOT LIKE '%/' || c.name || '/'""")
+
+    c.execute("DROP VIEW IF EXISTS binfiles_suite_component_arch")
+    c.execute("ALTER TABLE files DROP COLUMN location")
+    c.execute("DROP TABLE location")
+
+def _convert_policy_queues(cnf, c):
+    base = cnf['Dir::Base']
+    new_path = os.path.join(base, 'new')
+    policy_path = os.path.join(base, 'policy')
+
+    # Forget changes in (old) policy queues so they can be processed again.
+    c.execute("DROP TABLE IF EXISTS build_queue_policy_files")
+    c.execute("DROP TABLE IF EXISTS build_queue_files")
+    c.execute("DROP TABLE IF EXISTS changes_pending_binaries")
+    c.execute("DROP TABLE IF EXISTS changes_pending_source_files")
+    c.execute("DROP TABLE IF EXISTS changes_pending_source")
+    c.execute("DROP TABLE IF EXISTS changes_pending_files_map")
+    c.execute("DROP TABLE IF EXISTS changes_pending_files")
+    c.execute("DROP TABLE IF EXISTS changes_pool_files")
+    c.execute("DELETE FROM changes WHERE in_queue IS NOT NULL")
+
+    # newstage and unchecked are no longer queues
+    c.execute("""
+      DELETE FROM policy_queue
+      WHERE queue_name IN ('newstage', 'unchecked')
+    """)
+
+    # Create archive for NEW
+    c.execute("INSERT INTO archive (name, description, path, tainted, use_morgue, mode) VALUES ('new', 'new queue', %s, 't', 'f', '0640') RETURNING (id)", (new_path,))
+    (new_archive_id,) = c.fetchone()
+
+    # Create archive for policy queues
+    c.execute("INSERT INTO archive (name, description, path, use_morgue) VALUES ('policy', 'policy queues', %s, 'f') RETURNING (id)", (policy_path,))
+    (archive_id,) = c.fetchone()
+
+    # Add suites for policy queues
+    c.execute("""
+      INSERT INTO suite
+        (archive_id, suite_name, origin, label, description, signingkeys)
+      SELECT
+        %s, queue_name, origin, label, releasedescription, NULLIF(ARRAY[signingkey], ARRAY[NULL])
+      FROM policy_queue
+      WHERE queue_name NOT IN ('unchecked')
+    """, (archive_id,))
+
+    # move NEW to its own archive
+    c.execute("UPDATE suite SET archive_id=%s WHERE suite_name IN ('byhand', 'new')", (new_archive_id,))
+
+    c.execute("""ALTER TABLE policy_queue
+      DROP COLUMN origin,
+      DROP COLUMN label,
+      DROP COLUMN releasedescription,
+      DROP COLUMN signingkey,
+      DROP COLUMN stay_of_execution,
+      DROP COLUMN perms,
+      ADD COLUMN suite_id INT REFERENCES suite(id)
+    """)
+
+    c.execute("UPDATE policy_queue pq SET suite_id=s.id FROM suite s WHERE s.suite_name = pq.queue_name")
+    c.execute("ALTER TABLE policy_queue ALTER COLUMN suite_id SET NOT NULL")
+
+    c.execute("""INSERT INTO suite_architectures (suite, architecture)
+        SELECT pq.suite_id, sa.architecture
+          FROM policy_queue pq
+          JOIN suite ON pq.id = suite.policy_queue_id
+          JOIN suite_architectures sa ON suite.id = sa.suite
+         WHERE pq.queue_name NOT IN ('byhand', 'new')
+         GROUP BY pq.suite_id, sa.architecture""")
+
+    # We take the architectures from suite_architectures so that we only
+    # add arches actually in use. Having exactly the right set of arches
+    # for policy queues is not that important anyway, unless you want to
+    # generate Packages indices for them.
+    c.execute("""INSERT INTO suite_architectures (suite, architecture)
+        SELECT DISTINCT pq.suite_id, sa.architecture
+          FROM policy_queue pq, suite_architectures sa
+         WHERE pq.queue_name IN ('byhand', 'new')""")
+
+    c.execute("""CREATE TABLE policy_queue_upload (
+        id SERIAL NOT NULL PRIMARY KEY,
+        policy_queue_id INT NOT NULL REFERENCES policy_queue(id),
+        target_suite_id INT NOT NULL REFERENCES suite(id),
+        changes_id INT NOT NULL REFERENCES changes(id),
+        source_id INT REFERENCES source(id),
+        UNIQUE (policy_queue_id, target_suite_id, changes_id)
+    )""")
+
+    c.execute("""CREATE TABLE policy_queue_upload_binaries_map (
+        policy_queue_upload_id INT REFERENCES policy_queue_upload(id) ON DELETE CASCADE,
+        binary_id INT REFERENCES binaries(id),
+        PRIMARY KEY (policy_queue_upload_id, binary_id)
+    )""")
+
+    c.execute("""
+      CREATE TABLE policy_queue_byhand_file (
+        id SERIAL NOT NULL PRIMARY KEY,
+        upload_id INT NOT NULL REFERENCES policy_queue_upload(id),
+        filename TEXT NOT NULL,
+        processed BOOLEAN NOT NULL DEFAULT 'f'
+      )""")
+
+    c.execute("""ALTER TABLE changes
+      DROP COLUMN in_queue,
+      DROP COLUMN approved_for
+    """)
+
+def _convert_build_queues(cnf, c):
+    base = cnf['Dir::Base']
+    build_queue_path = os.path.join(base, 'build-queues')
+
+    c.execute("INSERT INTO archive (name, description, path, tainted, use_morgue) VALUES ('build-queues', 'build queues', %s, 't', 'f') RETURNING id", [build_queue_path])
+    archive_id, = c.fetchone()
+
+    c.execute("ALTER TABLE build_queue ADD COLUMN suite_id INT REFERENCES suite(id)")
+
+    c.execute("""
+      INSERT INTO suite
+        (archive_id, suite_name, origin, label, description, signingkeys, notautomatic)
+      SELECT
+        %s, queue_name, origin, label, releasedescription, NULLIF(ARRAY[signingkey], ARRAY[NULL]), notautomatic
+      FROM build_queue
+    """, [archive_id])
+    c.execute("UPDATE build_queue bq SET suite_id=(SELECT id FROM suite s WHERE s.suite_name = bq.queue_name)")
+    c.execute("ALTER TABLE build_queue ALTER COLUMN suite_id SET NOT NULL")
+
+    c.execute("""INSERT INTO suite_architectures (suite, architecture)
+        SELECT bq.suite_id, sa.architecture
+          FROM build_queue bq
+          JOIN suite_build_queue_copy sbqc ON bq.id = sbqc.build_queue_id
+          JOIN suite ON sbqc.suite = suite.id
+          JOIN suite_architectures sa ON suite.id = sa.suite
+         GROUP BY bq.suite_id, sa.architecture""")
+
+    c.execute("""ALTER TABLE build_queue
+                   DROP COLUMN path,
+                   DROP COLUMN copy_files,
+                   DROP COLUMN origin,
+                   DROP COLUMN label,
+                   DROP COLUMN releasedescription,
+                   DROP COLUMN signingkey,
+                   DROP COLUMN notautomatic""")
+
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+        if 'Dir::Base' not in cnf:
+            print """
+MANUAL UPGRADE INSTRUCTIONS
+===========================
+
+This database update will convert policy and build queues to regular suites.
+For these, archives will be created under Dir::Base:
+
+  NEW:           <base>/new
+  policy queues: <base>/policy
+  build queues:  <base>/build-queues
+
+Please add Dir::Base to dak.conf and try the update again.  Once the database
+upgrade is finished, you will have to reprocess all uploads currently in
+policy queues: just move them back to unchecked manually.
+"""
+            raise DBUpdateError("Please update dak.conf and try again.")
+
+        c = self.db.cursor()
+
+        _track_files_per_archive(cnf, c)
+        _convert_policy_queues(cnf, c)
+        _convert_build_queues(cnf, c)
+
+        c.execute("UPDATE config SET value = '75' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 75, rollback issued. Error message : %s' % (str(msg)))
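Illustration (not part of the patch): files_archive_map replaces the dropped location table as the record of which archive and component reference a pool file. A lookup sketch, assuming the usual projectb database; the filename is hypothetical and, after this update, no longer carries the component prefix:

    import psycopg2

    c = psycopg2.connect("dbname=projectb").cursor()
    c.execute("""
        SELECT a.name, comp.name
          FROM files f
          JOIN files_archive_map fam ON fam.file_id = f.id
          JOIN archive a ON a.id = fam.archive_id
          JOIN component comp ON comp.id = fam.component_id
         WHERE f.filename = %s
    """, ('h/hello/hello_2.8-1.dsc',))
    for archive_name, component_name in c.fetchall():
        print archive_name, component_name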
diff --git a/dak/dakdb/update76.py b/dak/dakdb/update76.py
new file mode 100644 (file)
index 0000000..41a918b
--- /dev/null
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Add list of closed bugs to changes table
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        c.execute("ALTER TABLE changes ADD COLUMN closes TEXT[]")
+
+        c.execute("UPDATE config SET value = '76' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 76, rollback issued. Error message: {0}'.format(msg))
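Illustration (not part of the patch): with closes stored as a plain TEXT[] column, closed bug numbers can be matched with PostgreSQL's ANY operator. Sketch only, bug number made up:

    import psycopg2

    c = psycopg2.connect("dbname=projectb").cursor()
    c.execute("SELECT id FROM changes WHERE %s = ANY(closes)", ('123456',))
    print c.fetchall()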
diff --git a/dak/dakdb/update77.py b/dak/dakdb/update77.py
new file mode 100644 (file)
index 0000000..a1d6754
--- /dev/null
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Move stayofexecution to the database
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        stayofexecution = cnf.get('Clean-Suites::StayOfExecution', '129600')
+        c.execute("ALTER TABLE archive ADD COLUMN stayofexecution INTERVAL NOT NULL DEFAULT %s", (stayofexecution,))
+        c.execute("UPDATE archive SET stayofexecution='0' WHERE name IN ('new', 'policy', 'build-queues')")
+
+        c.execute("UPDATE config SET value = '77' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 77, rollback issued. Error message: {0}'.format(msg))
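Illustration (not part of the patch): with stayofexecution now an INTERVAL on the archive row, an expiry check can be written directly in SQL against the last_used timestamps in files_archive_map (update 75 above). A sketch, not code taken from dak itself:

    import psycopg2

    c = psycopg2.connect("dbname=projectb").cursor()
    c.execute("""
        SELECT f.filename
          FROM files f
          JOIN files_archive_map fam ON fam.file_id = f.id
          JOIN archive a ON a.id = fam.archive_id
         WHERE fam.last_used IS NOT NULL
           AND fam.last_used + a.stayofexecution < CURRENT_TIMESTAMP
    """)
    for (filename,) in c.fetchall():
        print filename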
diff --git a/dak/dakdb/update78.py b/dak/dakdb/update78.py
new file mode 100644 (file)
index 0000000..209e756
--- /dev/null
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Alter permissions for new tables and set default permissions
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        c.execute("GRANT SELECT ON files_archive_map, policy_queue_byhand_file, policy_queue_upload, policy_queue_upload_binaries_map TO PUBLIC")
+        c.execute("GRANT ALL ON files_archive_map, policy_queue_byhand_file, policy_queue_upload, policy_queue_upload_binaries_map TO ftpmaster")
+
+        c.execute("ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO PUBLIC")
+        c.execute("ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO ftpmaster")
+        c.execute("ALTER DEFAULT PRIVILEGES GRANT SELECT ON SEQUENCES TO PUBLIC")
+        c.execute("ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO ftpmaster")
+        c.execute("ALTER DEFAULT PRIVILEGES GRANT ALL ON FUNCTIONS TO ftpmaster")
+
+        c.execute("UPDATE config SET value = '78' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 78, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update79.py b/dak/dakdb/update79.py
new file mode 100755 (executable)
index 0000000..81a7b23
--- /dev/null
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+add world schema and new stable views
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        c.execute("CREATE SCHEMA world");
+        c.execute("GRANT USAGE ON SCHEMA world TO PUBLIC")
+        c.execute("ALTER DEFAULT PRIVILEGES IN SCHEMA world GRANT SELECT ON TABLES TO PUBLIC")
+        c.execute("ALTER DEFAULT PRIVILEGES IN SCHEMA world GRANT ALL ON TABLES TO ftpmaster")
+        c.execute("ALTER DEFAULT PRIVILEGES IN SCHEMA world GRANT SELECT ON SEQUENCES TO PUBLIC")
+        c.execute("ALTER DEFAULT PRIVILEGES IN SCHEMA world GRANT ALL ON SEQUENCES TO ftpmaster")
+        c.execute("ALTER DEFAULT PRIVILEGES IN SCHEMA world GRANT ALL ON FUNCTIONS TO ftpmaster")
+        c.execute("""
+            CREATE OR REPLACE VIEW world."files-1" AS
+              SELECT
+                files.id AS id,
+                component.name || '/' || files.filename AS filename,
+                files.size AS size,
+                files.md5sum AS md5sum,
+                files.sha1sum AS sha1sum,
+                files.sha256sum AS sha256sum,
+                files.last_used AS last_used,
+                files.created AS created,
+                files.modified AS modified
+              FROM files
+              JOIN files_archive_map fam ON files.id = fam.file_id
+              JOIN component ON fam.component_id = component.id
+              WHERE fam.archive_id = (SELECT id FROM archive WHERE name IN ('backports', 'ftp-master', 'security'))
+            """)
+
+
+        c.execute("UPDATE config SET value = '79' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 79, rollback issued. Error message: {0}'.format(msg))
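Illustration (not part of the patch): the world schema is intended as a stable, read-only interface for other services on the host; PUBLIC only receives SELECT. Querying the new view, assuming the usual projectb database:

    import psycopg2

    c = psycopg2.connect("dbname=projectb").cursor()
    c.execute('SELECT filename, size, sha256sum FROM world."files-1" LIMIT 5')
    for row in c.fetchall():
        print row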
diff --git a/dak/dakdb/update80.py b/dak/dakdb/update80.py
new file mode 100755 (executable)
index 0000000..da9399e
--- /dev/null
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+rename policy queues
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Joerg Jaspert <joerg@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        c.execute("UPDATE policy_queue set queue_name='stable-new' where queue_name='proposedupdates'");
+        c.execute("UPDATE policy_queue set queue_name='oldstable-new' where queue_name='oldproposedupdates'");
+        c.execute("UPDATE suite set suite_name='stable-new' where    suite_name='proposedupdates'");
+        c.execute("UPDATE suite set suite_name='oldstable-new' where suite_name='oldproposedupdates'");
+
+        c.execute("UPDATE config SET value = '79' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 80, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update81.py b/dak/dakdb/update81.py
new file mode 100755 (executable)
index 0000000..668eb9e
--- /dev/null
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Correct permissions of policy_queue_byhand_file_id_seq
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Luca Falavigna <dktrkranz@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+        c.execute("GRANT SELECT, UPDATE, USAGE ON policy_queue_byhand_file_id_seq TO ftpmaster")
+        c.execute("GRANT SELECT ON policy_queue_byhand_file_id_seq TO public")
+        c.execute("UPDATE config SET value = '81' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 81, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update82.py b/dak/dakdb/update82.py
new file mode 100644 (file)
index 0000000..174f869
--- /dev/null
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Correct permissions of policy_queue_upload_id_seq
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Luca Falavigna <dktrkranz@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+        c.execute("GRANT SELECT, UPDATE, USAGE ON policy_queue_upload_id_seq TO ftpmaster")
+        c.execute("GRANT SELECT ON policy_queue_upload_id_seq TO public")
+        c.execute("UPDATE config SET value = '81' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 82, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update83.py b/dak/dakdb/update83.py
new file mode 100644 (file)
index 0000000..f0707d5
--- /dev/null
@@ -0,0 +1,219 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+switch to new ACL implementation and add per-suite NEW
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+statements = [
+"""ALTER TABLE suite ADD COLUMN new_queue_id INT REFERENCES policy_queue(id)""",
+
+"""CREATE TABLE acl (
+    id SERIAL PRIMARY KEY NOT NULL,
+    name TEXT NOT NULL,
+    is_global BOOLEAN NOT NULL DEFAULT 'f',
+
+    match_fingerprint BOOLEAN NOT NULL DEFAULT 'f',
+    match_keyring_id INTEGER REFERENCES keyrings(id),
+
+    allow_new BOOLEAN NOT NULL DEFAULT 'f',
+    allow_source BOOLEAN NOT NULL DEFAULT 'f',
+    allow_binary BOOLEAN NOT NULL DEFAULT 'f',
+    allow_binary_all BOOLEAN NOT NULL DEFAULT 'f',
+    allow_binary_only BOOLEAN NOT NULL DEFAULT 'f',
+    allow_hijack BOOLEAN NOT NULL DEFAULT 'f',
+    allow_per_source BOOLEAN NOT NULL DEFAULT 'f',
+    deny_per_source BOOLEAN NOT NULL DEFAULT 'f'
+    )""",
+
+"""CREATE TABLE acl_architecture_map (
+    acl_id INTEGER NOT NULL REFERENCES acl(id) ON DELETE CASCADE,
+    architecture_id INTEGER NOT NULL REFERENCES architecture(id) ON DELETE CASCADE,
+    PRIMARY KEY (acl_id, architecture_id)
+    )""",
+
+"""CREATE TABLE acl_fingerprint_map (
+    acl_id INTEGER NOT NULL REFERENCES acl(id) ON DELETE CASCADE,
+    fingerprint_id INTEGER NOT NULL REFERENCES fingerprint(id) ON DELETE CASCADE,
+    PRIMARY KEY (acl_id, fingerprint_id)
+    )""",
+
+"""CREATE TABLE acl_per_source (
+    acl_id INTEGER NOT NULL REFERENCES acl(id) ON DELETE CASCADE,
+    fingerprint_id INTEGER NOT NULL REFERENCES fingerprint(id) ON DELETE CASCADE,
+    source TEXT NOT NULL,
+    reason TEXT,
+    PRIMARY KEY (acl_id, fingerprint_id, source)
+    )""",
+
+"""CREATE TABLE suite_acl_map (
+    suite_id INTEGER NOT NULL REFERENCES suite(id) ON DELETE CASCADE,
+    acl_id INTEGER NOT NULL REFERENCES acl(id),
+    PRIMARY KEY (suite_id, acl_id)
+    )""",
+]
+
+################################################################################
+
+def get_buildd_acl_id(c, keyring_id):
+    c.execute("""
+        SELECT 'buildd-' || STRING_AGG(a.arch_string, '+' ORDER BY a.arch_string)
+          FROM keyring_acl_map kam
+          JOIN architecture a ON kam.architecture_id = a.id
+         WHERE kam.keyring_id = %(keyring_id)s
+        """, {'keyring_id': keyring_id})
+    acl_name, = c.fetchone()
+
+    c.execute('SELECT id FROM acl WHERE name = %(acl_name)s', {'acl_name': acl_name})
+    row = c.fetchone()
+    if row is not None:
+        return row[0]
+
+    c.execute("""
+        INSERT INTO acl
+               (        name, allow_new, allow_source, allow_binary, allow_binary_all, allow_binary_only, allow_hijack)
+        VALUES (%(acl_name)s,       't',          'f',          't',              'f',               't',          't')
+        RETURNING id""", {'acl_name': acl_name})
+    acl_id, = c.fetchone()
+
+    c.execute("""INSERT INTO acl_architecture_map (acl_id, architecture_id)
+                 SELECT %(acl_id)s, architecture_id
+                   FROM keyring_acl_map
+                  WHERE keyring_id = %(keyring_id)s""",
+              {'acl_id': acl_id, 'keyring_id': keyring_id})
+
+    return acl_id
+
+def get_acl_id(c, acl_dd, acl_dm, keyring_id, source_acl_id, binary_acl_id):
+    c.execute('SELECT access_level FROM source_acl WHERE id = %(source_acl_id)s', {'source_acl_id': source_acl_id})
+    row = c.fetchone()
+    if row is not None:
+        source_acl = row[0]
+    else:
+        source_acl = None
+
+    c.execute('SELECT access_level FROM binary_acl WHERE id = %(binary_acl_id)s', {'binary_acl_id': binary_acl_id})
+    row = c.fetchone()
+    if row is not None:
+        binary_acl = row[0]
+    else:
+        binary_acl = None
+
+    if source_acl == 'full' and binary_acl == 'full':
+        return acl_dd
+    elif source_acl == 'dm' and binary_acl == 'full':
+        return acl_dm
+    elif source_acl is None and binary_acl == 'map':
+        return get_buildd_acl_id(c, keyring_id)
+
+    raise Exception('Cannot convert ACL combination automatically: binary_acl={0}, source_acl={1}'.format(binary_acl, source_acl))
+
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        for stmt in statements:
+            c.execute(stmt)
+
+        c.execute("""
+            INSERT INTO acl
+                   (name, allow_new, allow_source, allow_binary, allow_binary_all, allow_binary_only, allow_hijack)
+            VALUES ('dd',       't',          't',          't',              't',               't',          't')
+            RETURNING id""")
+        acl_dd, = c.fetchone()
+
+        c.execute("""
+            INSERT INTO acl
+                   (name, allow_new, allow_source, allow_binary, allow_binary_all, allow_binary_only, allow_per_source, allow_hijack)
+            VALUES ('dm',       'f',          't',          't',              't',               'f',              't',          'f')
+            RETURNING id""")
+        acl_dm, = c.fetchone()
+
+        # convert per-fingerprint ACLs
+
+        c.execute('ALTER TABLE fingerprint ADD COLUMN acl_id INTEGER REFERENCES acl(id)')
+        c.execute("""SELECT id, keyring, source_acl_id, binary_acl_id
+                       FROM fingerprint
+                      WHERE source_acl_id IS NOT NULL OR binary_acl_id IS NOT NULL""")
+        for fingerprint_id, keyring_id, source_acl_id, binary_acl_id in c.fetchall():
+            acl_id = get_acl_id(c, acl_dd, acl_dm, keyring_id, source_acl_id, binary_acl_id)
+            c.execute('UPDATE fingerprint SET acl_id = %(acl_id)s WHERE id = %(fingerprint_id)s',
+                      {'acl_id': acl_id, 'fingerprint_id': fingerprint_id})
+        c.execute("""ALTER TABLE fingerprint
+                       DROP COLUMN source_acl_id,
+                       DROP COLUMN binary_acl_id,
+                       DROP COLUMN binary_reject""")
+
+        # convert per-keyring ACLs
+        c.execute('ALTER TABLE keyrings ADD COLUMN acl_id INTEGER REFERENCES acl(id)')
+        c.execute('SELECT id, default_source_acl_id, default_binary_acl_id FROM keyrings')
+        for keyring_id, source_acl_id, binary_acl_id in c.fetchall():
+            acl_id = get_acl_id(c, acl_dd, acl_dm, keyring_id, source_acl_id, binary_acl_id)
+            c.execute('UPDATE keyrings SET acl_id = %(acl_id)s WHERE id = %(keyring_id)s',
+                      {'acl_id': acl_id, 'keyring_id': keyring_id})
+        c.execute("""ALTER TABLE keyrings
+                       DROP COLUMN default_source_acl_id,
+                       DROP COLUMN default_binary_acl_id,
+                       DROP COLUMN default_binary_reject""")
+
+        c.execute("DROP TABLE keyring_acl_map")
+        c.execute("DROP TABLE binary_acl_map")
+        c.execute("DROP TABLE binary_acl")
+        c.execute("DROP TABLE source_acl")
+
+        # convert upload blocks
+        c.execute("""
+            INSERT INTO acl
+                   (    name, is_global, allow_new, allow_source, allow_binary, allow_binary_all, allow_hijack, allow_binary_only, deny_per_source)
+            VALUES ('blocks',       't',       't',          't',          't',              't',          't',               't',             't')
+            RETURNING id""")
+        acl_block, = c.fetchone()
+        c.execute("SELECT source, fingerprint_id, reason FROM upload_blocks")
+        for source, fingerprint_id, reason in c.fetchall():
+            if fingerprint_id is None:
+                raise Exception(
+                    "ERROR: upload blocks based on uid are no longer supported\n"
+                    "=========================================================\n"
+                    "\n"
+                    "dak now only supports upload blocks based on fingerprints. Please remove\n"
+                    "any uid-specific block by running\n"
+                    "   DELETE FROM upload_blocks WHERE fingerprint_id IS NULL\n"
+                    "and try again.")
+
+            c.execute('INSERT INTO acl_per_source (acl_id, fingerprint_id, source, reason) VALUES (%(acl_id)s, %(fingerprint_id)s, %(source)s, %(reason)s)',
+                      {'acl_id': acl_block, 'fingerprint_id': fingerprint_id, 'source': source, 'reason': reason})
+        c.execute("DROP TABLE upload_blocks")
+
+        c.execute("UPDATE config SET value = '83' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 83, rollback issued. Error message: {0}'.format(msg))
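Illustration (not part of the patch): under the new scheme a DM-style per-source permission is a row in acl_per_source tied to the 'dm' ACL created above; the fingerprint and source below are made up. In practice such entries would presumably be managed through the new "dak acl" command listed earlier in this commit rather than by hand:

    import psycopg2

    conn = psycopg2.connect("dbname=projectb")
    c = conn.cursor()
    c.execute("""
        INSERT INTO acl_per_source (acl_id, fingerprint_id, source, reason)
        VALUES ((SELECT id FROM acl WHERE name = 'dm'),
                (SELECT id FROM fingerprint WHERE fingerprint = %s),
                %s, %s)
    """, ('0123456789ABCDEF0123456789ABCDEF01234567', 'hello', 'requested by the DM'))
    conn.commit()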
diff --git a/dak/dakdb/update84.py b/dak/dakdb/update84.py
new file mode 100644 (file)
index 0000000..c02e723
--- /dev/null
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+add per-suite database permissions
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+statements = [
+"""
+CREATE TABLE suite_permission (
+  suite_id INT NOT NULL REFERENCES suite(id) ON DELETE CASCADE,
+  role TEXT NOT NULL,
+  PRIMARY KEY (suite_id, role)
+)
+""",
+
+"""
+CREATE OR REPLACE FUNCTION has_suite_permission(action TEXT, suite_id INT)
+  RETURNS BOOLEAN
+  STABLE
+  STRICT
+  SET search_path = public, pg_temp
+  LANGUAGE plpgsql
+AS $$
+DECLARE
+  v_result BOOLEAN;
+BEGIN
+
+  IF pg_has_role('ftpteam', 'USAGE') THEN
+    RETURN 't';
+  END IF;
+
+  SELECT BOOL_OR(pg_has_role(sp.role, 'USAGE')) INTO v_result
+    FROM suite_permission sp
+   WHERE sp.suite_id = has_suite_permission.suite_id
+   GROUP BY sp.suite_id;
+
+  IF v_result IS NULL THEN
+    v_result := 'f';
+  END IF;
+
+  RETURN v_result;
+
+END;
+$$
+""",
+
+"""
+CREATE OR REPLACE FUNCTION trigger_check_suite_permission() RETURNS TRIGGER
+SET search_path = public, pg_temp
+LANGUAGE plpgsql
+AS $$
+DECLARE
+  v_row RECORD;
+  v_suite_name suite.suite_name%TYPE;
+BEGIN
+
+  CASE TG_OP
+    WHEN 'INSERT', 'UPDATE' THEN
+      v_row := NEW;
+    WHEN 'DELETE' THEN
+      v_row := OLD;
+    ELSE
+      RAISE EXCEPTION 'Unexpected TG_OP (%)', TG_OP;
+  END CASE;
+
+  IF TG_OP = 'UPDATE' AND OLD.suite != NEW.suite THEN
+    RAISE EXCEPTION 'Cannot change suite';
+  END IF;
+
+  IF NOT has_suite_permission(TG_OP, v_row.suite) THEN
+    SELECT suite_name INTO STRICT v_suite_name FROM suite WHERE id = v_row.suite;
+    RAISE EXCEPTION 'Not allowed to % in %', TG_OP, v_suite_name;
+  END IF;
+
+  RETURN v_row;
+
+END;
+$$
+""",
+
+"""
+CREATE CONSTRAINT TRIGGER trigger_override_permission
+  AFTER INSERT OR UPDATE OR DELETE
+  ON override
+  FOR EACH ROW
+  EXECUTE PROCEDURE trigger_check_suite_permission()
+""",
+
+"""
+CREATE CONSTRAINT TRIGGER trigger_src_associations_permission
+  AFTER INSERT OR UPDATE OR DELETE
+  ON src_associations
+  FOR EACH ROW
+  EXECUTE PROCEDURE trigger_check_suite_permission()
+""",
+
+"""
+CREATE CONSTRAINT TRIGGER trigger_bin_associations_permission
+  AFTER INSERT OR UPDATE OR DELETE
+  ON bin_associations
+  FOR EACH ROW
+  EXECUTE PROCEDURE trigger_check_suite_permission()
+""",
+]
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        for stmt in statements:
+            c.execute(stmt)
+
+        c.execute("UPDATE config SET value = '84' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 84, rollback issued. Error message: {0}'.format(msg))
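Illustration (not part of the patch): has_suite_permission() above gives ftpteam members blanket access and otherwise consults suite_permission, so delegating a suite to another PostgreSQL role is a single insert; the suite and role names below are hypothetical:

    import psycopg2

    conn = psycopg2.connect("dbname=projectb")
    c = conn.cursor()
    c.execute("""
        INSERT INTO suite_permission (suite_id, role)
        VALUES ((SELECT id FROM suite WHERE suite_name = %s), %s)
    """, ('proposed-updates', 'release-team'))
    conn.commit()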
diff --git a/dak/dakdb/update85.py b/dak/dakdb/update85.py
new file mode 100644 (file)
index 0000000..2f0079d
--- /dev/null
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+add per-suite close_bugs option
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        c.execute("ALTER TABLE suite ADD COLUMN close_bugs BOOLEAN")
+
+        c.execute("UPDATE config SET value = '85' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 85, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update86.py b/dak/dakdb/update86.py
new file mode 100755 (executable)
index 0000000..0d2f405
--- /dev/null
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Add the unprivileged group to the database config table
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Joerg Jaspert <joerg@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+        c.execute("INSERT INTO config (name, value) VALUES('unprivgroup', 'dak-unpriv')")
+        c.execute("UPDATE config SET value = '86' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 86, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update87.py b/dak/dakdb/update87.py
new file mode 100644 (file)
index 0000000..18e2509
--- /dev/null
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+add external_files table for security
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Gergely Nagy <algernon@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+statements = [
+"""
+CREATE TABLE external_files (
+    id integer,
+    filename text NOT NULL,
+    size bigint NOT NULL,
+    md5sum text NOT NULL,
+    last_used timestamp with time zone,
+    sha1sum text,
+    sha256sum text,
+    created timestamp with time zone DEFAULT now() NOT NULL,
+    modified timestamp with time zone DEFAULT now() NOT NULL
+);
+""",
+"""
+INSERT INTO config(name, value) VALUES ('use_extfiles', 0);
+"""
+]
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        for stmt in statements:
+            c.execute(stmt)
+
+        c.execute("UPDATE config SET value = '87' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 87, rollback issued. Error message: {0}'.format(msg))
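The new table starts empty and the feature is created disabled (use_extfiles is inserted as 0). Turning it on later would follow the same config-update pattern used by the do_update() functions in these scripts; a small sketch, with an illustrative DSN:

import psycopg2

db = psycopg2.connect("dbname=projectb")      # illustrative DSN
c = db.cursor()
# enable use of the external_files table added above
c.execute("UPDATE config SET value = '1' WHERE name = 'use_extfiles'")
db.commit()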
diff --git a/dak/dakdb/update88.py b/dak/dakdb/update88.py
new file mode 100644 (file)
index 0000000..08d2906
--- /dev/null
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+add per-suite mail whitelists
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        c.execute("ALTER TABLE suite ADD COLUMN mail_whitelist TEXT");
+
+        c.execute("UPDATE config SET value = '88' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 88, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update89.py b/dak/dakdb/update89.py
new file mode 100644 (file)
index 0000000..2ecccf5
--- /dev/null
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+add table to keep track of seen signatures
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        c.execute("""CREATE TABLE signature_history (
+          fingerprint TEXT NOT NULL,
+          signature_timestamp TIMESTAMP NOT NULL,
+          contents_sha1 TEXT NOT NULL,
+          seen TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+          PRIMARY KEY (signature_timestamp, fingerprint, contents_sha1)
+        )""")
+
+        c.execute("UPDATE config SET value = '89' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 89, rollback issued. Error message: {0}'.format(msg))
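signature_history keys on (signature_timestamp, fingerprint, contents_sha1), so recording an already-seen signature violates the primary key. A minimal sketch of how a replay check could lean on that, assuming a psycopg2 connection object and illustrative values; this is not the processing code that consumes the table:

import psycopg2

def record_signature(db, fingerprint, sig_timestamp, contents_sha1):
    """Return True if the signature was new, False if it was seen before."""
    cur = db.cursor()
    try:
        cur.execute("""INSERT INTO signature_history
                         (fingerprint, signature_timestamp, contents_sha1)
                       VALUES (%s, %s, %s)""",
                    (fingerprint, sig_timestamp, contents_sha1))
        db.commit()
        return True
    except psycopg2.IntegrityError:
        # the primary key (signature_timestamp, fingerprint, contents_sha1)
        # already exists: the same signed content was seen before
        db.rollback()
        return False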
old mode 100755 (executable)
new mode 100644 (file)
diff --git a/dak/dakdb/update90.py b/dak/dakdb/update90.py
new file mode 100644 (file)
index 0000000..9b70e98
--- /dev/null
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+add created_by_id and created columns to acl_per_source table
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        c.execute("""ALTER TABLE acl_per_source
+                     ADD COLUMN created_by_id INTEGER REFERENCES fingerprint(id),
+                     ADD COLUMN created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP""")
+
+        c.execute("UPDATE config SET value = '90' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 90, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update91.py b/dak/dakdb/update91.py
new file mode 100644 (file)
index 0000000..7cd278b
--- /dev/null
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+per-queue NEW comments and permissions
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+statements = [
+"""
+ALTER TABLE new_comments
+ADD COLUMN policy_queue_id INTEGER REFERENCES policy_queue(id)
+""",
+
+"""
+UPDATE new_comments
+SET policy_queue_id = (SELECT id FROM policy_queue WHERE queue_name = 'new')
+""",
+
+"""
+ALTER TABLE new_comments ALTER COLUMN policy_queue_id SET NOT NULL
+""",
+
+"""
+CREATE OR REPLACE FUNCTION trigger_check_policy_queue_permission() RETURNS TRIGGER
+SET search_path = public, pg_temp
+LANGUAGE plpgsql
+AS $$
+DECLARE
+  v_row RECORD;
+  v_suite_id suite.id%TYPE;
+  v_policy_queue_name policy_queue.queue_name%TYPE;
+BEGIN
+
+  CASE TG_OP
+    WHEN 'INSERT', 'UPDATE' THEN
+      v_row := NEW;
+    WHEN 'DELETE' THEN
+      v_row := OLD;
+    ELSE
+      RAISE EXCEPTION 'Unexpected TG_OP (%)', TG_OP;
+  END CASE;
+
+  IF TG_OP = 'UPDATE' AND OLD.policy_queue_id != NEW.policy_queue_id THEN
+    RAISE EXCEPTION 'Cannot change policy_queue_id';
+  END IF;
+
+  SELECT suite_id, queue_name INTO STRICT v_suite_id, v_policy_queue_name
+    FROM policy_queue WHERE id = v_row.policy_queue_id;
+  IF NOT has_suite_permission(TG_OP, v_suite_id) THEN
+    RAISE EXCEPTION 'Not allowed to % in %', TG_OP, v_policy_queue_name;
+  END IF;
+
+  RETURN v_row;
+
+END;
+$$
+""",
+
+"""
+CREATE CONSTRAINT TRIGGER trigger_new_comments_permission
+  AFTER INSERT OR UPDATE OR DELETE
+  ON new_comments
+  FOR EACH ROW
+  EXECUTE PROCEDURE trigger_check_policy_queue_permission()
+""",
+]
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        for stmt in statements:
+            c.execute(stmt)
+
+        c.execute("UPDATE config SET value = '91' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 91, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update92.py b/dak/dakdb/update92.py
new file mode 100644 (file)
index 0000000..db88277
--- /dev/null
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+remove per-fingerprint ACLs that are identical to keyring ACL
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+statements = [
+"""
+UPDATE fingerprint f
+   SET acl_id = NULL
+  FROM keyrings k
+ WHERE (f.keyring = k.id AND f.acl_id = k.acl_id)
+    OR f.keyring IS NULL
+""",
+]
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        for stmt in statements:
+            c.execute(stmt)
+
+        c.execute("UPDATE config SET value = '92' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 92, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update93.py b/dak/dakdb/update93.py
new file mode 100644 (file)
index 0000000..7139856
--- /dev/null
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+update world.files-1 view to handle backports archive on ftp-master
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        c.execute("""
+            CREATE OR REPLACE VIEW world."files-1" AS
+              SELECT
+                files.id AS id,
+                component.name || '/' || files.filename AS filename,
+                files.size AS size,
+                files.md5sum AS md5sum,
+                files.sha1sum AS sha1sum,
+                files.sha256sum AS sha256sum,
+                files.last_used AS last_used,
+                files.created AS created,
+                files.modified AS modified
+              FROM files
+              JOIN files_archive_map fam ON files.id = fam.file_id
+              JOIN component ON fam.component_id = component.id
+              WHERE fam.archive_id = (SELECT id FROM archive WHERE name IN ('backports', 'ftp-master', 'security') ORDER BY id LIMIT 1)
+            """)
+
+        c.execute("UPDATE config SET value = '93' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 93, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update94.py b/dak/dakdb/update94.py
new file mode 100644 (file)
index 0000000..c635902
--- /dev/null
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+src_associations_full view
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2013, Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+statements = [
+"""
+CREATE OR REPLACE VIEW src_associations_full AS
+SELECT
+  suite,
+  source,
+  BOOL_AND(extra_source) AS extra_source
+FROM
+  (SELECT sa.suite AS suite, sa.source AS source, FALSE AS extra_source
+     FROM src_associations sa
+   UNION
+   SELECT ba.suite AS suite, esr.src_id AS source_id, TRUE AS extra_source
+     FROM extra_src_references esr
+     JOIN bin_associations ba ON esr.bin_id = ba.bin)
+  AS tmp
+GROUP BY suite, source
+""",
+"""
+COMMENT ON VIEW src_associations_full IS
+  'view including all source packages for a suite, including those referenced by Built-Using'
+""",
+]
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        for stmt in statements:
+            c.execute(stmt)
+
+        c.execute("UPDATE config SET value = '94' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 94, rollback issued. Error message: {0}'.format(msg))
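Per its comment, the view lists every source in a suite and, via BOOL_AND(extra_source), flags those that are present only through a Built-Using reference. A query sketch (the suite id is illustrative):

def built_using_only_sources(db, suite_id):
    # sources present only because a binary's Built-Using references them
    cur = db.cursor()
    cur.execute("""SELECT source FROM src_associations_full
                    WHERE suite = %s AND extra_source""", (suite_id,))
    return [row[0] for row in cur.fetchall()]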
diff --git a/dak/dakdb/update95.py b/dak/dakdb/update95.py
new file mode 100644 (file)
index 0000000..7367d32
--- /dev/null
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Require SHA-1 and SHA-256 checksums in "files" table.
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2013, Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+statements = [
+"""
+ALTER TABLE files
+  ALTER COLUMN sha1sum SET NOT NULL,
+  ALTER COLUMN sha256sum SET NOT NULL
+""",
+]
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        for stmt in statements:
+            c.execute(stmt)
+
+        c.execute("UPDATE config SET value = '95' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 95, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update96.py b/dak/dakdb/update96.py
new file mode 100644 (file)
index 0000000..fad2b55
--- /dev/null
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Add world.suite_summary view.
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2013, Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+statements = [
+"""
+CREATE OR REPLACE VIEW world.suite_summary AS
+    SELECT
+        s.source,
+        s.version,
+        uploader_fpr.fingerprint,
+        suite.suite_name AS distribution,
+        s.created AS date,
+        changed_by.name AS changed,
+        uploader.name AS uploaded
+    FROM source s
+        JOIN src_associations sa ON s.id = sa.source
+        JOIN suite ON sa.suite = suite.id
+        JOIN maintainer changed_by ON s.changedby = changed_by.id
+        LEFT JOIN fingerprint uploader_fpr ON s.sig_fpr = uploader_fpr.id
+        LEFT JOIN uid uploader ON uploader_fpr.uid = uploader.id
+""",
+]
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        for stmt in statements:
+            c.execute(stmt)
+
+        c.execute("UPDATE config SET value = '96' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 96, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update97.py b/dak/dakdb/update97.py
new file mode 100644 (file)
index 0000000..c6f7fd4
--- /dev/null
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Create path entries for changelog exporting
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2013 Luca Falavigna <dktrkranz@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+
+################################################################################
+def do_update(self):
+    """
+    Move changelog-related config values into projectb
+    """
+    print __doc__
+    try:
+        c = self.db.cursor()
+        c.execute("ALTER TABLE archive ADD COLUMN changelog text NULL")
+        c.execute("UPDATE archive SET changelog = '/srv/ftp-master.debian.org/export/changelogs' WHERE name = 'ftp-master'")
+        c.execute("UPDATE archive SET changelog = '/srv/backports-master.debian.org/export/changelogs' WHERE name = 'backports'")
+        c.execute("DELETE FROM config WHERE name = 'exportpath'")
+        c.execute("UPDATE config SET value = '97' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply table-column update 97, rollback issued. Error message : %s' % (str(msg)))
diff --git a/dak/dakdb/update98.py b/dak/dakdb/update98.py
new file mode 100644 (file)
index 0000000..5595bd6
--- /dev/null
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Remove obsolete functions
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2013, Ansgar Burchardt <ansgar@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+statements = [
+    'DROP FUNCTION IF EXISTS bin_associations_id_max()',
+    'DROP FUNCTION IF EXISTS binaries_id_max()',
+    'DROP FUNCTION IF EXISTS dsc_files_id_max()',
+    'DROP FUNCTION IF EXISTS files_id_max()',
+    'DROP FUNCTION IF EXISTS override_type_id_max()',
+    'DROP FUNCTION IF EXISTS priority_id_max()',
+    'DROP FUNCTION IF EXISTS section_id_max()',
+    'DROP FUNCTION IF EXISTS source_id_max()',
+    'DROP AGGREGATE IF EXISTS space_separated_list(TEXT)',
+    'DROP FUNCTION IF EXISTS space_concat(TEXT, TEXT)',
+    'DROP FUNCTION IF EXISTS src_associations_id_max()',
+]
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        for stmt in statements:
+            c.execute(stmt)
+
+        c.execute("UPDATE config SET value = '98' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 98, rollback issued. Error message: {0}'.format(msg))
diff --git a/dak/dakdb/update99.py b/dak/dakdb/update99.py
new file mode 100644 (file)
index 0000000..f701fb7
--- /dev/null
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Add component ordering
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012 Varnish Software AS
+@author: Tollef Fog Heen <tfheen@varnish-software.com>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+statements = [
+"""
+ALTER TABLE component
+ADD COLUMN ordering INTEGER UNIQUE
+""",
+
+"""
+CREATE SEQUENCE component_ordering_seq
+INCREMENT BY 10
+START WITH 100
+OWNED BY component.ordering
+""",
+]
+
+################################################################################
+def do_update(self):
+    print __doc__
+    try:
+        cnf = Config()
+
+        c = self.db.cursor()
+
+        for stmt in statements:
+            c.execute(stmt)
+
+        for component in ('main', 'contrib', 'non-free'):
+            c.execute("UPDATE component SET ordering = nextval('component_ordering_seq') WHERE name = '{0}'".format(component))
+        c.execute("UPDATE component SET ordering = nextval('component_ordering_seq') WHERE ordering IS NULL")
+        c.execute("""ALTER TABLE component ALTER COLUMN ordering SET NOT NULL""")
+        c.execute("""ALTER TABLE component ALTER COLUMN ordering SET DEFAULT nextval('component_ordering_seq')""")
+
+        c.execute("UPDATE config SET value = '99' WHERE name = 'db_revision'")
+
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply sick update 99, rollback issued. Error message: {0}'.format(msg))
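With the sequence starting at 100 and stepping by 10, main, contrib and non-free end up at 100, 110 and 120, and components added later pick up the column default. Consumers can then sort by the new column; a sketch:

def ordered_components(db):
    # main gets 100, contrib 110, non-free 120; new components take the
    # sequence default, so ORDER BY ordering yields the archive order
    cur = db.cursor()
    cur.execute("SELECT name FROM component ORDER BY ordering")
    return [row[0] for row in cur.fetchall()]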
index be8c52203958486c8ae229a300149efa22f09351..1c77ee66b09cf74476721350128db3c16bdb6a13 100755 (executable)
@@ -134,12 +134,19 @@ def main():
         usage()
     if 'Suite' not in Options:
         query_suites = DBConn().session().query(Suite)
-        suites = [suite.suite_name for suite in query_suites.all()]
-        cnf['Obsolete::Options::Suite'] = ','.join(suites)
+        suites = [suite.suite_name for suite in query_suites]
+        cnf['Obsolete::Options::Suite'] = str(','.join(suites))
+
     Logger = daklog.Logger("dominate")
     session = DBConn().session()
     for suite_name in utils.split_args(Options['Suite']):
         suite = session.query(Suite).filter_by(suite_name = suite_name).one()
+
+        # Skip policy queues. We don't want to remove obsolete packages from those.
+        policy_queue = session.query(PolicyQueue).filter_by(suite=suite).first()
+        if policy_queue is not None:
+            continue
+
         if not suite.untouchable or Options['Force']:
             doDaDoDa(suite.suite_id, session)
     if Options['No-Action']:
index 87c7ea9fbb14ed959fe7308be787f6971278312f..63ee8af5ce303d01fbe19afcbff0386d881b9666 100755 (executable)
@@ -56,16 +56,18 @@ import md5
 import apt_pkg
 import apt_inst
 import shutil
-import commands
+import subprocess
 import threading
 
 from daklib import utils
+from daklib.config import Config
 from daklib.dbconn import DBConn, get_component_by_package_suite
 from daklib.gpg import SignedFile
 from daklib.regexes import html_escaping, re_html_escaping, re_version, re_spacestrip, \
                            re_contrib, re_nonfree, re_localhost, re_newlinespace, \
                            re_package, re_doc_directory
 from daklib.dak_exceptions import ChangesUnicodeError
+import daklib.daksubprocess
 
 ################################################################################
 
@@ -77,7 +79,7 @@ printed.copyrights = {}
 package_relations = {}           #: Store relations of packages for later output
 
 # default is to not output html.
-use_html = 0
+use_html = False
 
 ################################################################################
 
@@ -107,7 +109,7 @@ def headline(s, level=2, bodyelement=None):
         if bodyelement:
             return """<thead>
                 <tr><th colspan="2" class="title" onclick="toggle('%(bodyelement)s', 'table-row-group', 'table-row-group')">%(title)s <span class="toggle-msg">(click to toggle)</span></th></tr>
-              </thead>\n"""%{"bodyelement":bodyelement,"title":utils.html_escape(s)}
+              </thead>\n"""%{"bodyelement":bodyelement,"title":utils.html_escape(os.path.basename(s))}
         else:
             return "<h%d>%s</h%d>\n" % (level, utils.html_escape(s), level)
     else:
@@ -231,6 +233,7 @@ def split_depends (d_str) :
 
 def read_control (filename):
     recommends = []
+    predepends = []
     depends = []
     section = ''
     maintainer = ''
@@ -249,6 +252,10 @@ def read_control (filename):
 
     control_keys = control.keys()
 
+    if "Pre-Depends" in control:
+        predepends_str = control["Pre-Depends"]
+        predepends = split_depends(predepends_str)
+
     if "Depends" in control:
         depends_str = control["Depends"]
         # create list of dependancy lists
@@ -285,7 +292,7 @@ def read_control (filename):
         else:
             maintainer = escape_if_needed(maintainer)
 
-    return (control, control_keys, section, depends, recommends, arch, maintainer)
+    return (control, control_keys, section, predepends, depends, recommends, arch, maintainer)
 
 def read_changes_or_dsc (suite, filename, session = None):
     dsc = {}
@@ -414,7 +421,7 @@ def output_package_relations ():
     return foldable_output("Package relations", "relations", to_print)
 
 def output_deb_info(suite, filename, packagename, session = None):
-    (control, control_keys, section, depends, recommends, arch, maintainer) = read_control(filename)
+    (control, control_keys, section, predepends, depends, recommends, arch, maintainer) = read_control(filename)
 
     if control == '':
         return formatted_text("no control info")
@@ -422,7 +429,10 @@ def output_deb_info(suite, filename, packagename, session = None):
     if not package_relations.has_key(packagename):
         package_relations[packagename] = {}
     for key in control_keys :
-        if key == 'Depends':
+        if key == 'Pre-Depends':
+            field_value = create_depends_string(suite, predepends, session)
+            package_relations[packagename][key] = field_value
+        elif key == 'Depends':
             field_value = create_depends_string(suite, depends, session)
             package_relations[packagename][key] = field_value
         elif key == 'Recommends':
@@ -446,23 +456,37 @@ def output_deb_info(suite, filename, packagename, session = None):
         to_print += " "+format_field(key,field_value)+'\n'
     return to_print
 
-def do_command (command, filename, escaped=0):
-    o = os.popen("%s %s" % (command, filename))
-    if escaped:
-        return escaped_text(o.read())
-    else:
-        return formatted_text(o.read())
+def do_command (command, escaped=False):
+    process = daklib.daksubprocess.Popen(command, stdout=subprocess.PIPE)
+    o = process.stdout
+    try:
+        if escaped:
+            return escaped_text(o.read())
+        else:
+            return formatted_text(o.read())
+    finally:
+        process.wait()
 
 def do_lintian (filename):
+    cnf = Config()
+    cmd = []
+
+    user = cnf.get('Dinstall::UnprivUser') or None
+    if user is not None:
+        cmd.extend(['sudo', '-H', '-u', user])
+
+    color = 'always'
     if use_html:
-        return do_command("lintian --show-overrides --color html", filename, 1)
-    else:
-        return do_command("lintian --show-overrides --color always", filename, 1)
+        color = 'html'
+
+    cmd.extend(['lintian', '--show-overrides', '--color', color, "--", filename])
+
+    return do_command(cmd, escaped=True)
 
 def get_copyright (deb_filename):
     global printed
 
-    package = re_package.sub(r'\1', deb_filename)
+    package = re_package.sub(r'\1', os.path.basename(deb_filename))
     o = os.popen("dpkg-deb -c %s | egrep 'usr(/share)?/doc/[^/]*/copyright' | awk '{print $6}' | head -n 1" % (deb_filename))
     cright = o.read()[:-1]
 
@@ -478,30 +502,31 @@ def get_copyright (deb_filename):
     copyrightmd5 = md5.md5(cright).hexdigest()
 
     res = ""
-    if printed.copyrights.has_key(copyrightmd5) and printed.copyrights[copyrightmd5] != "%s (%s)" % (package, deb_filename):
+    if printed.copyrights.has_key(copyrightmd5) and printed.copyrights[copyrightmd5] != "%s (%s)" % (package, os.path.basename(deb_filename)):
         res += formatted_text( "NOTE: Copyright is the same as %s.\n\n" % \
                                (printed.copyrights[copyrightmd5]))
     else:
-        printed.copyrights[copyrightmd5] = "%s (%s)" % (package, deb_filename)
+        printed.copyrights[copyrightmd5] = "%s (%s)" % (package, os.path.basename(deb_filename))
     return res+formatted_text(cright)
 
 def get_readme_source (dsc_filename):
     tempdir = utils.temp_dirname()
     os.rmdir(tempdir)
 
-    cmd = "dpkg-source --no-check --no-copy -x %s %s" % (dsc_filename, tempdir)
-    (result, output) = commands.getstatusoutput(cmd)
-    if (result != 0):
+    cmd = ('dpkg-source', '--no-check', '--no-copy', '-x', dsc_filename, tempdir)
+    try:
+        daklib.daksubprocess.check_output(cmd, stderr=1)
+    except subprocess.CalledProcessError as e:
         res = "How is education supposed to make me feel smarter? Besides, every time I learn something new, it pushes some\n old stuff out of my brain. Remember when I took that home winemaking course, and I forgot how to drive?\n"
         res += "Error, couldn't extract source, WTF?\n"
-        res += "'dpkg-source -x' failed. return code: %s.\n\n" % (result)
-        res += output
+        res += "'dpkg-source -x' failed. return code: %s.\n\n" % (e.returncode)
+        res += e.output
         return res
 
     path = os.path.join(tempdir, 'debian/README.source')
     res = ""
     if os.path.exists(path):
-        res += do_command("cat", path)
+        res += do_command(["cat", "--", path])
     else:
         res += "No README.source in this package\n\n"
 
@@ -514,13 +539,14 @@ def get_readme_source (dsc_filename):
     return res
 
 def check_dsc (suite, dsc_filename, session = None):
-    (dsc) = read_changes_or_dsc(suite, dsc_filename, session)
+    dsc = read_changes_or_dsc(suite, dsc_filename, session)
+    dsc_basename = os.path.basename(dsc_filename)
     return foldable_output(dsc_filename, "dsc", dsc, norow=True) + \
            "\n" + \
-           foldable_output("lintian check for %s" % dsc_filename,
+           foldable_output("lintian check for %s" % dsc_basename,
               "source-lintian", do_lintian(dsc_filename)) + \
            "\n" + \
-           foldable_output("README.source for %s" % dsc_filename,
+           foldable_output("README.source for %s" % dsc_basename,
                "source-readmesource", get_readme_source(dsc_filename))
 
 def check_deb (suite, deb_filename, session = None):
@@ -543,7 +569,7 @@ def check_deb (suite, deb_filename, session = None):
            "binary-%s-lintian"%packagename, do_lintian(deb_filename)) + "\n"
 
     result += foldable_output("contents of %s" % (filename), "binary-%s-contents"%packagename,
-        do_command("dpkg -c", deb_filename)) + "\n"
+                              do_command(["dpkg", "-c", deb_filename])) + "\n"
 
     if is_a_udeb:
         result += foldable_output("skipping copyright for udeb",
@@ -552,9 +578,6 @@ def check_deb (suite, deb_filename, session = None):
         result += foldable_output("copyright of %s" % (filename),
            "binary-%s-copyright"%packagename, get_copyright(deb_filename)) + "\n"
 
-    result += foldable_output("file listing of %s" % (filename),
-       "binary-%s-file-listing"%packagename, do_command("ls -l", deb_filename))
-
     return result
 
 # Read a file, strip the signature and return the modified contents as
@@ -606,7 +629,7 @@ def main ():
 
     if Options["Html-Output"]:
         global use_html
-        use_html = 1
+        use_html = True
 
     stdout_fd = sys.stdout
 
@@ -614,7 +637,9 @@ def main ():
         try:
             if not Options["Html-Output"]:
                 # Pipe output for each argument through less
-                less_fd = os.popen("less -R -", 'w', 0)
+                less_cmd = ("less", "-R", "-")
+                less_process = daklib.daksubprocess.Popen(less_cmd, stdin=subprocess.PIPE, bufsize=0)
+                less_fd = less_process.stdin
                 # -R added to display raw control chars for colour
                 sys.stdout = less_fd
             try:
@@ -633,6 +658,7 @@ def main ():
                 if not Options["Html-Output"]:
                     # Reset stdout here so future less invocations aren't FUBAR
                     less_fd.close()
+                    less_process.wait()
                     sys.stdout = stdout_fd
         except IOError as e:
             if errno.errorcode[e.errno] == 'EPIPE':
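A note on the pattern in the hunks above: commands are now passed as argument lists (e.g. ["dpkg", "-c", deb_filename]) and run through daklib.daksubprocess instead of os.popen/commands.getstatusoutput, so untrusted file names are never interpolated into a shell command line. A minimal sketch of the capture pattern do_command() now uses, with an illustrative file name:

import subprocess
import daklib.daksubprocess

# run a command without a shell and capture its output, as do_command() does
process = daklib.daksubprocess.Popen(["dpkg", "-c", "example.deb"],
                                     stdout=subprocess.PIPE)
output = process.stdout.read()
process.wait()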
diff --git a/dak/export.py b/dak/export.py
new file mode 100644 (file)
index 0000000..42f7550
--- /dev/null
@@ -0,0 +1,74 @@
+#! /usr/bin/env python
+#
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import apt_pkg
+import sys
+
+from daklib.config import Config
+from daklib.dbconn import *
+from daklib.policy import UploadCopy
+
+def usage():
+    print """Usage: dak export -q <queue> [options] -a|--all|<source...>
+
+Export uploads from policy queues, that is the changes files for the given
+source package and all other files associated with that.
+
+ -a --all          export all uploads
+ -c --copy         copy files instead of symlinking them
+ -d <directory>    target directory to export packages to
+                   default: current directory
+ -q <queue>        queue to grab uploads from
+ <source>          source package name to export
+"""
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+
+    arguments = [('h', 'help', 'Export::Options::Help'),
+                 ('a', 'all', 'Export::Options::All'),
+                 ('c', 'copy', 'Export::Options::Copy'),
+                 ('d', 'directory', 'Export::Options::Directory', 'HasArg'),
+                 ('q', 'queue', 'Export::Options::Queue', 'HasArg')]
+
+    cnf = Config()
+    source_names = apt_pkg.parse_commandline(cnf.Cnf, arguments, argv)
+    options = cnf.subtree('Export::Options')
+
+    if 'Help' in options or 'Queue' not in options:
+        usage()
+        sys.exit(0)
+
+    session = DBConn().session()
+
+    queue = session.query(PolicyQueue).filter_by(queue_name=options['Queue']).first()
+    if queue is None:
+        print "Unknown queue '{0}'".format(options['Queue'])
+        sys.exit(1)
+    uploads = session.query(PolicyQueueUpload).filter_by(policy_queue=queue)
+    if 'All' not in options:
+        uploads = uploads.filter(DBChange.source.in_(source_names))
+    directory = options.get('Directory', '.')
+    symlink = 'Copy' not in options
+
+    for u in uploads:
+        UploadCopy(u).export(directory, symlink=symlink, ignore_existing=True)
+
+if __name__ == '__main__':
+    main()
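Going by the usage text above, an invocation would look like "dak export -q new --all -d /srv/export" to export every upload waiting in the new queue, or "dak export -q new -c hello" to export a single source package with copies instead of symlinks (queue, package and path names here are illustrative).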
diff --git a/dak/export_suite.py b/dak/export_suite.py
new file mode 100644 (file)
index 0000000..2377558
--- /dev/null
@@ -0,0 +1,97 @@
+#! /usr/bin/env python
+#
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import apt_pkg
+import os
+import sys
+
+from daklib.config import Config
+from daklib.dbconn import *
+from daklib.fstransactions import FilesystemTransaction
+
+def usage():
+    print """Usage: dak export-suite -s <suite> [options]
+
+Export binaries and sources from a suite to a flat directory structure.
+
+ -c --copy         copy files instead of symlinking them
+ -d <directory>    target directory to export packages to
+                   default: current directory
+ -s <suite>        suite to grab uploads from
+"""
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+
+    arguments = [('h', 'help', 'Export::Options::Help'),
+                 ('c', 'copy', 'Export::Options::Copy'),
+                 ('d', 'directory', 'Export::Options::Directory', 'HasArg'),
+                 ('s', 'suite', 'Export::Options::Suite', 'HasArg')]
+
+    cnf = Config()
+    apt_pkg.parse_commandline(cnf.Cnf, arguments, argv)
+    options = cnf.subtree('Export::Options')
+
+    if 'Help' in options or 'Suite' not in options:
+        usage()
+        sys.exit(0)
+
+    session = DBConn().session()
+
+    suite = session.query(Suite).filter_by(suite_name=options['Suite']).first()
+    if suite is None:
+        print "Unknown suite '{0}'".format(options['Suite'])
+        sys.exit(1)
+
+    directory = options.get('Directory')
+    if not directory:
+        print "No target directory."
+        sys.exit(1)
+
+    symlink = 'Copy' not in options
+
+    binaries = suite.binaries
+    sources = suite.sources
+
+    files = []
+    files.extend([ b.poolfile for b in binaries ])
+    for s in sources:
+        files.extend([ ds.poolfile for ds in s.srcfiles ])
+
+    with FilesystemTransaction() as fs:
+        for f in files:
+            af = session.query(ArchiveFile) \
+                        .join(ArchiveFile.component).join(ArchiveFile.file) \
+                        .filter(ArchiveFile.archive == suite.archive) \
+                        .filter(ArchiveFile.file == f).first()
+            # XXX: Remove later. There was a bug that caused only the *.dsc to
+            # be installed in build queues and we do not want to break them.
+            # The bug was fixed in 55d2c7e6e2418518704623246021021e05b90e58
+            # on 2012-11-04
+            if af is None:
+                af = session.query(ArchiveFile) \
+                            .join(ArchiveFile.component).join(ArchiveFile.file) \
+                            .filter(ArchiveFile.file == f).first()
+            dst = os.path.join(directory, f.basename)
+            if not os.path.exists(dst):
+                fs.copy(af.path, dst, symlink=symlink)
+        fs.commit()
+
+if __name__ == '__main__':
+    main()
diff --git a/dak/find_null_maintainers.py b/dak/find_null_maintainers.py
deleted file mode 100755 (executable)
index 3894f57..0000000
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-
-""" Check for users with no packages in the archive """
-# Copyright (C) 2003, 2006  James Troup <james@nocrew.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-import ldap, sys, time
-import apt_pkg
-
-from daklib.dbconn import *
-from daklib.config import Config
-from daklib.utils import fubar
-
-################################################################################
-
-def usage(exit_code=0):
-    print """Usage: dak find-null-maintainers
-Checks for users with no packages in the archive
-
-  -h, --help                show this help and exit."""
-    sys.exit(exit_code)
-
-################################################################################
-
-def get_ldap_value(entry, value):
-    ret = entry.get(value)
-    if not ret:
-        return ""
-    else:
-        # FIXME: what about > 0 ?
-        return ret[0]
-
-def main():
-    cnf = Config()
-
-    Arguments = [('h',"help","Find-Null-Maintainers::Options::Help")]
-    for i in [ "help" ]:
-        if not cnf.has_key("Find-Null-Maintainers::Options::%s" % (i)):
-            cnf["Find-Null-Maintainers::Options::%s" % (i)] = ""
-
-    apt_pkg.parse_commandline(cnf.Cnf, Arguments, sys.argv)
-
-    Options = cnf.subtree("Find-Null-Maintainers::Options")
-    if Options["Help"]:
-        usage()
-
-    if not cnf.has_key('Import-LDAP-Fingerprints::LDAPServer'):
-        fubar("Import-LDAP-Fingerprints::LDAPServer not configured")
-
-    if not cnf.has_key('Import-LDAP-Fingerprints::LDAPDn'):
-        fubar("Import-LDAP-Fingerprints::LDAPDn not configured")
-
-    session = DBConn().session()
-
-    print "Getting info from the LDAP server..."
-    LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
-    LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
-    l = ldap.open(LDAPServer)
-    l.simple_bind_s("","")
-    Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
-                       "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
-                       ["uid", "cn", "mn", "sn", "createTimestamp"])
-
-
-    db_uid = {}
-    db_unstable_uid = {}
-
-    print "Getting UID info for entire archive..."
-    q = session.execute("SELECT DISTINCT u.uid FROM uid u, fingerprint f WHERE f.uid = u.id")
-    for i in q.fetchall():
-        db_uid[i[0]] = ""
-
-    print "Getting UID info for unstable..."
-    q = session.execute("""
-SELECT DISTINCT u.uid FROM suite su, src_associations sa, source s, fingerprint f, uid u
- WHERE f.uid = u.id AND sa.source = s.id AND sa.suite = su.id
-   AND su.suite_name = 'unstable' AND s.sig_fpr = f.id
-UNION
-SELECT DISTINCT u.uid FROM suite su, bin_associations ba, binaries b, fingerprint f, uid u
- WHERE f.uid = u.id AND ba.bin = b.id AND ba.suite = su.id
-   AND su.suite_name = 'unstable' AND b.sig_fpr = f.id""")
-    for i in q.fetchall():
-        db_unstable_uid[i[0]] = ""
-
-    now = time.time()
-
-    for i in Attrs:
-        entry = i[1]
-        uid = entry["uid"][0]
-        created = time.mktime(time.strptime(entry["createTimestamp"][0][:8], '%Y%m%d'))
-        diff = now - created
-        # 31536000 is 1 year in seconds, i.e. 60 * 60 * 24 * 365
-        if diff < 31536000 / 2:
-            when = "Less than 6 months ago"
-        elif diff < 31536000:
-            when = "Less than 1 year ago"
-        elif diff < 31536000 * 1.5:
-            when = "Less than 18 months ago"
-        elif diff < 31536000 * 2:
-            when = "Less than 2 years ago"
-        elif diff < 31536000 * 3:
-            when = "Less than 3 years ago"
-        else:
-            when = "More than 3 years ago"
-        name = " ".join([get_ldap_value(entry, "cn"),
-                         get_ldap_value(entry, "mn"),
-                         get_ldap_value(entry, "sn")])
-        if not db_uid.has_key(uid):
-            print "NONE %s (%s) %s" % (uid, name, when)
-        else:
-            if not db_unstable_uid.has_key(uid):
-                print "NOT_UNSTABLE %s (%s) %s" % (uid, name, when)
-
-############################################################
-
-if __name__ == '__main__':
-    main()
index db4837617008e815ea2d7bc2c6f519a47b5e920d..6fe9436a72a75c2a8344063c06863466a7a07210 100755 (executable)
@@ -39,7 +39,7 @@ import apt_pkg
 import glob
 
 from daklib import utils
-from daklib.dbconn import get_suite, get_suite_architectures
+from daklib.dbconn import Archive, Component, DBConn, Suite, get_suite, get_suite_architectures
 #from daklib.regexes import re_includeinpdiff
 import re
 re_includeinpdiff = re.compile(r"(Translation-[a-zA-Z_]+\.(?:bz2|xz))")
@@ -57,9 +57,9 @@ def usage (exit_code=0):
 Write out ed-style diffs to Packages/Source lists
 
   -h, --help            show this help and exit
+  -a <archive>          generate diffs for suites in <archive>
   -c                    give the canonical path of the file
   -p                    name for the patch (defaults to current time)
-  -r                    use a different archive root
   -d                    name for the hardlink farm for status
   -m                    how many diffs to generate
   -n                    take no action
@@ -291,9 +291,9 @@ def main():
 
     Cnf = utils.get_conf()
     Arguments = [ ('h', "help", "Generate-Index-Diffs::Options::Help"),
+                  ('a', 'archive', 'Generate-Index-Diffs::Options::Archive', 'hasArg'),
                   ('c', None, "Generate-Index-Diffs::Options::CanonicalPath", "hasArg"),
                   ('p', "patchname", "Generate-Index-Diffs::Options::PatchName", "hasArg"),
-                  ('r', "rootdir", "Generate-Index-Diffs::Options::RootDir", "hasArg"),
                   ('d', "tmpdir", "Generate-Index-Diffs::Options::TempDir", "hasArg"),
                   ('m', "maxdiffs", "Generate-Index-Diffs::Options::MaxDiffs", "hasArg"),
                   ('n', "n-act", "Generate-Index-Diffs::Options::NoAct"),
@@ -311,20 +311,18 @@ def main():
         format = "%Y-%m-%d-%H%M.%S"
         Options["PatchName"] = time.strftime( format )
 
-    AptCnf = apt_pkg.newConfiguration()
-    apt_pkg.ReadConfigFileISC(AptCnf,utils.which_apt_conf_file())
-
-    if Options.has_key("RootDir"):
-        Cnf["Dir::Root"] = Options["RootDir"]
+    session = DBConn().session()
 
     if not suites:
-        suites = Cnf.subtree("Suite").list()
+        query = session.query(Suite.suite_name)
+        if Options.get('Archive'):
+            query = query.join(Suite.archive).filter(Archive.archive_name == Options['Archive'])
+        suites = [ s.suite_name for s in query ]
 
     for suitename in suites:
         print "Processing: " + suitename
-        SuiteBlock = Cnf.subtree("Suite::" + suitename)
 
-        suiteobj = get_suite(suitename.lower())
+        suiteobj = get_suite(suitename.lower(), session=session)
 
         # Use the canonical version of the suite name
         suite = suiteobj.suite_name
@@ -333,12 +331,8 @@ def main():
             print "Skipping: " + suite + " (untouchable)"
             continue
 
-        architectures = get_suite_architectures(suite, skipall=True)
-
-        if SuiteBlock.has_key("Components"):
-            components = SuiteBlock.value_list("Components")
-        else:
-            components = []
+        architectures = get_suite_architectures(suite, skipall=True, session=session)
+        components = [ c.component_name for c in session.query(Component.component_name) ]
 
         suite_suffix = Cnf.find("Dinstall::SuiteSuffix")
         if components and suite_suffix:
@@ -346,22 +340,13 @@ def main():
         else:
             longsuite = suite
 
-        tree = SuiteBlock.get("Tree", "dists/%s" % (longsuite))
-
-        if AptCnf.has_key("tree::%s" % (tree)):
-            sections = AptCnf["tree::%s::Sections" % (tree)].split()
-        elif AptCnf.has_key("bindirectory::%s" % (tree)):
-            sections = AptCnf["bindirectory::%s::Sections" % (tree)].split()
-        else:
-            aptcnf_filename = os.path.basename(utils.which_apt_conf_file())
-            print "ALERT: suite %s not in %s, nor untouchable!" % (suite, aptcnf_filename)
-            continue
+        tree = os.path.join(suiteobj.archive.path, 'dists', longsuite)
 
         # See if there are Translations which might need a new pdiff
         cwd = os.getcwd()
-        for component in sections:
+        for component in components:
             #print "DEBUG: Working on %s" % (component)
-            workpath=os.path.join(Cnf["Dir::Root"], tree, component, "i18n")
+            workpath=os.path.join(tree, component, "i18n")
             if os.path.isdir(workpath):
                 os.chdir(workpath)
                 for dirpath, dirnames, filenames in os.walk(".", followlinks=True, topdown=True):
@@ -380,11 +365,7 @@ def main():
         for archobj in architectures:
             architecture = archobj.arch_string
 
-            # use sections instead of components since dak.conf
-            # treats "foo/bar main" as suite "foo", suitesuffix "bar" and
-            # component "bar/main". suck.
-
-            for component in sections:
+            for component in components:
                 if architecture == "source":
                     longarch = architecture
                     packages = "Sources"
@@ -394,17 +375,13 @@ def main():
                     packages = "Packages"
                     maxsuite = maxpackages
                     # Process Contents
-                    file = "%s/%s/Contents-%s" % (Cnf["Dir::Root"] + tree, component,
-                            architecture)
+                    file = "%s/%s/Contents-%s" % (tree, component, architecture)
                     storename = "%s/%s_%s_contents_%s" % (Options["TempDir"], suite, component, architecture)
-                    genchanges(Options, file + ".diff", storename, file, \
-                      Cnf.get("Suite::%s::Generate-Index-Diffs::MaxDiffs::Contents" % (suite), maxcontents))
+                    genchanges(Options, file + ".diff", storename, file, maxcontents)
 
-                file = "%s/%s/%s/%s" % (Cnf["Dir::Root"] + tree,
-                           component, longarch, packages)
+                file = "%s/%s/%s/%s" % (tree, component, longarch, packages)
                 storename = "%s/%s_%s_%s" % (Options["TempDir"], suite, component, architecture)
-                genchanges(Options, file + ".diff", storename, file, \
-                  Cnf.get("Suite::%s::Generate-Index-Diffs::MaxDiffs::%s" % (suite, packages), maxsuite))
+                genchanges(Options, file + ".diff", storename, file, maxsuite)
 
 ################################################################################
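
With the apt.conf Suite blocks gone, generate-index-diffs derives its inputs from the database: the suite list (optionally restricted to one archive via the new -a option), the component names, and the dists path taken from the suite's archive. A minimal sketch of that query pattern, assuming the daklib.dbconn SQLAlchemy models used above (Suite, Archive, Component) and a purely illustrative archive name:

    from daklib.dbconn import Archive, Component, DBConn, Suite

    session = DBConn().session()
    # Restrict to one archive, as -a/--archive does; 'ftp-master' is only
    # an illustrative archive name.
    query = session.query(Suite.suite_name).join(Suite.archive) \
                   .filter(Archive.archive_name == 'ftp-master')
    suites = [s.suite_name for s in query]
    # Component names now also come straight from the database.
    components = [c.component_name for c in session.query(Component.component_name)]
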
 
index 39b2486da32dd31bee410fc350cec2a7ff62275a..3e01da2347d8dbe26043e0a3000a869f53d1a651 100755 (executable)
@@ -106,11 +106,11 @@ TreeDefault
     apt_trees={}
     apt_trees["di"]={}
 
-    apt_trees["stable"]="""
-tree "dists/stable"
+    apt_trees["oldstable"]="""
+tree "dists/oldstable"
 {
-   FileList "/srv/ftp-master.debian.org/database/dists/stable_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/stable_$(SECTION)_source.list";
+   FileList "/srv/ftp-master.debian.org/database/dists/oldstable_$(SECTION)_binary-$(ARCH).list";
+   SourceFileList "/srv/ftp-master.debian.org/database/dists/oldstable_$(SECTION)_source.list";
    Sections "main contrib non-free";
    Architectures "%(arch)s";
    BinOverride "override.squeeze.$(SECTION)";
@@ -119,10 +119,10 @@ tree "dists/stable"
 };
 """
 
-    apt_trees["di"]["stable"]="""
-tree "dists/stable/main"
+    apt_trees["di"]["oldstable"]="""
+tree "dists/oldstable/main"
 {
-   FileList "/srv/ftp-master.debian.org/database/dists/stable_main_$(SECTION)_binary-$(ARCH).list";
+   FileList "/srv/ftp-master.debian.org/database/dists/oldstable_main_$(SECTION)_binary-$(ARCH).list";
    Sections "debian-installer";
    Architectures "%(arch)s";
    BinOverride "override.squeeze.main.$(SECTION)";
@@ -132,9 +132,9 @@ tree "dists/stable/main"
    %(contentsline)s
 };
 
-tree "dists/stable/non-free"
+tree "dists/oldstable/non-free"
 {
-   FileList "/srv/ftp-master.debian.org/database/dists/stable_non-free_$(SECTION)_binary-$(ARCH).list";
+   FileList "/srv/ftp-master.debian.org/database/dists/oldstable_non-free_$(SECTION)_binary-$(ARCH).list";
    Sections "debian-installer";
    Architectures "%(arch)s";
    BinOverride "override.squeeze.main.$(SECTION)";
@@ -145,188 +145,7 @@ tree "dists/stable/non-free"
 };
 """
 
-    apt_trees["squeeze-updates"]="""
-tree "dists/squeeze-updates"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/squeeze-updates_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/squeeze-updates_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "%(arch)s";
-   BinOverride "override.squeeze.$(SECTION)";
-   ExtraOverride "override.squeeze.extra.$(SECTION)";
-   SrcOverride "override.squeeze.$(SECTION).src";
-   Contents " ";
-};
-"""
-
-    apt_trees["testing"]="""
-tree "dists/testing"
-{
-   FakeDI "dists/unstable";
-   FileList "/srv/ftp-master.debian.org/database/dists/testing_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/testing_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "%(arch)s";
-   BinOverride "override.wheezy.$(SECTION)";
-   ExtraOverride "override.wheezy.extra.$(SECTION)";
-   SrcOverride "override.wheezy.$(SECTION).src";
-};
-"""
-
-    apt_trees["di"]["testing"]="""
-tree "dists/testing/main"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/testing_main_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "%(arch)s";
-   BinOverride "override.wheezy.main.$(SECTION)";
-   SrcOverride "override.wheezy.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   %(contentsline)s
-};
-
-tree "dists/testing/non-free"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/testing_non-free_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "%(arch)s";
-   BinOverride "override.wheezy.main.$(SECTION)";
-   SrcOverride "override.wheezy.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   %(contentsline)s
-};
-"""
-
-    apt_trees["unstable"]="""
-tree "dists/unstable"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/unstable_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/unstable_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "%(arch)s";
-   BinOverride "override.sid.$(SECTION)";
-   ExtraOverride "override.sid.extra.$(SECTION)";
-   SrcOverride "override.sid.$(SECTION).src";
-};
-"""
-    apt_trees["di"]["unstable"]="""
-tree "dists/unstable/main"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/unstable_main_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "%(arch)s";
-   BinOverride "override.sid.main.$(SECTION)";
-   SrcOverride "override.sid.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   %(contentsline)s
-};
-
-tree "dists/unstable/non-free"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/unstable_non-free_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "%(arch)s";
-   BinOverride "override.sid.main.$(SECTION)";
-   SrcOverride "override.sid.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   %(contentsline)s
-};
-"""
 
-    apt_trees["experimental"]="""
-tree "dists/experimental"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/experimental_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/experimental_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "%(arch)s";
-   BinOverride "override.sid.$(SECTION)";
-   SrcOverride "override.sid.$(SECTION).src";
-};
-"""
-    apt_trees["di"]["experimental"]="""
-tree "dists/experimental/main"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/experimental_main_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "%(arch)s";
-   BinOverride "override.sid.main.$(SECTION)";
-   SrcOverride "override.sid.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   %(contentsline)s
-};
-
-tree "dists/experimental/non-free"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/experimental_non-free_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "%(arch)s";
-   BinOverride "override.sid.main.$(SECTION)";
-   SrcOverride "override.sid.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   %(contentsline)s
-};
-"""
-
-    apt_trees["testing-proposed-updates"]="""
-tree "dists/testing-proposed-updates"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/testing-proposed-updates_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/testing-proposed-updates_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "%(arch)s";
-   BinOverride "override.wheezy.$(SECTION)";
-   ExtraOverride "override.wheezy.extra.$(SECTION)";
-   SrcOverride "override.wheezy.$(SECTION).src";
-   Contents " ";
-};
-"""
-    apt_trees["di"]["testing-proposed-updates"]="""
-tree "dists/testing-proposed-updates/main"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/testing-proposed-updates_main_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "%(arch)s";
-   BinOverride "override.wheezy.main.$(SECTION)";
-   SrcOverride "override.wheezy.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   Contents " ";
-};
-"""
-
-    apt_trees["proposed-updates"]="""
-tree "dists/proposed-updates"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/proposed-updates_$(SECTION)_binary-$(ARCH).list";
-   SourceFileList "/srv/ftp-master.debian.org/database/dists/proposed-updates_$(SECTION)_source.list";
-   Sections "main contrib non-free";
-   Architectures "%(arch)s";
-   BinOverride "override.squeeze.$(SECTION)";
-   ExtraOverride "override.squeeze.extra.$(SECTION)";
-   SrcOverride "override.squeeze.$(SECTION).src";
-   Contents " ";
-};
-"""
-    apt_trees["di"]["proposed-updates"]="""
-tree "dists/proposed-updates/main"
-{
-   FileList "/srv/ftp-master.debian.org/database/dists/proposed-updates_main_$(SECTION)_binary-$(ARCH).list";
-   Sections "debian-installer";
-   Architectures "%(arch)s";
-   BinOverride "override.squeeze.main.$(SECTION)";
-   SrcOverride "override.squeeze.main.src";
-   BinCacheDB "packages-debian-installer-$(ARCH).db";
-   Packages::Extensions ".udeb";
-   Contents " ";
-};
-"""
     cnf = Config()
     try:
         # Write apt.conf
@@ -353,7 +172,7 @@ tree "dists/proposed-updates/main"
         # it has error messages we like to see
         os.environ['GZIP'] = '--rsyncable'
         os.chdir(tmppath)
-        (result, output) = commands.getstatusoutput('apt-ftparchive -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
+        (result, output) = commands.getstatusoutput('apt-ftparchive -o APT::FTPArchive::Contents=off -o APT::FTPArchive::SHA512=off generate %s' % os.path.basename(ac_name))
         sn="a-f %s,%s: " % (suite, arch)
         print sn + output.replace('\n', '\n%s' % (sn))
         return result
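
The remaining tree stanzas are plain %-interpolated templates: only the architecture list (and, for the debian-installer trees, a Contents line) is substituted before the text is appended to the generated apt.conf that apt-ftparchive reads. A small sketch of that substitution, with an illustrative architecture list and ac_name standing for the apt.conf written earlier in this script:

    # Illustrative only: fill the oldstable stanza for two architectures and
    # append it to the generated apt.conf.
    stanza = apt_trees["oldstable"] % {"arch": "amd64 i386"}
    with open(ac_name, "a") as fd:
        fd.write(stanza)
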
index 8b4773d8528152f4b6c32a19a38730eb4ae8eba4..7eca3c8cacc0460b858e9f2678fcc52f2a68b75c 100755 (executable)
@@ -34,6 +34,7 @@ def usage():
     print """Usage: dak generate-packages-sources2 [OPTIONS]
 Generate the Packages/Sources files
 
+  -a, --archive=ARCHIVE        process suites in ARCHIVE
   -s, --suite=SUITE            process this suite
                                Default: All suites not marked 'untouchable'
   -f, --force                  Allow processing of untouchable suites
@@ -66,24 +67,35 @@ SELECT
    WHERE s.id=sm.src_id
   )
   ||
-  E'\nDirectory\: pool/' || SUBSTRING(f.filename FROM E'\\A(.*)/[^/]*\\Z')
+  CASE
+    WHEN src_associations_full.extra_source THEN E'\nExtra-Source-Only\: yes'
+    ELSE ''
+  END
   ||
-  E'\nPriority\: ' || pri.priority
+  E'\nDirectory\: pool/' || :component_name || '/' || SUBSTRING(f.filename FROM E'\\A(.*)/[^/]*\\Z')
   ||
-  E'\nSection\: ' || sec.section
+  E'\nPriority\: ' || COALESCE(pri.priority, 'extra')
+  ||
+  E'\nSection\: ' || COALESCE(sec.section, 'misc')
 
 FROM
 
 source s
-JOIN src_associations sa ON s.id = sa.source
+JOIN src_associations_full ON src_associations_full.suite = :suite AND s.id = src_associations_full.source
 JOIN files f ON s.file=f.id
-JOIN override o ON o.package = s.source
-JOIN section sec ON o.section = sec.id
-JOIN priority pri ON o.priority = pri.id
+JOIN files_archive_map fam
+  ON fam.file_id = f.id
+     AND fam.archive_id = (SELECT archive_id FROM suite WHERE id = :suite)
+     AND fam.component_id = :component
+LEFT JOIN override o ON o.package = s.source
+                     AND o.suite = :overridesuite
+                     AND o.component = :component
+                     AND o.type = :dsc_type
+LEFT JOIN section sec ON o.section = sec.id
+LEFT JOIN priority pri ON o.priority = pri.id
 
 WHERE
-  sa.suite = :suite
-  AND o.suite = :overridesuite AND o.component = :component AND o.type = :dsc_type
+  (src_associations_full.extra_source OR o.suite IS NOT NULL)
 
 ORDER BY
 s.source, s.version
@@ -104,6 +116,7 @@ def generate_sources(suite_id, component_id):
     overridesuite_id = suite.get_overridesuite().suite_id
 
     writer_args = {
+            'archive': suite.archive.path,
             'suite': suite.suite_name,
             'component': component.component_name
     }
@@ -113,7 +126,7 @@ def generate_sources(suite_id, component_id):
     output = writer.open()
 
     # run query and write Sources
-    r = session.execute(_sources_query, {"suite": suite_id, "component": component_id, "dsc_type": dsc_type, "overridesuite": overridesuite_id})
+    r = session.execute(_sources_query, {"suite": suite_id, "component": component_id, "component_name": component.component_name, "dsc_type": dsc_type, "overridesuite": overridesuite_id})
     for (stanza,) in r:
         print >>output, stanza
         print >>output, ""
@@ -147,12 +160,12 @@ WITH
       binaries b
       JOIN bin_associations ba ON b.id = ba.bin
       JOIN files f ON f.id = b.file
-      JOIN location l ON l.id = f.location
+      JOIN files_archive_map fam ON f.id = fam.file_id AND fam.archive_id = :archive_id
       JOIN source s ON b.source = s.id
     WHERE
       (b.architecture = :arch_all OR b.architecture = :arch) AND b.type = :type_name
       AND ba.suite = :suite
-      AND l.component = :component
+      AND fam.component_id = :component
   )
 
 SELECT
@@ -181,7 +194,7 @@ SELECT
   ), '')
   || E'\nSection\: ' || sec.section
   || E'\nPriority\: ' || pri.priority
-  || E'\nFilename\: pool/' || tmp.filename
+  || E'\nFilename\: pool/' || :component_name || '/' || tmp.filename
   || E'\nSize\: ' || tmp.size
   || E'\nMD5sum\: ' || tmp.md5sum
   || E'\nSHA1\: ' || tmp.sha1sum
@@ -233,6 +246,7 @@ def generate_packages(suite_id, component_id, architecture_id, type_name):
         metadata_skip.append("Description-md5")
 
     writer_args = {
+            'archive': suite.archive.path,
             'suite': suite.suite_name,
             'component': component.component_name,
             'architecture': architecture.arch_string,
@@ -243,7 +257,8 @@ def generate_packages(suite_id, component_id, architecture_id, type_name):
     writer = PackagesFileWriter(**writer_args)
     output = writer.open()
 
-    r = session.execute(_packages_query, {"suite": suite_id, "component": component_id,
+    r = session.execute(_packages_query, {"archive_id": suite.archive.archive_id,
+        "suite": suite_id, "component": component_id, 'component_name': component.component_name,
         "arch": architecture_id, "type_id": type_id, "type_name": type_name, "arch_all": arch_all_id,
         "overridesuite": overridesuite_id, "metadata_skip": metadata_skip,
         "include_long_description": 'true' if include_long_description else 'false'})
@@ -301,6 +316,7 @@ def generate_translations(suite_id, component_id):
     component = session.query(Component).get(component_id)
 
     writer_args = {
+            'archive': suite.archive.path,
             'suite': suite.suite_name,
             'component': component.component_name,
             'language': 'en',
@@ -329,6 +345,7 @@ def main():
     cnf = Config()
 
     Arguments = [('h',"help","Generate-Packages-Sources::Options::Help"),
+                 ('a','archive','Generate-Packages-Sources::Options::Archive','HasArg'),
                  ('s',"suite","Generate-Packages-Sources::Options::Suite"),
                  ('f',"force","Generate-Packages-Sources::Options::Force"),
                  ('o','option','','ArbItem')]
@@ -347,7 +364,7 @@ def main():
 
     logger = daklog.Logger('generate-packages-sources2')
 
-    from daklib.dbconn import Component, DBConn, get_suite, Suite
+    from daklib.dbconn import Component, DBConn, get_suite, Suite, Archive
     session = DBConn().session()
     session.execute("SELECT add_missing_description_md5()")
     session.commit()
@@ -362,11 +379,13 @@ def main():
                 print "I: Cannot find suite %s" % s
                 logger.log(['Cannot find suite %s' % s])
     else:
-        suites = session.query(Suite).filter(Suite.untouchable == False).all()
+        query = session.query(Suite).filter(Suite.untouchable == False)
+        if 'Archive' in Options:
+            query = query.join(Suite.archive).filter(Archive.archive_name==Options['Archive'])
+        suites = query.all()
 
     force = Options.has_key("Force") and Options["Force"]
 
-    component_ids = [ c.component_id for c in session.query(Component).all() ]
 
     def parse_results(message):
         # Split out into (code, msg)
@@ -379,9 +398,10 @@ def main():
             logger.log(['E: ', msg])
 
     for s in suites:
+        component_ids = [ c.component_id for c in s.components ]
         if s.untouchable and not force:
-            import utils
-            utils.fubar("Refusing to touch %s (untouchable and not forced)" % s.suite_name)
+            import daklib.utils
+            daklib.utils.fubar("Refusing to touch %s (untouchable and not forced)" % s.suite_name)
         for c in component_ids:
             pool.apply_async(generate_sources, [s.suite_id, c], callback=parse_results)
             if not s.include_long_description:
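
In both the Sources and Packages queries, the Directory and Filename fields are now assembled from the component name plus the filename stored in the files table, which with the new files_archive_map no longer appears to carry a component prefix. A small illustration with hypothetical values, mirroring the 'pool/' || :component_name || '/' || ... concatenation above:

    import os

    # Hypothetical component and pool-relative filename.
    component_name = "main"
    stored_filename = "h/hello/hello_2.8-1.dsc"   # as stored in files.filename
    directory = "pool/%s/%s" % (component_name, os.path.dirname(stored_filename))
    filename = "pool/%s/%s" % (component_name, stored_filename)
    # directory -> 'pool/main/h/hello'
    # filename  -> 'pool/main/h/hello/hello_2.8-1.dsc'
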
index 3801132db5b72298c32f2a6fdfbc51916002183b..e483e6c22bba3f977e96d3491bffa302fce10dbc 100755 (executable)
@@ -60,11 +60,13 @@ def usage (exit_code=0):
     print """Usage: dak generate-releases [OPTIONS]
 Generate the Release files
 
+  -a, --archive=ARCHIVE      process suites in ARCHIVE
   -s, --suite=SUITE(s)       process this suite
                              Default: All suites not marked 'untouchable'
   -f, --force                Allow processing of untouchable suites
                              CAREFUL: Only to be used at (point) release time!
   -h, --help                 show this help and exit
+  -q, --quiet                Don't output progress
 
 SUITE can be a space-separated list, e.g.
    --suite=unstable testing
@@ -81,7 +83,7 @@ def sign_release_dir(suite, dirname):
         if cnf.has_key("Dinstall::SigningPubKeyring"):
             keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
 
-        arguments = "--no-options --batch --no-tty --armour"
+        arguments = "--no-options --batch --no-tty --armour --personal-digest-preferences=SHA256"
 
         relname = os.path.join(dirname, 'Release')
 
@@ -93,20 +95,14 @@ def sign_release_dir(suite, dirname):
         if os.path.exists(inlinedest):
             os.unlink(inlinedest)
 
-        # We can only use one key for inline signing so use the first one in
-        # the array for consistency
-        firstkey = True
+        defkeyid=""
+        for keyid in suite.signingkeys or []:
+            defkeyid += "--local-user %s " % keyid
 
-        for keyid in suite.signingkeys:
-            defkeyid = "--default-key %s" % keyid
-
-            os.system("gpg %s %s %s --detach-sign <%s >>%s" %
-                    (keyring, defkeyid, arguments, relname, dest))
-
-            if firstkey:
-                os.system("gpg %s %s %s --clearsign <%s >>%s" %
-                        (keyring, defkeyid, arguments, relname, inlinedest))
-                firstkey = False
+        os.system("gpg %s %s %s --detach-sign <%s >>%s" %
+                  (keyring, defkeyid, arguments, relname, dest))
+        os.system("gpg %s %s %s --clearsign <%s >>%s" %
+                  (keyring, defkeyid, arguments, relname, inlinedest))
 
 class ReleaseWriter(object):
     def __init__(self, suite):
@@ -145,16 +141,20 @@ class ReleaseWriter(object):
 
         cnf = Config()
 
-        suite_suffix = "%s" % (cnf.find("Dinstall::SuiteSuffix"))
+        suite_suffix = cnf.find("Dinstall::SuiteSuffix", "")
 
-        outfile = os.path.join(cnf["Dir::Root"], 'dists', "%s/%s" % (suite.suite_name, suite_suffix), "Release")
+        outfile = os.path.join(suite.archive.path, 'dists', suite.suite_name, suite_suffix, "Release")
         out = open(outfile + ".new", "w")
 
         for key, dbfield in attribs:
             if getattr(suite, dbfield) is not None:
                 # TEMPORARY HACK HACK HACK until we change the way we store the suite names etc
                 if key == 'Suite' and getattr(suite, dbfield) == 'squeeze-updates':
+                    out.write("Suite: oldstable-updates\n")
+                elif key == 'Suite' and getattr(suite, dbfield) == 'wheezy-updates':
                     out.write("Suite: stable-updates\n")
+                elif key == 'Suite' and getattr(suite, dbfield) == 'jessie-updates':
+                    out.write("Suite: testing-updates\n")
                 else:
                     out.write("%s: %s\n" % (key, getattr(suite, dbfield)))
 
@@ -170,11 +170,9 @@ class ReleaseWriter(object):
 
         out.write("Architectures: %s\n" % (" ".join([a.arch_string for a in architectures])))
 
-        ## FIXME: Components need to be adjusted to whatever will be in the db
-        ## Needs putting in the DB
-        components = ['main', 'contrib', 'non-free']
+        components = [ c.component_name for c in suite.components ]
 
-        out.write("Components: %s\n" % ( " ".join(map(lambda x: "%s%s" % (suite_suffix, x), components ))))
+        out.write("Components: %s\n" % (" ".join(components)))
 
         # For exact compatibility with old g-r, write out Description here instead
         # of with the rest of the DB fields above
@@ -182,7 +180,7 @@ class ReleaseWriter(object):
             out.write("Description: %s\n" % suite.description)
 
         for comp in components:
-            for dirpath, dirnames, filenames in os.walk("%sdists/%s/%s%s" % (cnf["Dir::Root"], suite.suite_name, suite_suffix, comp), topdown=True):
+            for dirpath, dirnames, filenames in os.walk(os.path.join(suite.archive.path, "dists", suite.suite_name, suite_suffix, comp), topdown=True):
                 if not re_gensubrelease.match(dirpath):
                     continue
 
@@ -214,7 +212,7 @@ class ReleaseWriter(object):
         # their checksums to the main Release file
         oldcwd = os.getcwd()
 
-        os.chdir("%sdists/%s/%s" % (cnf["Dir::Root"], suite.suite_name, suite_suffix))
+        os.chdir(os.path.join(suite.archive.path, "dists", suite.suite_name, suite_suffix))
 
         hashfuncs = { 'MD5Sum' : apt_pkg.md5sum,
                       'SHA1' : apt_pkg.sha1sum,
@@ -292,13 +290,15 @@ def main ():
 
     cnf = Config()
 
-    for i in ["Help", "Suite", "Force"]:
+    for i in ["Help", "Suite", "Force", "Quiet"]:
         if not cnf.has_key("Generate-Releases::Options::%s" % (i)):
             cnf["Generate-Releases::Options::%s" % (i)] = ""
 
     Arguments = [('h',"help","Generate-Releases::Options::Help"),
+                 ('a','archive','Generate-Releases::Options::Archive','HasArg'),
                  ('s',"suite","Generate-Releases::Options::Suite"),
                  ('f',"force","Generate-Releases::Options::Force"),
+                 ('q',"quiet","Generate-Releases::Options::Quiet"),
                  ('o','option','','ArbItem')]
 
     suite_names = apt_pkg.parse_commandline(cnf.Cnf, Arguments, sys.argv)
@@ -308,6 +308,7 @@ def main ():
         usage()
 
     Logger = daklog.Logger('generate-releases')
+    pool = DakProcessPool()
 
     session = DBConn().session()
 
@@ -321,19 +322,21 @@ def main ():
                 print "cannot find suite %s" % s
                 Logger.log(['cannot find suite %s' % s])
     else:
-        suites = session.query(Suite).filter(Suite.untouchable == False).all()
+        query = session.query(Suite).filter(Suite.untouchable == False)
+        if 'Archive' in Options:
+            query = query.join(Suite.archive).filter(Archive.archive_name==Options['Archive'])
+        suites = query.all()
 
     broken=[]
 
-    pool = DakProcessPool()
-
     for s in suites:
         # Setup a multiprocessing Pool. As many workers as we have CPU cores.
         if s.untouchable and not Options["Force"]:
             print "Skipping %s (untouchable)" % s.suite_name
             continue
 
-        print "Processing %s" % s.suite_name
+        if not Options["Quiet"]:
+            print "Processing %s" % s.suite_name
         Logger.log(['Processing release file for Suite: %s' % (s.suite_name)])
         pool.apply_async(generate_helper, (s.suite_id, ))
 
diff --git a/dak/import.py b/dak/import.py
new file mode 100644 (file)
index 0000000..36d965e
--- /dev/null
@@ -0,0 +1,241 @@
+#! /usr/bin/env python
+#
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import apt_pkg
+import os
+import sys
+
+from daklib.dbconn import *
+import daklib.archive
+import daklib.config
+import daklib.daklog
+import daklib.upload
+import daklib.regexes
+
+def usage():
+    print """Usage:
+
+dak import <suite> <component> <files...>
+dak import -D|--dump <file> <suite> <component>
+dak import -E|--export-dump <suite> <component>
+
+WARNING: This command does no sanity checks. Only use it on trusted packages.
+
+Options:
+  -h, --help:             show this help message
+  -a, --add-overrides:    add missing overrides automatically
+  -c, --changed-by:       Changed-By for imported source packages
+                          (default: maintainer)
+  -D, --dump <file>:      Import all files listed in <file>. The format
+                          is described below.
+  -E, --export-dump:      Export list of files in the format required
+                          by dak import --dump.
+  -s, --ignore-signature: ignore signature for imported source packages
+
+File format used by --dump:
+
+  <filename>:<size>:<md5>:<sha1>:<sha256>:[<fingerprint>]:[<changed-by>]
+"""
+
+def import_source(log, transaction, suite, component, directory, hashed_file,
+                  fingerprint=None, changed_by=None,
+                  keyrings=None, require_signature=True, add_overrides=False):
+    if keyrings is None:
+        keyrings = []
+    filename = hashed_file.filename
+    session = transaction.session
+
+    source = daklib.upload.Source(directory, [hashed_file], keyrings, require_signature)
+    if source.valid_signature:
+        fingerprint = session.query(Fingerprint).filter_by(fingerprint=source.primary_fingerprint).first()
+    if changed_by is None:
+        changed_by = source.dsc['Maintainer']
+    db_changed_by = get_or_set_maintainer(changed_by, session)
+
+    transaction.install_source(directory, source, suite, component, db_changed_by, fingerprint=fingerprint)
+    log.log(['import-source', suite.suite_name, component.component_name, filename])
+
+    if add_overrides and not session.query(Override).filter_by(suite=suite.get_overridesuite(), component=component, package=source.dsc['Source']).join(OverrideType).filter(OverrideType.overridetype == 'dsc').first():
+        overridetype = session.query(OverrideType).filter_by(overridetype='dsc').one()
+        overridesuite = suite.get_overridesuite()
+        section_name = 'misc'
+        if component.component_name != 'main':
+            section_name = "{0}/{1}".format(component.component_name, section_name)
+        section = session.query(Section).filter_by(section=section_name).one()
+        priority = session.query(Priority).filter_by(priority='extra').one()
+
+        override = Override(package=source.dsc['Source'], suite=overridesuite, component=component,
+                            section=section, priority=priority, overridetype=overridetype)
+        session.add(override)
+        log.log(['add-source-override', suite.suite_name, component.component_name, source.dsc['Source'], section.section, priority.priority])
+
+def import_binary(log, transaction, suite, component, directory, hashed_file, fingerprint=None, add_overrides=False):
+    filename = hashed_file.filename
+    session = transaction.session
+
+    binary = daklib.upload.Binary(directory, hashed_file)
+    transaction.install_binary(directory, binary, suite, component, fingerprint=fingerprint)
+    log.log(['import-binary', suite.suite_name, component.component_name, filename])
+
+    if add_overrides and not session.query(Override).filter_by(suite=suite.get_overridesuite(), component=component, package=binary.control['Package']).join(OverrideType).filter(OverrideType.overridetype == binary.type).first():
+        overridetype = session.query(OverrideType).filter_by(overridetype=binary.type).one()
+        overridesuite = suite.get_overridesuite()
+        section = session.query(Section).filter_by(section=binary.control['Section']).one()
+        priority = session.query(Priority).filter_by(priority=binary.control['Priority']).one()
+
+        override = Override(package=binary.control['Package'], suite=overridesuite, component=component,
+                            section=section, priority=priority, overridetype=overridetype)
+        session.add(override)
+        log.log(['add-binary-override', suite.suite_name, component.component_name, binary.control['Package'], section.section, priority.priority])
+
+def import_file(log, transaction, suite, component, directory, hashed_file,
+                fingerprint=None, changed_by=None, keyrings=None, require_signature=True,
+                add_overrides = False):
+    filename = hashed_file.filename
+    if daklib.regexes.re_file_binary.match(filename):
+        import_binary(log, transaction, suite, component, directory, hashed_file,
+                      fingerprint=fingerprint, add_overrides=add_overrides)
+    elif daklib.regexes.re_file_dsc.match(filename):
+        import_source(log, transaction, suite, component, directory, hashed_file,
+                      fingerprint=fingerprint, changed_by=changed_by, keyrings=keyrings,
+                      require_signature=require_signature, add_overrides=add_overrides)
+    else:
+        raise Exception('File is neither source nor binary package: {0}'.format(filename))
+
+def import_dump(log, transaction, suite, component, fh,
+                keyrings=None, require_signature=True, add_overrides=False):
+    session = transaction.session
+    for line in fh:
+        path, size, md5, sha1, sha256, fpr, changed_by = line.strip().split(':', 6)
+
+        if not changed_by:
+            changed_by = None
+        fingerprint = None
+        if fpr:
+            fingerprint = session.query(Fingerprint).filter_by(fingerprint=fpr).first()
+            if fingerprint is None:
+                print 'W: {0}: unknown fingerprint {1}'.format(path, fpr)
+
+        directory, filename = os.path.split(os.path.abspath(path))
+        hashed_file = daklib.upload.HashedFile(filename, long(size), md5, sha1, sha256)
+        hashed_file.check(directory)
+
+        import_file(log, transaction, suite, component, directory, hashed_file,
+                    fingerprint=fingerprint, changed_by=changed_by,
+                    keyrings=keyrings, require_signature=require_signature, add_overrides=add_overrides)
+
+        transaction.commit()
+
+_export_query = r"""
+WITH
+tmp AS
+  (SELECT 1 AS order, s.file AS file_id, s.sig_fpr AS fingerprint_id, s.changedby AS changed_by, sa.suite AS suite_id
+     FROM source s
+     JOIN src_associations sa ON sa.source = s.id
+   UNION
+   SELECT 2 AS order, b.file AS file_id, b.sig_fpr AS fingerprint_id, NULL, ba.suite AS suite_id
+     FROM binaries b
+     JOIN bin_associations ba ON ba.bin = b.id
+  )
+
+SELECT
+  f.filename, f.size::TEXT, f.md5sum, f.sha1sum, f.sha256sum, COALESCE(fpr.fingerprint, ''), COALESCE(m.name, '')
+FROM files f
+JOIN tmp ON f.id = tmp.file_id
+JOIN suite ON suite.id = tmp.suite_id
+JOIN files_archive_map fam ON fam.file_id = f.id AND fam.archive_id = suite.archive_id
+LEFT JOIN fingerprint fpr ON fpr.id = tmp.fingerprint_id
+LEFT JOIN maintainer m ON m.id = tmp.changed_by
+
+WHERE
+  suite.id = :suite_id
+  AND fam.component_id = :component_id
+
+ORDER BY tmp.order, f.filename;
+"""
+
+def export_dump(transaction, suite, component):
+    session = transaction.session
+    query = session.execute(_export_query,
+                            {'suite_id': suite.suite_id,
+                             'component_id': component.component_id})
+    for row in query:
+        print ":".join(row)
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+
+    arguments = [
+        ('h', 'help', 'Import::Options::Help'),
+        ('a', 'add-overrides', 'Import::Options::AddOverrides'),
+        ('c', 'changed-by', 'Import::Options::ChangedBy', 'HasArg'),
+        ('D', 'dump', 'Import::Options::Dump', 'HasArg'),
+        ('E', 'export-dump', 'Import::Options::Export'),
+        ('s', 'ignore-signature', 'Import::Options::IgnoreSignature'),
+        ]
+
+    cnf = daklib.config.Config()
+    cnf['Import::Options::Dummy'] = ''
+    argv = apt_pkg.parse_commandline(cnf.Cnf, arguments, argv)
+    options = cnf.subtree('Import::Options')
+
+    if 'Help' in options or len(argv) < 2:
+        usage()
+        sys.exit(0)
+
+    suite_name = argv[0]
+    component_name = argv[1]
+
+    add_overrides = options.find_b('AddOverrides')
+    require_signature = not options.find_b('IgnoreSignature')
+    changed_by = options.find('ChangedBy') or None
+
+    log = daklib.daklog.Logger('import')
+
+    with daklib.archive.ArchiveTransaction() as transaction:
+        session = transaction.session
+        suite = session.query(Suite).filter_by(suite_name=suite_name).one()
+        component = session.query(Component).filter_by(component_name=component_name).one()
+        keyrings = session.query(Keyring).filter_by(active=True).order_by(Keyring.priority)
+        keyring_files = [ k.keyring_name for k in keyrings ]
+
+        dump = options.find('Dump') or None
+        if options.find_b('Export'):
+            export_dump(transaction, suite, component)
+            transaction.rollback()
+        elif dump is not None:
+            with open(dump, 'r') as fh:
+                import_dump(log, transaction, suite, component, fh, keyring_files,
+                            require_signature=require_signature, add_overrides=add_overrides)
+            transaction.commit()
+        else:
+            files = argv[2:]
+            for f in files:
+                directory, filename = os.path.split(os.path.abspath(f))
+                hashed_file = daklib.upload.HashedFile.from_file(directory, filename)
+                import_file(log, transaction, suite, component, directory, hashed_file,
+                            changed_by=changed_by,
+                            keyrings=keyring_files, require_signature=require_signature,
+                            add_overrides=add_overrides)
+            transaction.commit()
+
+    log.close()
+
+if __name__ == '__main__':
+    main()
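
For --dump mode, each input line carries seven colon-separated fields (filename, size, md5, sha1, sha256, optional fingerprint, optional changed-by), matching what --export-dump prints. A minimal sketch of one such line and the split used by import_dump() above; the angle-bracketed fields are placeholders for real hex digests:

    # Illustrative only: the two trailing empty fields show that fingerprint
    # and changed-by may be omitted.
    line = "pool/main/h/hello/hello_2.8-1.dsc:1335:<md5sum>:<sha1sum>:<sha256sum>::"
    path, size, md5, sha1, sha256, fpr, changed_by = line.strip().split(':', 6)
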
index 89c2b755dc5942f56e8b28d42afa530a0ab73ea9..3b53bff39f5973a496be04d730b3e34f1e601b7b 100755 (executable)
@@ -177,14 +177,9 @@ def main():
 
         changes.append((db_uid_byid.get(u, [None])[0], "Removed key: %s" % (f)))
         session.execute("""UPDATE fingerprint
-                              SET keyring = NULL,
-                                  source_acl_id = NULL,
-                                  binary_acl_id = NULL,
-                                  binary_reject = TRUE
+                              SET keyring = NULL
                             WHERE id = :fprid""", {'fprid': fid})
 
-        session.execute("""DELETE FROM binary_acl_map WHERE fingerprint_id = :fprid""", {'fprid': fid})
-
     # For the keys in this keyring, add/update any fingerprints that've
     # changed.
 
@@ -208,19 +203,9 @@ def main():
             if newuid:
                 fp.uid_id = newuid
 
-            fp.binary_acl_id = keyring.default_binary_acl_id
-            fp.source_acl_id = keyring.default_source_acl_id
-            fp.default_binary_reject = keyring.default_binary_reject
             session.add(fp)
             session.flush()
 
-            for k in keyring.keyring_acl_map:
-                ba = BinaryACLMap()
-                ba.fingerprint_id = fp.fingerprint_id
-                ba.architecture_id = k.architecture_id
-                session.add(ba)
-                session.flush()
-
         else:
             if newuid and olduid != newuid and olduid == -1:
                 changes.append((newuiduid, "Linked key: %s" % f))
@@ -245,29 +230,14 @@ def main():
 
                 # Only change the keyring if it won't result in a loss of permissions
                 if movekey:
-                    session.execute("""DELETE FROM binary_acl_map WHERE fingerprint_id = :fprid""", {'fprid': oldfid})
-
                     session.execute("""UPDATE fingerprint
-                                          SET keyring = :keyring,
-                                              source_acl_id = :source_acl_id,
-                                              binary_acl_id = :binary_acl_id,
-                                              binary_reject = :binary_reject
+                                          SET keyring = :keyring
                                         WHERE id = :fpr""",
                                     {'keyring': keyring.keyring_id,
-                                     'source_acl_id': keyring.default_source_acl_id,
-                                     'binary_acl_id': keyring.default_binary_acl_id,
-                                     'binary_reject': keyring.default_binary_reject,
                                      'fpr': oldfid})
 
                     session.flush()
 
-                    for k in keyring.keyring_acl_map:
-                        ba = BinaryACLMap()
-                        ba.fingerprint_id = oldfid
-                        ba.architecture_id = k.architecture_id
-                        session.add(ba)
-                        session.flush()
-
                 else:
                     print "Key %s exists in both %s and %s keyrings. Not demoting." % (f,
                                                                                        oldkeyring.keyring_name,
diff --git a/dak/import_known_changes.py b/dak/import_known_changes.py
deleted file mode 100755 (executable)
index 4e8068f..0000000
+++ /dev/null
@@ -1,355 +0,0 @@
-#!/usr/bin/env python
-# coding=utf8
-
-"""
-Import known_changes files
-
-@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2009  Mike O'Connor <stew@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-
-################################################################################
-
-import sys
-import os
-import logging
-import threading
-from daklib.dbconn import DBConn, get_dbchange, get_policy_queue
-from daklib.config import Config
-import apt_pkg
-from daklib.dak_exceptions import DBUpdateError, InvalidDscError, ChangesUnicodeError
-from daklib.changes import Changes
-from daklib.utils import parse_changes, warn, gpgv_get_status_output, process_gpgv_output
-import traceback
-
-# where in dak.conf all of our configuration will be stowed
-options_prefix = "KnownChanges"
-options_prefix = "%s::Options" % options_prefix
-
-log = logging.getLogger()
-
-################################################################################
-
-
-def usage (exit_code=0):
-    print """Usage: dak import-known-changes [options]
-
-OPTIONS
-     -j n
-        run with n threads concurrently
-
-     -v, --verbose
-        show verbose information messages
-
-     -q, --quiet
-        suppress all output but errors
-
-"""
-    sys.exit(exit_code)
-
-def check_signature (sig_filename, data_filename=""):
-    fingerprint = None
-
-    keyrings = [
-        "/home/joerg/keyring/keyrings/debian-keyring.gpg",
-        "/home/joerg/keyring/keyrings/debian-maintainers.gpg",
-        "/home/joerg/keyring/keyrings/debian-role-keys.gpg",
-        "/home/joerg/keyring/keyrings/emeritus-keyring.pgp",
-        "/home/joerg/keyring/keyrings/emeritus-keyring.gpg",
-        "/home/joerg/keyring/keyrings/removed-keys.gpg",
-        "/home/joerg/keyring/keyrings/removed-keys.pgp"
-        ]
-
-    keyringargs = " ".join(["--keyring %s" % x for x in keyrings ])
-
-    # Build the command line
-    status_read, status_write = os.pipe()
-    cmd = "gpgv --status-fd %s %s %s" % (status_write, keyringargs, sig_filename)
-
-    # Invoke gpgv on the file
-    (output, status, exit_status) = gpgv_get_status_output(cmd, status_read, status_write)
-
-    # Process the status-fd output
-    (keywords, internal_error) = process_gpgv_output(status)
-
-    # If we failed to parse the status-fd output, let's just whine and bail now
-    if internal_error:
-        warn("Couldn't parse signature")
-        return None
-
-    # usually one would check for bad things here. We, however, do not care.
-
-    # Next check gpgv exited with a zero return code
-    if exit_status:
-        warn("Couldn't parse signature")
-        return None
-
-    # Sanity check the good stuff we expect
-    if not keywords.has_key("VALIDSIG"):
-        warn("Couldn't parse signature")
-    else:
-        args = keywords["VALIDSIG"]
-        if len(args) < 1:
-            warn("Couldn't parse signature")
-        else:
-            fingerprint = args[0]
-
-    return fingerprint
-
-
-class EndOfChanges(object):
-    """something enqueued to signify the last change"""
-    pass
-
-
-class OneAtATime(object):
-    """
-    a one space queue which sits between multiple possible producers
-    and multiple possible consumers
-    """
-    def __init__(self):
-        self.next_in_line = None
-        self.read_lock = threading.Condition()
-        self.write_lock = threading.Condition()
-        self.die = False
-
-    def plsDie(self):
-        self.die = True
-        self.write_lock.acquire()
-        self.write_lock.notifyAll()
-        self.write_lock.release()
-
-        self.read_lock.acquire()
-        self.read_lock.notifyAll()
-        self.read_lock.release()
-
-    def enqueue(self, next):
-        self.write_lock.acquire()
-        while self.next_in_line:
-            if self.die:
-                return
-            self.write_lock.wait()
-
-        assert( not self.next_in_line )
-        self.next_in_line = next
-        self.write_lock.release()
-        self.read_lock.acquire()
-        self.read_lock.notify()
-        self.read_lock.release()
-
-    def dequeue(self):
-        self.read_lock.acquire()
-        while not self.next_in_line:
-            if self.die:
-                return
-            self.read_lock.wait()
-
-        result = self.next_in_line
-
-        self.next_in_line = None
-        self.read_lock.release()
-        self.write_lock.acquire()
-        self.write_lock.notify()
-        self.write_lock.release()
-
-        if isinstance(result, EndOfChanges):
-            return None
-
-        return result
-
-class ChangesToImport(object):
-    """A changes file to be enqueued to be processed"""
-    def __init__(self, checkdir, changesfile, count):
-        self.dirpath = checkdir
-        self.changesfile = changesfile
-        self.count = count
-
-    def __str__(self):
-        return "#%d: %s in %s" % (self.count, self.changesfile, self.dirpath)
-
-class ChangesGenerator(threading.Thread):
-    """enqueues changes files to be imported"""
-    def __init__(self, parent, queue):
-        threading.Thread.__init__(self)
-        self.queue = queue
-        self.session = DBConn().session()
-        self.parent = parent
-        self.die = False
-
-    def plsDie(self):
-        self.die = True
-
-    def run(self):
-        cnf = Config()
-        count = 1
-
-        dirs = []
-        dirs.append(cnf['Dir::Done'])
-
-        for queue_name in [ "byhand", "new", "proposedupdates", "oldproposedupdates" ]:
-            queue = get_policy_queue(queue_name)
-            if queue:
-                dirs.append(os.path.abspath(queue.path))
-            else:
-                warn("Could not find queue %s in database" % queue_name)
-
-        for checkdir in dirs:
-            if os.path.exists(checkdir):
-                print "Looking into %s" % (checkdir)
-
-                for dirpath, dirnames, filenames in os.walk(checkdir, topdown=True):
-                    if not filenames:
-                        # Empty directory (or only subdirectories), next
-                        continue
-
-                    for changesfile in filenames:
-                        try:
-                            if not changesfile.endswith(".changes"):
-                                # Only interested in changes files.
-                                continue
-                            count += 1
-
-                            if not get_dbchange(changesfile, self.session):
-                                to_import = ChangesToImport(dirpath, changesfile, count)
-                                if self.die:
-                                    return
-                                self.queue.enqueue(to_import)
-                        except KeyboardInterrupt:
-                            print("got Ctrl-c in enqueue thread.  terminating")
-                            self.parent.plsDie()
-                            sys.exit(1)
-
-        self.queue.enqueue(EndOfChanges())
-
-class ImportThread(threading.Thread):
-    def __init__(self, parent, queue):
-        threading.Thread.__init__(self)
-        self.queue = queue
-        self.session = DBConn().session()
-        self.parent = parent
-        self.die = False
-
-    def plsDie(self):
-        self.die = True
-
-    def run(self):
-        while True:
-            try:
-                if self.die:
-                    return
-                to_import = self.queue.dequeue()
-                if not to_import:
-                    return
-
-                print( "Directory %s, file %7d, (%s)" % (to_import.dirpath[-10:], to_import.count, to_import.changesfile) )
-
-                changes = Changes()
-                changes.changes_file = to_import.changesfile
-                changesfile = os.path.join(to_import.dirpath, to_import.changesfile)
-                changes.changes = parse_changes(changesfile, signing_rules=-1)
-                changes.changes["fingerprint"] = check_signature(changesfile)
-                changes.add_known_changes(to_import.dirpath, session=self.session)
-                self.session.commit()
-
-            except InvalidDscError as line:
-                warn("syntax error in .dsc file '%s', line %s." % (f, line))
-
-            except ChangesUnicodeError:
-                warn("found invalid changes file, not properly utf-8 encoded")
-
-            except KeyboardInterrupt:
-                print("Caught C-c; on ImportThread. terminating.")
-                self.parent.plsDie()
-                sys.exit(1)
-
-            except:
-                self.parent.plsDie()
-                sys.exit(1)
-
-class ImportKnownChanges(object):
-    def __init__(self,num_threads):
-        self.queue = OneAtATime()
-        self.threads = [ ChangesGenerator(self,self.queue) ]
-
-        for i in range(num_threads):
-            self.threads.append( ImportThread(self,self.queue) )
-
-        try:
-            for thread in self.threads:
-                thread.start()
-
-        except KeyboardInterrupt:
-            print("Caught C-c; terminating.")
-            warn("Caught C-c; terminating.")
-            self.plsDie()
-
-    def plsDie(self):
-        traceback.print_stack()
-        for thread in self.threads:
-            print( "STU: before ask %s to die" % thread )
-            thread.plsDie()
-            print( "STU: after ask %s to die" % thread )
-
-        self.threads=[]
-        sys.exit(1)
-
-
-def main():
-    cnf = Config()
-
-    arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")),
-                 ('j',"concurrency", "%s::%s" % (options_prefix,"Concurrency"),"HasArg"),
-                 ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")),
-                 ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")),
-                ]
-
-    args = apt_pkg.parse_commandline(cnf.Cnf, arguments,sys.argv)
-
-    num_threads = 1
-
-    if len(args) > 0:
-        usage()
-
-    if cnf.has_key("%s::%s" % (options_prefix,"Help")):
-        usage()
-
-    level=logging.INFO
-    if cnf.has_key("%s::%s" % (options_prefix,"Quiet")):
-        level=logging.ERROR
-
-    elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")):
-        level=logging.DEBUG
-
-
-    logging.basicConfig( level=level,
-                         format='%(asctime)s %(levelname)s %(message)s',
-                         stream = sys.stderr )
-
-    if Config().has_key( "%s::%s" %(options_prefix,"Concurrency")):
-        num_threads = int(Config()[ "%s::%s" %(options_prefix,"Concurrency")])
-
-    ImportKnownChanges(num_threads)
-
-
-
-
-if __name__ == '__main__':
-    main()
diff --git a/dak/import_ldap_fingerprints.py b/dak/import_ldap_fingerprints.py
deleted file mode 100755 (executable)
index 0c2a7bd..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/env python
-
-""" Sync fingerprint and uid tables with a debian.org LDAP DB """
-# Copyright (C) 2003, 2004, 2006  James Troup <james@nocrew.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-# <elmo>  ping@debian.org ?
-# <aj>    missing@ ? wtfru@ ?
-# <elmo>  giggle
-# <elmo>  I like wtfru
-# <aj>    all you have to do is retrofit wtfru into an acronym and no one
-#         could possibly be offended!
-# <elmo>  aj: worried terriers for russian unity ?
-# <aj>    uhhh
-# <aj>    ooookkkaaaaay
-# <elmo>  wthru is a little less offensive maybe?  but myabe that's
-#         just because I read h as heck, not hell
-# <elmo>  ho hum
-# <aj>    (surely the "f" stands for "freedom" though...)
-# <elmo>  where the freedom are you?
-# <aj>    'xactly
-# <elmo>  or worried terriers freed (of) russian unilateralism ?
-# <aj>    freedom -- it's the "foo" of the 21st century
-# <aj>    oo, how about "wat@" as in wherefore art thou?
-# <neuro> or worried attack terriers
-# <aj>    Waning Trysts Feared - Return? Unavailable?
-# <aj>    (i find all these terriers more worrying, than worried)
-# <neuro> worrying attack terriers, then
-
-################################################################################
-
-import commands, ldap, sys
-import apt_pkg
-
-from daklib.config import Config
-from daklib.dbconn import *
-from daklib import utils
-from daklib.regexes import re_gpg_fingerprint, re_debian_address
-
-################################################################################
-
-def usage(exit_code=0):
-    print """Usage: dak import-ldap-fingerprints
-Syncs fingerprint and uid tables with a debian.org LDAP DB
-
-  -h, --help                show this help and exit."""
-    sys.exit(exit_code)
-
-################################################################################
-
-def get_ldap_value(entry, value):
-    ret = entry.get(value)
-    if not ret or ret[0] == "" or ret[0] == "-":
-        return ""
-    else:
-        # FIXME: what about > 0 ?
-        return ret[0] + " "
-
-def get_ldap_name(entry):
-    name = get_ldap_value(entry, "cn")
-    name += get_ldap_value(entry, "mn")
-    name += get_ldap_value(entry, "sn")
-    return name.rstrip()
-
-def main():
-    cnf = Config()
-    Arguments = [('h',"help","Import-LDAP-Fingerprints::Options::Help")]
-    for i in [ "help" ]:
-        if not cnf.has_key("Import-LDAP-Fingerprints::Options::%s" % (i)):
-            cnf["Import-LDAP-Fingerprints::Options::%s" % (i)] = ""
-
-    apt_pkg.parse_commandline(cnf.Cnf, Arguments, sys.argv)
-
-    Options = cnf.subtree("Import-LDAP-Fingerprints::Options")
-    if Options["Help"]:
-        usage()
-
-    session = DBConn().session()
-
-    LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
-    LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
-    l = ldap.open(LDAPServer)
-    l.simple_bind_s("","")
-    Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
-                       "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
-                       ["uid", "keyfingerprint", "cn", "mn", "sn"])
-
-
-    # Our database session is already in a transaction
-
-    # Sync LDAP with DB
-    db_fin_uid = {}
-    db_uid_name = {}
-    ldap_fin_uid_id = {}
-    q = session.execute("""
-SELECT f.fingerprint, f.id, u.uid FROM fingerprint f, uid u WHERE f.uid = u.id
- UNION SELECT f.fingerprint, f.id, null FROM fingerprint f where f.uid is null""")
-    for i in q.fetchall():
-        (fingerprint, fingerprint_id, uid) = i
-        db_fin_uid[fingerprint] = (uid, fingerprint_id)
-
-    q = session.execute("SELECT id, name FROM uid")
-    for i in q.fetchall():
-        (uid, name) = i
-        db_uid_name[uid] = name
-
-    for i in Attrs:
-        entry = i[1]
-        fingerprints = entry["keyFingerPrint"]
-        uid_name = entry["uid"][0]
-        name = get_ldap_name(entry)
-        uid = get_or_set_uid(uid_name, session)
-        uid_id = uid.uid_id
-
-        if not db_uid_name.has_key(uid_id) or db_uid_name[uid_id] != name:
-            session.execute("UPDATE uid SET name = :name WHERE id = :uidid", {'name': name, 'uidid': uid_id})
-            print "Assigning name of %s as %s" % (uid_name, name)
-
-        for fingerprint in fingerprints:
-            ldap_fin_uid_id[fingerprint] = (uid_name, uid_id)
-            if db_fin_uid.has_key(fingerprint):
-                (existing_uid, fingerprint_id) = db_fin_uid[fingerprint]
-                if not existing_uid:
-                    session.execute("UPDATE fingerprint SET uid = :uidid WHERE id = :fprid",
-                                    {'uidid': uid_id, 'fprid': fingerprint_id})
-                    print "Assigning %s to 0x%s." % (uid_name, fingerprint)
-                elif existing_uid == uid_name:
-                    pass
-                elif '@' not in existing_uid:
-                    session.execute("UPDATE fingerprint SET uid = :uidid WHERE id = :fprid",
-                                    {'uidid': uid_id, 'fprid': fingerprint_id})
-                    print "Promoting DM %s to DD %s with keyid 0x%s." % (existing_uid, uid_name, fingerprint)
-                else:
-                    utils.warn("%s has %s in LDAP, but database says it should be %s." % \
-                               (uid_name, fingerprint, existing_uid))
-
-    # Try to update people who sign with non-primary key
-    q = session.execute("SELECT fingerprint, id FROM fingerprint WHERE uid is null")
-    for i in q.fetchall():
-        (fingerprint, fingerprint_id) = i
-        cmd = "gpg --no-default-keyring %s --fingerprint %s" \
-              % (utils.gpg_keyring_args(), fingerprint)
-        (result, output) = commands.getstatusoutput(cmd)
-        if result == 0:
-            m = re_gpg_fingerprint.search(output)
-            if not m:
-                print output
-                utils.fubar("0x%s: No fingerprint found in gpg output but it returned 0?\n%s" % \
-                            (fingerprint, utils.prefix_multi_line_string(output, " [GPG output:] ")))
-            primary_key = m.group(1)
-            primary_key = primary_key.replace(" ","")
-            if not ldap_fin_uid_id.has_key(primary_key):
-                utils.warn("0x%s (from 0x%s): no UID found in LDAP" % (primary_key, fingerprint))
-            else:
-                (uid, uid_id) = ldap_fin_uid_id[primary_key]
-                session.execute("UPDATE fingerprint SET uid = :uid WHERE id = :fprid",
-                                {'uid': uid_id, 'fprid': fingerprint_id})
-                print "Assigning %s to 0x%s." % (uid, fingerprint)
-        else:
-            extra_keyrings = ""
-            for keyring in cnf.value_list("Import-LDAP-Fingerprints::ExtraKeyrings"):
-                extra_keyrings += " --keyring=%s" % (keyring)
-            cmd = "gpg %s %s --list-key %s" \
-                  % (utils.gpg_keyring_args(), extra_keyrings, fingerprint)
-            (result, output) = commands.getstatusoutput(cmd)
-            if result != 0:
-                cmd = "gpg --keyserver=%s --allow-non-selfsigned-uid --recv-key %s" % (cnf["Import-LDAP-Fingerprints::KeyServer"], fingerprint)
-                (result, output) = commands.getstatusoutput(cmd)
-                if result != 0:
-                    print "0x%s: NOT found on keyserver." % (fingerprint)
-                    print cmd
-                    print result
-                    print output
-                    continue
-                else:
-                    cmd = "gpg --list-key %s" % (fingerprint)
-                    (result, output) = commands.getstatusoutput(cmd)
-                    if result != 0:
-                        print "0x%s: --list-key returned error after --recv-key didn't." % (fingerprint)
-                        print cmd
-                        print result
-                        print output
-                        continue
-            m = re_debian_address.search(output)
-            if m:
-                guess_uid = m.group(1)
-            else:
-                guess_uid = "???"
-            name = " ".join(output.split('\n')[0].split()[3:])
-            print "0x%s -> %s -> %s" % (fingerprint, name, guess_uid)
-
-            # FIXME: make me optionally non-interactive
-            # FIXME: default to the guessed ID
-            uid = None
-            while not uid:
-                uid = utils.our_raw_input("Map to which UID ? ")
-                Attrs = l.search_s(LDAPDn,ldap.SCOPE_ONELEVEL,"(uid=%s)" % (uid), ["cn","mn","sn"])
-                if not Attrs:
-                    print "That UID doesn't exist in LDAP!"
-                    uid = None
-                else:
-                    entry = Attrs[0][1]
-                    name = get_ldap_name(entry)
-                    prompt = "Map to %s - %s (y/N) ? " % (uid, name.replace("  "," "))
-                    yn = utils.our_raw_input(prompt).lower()
-                    if yn == "y":
-                        uid_o = get_or_set_uid(uid, session=session)
-                        uid_id = uid_o.uid_id
-                        session.execute("UPDATE fingerprint SET uid = :uidid WHERE id = :fprid",
-                                        {'uidid': uid_id, 'fprid': fingerprint_id})
-                        print "Assigning %s to 0x%s." % (uid, fingerprint)
-                    else:
-                        uid = None
-
-    # Commit it all
-    session.commit()
-
-############################################################
-
-if __name__ == '__main__':
-    main()
diff --git a/dak/import_new_files.py b/dak/import_new_files.py
deleted file mode 100755 (executable)
index 7a29467..0000000
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/env python
-# coding=utf8
-
-"""
-Import known_changes files
-
-@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2009  Mike O'Connor <stew@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-
-################################################################################
-
-import sys
-import os
-import logging
-import threading
-import glob
-import apt_pkg
-from daklib.dbconn import DBConn, get_dbchange, get_policy_queue, session_wrapper, ChangePendingFile, get_location, check_poolfile
-from daklib.config import Config
-from daklib.queue import Upload
-from daklib.utils import poolify
-
-# where in dak.conf all of our configuration will be stowed
-options_prefix = "NewFiles"
-options_prefix = "%s::Options" % options_prefix
-
-log = logging.getLogger()
-
-################################################################################
-
-
-def usage (exit_code=0):
-    print """Usage: dak import-new-files [options]
-
-OPTIONS
-     -v, --verbose
-        show verbose information messages
-
-     -q, --quiet
-        suppress all output but errors
-
-"""
-    sys.exit(exit_code)
-
-class ImportNewFiles(object):
-    @session_wrapper
-    def __init__(self, session=None):
-        cnf = Config()
-        try:
-            newq = get_policy_queue('new', session)
-            for changes_fn in glob.glob(newq.path + "/*.changes"):
-                changes_bn = os.path.basename(changes_fn)
-                chg = get_dbchange(changes_bn, session)
-
-                u = Upload()
-                success = u.load_changes(changes_fn)
-                u.pkg.changes_file = changes_bn
-                u.check_hashes()
-
-                if not chg:
-                    chg = u.pkg.add_known_changes(newq.path, newq.policy_queue_id, session)
-                    session.add(chg)
-
-                if not success:
-                    log.critical("failed to load %s" % changes_fn)
-                    sys.exit(1)
-                else:
-                    log.critical("ACCLAIM: %s" % changes_fn)
-
-                files=[]
-                for chg_fn in u.pkg.files.keys():
-                    try:
-                        f = open(os.path.join(newq.path, chg_fn))
-                        cpf = ChangePendingFile()
-                        cpf.filename = chg_fn
-                        cpf.size = u.pkg.files[chg_fn]['size']
-                        cpf.md5sum = u.pkg.files[chg_fn]['md5sum']
-
-                        if u.pkg.files[chg_fn].has_key('sha1sum'):
-                            cpf.sha1sum = u.pkg.files[chg_fn]['sha1sum']
-                        else:
-                            log.warning("Having to generate sha1sum for %s" % chg_fn)
-                            f.seek(0)
-                            cpf.sha1sum = apt_pkg.sha1sum(f)
-
-                        if u.pkg.files[chg_fn].has_key('sha256sum'):
-                            cpf.sha256sum = u.pkg.files[chg_fn]['sha256sum']
-                        else:
-                            log.warning("Having to generate sha256sum for %s" % chg_fn)
-                            f.seek(0)
-                            cpf.sha256sum = apt_pkg.sha256sum(f)
-
-                        session.add(cpf)
-                        files.append(cpf)
-                        f.close()
-                    except IOError:
-                        # Can't find the file, try to look it up in the pool
-                        poolname = poolify(u.pkg.changes["source"], u.pkg.files[chg_fn]["component"])
-                        l = get_location(cnf["Dir::Pool"], u.pkg.files[chg_fn]["component"], session=session)
-                        if not l:
-                            log.critical("ERROR: Can't find location for %s (component %s)" % (chg_fn, u.pkg.files[chg_fn]["component"]))
-
-                        found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
-                                                         u.pkg.files[chg_fn]['size'],
-                                                         u.pkg.files[chg_fn]["md5sum"],
-                                                         l.location_id,
-                                                         session=session)
-
-                        if found is None:
-                            log.critical("ERROR: Found multiple files for %s in pool" % chg_fn)
-                            sys.exit(1)
-                        elif found is False and poolfile is not None:
-                            log.critical("ERROR: md5sum / size mismatch for %s in pool" % chg_fn)
-                            sys.exit(1)
-                        else:
-                            if poolfile is None:
-                                log.critical("ERROR: Could not find %s in pool" % chg_fn)
-                                sys.exit(1)
-                            else:
-                                chg.poolfiles.append(poolfile)
-
-
-                chg.files = files
-
-
-            session.commit()
-
-        except KeyboardInterrupt:
-            print("Caught C-c; terminating.")
-            utils.warn("Caught C-c; terminating.")
-            self.plsDie()
-
-
-def main():
-    cnf = Config()
-
-    arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")),
-                 ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")),
-                 ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")),
-                ]
-
-    args = apt_pkg.parse_commandline(cnf.Cnf, arguments,sys.argv)
-
-    num_threads = 1
-
-    if len(args) > 0:
-        usage(1)
-
-    if cnf.has_key("%s::%s" % (options_prefix,"Help")):
-        usage(0)
-
-    level=logging.INFO
-    if cnf.has_key("%s::%s" % (options_prefix,"Quiet")):
-        level=logging.ERROR
-
-    elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")):
-        level=logging.DEBUG
-
-
-    logging.basicConfig( level=level,
-                         format='%(asctime)s %(levelname)s %(message)s',
-                         stream = sys.stderr )
-
-    ImportNewFiles()
-
-
-if __name__ == '__main__':
-    main()
index fdd7da9676fb92ac8e80ffd84597f5c1bf0fe7c5..0e0d33a94ea85f8d3b573ada62a57145887c726a 100755 (executable)
@@ -103,7 +103,7 @@ def process_keyring(fullpath, secret=False):
 ######################################################################
 
 def create_directories():
-    """Create directories referenced in dak.conf and apt.conf."""
+    """Create directories referenced in dak.conf."""
 
     session = DBConn().session()
 
@@ -131,20 +131,13 @@ def create_directories():
         process_keyring(Cnf['Dinstall::SigningPubKeyring'], secret=True)
 
     # Process public keyrings
-    for keyring in session.query(Keyring).all():
+    for keyring in session.query(Keyring).filter_by(active=True):
         process_keyring(keyring.keyring_name)
 
-    # Process pool directories
-    for component in session.query(Component):
-        directory = os.path.join( Cnf['Dir::Pool'], component.component_name )
-
-        do_dir(directory, '%s pool' % component.component_name)
-
-
     # Process dists directories
     # TODO: Store location of each suite in database
     for suite in session.query(Suite):
-        suite_dir = os.path.join( Cnf['Dir::Root'], 'dists', "%s/%s" % (suite.suite_name, suite_suffix) )
+        suite_dir = os.path.join(suite.archive.path, 'dists', suite.suite_name, suite_suffix)
 
         # TODO: Store valid suite/component mappings in database
         for component in session.query(Component):
index 79f417fd3cb53874d8845368414e21e3b48553a1..9d21e5ca76952f9d1a9909407d7ae22bf9abafb0 100755 (executable)
--- a/dak/ls.py
+++ b/dak/ls.py
@@ -135,19 +135,19 @@ def main ():
         q = session.execute("""
 SELECT b.package, b.version, a.arch_string, su.suite_name, c.name, m.name
   FROM binaries b, architecture a, suite su, bin_associations ba,
-       files f, location l, component c, maintainer m
+       files f, files_archive_map af, component c, maintainer m
  WHERE b.package %s :package AND a.id = b.architecture AND su.id = ba.suite
-   AND b.id = ba.bin AND b.file = f.id AND f.location = l.id
-   AND l.component = c.id AND b.maintainer = m.id %s %s %s
+   AND b.id = ba.bin AND b.file = f.id AND af.file_id = f.id AND su.archive_id = af.archive_id
+   AND af.component_id = c.id AND b.maintainer = m.id %s %s %s
 """ % (comparison_operator, con_suites, con_architectures, con_bintype), {'package': package})
         ql = q.fetchall()
         if check_source:
             q = session.execute("""
 SELECT s.source, s.version, 'source', su.suite_name, c.name, m.name
-  FROM source s, suite su, src_associations sa, files f, location l,
+  FROM source s, suite su, src_associations sa, files f, files_archive_map af,
        component c, maintainer m
  WHERE s.source %s :package AND su.id = sa.suite AND s.id = sa.source
-   AND s.file = f.id AND f.location = l.id AND l.component = c.id
+   AND s.file = f.id AND af.file_id = f.id AND af.archive_id = su.archive_id AND af.component_id = c.id
    AND s.maintainer = m.id %s
 """ % (comparison_operator, con_suites), {'package': package})
             if not Options["Architecture"] or con_architectures:
@@ -174,6 +174,19 @@ SELECT s.source, s.version, 'source', su.suite_name, c.name, m.name
 
         packages = d.keys()
         packages.sort()
+
+        # Calculate optimal column sizes
+        sizes = [10, 13, 10]
+        for pkg in packages:
+            versions = d[pkg].keys()
+            for version in versions:
+                suites = d[pkg][version].keys()
+                for suite in suites:
+                    sizes[0] = max(sizes[0], len(pkg))
+                    sizes[1] = max(sizes[1], len(version))
+                    sizes[2] = max(sizes[2], len(suite))
+        fmt = "%%%is | %%%is | %%%is | "  % tuple(sizes)
+
         for pkg in packages:
             versions = d[pkg].keys()
             versions.sort(apt_pkg.version_compare)
@@ -184,7 +197,7 @@ SELECT s.source, s.version, 'source', su.suite_name, c.name, m.name
                     arches = d[pkg][version][suite]
                     arches.sort(utils.arch_compare_sw)
                     if Options["Format"] == "": #normal
-                        sys.stdout.write("%10s | %10s | %13s | " % (pkg, version, suite))
+                        sys.stdout.write(fmt % (pkg, version, suite))
                         sys.stdout.write(", ".join(arches))
                         sys.stdout.write('\n')
                     elif Options["Format"] in [ "control-suite", "heidi" ]:
index 2fe77fe6af1987e59359c8ca142c40ed1f1a47d8..7d3132eb28a42260847fc6919716a556d3562028 100755 (executable)
@@ -54,20 +54,22 @@ import sys
 import apt_pkg
 from glob import glob
 from shutil import rmtree
+from yaml import safe_dump
 from daklib.dbconn import *
 from daklib import utils
-from daklib.config import Config
 from daklib.contents import UnpackedSource
 from daklib.regexes import re_no_epoch
 
 ################################################################################
 
+filelist = 'filelist.yaml'
+
 def usage (exit_code=0):
     print """Generate changelog between two suites
 
        Usage:
        make-changelog -s <suite> -b <base_suite> [OPTION]...
-       make-changelog -e
+       make-changelog -e -a <archive>
 
 Options:
 
@@ -76,7 +78,9 @@ Options:
   -b, --base-suite          suite to be taken as reference for comparison
   -n, --binnmu              display binNMUs uploads instead of source ones
 
-  -e, --export              export interesting files from source packages"""
+  -e, --export              export interesting files from source packages
+  -a, --archive             archive to fetch data from
+  -p, --progress            display progress status"""
 
     sys.exit(exit_code)
 
@@ -160,24 +164,28 @@ def display_changes(uploads, index):
         print upload[index]
         prev_upload = upload[0]
 
-def export_files(session, pool, clpool):
+def export_files(session, archive, clpool, progress=False):
     """
     Export interesting files from source packages.
     """
+    pool = os.path.join(archive.path, 'pool')
 
     sources = {}
     unpack = {}
     files = ('changelog', 'copyright', 'NEWS.Debian', 'README.Debian')
     stats = {'unpack': 0, 'created': 0, 'removed': 0, 'errors': 0, 'files': 0}
-    query = """SELECT DISTINCT s.source, su.suite_name AS suite, s.version, f.filename
+    query = """SELECT DISTINCT s.source, su.suite_name AS suite, s.version, c.name || '/' || f.filename AS filename
                FROM source s
                JOIN newest_source n ON n.source = s.source AND n.version = s.version
                JOIN src_associations sa ON sa.source = s.id
                JOIN suite su ON su.id = sa.suite
                JOIN files f ON f.id = s.file
+               JOIN files_archive_map fam ON f.id = fam.file_id AND fam.archive_id = su.archive_id
+               JOIN component c ON fam.component_id = c.id
+               WHERE su.archive_id = :archive_id
                ORDER BY s.source, suite"""
 
-    for p in session.execute(query):
+    for p in session.execute(query, {'archive_id': archive.archive_id}):
         if not sources.has_key(p[0]):
             sources[p[0]] = {}
         sources[p[0]][p[1]] = (re_no_epoch.sub('', p[2]), p[3])
@@ -188,12 +196,12 @@ def export_files(session, pool, clpool):
             if not os.path.exists(path):
                 os.makedirs(path)
             if not os.path.exists(os.path.join(path, \
-                   '%s_%s.changelog' % (p, sources[p][s][0]))):
+                   '%s_%s_changelog' % (p, sources[p][s][0]))):
                 if not unpack.has_key(os.path.join(pool, sources[p][s][1])):
                     unpack[os.path.join(pool, sources[p][s][1])] = (path, set())
                 unpack[os.path.join(pool, sources[p][s][1])][1].add(s)
             else:
-                for file in glob('%s/%s_%s*' % (path, p, sources[p][s][0])):
+                for file in glob('%s/%s_%s_*' % (path, p, sources[p][s][0])):
                     link = '%s%s' % (s, file.split('%s_%s' \
                                       % (p, sources[p][s][0]))[1])
                     try:
@@ -205,15 +213,20 @@ def export_files(session, pool, clpool):
     for p in unpack.keys():
         package = os.path.splitext(os.path.basename(p))[0].split('_')
         try:
-            unpacked = UnpackedSource(p)
+            unpacked = UnpackedSource(p, clpool)
             tempdir = unpacked.get_root_directory()
             stats['unpack'] += 1
+            if progress:
+                if stats['unpack'] % 100 == 0:
+                    sys.stderr.write('%d packages unpacked\n' % stats['unpack'])
+                elif stats['unpack'] % 10 == 0:
+                    sys.stderr.write('.')
             for file in files:
                 for f in glob(os.path.join(tempdir, 'debian', '*%s' % file)):
                     for s in unpack[p][1]:
-                        suite = os.path.join(unpack[p][0], '%s.%s' \
+                        suite = os.path.join(unpack[p][0], '%s_%s' \
                                 % (s, os.path.basename(f)))
-                        version = os.path.join(unpack[p][0], '%s_%s.%s' % \
+                        version = os.path.join(unpack[p][0], '%s_%s_%s' % \
                                   (package[0], package[1], os.path.basename(f)))
                         if not os.path.exists(version):
                             os.link(f, version)
@@ -229,20 +242,27 @@ def export_files(session, pool, clpool):
             print 'make-changelog: unable to unpack %s\n%s' % (p, e)
             stats['errors'] += 1
 
-    for root, dirs, files in os.walk(clpool):
+    for root, dirs, files in os.walk(clpool, topdown=False):
+        files = [f for f in files if f != filelist]
         if len(files):
-            if root.split('/')[-1] not in sources.keys():
-                if os.path.exists(root):
-                    rmtree(root)
-                    stats['removed'] += 1
+            if root != clpool:
+                if root.split('/')[-1] not in sources.keys():
+                    if os.path.exists(root):
+                        stats['removed'] += len(os.listdir(root))
+                        rmtree(root)
             for file in files:
                 if os.path.exists(os.path.join(root, file)):
                     if os.stat(os.path.join(root, file)).st_nlink ==  1:
-                        os.unlink(os.path.join(root, file))
                         stats['removed'] += 1
-
-    for root, dirs, files in os.walk(clpool):
+                        os.unlink(os.path.join(root, file))
+        for dir in dirs:
+            try:
+                os.rmdir(os.path.join(root, dir))
+            except OSError:
+                pass
         stats['files'] += len(files)
+    stats['files'] -= stats['removed']
+
     print 'make-changelog: file exporting finished'
     print '  * New packages unpacked: %d' % stats['unpack']
     print '  * New files created: %d' % stats['created']
@@ -250,16 +270,37 @@ def export_files(session, pool, clpool):
     print '  * Unpack errors: %d' % stats['errors']
     print '  * Files available into changelog pool: %d' % stats['files']
 
+def generate_export_filelist(clpool):
+    clfiles = {}
+    for root, dirs, files in os.walk(clpool):
+        for file in [f for f in files if f != filelist]:
+            clpath = os.path.join(root, file).replace(clpool, '').strip('/')
+            source = clpath.split('/')[2]
+            elements = clpath.split('/')[3].split('_')
+            if source not in clfiles:
+                clfiles[source] = {}
+            if elements[0] == source:
+                if elements[1] not in clfiles[source]:
+                    clfiles[source][elements[1]] = []
+                clfiles[source][elements[1]].append(clpath)
+            else:
+                if elements[0] not in clfiles[source]:
+                    clfiles[source][elements[0]] = []
+                clfiles[source][elements[0]].append(clpath)
+    with open(os.path.join(clpool, filelist), 'w+') as fd:
+        safe_dump(clfiles, fd, default_flow_style=False)
+
 def main():
     Cnf = utils.get_conf()
-    cnf = Config()
     Arguments = [('h','help','Make-Changelog::Options::Help'),
+                 ('a','archive','Make-Changelog::Options::Archive','HasArg'),
                  ('s','suite','Make-Changelog::Options::Suite','HasArg'),
                  ('b','base-suite','Make-Changelog::Options::Base-Suite','HasArg'),
                  ('n','binnmu','Make-Changelog::Options::binNMU'),
-                 ('e','export','Make-Changelog::Options::export')]
+                 ('e','export','Make-Changelog::Options::export'),
+                 ('p','progress','Make-Changelog::Options::progress')]
 
-    for i in ['help', 'suite', 'base-suite', 'binnmu', 'export']:
+    for i in ['help', 'suite', 'base-suite', 'binnmu', 'export', 'progress']:
         if not Cnf.has_key('Make-Changelog::Options::%s' % (i)):
             Cnf['Make-Changelog::Options::%s' % (i)] = ''
 
@@ -269,6 +310,7 @@ def main():
     base_suite = Cnf['Make-Changelog::Options::Base-Suite']
     binnmu = Cnf['Make-Changelog::Options::binNMU']
     export = Cnf['Make-Changelog::Options::export']
+    progress = Cnf['Make-Changelog::Options::progress']
 
     if Options['help'] or not (suite and base_suite) and not export:
         usage()
@@ -280,9 +322,11 @@ def main():
     session = DBConn().session()
 
     if export:
-        if cnf.exportpath:
-            exportpath = os.path.join(Cnf['Dir::Export'], cnf.exportpath)
-            export_files(session, Cnf['Dir::Pool'], exportpath)
+        archive = session.query(Archive).filter_by(archive_name=Options['Archive']).one()
+        exportpath = archive.changelog
+        if exportpath:
+            export_files(session, archive, exportpath, progress)
+            generate_export_filelist(exportpath)
         else:
             utils.fubar('No changelog export path defined')
     elif binnmu:
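The export path also gains generate_export_filelist(), which records every exported changelog/copyright file in filelist.yaml at the top of the changelog pool. Judging from the path splitting above, the dictionary handed to safe_dump is keyed by source package and then by version or suite name; a minimal sketch of that structure, assuming the usual component/prefix/source pool layout and an illustrative package:

    # Roughly what safe_dump() serialises into filelist.yaml (paths are made up).
    clfiles = {
        'dak': {
            '1.0-32':   ['main/d/dak/dak_1.0-32_changelog',
                         'main/d/dak/dak_1.0-32_copyright'],
            'unstable': ['main/d/dak/unstable_changelog'],
        },
    }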
index b7f729857d20b47baa66f69df544024a09ffe72b..30606e9a429a1fee4fbfb45776a42455895a7309 100755 (executable)
@@ -43,9 +43,10 @@ import sys
 ################################################################################
 
 def usage (exit_code=0):
-    print """Usage: dak make-maintainers [OPTION] EXTRA_FILE[...]
+    print """Usage: dak make-maintainers [OPTION] -a ARCHIVE EXTRA_FILE[...]
 Generate an index of packages <=> Maintainers / Uploaders.
 
+  -a, --archive=ARCHIVE      archive to take packages from
   -h, --help                 show this help and exit
 """
     sys.exit(exit_code)
@@ -67,31 +68,42 @@ def uploader_list(source):
 def main():
     cnf = Config()
 
-    Arguments = [('h',"help","Make-Maintainers::Options::Help")]
+    Arguments = [('h',"help","Make-Maintainers::Options::Help"),
+                 ('a','archive','Make-Maintainers::Options::Archive','HasArg')]
     if not cnf.has_key("Make-Maintainers::Options::Help"):
         cnf["Make-Maintainers::Options::Help"] = ""
 
     extra_files = apt_pkg.parse_commandline(cnf.Cnf, Arguments, sys.argv)
     Options = cnf.subtree("Make-Maintainers::Options")
 
-    if Options["Help"]:
+    if Options["Help"] or not Options.get('Archive'):
         usage()
 
     Logger = daklog.Logger('make-maintainers')
     session = DBConn().session()
 
+    archive = session.query(Archive).filter_by(archive_name=Options['Archive']).one()
+
     # dictionary packages to maintainer names
     maintainers = dict()
     # dictionary packages to list of uploader names
     uploaders = dict()
 
     source_query = session.query(DBSource).from_statement('''
-        select distinct on (source) * from source
-            order by source, version desc''')
+        select distinct on (source.source) source.* from source
+            join src_associations sa on source.id = sa.source
+            join suite on sa.suite = suite.id
+            where suite.archive_id = :archive_id
+            order by source.source, source.version desc''') \
+        .params(archive_id=archive.archive_id)
 
     binary_query = session.query(DBBinary).from_statement('''
-        select distinct on (package) * from binaries
-            order by package, version desc''')
+        select distinct on (binaries.package) binaries.* from binaries
+            join bin_associations ba on binaries.id = ba.bin
+            join suite on ba.suite = suite.id
+            where suite.archive_id = :archive_id
+            order by binaries.package, binaries.version desc''') \
+        .params(archive_id=archive.archive_id)
 
     Logger.log(['sources'])
     for source in source_query:
index c457820fc0faf53d1e0e53dcbb3bc80e80832313..175376709a14405dcafb737eb8f375fd389141ed 100755 (executable)
@@ -31,11 +31,13 @@ and binary package version it has in a standard rfc2822-like format.
 
 ################################################################################
 
+import sys
+
 from daklib.dbconn import *
 
 ################################################################################
 
-def build_mapping():
+def build_mapping(archive, session):
     # The ORDER BY is in the queries so that compression of the output works
     # better.  It's the difference between a 9 megabyte bzip2 and a 2.5 mb
     # bzip2 file.
@@ -44,10 +46,13 @@ def build_mapping():
     SELECT
         source.source,
         source.version,
-        './pool/' || files.filename AS path
+        './pool/' || component.name || '/' || files.filename AS path
     FROM source
       JOIN dsc_files ON source.id=dsc_files.source
       JOIN files ON files.id=dsc_files.file
+      JOIN files_archive_map ON files.id = files_archive_map.file_id
+      JOIN component ON files_archive_map.component_id = component.id
+    WHERE files_archive_map.archive_id = :archive_id
     ORDER BY source, version
     """
 
@@ -56,26 +61,27 @@ def build_mapping():
         source.source,
         source.version,
         architecture.arch_string AS arch,
-        './pool/' || files.filename AS path,
+        './pool/' || component.name || '/' || files.filename AS path,
         binaries.package,
         binaries.version AS bin_version
     FROM source
       JOIN binaries ON source.id=binaries.source
       JOIN files ON binaries.file=files.id
+      JOIN files_archive_map ON files.id = files_archive_map.file_id
+      JOIN component ON files_archive_map.component_id = component.id
       JOIN architecture ON architecture.id=binaries.architecture
+    WHERE files_archive_map.archive_id = :archive_id
     ORDER BY source, version, package, bin_version
     """
 
-    session = DBConn().session()
-
-    for row in session.execute(query_sources).fetchall():
+    for row in session.execute(query_sources, {'archive_id': archive.archive_id}).fetchall():
         (source, version, path) = row
         print "Path: %s"%path
         print "Source: %s"%source
         print "Source-Version: %s"%version
         print
 
-    for row in session.execute(query_binaries).fetchall():
+    for row in session.execute(query_binaries, {'archive_id': archive.archive_id}).fetchall():
         (source, version, arch, path, bin, binv) = row
         print "Path: %s"%path
         print "Source: %s"%source
@@ -87,9 +93,21 @@ def build_mapping():
 
 ################################################################################
 
+def usage():
+    print "usage: dak make-pkg-file-mapping <archive>"
+    sys.exit(0)
+
+################################################################################
+
 def main():
-    DBConn()
-    build_mapping()
+    if len(sys.argv) != 2:
+        usage()
+
+    archive_name = sys.argv[1]
+
+    session = DBConn().session()
+    archive = session.query(Archive).filter_by(archive_name=archive_name).one()
+    build_mapping(archive, session)
 
 #########################################################################################
 
index b6342b1e420567eb2dd7f37b05254cda436d31f4..1020542a491c37a8f3ae800e1037173f48dd9d96 100755 (executable)
@@ -5,6 +5,7 @@
 @contact: Debian FTPMaster <ftpmaster@debian.org>
 @copyright: 2000, 2001, 2002, 2006  James Troup <james@nocrew.org>
 @copyright: 2009  Mark Hymers <mhy@debian.org>
+@copyright: 2012, Ansgar Burchardt <ansgar@debian.org>
 
 """
 
 
 ################################################################################
 
-import os
-import os.path
-import stat
-import sys
-from datetime import datetime
 import apt_pkg
+from datetime import datetime, timedelta
+import sys
 
 from daklib import daklog
+from daklib.archive import ArchiveTransaction
 from daklib.dbconn import *
 from daklib.config import Config
 
@@ -54,6 +53,74 @@ Manage the contents of one or more build queues
 
 ################################################################################
 
+def clean(build_queue, transaction, now=None):
+    session = transaction.session
+    if now is None:
+        now = datetime.now()
+
+    delete_before = now - timedelta(seconds=build_queue.stay_of_execution)
+    suite = build_queue.suite
+
+    # Remove binaries subject to the following conditions:
+    # 1. Keep binaries that are in policy queues.
+    # 2. Remove binaries that are not in suites.
+    # 3. Remove binaries that have been in the build queue for some time.
+    query = """
+        SELECT b.*
+          FROM binaries b
+          JOIN bin_associations ba ON b.id = ba.bin
+         WHERE ba.suite = :suite_id
+           AND NOT EXISTS
+               (SELECT 1 FROM policy_queue_upload_binaries_map pqubm
+                         JOIN policy_queue_upload pqu ON pqu.id = pqubm.policy_queue_upload_id
+                         JOIN policy_queue pq ON pq.id = pqu.policy_queue_id
+                         JOIN suite s ON s.policy_queue_id = pq.id
+                         JOIN suite_build_queue_copy sbqc ON sbqc.suite = s.id
+                        WHERE pqubm.binary_id = ba.bin AND pq.send_to_build_queues
+                          AND sbqc.build_queue_id = :build_queue_id)
+           AND (ba.created < :delete_before
+                OR NOT EXISTS
+                   (SELECT 1 FROM bin_associations ba2
+                             JOIN suite_build_queue_copy sbqc ON sbqc.suite = ba2.suite
+                            WHERE ba2.bin = ba.bin AND sbqc.build_queue_id = :build_queue_id))"""
+    binaries = session.query(DBBinary).from_statement(query) \
+        .params({'build_queue_id': build_queue.queue_id, 'suite_id': suite.suite_id, 'delete_before': delete_before})
+    for binary in binaries:
+        Logger.log(["removed binary from build queue", build_queue.queue_name, binary.package, binary.version])
+        transaction.remove_binary(binary, suite)
+
+    # Remove sources
+    # Conditions are similar as for binaries, but we also keep sources
+    # if there is a binary in the build queue that uses it.
+    query = """
+        SELECT s.*
+          FROM source s
+          JOIN src_associations sa ON s.id = sa.source
+         WHERE sa.suite = :suite_id
+           AND NOT EXISTS
+               (SELECT 1 FROM policy_queue_upload pqu
+                         JOIN policy_queue pq ON pq.id = pqu.policy_queue_id
+                         JOIN suite s ON s.policy_queue_id = pq.id
+                         JOIN suite_build_queue_copy sbqc ON sbqc.suite = s.id
+                        WHERE pqu.source_id = sa.source AND pq.send_to_build_queues
+                          AND sbqc.build_queue_id = :build_queue_id)
+           AND (sa.created < :delete_before
+                OR NOT EXISTS
+                   (SELECT 1 FROM src_associations sa2
+                             JOIN suite_build_queue_copy sbqc ON sbqc.suite = sa2.suite
+                            WHERE sbqc.build_queue_id = :build_queue_id
+                              AND sa2.source = sa.source))
+           AND NOT EXISTS
+               (SELECT 1 FROM bin_associations ba
+                         JOIN binaries b ON ba.bin = b.id
+                        WHERE ba.suite = :suite_id
+                          AND b.source = s.id)"""
+    sources = session.query(DBSource).from_statement(query) \
+        .params({'build_queue_id': build_queue.queue_id, 'suite_id': suite.suite_id, 'delete_before': delete_before})
+    for source in sources:
+        Logger.log(["removed source from build queue", build_queue.queue_name, source.source, source.version])
+        transaction.remove_source(source, suite)
+
 def main ():
     global Options, Logger
 
@@ -79,25 +146,23 @@ def main ():
 
     session = DBConn().session()
 
-    if Options["All"]:
-        if len(queue_names) != 0:
-            print "E: Cannot use both -a and a queue_name"
-            sys.exit(1)
-        queues = session.query(BuildQueue).all()
-
-    else:
-        queues = []
-        for q in queue_names:
-            queue = get_build_queue(q.lower(), session)
-            if queue:
-                queues.append(queue)
-            else:
-                Logger.log(['cannot find queue %s' % q])
-
-    # For each given queue, look up object and call manage_queue
-    for q in queues:
-        Logger.log(['cleaning queue %s using datetime %s' % (q.queue_name, starttime)])
-        q.clean_and_update(starttime, Logger, dryrun=Options["No-Action"])
+    with ArchiveTransaction() as transaction:
+        session = transaction.session
+        if Options['All']:
+            if len(queue_names) != 0:
+                print "E: Cannot use both -a and a queue name"
+                sys.exit(1)
+            queues = session.query(BuildQueue)
+        else:
+            queues = session.query(BuildQueue).filter(BuildQueue.queue_name.in_(queue_names))
+
+        for q in queues:
+            Logger.log(['cleaning queue %s using datetime %s' % (q.queue_name, starttime)])
+            clean(q, transaction, now=starttime)
+        if not Options['No-Action']:
+            transaction.commit()
+        else:
+            transaction.rollback()
 
     Logger.close()
 
diff --git a/dak/metadata.py b/dak/metadata.py
deleted file mode 100755 (executable)
index b4c1d58..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-"""
-Import data for Package/Sources files from .deb and .dsc files
-@copyright: 2011 Torsten Werner <twerner@debian.org>
-@copyright: 2011 Mark Hymers <mhy@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-################################################################################
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-# < mvo> that screams for consolidation in libapt at least (that then in turn can
-#        use libdpkg ... ) - I guess the "d" means delayed ;)
-
-# (whilst discussing adding xz support to dak, and therefore python-apt, and
-#        therefore libapt-pkg)
-
-################################################################################
-
-import sys
-import apt_pkg
-
-from daklib.config import Config
-from daklib.dbconn import *
-from daklib.metadata import MetadataScanner
-from daklib import daklog
-from daklib import utils
-
-################################################################################
-
-def usage (exit_code=0):
-    print """Usage: dak metadata [options] subcommand
-
-SUBCOMMANDS
-    scan-source
-        scan the dsc files in the existing pool and load metadata into the database
-
-    scan-binary
-        scan the deb files in the existing pool and load metadata into the database
-
-OPTIONS
-     -h, --help
-        show this help and exit
-
-OPTIONS for scan
-     -l, --limit=NUMBER
-        maximum number of items to scan
-"""
-    sys.exit(exit_code)
-
-################################################################################
-
-def scan_all(cnf, mode, limit):
-    Logger = daklog.Logger('metadata scan (%s)' % mode)
-    result = MetadataScanner.scan_all(mode, limit)
-    processed = '%(processed)d %(type)s processed' % result
-    remaining = '%(remaining)d %(type)s remaining' % result
-    Logger.log([processed, remaining])
-    Logger.close()
-
-################################################################################
-
-def main():
-    cnf = Config()
-    cnf['Metadata::Options::Help'] = ''
-    cnf['Metadata::Options::Suite'] = ''
-    cnf['Metadata::Options::Limit'] = ''
-    cnf['Metadata::Options::Force'] = ''
-    arguments = [('h', "help",  'Metadata::Options::Help'),
-                 ('s', "suite", 'Metadata::Options::Suite', "HasArg"),
-                 ('l', "limit", 'Metadata::Options::Limit', "HasArg"),
-                 ('f', "force", 'Metadata::Options::Force'),
-                ]
-    args = apt_pkg.parse_commandline(cnf.Cnf, arguments, sys.argv)
-    options = cnf.subtree('Metadata::Options')
-
-    if (len(args) != 1) or options['Help']:
-        usage()
-
-    limit = None
-    if len(options['Limit']) > 0:
-        limit = int(options['Limit'])
-
-    if args[0] == 'scan-source':
-        scan_all(cnf, 'source', limit)
-        return
-    elif args[0] == 'scan-binary':
-        scan_all(cnf, 'binary', limit)
-        return
-
-    suite_names = utils.split_args(options['Suite'])
-
-    force = bool(options['Force'])
-
-    if args[0] == 'generate':
-        raise NotImplementedError
-
-    usage()
-
-
-if __name__ == '__main__':
-    main()
index 026dcced053bff432a4038ece86c0ed13ee6101a..aa2928db71bef54f63fb990b80563c59502b92b2 100755 (executable)
@@ -86,35 +86,26 @@ def sudo(arg, fn, exit):
 
 def do_Approve(): sudo("A", _do_Approve, True)
 def _do_Approve():
-    # 1. use process-policy to go through the COMMENTS dir
-    spawn("dak process-policy embargoed")
-    spawn("dak process-policy unembargoed")
-    newstage=get_policy_queue('newstage')
-
-    # 2. sync the stuff to ftpmaster
-    print "Sync stuff for upload to ftpmaster"
-    spawn("rsync -a -q %s/. /srv/queued/ftpmaster/." % (newstage.path))
-
     print "Locking unchecked"
     lockfile='/srv/security-master.debian.org/lock/unchecked.lock'
-    spawn("lockfile -r8 {0}".format(lockfile))
+    spawn("lockfile -r42 {0}".format(lockfile))
 
     try:
-        # 3. Now run process-upload in the newstage dir
-        print "Now put it into the security archive"
-        spawn("dak process-upload -a -d %s" % (newstage.path))
+        # 1. Install accepted packages
+        print "Installing accepted packages into security archive"
+        for queue in ("embargoed",):
+            spawn("dak process-policy {0}".format(queue))
 
-        # 4. Run all the steps that are needed to publish the changed archive
+        # 2. Run all the steps that are needed to publish the changed archive
         print "Domination"
         spawn("dak dominate")
         #    print "Generating filelist for apt-ftparchive"
         #    spawn("dak generate-filelist")
         print "Updating Packages and Sources files... This may take a while, be patient"
         spawn("/srv/security-master.debian.org/dak/config/debian-security/map.sh")
-        #    spawn("apt-ftparchive generate %s" % (utils.which_apt_conf_file()))
-        spawn("dak generate-packages-sources2")
+        spawn("dak generate-packages-sources2 -a security")
         print "Updating Release files..."
-        spawn("dak generate-releases")
+        spawn("dak generate-releases -a security")
         print "Triggering security mirrors... (this may take a while)"
         spawn("/srv/security-master.debian.org/dak/config/debian-security/make-mirror.sh")
         spawn("sudo -u archvsync -H /home/archvsync/signal_security")
@@ -188,9 +179,11 @@ def main():
         acceptfilename="%s/COMMENTS/ACCEPT.%s_%s" % (os.path.dirname(os.path.abspath(changes[0])), dbchange.source, version)
         acceptfiles[acceptfilename]=1
 
+    print "Would create %s now and then go on to accept this package, if you allow me to." % (acceptfiles.keys())
     if Options["No-Action"]:
-        print "Would create %s now and then go on to accept this package, but No-Action is set" % (acceptfiles.keys())
         sys.exit(0)
+    else:
+        raw_input("Press Enter to continue")
 
     for acceptfilename in acceptfiles.keys():
         accept_file = file(acceptfilename, "w")
index ce5d12ff41348c1f4c929fd7f5374554eafd13e3..1d43a62579fe08012426cb5dcd1fc2d435c50883 100755 (executable)
@@ -56,27 +56,27 @@ Make microchanges or microqueries of the binary overrides
 """
     sys.exit(exit_code)
 
-def check_override_compliance(package, priority, suite, cnf, session):
+def check_override_compliance(package, priority, archive_path, suite_name, cnf, session):
     print "Checking compliance with related overrides..."
 
     depends = set()
     rdepends = set()
     components = get_component_names(session)
-    arches = set([x.arch_string for x in get_suite_architectures(suite)])
+    arches = set([x.arch_string for x in get_suite_architectures(suite_name)])
     arches -= set(["source", "all"])
     for arch in arches:
         for component in components:
-            Packages = utils.get_packages_from_ftp(cnf['Dir::Root'], suite, component, arch)
-            while Packages.Step():
-                package_name = Packages.Section.Find("Package")
-                dep_list = Packages.Section.Find("Depends")
+            Packages = utils.get_packages_from_ftp(archive_path, suite_name, component, arch)
+            while Packages.step():
+                package_name = Packages.section.find("Package")
+                dep_list = Packages.section.find("Depends")
                 if dep_list:
                     if package_name == package:
-                        for d in apt_pkg.ParseDepends(dep_list):
+                        for d in apt_pkg.parse_depends(dep_list):
                             for i in d:
                                 depends.add(i[0])
                     else:
-                        for d in apt_pkg.ParseDepends(dep_list):
+                        for d in apt_pkg.parse_depends(dep_list):
                             for i in d:
                                 if i[0] == package:
                                     rdepends.add(package_name)
@@ -87,7 +87,7 @@ def check_override_compliance(package, priority, suite, cnf, session):
                JOIN priority p ON p.id = o.priority
                WHERE s.suite_name = '%s'
                AND o.package in ('%s')""" \
-               % (suite, "', '".join(depends.union(rdepends)))
+               % (suite_name, "', '".join(depends.union(rdepends)))
     packages = session.execute(query)
 
     excuses = []
@@ -136,10 +136,14 @@ def main ():
         utils.fubar("package name is a required argument.")
 
     package = arguments.pop(0)
-    suite = Options["Suite"]
+    suite_name = Options["Suite"]
     if arguments and len(arguments) > 2:
         utils.fubar("Too many arguments")
 
+    suite = get_suite(suite_name, session)
+    if suite is None:
+        utils.fubar("Unknown suite '{0}'".format(suite_name))
+
     if arguments and len(arguments) == 1:
         # Determine if the argument is a priority or a section...
         arg = arguments.pop()
@@ -170,8 +174,8 @@ def main ():
        AND override.section = section.id
        AND override.package = :package
        AND override.suite = suite.id
-       AND suite.suite_name = :suite
-        """ % (eqdsc), {'package': package, 'suite': suite})
+       AND suite.suite_name = :suite_name
+        """ % (eqdsc), {'package': package, 'suite_name': suite_name})
 
         if q.rowcount == 0:
             continue
@@ -227,7 +231,7 @@ def main ():
         utils.fubar("Trying to change priority of a source-only package")
 
     if Options["Check"] and newpriority != oldpriority:
-        check_override_compliance(package, p, suite, cnf, session)
+        check_override_compliance(package, p, suite.archive.path, suite_name, cnf, session)
 
     # If we're in no-action mode
     if Options["No-Action"]:
@@ -266,9 +270,9 @@ def main ():
            SET priority = :newprioid
          WHERE package = :package
            AND override.type != :otypedsc
-           AND suite = (SELECT id FROM suite WHERE suite_name = :suite)""",
+           AND suite = (SELECT id FROM suite WHERE suite_name = :suite_name)""",
            {'newprioid': newprioid, 'package': package,
-            'otypedsc':  dsc_otype_id, 'suite': suite})
+            'otypedsc':  dsc_otype_id, 'suite_name': suite_name})
 
         Logger.log(["changed priority", package, oldpriority, newpriority])
 
@@ -277,9 +281,9 @@ def main ():
         UPDATE override
            SET section = :newsecid
          WHERE package = :package
-           AND suite = (SELECT id FROM suite WHERE suite_name = :suite)""",
+           AND suite = (SELECT id FROM suite WHERE suite_name = :suite_name)""",
            {'newsecid': newsecid, 'package': package,
-            'suite': suite})
+            'suite_name': suite_name})
 
         Logger.log(["changed section", package, oldsection, newsection])
 
@@ -311,7 +315,7 @@ def main ():
         Subst["__SOURCE__"] = package
 
         summary = "Concerning package %s...\n" % (package)
-        summary += "Operating on the %s suite\n" % (suite)
+        summary += "Operating on the %s suite\n" % (suite_name)
         if newpriority != oldpriority:
             summary += "Changed priority from %s to %s\n" % (oldpriority,newpriority)
         if newsection != oldsection:
index 6cb392df7f3ff673c1f9d89d0e0a1001e8d341dd..413dcaabfbfcca0cddc1ac2074711744b24b3371 100755 (executable)
@@ -81,20 +81,23 @@ def main():
 
     depends = {}
     session = DBConn().session()
-    suite = Options['suite']
+    suite_name = Options['suite']
+    suite = get_suite(suite_name, session)
+    if suite is None:
+        utils.fubar("Unknown suite '{0}'".format(suite_name))
     components = get_component_names(session)
-    arches = set([x.arch_string for x in get_suite_architectures(suite)])
+    arches = set([x.arch_string for x in get_suite_architectures(suite_name)])
     arches -= set(['source', 'all'])
     for arch in arches:
         for component in components:
-            Packages = utils.get_packages_from_ftp(cnf['Dir::Root'], suite, component, arch)
-            while Packages.Step():
-                package = Packages.Section.Find('Package')
-                dep_list = Packages.Section.Find('Depends')
+            Packages = utils.get_packages_from_ftp(suite.archive.path, suite_name, component, arch)
+            while Packages.step():
+                package = Packages.section.find('Package')
+                dep_list = Packages.section.find('Depends')
                 if Options['package'] and package != Options['package']:
                     continue
                 if dep_list:
-                    for d in apt_pkg.ParseDepends(dep_list):
+                    for d in apt_pkg.parse_depends(dep_list):
                         for i in d:
                             if not depends.has_key(package):
                                 depends[package] = set()
@@ -110,7 +113,7 @@ def main():
                JOIN bin_associations ba ON ba.bin = b.id
                WHERE s.suite_name = '%s'
                AND ba.suite = s.id
-               AND p.level <> 0""" % suite
+               AND p.level <> 0""" % suite_name
     packages = session.execute(query)
 
     out = {}
diff --git a/dak/process_commands.py b/dak/process_commands.py
new file mode 100644 (file)
index 0000000..c679d9c
--- /dev/null
@@ -0,0 +1,96 @@
+#! /usr/bin/env python
+#
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import apt_pkg
+import datetime
+import os
+import sys
+import time
+
+from daklib.config import Config
+from daklib.command import CommandError, CommandFile
+from daklib.daklog import Logger
+from daklib.fstransactions import FilesystemTransaction
+from daklib.gpg import GpgException
+from daklib.utils import find_next_free
+
+def usage():
+    print """Usage: dak process-commands [-d <directory>] [<command-file>...]
+
+process command files
+"""
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+
+    arguments = [('h', 'help', 'Process-Commands::Options::Help'),
+                 ('d', 'directory', 'Process-Commands::Options::Directory', 'HasArg')]
+
+    cnf = Config()
+    cnf['Process-Commands::Options::Dummy'] = ''
+    filenames = apt_pkg.parse_commandline(cnf.Cnf, arguments, argv)
+    options = cnf.subtree('Process-Commands::Options')
+
+    if 'Help' in options or (len(filenames) == 0 and 'Directory' not in options):
+        usage()
+        sys.exit(0)
+
+    log = Logger('command')
+
+    now = datetime.datetime.now()
+    donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
+    rejectdir = cnf['Dir::Reject']
+
+    if len(filenames) == 0:
+        filenames = [ fn for fn in os.listdir(options['Directory']) if fn.endswith('.dak-commands') ]
+
+    for fn in filenames:
+        basename = os.path.basename(fn)
+        if not fn.endswith('.dak-commands'):
+            log.log(['unexpected filename', basename])
+            continue
+
+        with open(fn, 'r') as fh:
+            data = fh.read()
+
+        try:
+            command = CommandFile(basename, data, log)
+            command.evaluate()
+        except:
+            created = os.stat(fn).st_mtime
+            now = time.time()
+            too_new = (now - created < int(cnf.get('Dinstall::SkipTime', '60')))
+            if too_new:
+                log.log(['skipped (too new)'])
+                continue
+            log.log(['reject', basename])
+            dst = find_next_free(os.path.join(rejectdir, basename))
+        else:
+            log.log(['done', basename])
+            dst = find_next_free(os.path.join(donedir, basename))
+
+        with FilesystemTransaction() as fs:
+            fs.unlink(fn)
+            fs.create(dst, mode=0o644).write(data)
+            fs.commit()
+
+    log.close()
+
+if __name__ == '__main__':
+    main()
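
The loop above files each command file into done/ or reject/ depending on whether CommandFile.evaluate() succeeds, except that files failing verification are skipped while they are younger than Dinstall::SkipTime (60 seconds by default). A standalone sketch of that age check, with a hypothetical path:

    import os
    import time

    SKIP_TIME = 60  # mirrors the Dinstall::SkipTime default used above

    def old_enough_to_reject(path, skip_time=SKIP_TIME):
        # A failed command file is only moved to reject/ once it has been
        # sitting in the queue for at least skip_time seconds; younger files
        # are left in place for a later run.
        age = time.time() - os.stat(path).st_mtime
        return age >= skip_time

    # e.g. old_enough_to_reject('/srv/queue/commands/foo.dak-commands')  # path is made up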
index 11950f14429e41a654167b110822de96fd70e6e7..9f9a39cf2154e4465dddb688798778721c7f235e 100755 (executable)
@@ -53,6 +53,9 @@ import contextlib
 import pwd
 import apt_pkg, apt_inst
 import examine_package
+import subprocess
+import daklib.daksubprocess
+from sqlalchemy import or_
 
 from daklib.dbconn import *
 from daklib.queue import *
@@ -62,7 +65,7 @@ from daklib.regexes import re_no_epoch, re_default_answer, re_isanum, re_package
 from daklib.dak_exceptions import CantOpenError, AlreadyLockedError, CantGetLockError
 from daklib.summarystats import SummaryStats
 from daklib.config import Config
-from daklib.changesutils import *
+from daklib.policy import UploadCopy, PolicyQueueUploadHandler
 
 # Globals
 Options = None
@@ -75,38 +78,6 @@ Sections = None
 ################################################################################
 ################################################################################
 
-def recheck(upload, session):
-# STU: I'm not sure, but I don't thin kthis is necessary any longer:    upload.recheck(session)
-    if len(upload.rejects) > 0:
-        answer = "XXX"
-        if Options["No-Action"] or Options["Automatic"] or Options["Trainee"]:
-            answer = 'S'
-
-        print "REJECT\n%s" % '\n'.join(upload.rejects)
-        prompt = "[R]eject, Skip, Quit ?"
-
-        while prompt.find(answer) == -1:
-            answer = utils.our_raw_input(prompt)
-            m = re_default_answer.match(prompt)
-            if answer == "":
-                answer = m.group(1)
-            answer = answer[:1].upper()
-
-        if answer == 'R':
-            upload.do_reject(manual=0, reject_message='\n'.join(upload.rejects))
-            upload.pkg.remove_known_changes(session=session)
-            session.commit()
-            return 0
-        elif answer == 'S':
-            return 0
-        elif answer == 'Q':
-            end()
-            sys.exit(0)
-
-    return 1
-
-################################################################################
-
 class Section_Completer:
     def __init__ (self, session):
         self.sections = []
@@ -149,32 +120,60 @@ class Priority_Completer:
 
 ################################################################################
 
-def print_new (new, upload, indexed, file=sys.stdout):
-    check_valid(new)
-    broken = False
+def takenover_binaries(upload, missing, session):
+    rows = []
+    binaries = set([x.package for x in upload.binaries])
+    for m in missing:
+        if m['type'] != 'dsc':
+            binaries.remove(m['package'])
+    if binaries:
+        source = upload.binaries[0].source.source
+        suite = upload.target_suite.overridesuite or \
+                    upload.target_suite.suite_name
+        suites = [s[0] for s in session.query(Suite.suite_name).filter \
+                                    (or_(Suite.suite_name == suite,
+                                     Suite.overridesuite == suite)).all()]
+        rows = session.query(DBSource.source, DBBinary.package).distinct(). \
+                             filter(DBBinary.package.in_(binaries)). \
+                             join(DBBinary.source). \
+                             filter(DBSource.source != source). \
+                             join(DBBinary.suites). \
+                             filter(Suite.suite_name.in_(suites)). \
+                             order_by(DBSource.source, DBBinary.package).all()
+    return rows
+
+################################################################################
+
+def print_new (upload, missing, indexed, session, file=sys.stdout):
+    check_valid(missing, session)
     index = 0
-    for pkg in new.keys():
+    for m in missing:
         index += 1
-        section = new[pkg]["section"]
-        priority = new[pkg]["priority"]
-        if new[pkg]["section id"] == -1:
-            section += "[!]"
-            broken = True
-        if new[pkg]["priority id"] == -1:
-            priority += "[!]"
-            broken = True
+        if m['type'] != 'deb':
+            package = '{0}:{1}'.format(m['type'], m['package'])
+        else:
+            package = m['package']
+        section = m['section']
+        priority = m['priority']
         if indexed:
-            line = "(%s): %-20s %-20s %-20s" % (index, pkg, priority, section)
+            line = "(%s): %-20s %-20s %-20s" % (index, package, priority, section)
         else:
-            line = "%-20s %-20s %-20s" % (pkg, priority, section)
-        line = line.strip()+'\n'
-        file.write(line)
-    notes = get_new_comments(upload.pkg.changes.get("source"))
+            line = "%-20s %-20s %-20s" % (package, priority, section)
+        line = line.strip()
+        if not m['valid']:
+            line = line + ' [!]'
+        print >>file, line
+    takenover = takenover_binaries(upload, missing, session)
+    if takenover:
+        print '\n\nBINARIES TAKEN OVER\n'
+        for t in takenover:
+            print '%s: %s' % (t[0], t[1])
+    notes = get_new_comments(upload.policy_queue, upload.changes.source)
     for note in notes:
         print "\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s" \
               % (note.author, note.version, note.notedate, note.comment)
         print "-" * 72
-    return broken, len(notes) > 0
+    return len(notes) > 0
 
 ################################################################################
 
@@ -187,11 +186,11 @@ def index_range (index):
 ################################################################################
 ################################################################################
 
-def edit_new (new, upload):
+def edit_new (overrides, upload, session):
     # Write the current data to a temporary file
     (fd, temp_filename) = utils.temp_filename()
     temp_file = os.fdopen(fd, 'w')
-    print_new (new, upload, indexed=0, file=temp_file)
+    print_new (upload, overrides, indexed=0, session=session, file=temp_file)
     temp_file.close()
     # Spawn an editor on that file
     editor = os.environ.get("EDITOR","vi")
@@ -203,38 +202,48 @@ def edit_new (new, upload):
     lines = temp_file.readlines()
     temp_file.close()
     os.unlink(temp_filename)
+
+    overrides_map = dict([ ((o['type'], o['package']), o) for o in overrides ])
+    new_overrides = []
     # Parse the new data
     for line in lines:
         line = line.strip()
-        if line == "":
+        if line == "" or line[0] == '#':
             continue
         s = line.split()
         # Pad the list if necessary
         s[len(s):3] = [None] * (3-len(s))
         (pkg, priority, section) = s[:3]
-        if not new.has_key(pkg):
+        if pkg.find(':') != -1:
+            type, pkg = pkg.split(':', 1)
+        else:
+            type = 'deb'
+        if (type, pkg) not in overrides_map:
             utils.warn("Ignoring unknown package '%s'" % (pkg))
         else:
-            # Strip off any invalid markers, print_new will readd them.
-            if section.endswith("[!]"):
-                section = section[:-3]
-            if priority.endswith("[!]"):
-                priority = priority[:-3]
-            for f in new[pkg]["files"]:
-                upload.pkg.files[f]["section"] = section
-                upload.pkg.files[f]["priority"] = priority
-            new[pkg]["section"] = section
-            new[pkg]["priority"] = priority
+            if section.find('/') != -1:
+                component = section.split('/', 1)[0]
+            else:
+                component = 'main'
+            new_overrides.append(dict(
+                    package=pkg,
+                    type=type,
+                    section=section,
+                    component=component,
+                    priority=priority,
+                    ))
+    return new_overrides
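
Each entry returned by edit_new() (and later consumed by add_overrides() below) is a plain dict; the component is taken from the section prefix when one is given, 'main' otherwise. The values in this sketch are made-up examples:

    example_override = dict(
        package='hello',          # hypothetical package name
        type='deb',               # override type, e.g. 'deb' or 'dsc'
        section='contrib/utils',
        component='contrib',      # derived from the section prefix, 'main' otherwise
        priority='optional',
    )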
 
 ################################################################################
 
 def edit_index (new, upload, index):
+    package = new[index]['package']
     priority = new[index]["priority"]
     section = new[index]["section"]
     ftype = new[index]["type"]
     done = 0
     while not done:
-        print "\t".join([index, priority, section])
+        print "\t".join([package, priority, section])
 
         answer = "XXX"
         if ftype != "dsc":
@@ -286,11 +295,14 @@ def edit_index (new, upload, index):
         # Reset the readline completer
         readline.set_completer(None)
 
-    for f in new[index]["files"]:
-        upload.pkg.files[f]["section"] = section
-        upload.pkg.files[f]["priority"] = priority
     new[index]["priority"] = priority
     new[index]["section"] = section
+    if section.find('/') != -1:
+        component = section.split('/', 1)[0]
+    else:
+        component = 'main'
+    new[index]['component'] = component
+
     return new
 
 ################################################################################
@@ -299,14 +311,8 @@ def edit_overrides (new, upload, session):
     print
     done = 0
     while not done:
-        print_new (new, upload, indexed=1)
-        new_index = {}
-        index = 0
-        for i in new.keys():
-            index += 1
-            new_index[index] = i
-
-        prompt = "(%s) edit override <n>, Editor, Done ? " % (index_range(index))
+        print_new (upload, new, indexed=1, session=session)
+        prompt = "edit override <n>, Editor, Done ? "
 
         got_answer = 0
         while not got_answer:
@@ -317,132 +323,226 @@ def edit_overrides (new, upload, session):
                 got_answer = 1
             elif re_isanum.match (answer):
                 answer = int(answer)
-                if (answer < 1) or (answer > index):
-                    print "%s is not a valid index (%s).  Please retry." % (answer, index_range(index))
+                if answer < 1 or answer > len(new):
+                    print "{0} is not a valid index.  Please retry.".format(answer)
                 else:
                     got_answer = 1
 
         if answer == 'E':
-            edit_new(new, upload)
+            new = edit_new(new, upload, session)
         elif answer == 'D':
             done = 1
         else:
-            edit_index (new, upload, new_index[answer])
+            edit_index (new, upload, answer - 1)
 
     return new
 
 
 ################################################################################
 
-def check_pkg (upload):
+def check_pkg (upload, upload_copy, session):
+    missing = []
     save_stdout = sys.stdout
+    changes = os.path.join(upload_copy.directory, upload.changes.changesname)
+    suite_name = upload.target_suite.suite_name
+    handler = PolicyQueueUploadHandler(upload, session)
+    missing = [(m['type'], m["package"]) for m in handler.missing_overrides(hints=missing)]
+
+    less_cmd = ("less", "-R", "-")
+    less_process = daklib.daksubprocess.Popen(less_cmd, bufsize=0, stdin=subprocess.PIPE)
     try:
-        sys.stdout = os.popen("less -R -", 'w', 0)
-        changes = utils.parse_changes (upload.pkg.changes_file)
-        print examine_package.display_changes(changes['distribution'], upload.pkg.changes_file)
-        files = upload.pkg.files
-        for f in files.keys():
-            if files[f].has_key("new"):
-                ftype = files[f]["type"]
-                if ftype == "deb":
-                    print examine_package.check_deb(changes['distribution'], f)
-                elif ftype == "dsc":
-                    print examine_package.check_dsc(changes['distribution'], f)
+        sys.stdout = less_process.stdin
+        print examine_package.display_changes(suite_name, changes)
+
+        source = upload.source
+        if source is not None:
+            source_file = os.path.join(upload_copy.directory, os.path.basename(source.poolfile.filename))
+            print examine_package.check_dsc(suite_name, source_file)
+
+        for binary in upload.binaries:
+            binary_file = os.path.join(upload_copy.directory, os.path.basename(binary.poolfile.filename))
+            examined = examine_package.check_deb(suite_name, binary_file)
+            # We always need to call check_deb to display package relations for every binary,
+            # but we print its output only if new overrides are being added.
+            if ("deb", binary.package) in missing:
+                print examined
+
         print examine_package.output_package_relations()
+        less_process.stdin.close()
     except IOError as e:
         if e.errno == errno.EPIPE:
             utils.warn("[examine_package] Caught EPIPE; skipping.")
         else:
-            sys.stdout = save_stdout
             raise
     except KeyboardInterrupt:
         utils.warn("[examine_package] Caught C-c; skipping.")
-    sys.stdout = save_stdout
+    finally:
+        less_process.wait()
+        sys.stdout = save_stdout
 
 ################################################################################
 
 ## FIXME: horribly Debian specific
 
-def do_bxa_notification(upload):
-    files = upload.pkg.files
+def do_bxa_notification(new, upload, session):
+    cnf = Config()
+
+    new = set([ o['package'] for o in new if o['type'] == 'deb' ])
+    if len(new) == 0:
+        return
+
+    key = session.query(MetadataKey).filter_by(key='Description').one()
     summary = ""
-    for f in files.keys():
-        if files[f]["type"] == "deb":
-            control = apt_pkg.TagSection(utils.deb_extract_control(utils.open_file(f)))
-            summary += "\n"
-            summary += "Package: %s\n" % (control.find("Package"))
-            summary += "Description: %s\n" % (control.find("Description"))
-    upload.Subst["__BINARY_DESCRIPTIONS__"] = summary
-    bxa_mail = utils.TemplateSubst(upload.Subst,Config()["Dir::Templates"]+"/process-new.bxa_notification")
+    for binary in upload.binaries:
+        if binary.package not in new:
+            continue
+        description = session.query(BinaryMetadata).filter_by(binary=binary, key=key).one().value
+        summary += "\n"
+        summary += "Package: {0}\n".format(binary.package)
+        summary += "Description: {0}\n".format(description)
+
+    subst = {
+        '__DISTRO__': cnf['Dinstall::MyDistribution'],
+        '__BCC__': 'X-DAK: dak process-new',
+        '__BINARY_DESCRIPTIONS__': summary,
+        }
+
+    bxa_mail = utils.TemplateSubst(subst,os.path.join(cnf["Dir::Templates"], "process-new.bxa_notification"))
     utils.send_mail(bxa_mail)
 
 ################################################################################
 
-def add_overrides (new, upload, session):
-    changes = upload.pkg.changes
-    files = upload.pkg.files
-    srcpkg = changes.get("source")
-
-    for suite in changes["suite"].keys():
-        suite_id = get_suite(suite).suite_id
-        for pkg in new.keys():
-            component_id = get_component(new[pkg]["component"]).component_id
-            type_id = get_override_type(new[pkg]["type"]).overridetype_id
-            priority_id = new[pkg]["priority id"]
-            section_id = new[pkg]["section id"]
-            Logger.log(["%s (%s) overrides" % (pkg, srcpkg), suite, new[pkg]["component"], new[pkg]["type"], new[pkg]["priority"], new[pkg]["section"]])
-            session.execute("INSERT INTO override (suite, component, type, package, priority, section, maintainer) VALUES (:sid, :cid, :tid, :pkg, :pid, :sectid, '')",
-                            { 'sid': suite_id, 'cid': component_id, 'tid':type_id, 'pkg': pkg, 'pid': priority_id, 'sectid': section_id})
-            for f in new[pkg]["files"]:
-                if files[f].has_key("new"):
-                    del files[f]["new"]
-            del new[pkg]
+def add_overrides (new_overrides, suite, session):
+    if suite.overridesuite is not None:
+        suite = session.query(Suite).filter_by(suite_name=suite.overridesuite).one()
+
+    for override in new_overrides:
+        package = override['package']
+        priority = session.query(Priority).filter_by(priority=override['priority']).first()
+        section = session.query(Section).filter_by(section=override['section']).first()
+        component = get_mapped_component(override['component'], session)
+        overridetype = session.query(OverrideType).filter_by(overridetype=override['type']).one()
+
+        if priority is None:
+            raise Exception('Invalid priority {0} for package {1}'.format(override['priority'], package))
+        if section is None:
+            raise Exception('Invalid section {0} for package {1}'.format(override['section'], package))
+        if component is None:
+            raise Exception('Invalid component {0} for package {1}'.format(override['component'], package))
+
+        o = Override(package=package, suite=suite, component=component, priority=priority, section=section, overridetype=overridetype)
+        session.add(o)
 
     session.commit()
 
-    if Config().find_b("Dinstall::BXANotify"):
-        do_bxa_notification(upload)
+################################################################################
+
+def run_user_inspect_command(upload, upload_copy):
+    command = os.environ.get('DAK_INSPECT_UPLOAD')
+    if command is None:
+        return
+
+    directory = upload_copy.directory
+    if upload.source:
+        dsc = os.path.basename(upload.source.poolfile.filename)
+    else:
+        dsc = ''
+    changes = upload.changes.changesname
+
+    shell_command = command.format(
+            directory=directory,
+            dsc=dsc,
+            changes=changes,
+            )
+
+    daklib.daksubprocess.check_call(shell_command, shell=True)
 
 ################################################################################
 
-def do_new(upload, session):
-    print "NEW\n"
-    files = upload.pkg.files
-    upload.check_files(not Options["No-Action"])
-    changes = upload.pkg.changes
-    cnf = Config()
+def get_reject_reason(reason=''):
+    """get reason for rejection
 
-    # Check for a valid distribution
-    upload.check_distributions()
+    @rtype:  str
+    @return: string giving the reason for the rejection or C{None} if the
+             rejection should be cancelled
+    """
+    answer = 'E'
+    if Options['Automatic']:
+        answer = 'R'
+
+    while answer == 'E':
+        reason = utils.call_editor(reason)
+        print "Reject message:"
+        print utils.prefix_multi_line_string(reason, "  ", include_blank_lines=1)
+        prompt = "[R]eject, Edit, Abandon, Quit ?"
+        answer = "XXX"
+        while prompt.find(answer) == -1:
+            answer = utils.our_raw_input(prompt)
+            m = re_default_answer.search(prompt)
+            if answer == "":
+                answer = m.group(1)
+            answer = answer[:1].upper()
 
-    # Make a copy of distribution we can happily trample on
-    changes["suite"] = copy.copy(changes["distribution"])
+    if answer == 'Q':
+        sys.exit(0)
 
-    # Try to get an included dsc
-    dsc = None
-    (status, _) = upload.load_dsc()
-    if status:
-        dsc = upload.pkg.dsc
+    if answer == 'R':
+        return reason
+    return None
+
+################################################################################
+
+def do_new(upload, upload_copy, handler, session):
+    cnf = Config()
+
+    run_user_inspect_command(upload, upload_copy)
 
     # The main NEW processing loop
-    done = 0
-    new = {}
+    done = False
+    missing = []
     while not done:
-        # Find out what's new
-        new, byhand = determine_new(upload.pkg.changes_file, changes, files, dsc=dsc, session=session, new=new)
+        queuedir = upload.policy_queue.path
+        byhand = upload.byhand
+
+        missing = handler.missing_overrides(hints=missing)
+        broken = not check_valid(missing, session)
 
-        if not new:
-            break
+        changesname = os.path.basename(upload.changes.changesname)
+
+        print
+        print changesname
+        print "-" * len(changesname)
+        print
+        print "   Target:     {0}".format(upload.target_suite.suite_name)
+        print "   Changed-By: {0}".format(upload.changes.changedby)
+        print
+
+        #if len(byhand) == 0 and len(missing) == 0:
+        #    break
+
+        if missing:
+            print "NEW\n"
 
         answer = "XXX"
         if Options["No-Action"] or Options["Automatic"]:
             answer = 'S'
 
-        (broken, note) = print_new(new, upload, indexed=0)
+        note = print_new(upload, missing, indexed=0, session=session)
         prompt = ""
 
-        if not broken and not note:
-            prompt = "Add overrides, "
+        has_unprocessed_byhand = False
+        for f in byhand:
+            path = os.path.join(queuedir, f.filename)
+            if not f.processed and os.path.exists(path):
+                print "W: {0} still present; please process byhand components and try again".format(f.filename)
+                has_unprocessed_byhand = True
+
+        if not has_unprocessed_byhand and not broken and not note:
+            if len(missing) == 0:
+                prompt = "Accept, "
+                answer = 'A'
+            else:
+                prompt = "Add overrides, "
         if broken:
             print "W: [!] marked entries must be fixed before package can be processed."
         if note:
@@ -463,53 +563,57 @@ def do_new(upload, session):
             continue
 
         if answer == 'A' and not Options["Trainee"]:
-            try:
-                check_daily_lock()
-                done = add_overrides (new, upload, session)
-                new_accept(upload, Options["No-Action"], session)
-                Logger.log(["NEW ACCEPT: %s" % (upload.pkg.changes_file)])
-            except CantGetLockError:
-                print "Hello? Operator! Give me the number for 911!"
-                print "Dinstall in the locked area, cant process packages, come back later"
+            add_overrides(missing, upload.target_suite, session)
+            if Config().find_b("Dinstall::BXANotify"):
+                do_bxa_notification(missing, upload, session)
+            handler.accept()
+            done = True
+            Logger.log(["NEW ACCEPT", upload.changes.changesname])
         elif answer == 'C':
-            check_pkg(upload)
+            check_pkg(upload, upload_copy, session)
         elif answer == 'E' and not Options["Trainee"]:
-            new = edit_overrides (new, upload, session)
+            missing = edit_overrides (missing, upload, session)
         elif answer == 'M' and not Options["Trainee"]:
-            aborted = upload.do_reject(manual=1,
-                                       reject_message=Options["Manual-Reject"],
-                                       notes=get_new_comments(changes.get("source", ""), session=session))
-            if not aborted:
-                upload.pkg.remove_known_changes(session=session)
-                session.commit()
-                Logger.log(["NEW REJECT: %s" % (upload.pkg.changes_file)])
-                done = 1
+            reason = Options.get('Manual-Reject', '') + "\n"
+            reason = reason + "\n\n=====\n\n".join([n.comment for n in get_new_comments(upload.policy_queue, upload.changes.source, session=session)])
+            reason = get_reject_reason(reason)
+            if reason is not None:
+                Logger.log(["NEW REJECT", upload.changes.changesname])
+                handler.reject(reason)
+                done = True
         elif answer == 'N':
-            edit_note(get_new_comments(changes.get("source", ""), session=session),
-                      upload, session, bool(Options["Trainee"]))
+            if edit_note(get_new_comments(upload.policy_queue, upload.changes.source, session=session),
+                         upload, session, bool(Options["Trainee"])) == 0:
+                end()
+                sys.exit(0)
         elif answer == 'P' and not Options["Trainee"]:
-            prod_maintainer(get_new_comments(changes.get("source", ""), session=session),
-                            upload)
-            Logger.log(["NEW PROD: %s" % (upload.pkg.changes_file)])
+            if prod_maintainer(get_new_comments(upload.policy_queue, upload.changes.source, session=session),
+                               upload) == 0:
+                end()
+                sys.exit(0)
+            Logger.log(["NEW PROD", upload.changes.changesname])
         elif answer == 'R' and not Options["Trainee"]:
             confirm = utils.our_raw_input("Really clear note (y/N)? ").lower()
             if confirm == "y":
-                for c in get_new_comments(changes.get("source", ""), changes.get("version", ""), session=session):
+                for c in get_new_comments(upload.policy_queue, upload.changes.source, upload.changes.version, session=session):
                     session.delete(c)
                 session.commit()
         elif answer == 'O' and not Options["Trainee"]:
             confirm = utils.our_raw_input("Really clear all notes (y/N)? ").lower()
             if confirm == "y":
-                for c in get_new_comments(changes.get("source", ""), session=session):
+                for c in get_new_comments(upload.policy_queue, upload.changes.source, session=session):
                     session.delete(c)
                 session.commit()
 
         elif answer == 'S':
-            done = 1
+            done = True
         elif answer == 'Q':
             end()
             sys.exit(0)
 
+        if handler.get_action():
+            print "PENDING %s\n" % handler.get_action()
+
 ################################################################################
 ################################################################################
 ################################################################################
@@ -522,104 +626,35 @@ def usage (exit_code=0):
   -h, --help                show this help and exit.
   -m, --manual-reject=MSG   manual reject with `msg'
   -n, --no-action           don't do anything
+  -q, --queue=QUEUE         operate on a different queue
   -t, --trainee             FTP Trainee mode
-  -V, --version             display the version number and exit"""
-    sys.exit(exit_code)
+  -V, --version             display the version number and exit
 
-################################################################################
+ENVIRONMENT VARIABLES
 
-def do_byhand(upload, session):
-    done = 0
-    while not done:
-        files = upload.pkg.files
-        will_install = True
-        byhand = []
-
-        for f in files.keys():
-            if files[f]["section"] == "byhand":
-                if os.path.exists(f):
-                    print "W: %s still present; please process byhand components and try again." % (f)
-                    will_install = False
-                else:
-                    byhand.append(f)
+  DAK_INSPECT_UPLOAD: shell command to run to inspect a package
+      The command is automatically run in a shell when an upload
+      is checked.  The following substitutions are available:
 
-        answer = "XXXX"
-        if Options["No-Action"]:
-            answer = "S"
-        if will_install:
-            if Options["Automatic"] and not Options["No-Action"]:
-                answer = 'A'
-            prompt = "[A]ccept, Manual reject, Skip, Quit ?"
-        else:
-            prompt = "Manual reject, [S]kip, Quit ?"
+        {directory}: directory the upload is contained in
+        {dsc}:       name of the included dsc or the empty string
+        {changes}:   name of the changes file
 
-        while prompt.find(answer) == -1:
-            answer = utils.our_raw_input(prompt)
-            m = re_default_answer.search(prompt)
-            if answer == "":
-                answer = m.group(1)
-            answer = answer[:1].upper()
+      Note that Python's 'format' method is used to format the command.
 
-        if answer == 'A':
-            dbchg = get_dbchange(upload.pkg.changes_file, session)
-            if dbchg is None:
-                print "Warning: cannot find changes file in database; can't process BYHAND"
-            else:
-                try:
-                    check_daily_lock()
-                    done = 1
-                    for b in byhand:
-                        # Find the file entry in the database
-                        found = False
-                        for f in dbchg.files:
-                            if f.filename == b:
-                                found = True
-                                f.processed = True
-                                break
-
-                        if not found:
-                            print "Warning: Couldn't find BYHAND item %s in the database to mark it processed" % b
-
-                    session.commit()
-                    Logger.log(["BYHAND ACCEPT: %s" % (upload.pkg.changes_file)])
-                except CantGetLockError:
-                    print "Hello? Operator! Give me the number for 911!"
-                    print "Dinstall in the locked area, cant process packages, come back later"
-        elif answer == 'M':
-            aborted = upload.do_reject(manual=1,
-                                       reject_message=Options["Manual-Reject"],
-                                       notes=get_new_comments(changes.get("source", ""), session=session))
-            if not aborted:
-                upload.pkg.remove_known_changes(session=session)
-                session.commit()
-                Logger.log(["BYHAND REJECT: %s" % (upload.pkg.changes_file)])
-                done = 1
-        elif answer == 'S':
-            done = 1
-        elif answer == 'Q':
-            end()
-            sys.exit(0)
-
-################################################################################
+      Example: run mc in a tmux session to inspect the upload
 
-def check_daily_lock():
-    """
-    Raises CantGetLockError if the dinstall daily.lock exists.
-    """
+      export DAK_INSPECT_UPLOAD='tmux new-session -d -s process-new 2>/dev/null; tmux new-window -n "{changes}" -t process-new:0 -k "cd {directory}; mc"'
 
-    cnf = Config()
-    try:
-        lockfile = cnf.get("Process-New::DinstallLockFile",
-                           os.path.join(cnf['Dir::Lock'], 'processnew.lock'))
+      and run
 
-        os.open(lockfile,
-                os.O_RDONLY | os.O_CREAT | os.O_EXCL)
-    except OSError as e:
-        if e.errno == errno.EEXIST or e.errno == errno.EACCES:
-            raise CantGetLockError
+      tmux attach -t process-new
 
-    os.unlink(lockfile)
+      in a separate terminal session.
+"""
+    sys.exit(exit_code)
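
The DAK_INSPECT_UPLOAD value documented above is expanded with Python's str.format(), the same way run_user_inspect_command() above does before running it in a shell. A minimal illustration with made-up values:

    command = 'cd {directory} && less {changes} {dsc}'    # hypothetical inspect command
    print command.format(
        directory='/srv/queue/new/tmp-upload',            # UploadCopy working directory (made up)
        dsc='hello_1.0-1.dsc',
        changes='hello_1.0-1_amd64.changes',
    )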
 
+################################################################################
 
 @contextlib.contextmanager
 def lock_package(package):
@@ -646,97 +681,84 @@ def lock_package(package):
     finally:
         os.unlink(path)
 
-class clean_holding(object):
-    def __init__(self,pkg):
-        self.pkg = pkg
-
-    def __enter__(self):
-        pass
-
-    def __exit__(self, type, value, traceback):
-        h = Holding()
-
-        for f in self.pkg.files.keys():
-            if os.path.exists(os.path.join(h.holding_dir, f)):
-                os.unlink(os.path.join(h.holding_dir, f))
-
-
-def do_pkg(changes_full_path, session):
-    changes_dir = os.path.dirname(changes_full_path)
-    changes_file = os.path.basename(changes_full_path)
-
-    u = Upload()
-    u.pkg.changes_file = changes_file
-    (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changes_file)
-    u.load_changes(changes_file)
-    u.pkg.directory = changes_dir
-    u.update_subst()
-    u.logger = Logger
-    origchanges = os.path.abspath(u.pkg.changes_file)
-
+def do_pkg(upload, session):
     # Try to get an included dsc
-    dsc = None
-    (status, _) = u.load_dsc()
-    if status:
-        dsc = u.pkg.dsc
+    dsc = upload.source
 
     cnf = Config()
-    bcc = "X-DAK: dak process-new"
-    if cnf.has_key("Dinstall::Bcc"):
-        u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
-    else:
-        u.Subst["__BCC__"] = bcc
-
-    files = u.pkg.files
-    u.check_distributions()
-    for deb_filename, f in files.items():
-        if deb_filename.endswith(".udeb") or deb_filename.endswith(".deb"):
-            u.binary_file_checks(deb_filename, session)
-            u.check_binary_against_db(deb_filename, session)
-        else:
-            u.source_file_checks(deb_filename, session)
-            u.check_source_against_db(deb_filename, session)
+    group = cnf.get('Dinstall::UnprivGroup') or None
 
-        u.pkg.changes["suite"] = copy.copy(u.pkg.changes["distribution"])
+    #bcc = "X-DAK: dak process-new"
+    #if cnf.has_key("Dinstall::Bcc"):
+    #    u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
+    #else:
+    #    u.Subst["__BCC__"] = bcc
 
     try:
-        with lock_package(u.pkg.changes["source"]):
-            with clean_holding(u.pkg):
-                if not recheck(u, session):
-                    return
-
-                new, byhand = determine_new(u.pkg.changes_file, u.pkg.changes, files, dsc=dsc, session=session)
-                if byhand:
-                    do_byhand(u, session)
-                elif new:
-                    do_new(u, session)
-                else:
-                    try:
-                        check_daily_lock()
-                        new_accept(u, Options["No-Action"], session)
-                    except CantGetLockError:
-                        print "Hello? Operator! Give me the number for 911!"
-                        print "Dinstall in the locked area, cant process packages, come back later"
-
+      with lock_package(upload.changes.source):
+       with UploadCopy(upload, group=group) as upload_copy:
+        handler = PolicyQueueUploadHandler(upload, session)
+        if handler.get_action() is not None:
+            print "PENDING %s\n" % handler.get_action()
+            return
+
+        do_new(upload, upload_copy, handler, session)
     except AlreadyLockedError as e:
         print "Seems to be locked by %s already, skipping..." % (e)
 
-def show_new_comments(changes_files, session):
-    sources = set()
+def show_new_comments(uploads, session):
+    sources = [ upload.changes.source for upload in uploads ]
+    if len(sources) == 0:
+        return
+
     query = """SELECT package, version, comment, author
                FROM new_comments
-               WHERE package IN ('"""
-
-    for changes in changes_files:
-        sources.add(os.path.basename(changes).split("_")[0])
+               WHERE package IN :sources
+               ORDER BY package, version"""
 
-    query += "%s') ORDER BY package, version" % "', '".join(sources)
-    r = session.execute(query)
+    r = session.execute(query, params=dict(sources=tuple(sources)))
 
     for i in r:
         print "%s_%s\n%s\n(%s)\n\n\n" % (i[0], i[1], i[2], i[3])
 
-    session.commit()
+    session.rollback()
+
+################################################################################
+
+def sort_uploads(new_queue, uploads, session, nobinaries=False):
+    sources = {}
+    sorteduploads = []
+    suitesrc = [s.source for s in session.query(DBSource.source). \
+      filter(DBSource.suites.any(Suite.suite_name.in_(['unstable', 'experimental'])))]
+    comments = [p.package for p in session.query(NewComment.package). \
+      filter_by(trainee=False, policy_queue=new_queue).distinct()]
+    for upload in uploads:
+        source = upload.changes.source
+        if not source in sources:
+            sources[source] = []
+        sources[source].append({'upload': upload,
+                                'date': upload.changes.created,
+                                'stack': 1,
+                                'binary': True if source in suitesrc else False,
+                                'comments': True if source in comments else False})
+    for src in sources:
+        if len(sources[src]) > 1:
+            changes = sources[src]
+            firstseen = sorted(changes, key=lambda k: (k['date']))[0]['date']
+            changes.sort(key=lambda item:item['date'])
+            for i in range (0, len(changes)):
+                changes[i]['date'] = firstseen
+                changes[i]['stack'] = i + 1
+        sorteduploads += sources[src]
+    if nobinaries:
+        sorteduploads = [u["upload"] for u in sorted(sorteduploads,
+                         key=lambda k: (k["comments"], k["binary"],
+                         k["date"], -k["stack"]))]
+    else:
+        sorteduploads = [u["upload"] for u in sorted(sorteduploads,
+                         key=lambda k: (k["comments"], -k["binary"],
+                         k["date"], -k["stack"]))]
+    return sorteduploads
 
 ################################################################################
 
@@ -768,18 +790,22 @@ def main():
                  ('h',"help","Process-New::Options::Help"),
                  ('m',"manual-reject","Process-New::Options::Manual-Reject", "HasArg"),
                  ('t',"trainee","Process-New::Options::Trainee"),
+                 ('q','queue','Process-New::Options::Queue', 'HasArg'),
                  ('n',"no-action","Process-New::Options::No-Action")]
 
+    changes_files = apt_pkg.parse_commandline(cnf.Cnf,Arguments,sys.argv)
+
     for i in ["automatic", "no-binaries", "comments", "help", "manual-reject", "no-action", "version", "trainee"]:
         if not cnf.has_key("Process-New::Options::%s" % (i)):
             cnf["Process-New::Options::%s" % (i)] = ""
 
-    changes_files = apt_pkg.parse_commandline(cnf.Cnf,Arguments,sys.argv)
+    queue_name = cnf.get('Process-New::Options::Queue', 'new')
+    new_queue = session.query(PolicyQueue).filter_by(queue_name=queue_name).one()
     if len(changes_files) == 0:
-        new_queue = get_policy_queue('new', session );
-        changes_paths = [ os.path.join(new_queue.path, j) for j in utils.get_changes_files(new_queue.path) ]
+        uploads = new_queue.uploads
     else:
-        changes_paths = [ os.path.abspath(j) for j in changes_files ]
+        uploads = session.query(PolicyQueueUpload).filter_by(policy_queue=new_queue) \
+            .join(DBChange).filter(DBChange.changesname.in_(changes_files)).all()
 
     Options = cnf.subtree("Process-New::Options")
 
@@ -796,20 +822,15 @@ def main():
     Priorities = Priority_Completer(session)
     readline.parse_and_bind("tab: complete")
 
-    if len(changes_paths) > 1:
+    if len(uploads) > 1:
         sys.stderr.write("Sorting changes...\n")
-    changes_files = sort_changes(changes_paths, session, Options["No-Binaries"])
+        uploads = sort_uploads(new_queue, uploads, session, Options["No-Binaries"])
 
     if Options["Comments"]:
-        show_new_comments(changes_files, session)
+        show_new_comments(uploads, session)
     else:
-        for changes_file in changes_files:
-            changes_file = utils.validate_changes_file_arg(changes_file, 0)
-            if not changes_file:
-                continue
-            print "\n" + os.path.basename(changes_file)
-
-            do_pkg (changes_file, session)
+        for upload in uploads:
+            do_pkg (upload, session)
 
     end()
 
index e401a3bd630956f1c1397adc8b6d0fbda2297fb5..7dd55a8300d3020b21bffa25597b76ad5a85604d 100755 (executable)
 ################################################################################
 
 import os
-import copy
+import datetime
+import re
 import sys
+import traceback
 import apt_pkg
 
 from daklib.dbconn import *
-from daklib.queue import *
 from daklib import daklog
 from daklib import utils
 from daklib.dak_exceptions import CantOpenError, AlreadyLockedError, CantGetLockError
 from daklib.config import Config
-from daklib.changesutils import *
+from daklib.archive import ArchiveTransaction
+from daklib.urgencylog import UrgencyLog
+
+import daklib.announce
 
 # Globals
 Options = None
@@ -50,66 +54,326 @@ Logger = None
 
 ################################################################################
 
-def do_comments(dir, srcqueue, opref, npref, line, fn, session):
+def do_comments(dir, srcqueue, opref, npref, line, fn, transaction):
+    session = transaction.session
+    actions = []
     for comm in [ x for x in os.listdir(dir) if x.startswith(opref) ]:
-        lines = open("%s/%s" % (dir, comm)).readlines()
+        lines = open(os.path.join(dir, comm)).readlines()
         if len(lines) == 0 or lines[0] != line + "\n": continue
-        changes_files = [ x for x in os.listdir(".") if x.startswith(comm[len(opref):]+"_")
-                                and x.endswith(".changes") ]
-        changes_files = sort_changes(changes_files, session)
-        for f in changes_files:
-            print "Processing changes file: %s" % f
-            f = utils.validate_changes_file_arg(f, 0)
-            if not f:
-                print "Couldn't validate changes file %s" % f
-                continue
-            fn(f, srcqueue, "".join(lines[1:]), session)
-
-        if opref != npref and not Options["No-Action"]:
+
+        # If the ACCEPT includes a _<arch> we only accept that .changes.
+        # Otherwise we accept all .changes that start with the given prefix
+        changes_prefix = comm[len(opref):]
+        if changes_prefix.count('_') < 2:
+            changes_prefix = changes_prefix + '_'
+        else:
+            changes_prefix = changes_prefix + '.changes'
+
+        # We need to escape "_" as we use it with the LIKE operator (via the
+        # SQLA startswith) later.
+        changes_prefix = changes_prefix.replace("_", r"\_")
+
+        uploads = session.query(PolicyQueueUpload).filter_by(policy_queue=srcqueue) \
+            .join(PolicyQueueUpload.changes).filter(DBChange.changesname.startswith(changes_prefix)) \
+            .order_by(PolicyQueueUpload.source_id)
+        reason = "".join(lines[1:])
+        actions.extend((u, reason) for u in uploads)
+
+        if opref != npref:
             newcomm = npref + comm[len(opref):]
-            os.rename("%s/%s" % (dir, comm), "%s/%s" % (dir, newcomm))
+            newcomm = utils.find_next_free(os.path.join(dir, newcomm))
+            transaction.fs.move(os.path.join(dir, comm), newcomm)
 
-################################################################################
+    actions.sort()
 
-def comment_accept(changes_file, srcqueue, comments, session):
-    u = Upload()
-    u.pkg.changes_file = changes_file
-    u.load_changes(changes_file)
-    u.update_subst()
+    for u, reason in actions:
+        print("Processing changes file: {0}".format(u.changes.changesname))
+        fn(u, srcqueue, reason, transaction)
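
A small worked example of the prefix handling above (file names are hypothetical): a COMMENTS file called ACCEPT.hello_1.0-1 selects every .changes starting with hello_1.0-1_, while ACCEPT.hello_1.0-1_amd64 matches only hello_1.0-1_amd64.changes; the underscores are escaped because startswith() ends up in an SQL LIKE pattern.

    def changes_prefix_for(comment_filename, opref='ACCEPT.'):
        # Mirrors the prefix logic in do_comments() above.
        prefix = comment_filename[len(opref):]
        if prefix.count('_') < 2:
            prefix += '_'
        else:
            prefix += '.changes'
        return prefix.replace('_', r'\_')

    # changes_prefix_for('ACCEPT.hello_1.0-1')       -> 'hello\_1.0-1\_'
    # changes_prefix_for('ACCEPT.hello_1.0-1_amd64') -> 'hello\_1.0-1\_amd64.changes'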
 
-    if not Options["No-Action"]:
-        destqueue = get_policy_queue('newstage', session)
-        if changes_to_queue(u, srcqueue, destqueue, session):
-            print "  ACCEPT"
-            Logger.log(["Policy Queue ACCEPT: %s:  %s" % (srcqueue.queue_name, u.pkg.changes_file)])
-        else:
-            print "E: Failed to migrate %s" % u.pkg.changes_file
+################################################################################
+
+def try_or_reject(function):
+    def wrapper(upload, srcqueue, comments, transaction):
+        try:
+            function(upload, srcqueue, comments, transaction)
+        except Exception as e:
+            comments = 'An exception was raised while processing the package:\n{0}\nOriginal comments:\n{1}'.format(traceback.format_exc(), comments)
+            try:
+                transaction.rollback()
+                real_comment_reject(upload, srcqueue, comments, transaction)
+            except Exception as e:
+                comments = 'In addition an exception was raised while trying to reject the upload:\n{0}\nOriginal rejection:\n{1}'.format(traceback.format_exc(), comments)
+                transaction.rollback()
+                real_comment_reject(upload, srcqueue, comments, transaction, notify=False)
+        if not Options['No-Action']:
+            transaction.commit()
+    return wrapper
 
 ################################################################################
 
-def comment_reject(changes_file, srcqueue, comments, session):
-    u = Upload()
-    u.pkg.changes_file = changes_file
-    u.load_changes(changes_file)
-    u.update_subst()
+@try_or_reject
+def comment_accept(upload, srcqueue, comments, transaction):
+    for byhand in upload.byhand:
+        path = os.path.join(srcqueue.path, byhand.filename)
+        if os.path.exists(path):
+            raise Exception('E: cannot ACCEPT upload with unprocessed byhand file {0}'.format(byhand.filename))
+
+    cnf = Config()
+
+    fs = transaction.fs
+    session = transaction.session
+    changesname = upload.changes.changesname
+    allow_tainted = srcqueue.suite.archive.tainted
+
+    # We need overrides to get the target component
+    overridesuite = upload.target_suite
+    if overridesuite.overridesuite is not None:
+        overridesuite = session.query(Suite).filter_by(suite_name=overridesuite.overridesuite).one()
+
+    def binary_component_func(db_binary):
+        override = session.query(Override).filter_by(suite=overridesuite, package=db_binary.package) \
+            .join(OverrideType).filter(OverrideType.overridetype == db_binary.binarytype) \
+            .join(Component).one()
+        return override.component
+
+    def source_component_func(db_source):
+        override = session.query(Override).filter_by(suite=overridesuite, package=db_source.source) \
+            .join(OverrideType).filter(OverrideType.overridetype == 'dsc') \
+            .join(Component).one()
+        return override.component
+
+    all_target_suites = [upload.target_suite]
+    all_target_suites.extend([q.suite for q in upload.target_suite.copy_queues])
+
+    for suite in all_target_suites:
+        if upload.source is not None:
+            transaction.copy_source(upload.source, suite, source_component_func(upload.source), allow_tainted=allow_tainted)
+        for db_binary in upload.binaries:
+            # build queues may miss the source package if this is a binary-only upload
+            if suite != upload.target_suite:
+                transaction.copy_source(db_binary.source, suite, source_component_func(db_binary.source), allow_tainted=allow_tainted)
+            transaction.copy_binary(db_binary, suite, binary_component_func(db_binary), allow_tainted=allow_tainted, extra_archives=[upload.target_suite.archive])
+
+    # Copy .changes if needed
+    if upload.target_suite.copychanges:
+        src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
+        dst = os.path.join(upload.target_suite.path, upload.changes.changesname)
+        fs.copy(src, dst, mode=upload.target_suite.archive.mode)
+
+    # Copy upload to Process-Policy::CopyDir
+    # Used on security.d.o to sync accepted packages to ftp-master, but this
+    # should eventually be replaced by something else.
+    copydir = cnf.get('Process-Policy::CopyDir') or None
+    if copydir is not None:
+        mode = upload.target_suite.archive.mode
+        if upload.source is not None:
+            for f in [ df.poolfile for df in upload.source.srcfiles ]:
+                dst = os.path.join(copydir, f.basename)
+                if not os.path.exists(dst):
+                    fs.copy(f.fullpath, dst, mode=mode)
+
+        for db_binary in upload.binaries:
+            f = db_binary.poolfile
+            dst = os.path.join(copydir, f.basename)
+            if not os.path.exists(dst):
+                fs.copy(f.fullpath, dst, mode=mode)
+
+        src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
+        dst = os.path.join(copydir, upload.changes.changesname)
+        if not os.path.exists(dst):
+            fs.copy(src, dst, mode=mode)
+
+    if upload.source is not None and not Options['No-Action']:
+        urgency = upload.changes.urgency
+        if urgency not in cnf.value_list('Urgency::Valid'):
+            urgency = cnf['Urgency::Default']
+        UrgencyLog().log(upload.source.source, upload.source.version, urgency)
+
+    print "  ACCEPT"
+    if not Options['No-Action']:
+        Logger.log(["Policy Queue ACCEPT", srcqueue.queue_name, changesname])
+
+    pu = get_processed_upload(upload)
+    daklib.announce.announce_accept(pu)
+
+    # TODO: code duplication. Similar code is in process-upload.
+    # Move .changes to done
+    src = os.path.join(upload.policy_queue.path, upload.changes.changesname)
+    now = datetime.datetime.now()
+    donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
+    dst = os.path.join(donedir, upload.changes.changesname)
+    dst = utils.find_next_free(dst)
+    fs.copy(src, dst, mode=0o644)
+
+    remove_upload(upload, transaction)
+
+################################################################################
 
-    u.rejects.append(comments)
+@try_or_reject
+def comment_reject(*args):
+    real_comment_reject(*args, manual=True)
 
+def real_comment_reject(upload, srcqueue, comments, transaction, notify=True, manual=False):
     cnf = Config()
-    bcc = "X-DAK: dak process-policy"
-    if cnf.has_key("Dinstall::Bcc"):
-        u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
-    else:
-        u.Subst["__BCC__"] = bcc
 
+    fs = transaction.fs
+    session = transaction.session
+    changesname = upload.changes.changesname
+    queuedir = upload.policy_queue.path
+    rejectdir = cnf['Dir::Reject']
+
+    ### Copy files to reject/
+
+    poolfiles = [b.poolfile for b in upload.binaries]
+    if upload.source is not None:
+        poolfiles.extend([df.poolfile for df in upload.source.srcfiles])
+    # Not beautiful...
+    files = [ af.path for af in session.query(ArchiveFile) \
+                  .filter_by(archive=upload.policy_queue.suite.archive) \
+                  .join(ArchiveFile.file) \
+                  .filter(PoolFile.file_id.in_([ f.file_id for f in poolfiles ])) ]
+    for byhand in upload.byhand:
+        path = os.path.join(queuedir, byhand.filename)
+        if os.path.exists(path):
+            files.append(path)
+    files.append(os.path.join(queuedir, changesname))
+
+    for fn in files:
+        dst = utils.find_next_free(os.path.join(rejectdir, os.path.basename(fn)))
+        fs.copy(fn, dst, link=True)
+
+    ### Write reason
+
+    dst = utils.find_next_free(os.path.join(rejectdir, '{0}.reason'.format(changesname)))
+    fh = fs.create(dst)
+    fh.write(comments)
+    fh.close()
+
+    ### Send mail notification
+
+    if notify:
+        rejected_by = None
+        reason = comments
+
+        # Try to use From: from comment file if there is one.
+        # This is not very elegant...
+        match = re.match(r"\AFrom: ([^\n]+)\n\n", comments)
+        if match:
+            rejected_by = match.group(1)
+            reason = '\n'.join(comments.splitlines()[2:])
+
+        pu = get_processed_upload(upload)
+        daklib.announce.announce_reject(pu, reason, rejected_by)
+
+    print "  REJECT"
     if not Options["No-Action"]:
-        u.do_reject(manual=0, reject_message='\n'.join(u.rejects))
-        u.pkg.remove_known_changes(session=session)
-        session.commit()
+        Logger.log(["Policy Queue REJECT", srcqueue.queue_name, upload.changes.changesname])
 
-        print "  REJECT"
-        Logger.log(["Policy Queue REJECT: %s:  %s" % (srcqueue.queue_name, u.pkg.changes_file)])
+    changes = upload.changes
+    remove_upload(upload, transaction)
+    session.delete(changes)
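
Putting do_comments() and the From: handling above together, a rejection comment file (named e.g. COMMENTS/REJECT.hello_1.0-1; all names here are made up) is expected to look roughly like this: the first line must be exactly NOTOK, an optional From: header names who rejected, and the remainder is the reason sent to the uploader.

    example_reject_comment = (
        "NOTOK\n"
        "From: Jane Doe <jane@example.org>\n"
        "\n"
        "Package fails to build from source; please fix and reupload.\n"
    )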
 
+################################################################################
+
+def remove_upload(upload, transaction):
+    fs = transaction.fs
+    session = transaction.session
+    changes = upload.changes
+
+    # Remove byhand and changes files. Binary and source packages will be
+    # removed from {bin,src}_associations and eventually removed by clean-suites automatically.
+    queuedir = upload.policy_queue.path
+    for byhand in upload.byhand:
+        path = os.path.join(queuedir, byhand.filename)
+        if os.path.exists(path):
+            fs.unlink(path)
+        session.delete(byhand)
+    fs.unlink(os.path.join(queuedir, upload.changes.changesname))
+
+    session.delete(upload)
+    session.flush()
+
+################################################################################
+
+def get_processed_upload(upload):
+    pu = daklib.announce.ProcessedUpload()
+
+    pu.maintainer = upload.changes.maintainer
+    pu.changed_by = upload.changes.changedby
+    pu.fingerprint = upload.changes.fingerprint
+
+    pu.suites = [ upload.target_suite ]
+    pu.from_policy_suites = [ upload.target_suite ]
+
+    changes_path = os.path.join(upload.policy_queue.path, upload.changes.changesname)
+    pu.changes = open(changes_path, 'r').read()
+    pu.changes_filename = upload.changes.changesname
+    pu.sourceful = upload.source is not None
+    pu.source = upload.changes.source
+    pu.version = upload.changes.version
+    pu.architecture = upload.changes.architecture
+    pu.bugs = upload.changes.closes
+
+    pu.program = "process-policy"
+
+    return pu
+
+################################################################################
+
+def remove_unreferenced_binaries(policy_queue, transaction):
+    """Remove binaries that are no longer referenced by an upload
+
+    @type  policy_queue: L{daklib.dbconn.PolicyQueue}
+
+    @type  transaction: L{daklib.archive.ArchiveTransaction}
+    """
+    session = transaction.session
+    suite = policy_queue.suite
+
+    query = """
+       SELECT b.*
+         FROM binaries b
+         JOIN bin_associations ba ON b.id = ba.bin
+        WHERE ba.suite = :suite_id
+          AND NOT EXISTS (SELECT 1 FROM policy_queue_upload_binaries_map pqubm
+                                   JOIN policy_queue_upload pqu ON pqubm.policy_queue_upload_id = pqu.id
+                                  WHERE pqu.policy_queue_id = :policy_queue_id
+                                    AND pqubm.binary_id = b.id)"""
+    binaries = session.query(DBBinary).from_statement(query) \
+        .params({'suite_id': policy_queue.suite_id, 'policy_queue_id': policy_queue.policy_queue_id})
+
+    for binary in binaries:
+        Logger.log(["removed binary from policy queue", policy_queue.queue_name, binary.package, binary.version])
+        transaction.remove_binary(binary, suite)
+
+def remove_unreferenced_sources(policy_queue, transaction):
+    """Remove sources that are no longer referenced by an upload or a binary
+
+    @type  policy_queue: L{daklib.dbconn.PolicyQueue}
+
+    @type  transaction: L{daklib.archive.ArchiveTransaction}
+    """
+    session = transaction.session
+    suite = policy_queue.suite
+
+    query = """
+       SELECT s.*
+         FROM source s
+         JOIN src_associations sa ON s.id = sa.source
+        WHERE sa.suite = :suite_id
+          AND NOT EXISTS (SELECT 1 FROM policy_queue_upload pqu
+                                  WHERE pqu.policy_queue_id = :policy_queue_id
+                                    AND pqu.source_id = s.id)
+          AND NOT EXISTS (SELECT 1 FROM binaries b
+                                   JOIN bin_associations ba ON b.id = ba.bin
+                                  WHERE b.source = s.id
+                                    AND ba.suite = :suite_id)"""
+    sources = session.query(DBSource).from_statement(query) \
+        .params({'suite_id': policy_queue.suite_id, 'policy_queue_id': policy_queue.policy_queue_id})
+
+    for source in sources:
+        Logger.log(["removed source from policy queue", policy_queue.queue_name, source.source, source.version])
+        transaction.remove_source(source, suite)
 
 ################################################################################
 
@@ -139,28 +403,31 @@ def main():
     if Options["Help"]:
         usage()
 
+    Logger = daklog.Logger("process-policy")
     if not Options["No-Action"]:
+        urgencylog = UrgencyLog()
+
+    with ArchiveTransaction() as transaction:
+        session = transaction.session
         try:
-            Logger = daklog.Logger("process-policy")
-        except CantOpenError as e:
-            Logger = None
+            pq = session.query(PolicyQueue).filter_by(queue_name=queue_name).one()
+        except NoResultFound:
+            print "E: Cannot find policy queue %s" % queue_name
+            sys.exit(1)
 
-    # Find policy queue
-    session.query(PolicyQueue)
+        commentsdir = os.path.join(pq.path, 'COMMENTS')
+        # The comments stuff relies on being in the right directory
+        os.chdir(pq.path)
 
-    try:
-        pq = session.query(PolicyQueue).filter_by(queue_name=queue_name).one()
-    except NoResultFound:
-        print "E: Cannot find policy queue %s" % queue_name
-        sys.exit(1)
+        do_comments(commentsdir, pq, "ACCEPT.", "ACCEPTED.", "OK", comment_accept, transaction)
+        do_comments(commentsdir, pq, "ACCEPTED.", "ACCEPTED.", "OK", comment_accept, transaction)
+        do_comments(commentsdir, pq, "REJECT.", "REJECTED.", "NOTOK", comment_reject, transaction)
 
-    commentsdir = os.path.join(pq.path, 'COMMENTS')
-    # The comments stuff relies on being in the right directory
-    os.chdir(pq.path)
-    do_comments(commentsdir, pq, "ACCEPT.", "ACCEPTED.", "OK", comment_accept, session)
-    do_comments(commentsdir, pq, "ACCEPTED.", "ACCEPTED.", "OK", comment_accept, session)
-    do_comments(commentsdir, pq, "REJECT.", "REJECTED.", "NOTOK", comment_reject, session)
+        remove_unreferenced_binaries(pq, transaction)
+        remove_unreferenced_sources(pq, transaction)
 
+    if not Options['No-Action']:
+        urgencylog.close()
 
 ################################################################################
 
index 53ab7cc98f7be1553e050449c181e238c97a3275..1518d262ad78bc42f221a1630f90804f5578ee1a 100755 (executable)
@@ -159,24 +159,29 @@ Checks Debian packages from Incoming
 
 ## Queue builds
 
+import datetime
+import errno
 from errno import EACCES, EAGAIN
 import fcntl
 import os
 import sys
 import traceback
 import apt_pkg
+import time
 from sqlalchemy.orm.exc import NoResultFound
 
 from daklib import daklog
-from daklib.queue import *
-from daklib.queue_install import *
-from daklib import utils
 from daklib.dbconn import *
 from daklib.urgencylog import UrgencyLog
 from daklib.summarystats import SummaryStats
-from daklib.holding import Holding
 from daklib.config import Config
-from daklib.regexes import re_match_expired
+import daklib.utils as utils
+from daklib.regexes import *
+
+import daklib.announce
+import daklib.archive
+import daklib.checks
+import daklib.upload
 
 ###############################################################################
 
@@ -188,6 +193,7 @@ Logger = None
 def usage (exit_code=0):
     print """Usage: dak process-upload [OPTION]... [CHANGES]...
   -a, --automatic           automatic run
+  -d, --directory <DIR>     process uploads in <DIR>
   -h, --help                show this help and exit.
   -n, --no-action           don't do anything
   -p, --no-lock             don't check lockfile !! for cron.daily only !!
@@ -197,27 +203,177 @@ def usage (exit_code=0):
 
 ###############################################################################
 
-def byebye():
-    if not Options["No-Action"]:
-        # Clean out the queue files
-        session = DBConn().session()
-        session.execute("DELETE FROM changes_pending_files WHERE id NOT IN (SELECT file_id FROM changes_pending_files_map )")
-        session.commit()
+def try_or_reject(function):
+    """Try to call function or reject the upload if that fails
+    """
+    def wrapper(directory, upload, *args, **kwargs):
+        reason = 'No exception caught. This should not happen.'
+
+        try:
+            return function(directory, upload, *args, **kwargs)
+        except (daklib.archive.ArchiveException, daklib.checks.Reject) as e:
+            reason = unicode(e)
+        except Exception as e:
+            reason = "There was an uncaught exception when processing your upload:\n{0}\nAny original reject reason follows below.".format(traceback.format_exc())
+
+        try:
+            upload.rollback()
+            return real_reject(directory, upload, reason=reason)
+        except Exception as e:
+            reason = "In addition there was an exception when rejecting the package:\n{0}\nPrevious reasons:\n{1}".format(traceback.format_exc(), reason)
+            upload.rollback()
+            return real_reject(directory, upload, reason=reason, notify=False)
+
+        raise Exception('Rejecting upload failed after multiple tries. Giving up. Last reason:\n{0}'.format(reason))
+
+    return wrapper
+
+def get_processed_upload(upload):
+    changes = upload.changes
+    control = upload.changes.changes
+
+    pu = daklib.announce.ProcessedUpload()
+
+    pu.maintainer = control.get('Maintainer')
+    pu.changed_by = control.get('Changed-By')
+    pu.fingerprint = changes.primary_fingerprint
+
+    pu.suites = upload.final_suites or []
+    pu.from_policy_suites = []
+
+    pu.changes = open(upload.changes.path, 'r').read()
+    pu.changes_filename = upload.changes.filename
+    pu.sourceful = upload.changes.sourceful
+    pu.source = control.get('Source')
+    pu.version = control.get('Version')
+    pu.architecture = control.get('Architecture')
+    pu.bugs = changes.closed_bugs
+
+    pu.program = "process-upload"
+
+    pu.warnings = upload.warnings
+
+    return pu
+
+@try_or_reject
+def accept(directory, upload):
+    cnf = Config()
 
+    Logger.log(['ACCEPT', upload.changes.filename])
+    print "ACCEPT"
+
+    upload.install()
+
+    accepted_to_real_suite = False
+    for suite in upload.final_suites:
+        accepted_to_real_suite = accepted_to_real_suite or suite.policy_queue is None
+
+    sourceful_upload = 'source' in upload.changes.architectures
+
+    control = upload.changes.changes
+    if sourceful_upload and not Options['No-Action']:
+        urgency = control.get('Urgency')
+        if urgency not in cnf.value_list('Urgency::Valid'):
+            urgency = cnf['Urgency::Default']
+        UrgencyLog().log(control['Source'], control['Version'], urgency)
+
+    pu = get_processed_upload(upload)
+    daklib.announce.announce_accept(pu)
+
+    # Move .changes to done, but only for uploads that were accepted to a
+    # real suite.  process-policy will handle this for uploads to queues.
+    if accepted_to_real_suite:
+        src = os.path.join(upload.directory, upload.changes.filename)
+
+        now = datetime.datetime.now()
+        donedir = os.path.join(cnf['Dir::Done'], now.strftime('%Y/%m/%d'))
+        dst = os.path.join(donedir, upload.changes.filename)
+        dst = utils.find_next_free(dst)
+
+        upload.transaction.fs.copy(src, dst, mode=0o644)
+
+    SummaryStats().accept_count += 1
+    SummaryStats().accept_bytes += upload.changes.bytes
+
+@try_or_reject
+def accept_to_new(directory, upload):
+    cnf = Config()
+
+    Logger.log(['ACCEPT-TO-NEW', upload.changes.filename])
+    print "ACCEPT-TO-NEW"
+
+    upload.install_to_new()
+    # TODO: tag bugs pending
+
+    pu = get_processed_upload(upload)
+    daklib.announce.announce_new(pu)
+
+    SummaryStats().accept_count += 1
+    SummaryStats().accept_bytes += upload.changes.bytes
+
+@try_or_reject
+def reject(directory, upload, reason=None, notify=True):
+    real_reject(directory, upload, reason, notify)
+
+def real_reject(directory, upload, reason=None, notify=True):
+    # XXX: rejection itself should go to daklib.archive.ArchiveUpload
+    cnf = Config()
 
+    Logger.log(['REJECT', upload.changes.filename])
+    print "REJECT"
+
+    fs = upload.transaction.fs
+    rejectdir = cnf['Dir::Reject']
+
+    files = [ f.filename for f in upload.changes.files.itervalues() ]
+    files.append(upload.changes.filename)
+
+    for fn in files:
+        src = os.path.join(upload.directory, fn)
+        dst = utils.find_next_free(os.path.join(rejectdir, fn))
+        if not os.path.exists(src):
+            continue
+        fs.copy(src, dst)
+
+    if upload.reject_reasons is not None:
+        if reason is None:
+            reason = ''
+        reason = reason + '\n' + '\n'.join(upload.reject_reasons)
+
+    if reason is None:
+        reason = '(Unknown reason. Please check logs.)'
+
+    dst = utils.find_next_free(os.path.join(rejectdir, '{0}.reason'.format(upload.changes.filename)))
+    fh = fs.create(dst)
+    fh.write(reason)
+    fh.close()
+
+    if notify:
+        pu = get_processed_upload(upload)
+        daklib.announce.announce_reject(pu, reason)
+
+    SummaryStats().reject_count += 1
+
+###############################################################################
+
+def action(directory, upload):
+    changes = upload.changes
+    processed = True
 
-def action(u, session):
     global Logger
 
     cnf = Config()
-    holding = Holding()
 
-    # changes["distribution"] may not exist in corner cases
-    # (e.g. unreadable changes files)
-    if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], dict):
-        u.pkg.changes["distribution"] = {}
+    okay = upload.check()
+
+    summary = changes.changes.get('Changes', '')
 
-    (summary, short_summary) = u.build_summaries()
+    package_info = []
+    if okay:
+        if changes.source is not None:
+            package_info.append("source:{0}".format(changes.source.dsc['Source']))
+        for binary in changes.binaries:
+            package_info.append("binary:{0}".format(binary.control['Package']))
 
     (prompt, answer) = ("", "XXX")
     if Options["No-Action"] or Options["Automatic"]:
@@ -225,63 +381,39 @@ def action(u, session):
 
     queuekey = ''
 
-    pi = u.package_info()
-
-    try:
-        chg = session.query(DBChange).filter_by(changesname=os.path.basename(u.pkg.changes_file)).one()
-    except NoResultFound as e:
-        chg = None
-
-    if len(u.rejects) > 0:
-        if u.upload_too_new():
-            print "SKIP (too new)\n" + pi,
+    print summary
+    print
+    print "\n".join(package_info)
+    print
+    if len(upload.warnings) > 0:
+        print "\n".join(upload.warnings)
+        print
+
+    if len(upload.reject_reasons) > 0:
+        print "Reason:"
+        print "\n".join(upload.reject_reasons)
+        print
+
+        path = os.path.join(directory, changes.filename)
+        created = os.stat(path).st_mtime
+        now = time.time()
+        too_new = (now - created < int(cnf['Dinstall::SkipTime']))
+
+        if too_new:
+            print "SKIP (too new)"
             prompt = "[S]kip, Quit ?"
         else:
-            print "REJECT\n" + pi
             prompt = "[R]eject, Skip, Quit ?"
             if Options["Automatic"]:
                 answer = 'R'
+    elif upload.new:
+        prompt = "[N]ew, Skip, Quit ?"
+        if Options['Automatic']:
+            answer = 'N'
     else:
-        # Are we headed for NEW / BYHAND / AUTOBYHAND?
-        # Note that policy queues are no longer handled here
-        qu = determine_target(u)
-        if qu:
-            print "%s for %s\n%s%s" % ( qu.upper(), ", ".join(u.pkg.changes["distribution"].keys()), pi, summary)
-            queuekey = qu[0].upper()
-            if queuekey in "RQSA":
-                queuekey = "D"
-                prompt = "[D]ivert, Skip, Quit ?"
-            else:
-                prompt = "[%s]%s, Skip, Quit ?" % (queuekey, qu[1:].lower())
-            if Options["Automatic"]:
-                answer = queuekey
-        else:
-            # Does suite have a policy_queue configured
-            divert = False
-            for s in u.pkg.changes["distribution"].keys():
-                suite = get_suite(s, session)
-                if suite.policy_queue:
-                    if not chg or chg.approved_for_id != suite.policy_queue.policy_queue_id:
-                        # This routine will check whether the upload is a binary
-                        # upload when the source is already in the target suite.  If
-                        # so, we skip the policy queue, otherwise we go there.
-                        divert = package_to_suite(u, suite.suite_name, session=session)
-                        if divert:
-                            print "%s for %s\n%s%s" % ( suite.policy_queue.queue_name.upper(),
-                                                        ", ".join(u.pkg.changes["distribution"].keys()),
-                                                        pi, summary)
-                            queuekey = "P"
-                            prompt = "[P]olicy, Skip, Quit ?"
-                            policyqueue = suite.policy_queue
-                            if Options["Automatic"]:
-                                answer = 'P'
-                            break
-
-            if not divert:
-                print "ACCEPT\n" + pi + summary,
-                prompt = "[A]ccept, Skip, Quit ?"
-                if Options["Automatic"]:
-                    answer = 'A'
+        prompt = "[A]ccept, Skip, Quit ?"
+        if Options['Automatic']:
+            answer = 'A'
 
     while prompt.find(answer) == -1:
         answer = utils.our_raw_input(prompt)
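
The "too new" branch above keeps a rejectable upload around for Dinstall::SkipTime seconds instead of rejecting it straight away. A self-contained sketch of the age test, with a hypothetical path and threshold:

    import os
    import time

    def too_new(changes_path, skip_time):
        """Return True if the .changes file is younger than skip_time seconds."""
        return time.time() - os.stat(changes_path).st_mtime < skip_time

    # e.g. too_new('/srv/queue/unchecked/foo_1.0_amd64.changes', 300)
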
@@ -291,131 +423,72 @@ def action(u, session):
         answer = answer[:1].upper()
 
     if answer == 'R':
-        os.chdir(u.pkg.directory)
-        u.do_reject(0, pi)
+        reject(directory, upload)
     elif answer == 'A':
-        if not chg:
-            chg = u.pkg.add_known_changes(holding.holding_dir, session=session, logger=Logger)
-        session.commit()
-        u.accept(summary, short_summary, session)
-        u.check_override()
-        chg.clean_from_queue()
-        session.commit()
-        u.remove()
-    elif answer == 'P':
-        if not chg:
-            chg = u.pkg.add_known_changes(holding.holding_dir, session=session, logger=Logger)
-        package_to_queue(u, summary, short_summary, policyqueue, chg, session)
-        session.commit()
-        u.remove()
-    elif answer == queuekey:
-        if not chg:
-            chg = u.pkg.add_known_changes(holding.holding_dir, session=session, logger=Logger)
-        QueueInfo[qu]["process"](u, summary, short_summary, chg, session)
-        session.commit()
-        u.remove()
+        # upload.try_autobyhand must not be run with No-Action.
+        if Options['No-Action']:
+            accept(directory, upload)
+        elif upload.try_autobyhand():
+            accept(directory, upload)
+        else:
+            print "W: redirecting to BYHAND as automatic processing failed."
+            accept_to_new(directory, upload)
+    elif answer == 'N':
+        accept_to_new(directory, upload)
     elif answer == 'Q':
-        byebye()
         sys.exit(0)
+    elif answer == 'S':
+        processed = False
+
+    if not Options['No-Action']:
+        upload.commit()
 
-    session.commit()
+    return processed
 
 ###############################################################################
 
-def cleanup():
-    h = Holding()
-    if not Options["No-Action"]:
-        h.clean()
+def unlink_if_exists(path):
+    try:
+        os.unlink(path)
+    except OSError as e:
+        if e.errno != errno.ENOENT:
+            raise
 
-def process_it(changes_file, session):
+def process_it(directory, changes, keyrings, session):
     global Logger
 
-    Logger.log(["Processing changes file", changes_file])
+    print "\n{0}\n".format(changes.filename)
+    Logger.log(["Processing changes file", changes.filename])
 
-    cnf = Config()
+    with daklib.archive.ArchiveUpload(directory, changes, keyrings) as upload:
+        processed = action(directory, upload)
+        if processed and not Options['No-Action']:
+            unlink_if_exists(os.path.join(directory, changes.filename))
+            for fn in changes.files:
+                unlink_if_exists(os.path.join(directory, fn))
 
-    holding = Holding()
-
-    # TODO: Actually implement using pending* tables so that we don't lose track
-    #       of what is where
-
-    u = Upload()
-    u.pkg.changes_file = changes_file
-    u.pkg.directory = os.getcwd()
-    u.logger = Logger
-    origchanges = os.path.abspath(u.pkg.changes_file)
+###############################################################################
 
-    # Some defaults in case we can't fully process the .changes file
-    u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"]
-    u.pkg.changes["changedby2047"] = cnf["Dinstall::MyEmailAddress"]
+def process_changes(changes_filenames):
+    session = DBConn().session()
+    keyrings = session.query(Keyring).filter_by(active=True).order_by(Keyring.priority)
+    keyring_files = [ k.keyring_name for k in keyrings ]
 
-    # debian-{devel-,}-changes@lists.debian.org toggles writes access based on this header
-    bcc = "X-DAK: dak process-upload"
-    if cnf.has_key("Dinstall::Bcc"):
-        u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
-    else:
-        u.Subst["__BCC__"] = bcc
-
-    # Remember where we are so we can come back after cd-ing into the
-    # holding directory.  TODO: Fix this stupid hack
-    u.prevdir = os.getcwd()
+    changes = []
+    for fn in changes_filenames:
+        try:
+            directory, filename = os.path.split(fn)
+            c = daklib.upload.Changes(directory, filename, keyring_files)
+            changes.append([directory, c])
+        except Exception as e:
+            Logger.log([filename, "Error while loading changes: {0}".format(e)])
 
-    try:
-        # If this is the Real Thing(tm), copy things into a private
-        # holding directory first to avoid replacable file races.
-        if not Options["No-Action"]:
-            holding.chdir_to_holding()
-
-            # Absolutize the filename to avoid the requirement of being in the
-            # same directory as the .changes file.
-            holding.copy_to_holding(origchanges)
-
-            # Relativize the filename so we use the copy in holding
-            # rather than the original...
-            changespath = os.path.basename(u.pkg.changes_file)
-        else:
-            changespath = origchanges
+    changes.sort(key=lambda x: x[1])
 
-        (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changespath)
+    for directory, c in changes:
+        process_it(directory, c, keyring_files, session)
 
-        if u.pkg.changes["fingerprint"]:
-            valid_changes_p = u.load_changes(changespath)
-        else:
-            for reason in rejects:
-                if re_match_expired.match(reason):
-                    # Hrm, key expired. Lets see if we can still parse the .changes before
-                    # we reject. Then we would be able to mail the maintainer, instead of
-                    # just silently dropping the upload.
-                    u.load_changes(changespath)
-            valid_changes_p = False
-            u.rejects.extend(rejects)
-
-        if valid_changes_p:
-            u.check_distributions()
-            u.check_files(not Options["No-Action"])
-            valid_dsc_p = u.check_dsc(not Options["No-Action"])
-            if valid_dsc_p and not Options["No-Action"]:
-                u.check_source()
-            u.check_hashes()
-            if valid_dsc_p and not Options["No-Action"] and not len(u.rejects):
-                u.check_lintian()
-            u.check_urgency()
-            u.check_timestamps()
-            u.check_signed_by_key()
-
-        action(u, session)
-
-    except (SystemExit, KeyboardInterrupt):
-        cleanup()
-        raise
-
-    except:
-        print "ERROR"
-        traceback.print_exc(file=sys.stderr)
-
-    cleanup()
-    # Restore previous WD
-    os.chdir(u.prevdir)
+    session.rollback()
 
 ###############################################################################
 
@@ -425,8 +498,6 @@ def main():
     cnf = Config()
     summarystats = SummaryStats()
 
-    DBConn()
-
     Arguments = [('a',"automatic","Dinstall::Options::Automatic"),
                  ('h',"help","Dinstall::Options::Help"),
                  ('n',"no-action","Dinstall::Options::No-Action"),
@@ -484,15 +555,7 @@ def main():
     else:
         Logger.log(["Using changes files from command-line", len(changes_files)])
 
-    # Sort the .changes files so that we process sourceful ones first
-    changes_files.sort(utils.changes_compare)
-
-    # Process the changes files
-    for changes_file in changes_files:
-        print "\n" + changes_file
-        session = DBConn().session()
-        process_it(changes_file, session)
-        session.close()
+    process_changes(changes_files)
 
     if summarystats.accept_count:
         sets = "set"
@@ -509,8 +572,6 @@ def main():
         print "Rejected %d package %s." % (summarystats.reject_count, sets)
         Logger.log(["rejected", summarystats.reject_count])
 
-    byebye()
-
     if not Options["No-Action"]:
         urgencylog.close()
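
Taken together, the reworked process-upload handles each .changes file roughly like this: parse it with daklib.upload.Changes, open a daklib.archive.ArchiveUpload context, run the checks and dispatch to the accept(), accept_to_new() or reject() handlers defined earlier. A condensed, illustrative sketch (prompting, autobyhand and No-Action handling omitted):

    import daklib.archive
    import daklib.upload

    def handle_one(directory, filename, keyring_files):
        changes = daklib.upload.Changes(directory, filename, keyring_files)
        with daklib.archive.ArchiveUpload(directory, changes, keyring_files) as upload:
            if not upload.check():
                reject(directory, upload)
            elif upload.new:
                accept_to_new(directory, upload)
            else:
                accept(directory, upload)
            upload.commit()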
 
index a3b59738df18c7c7d5b9e04d3719463b0582b15a..7e4e14745706027a31734c054fb1ec20827f849f 100755 (executable)
@@ -43,10 +43,11 @@ except ImportError:
     pass
 
 from daklib import utils
-from daklib.queue import Upload
-from daklib.dbconn import DBConn, has_new_comment, DBChange, DBSource, \
-                          get_uid_from_fingerprint, get_policy_queue
+from daklib.dbconn import DBConn, DBSource, has_new_comment, PolicyQueue, \
+                          get_uid_from_fingerprint
+from daklib.policy import PolicyQueueUploadHandler
 from daklib.textutils import fix_maintainer
+from daklib.utils import get_logins_from_ldap
 from daklib.dak_exceptions import *
 
 Cnf = None
@@ -116,7 +117,15 @@ def time_pp(x):
 def sg_compare (a, b):
     a = a[1]
     b = b[1]
-    """Sort by have note, time of oldest upload."""
+    """Sort by have pending action, have note, time of oldest upload."""
+    # Sort by have pending action
+    a_note_state = a["processed"]
+    b_note_state = b["processed"]
+    if a_note_state < b_note_state:
+        return -1
+    elif a_note_state > b_note_state:
+        return 1
+
     # Sort by have note
     a_note_state = a["note_state"]
     b_note_state = b["note_state"]
@@ -187,7 +196,7 @@ def header():
                 continue;
             c = children[i].getAttribute("class").split(" ");
             for(var j = 0; j < c.length; j++) {
-                if(c[j] == "binNEW") {
+                if(c[j] == "sourceNEW") {
                     if (children[i].style.display == '')
                         children[i].style.display = 'none';
                     else children[i].style.display = '';
@@ -245,12 +254,12 @@ def footer():
     """
 
 def table_header(type, source_count, total_count):
-    print "<h1 class='binNEW'>Summary for: %s</h1>" % (type)
-    print "<h1 class='binNEW' style='display: none'>Summary for: binary-%s only</h1>" % (type)
+    print "<h1 class='sourceNEW'>Summary for: %s</h1>" % (type)
+    print "<h1 class='sourceNEW' style='display: none'>Summary for: binary-%s only</h1>" % (type)
     print """
     <p class="togglepkg" onclick="togglePkg()">Click to toggle all/binary-NEW packages</p>
     <table class="NEW">
-      <caption class="binNEW">
+      <caption class="sourceNEW">
     """
     print "Package count in <strong>%s</strong>: <em>%s</em>&nbsp;|&nbsp; Total Package count: <em>%s</em>" % (type, source_count, total_count)
     print """
@@ -283,8 +292,12 @@ def table_row(source, version, arch, last_mod, maint, distribution, closes, fing
         if dist == "experimental":
             trclass = "exp"
 
-    if not len(session.query(DBSource).filter_by(source = source).all()):
-        trclass += " binNEW"
+    query = '''SELECT source
+               FROM source_suite
+               WHERE source = :source
+               AND suite_name IN ('unstable', 'experimental')'''
+    if not session.execute(query, {'source': source}).rowcount:
+        trclass += " sourceNEW"
     session.commit()
 
     if row_number % 2 != 0:
@@ -292,7 +305,7 @@ def table_row(source, version, arch, last_mod, maint, distribution, closes, fing
     else:
         print "<tr class=\"%s odd\">" % (trclass)
 
-    if "binNEW" in trclass:
+    if "sourceNEW" in trclass:
         print "<td class=\"package\">%s</td>" % (source)
     else:
         print "<td class=\"package\"><a href=\"http://packages.qa.debian.org/%(source)s\">%(source)s</a></td>" % {'source': source}
@@ -314,11 +327,7 @@ def table_row(source, version, arch, last_mod, maint, distribution, closes, fing
     print "<span class=\"changed-by\">Changed-By: <a href=\"http://qa.debian.org/developer.php?login=%s\">%s</a></span><br/>" % (utils.html_escape(mail), utils.html_escape(name))
 
     if sponsor:
-        try:
-            (login, domain) = sponsor.split("@", 1)
-            print "<span class=\"sponsor\">Sponsor: <a href=\"http://qa.debian.org/developer.php?login=%s\">%s</a>@debian.org</span><br/>" % (utils.html_escape(login), utils.html_escape(login))
-        except Exception as e:
-            pass
+        print "<span class=\"sponsor\">Sponsor: <a href=\"http://qa.debian.org/developer.php?login=%s\">%s</a>@debian.org</span><br/>" % (utils.html_escape(sponsor), utils.html_escape(sponsor))
 
     print "<span class=\"signature\">Fingerprint: %s</span>" % (fingerprint)
     print "</td>"
@@ -367,42 +376,41 @@ RRA:MAX:0.5:288:795
 
 ############################################################
 
-def process_changes_files(changes_files, type, log, rrd_dir):
+def process_queue(queue, log, rrd_dir):
     msg = ""
-    cache = {}
-    # Read in all the .changes files
-    for filename in changes_files:
-        try:
-            u = Upload()
-            u.load_changes(filename)
-            cache[filename] = copy(u.pkg.changes)
-            cache[filename]["filename"] = filename
-        except Exception as e:
-            print "WARNING: Exception %s" % e
-            continue
+    type = queue.queue_name
+    session = DBConn().session()
+
     # Divide the .changes into per-source groups
     per_source = {}
-    for filename in cache.keys():
-        source = cache[filename]["source"]
-        if not per_source.has_key(source):
+    total_pending = 0
+    for upload in queue.uploads:
+        source = upload.changes.source
+        if source not in per_source:
             per_source[source] = {}
             per_source[source]["list"] = []
-        per_source[source]["list"].append(cache[filename])
+            per_source[source]["processed"] = ""
+            handler = PolicyQueueUploadHandler(upload, session)
+            if handler.get_action():
+                per_source[source]["processed"] = "PENDING %s" % handler.get_action()
+                total_pending += 1
+        per_source[source]["list"].append(upload)
+        per_source[source]["list"].sort(lambda x, y: cmp(x.changes.created, y.changes.created), reverse=True)
     # Determine oldest time and have note status for each source group
     for source in per_source.keys():
         source_list = per_source[source]["list"]
         first = source_list[0]
-        oldest = os.stat(first["filename"])[stat.ST_MTIME]
+        oldest = time.mktime(first.changes.created.timetuple())
         have_note = 0
         for d in per_source[source]["list"]:
-            mtime = os.stat(d["filename"])[stat.ST_MTIME]
+            mtime = time.mktime(d.changes.created.timetuple())
             if Cnf.has_key("Queue-Report::Options::New"):
                 if mtime > oldest:
                     oldest = mtime
             else:
                 if mtime < oldest:
                     oldest = mtime
-            have_note += has_new_comment(d["source"], d["version"])
+            have_note += has_new_comment(d.policy_queue, d.changes.source, d.changes.version)
         per_source[source]["oldest"] = oldest
         if not have_note:
             per_source[source]["note_state"] = 0; # none
@@ -413,12 +421,16 @@ def process_changes_files(changes_files, type, log, rrd_dir):
     per_source_items = per_source.items()
     per_source_items.sort(sg_compare)
 
-    update_graph_database(rrd_dir, type, len(per_source_items), len(changes_files))
+    update_graph_database(rrd_dir, type, len(per_source_items), len(queue.uploads))
 
     entries = []
     max_source_len = 0
     max_version_len = 0
     max_arch_len = 0
+    try:
+        logins = get_logins_from_ldap()
+    except:
+        logins = dict()
     for i in per_source_items:
         maintainer = {}
         maint=""
@@ -428,30 +440,24 @@ def process_changes_files(changes_files, type, log, rrd_dir):
         changeby = {}
         changedby=""
         sponsor=""
-        filename=i[1]["list"][0]["filename"]
+        filename=i[1]["list"][0].changes.changesname
         last_modified = time.time()-i[1]["oldest"]
-        source = i[1]["list"][0]["source"]
+        source = i[1]["list"][0].changes.source
         if len(source) > max_source_len:
             max_source_len = len(source)
-        binary_list = i[1]["list"][0]["binary"].keys()
-        binary = ', '.join(binary_list)
-        arches = {}
-        versions = {}
+        binary_list = i[1]["list"][0].binaries
+        binary = ', '.join([ b.package for b in binary_list ])
+        arches = set()
+        versions = set()
         for j in i[1]["list"]:
-            changesbase = os.path.basename(j["filename"])
-            try:
-                session = DBConn().session()
-                dbc = session.query(DBChange).filter_by(changesname=changesbase).one()
-                session.close()
-            except Exception as e:
-                print "Can't find changes file in NEW for %s (%s)" % (changesbase, e)
-                dbc = None
+            dbc = j.changes
+            changesbase = dbc.changesname
 
             if Cnf.has_key("Queue-Report::Options::New") or Cnf.has_key("Queue-Report::Options::822"):
                 try:
                     (maintainer["maintainer822"], maintainer["maintainer2047"],
                     maintainer["maintainername"], maintainer["maintaineremail"]) = \
-                    fix_maintainer (j["maintainer"])
+                    fix_maintainer (dbc.maintainer)
                 except ParseMaintError as msg:
                     print "Problems while parsing maintainer address\n"
                     maintainer["maintainername"] = "Unknown"
@@ -461,31 +467,35 @@ def process_changes_files(changes_files, type, log, rrd_dir):
                 try:
                     (changeby["changedby822"], changeby["changedby2047"],
                      changeby["changedbyname"], changeby["changedbyemail"]) = \
-                     fix_maintainer (j["changed-by"])
+                     fix_maintainer (dbc.changedby)
                 except ParseMaintError as msg:
                     (changeby["changedby822"], changeby["changedby2047"],
                      changeby["changedbyname"], changeby["changedbyemail"]) = \
                      ("", "", "", "")
                 changedby="%s:%s" % (changeby["changedbyname"], changeby["changedbyemail"])
 
-                distribution=j["distribution"].keys()
-                closes=j["closes"].keys()
-                if dbc:
-                    fingerprint = dbc.fingerprint
-                    sponsor_name = get_uid_from_fingerprint(fingerprint).name
-                    sponsor_email = get_uid_from_fingerprint(fingerprint).uid + "@debian.org"
-                    if sponsor_name != maintainer["maintainername"] and sponsor_name != changeby["changedbyname"] and \
-                    sponsor_email != maintainer["maintaineremail"] and sponsor_name != changeby["changedbyemail"]:
-                        sponsor = sponsor_email
-
-            for arch in j["architecture"].keys():
-                arches[arch] = ""
-            version = j["version"]
-            versions[version] = ""
-        arches_list = arches.keys()
+                distribution=dbc.distribution.split()
+                closes=dbc.closes
+
+                fingerprint = dbc.fingerprint
+                sponsor_name = get_uid_from_fingerprint(fingerprint).name
+                sponsor_login = get_uid_from_fingerprint(fingerprint).uid
+                if '@' in sponsor_login:
+                    if fingerprint in logins:
+                        sponsor_login = logins[fingerprint]
+                if (sponsor_name != maintainer["maintainername"] and
+                  sponsor_name != changeby["changedbyname"] and
+                  sponsor_login + '@debian.org' != maintainer["maintaineremail"] and
+                  sponsor_name != changeby["changedbyemail"]):
+                    sponsor = sponsor_login
+
+            for arch in dbc.architecture.split():
+                arches.add(arch)
+            versions.add(dbc.version)
+        arches_list = list(arches)
         arches_list.sort(utils.arch_compare_sw)
         arch_list = " ".join(arches_list)
-        version_list = " ".join(versions.keys())
+        version_list = " ".join(sorted(versions, reverse=True))
         if len(version_list) > max_version_len:
             max_version_len = len(version_list)
         if len(arch_list) > max_arch_len:
@@ -494,7 +504,7 @@ def process_changes_files(changes_files, type, log, rrd_dir):
             note = " | [N]"
         else:
             note = ""
-        entries.append([source, binary, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, filename])
+        entries.append([source, binary, version_list, arch_list, per_source[source]["processed"], note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, filename])
 
     # direction entry consists of "Which field, which direction, time-consider" where
     # time-consider says how we should treat last_modified. Thats all.
@@ -505,16 +515,16 @@ def process_changes_files(changes_files, type, log, rrd_dir):
         age =  Cnf["Queue-Report::Options::Age"]
     if Cnf.has_key("Queue-Report::Options::New"):
     # If we produce html we always have oldest first.
-        direction.append([5,-1,"ao"])
+        direction.append([6,-1,"ao"])
     else:
         if Cnf.has_key("Queue-Report::Options::Sort"):
             for i in Cnf["Queue-Report::Options::Sort"].split(","):
                 if i == "ao":
                     # Age, oldest first.
-                    direction.append([5,-1,age])
+                    direction.append([6,-1,age])
                 elif i == "an":
                     # Age, newest first.
-                    direction.append([5,1,age])
+                    direction.append([6,1,age])
                 elif i == "na":
                     # Name, Ascending.
                     direction.append([0,1,0])
@@ -523,10 +533,10 @@ def process_changes_files(changes_files, type, log, rrd_dir):
                     direction.append([0,-1,0])
                 elif i == "nl":
                     # Notes last.
-                    direction.append([4,1,0])
+                    direction.append([5,1,0])
                 elif i == "nf":
                     # Notes first.
-                    direction.append([4,-1,0])
+                    direction.append([5,-1,0])
     entries.sort(lambda x, y: sortfunc(x, y))
     # Yes, in theory you can add several sort options at the commandline with. But my mind is to small
     # at the moment to come up with a real good sorting function that considers all the sidesteps you
@@ -536,7 +546,7 @@ def process_changes_files(changes_files, type, log, rrd_dir):
     if Cnf.has_key("Queue-Report::Options::822"):
         # print stuff out in 822 format
         for entry in entries:
-            (source, binary, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, changes_file) = entry
+            (source, binary, version_list, arch_list, processed, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, changes_file) = entry
 
             # We'll always have Source, Version, Arch, Maintainer, and Dist
             # For the rest, check to see if we have them, then print them out
@@ -555,7 +565,7 @@ def process_changes_files(changes_files, type, log, rrd_dir):
                (name, mail) = changedby.split(":", 1)
                log.write("Changed-By: " + name + " <"+mail+">" + "\n")
             if sponsor:
-               log.write("Sponsored-By: " + "@".join(sponsor.split("@")[:2]) + "\n")
+               log.write("Sponsored-By: %s@debian.org\n" % sponsor)
             log.write("Distribution:")
             for dist in distribution:
                log.write(" " + dist)
@@ -569,39 +579,43 @@ def process_changes_files(changes_files, type, log, rrd_dir):
             log.write("Changes-File: " + os.path.basename(changes_file) + "\n")
             log.write("\n")
 
+    total_count = len(queue.uploads)
+    source_count = len(per_source_items)
+
     if Cnf.has_key("Queue-Report::Options::New"):
-        direction.append([5,1,"ao"])
+        direction.append([6,1,"ao"])
         entries.sort(lambda x, y: sortfunc(x, y))
     # Output for a html file. First table header. then table_footer.
     # Any line between them is then a <tr> printed from subroutine table_row.
         if len(entries) > 0:
-            total_count = len(changes_files)
-            source_count = len(per_source_items)
             table_header(type.upper(), source_count, total_count)
             for entry in entries:
-                (source, binary, version_list, arch_list, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, undef) = entry
+                (source, binary, version_list, arch_list, processed, note, last_modified, maint, distribution, closes, fingerprint, sponsor, changedby, undef) = entry
                 table_row(source, version_list, arch_list, time_pp(last_modified), maint, distribution, closes, fingerprint, sponsor, changedby)
             table_footer(type.upper())
     elif not Cnf.has_key("Queue-Report::Options::822"):
     # The "normal" output without any formatting.
-        format="%%-%ds | %%-%ds | %%-%ds%%s | %%s old\n" % (max_source_len, max_version_len, max_arch_len)
-
         msg = ""
         for entry in entries:
-            (source, binary, version_list, arch_list, note, last_modified, undef, undef, undef, undef, undef, undef, undef) = entry
-            msg += format % (source, version_list, arch_list, note, time_pp(last_modified))
+            (source, binary, version_list, arch_list, processed, note, last_modified, undef, undef, undef, undef, undef, undef, undef) = entry
+            if processed:
+                format="%%-%ds | %%-%ds | %%-%ds | %%s\n" % (max_source_len, max_version_len, max_arch_len)
+                msg += format % (source, version_list, arch_list, processed)
+            else:
+                format="%%-%ds | %%-%ds | %%-%ds%%s | %%s old\n" % (max_source_len, max_version_len, max_arch_len)
+                msg += format % (source, version_list, arch_list, note, time_pp(last_modified))
 
         if msg:
-            total_count = len(changes_files)
-            source_count = len(per_source_items)
             print type.upper()
             print "-"*len(type)
             print
             print msg
-            print "%s %s source package%s / %s %s package%s in total." % (source_count, type, plural(source_count), total_count, type, plural(total_count))
+            print ("%s %s source package%s / %s %s package%s in total / %s %s package%s to be processed." %
+                   (source_count, type, plural(source_count),
+                    total_count, type, plural(total_count),
+                    total_pending, type, plural(total_pending)))
             print
 
-
 ################################################################################
 
 def main():
@@ -628,10 +642,7 @@ def main():
     if Cnf.has_key("Queue-Report::Options::New"):
         header()
 
-    # Initialize db so we can get the NEW comments
-    dbconn = DBConn()
-
-    queue_names = [ ]
+    queue_names = []
 
     if Cnf.has_key("Queue-Report::Options::Directories"):
         for i in Cnf["Queue-Report::Options::Directories"].split(","):
@@ -653,14 +664,12 @@ def main():
         # Open the report file
         f = open(Cnf["Queue-Report::ReportLocations::822Location"], "w")
 
-    session = dbconn.session()
+    session = DBConn().session()
 
     for queue_name in queue_names:
-        queue = get_policy_queue(queue_name, session)
-        if queue:
-            directory = os.path.abspath(queue.path)
-            changes_files = glob.glob("%s/*.changes" % (directory))
-            process_changes_files(changes_files, os.path.basename(directory), f, rrd_dir)
+        queue = session.query(PolicyQueue).filter_by(queue_name=queue_name).first()
+        if queue is not None:
+            process_queue(queue, f, rrd_dir)
         else:
             utils.warn("Cannot find queue %s" % queue_name)
 
index bbd4b45854adce9f3a56fae89e26cc107095815a..688ea19b6056634ebfd1b234cea215bad5ba8261 100755 (executable)
--- a/dak/rm.py
+++ b/dak/rm.py
@@ -51,7 +51,7 @@ from daklib.config import Config
 from daklib.dbconn import *
 from daklib import utils
 from daklib.dak_exceptions import *
-from daklib.regexes import re_strip_source_version, re_build_dep_arch, re_bin_only_nmu
+from daklib.regexes import re_strip_source_version, re_bin_only_nmu
 import debianbts as bts
 
 ################################################################################
@@ -65,7 +65,8 @@ def usage (exit_code=0):
 Remove PACKAGE(s) from suite(s).
 
   -a, --architecture=ARCH    only act on this architecture
-  -b, --binary               remove binaries only
+  -b, --binary               PACKAGE are binary packages to remove
+  -B, --binary-only          remove binaries only
   -c, --component=COMPONENT  act on this component
   -C, --carbon-copy=EMAIL    send a CC of removal message to EMAIL
   -d, --done=BUG#            send removal message as closure to bug#
@@ -99,159 +100,8 @@ def game_over():
 ################################################################################
 
 def reverse_depends_check(removals, suite, arches=None, session=None):
-    dbsuite = get_suite(suite, session)
-    cnf = Config()
-
     print "Checking reverse dependencies..."
-    dep_problem = 0
-    p2c = {}
-    all_broken = {}
-    if arches:
-        all_arches = set(arches)
-    else:
-        all_arches = set([x.arch_string for x in get_suite_architectures(suite)])
-    all_arches -= set(["source", "all"])
-    metakey_d = get_or_set_metadatakey("Depends", session)
-    metakey_p = get_or_set_metadatakey("Provides", session)
-    params = {
-        'suite_id':     dbsuite.suite_id,
-        'metakey_d_id': metakey_d.key_id,
-        'metakey_p_id': metakey_p.key_id,
-    }
-    for architecture in all_arches | set(['all']):
-        deps = {}
-        sources = {}
-        virtual_packages = {}
-        params['arch_id'] = get_architecture(architecture, session).arch_id
-
-        statement = '''
-            SELECT b.id, b.package, s.source, c.name as component,
-                (SELECT bmd.value FROM binaries_metadata bmd WHERE bmd.bin_id = b.id AND bmd.key_id = :metakey_d_id) AS depends,
-                (SELECT bmp.value FROM binaries_metadata bmp WHERE bmp.bin_id = b.id AND bmp.key_id = :metakey_p_id) AS provides
-                FROM binaries b
-                JOIN bin_associations ba ON b.id = ba.bin AND ba.suite = :suite_id
-                JOIN source s ON b.source = s.id
-                JOIN files f ON b.file = f.id
-                JOIN location l ON f.location = l.id
-                JOIN component c ON l.component = c.id
-                WHERE b.architecture = :arch_id'''
-        query = session.query('id', 'package', 'source', 'component', 'depends', 'provides'). \
-            from_statement(statement).params(params)
-        for binary_id, package, source, component, depends, provides in query:
-            sources[package] = source
-            p2c[package] = component
-            if depends is not None:
-                deps[package] = depends
-            # Maintain a counter for each virtual package.  If a
-            # Provides: exists, set the counter to 0 and count all
-            # provides by a package not in the list for removal.
-            # If the counter stays 0 at the end, we know that only
-            # the to-be-removed packages provided this virtual
-            # package.
-            if provides is not None:
-                for virtual_pkg in provides.split(","):
-                    virtual_pkg = virtual_pkg.strip()
-                    if virtual_pkg == package: continue
-                    if not virtual_packages.has_key(virtual_pkg):
-                        virtual_packages[virtual_pkg] = 0
-                    if package not in removals:
-                        virtual_packages[virtual_pkg] += 1
-
-        # If a virtual package is only provided by the to-be-removed
-        # packages, treat the virtual package as to-be-removed too.
-        for virtual_pkg in virtual_packages.keys():
-            if virtual_packages[virtual_pkg] == 0:
-                removals.append(virtual_pkg)
-
-        # Check binary dependencies (Depends)
-        for package in deps.keys():
-            if package in removals: continue
-            parsed_dep = []
-            try:
-                parsed_dep += apt_pkg.ParseDepends(deps[package])
-            except ValueError as e:
-                print "Error for package %s: %s" % (package, e)
-            for dep in parsed_dep:
-                # Check for partial breakage.  If a package has a ORed
-                # dependency, there is only a dependency problem if all
-                # packages in the ORed depends will be removed.
-                unsat = 0
-                for dep_package, _, _ in dep:
-                    if dep_package in removals:
-                        unsat += 1
-                if unsat == len(dep):
-                    component = p2c[package]
-                    source = sources[package]
-                    if component != "main":
-                        source = "%s/%s" % (source, component)
-                    all_broken.setdefault(source, {}).setdefault(package, set()).add(architecture)
-                    dep_problem = 1
-
-    if all_broken:
-        print "# Broken Depends:"
-        for source, bindict in sorted(all_broken.items()):
-            lines = []
-            for binary, arches in sorted(bindict.items()):
-                if arches == all_arches or 'all' in arches:
-                    lines.append(binary)
-                else:
-                    lines.append('%s [%s]' % (binary, ' '.join(sorted(arches))))
-            print '%s: %s' % (source, lines[0])
-            for line in lines[1:]:
-                print ' ' * (len(source) + 2) + line
-        print
-
-    # Check source dependencies (Build-Depends and Build-Depends-Indep)
-    all_broken.clear()
-    metakey_bd = get_or_set_metadatakey("Build-Depends", session)
-    metakey_bdi = get_or_set_metadatakey("Build-Depends-Indep", session)
-    params = {
-        'suite_id':    dbsuite.suite_id,
-        'metakey_ids': (metakey_bd.key_id, metakey_bdi.key_id),
-    }
-    statement = '''
-        SELECT s.id, s.source, string_agg(sm.value, ', ') as build_dep
-           FROM source s
-           JOIN source_metadata sm ON s.id = sm.src_id
-           WHERE s.id in
-               (SELECT source FROM src_associations
-                   WHERE suite = :suite_id)
-               AND sm.key_id in :metakey_ids
-           GROUP BY s.id, s.source'''
-    query = session.query('id', 'source', 'build_dep').from_statement(statement). \
-        params(params)
-    for source_id, source, build_dep in query:
-        if source in removals: continue
-        parsed_dep = []
-        if build_dep is not None:
-            # Remove [arch] information since we want to see breakage on all arches
-            build_dep = re_build_dep_arch.sub("", build_dep)
-            try:
-                parsed_dep += apt_pkg.ParseDepends(build_dep)
-            except ValueError as e:
-                print "Error for source %s: %s" % (source, e)
-        for dep in parsed_dep:
-            unsat = 0
-            for dep_package, _, _ in dep:
-                if dep_package in removals:
-                    unsat += 1
-            if unsat == len(dep):
-                component = DBSource.get(source_id, session).get_component_name()
-                if component != "main":
-                    source = "%s/%s" % (source, component)
-                all_broken.setdefault(source, set()).add(utils.pp_deps(dep))
-                dep_problem = 1
-
-    if all_broken:
-        print "# Broken Build-Depends:"
-        for source, bdeps in sorted(all_broken.items()):
-            bdeps = sorted(bdeps)
-            print '%s: %s' % (source, bdeps[0])
-            for bdep in bdeps[1:]:
-                print ' ' * (len(source) + 2) + bdep
-        print
-
-    if dep_problem:
+    if utils.check_reverse_depends(removals, suite, arches, session):
         print "Dependency problem found."
         if not Options["No-Action"]:
             game_over()
@@ -268,7 +118,8 @@ def main ():
 
     Arguments = [('h',"help","Rm::Options::Help"),
                  ('a',"architecture","Rm::Options::Architecture", "HasArg"),
-                 ('b',"binary", "Rm::Options::Binary-Only"),
+                 ('b',"binary", "Rm::Options::Binary"),
+                 ('B',"binary-only", "Rm::Options::Binary-Only"),
                  ('c',"component", "Rm::Options::Component", "HasArg"),
                  ('C',"carbon-copy", "Rm::Options::Carbon-Copy", "HasArg"), # Bugs to Cc
                  ('d',"done","Rm::Options::Done", "HasArg"), # Bugs fixed
@@ -281,7 +132,7 @@ def main ():
                  ('S',"source-only", "Rm::Options::Source-Only"),
                  ]
 
-    for i in [ "architecture", "binary-only", "carbon-copy", "component",
+    for i in [ "architecture", "binary", "binary-only", "carbon-copy", "component",
                "done", "help", "no-action", "partial", "rdep-check", "reason",
                "source-only", "Do-Close" ]:
         if not cnf.has_key("Rm::Options::%s" % (i)):
@@ -302,8 +153,10 @@ def main ():
         utils.fubar("need at least one package name as an argument.")
     if Options["Architecture"] and Options["Source-Only"]:
         utils.fubar("can't use -a/--architecture and -S/--source-only options simultaneously.")
-    if Options["Binary-Only"] and Options["Source-Only"]:
-        utils.fubar("can't use -b/--binary-only and -S/--source-only options simultaneously.")
+    if ((Options["Binary"] and Options["Source-Only"])
+            or (Options["Binary"] and Options["Binary-Only"])
+            or (Options["Binary-Only"] and Options["Source-Only"])):
+        utils.fubar("Only one of -b/--binary, -B/--binary-only and -S/--source-only can be used.")
     if Options.has_key("Carbon-Copy") and not Options.has_key("Done"):
         utils.fubar("can't use -C/--carbon-copy without also using -d/--done option.")
     if Options["Architecture"] and not Options["Partial"]:
@@ -311,11 +164,8 @@ def main ():
         Options["Partial"] = "true"
     if Options["Do-Close"] and not Options["Done"]:
         utils.fubar("No.")
-    if Options["Do-Close"] and Options["Binary-Only"]:
-        utils.fubar("No.")
-    if Options["Do-Close"] and Options["Source-Only"]:
-        utils.fubar("No.")
-    if Options["Do-Close"] and Options["Suite"] != 'unstable':
+    if (Options["Do-Close"]
+           and (Options["Binary"] or Options["Binary-Only"] or Options["Source-Only"])):
         utils.fubar("No.")
 
     # Force the admin to tell someone if we're not doing a 'dak
@@ -350,7 +200,7 @@ def main ():
         else:
             utils.fubar("Invalid -C/--carbon-copy argument '%s'; not a bug number, 'package' or email address." % (copy_to))
 
-    if Options["Binary-Only"]:
+    if Options["Binary"]:
         field = "b.package"
     else:
         field = "s.source"
@@ -361,6 +211,7 @@ def main ():
 
     # Additional suite checks
     suite_ids_list = []
+    whitelists = []
     suites = utils.split_args(Options["Suite"])
     suites_list = utils.join_with_commas_and(suites)
     if not Options["No-Action"]:
@@ -368,6 +219,7 @@ def main ():
             s = get_suite(suite, session=session)
             if s is not None:
                 suite_ids_list.append(s.suite_id)
+                whitelists.append(s.mail_whitelist)
             if suite in ("oldstable", "stable"):
                 print "**WARNING** About to remove from the (old)stable suite!"
                 print "This should only be done just prior to a (point) release and not at"
@@ -390,63 +242,39 @@ def main ():
     if Options["Rdep-Check"] and len(suites) > 1:
         utils.fubar("Reverse dependency check on multiple suites is not implemented.")
 
-    print "Working...",
-    sys.stdout.flush()
     to_remove = []
     maintainers = {}
 
-    # We have 3 modes of package selection: binary-only, source-only
-    # and source+binary.  The first two are trivial and obvious; the
-    # latter is a nasty mess, but very nice from a UI perspective so
-    # we try to support it.
+    # We have 4 modes of package selection: binary, source-only, binary-only
+    # and source+binary.
 
     # XXX: TODO: This all needs converting to use placeholders or the object
     #            API. It's an SQL injection dream at the moment
 
-    if Options["Binary-Only"]:
-        # Binary-only
-        q = session.execute("SELECT b.package, b.version, a.arch_string, b.id, b.maintainer FROM binaries b, bin_associations ba, architecture a, suite su, files f, location l, component c WHERE ba.bin = b.id AND ba.suite = su.id AND b.architecture = a.id AND b.file = f.id AND f.location = l.id AND l.component = c.id %s %s %s %s" % (con_packages, con_suites, con_components, con_architectures))
-        for i in q.fetchall():
-            to_remove.append(i)
+    if Options["Binary"]:
+        # Removal by binary package name
+        q = session.execute("SELECT b.package, b.version, a.arch_string, b.id, b.maintainer FROM binaries b, bin_associations ba, architecture a, suite su, files f, files_archive_map af, component c WHERE ba.bin = b.id AND ba.suite = su.id AND b.architecture = a.id AND b.file = f.id AND af.file_id = f.id AND af.archive_id = su.archive_id AND af.component_id = c.id %s %s %s %s" % (con_packages, con_suites, con_components, con_architectures))
+        to_remove.extend(q)
     else:
         # Source-only
-        source_packages = {}
-        q = session.execute("SELECT l.path, f.filename, s.source, s.version, 'source', s.id, s.maintainer FROM source s, src_associations sa, suite su, files f, location l, component c WHERE sa.source = s.id AND sa.suite = su.id AND s.file = f.id AND f.location = l.id AND l.component = c.id %s %s %s" % (con_packages, con_suites, con_components))
-        for i in q.fetchall():
-            source_packages[i[2]] = i[:2]
-            to_remove.append(i[2:])
+        if not Options["Binary-Only"]:
+            q = session.execute("SELECT s.source, s.version, 'source', s.id, s.maintainer FROM source s, src_associations sa, suite su, archive, files f, files_archive_map af, component c WHERE sa.source = s.id AND sa.suite = su.id AND archive.id = su.archive_id AND s.file = f.id AND af.file_id = f.id AND af.archive_id = su.archive_id AND af.component_id = c.id %s %s %s" % (con_packages, con_suites, con_components))
+            to_remove.extend(q)
         if not Options["Source-Only"]:
             # Source + Binary
-            binary_packages = {}
-            # First get a list of binary package names we suspect are linked to the source
-            q = session.execute("SELECT DISTINCT b.package FROM binaries b, source s, src_associations sa, suite su, files f, location l, component c WHERE b.source = s.id AND sa.source = s.id AND sa.suite = su.id AND s.file = f.id AND f.location = l.id AND l.component = c.id %s %s %s" % (con_packages, con_suites, con_components))
-            for i in q.fetchall():
-                binary_packages[i[0]] = ""
-            # Then parse each .dsc that we found earlier to see what binary packages it thinks it produces
-            for i in source_packages.keys():
-                filename = "/".join(source_packages[i])
-                try:
-                    dsc = utils.parse_changes(filename, dsc_file=1)
-                except CantOpenError:
-                    utils.warn("couldn't open '%s'." % (filename))
-                    continue
-                for package in dsc.get("binary").split(','):
-                    package = package.strip()
-                    binary_packages[package] = ""
-            # Then for each binary package: find any version in
-            # unstable, check the Source: field in the deb matches our
-            # source package and if so add it to the list of packages
-            # to be removed.
-            for package in binary_packages.keys():
-                q = session.execute("SELECT l.path, f.filename, b.package, b.version, a.arch_string, b.id, b.maintainer FROM binaries b, bin_associations ba, architecture a, suite su, files f, location l, component c WHERE ba.bin = b.id AND ba.suite = su.id AND b.architecture = a.id AND b.file = f.id AND f.location = l.id AND l.component = c.id %s %s %s AND b.package = '%s'" % (con_suites, con_components, con_architectures, package))
-                for i in q.fetchall():
-                    filename = "/".join(i[:2])
-                    control = apt_pkg.TagSection(utils.deb_extract_control(utils.open_file(filename)))
-                    source = control.find("Source", control.find("Package"))
-                    source = re_strip_source_version.sub('', source)
-                    if source_packages.has_key(source):
-                        to_remove.append(i[2:])
-    print "done."
+            q = session.execute("""
+                    SELECT b.package, b.version, a.arch_string, b.id, b.maintainer
+                    FROM binaries b
+                         JOIN bin_associations ba ON b.id = ba.bin
+                         JOIN architecture a ON b.architecture = a.id
+                         JOIN suite su ON ba.suite = su.id
+                         JOIN archive ON archive.id = su.archive_id
+                         JOIN files_archive_map af ON b.file = af.file_id AND af.archive_id = archive.id
+                         JOIN component c ON af.component_id = c.id
+                         JOIN source s ON b.source = s.id
+                         JOIN src_associations sa ON s.id = sa.source AND sa.suite = su.id
+                    WHERE TRUE %s %s %s %s""" % (con_packages, con_suites, con_components, con_architectures))
+            to_remove.extend(q)
 
     if not to_remove:
         print "Nothing to do."
@@ -639,10 +467,8 @@ def main ():
         Archive = get_archive(whereami, session)
         if Archive is None:
             utils.warn("Cannot find archive %s.  Setting blank values for origin" % whereami)
-            Subst_close_rm["__MASTER_ARCHIVE__"] = ""
             Subst_close_rm["__PRIMARY_MIRROR__"] = ""
         else:
-            Subst_close_rm["__MASTER_ARCHIVE__"] = Archive.origin_server
             Subst_close_rm["__PRIMARY_MIRROR__"] = Archive.primary_mirror
 
         for bug in utils.split_args(Options["Done"]):
@@ -651,7 +477,7 @@ def main ():
                 mail_message = utils.TemplateSubst(Subst_close_rm,cnf["Dir::Templates"]+"/rm.bug-close-with-related")
             else:
                 mail_message = utils.TemplateSubst(Subst_close_rm,cnf["Dir::Templates"]+"/rm.bug-close")
-            utils.send_mail(mail_message)
+            utils.send_mail(mail_message, whitelists=whitelists)
 
     # close associated bug reports
     if Options["Do-Close"]:
@@ -673,11 +499,18 @@ def main ():
         if len(sources) == 1:
             source_pkg = source.split("_", 1)[0]
         else:
-            utils.fubar("Closing bugs for multiple source pakcages is not supported.  Do it yourself.")
+            utils.fubar("Closing bugs for multiple source packages is not supported.  Do it yourself.")
         Subst_close_other["__BUG_NUMBER_ALSO__"] = ""
         Subst_close_other["__SOURCE__"] = source_pkg
-        other_bugs = bts.get_bugs('src', source_pkg, 'status', 'open')
+        merged_bugs = set()
+        other_bugs = bts.get_bugs('src', source_pkg, 'status', 'open', 'status', 'forwarded')
         if other_bugs:
+            for bugno in other_bugs:
+                if bugno not in merged_bugs:
+                    for bug in bts.get_status(bugno):
+                        for merged in bug.mergedwith:
+                            other_bugs.remove(merged)
+                            merged_bugs.add(merged)
             logfile.write("Also closing bug(s):")
             logfile822.write("Also-Bugs:")
             for bug in other_bugs:
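
The hunk above also closes bugs in the "forwarded" state and skips duplicates that are merged with a bug already being handled. A rough standalone sketch of the same de-duplication idea, assuming bts is the python-debianbts interface that rm.py already uses and "somepackage" is a placeholder source name; unlike the real code, which removes entries from other_bugs in place, this variant collects the merged numbers into a set first:

    import debianbts as bts  # assumed to be the "bts" object rm.py refers to

    source_pkg = 'somepackage'  # placeholder source package name

    # Open and forwarded bugs filed against the source package.
    other_bugs = bts.get_bugs('src', source_pkg, 'status', 'open', 'status', 'forwarded')

    # Remember which bugs are merged with one we already have, so each
    # merge group is only closed once.
    merged = set()
    for bugno in other_bugs:
        if bugno in merged:
            continue
        for status in bts.get_status(bugno):
            merged.update(status.mergedwith)

    to_close = [bugno for bugno in other_bugs if bugno not in merged]
    print(to_close)
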
index e7dded0f92393f846e88fad9f8aeebac4eabe84b..53f568e0df60844bd470095c330a03c8b1f13e94 100755 (executable)
@@ -27,6 +27,7 @@ import rrdtool
 from debian import deb822
 
 from daklib.dbconn import *
+from daklib.gpg import SignedFile
 from daklib import utils
 from daklib.regexes import re_html_escaping, html_escaping
 
@@ -109,11 +110,12 @@ def table_header():
 def table_footer():
     return '</table><br/><p>non-NEW uploads are <a href="/deferred/">available</a>, see the <a href="ftp://ftp-master.debian.org/pub/UploadQueue/README">UploadQueue-README</a> for more information.</p></center><br/>\n'
 
-def table_row(changesname, delay, changed_by, closes):
+def table_row(changesname, delay, changed_by, closes, fingerprint):
     global row_number
 
     res = '<tr class="%s">'%((row_number%2) and 'odd' or 'even')
-    res += (3*'<td valign="top">%s</td>')%tuple(map(html_escape,(changesname,delay,changed_by)))
+    res += (2*'<td valign="top">%s</td>')%tuple(map(html_escape,(changesname,delay)))
+    res += '<td valign="top">%s<br><span class=\"deferredfp\">Fingerprint: %s</span></td>' % (html_escape(changed_by), fingerprint)
     res += ('<td valign="top">%s</td>' %
              ''.join(map(lambda close:  '<a href="http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=%s">#%s</a><br>' % (close, close),closes)))
     res += '</tr>\n'
@@ -188,6 +190,8 @@ def get_upload_data(changesfn):
 
     uploader = achanges.get('changed-by')
     uploader = re.sub(r'^\s*(\S.*)\s+<.*>',r'\1',uploader)
+    with utils.open_file(changesfn) as f:
+        fingerprint = SignedFile(f.read(), keyrings=get_active_keyring_paths()).fingerprint
     if Cnf.has_key("Show-Deferred::LinkPath"):
         isnew = 0
         suites = get_suites_source_in(achanges['source'])
@@ -210,7 +214,7 @@ def get_upload_data(changesfn):
                 if os.path.exists(qfn):
                     os.symlink(qfn,lfn)
                     os.chmod(qfn, 0o644)
-    return (max(delaydays-1,0)*24*60*60+remainingtime, changesname, delay, uploader, achanges.get('closes','').split(),achanges, delaydays)
+    return (max(delaydays-1,0)*24*60*60+remainingtime, changesname, delay, uploader, achanges.get('closes','').split(), fingerprint, achanges, delaydays)
 
 def list_uploads(filelist, rrd_dir):
     uploads = map(get_upload_data, filelist)
@@ -219,7 +223,7 @@ def list_uploads(filelist, rrd_dir):
     print header()
     if uploads:
         print table_header()
-        print ''.join(map(lambda x: table_row(*x[1:5]), uploads)).encode('utf-8')
+        print ''.join(map(lambda x: table_row(*x[1:6]), uploads)).encode('utf-8')
         print table_footer()
     else:
         print '<h1>Currently no deferred uploads to Debian</h1>'
@@ -231,13 +235,14 @@ def list_uploads(filelist, rrd_dir):
         try:
             counts = [0]*16
             for u in uploads:
-                counts[u[6]] += 1
+                counts[u[7]] += 1
                 print >> f, "Changes-file: %s"%u[1]
                 fields = """Location: DEFERRED
 Delayed-Until: %s
-Delay-Remaining: %s"""%(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(time.time()+u[0])),u[2])
+Delay-Remaining: %s
+Fingerprint: %s"""%(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(time.time()+u[0])),u[2], u[5])
                 print >> f, fields
-                encoded = unicode(u[5]).encode('utf-8')
+                encoded = unicode(u[6]).encode('utf-8')
                 print >> f, encoded.rstrip()
                 open(os.path.join(Cnf["Show-Deferred::LinkPath"],u[1]),"w").write(encoded+fields+'\n')
                 print >> f
index eac91d8a419201e970a132afab11ab3a680482c5..1397a324f6eae35d2fd8a5b5d105fda3a1a6bdb9 100755 (executable)
@@ -30,13 +30,12 @@ import os, sys, time
 import apt_pkg
 import examine_package
 
+from daklib import policy
 from daklib.dbconn import *
-from daklib.queue import determine_new, check_valid, Upload, get_policy_queue
 from daklib import utils
 from daklib.regexes import re_source_ext
 from daklib.config import Config
 from daklib import daklog
-from daklib.changesutils import *
 from daklib.dakmultiprocessing import DakProcessPool, PROC_STATUS_SUCCESS, PROC_STATUS_SIGNALRAISED
 from multiprocessing import Manager, TimeoutError
 
@@ -53,7 +52,7 @@ timeout_str = "Timed out while processing"
 ################################################################################
 ################################################################################
 
-def html_header(name, filestoexamine):
+def html_header(name, missing):
     if name.endswith('.changes'):
         name = ' '.join(name.split('_')[:2])
     result = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
@@ -122,8 +121,7 @@ def html_header(name, filestoexamine):
       <p><a href="#source-lintian" onclick="show('source-lintian-body')">source lintian</a></p>
 
 """
-    for fn in filter(lambda x: x.endswith('.deb') or x.endswith('.udeb'),filestoexamine):
-        packagename = fn.split('_')[0]
+    for binarytype, packagename in filter(lambda m: m[0] in ('deb', 'udeb'), missing):
         result += """
         <p class="subtitle">%(pkg)s</p>
         <p><a href="#binary-%(pkg)s-control" onclick="show('binary-%(pkg)s-control-body')">control file</a></p>
@@ -154,86 +152,59 @@ def html_footer():
 ################################################################################
 
 
-def do_pkg(changes_file):
-    changes_file = utils.validate_changes_file_arg(changes_file, 0)
-    if not changes_file:
-        return
-    print "\n" + changes_file
+def do_pkg(upload_id):
+    cnf = Config()
+
+    session = DBConn().session()
+    upload = session.query(PolicyQueueUpload).filter_by(id=upload_id).one()
 
-    u = Upload()
-    u.pkg.changes_file = changes_file
-    # We can afoord not to check the signature before loading the changes file
-    # as we've validated it already (otherwise it couldn't be in new)
-    # and we can more quickly skip over already processed files this way
-    u.load_changes(changes_file)
+    queue = upload.policy_queue
+    changes = upload.changes
 
-    origchanges = os.path.abspath(u.pkg.changes_file)
+    origchanges = os.path.join(queue.path, changes.changesname)
+    print origchanges
 
-    # Still be cautious in case paring the changes file went badly
-    if u.pkg.changes.has_key('source') and u.pkg.changes.has_key('version'):
-        htmlname = u.pkg.changes["source"] + "_" + u.pkg.changes["version"] + ".html"
-        htmlfile = os.path.join(cnf["Show-New::HTMLPath"], htmlname)
-    else:
-        # Changes file was bad
-        print "Changes file %s missing source or version field" % changes_file
-        return
+    htmlname = "{0}_{1}.html".format(changes.source, changes.version)
+    htmlfile = os.path.join(cnf['Show-New::HTMLPath'], htmlname)
 
     # Have we already processed this?
     if os.path.exists(htmlfile) and \
-        os.stat(htmlfile).st_mtime > os.stat(origchanges).st_mtime:
+        os.stat(htmlfile).st_mtime > time.mktime(changes.created.timetuple()):
             with open(htmlfile, "r") as fd:
                 if fd.read() != timeout_str:
                     sources.append(htmlname)
                     return (PROC_STATUS_SUCCESS,
                             '%s already up-to-date' % htmlfile)
 
-    # Now we'll load the fingerprint
-    session = DBConn().session()
+    # Go, process it... Now!
     htmlfiles_to_process.append(htmlfile)
-    (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changes_file, session=session)
-    new_queue = get_policy_queue('new', session );
-    u.pkg.directory = new_queue.path
-    u.update_subst()
-    files = u.pkg.files
-    changes = u.pkg.changes
     sources.append(htmlname)
 
-    for deb_filename, f in files.items():
-        if deb_filename.endswith(".udeb") or deb_filename.endswith(".deb"):
-            u.binary_file_checks(deb_filename, session)
-            u.check_binary_against_db(deb_filename, session)
-        else:
-            u.source_file_checks(deb_filename, session)
-            u.check_source_against_db(deb_filename, session)
-    u.pkg.changes["suite"] = u.pkg.changes["distribution"]
-
-    new, byhand = determine_new(u.pkg.changes_file, u.pkg.changes, files, 0, dsc=u.pkg.dsc, session=session)
+    group = cnf.get('Dinstall::UnprivGroup') or None
 
-    outfile = open(os.path.join(cnf["Show-New::HTMLPath"],htmlname),"w")
+    with open(htmlfile, 'w') as outfile:
+      with policy.UploadCopy(upload, group=group) as upload_copy:
+        handler = policy.PolicyQueueUploadHandler(upload, session)
+        missing = [ (o['type'], o['package']) for o in handler.missing_overrides() ]
+        distribution = changes.distribution
 
-    filestoexamine = []
-    for pkg in new.keys():
-        for fn in new[pkg]["files"]:
-            filestoexamine.append(fn)
+        print >>outfile, html_header(changes.source, missing)
+        print >>outfile, examine_package.display_changes(distribution, origchanges)
 
-    print >> outfile, html_header(changes["source"], filestoexamine)
+        if upload.source is not None and ('dsc', upload.source.source) in missing:
+            fn = os.path.join(upload_copy.directory, upload.source.poolfile.basename)
+            print >>outfile, examine_package.check_dsc(distribution, fn, session)
+        for binary in upload.binaries:
+            if (binary.binarytype, binary.package) not in missing:
+                continue
+            fn = os.path.join(upload_copy.directory, binary.poolfile.basename)
+            print >>outfile, examine_package.check_deb(distribution, fn, session)
 
-    check_valid(new, session)
-    distribution = changes["distribution"].keys()[0]
-    print >> outfile, examine_package.display_changes(distribution, changes_file)
+        print >>outfile, html_footer()
 
-    for fn in filter(lambda fn: fn.endswith(".dsc"), filestoexamine):
-        print >> outfile, examine_package.check_dsc(distribution, fn, session)
-    for fn in filter(lambda fn: fn.endswith(".deb") or fn.endswith(".udeb"), filestoexamine):
-        print >> outfile, examine_package.check_deb(distribution, fn, session)
-
-    print >> outfile, html_footer()
-
-    outfile.close()
     session.close()
-
     htmlfiles_to_process.remove(htmlfile)
-    return (PROC_STATUS_SUCCESS, '%s already updated' % htmlfile)
+    return (PROC_STATUS_SUCCESS, '{0} already updated'.format(htmlfile))
 
 ################################################################################
 
@@ -252,38 +223,47 @@ def init(session):
     cnf = Config()
 
     Arguments = [('h',"help","Show-New::Options::Help"),
-                 ("p","html-path","Show-New::HTMLPath","HasArg")]
+                 ("p","html-path","Show-New::HTMLPath","HasArg"),
+                 ('q','queue','Show-New::Options::Queue','HasArg')]
 
     for i in ["help"]:
         if not cnf.has_key("Show-New::Options::%s" % (i)):
             cnf["Show-New::Options::%s" % (i)] = ""
 
-    changes_files = apt_pkg.parse_commandline(cnf.Cnf,Arguments,sys.argv)
-    if len(changes_files) == 0:
-        new_queue = get_policy_queue('new', session );
-        changes_files = utils.get_changes_files(new_queue.path)
-
+    changesnames = apt_pkg.parse_commandline(cnf.Cnf,Arguments,sys.argv)
     Options = cnf.subtree("Show-New::Options")
 
     if Options["help"]:
         usage()
 
-    return changes_files
+    queue_names = Options.find('Queue', 'new').split(',')
+    uploads = session.query(PolicyQueueUpload) \
+        .join(PolicyQueueUpload.policy_queue).filter(PolicyQueue.queue_name.in_(queue_names)) \
+        .join(PolicyQueueUpload.changes).order_by(DBChange.source)
+
+    if len(changesnames) > 0:
+        uploads = uploads.filter(DBChange.changesname.in_(changesnames))
+
+    return uploads
 
 
 ################################################################################
 ################################################################################
 
 def main():
-    session = DBConn().session()
-    changes_files = init(session)
+    examine_package.use_html = True
+    pool = DakProcessPool(processes=5)
 
-    examine_package.use_html=1
+    session = DBConn().session()
+    upload_ids = [ u.id for u in init(session) ]
+    session.close()
 
-    pool = DakProcessPool(processes=5)
-    p = pool.map_async(do_pkg, changes_files)
+    for upload_id in upload_ids:
+        pool.apply_async(do_pkg, [upload_id])
     pool.close()
-    p.wait(timeout=600)
+
+    #p.wait(timeout=600)
+    pool.join()
     for htmlfile in htmlfiles_to_process:
         with open(htmlfile, "w") as fd:
             fd.write(timeout_str)
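
The rewritten main() above collects the upload ids up front, then hands one do_pkg() job per id to the worker pool with apply_async() and waits via close()/join(). A minimal sketch of that pattern with the standard multiprocessing.Pool (DakProcessPool offers a similar interface; the worker and ids below are placeholders):

    from multiprocessing import Pool

    def do_pkg(upload_id):
        # Stand-in for the real worker, which renders one HTML page per upload.
        return 'processed upload %d' % upload_id

    if __name__ == '__main__':
        upload_ids = [1, 2, 3]          # placeholder ids; dak reads them from the database
        pool = Pool(processes=5)
        results = [pool.apply_async(do_pkg, [i]) for i in upload_ids]
        pool.close()                    # no further jobs will be submitted
        pool.join()                     # wait for all workers to finish
        for r in results:
            print(r.get())
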
diff --git a/dak/split_done.py b/dak/split_done.py
deleted file mode 100755 (executable)
index 3d07287..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-
-""" Split queue/done into date based subdirectories """
-# Copyright (C) 2004, 2005, 2006  James Troup <james@nocrew.org>
-# Copyright (C) 2008  Joerg Jaspert <joerg@debian.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-import glob, os, stat, time
-from daklib import utils
-
-################################################################################
-
-def main():
-    Cnf = utils.get_conf()
-    count = 0
-    move_date = int(time.time())
-    os.chdir(Cnf["Dir::Done"])
-    files = glob.glob("%s/*" % (Cnf["Dir::Done"]))
-    for filename in files:
-        if os.path.isfile(filename):
-            filemtime = os.stat(filename)[stat.ST_MTIME]
-            if filemtime > move_date:
-                continue
-            mtime = time.gmtime(filemtime)
-            dirname = time.strftime("%Y/%m/%d", mtime)
-            if not os.path.exists(dirname):
-                print "Creating: %s" % (dirname)
-                os.makedirs(dirname)
-            dest = dirname + '/' + os.path.basename(filename)
-            if os.path.exists(dest):
-                utils.warn("%s already exists." % (dest))
-                continue
-            print "Move: %s -> %s" % (filename, dest)
-            os.rename(filename, dest)
-            count = count + 1
-    print "Moved %d files." % (count)
-
-############################################################
-
-if __name__ == '__main__':
-    main()
index f33873475029ff701fdd2509ee57275fbf28b6f6..6f69e1a7f4239429fac841b78cdb47c92605a278 100755 (executable)
@@ -2,6 +2,7 @@
 
 """ Various statistical pr0nography fun and games """
 # Copyright (C) 2000, 2001, 2002, 2003, 2006  James Troup <james@nocrew.org>
+# Copyright (C) 2013  Luca Falavigna <dktrkranz@debian.org>
 
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 import sys
 import apt_pkg
 
+from datetime import datetime
+from email.utils import mktime_tz, parsedate_tz
+from mailbox import mbox
+from os import listdir, system, unlink
+from os.path import isfile, join, splitext
+from re import findall, DOTALL, MULTILINE
+from sys import stderr
+from yaml import safe_load, safe_dump
+
 from daklib import utils
 from daklib.dbconn import DBConn, get_suite_architectures, Suite, Architecture
 
@@ -40,6 +50,19 @@ from daklib.dbconn import DBConn, get_suite_architectures, Suite, Architecture
 
 Cnf = None
 
+stats = {}
+users = {}
+buffer = 0
+FORMAT_SWITCH = '2009-08'
+blacklisted = ('dak', 'katie')
+
+NEW = ('^(\d{14})\|(?:jennifer|process-unchecked|.*?\|dak)'
+       '\|(Moving to new|ACCEPT-TO-NEW)')
+new_ACTIONS = '^(\d{14})\|[^\|]*\|(\S+)\|NEW (\S+)[:\|]'
+old_ACTIONS = ('(?:lisa|process-new)\|program start\|(.*?)\|'
+               '(?:lisa|process-new)\|program end')
+old_ACTION = '^(\d{14})\|(?:lisa|process-new)\|(Accepting changes|rejected)\|'
+
 ################################################################################
 
 def usage(exit_code=0):
@@ -53,6 +76,7 @@ The following MODEs are available:
   arch-space    - displays space used by each architecture
   pkg-nums      - displays the number of packages by suite/architecture
   daily-install - displays daily install stats suitable for graphing
+  new           - stores stats about the NEW queue
 """
     sys.exit(exit_code)
 
@@ -189,8 +213,179 @@ def number_of_packages():
 
 ################################################################################
 
+def parse_new_uploads(data):
+    global stats
+    latest_timestamp = stats['timestamp']
+    for entry in findall(NEW, data, MULTILINE):
+        timestamp = entry[0]
+        if stats['timestamp'] >= timestamp:
+            continue
+        date = parse_timestamp(timestamp)
+        if date not in stats:
+            stats[date] = {'stats': {'NEW': 0, 'ACCEPT': 0,
+                           'REJECT': 0, 'PROD': 0}, 'members': {}}
+        stats[date]['stats']['NEW'] += 1
+        stats['history']['stats']['NEW'] += 1
+        latest_timestamp = timestamp
+    return latest_timestamp
+
+
+def parse_actions(data, logdate):
+    global stats
+    latest_timestamp = stats['timestamp']
+    if logdate <= FORMAT_SWITCH:
+        for batch in findall(old_ACTIONS, data, DOTALL):
+            who = batch.split()[0]
+            if who in blacklisted:
+                continue
+            for entry in findall(old_ACTION, batch, MULTILINE):
+                action = entry[1]
+                if action.startswith('Accepting'):
+                    action = 'ACCEPT'
+                elif action.startswith('rejected'):
+                    action = 'REJECT'
+                timestamp = entry[0]
+                if stats['timestamp'] >= timestamp:
+                    continue
+                date = parse_timestamp(entry[0])
+                if date not in stats:
+                    stats[date] = {'stats': {'NEW': 0, 'ACCEPT': 0,
+                                   'REJECT': 0, 'PROD': 0}, 'members': {}}
+                stats[date]['stats'][action] += 1
+                stats['history']['stats'][action] += 1
+                if who not in stats[date]['members']:
+                    stats[date]['members'][who] = {'ACCEPT': 0, 'REJECT': 0,
+                                                   'PROD': 0}
+                stats[date]['members'][who][action] += 1
+                if who not in stats['history']['members']:
+                    stats['history']['members'][who] = {'ACCEPT': 0, 'REJECT': 0,
+                                                    'PROD': 0}
+                stats['history']['members'][who][action] += 1
+                latest_timestamp = timestamp
+        parse_prod(logdate)
+    if logdate >= FORMAT_SWITCH:
+        for entry in findall(new_ACTIONS, data, MULTILINE):
+            action = entry[2]
+            timestamp = entry[0]
+            if stats['timestamp'] >= timestamp:
+                continue
+            date = parse_timestamp(timestamp)
+            if date not in stats:
+                stats[date] = {'stats': {'NEW': 0, 'ACCEPT': 0,
+                               'REJECT': 0, 'PROD': 0}, 'members': {}}
+            member = entry[1]
+            if member in blacklisted:
+                continue
+            if date not in stats:
+                stats[date] = {'stats': {'NEW': 0, 'ACCEPT': 0,
+                               'REJECT': 0, 'PROD': 0}, 'members': {}}
+            if member not in stats[date]['members']:
+                stats[date]['members'][member] = {'ACCEPT': 0, 'REJECT': 0,
+                                                  'PROD': 0}
+            if member not in stats['history']['members']:
+                stats['history']['members'][member] = {'ACCEPT': 0,
+                                                       'REJECT': 0, 'PROD': 0}
+            stats[date]['stats'][action] += 1
+            stats[date]['members'][member][action] += 1
+            stats['history']['stats'][action] += 1
+            stats['history']['members'][member][action] += 1
+            latest_timestamp = timestamp
+    return latest_timestamp
+
+
+def parse_prod(logdate):
+    global stats
+    global users
+    maildate = ''.join([x[-2:] for x in logdate.split('-')])
+    mailarchive = join(utils.get_conf()['Dir::Base'], 'mail/archive',
+                       'mail-%s.xz' % maildate)
+    if not isfile(mailarchive):
+        return
+    (fd, tmpfile) = utils.temp_filename(utils.get_conf()['Dir::TempPath'])
+    system('xzcat %s > %s' % (mailarchive, tmpfile))
+    for message in mbox(tmpfile):
+        if (message['subject'] and
+                message['subject'].startswith('Comments regarding')):
+            try:
+                member = users[' '.join(message['From'].split()[:-1])]
+            except KeyError:
+                continue
+            ts = mktime_tz(parsedate_tz(message['date']))
+            timestamp = datetime.fromtimestamp(ts).strftime("%Y%m%d%H%M%S")
+            date = parse_timestamp(timestamp)
+            if date not in stats:
+                stats[date] = {'stats': {'NEW': 0, 'ACCEPT': 0,
+                                 'REJECT': 0, 'PROD': 0}, 'members': {}}
+            if member not in stats[date]['members']:
+                stats[date]['members'][member] = {'ACCEPT': 0, 'REJECT': 0,
+                                                     'PROD': 0}
+            if member not in stats['history']['members']:
+                stats['history']['members'][member] = {'ACCEPT': 0,
+                                                       'REJECT': 0, 'PROD': 0}
+            stats[date]['stats']['PROD'] += 1
+            stats[date]['members'][member]['PROD'] += 1
+            stats['history']['stats']['PROD'] += 1
+            stats['history']['members'][member]['PROD'] += 1
+    unlink(tmpfile)
+
+
+def parse_timestamp(timestamp):
+    y = int(timestamp[:4])
+    m = int(timestamp[4:6])
+    return '%d-%02d' % (y, m)
+
+
+def new_stats(logdir, yaml):
+    global Cnf
+    global stats
+    try:
+        with open(yaml, 'r') as fd:
+            stats = safe_load(fd)
+    except IOError:
+        pass
+    if not stats:
+        stats = {'history': {'stats': {'NEW': 0, 'ACCEPT': 0,
+                 'REJECT': 0, 'PROD': 0}, 'members': {}},
+                 'timestamp': '19700101000000'}
+    latest_timestamp = stats['timestamp']
+    for fn in sorted(listdir(logdir)):
+        if fn == 'current':
+            continue
+        log = splitext(fn)[0]
+        if log < parse_timestamp(stats['timestamp']):
+            continue
+        logfile = join(logdir, fn)
+        if isfile(logfile):
+            if fn.endswith('.bz2'):
+                # This hack is required because python2 does not support
+                # multi-stream files (http://bugs.python.org/issue1625)
+                (fd, tmpfile) = utils.temp_filename(Cnf['Dir::TempPath'])
+                system('bzcat %s > %s' % (logfile, tmpfile))
+                with open(tmpfile, 'r') as fd:
+                    data = fd.read()
+                unlink(tmpfile)
+            else:
+                with open(logfile, 'r') as fd:
+                    data = fd.read()
+            ts = parse_new_uploads(data)
+            if ts > latest_timestamp:
+                latest_timestamp = ts
+            ts = parse_actions(data, log)
+            if ts > latest_timestamp:
+                latest_timestamp = ts
+            stderr.write('.')
+            stderr.flush()
+    stderr.write('\n')
+    stderr.flush()
+    stats['timestamp'] = latest_timestamp
+    with open(yaml, 'w') as fd:
+        safe_dump(stats, fd)
+
+################################################################################
+
 def main ():
     global Cnf
+    global users
 
     Cnf = utils.get_conf()
     Arguments = [('h',"help","Stats::Options::Help")]
@@ -208,8 +403,12 @@ def main ():
         utils.warn("dak stats requires a MODE argument")
         usage(1)
     elif len(args) > 1:
-        utils.warn("dak stats accepts only one MODE argument")
-        usage(1)
+        if args[0].lower() != "new":
+            utils.warn("dak stats accepts only one MODE argument")
+            usage(1)
+    elif args[0].lower() == "new":
+        utils.warn("new MODE requires an output file")
+        usage(1)
     mode = args[0].lower()
 
     if mode == "arch-space":
@@ -218,6 +417,9 @@ def main ():
         number_of_packages()
     elif mode == "daily-install":
         daily_install_stats()
+    elif mode == "new":
+        users = utils.get_users_from_ldap()
+        new_stats(Cnf["Dir::Log"], args[1])
     else:
         utils.warn("unknown mode '%s'" % (mode))
         usage(1)
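
The new "new" mode buckets every log entry by month: parse_timestamp() reduces the 14-digit log timestamps to "YYYY-MM" keys, and the NEW/ACCEPT/REJECT/PROD counters are nested under those keys. A small sketch of that bucketing, using made-up timestamps:

    def parse_timestamp(timestamp):
        # '20131027152041' -> '2013-10'
        return '%d-%02d' % (int(timestamp[:4]), int(timestamp[4:6]))

    stats = {}
    for ts, action in [('20131027152041', 'ACCEPT'), ('20131103090000', 'REJECT')]:
        date = parse_timestamp(ts)
        if date not in stats:
            stats[date] = {'NEW': 0, 'ACCEPT': 0, 'REJECT': 0, 'PROD': 0}
        stats[date][action] += 1

    print(stats)  # {'2013-10': {..., 'ACCEPT': 1, ...}, '2013-11': {..., 'REJECT': 1, ...}}
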
index 7409f6f95a0b18154639cf25b62d952d1879cba7..68c65b6a070bf9dbe84d3f8a664c95ccd89572d0 100755 (executable)
@@ -134,7 +134,7 @@ def load_transitions(trans_file):
     sourcecontent = sourcefile.read()
     failure = False
     try:
-        trans = yaml.load(sourcecontent)
+        trans = yaml.safe_load(sourcecontent)
     except yaml.YAMLError as exc:
         # Someone fucked it up
         print "ERROR: %s" % (exc)
@@ -262,7 +262,7 @@ def write_transitions(from_trans):
     temp_lock  = lock_file(trans_temp)
 
     destfile = file(trans_temp, 'w')
-    yaml.dump(from_trans, destfile, default_flow_style=False)
+    yaml.safe_dump(from_trans, destfile, default_flow_style=False)
     destfile.close()
 
     os.rename(trans_temp, trans_file)
@@ -321,7 +321,7 @@ def temp_transitions_file(transitions):
     (fd, path) = tempfile.mkstemp("", "transitions", Cnf["Dir::TempPath"])
     os.chmod(path, 0o644)
     f = open(path, "w")
-    yaml.dump(transitions, f, default_flow_style=False)
+    yaml.safe_dump(transitions, f, default_flow_style=False)
     return path
 
 ################################################################################
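
The transitions file is now read and written with PyYAML's safe variants, which stick to plain YAML types instead of instantiating arbitrary Python objects. A short illustration with a made-up transition entry, loosely modelled on the transitions file layout:

    import yaml

    trans = {'pycurl': {'reason': 'waiting for the gnutls transition',
                        'source': 'pycurl',
                        'new': '7.19.0-3'}}

    # safe_dump/safe_load only handle plain types (dict, list, str, int, ...);
    # the default yaml.load loader can construct arbitrary Python objects.
    text = yaml.safe_dump(trans, default_flow_style=False)
    print(text)
    assert yaml.safe_load(text) == trans
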
index f70fc1c8374eb2963e48805626d0f664c053b7ea..61a1089210295e5ab759d7858e59edc5f4bfb58e 100755 (executable)
@@ -37,6 +37,8 @@ import os
 import apt_pkg
 import time
 import errno
+from glob import glob
+from re import findall
 
 from daklib import utils
 from daklib.config import Config
@@ -46,7 +48,6 @@ from daklib.daklog import Logger
 ################################################################################
 
 Cnf = None
-required_database_schema = 72
 
 ################################################################################
 
@@ -120,6 +121,7 @@ Updates dak's database schema to the lastest version. You should disable crontab
         print "Determining dak database revision ..."
         cnf = Config()
         logger = Logger('update-db')
+        modules = []
 
         try:
             # Build a connect string
@@ -155,25 +157,39 @@ Updates dak's database schema to the lastest version. You should disable crontab
             self.update_db_to_zero()
             database_revision = 0
 
+        dbfiles = glob(os.path.join(os.path.dirname(__file__), 'dakdb/update*.py'))
+        required_database_schema = max(map(int, findall('update(\d+).py', " ".join(dbfiles))))
+
         print "dak database schema at %d" % database_revision
         print "dak version requires schema %d"  % required_database_schema
 
-        if database_revision == required_database_schema:
+        if database_revision < required_database_schema:
+            print "\nUpdates to be applied:"
+            for i in range(database_revision, required_database_schema):
+                i += 1
+                dakdb = __import__("dakdb", globals(), locals(), ['update'+str(i)])
+                update_module = getattr(dakdb, "update"+str(i))
+                print "Update %d: %s" % (i, next(s for s in update_module.__doc__.split("\n") if s))
+                modules.append((update_module, i))
+            prompt = "\nUpdate database? (y/N) "
+            answer = utils.our_raw_input(prompt)
+            if answer.upper() != 'Y':
+                sys.exit(0)
+        else:
             print "no updates required"
             logger.log(["no updates required"])
             sys.exit(0)
 
-        for i in range (database_revision, required_database_schema):
+        for module in modules:
+            (update_module, i) = module
             try:
-                dakdb = __import__("dakdb", globals(), locals(), ['update'+str(i+1)])
-                update_module = getattr(dakdb, "update"+str(i+1))
                 update_module.do_update(self)
-                message = "updated database schema from %d to %d" % (database_revision, i+1)
+                message = "updated database schema from %d to %d" % (database_revision, i)
                 print message
                 logger.log([message])
             except DBUpdateError as e:
                 # Seems the update did not work.
-                print "Was unable to update database schema from %d to %d." % (database_revision, i+1)
+                print "Was unable to update database schema from %d to %d." % (database_revision, i)
                 print "The error message received was %s" % (e)
                 logger.log(["DB Schema upgrade failed"])
                 logger.close()
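
Instead of a hard-coded required_database_schema, update-db now derives the target revision from the highest-numbered dakdb/update*.py file. A small sketch of that discovery step, run here on a hard-coded list of filenames (the real code obtains dbfiles via glob() over the dakdb directory):

    from re import findall

    # In dak this list comes from glob() over dak/dakdb/update*.py.
    dbfiles = ['dakdb/update1.py', 'dakdb/update99.py', 'dakdb/update100.py']

    required_database_schema = max(map(int, findall(r'update(\d+)\.py', ' '.join(dbfiles))))
    print(required_database_schema)  # -> 100
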
diff --git a/daklib/announce.py b/daklib/announce.py
new file mode 100644 (file)
index 0000000..e784df0
--- /dev/null
@@ -0,0 +1,170 @@
+"""module to send announcements for processed packages
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012, Ansgar Burchardt <ansgar@debian.org>
+@license: GPL-2+
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+
+from daklib.config import Config
+from daklib.textutils import fix_maintainer
+from daklib.utils import mail_addresses_for_upload, TemplateSubst, send_mail
+
+class ProcessedUpload(object):
+    # people
+    maintainer = None
+    changed_by = None
+    fingerprint = None
+
+    # suites
+    suites = []
+    from_policy_suites = []
+
+    # package
+    changes = None
+    changes_filename = None
+    sourceful = None
+    source = None
+    architecture = None
+    version = None
+    bugs = None
+
+    # program
+    program = "unknown-program"
+
+    warnings = []
+
+def _subst_for_upload(upload):
+    cnf = Config()
+
+    maintainer = upload.maintainer or cnf['Dinstall::MyEmailAddress']
+    changed_by = upload.changed_by or maintainer
+    if upload.sourceful:
+        maintainer_to = mail_addresses_for_upload(maintainer, changed_by, upload.fingerprint)
+    else:
+        maintainer_to = mail_addresses_for_upload(maintainer, maintainer, upload.fingerprint)
+
+    bcc = 'X-DAK: dak {0}'.format(upload.program)
+    if 'Dinstall::Bcc' in cnf:
+        bcc = '{0}\nBcc: {1}'.format(bcc, cnf['Dinstall::Bcc'])
+
+    subst = {
+        '__DISTRO__': cnf['Dinstall::MyDistribution'],
+        '__BUG_SERVER__': cnf.get('Dinstall::BugServer'),
+        '__ADMIN_ADDRESS__': cnf['Dinstall::MyAdminAddress'],
+        '__DAK_ADDRESS__': cnf['Dinstall::MyEmailAddress'],
+        '__REJECTOR_ADDRESS__': cnf['Dinstall::MyEmailAddress'],
+        '__MANUAL_REJECT_MESSAGE__': '',
+
+        '__BCC__': bcc,
+
+        '__MAINTAINER__': changed_by,
+        '__MAINTAINER_FROM__': fix_maintainer(changed_by)[1],
+        '__MAINTAINER_TO__': ', '.join(maintainer_to),
+        '__CHANGES_FILENAME__': upload.changes_filename,
+        '__FILE_CONTENTS__': upload.changes,
+        '__SOURCE__': upload.source,
+        '__VERSION__': upload.version,
+        '__ARCHITECTURE__': upload.architecture,
+        '__WARNINGS__': '\n'.join(upload.warnings),
+        }
+
+    override_maintainer = cnf.get('Dinstall::OverrideMaintainer')
+    if override_maintainer:
+        subst['__MAINTAINER_FROM__'] = subst['__MAINTAINER_TO__'] = override_maintainer
+
+    return subst
+
+def _whitelists(upload):
+    return [ s.mail_whitelist for s in upload.suites ]
+
+def announce_reject(upload, reason, rejected_by=None):
+    cnf = Config()
+    subst = _subst_for_upload(upload)
+    whitelists = _whitelists(upload)
+
+    automatic = rejected_by is None
+
+    subst['__CC__'] = 'X-DAK-Rejection: {0}'.format('automatic' if automatic else 'manual')
+    subst['__REJECT_MESSAGE__'] = reason
+
+    if rejected_by:
+        subst['__REJECTOR_ADDRESS__'] = rejected_by
+
+    if not automatic:
+        subst['__BCC__'] = '{0}\nBcc: {1}'.format(subst['__BCC__'], subst['__REJECTOR_ADDRESS__'])
+
+    message = TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'queue.rejected'))
+    send_mail(message, whitelists=whitelists)
+
+def announce_accept(upload):
+    cnf = Config()
+    subst = _subst_for_upload(upload)
+    whitelists = _whitelists(upload)
+
+    accepted_to_real_suite = any(suite.policy_queue is None or suite in upload.from_policy_suites for suite in upload.suites)
+
+    suite_names = []
+    for suite in upload.suites:
+        if suite.policy_queue:
+            suite_names.append("{0}->{1}".format(suite.suite_name, suite.policy_queue.queue_name))
+        else:
+            suite_names.append(suite.suite_name)
+    suite_names.extend(suite.suite_name for suite in upload.from_policy_suites)
+    subst['__SUITE__'] = ', '.join(suite_names) or '(none)'
+
+    message = TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'process-unchecked.accepted'))
+    send_mail(message, whitelists=whitelists)
+
+    if accepted_to_real_suite and upload.sourceful:
+        # send mail to announce lists and tracking server
+        announce = set()
+        for suite in upload.suites:
+            if suite.policy_queue is None or suite in upload.from_policy_suites:
+                announce.update(suite.announce or [])
+
+        announce_list_address = ", ".join(announce)
+
+        tracking = cnf.get('Dinstall::TrackingServer')
+        if tracking:
+            announce_list_address = "{0}\nBcc: {1}@{2}".format(announce_list_address, upload.source, tracking)
+
+        if len(announce_list_address) != 0:
+            my_subst = subst.copy()
+            my_subst['__ANNOUNCE_LIST_ADDRESS__'] = announce_list_address
+
+            message = TemplateSubst(my_subst, os.path.join(cnf['Dir::Templates'], 'process-unchecked.announce'))
+            send_mail(message, whitelists=whitelists)
+
+    close_bugs_default = cnf.find_b('Dinstall::CloseBugs')
+    close_bugs = any(s.close_bugs if s.close_bugs is not None else close_bugs_default for s in upload.suites)
+    if accepted_to_real_suite and upload.sourceful and close_bugs:
+        for bug in upload.bugs:
+            my_subst = subst.copy()
+            my_subst['__BUG_NUMBER__'] = str(bug)
+
+            message = TemplateSubst(my_subst, os.path.join(cnf['Dir::Templates'], 'process-unchecked.bug-close'))
+            send_mail(message, whitelists=whitelists)
+
+def announce_new(upload):
+    cnf = Config()
+    subst = _subst_for_upload(upload)
+    whitelists = _whitelists(upload)
+
+    message = TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'process-unchecked.new'))
+    send_mail(message, whitelists=whitelists)
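
daklib/announce.py centralises the mails sent for processed uploads: a caller fills in a ProcessedUpload and passes it to announce_accept(), announce_reject() or announce_new(). A rough sketch of how a caller might drive it; it is only meaningful inside a configured dak installation, and every field value below is a placeholder:

    from daklib.announce import ProcessedUpload, announce_accept

    upload = ProcessedUpload()
    upload.program = 'process-upload'     # placeholder program name
    upload.maintainer = 'Jane Doe <jane@example.org>'
    upload.changed_by = 'Jane Doe <jane@example.org>'
    upload.fingerprint = '0123456789ABCDEF0123456789ABCDEF01234567'
    upload.suites = []                    # daklib.dbconn.Suite objects for the target suites
    upload.from_policy_suites = []
    upload.changes = ''                   # full text of the .changes file
    upload.changes_filename = 'hello_1.0-1_amd64.changes'
    upload.sourceful = True
    upload.source = 'hello'
    upload.version = '1.0-1'
    upload.architecture = 'source amd64'
    upload.bugs = []
    upload.warnings = []

    # Sends the "accepted" mail; announce and bug-close mails are only sent
    # when the upload reached a real (non-policy) suite.
    announce_accept(upload)
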
diff --git a/daklib/archive.py b/daklib/archive.py
new file mode 100644 (file)
index 0000000..372ab8a
--- /dev/null
@@ -0,0 +1,1295 @@
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""module to manipulate the archive
+
+This module provides classes to manipulate the archive.
+"""
+
+from daklib.dbconn import *
+import daklib.checks as checks
+from daklib.config import Config
+import daklib.upload as upload
+import daklib.utils as utils
+from daklib.fstransactions import FilesystemTransaction
+from daklib.regexes import re_changelog_versions, re_bin_only_nmu
+import daklib.daksubprocess
+
+import apt_pkg
+from datetime import datetime
+import os
+import shutil
+from sqlalchemy.orm.exc import NoResultFound
+import sqlalchemy.exc
+import tempfile
+import traceback
+
+class ArchiveException(Exception):
+    pass
+
+class HashMismatchException(ArchiveException):
+    pass
+
+class ArchiveTransaction(object):
+    """manipulate the archive in a transaction
+    """
+    def __init__(self):
+        self.fs = FilesystemTransaction()
+        self.session = DBConn().session()
+
+    def get_file(self, hashed_file, source_name, check_hashes=True):
+        """Look for file C{hashed_file} in database
+
+        @type  hashed_file: L{daklib.upload.HashedFile}
+        @param hashed_file: file to look for in the database
+
+        @type  source_name: str
+        @param source_name: source package name
+
+        @type  check_hashes: bool
+        @param check_hashes: check size and hashes match
+
+        @raise KeyError: file was not found in the database
+        @raise HashMismatchException: hash mismatch
+
+        @rtype:  L{daklib.dbconn.PoolFile}
+        @return: database entry for the file
+        """
+        poolname = os.path.join(utils.poolify(source_name), hashed_file.filename)
+        try:
+            poolfile = self.session.query(PoolFile).filter_by(filename=poolname).one()
+            if check_hashes and (poolfile.filesize != hashed_file.size
+                                 or poolfile.md5sum != hashed_file.md5sum
+                                 or poolfile.sha1sum != hashed_file.sha1sum
+                                 or poolfile.sha256sum != hashed_file.sha256sum):
+                raise HashMismatchException('{0}: Does not match file already existing in the pool.'.format(hashed_file.filename))
+            return poolfile
+        except NoResultFound:
+            raise KeyError('{0} not found in database.'.format(poolname))
+
+    def _install_file(self, directory, hashed_file, archive, component, source_name):
+        """Install a file
+
+        Will not give an error when the file is already present.
+
+        @rtype:  L{daklib.dbconn.PoolFile}
+        @return: database object for the new file
+        """
+        session = self.session
+
+        poolname = os.path.join(utils.poolify(source_name), hashed_file.filename)
+        try:
+            poolfile = self.get_file(hashed_file, source_name)
+        except KeyError:
+            poolfile = PoolFile(filename=poolname, filesize=hashed_file.size)
+            poolfile.md5sum = hashed_file.md5sum
+            poolfile.sha1sum = hashed_file.sha1sum
+            poolfile.sha256sum = hashed_file.sha256sum
+            session.add(poolfile)
+            session.flush()
+
+        try:
+            session.query(ArchiveFile).filter_by(archive=archive, component=component, file=poolfile).one()
+        except NoResultFound:
+            archive_file = ArchiveFile(archive, component, poolfile)
+            session.add(archive_file)
+            session.flush()
+
+            path = os.path.join(archive.path, 'pool', component.component_name, poolname)
+            hashed_file_path = os.path.join(directory, hashed_file.filename)
+            self.fs.copy(hashed_file_path, path, link=False, mode=archive.mode)
+
+        return poolfile
+
+    def install_binary(self, directory, binary, suite, component, allow_tainted=False, fingerprint=None, source_suites=None, extra_source_archives=None):
+        """Install a binary package
+
+        @type  directory: str
+        @param directory: directory the binary package is located in
+
+        @type  binary: L{daklib.upload.Binary}
+        @param binary: binary package to install
+
+        @type  suite: L{daklib.dbconn.Suite}
+        @param suite: target suite
+
+        @type  component: L{daklib.dbconn.Component}
+        @param component: target component
+
+        @type  allow_tainted: bool
+        @param allow_tainted: allow copying additional files from tainted archives
+
+        @type  fingerprint: L{daklib.dbconn.Fingerprint}
+        @param fingerprint: optional fingerprint
+
+        @type  source_suites: SQLAlchemy subquery for C{daklib.dbconn.Suite} or C{True}
+        @param source_suites: suites to copy the source from if they are not
+                              in C{suite} or C{True} to allow copying from any
+                              suite.
+
+        @type  extra_source_archives: list of L{daklib.dbconn.Archive}
+        @param extra_source_archives: extra archives to copy Built-Using sources from
+
+        @rtype:  L{daklib.dbconn.DBBinary}
+        @return: database object for the new package
+        """
+        session = self.session
+        control = binary.control
+        maintainer = get_or_set_maintainer(control['Maintainer'], session)
+        architecture = get_architecture(control['Architecture'], session)
+
+        (source_name, source_version) = binary.source
+        source_query = session.query(DBSource).filter_by(source=source_name, version=source_version)
+        source = source_query.filter(DBSource.suites.contains(suite)).first()
+        if source is None:
+            if source_suites != True:
+                source_query = source_query.join(DBSource.suites) \
+                    .filter(Suite.suite_id == source_suites.c.id)
+            source = source_query.first()
+            if source is None:
+                raise ArchiveException('{0}: trying to install to {1}, but could not find source'.format(binary.hashed_file.filename, suite.suite_name))
+            self.copy_source(source, suite, component)
+
+        db_file = self._install_file(directory, binary.hashed_file, suite.archive, component, source_name)
+
+        unique = dict(
+            package=control['Package'],
+            version=control['Version'],
+            architecture=architecture,
+            )
+        rest = dict(
+            source=source,
+            maintainer=maintainer,
+            poolfile=db_file,
+            binarytype=binary.type,
+            fingerprint=fingerprint,
+            )
+
+        try:
+            db_binary = session.query(DBBinary).filter_by(**unique).one()
+            for key, value in rest.iteritems():
+                if getattr(db_binary, key) != value:
+                    raise ArchiveException('{0}: Does not match binary in database.'.format(binary.hashed_file.filename))
+        except NoResultFound:
+            db_binary = DBBinary(**unique)
+            for key, value in rest.iteritems():
+                setattr(db_binary, key, value)
+            session.add(db_binary)
+            session.flush()
+            import_metadata_into_db(db_binary, session)
+
+            self._add_built_using(db_binary, binary.hashed_file.filename, control, suite, extra_archives=extra_source_archives)
+
+        if suite not in db_binary.suites:
+            db_binary.suites.append(suite)
+
+        session.flush()
+
+        return db_binary
+
+    def _ensure_extra_source_exists(self, filename, source, archive, extra_archives=None):
+        """ensure source exists in the given archive
+
+        This is intended to be used to check that Built-Using sources exist.
+
+        @type  filename: str
+        @param filename: filename to use in error messages
+
+        @type  source: L{daklib.dbconn.DBSource}
+        @param source: source to look for
+
+        @type  archive: L{daklib.dbconn.Archive}
+        @param archive: archive to look in
+
+        @type  extra_archives: list of L{daklib.dbconn.Archive}
+        @param extra_archives: list of archives to copy the source package from
+                               if it is not yet present in C{archive}
+        """
+        session = self.session
+        db_file = session.query(ArchiveFile).filter_by(file=source.poolfile, archive=archive).first()
+        if db_file is not None:
+            return True
+
+        # Try to copy file from one extra archive
+        if extra_archives is None:
+            extra_archives = []
+        db_file = session.query(ArchiveFile).filter_by(file=source.poolfile).filter(ArchiveFile.archive_id.in_([ a.archive_id for a in extra_archives])).first()
+        if db_file is None:
+            raise ArchiveException('{0}: Built-Using refers to package {1} (= {2}) not in target archive {3}.'.format(filename, source.source, source.version, archive.archive_name))
+
+        source_archive = db_file.archive
+        for dsc_file in source.srcfiles:
+            af = session.query(ArchiveFile).filter_by(file=dsc_file.poolfile, archive=source_archive, component=db_file.component).one()
+            # We were given an explicit list of archives so it is okay to copy from tainted archives.
+            self._copy_file(af.file, archive, db_file.component, allow_tainted=True)
+
+    def _add_built_using(self, db_binary, filename, control, suite, extra_archives=None):
+        """Add Built-Using sources to C{db_binary.extra_sources}
+        """
+        session = self.session
+        built_using = control.get('Built-Using', None)
+
+        if built_using is not None:
+            for dep in apt_pkg.parse_depends(built_using):
+                assert len(dep) == 1, 'Alternatives are not allowed in Built-Using field'
+                bu_source_name, bu_source_version, comp = dep[0]
+                assert comp == '=', 'Built-Using must contain strict dependencies'
+
+                bu_source = session.query(DBSource).filter_by(source=bu_source_name, version=bu_source_version).first()
+                if bu_source is None:
+                    raise ArchiveException('{0}: Built-Using refers to non-existing source package {1} (= {2})'.format(filename, bu_source_name, bu_source_version))
+
+                self._ensure_extra_source_exists(filename, bu_source, suite.archive, extra_archives=extra_archives)
+
+                db_binary.extra_sources.append(bu_source)
+
+    def install_source(self, directory, source, suite, component, changed_by, allow_tainted=False, fingerprint=None):
+        """Install a source package
+
+        @type  directory: str
+        @param directory: directory the source package is located in
+
+        @type  source: L{daklib.upload.Source}
+        @param source: source package to install
+
+        @type  suite: L{daklib.dbconn.Suite}
+        @param suite: target suite
+
+        @type  component: L{daklib.dbconn.Component}
+        @param component: target component
+
+        @type  changed_by: L{daklib.dbconn.Maintainer}
+        @param changed_by: person who prepared this version of the package
+
+        @type  allow_tainted: bool
+        @param allow_tainted: allow copying additional files from tainted archives
+
+        @type  fingerprint: L{daklib.dbconn.Fingerprint}
+        @param fingerprint: optional fingerprint
+
+        @rtype:  L{daklib.dbconn.DBSource}
+        @return: database object for the new source
+        """
+        session = self.session
+        archive = suite.archive
+        control = source.dsc
+        maintainer = get_or_set_maintainer(control['Maintainer'], session)
+        source_name = control['Source']
+
+        ### Add source package to database
+
+        # We need to install the .dsc first as the DBSource object refers to it.
+        db_file_dsc = self._install_file(directory, source._dsc_file, archive, component, source_name)
+
+        unique = dict(
+            source=source_name,
+            version=control['Version'],
+            )
+        rest = dict(
+            maintainer=maintainer,
+            changedby=changed_by,
+            #install_date=datetime.now().date(),
+            poolfile=db_file_dsc,
+            fingerprint=fingerprint,
+            dm_upload_allowed=(control.get('DM-Upload-Allowed', 'no') == 'yes'),
+            )
+
+        created = False
+        try:
+            db_source = session.query(DBSource).filter_by(**unique).one()
+            for key, value in rest.iteritems():
+                if getattr(db_source, key) != value:
+                    raise ArchiveException('{0}: Does not match source in database.'.format(source._dsc_file.filename))
+        except NoResultFound:
+            created = True
+            db_source = DBSource(**unique)
+            for key, value in rest.iteritems():
+                setattr(db_source, key, value)
+            # XXX: set as default in postgres?
+            db_source.install_date = datetime.now().date()
+            session.add(db_source)
+            session.flush()
+
+            # Add .dsc file. Other files will be added later.
+            db_dsc_file = DSCFile()
+            db_dsc_file.source = db_source
+            db_dsc_file.poolfile = db_file_dsc
+            session.add(db_dsc_file)
+            session.flush()
+
+        if suite in db_source.suites:
+            return db_source
+
+        db_source.suites.append(suite)
+
+        if not created:
+            for f in db_source.srcfiles:
+                self._copy_file(f.poolfile, archive, component, allow_tainted=allow_tainted)
+            return db_source
+
+        ### Now add remaining files and copy them to the archive.
+
+        for hashed_file in source.files.itervalues():
+            hashed_file_path = os.path.join(directory, hashed_file.filename)
+            if os.path.exists(hashed_file_path):
+                db_file = self._install_file(directory, hashed_file, archive, component, source_name)
+                session.add(db_file)
+            else:
+                db_file = self.get_file(hashed_file, source_name)
+                self._copy_file(db_file, archive, component, allow_tainted=allow_tainted)
+
+            db_dsc_file = DSCFile()
+            db_dsc_file.source = db_source
+            db_dsc_file.poolfile = db_file
+            session.add(db_dsc_file)
+
+        session.flush()
+
+        # Importing is safe as we only arrive here when we did not find the source already installed earlier.
+        import_metadata_into_db(db_source, session)
+
+        # Uploaders are the maintainer and co-maintainers from the Uploaders field
+        db_source.uploaders.append(maintainer)
+        if 'Uploaders' in control:
+            from daklib.textutils import split_uploaders
+            for u in split_uploaders(control['Uploaders']):
+                db_source.uploaders.append(get_or_set_maintainer(u, session))
+        session.flush()
+
+        return db_source
+
+    def _copy_file(self, db_file, archive, component, allow_tainted=False):
+        """Copy a file to the given archive and component
+
+        @type  db_file: L{daklib.dbconn.PoolFile}
+        @param db_file: file to copy
+
+        @type  archive: L{daklib.dbconn.Archive}
+        @param archive: target archive
+
+        @type  component: L{daklib.dbconn.Component}
+        @param component: target component
+
+        @type  allow_tainted: bool
+        @param allow_tainted: allow copying from tainted archives (such as NEW)
+        """
+        session = self.session
+
+        if session.query(ArchiveFile).filter_by(archive=archive, component=component, file=db_file).first() is None:
+            query = session.query(ArchiveFile).filter_by(file=db_file)
+            if not allow_tainted:
+                query = query.join(Archive).filter(Archive.tainted == False)
+
+            source_af = query.first()
+            if source_af is None:
+                raise ArchiveException('cp: Could not find {0} in any archive.'.format(db_file.filename))
+            target_af = ArchiveFile(archive, component, db_file)
+            session.add(target_af)
+            session.flush()
+            self.fs.copy(source_af.path, target_af.path, link=False, mode=archive.mode)
+
+    def copy_binary(self, db_binary, suite, component, allow_tainted=False, extra_archives=None):
+        """Copy a binary package to the given suite and component
+
+        @type  db_binary: L{daklib.dbconn.DBBinary}
+        @param db_binary: binary to copy
+
+        @type  suite: L{daklib.dbconn.Suite}
+        @param suite: target suite
+
+        @type  component: L{daklib.dbconn.Component}
+        @param component: target component
+
+        @type  allow_tainted: bool
+        @param allow_tainted: allow copying from tainted archives (such as NEW)
+
+        @type  extra_archives: list of L{daklib.dbconn.Archive}
+        @param extra_archives: extra archives to copy Built-Using sources from
+        """
+        session = self.session
+        archive = suite.archive
+        if archive.tainted:
+            allow_tainted = True
+
+        filename = db_binary.poolfile.filename
+
+        # make sure source is present in target archive
+        db_source = db_binary.source
+        if session.query(ArchiveFile).filter_by(archive=archive, file=db_source.poolfile).first() is None:
+            raise ArchiveException('{0}: cannot copy to {1}: source is not present in target archive'.format(filename, suite.suite_name))
+
+        # make sure built-using packages are present in target archive
+        for db_source in db_binary.extra_sources:
+            self._ensure_extra_source_exists(filename, db_source, archive, extra_archives=extra_archives)
+
+        # copy binary
+        db_file = db_binary.poolfile
+        self._copy_file(db_file, suite.archive, component, allow_tainted=allow_tainted)
+        if suite not in db_binary.suites:
+            db_binary.suites.append(suite)
+        self.session.flush()
+
+    def copy_source(self, db_source, suite, component, allow_tainted=False):
+        """Copy a source package to the given suite and component
+
+        @type  db_source: L{daklib.dbconn.DBSource}
+        @param db_source: source to copy
+
+        @type  suite: L{daklib.dbconn.Suite}
+        @param suite: target suite
+
+        @type  component: L{daklib.dbconn.Component}
+        @param component: target component
+
+        @type  allow_tainted: bool
+        @param allow_tainted: allow copying from tainted archives (such as NEW)
+        """
+        archive = suite.archive
+        if archive.tainted:
+            allow_tainted = True
+        for db_dsc_file in db_source.srcfiles:
+            self._copy_file(db_dsc_file.poolfile, archive, component, allow_tainted=allow_tainted)
+        if suite not in db_source.suites:
+            db_source.suites.append(suite)
+        self.session.flush()
+
+    def remove_file(self, db_file, archive, component):
+        """Remove a file from a given archive and component
+
+        @type  db_file: L{daklib.dbconn.PoolFile}
+        @param db_file: file to remove
+
+        @type  archive: L{daklib.dbconn.Archive}
+        @param archive: archive to remove the file from
+
+        @type  component: L{daklib.dbconn.Component}
+        @param component: component to remove the file from
+        """
+        af = self.session.query(ArchiveFile).filter_by(file=db_file, archive=archive, component=component).one()
+        self.fs.unlink(af.path)
+        self.session.delete(af)
+
+    def remove_binary(self, binary, suite):
+        """Remove a binary from a given suite and component
+
+        @type  binary: L{daklib.dbconn.DBBinary}
+        @param binary: binary to remove
+
+        @type  suite: L{daklib.dbconn.Suite}
+        @param suite: suite to remove the package from
+        """
+        binary.suites.remove(suite)
+        self.session.flush()
+
+    def remove_source(self, source, suite):
+        """Remove a source from a given suite and component
+
+        @type  source: L{daklib.dbconn.DBSource}
+        @param source: source to remove
+
+        @type  suite: L{daklib.dbconn.Suite}
+        @param suite: suite to remove the package from
+
+        @raise ArchiveException: source package is still referenced by other
+                                 binaries in the suite
+        """
+        session = self.session
+
+        query = session.query(DBBinary).filter_by(source=source) \
+            .filter(DBBinary.suites.contains(suite))
+        if query.first() is not None:
+            raise ArchiveException('src:{0} is still used by binaries in suite {1}'.format(source.source, suite.suite_name))
+
+        source.suites.remove(suite)
+        session.flush()
+
+    def commit(self):
+        """commit changes"""
+        try:
+            self.session.commit()
+            self.fs.commit()
+        finally:
+            self.session.rollback()
+            self.fs.rollback()
+
+    def rollback(self):
+        """rollback changes"""
+        self.session.rollback()
+        self.fs.rollback()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        if type is None:
+            self.commit()
+        else:
+            self.rollback()
+        return None
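+
+# Illustrative usage (a sketch, not part of the original code): judging from the
+# methods above, ArchiveTransaction is meant to be used as a context manager.
+# Leaving the with-block without an exception commits both the database session
+# and the filesystem transaction; an exception triggers a rollback instead.
+# db_source, db_binary, suite and component are placeholder daklib.dbconn objects.
+#
+#     with ArchiveTransaction() as transaction:
+#         transaction.copy_source(db_source, suite, component)
+#         transaction.copy_binary(db_binary, suite, component)
+#     # commit() runs automatically on clean exit of the with-block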
+
+class ArchiveUpload(object):
+    """handle an upload
+
+    This class can be used in a with-statement::
+
+       with ArchiveUpload(...) as upload:
+          ...
+
+    Doing so will automatically run any required cleanup and also rollback the
+    transaction if it was not committed.
+    """
+    def __init__(self, directory, changes, keyrings):
+        self.transaction = ArchiveTransaction()
+        """transaction used to handle the upload
+        @type: L{daklib.archive.ArchiveTransaction}
+        """
+
+        self.session = self.transaction.session
+        """database session"""
+
+        self.original_directory = directory
+        self.original_changes = changes
+
+        self.changes = None
+        """upload to process
+        @type: L{daklib.upload.Changes}
+        """
+
+        self.directory = None
+        """directory with temporary copy of files. set by C{prepare}
+        @type: str
+        """
+
+        self.keyrings = keyrings
+
+        self.fingerprint = self.session.query(Fingerprint).filter_by(fingerprint=changes.primary_fingerprint).one()
+        """fingerprint of the key used to sign the upload
+        @type: L{daklib.dbconn.Fingerprint}
+        """
+
+        self.reject_reasons = []
+        """reasons why the upload cannot by accepted
+        @type: list of str
+        """
+
+        self.warnings = []
+        """warnings
+        @note: Not used yet.
+        @type: list of str
+        """
+
+        self.final_suites = None
+
+        self.new = False
+        """upload is NEW. set by C{check}
+        @type: bool
+        """
+
+        self._checked = False
+        """checks passes. set by C{check}
+        @type: bool
+        """
+
+        self._new_queue = self.session.query(PolicyQueue).filter_by(queue_name='new').one()
+        self._new = self._new_queue.suite
+
+    def warn(self, message):
+        """add a warning message
+
+        Adds a warning message that can later be seen in C{self.warnings}
+
+        @type  message: string
+        @param message: warning message
+        """
+        self.warnings.append(message)
+
+    def prepare(self):
+        """prepare upload for further processing
+
+        This copies the files involved to a temporary directory.  If you use
+        this method directly, you have to remove the directory given by the
+        C{directory} attribute later on your own.
+
+        Instead of using the method directly, you can also use a with-statement::
+
+           with ArchiveUpload(...) as upload:
+              ...
+
+        This will automatically handle any required cleanup.
+        """
+        assert self.directory is None
+        assert self.original_changes.valid_signature
+
+        cnf = Config()
+        session = self.transaction.session
+
+        group = cnf.get('Dinstall::UnprivGroup') or None
+        self.directory = utils.temp_dirname(parent=cnf.get('Dir::TempPath'),
+                                            mode=0o2750, group=group)
+        with FilesystemTransaction() as fs:
+            src = os.path.join(self.original_directory, self.original_changes.filename)
+            dst = os.path.join(self.directory, self.original_changes.filename)
+            fs.copy(src, dst, mode=0o640)
+
+            self.changes = upload.Changes(self.directory, self.original_changes.filename, self.keyrings)
+
+            for f in self.changes.files.itervalues():
+                src = os.path.join(self.original_directory, f.filename)
+                dst = os.path.join(self.directory, f.filename)
+                if not os.path.exists(src):
+                    continue
+                fs.copy(src, dst, mode=0o640)
+
+            source = None
+            try:
+                source = self.changes.source
+            except Exception:
+                # Do not raise an exception here if the .dsc is invalid.
+                pass
+
+            if source is not None:
+                for f in source.files.itervalues():
+                    src = os.path.join(self.original_directory, f.filename)
+                    dst = os.path.join(self.directory, f.filename)
+                    if not os.path.exists(dst):
+                        try:
+                            db_file = self.transaction.get_file(f, source.dsc['Source'], check_hashes=False)
+                            db_archive_file = session.query(ArchiveFile).filter_by(file=db_file).first()
+                            fs.copy(db_archive_file.path, dst, mode=0o640)
+                        except KeyError:
+                            # Ignore if get_file could not find it. Upload will
+                            # probably be rejected later.
+                            pass
+
+    def unpacked_source(self):
+        """Path to unpacked source
+
+        Get path to the unpacked source. This method does unpack the source
+        into a temporary directory under C{self.directory} if it has not
+        been done so already.
+
+        @rtype:  str or C{None}
+        @return: string giving the path to the unpacked source directory
+                 or C{None} if no source was included in the upload.
+        """
+        assert self.directory is not None
+
+        source = self.changes.source
+        if source is None:
+            return None
+        dsc_path = os.path.join(self.directory, source._dsc_file.filename)
+
+        sourcedir = os.path.join(self.directory, 'source')
+        if not os.path.exists(sourcedir):
+            devnull = open('/dev/null', 'w')
+            daklib.daksubprocess.check_call(["dpkg-source", "--no-copy", "--no-check", "-x", dsc_path, sourcedir], shell=False, stdout=devnull)
+        if not os.path.isdir(sourcedir):
+            raise Exception("{0} is not a directory after extracting source package".format(sourcedir))
+        return sourcedir
+
+    def _map_suite(self, suite_name):
+        for rule in Config().value_list("SuiteMappings"):
+            fields = rule.split()
+            rtype = fields[0]
+            if rtype == "map" or rtype == "silent-map":
+                (src, dst) = fields[1:3]
+                if src == suite_name:
+                    suite_name = dst
+                    if rtype != "silent-map":
+                        self.warnings.append('Mapping {0} to {1}.'.format(src, dst))
+            elif rtype == "ignore":
+                ignored = fields[1]
+                if suite_name == ignored:
+                    self.warnings.append('Ignoring target suite {0}.'.format(ignored))
+                    suite_name = None
+            elif rtype == "reject":
+                rejected = fields[1]
+                if suite_name == rejected:
+                    raise checks.Reject('Uploads to {0} are not accepted.'.format(rejected))
+            ## XXX: propup-version and map-unreleased not yet implemented
+        return suite_name
+
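+    # Illustrative SuiteMappings rules (hypothetical names, not taken from a real
+    # dak.conf): each rule is a whitespace-separated string whose first word
+    # selects one of the types handled above, e.g.
+    #
+    #     "map stable stable-proposed-updates"
+    #     "silent-map old-suite new-suite"
+    #     "ignore disabled-suite"
+    #     "reject frozen-suite"
+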
+    def _mapped_suites(self):
+        """Get target suites after mappings
+
+        @rtype:  list of L{daklib.dbconn.Suite}
+        @return: list giving the mapped target suites of this upload
+        """
+        session = self.session
+
+        suite_names = []
+        for dist in self.changes.distributions:
+            suite_name = self._map_suite(dist)
+            if suite_name is not None:
+                suite_names.append(suite_name)
+
+        suites = session.query(Suite).filter(Suite.suite_name.in_(suite_names))
+        return suites
+
+    def _check_new(self, suite):
+        """Check if upload is NEW
+
+        An upload is NEW if it has binary or source packages that do not have
+        an override in C{suite} OR if it references files ONLY in a tainted
+        archive (e.g. when it references files in NEW).
+
+        @rtype:  bool
+        @return: C{True} if the upload is NEW, C{False} otherwise
+        """
+        session = self.session
+        new = False
+
+        # Check for missing overrides
+        for b in self.changes.binaries:
+            override = self._binary_override(suite, b)
+            if override is None:
+                self.warnings.append('binary:{0} is NEW.'.format(b.control['Package']))
+                new = True
+
+        if self.changes.source is not None:
+            override = self._source_override(suite, self.changes.source)
+            if override is None:
+                self.warnings.append('source:{0} is NEW.'.format(self.changes.source.dsc['Source']))
+                new = True
+
+        # Check if we reference a file only in a tainted archive
+        files = self.changes.files.values()
+        if self.changes.source is not None:
+            files.extend(self.changes.source.files.values())
+        for f in files:
+            query = session.query(ArchiveFile).join(PoolFile).filter(PoolFile.sha1sum == f.sha1sum)
+            query_untainted = query.join(Archive).filter(Archive.tainted == False)
+
+            in_archive = (query.first() is not None)
+            in_untainted_archive = (query_untainted.first() is not None)
+
+            if in_archive and not in_untainted_archive:
+                self.warnings.append('{0} is only available in NEW.'.format(f.filename))
+                new = True
+
+        return new
+
+    def _final_suites(self):
+        session = self.session
+
+        mapped_suites = self._mapped_suites()
+        final_suites = set()
+
+        for suite in mapped_suites:
+            overridesuite = suite
+            if suite.overridesuite is not None:
+                overridesuite = session.query(Suite).filter_by(suite_name=suite.overridesuite).one()
+            if self._check_new(overridesuite):
+                self.new = True
+            final_suites.add(suite)
+
+        return final_suites
+
+    def _binary_override(self, suite, binary):
+        """Get override entry for a binary
+
+        @type  suite: L{daklib.dbconn.Suite}
+        @param suite: suite to get override for
+
+        @type  binary: L{daklib.upload.Binary}
+        @param binary: binary to get override for
+
+        @rtype:  L{daklib.dbconn.Override} or C{None}
+        @return: override for the given binary or C{None}
+        """
+        if suite.overridesuite is not None:
+            suite = self.session.query(Suite).filter_by(suite_name=suite.overridesuite).one()
+
+        mapped_component = get_mapped_component(binary.component)
+        if mapped_component is None:
+            return None
+
+        query = self.session.query(Override).filter_by(suite=suite, package=binary.control['Package']) \
+                .join(Component).filter(Component.component_name == mapped_component.component_name) \
+                .join(OverrideType).filter(OverrideType.overridetype == binary.type)
+
+        try:
+            return query.one()
+        except NoResultFound:
+            return None
+
+    def _source_override(self, suite, source):
+        """Get override entry for a source
+
+        @type  suite: L{daklib.dbconn.Suite}
+        @param suite: suite to get override for
+
+        @type  source: L{daklib.upload.Source}
+        @param source: source to get override for
+
+        @rtype:  L{daklib.dbconn.Override} or C{None}
+        @return: override for the given source or C{None}
+        """
+        if suite.overridesuite is not None:
+            suite = self.session.query(Suite).filter_by(suite_name=suite.overridesuite).one()
+
+        # XXX: component for source?
+        query = self.session.query(Override).filter_by(suite=suite, package=source.dsc['Source']) \
+                .join(OverrideType).filter(OverrideType.overridetype == 'dsc')
+
+        try:
+            return query.one()
+        except NoResultFound:
+            return None
+
+    def _binary_component(self, suite, binary, only_overrides=True):
+        """get component for a binary
+
+        By default this will only look at overrides to get the right component;
+        if C{only_overrides} is C{False} this method will also look at the
+        Section field.
+
+        @type  suite: L{daklib.dbconn.Suite}
+
+        @type  binary: L{daklib.upload.Binary}
+
+        @type  only_overrides: bool
+        @param only_overrides: only use overrides to get the right component
+
+        @rtype: L{daklib.dbconn.Component} or C{None}
+        """
+        override = self._binary_override(suite, binary)
+        if override is not None:
+            return override.component
+        if only_overrides:
+            return None
+        return get_mapped_component(binary.component, self.session)
+
+    def check(self, force=False):
+        """run checks against the upload
+
+        @type  force: bool
+        @param force: ignore failing forcable checks
+
+        @rtype:  bool
+        @return: C{True} if all checks passed, C{False} otherwise
+        """
+        # XXX: needs to be better structured.
+        assert self.changes.valid_signature
+
+        try:
+            # Validate signatures and hashes before we do any real work:
+            for chk in (
+                    checks.SignatureAndHashesCheck,
+                    checks.ChangesCheck,
+                    checks.ExternalHashesCheck,
+                    checks.SourceCheck,
+                    checks.BinaryCheck,
+                    checks.BinaryTimestampCheck,
+                    checks.SingleDistributionCheck,
+                    ):
+                chk().check(self)
+
+            final_suites = self._final_suites()
+            if len(final_suites) == 0:
+                self.reject_reasons.append('No target suite found. Please check your target distribution and that you uploaded to the right archive.')
+                return False
+
+            self.final_suites = final_suites
+
+            for chk in (
+                    checks.TransitionCheck,
+                    checks.ACLCheck,
+                    checks.NoSourceOnlyCheck,
+                    checks.LintianCheck,
+                    ):
+                chk().check(self)
+
+            for chk in (
+                    checks.ACLCheck,
+                    checks.SourceFormatCheck,
+                    checks.SuiteArchitectureCheck,
+                    checks.VersionCheck,
+                    ):
+                for suite in final_suites:
+                    chk().per_suite_check(self, suite)
+
+            if len(self.reject_reasons) != 0:
+                return False
+
+            self._checked = True
+            return True
+        except checks.Reject as e:
+            self.reject_reasons.append(unicode(e))
+        except Exception as e:
+            self.reject_reasons.append("Processing raised an exception: {0}.\n{1}".format(e, traceback.format_exc()))
+        return False
+
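+    # Illustrative call sequence (a sketch, not from the original code): callers
+    # are expected to run prepare() and check() before installing, roughly
+    #
+    #     with ArchiveUpload(directory, changes, keyrings) as upload:
+    #         if upload.check():
+    #             if upload.new:
+    #                 upload.install_to_new()
+    #             else:
+    #                 upload.install()
+    #             upload.commit()
+    #         else:
+    #             print "\n".join(upload.reject_reasons)
+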
+    def _install_to_suite(self, suite, source_component_func, binary_component_func, source_suites=None, extra_source_archives=None):
+        """Install upload to the given suite
+
+        @type  suite: L{daklib.dbconn.Suite}
+        @param suite: suite to install the package into. This is the real suite,
+                      ie. after any redirection to NEW or a policy queue
+
+        @param source_component_func: function to get the L{daklib.dbconn.Component}
+                                      for a L{daklib.upload.Source} object
+
+        @param binary_component_func: function to get the L{daklib.dbconn.Component}
+                                      for a L{daklib.upload.Binary} object
+
+        @param source_suites: see L{daklib.archive.ArchiveTransaction.install_binary}
+
+        @param extra_source_archives: see L{daklib.archive.ArchiveTransaction.install_binary}
+
+        @return: tuple with two elements. The first is a L{daklib.dbconn.DBSource}
+                 object for the install source or C{None} if no source was
+                 included. The second is a list of L{daklib.dbconn.DBBinary}
+                 objects for the installed binary packages.
+        """
+        # XXX: move this function to ArchiveTransaction?
+
+        control = self.changes.changes
+        changed_by = get_or_set_maintainer(control.get('Changed-By', control['Maintainer']), self.session)
+
+        if source_suites is None:
+            source_suites = self.session.query(Suite).join((VersionCheck, VersionCheck.reference_id == Suite.suite_id)).filter(VersionCheck.check == 'Enhances').filter(VersionCheck.suite == suite).subquery()
+
+        source = self.changes.source
+        if source is not None:
+            component = source_component_func(source)
+            db_source = self.transaction.install_source(self.directory, source, suite, component, changed_by, fingerprint=self.fingerprint)
+        else:
+            db_source = None
+
+        db_binaries = []
+        for binary in self.changes.binaries:
+            component = binary_component_func(binary)
+            db_binary = self.transaction.install_binary(self.directory, binary, suite, component, fingerprint=self.fingerprint, source_suites=source_suites, extra_source_archives=extra_source_archives)
+            db_binaries.append(db_binary)
+
+        if suite.copychanges:
+            src = os.path.join(self.directory, self.changes.filename)
+            dst = os.path.join(suite.archive.path, 'dists', suite.suite_name, self.changes.filename)
+            self.transaction.fs.copy(src, dst, mode=suite.archive.mode)
+
+        return (db_source, db_binaries)
+
+    def _install_changes(self):
+        assert self.changes.valid_signature
+        control = self.changes.changes
+        session = self.transaction.session
+        config = Config()
+
+        changelog_id = None
+        # Only add changelog for sourceful uploads and binNMUs
+        if 'source' in self.changes.architectures or re_bin_only_nmu.search(control['Version']):
+            query = 'INSERT INTO changelogs_text (changelog) VALUES (:changelog) RETURNING id'
+            changelog_id = session.execute(query, {'changelog': control['Changes']}).scalar()
+            assert changelog_id is not None
+
+        db_changes = DBChange()
+        db_changes.changesname = self.changes.filename
+        db_changes.source = control['Source']
+        db_changes.binaries = control.get('Binary', None)
+        db_changes.architecture = control['Architecture']
+        db_changes.version = control['Version']
+        db_changes.distribution = control['Distribution']
+        db_changes.urgency = control['Urgency']
+        db_changes.maintainer = control['Maintainer']
+        db_changes.changedby = control.get('Changed-By', control['Maintainer'])
+        db_changes.date = control['Date']
+        db_changes.fingerprint = self.fingerprint.fingerprint
+        db_changes.changelog_id = changelog_id
+        db_changes.closes = self.changes.closed_bugs
+
+        try:
+            self.transaction.session.add(db_changes)
+            self.transaction.session.flush()
+        except sqlalchemy.exc.IntegrityError:
+            raise ArchiveException('{0} is already known.'.format(self.changes.filename))
+
+        return db_changes
+
+    def _install_policy(self, policy_queue, target_suite, db_changes, db_source, db_binaries):
+        u = PolicyQueueUpload()
+        u.policy_queue = policy_queue
+        u.target_suite = target_suite
+        u.changes = db_changes
+        u.source = db_source
+        u.binaries = db_binaries
+        self.transaction.session.add(u)
+        self.transaction.session.flush()
+
+        dst = os.path.join(policy_queue.path, self.changes.filename)
+        self.transaction.fs.copy(self.changes.path, dst, mode=policy_queue.change_perms)
+
+        return u
+
+    def try_autobyhand(self):
+        """Try AUTOBYHAND
+
+        Try to handle byhand packages automatically.
+
+        @rtype:  bool
+        @return: C{True} if all byhand files were processed automatically,
+                 C{False} if byhand files remain to be handled manually
+        """
+        assert len(self.reject_reasons) == 0
+        assert self.changes.valid_signature
+        assert self.final_suites is not None
+        assert self._checked
+
+        byhand = self.changes.byhand_files
+        if len(byhand) == 0:
+            return True
+
+        suites = list(self.final_suites)
+        assert len(suites) == 1, "BYHAND uploads must be to a single suite"
+        suite = suites[0]
+
+        cnf = Config()
+        control = self.changes.changes
+        automatic_byhand_packages = cnf.subtree("AutomaticByHandPackages")
+
+        remaining = []
+        for f in byhand:
+            if '_' in f.filename:
+                parts = f.filename.split('_', 2)
+                if len(parts) != 3:
+                    print "W: unexpected byhand filename {0}. No automatic processing.".format(f.filename)
+                    remaining.append(f)
+                    continue
+
+                package, version, archext = parts
+                arch, ext = archext.split('.', 1)
+            else:
+                parts = f.filename.split('.')
+                if len(parts) < 2:
+                    print "W: unexpected byhand filename {0}. No automatic processing.".format(f.filename)
+                    remaining.append(f)
+                    continue
+
+                package = parts[0]
+                version = '0'
+                arch = 'all'
+                ext = parts[-1]
+
+            try:
+                rule = automatic_byhand_packages.subtree(package)
+            except KeyError:
+                remaining.append(f)
+                continue
+
+            if rule['Source'] != self.changes.source_name \
+                    or rule['Section'] != f.section \
+                    or ('Extension' in rule and rule['Extension'] != ext):
+                remaining.append(f)
+                continue
+
+            script = rule['Script']
+            retcode = daklib.daksubprocess.call([script, os.path.join(self.directory, f.filename), control['Version'], arch, os.path.join(self.directory, self.changes.filename)], shell=False)
+            if retcode != 0:
+                print "W: error processing {0}.".format(f.filename)
+                remaining.append(f)
+
+        return len(remaining) == 0
+
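+    # Illustrative AutomaticByHandPackages stanza (hypothetical package names and
+    # paths, not taken from a real dak.conf) showing the fields used above:
+    #
+    #     AutomaticByHandPackages {
+    #       "some-byhand-package" {
+    #         Source "some-source-package";
+    #         Section "byhand";
+    #         Extension "tar.gz";
+    #         Script "/usr/local/bin/process-some-byhand";
+    #       };
+    #     };
+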
+    def _install_byhand(self, policy_queue_upload, hashed_file):
+        """install byhand file
+
+        @type  policy_queue_upload: L{daklib.dbconn.PolicyQueueUpload}
+
+        @type  hashed_file: L{daklib.upload.HashedFile}
+        """
+        fs = self.transaction.fs
+        session = self.transaction.session
+        policy_queue = policy_queue_upload.policy_queue
+
+        byhand_file = PolicyQueueByhandFile()
+        byhand_file.upload = policy_queue_upload
+        byhand_file.filename = hashed_file.filename
+        session.add(byhand_file)
+        session.flush()
+
+        src = os.path.join(self.directory, hashed_file.filename)
+        dst = os.path.join(policy_queue.path, hashed_file.filename)
+        fs.copy(src, dst, mode=policy_queue.change_perms)
+
+        return byhand_file
+
+    def _do_bts_versiontracking(self):
+        cnf = Config()
+        fs = self.transaction.fs
+
+        btsdir = cnf.get('Dir::BTSVersionTrack')
+        if btsdir is None or btsdir == '':
+            return
+
+        base = os.path.join(btsdir, self.changes.filename[:-8])
+
+        # version history
+        sourcedir = self.unpacked_source()
+        if sourcedir is not None:
+            fh = open(os.path.join(sourcedir, 'debian', 'changelog'), 'r')
+            versions = fs.create("{0}.versions".format(base), mode=0o644)
+            for line in fh.readlines():
+                if re_changelog_versions.match(line):
+                    versions.write(line)
+            fh.close()
+            versions.close()
+
+        # binary -> source mapping
+        debinfo = fs.create("{0}.debinfo".format(base), mode=0o644)
+        for binary in self.changes.binaries:
+            control = binary.control
+            source_package, source_version = binary.source
+            line = " ".join([control['Package'], control['Version'], control['Architecture'], source_package, source_version])
+            print >>debinfo, line
+        debinfo.close()
+
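+    # For reference, each .debinfo line written above has the space-separated form
+    # (hypothetical values): "dak 1.0-1 all dak 1.0-1", i.e. binary package,
+    # binary version, architecture, source package and source version.
+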
+    def _policy_queue(self, suite):
+        if suite.policy_queue is not None:
+            return suite.policy_queue
+        return None
+
+    def install(self):
+        """install upload
+
+        Install upload to a suite or policy queue.  This method does B{not}
+        handle uploads to NEW.
+
+        You need to have called the C{check} method before calling this method.
+        """
+        assert len(self.reject_reasons) == 0
+        assert self.changes.valid_signature
+        assert self.final_suites is not None
+        assert self._checked
+        assert not self.new
+
+        db_changes = self._install_changes()
+
+        for suite in self.final_suites:
+            overridesuite = suite
+            if suite.overridesuite is not None:
+                overridesuite = self.session.query(Suite).filter_by(suite_name=suite.overridesuite).one()
+
+            policy_queue = self._policy_queue(suite)
+
+            redirected_suite = suite
+            if policy_queue is not None:
+                redirected_suite = policy_queue.suite
+
+            # source can be in the suite we install to or any suite we enhance
+            source_suite_ids = set([suite.suite_id, redirected_suite.suite_id])
+            for enhanced_suite_id, in self.session.query(VersionCheck.reference_id) \
+                    .filter(VersionCheck.suite_id.in_(source_suite_ids)) \
+                    .filter(VersionCheck.check == 'Enhances'):
+                source_suite_ids.add(enhanced_suite_id)
+
+            source_suites = self.session.query(Suite).filter(Suite.suite_id.in_(source_suite_ids)).subquery()
+
+            source_component_func = lambda source: self._source_override(overridesuite, source).component
+            binary_component_func = lambda binary: self._binary_component(overridesuite, binary)
+
+            (db_source, db_binaries) = self._install_to_suite(redirected_suite, source_component_func, binary_component_func, source_suites=source_suites, extra_source_archives=[suite.archive])
+
+            if policy_queue is not None:
+                self._install_policy(policy_queue, suite, db_changes, db_source, db_binaries)
+
+            # copy to build queues
+            if policy_queue is None or policy_queue.send_to_build_queues:
+                for build_queue in suite.copy_queues:
+                    self._install_to_suite(build_queue.suite, source_component_func, binary_component_func, source_suites=source_suites, extra_source_archives=[suite.archive])
+
+        self._do_bts_versiontracking()
+
+    def install_to_new(self):
+        """install upload to NEW
+
+        Install upload to NEW.  This method does B{not} handle regular uploads
+        to suites or policy queues.
+
+        You need to have called the C{check} method before calling this method.
+        """
+        # Uploads to NEW are special as we don't have overrides.
+        assert len(self.reject_reasons) == 0
+        assert self.changes.valid_signature
+        assert self.final_suites is not None
+
+        source = self.changes.source
+        binaries = self.changes.binaries
+        byhand = self.changes.byhand_files
+
+        # we need a suite to guess components
+        suites = list(self.final_suites)
+        assert len(suites) == 1, "NEW uploads must be to a single suite"
+        suite = suites[0]
+
+        # decide which NEW queue to use
+        if suite.new_queue is None:
+            new_queue = self.transaction.session.query(PolicyQueue).filter_by(queue_name='new').one()
+        else:
+            new_queue = suite.new_queue
+        if len(byhand) > 0:
+            # There is only one global BYHAND queue
+            new_queue = self.transaction.session.query(PolicyQueue).filter_by(queue_name='byhand').one()
+        new_suite = new_queue.suite
+
+        def binary_component_func(binary):
+            return self._binary_component(suite, binary, only_overrides=False)
+
+        # guess source component
+        # XXX: should be moved into an extra method
+        binary_component_names = set()
+        for binary in binaries:
+            component = binary_component_func(binary)
+            binary_component_names.add(component.component_name)
+        source_component_name = None
+        for c in self.session.query(Component).order_by(Component.component_id):
+            guess = c.component_name
+            if guess in binary_component_names:
+                source_component_name = guess
+                break
+        if source_component_name is None:
+            source_component = self.session.query(Component).order_by(Component.component_id).first()
+        else:
+            source_component = self.session.query(Component).filter_by(component_name=source_component_name).one()
+        source_component_func = lambda source: source_component
+
+        db_changes = self._install_changes()
+        (db_source, db_binaries) = self._install_to_suite(new_suite, source_component_func, binary_component_func, source_suites=True, extra_source_archives=[suite.archive])
+        policy_upload = self._install_policy(new_queue, suite, db_changes, db_source, db_binaries)
+
+        for f in byhand:
+            self._install_byhand(policy_upload, f)
+
+        self._do_bts_versiontracking()
+
+    def commit(self):
+        """commit changes"""
+        self.transaction.commit()
+
+    def rollback(self):
+        """rollback changes"""
+        self.transaction.rollback()
+
+    def __enter__(self):
+        self.prepare()
+        return self
+
+    def __exit__(self, type, value, traceback):
+        if self.directory is not None:
+            shutil.rmtree(self.directory)
+            self.directory = None
+        self.changes = None
+        self.transaction.rollback()
+        return None
diff --git a/daklib/changesutils.py b/daklib/changesutils.py
deleted file mode 100755 (executable)
index d1dbad8..0000000
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/env python
-# vim:set et ts=4 sw=4:
-
-"""Utilities for handling changes files
-
-@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2001, 2002, 2003, 2004, 2005, 2006  James Troup <james@nocrew.org>
-@copyright: 2009 Joerg Jaspert <joerg@debian.org>
-@copyright: 2009 Frank Lichtenheld <djpig@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-import copy
-import os
-import stat
-import apt_pkg
-
-from daklib.dbconn import *
-from daklib.queue import *
-from daklib import utils
-from daklib.config import Config
-
-################################################################################
-
-__all__ = []
-
-################################################################################
-
-def indiv_sg_compare (a, b):
-    """Sort by source name, source, version, 'have source', and
-       finally by filename."""
-    # Sort by source version
-    q = apt_pkg.version_compare(a["version"], b["version"])
-    if q:
-        return -q
-
-    # Sort by 'have source'
-    a_has_source = a["architecture"].get("source")
-    b_has_source = b["architecture"].get("source")
-    if a_has_source and not b_has_source:
-        return -1
-    elif b_has_source and not a_has_source:
-        return 1
-
-    return cmp(a["filename"], b["filename"])
-
-__all__.append('indiv_sg_compare')
-
-############################################################
-
-def sg_compare (a, b):
-    a = a[1]
-    b = b[1]
-    """Sort by have note, source already in database and time of oldest upload."""
-    # Sort by have note
-    a_note_state = a["note_state"]
-    b_note_state = b["note_state"]
-    if a_note_state < b_note_state:
-        return -1
-    elif a_note_state > b_note_state:
-        return 1
-    # Sort by source already in database (descending)
-    source_in_database = cmp(a["source_in_database"], b["source_in_database"])
-    if source_in_database:
-        return -source_in_database
-
-    # Sort by time of oldest upload
-    return cmp(a["oldest"], b["oldest"])
-
-__all__.append('sg_compare')
-
-def sort_changes(changes_files, session, binaries = None):
-    """Sort into source groups, then sort each source group by version,
-    have source, filename.  Finally, sort the source groups by have
-    note, time of oldest upload of each source upload."""
-    if len(changes_files) == 1:
-        return changes_files
-
-    sorted_list = []
-    cache = {}
-    # Read in all the .changes files
-    for filename in changes_files:
-        u = Upload()
-        try:
-            u.pkg.changes_file = filename
-            u.load_changes(filename)
-            u.update_subst()
-            cache[filename] = copy.copy(u.pkg.changes)
-            cache[filename]["filename"] = filename
-        except:
-            sorted_list.append(filename)
-            break
-    # Divide the .changes into per-source groups
-    per_source = {}
-    for filename in cache.keys():
-        source = cache[filename]["source"]
-        if not per_source.has_key(source):
-            per_source[source] = {}
-            per_source[source]["list"] = []
-        per_source[source]["list"].append(cache[filename])
-    # Determine oldest time and have note status for each source group
-    for source in per_source.keys():
-        q = session.query(DBSource).filter_by(source = source).all()
-        per_source[source]["source_in_database"] = binaries and -(len(q)>0) or len(q)>0
-        source_list = per_source[source]["list"]
-        first = source_list[0]
-        oldest = os.stat(first["filename"])[stat.ST_MTIME]
-        have_note = 0
-        for d in per_source[source]["list"]:
-            mtime = os.stat(d["filename"])[stat.ST_MTIME]
-            if mtime < oldest:
-                oldest = mtime
-            have_note += has_new_comment(d["source"], d["version"], session)
-        per_source[source]["oldest"] = oldest
-        if not have_note:
-            per_source[source]["note_state"] = 0; # none
-        elif have_note < len(source_list):
-            per_source[source]["note_state"] = 1; # some
-        else:
-            per_source[source]["note_state"] = 2; # all
-        per_source[source]["list"].sort(indiv_sg_compare)
-    per_source_items = per_source.items()
-    per_source_items.sort(sg_compare)
-    for i in per_source_items:
-        for j in i[1]["list"]:
-            sorted_list.append(j["filename"])
-    return sorted_list
-
-__all__.append('sort_changes')
-
-################################################################################
-
-def changes_to_queue(upload, srcqueue, destqueue, session):
-    """Move a changes file to a different queue and mark as approved for the
-       source queue"""
-
-    try:
-        chg = session.query(DBChange).filter_by(changesname=os.path.basename(upload.pkg.changes_file)).one()
-    except NoResultFound:
-        return False
-
-    chg.approved_for_id = srcqueue.policy_queue_id
-
-    for f in chg.files:
-        # update the changes_pending_files row
-        f.queue = destqueue
-        # Only worry about unprocessed files
-        if not f.processed:
-            utils.move(os.path.join(srcqueue.path, f.filename), destqueue.path, perms=int(destqueue.perms, 8))
-
-    utils.move(os.path.join(srcqueue.path, upload.pkg.changes_file), destqueue.path, perms=int(destqueue.perms, 8))
-    chg.in_queue = destqueue
-    session.commit()
-
-    return True
-
-__all__.append('changes_to_queue')
-
-def new_accept(upload, dry_run, session):
-    print "ACCEPT"
-
-    if not dry_run:
-        cnf = Config()
-
-        (summary, short_summary) = upload.build_summaries()
-        destqueue = get_policy_queue('newstage', session)
-
-        srcqueue = get_policy_queue_from_path(upload.pkg.directory, session)
-
-        if not srcqueue:
-            # Assume NEW and hope for the best
-            srcqueue = get_policy_queue('new', session)
-
-        changes_to_queue(upload, srcqueue, destqueue, session)
-
-__all__.append('new_accept')
diff --git a/daklib/checks.py b/daklib/checks.py
new file mode 100644 (file)
index 0000000..d148d15
--- /dev/null
@@ -0,0 +1,778 @@
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# Parts based on code that is
+# Copyright (C) 2001-2006, James Troup <james@nocrew.org>
+# Copyright (C) 2009-2010, Joerg Jaspert <joerg@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""module provided pre-acceptance tests
+
+Please read the documentation for the L{Check} class for the interface.
+"""
+
+from daklib.config import Config
+import daklib.daksubprocess
+from daklib.dbconn import *
+import daklib.dbconn as dbconn
+from daklib.regexes import *
+from daklib.textutils import fix_maintainer, ParseMaintError
+import daklib.lintian as lintian
+import daklib.utils as utils
+from daklib.upload import InvalidHashException
+
+import apt_inst
+import apt_pkg
+from apt_pkg import version_compare
+import errno
+import os
+import subprocess
+import time
+import yaml
+
+def check_fields_for_valid_utf8(filename, control):
+    """Check all fields of a control file for valid UTF-8"""
+    for field in control.keys():
+        try:
+            field.decode('utf-8')
+            control[field].decode('utf-8')
+        except UnicodeDecodeError:
+            raise Reject('{0}: The {1} field is not valid UTF-8'.format(filename, field))
+
+class Reject(Exception):
+    """exception raised by failing checks"""
+    pass
+
+class RejectStupidMaintainerException(Exception):
+    """exception raised by failing the external hashes check"""
+
+    def __str__(self):
+        return "'%s' has mismatching %s from the external files db ('%s' [current] vs '%s' [external])" % self.args[:4]
+
+class RejectACL(Reject):
+    """exception raise by failing ACL checks"""
+    def __init__(self, acl, reason):
+        self.acl = acl
+        self.reason = reason
+
+    def __str__(self):
+        return "ACL {0}: {1}".format(self.acl.name, self.reason)
+
+class Check(object):
+    """base class for checks
+
+    checks are called by L{daklib.archive.ArchiveUpload}. Failing tests should
+    raise a L{daklib.checks.Reject} exception including a human-readable
+    description of why the upload should be rejected.
+    """
+    def check(self, upload):
+        """do checks
+
+        @type  upload: L{daklib.archive.ArchiveUpload}
+        @param upload: upload to check
+
+        @raise daklib.checks.Reject: upload should be rejected
+        """
+        raise NotImplementedError
+
+    def per_suite_check(self, upload, suite):
+        """do per-suite checks
+
+        @type  upload: L{daklib.archive.ArchiveUpload}
+        @param upload: upload to check
+
+        @type  suite: L{daklib.dbconn.Suite}
+        @param suite: suite to check
+
+        @raise daklib.checks.Reject: upload should be rejected
+        """
+        raise NotImplementedError
+
+    @property
+    def forcable(self):
+        """allow to force ignore failing test
+
+        C{True} if it is acceptable to force ignoring a failing test,
+        C{False} otherwise
+        """
+        return False
+
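+# A minimal illustrative subclass (not part of dak itself), just to show the
+# interface described in the Check docstring: a check raises Reject with a
+# human-readable reason, otherwise it returns normally.
+#
+#     class ClosesSomethingCheck(Check):
+#         def check(self, upload):
+#             if len(upload.changes.closed_bugs) == 0:
+#                 raise Reject('{0} does not close any bugs'.format(
+#                     upload.changes.filename))
+#             return True
+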
+class SignatureAndHashesCheck(Check):
+    """Check signature of changes and dsc file (if included in upload)
+
+    Make sure the signature is valid and done by a known user.
+    """
+    def check(self, upload):
+        changes = upload.changes
+        if not changes.valid_signature:
+            raise Reject("Signature for .changes not valid.")
+        self._check_hashes(upload, changes.filename, changes.files.itervalues())
+
+        source = None
+        try:
+            source = changes.source
+        except Exception as e:
+            raise Reject("Invalid dsc file: {0}".format(e))
+        if source is not None:
+            if not source.valid_signature:
+                raise Reject("Signature for .dsc not valid.")
+            if source.primary_fingerprint != changes.primary_fingerprint:
+                raise Reject(".changes and .dsc not signed by the same key.")
+            self._check_hashes(upload, source.filename, source.files.itervalues())
+
+        if upload.fingerprint is None or upload.fingerprint.uid is None:
+            raise Reject(".changes signed by unknown key.")
+
+    """Make sure hashes match existing files
+
+    @type  upload: L{daklib.archive.ArchiveUpload}
+    @param upload: upload we are processing
+
+    @type  filename: str
+    @param filename: name of the file the expected hash values are taken from
+
+    @type  files: sequence of L{daklib.upload.HashedFile}
+    @param files: files to check the hashes for
+    """
+    def _check_hashes(self, upload, filename, files):
+        try:
+            for f in files:
+                f.check(upload.directory)
+        except IOError as e:
+            if e.errno == errno.ENOENT:
+                raise Reject('{0} refers to non-existing file: {1}\n'
+                             'Perhaps you need to include it in your upload?'
+                             .format(filename, os.path.basename(e.filename)))
+            raise
+        except InvalidHashException as e:
+            raise Reject('{0}: {1}'.format(filename, unicode(e)))
+
+class ChangesCheck(Check):
+    """Check changes file for syntax errors."""
+    def check(self, upload):
+        changes = upload.changes
+        control = changes.changes
+        fn = changes.filename
+
+        for field in ('Distribution', 'Source', 'Binary', 'Architecture', 'Version', 'Maintainer', 'Files', 'Changes', 'Description'):
+            if field not in control:
+                raise Reject('{0}: misses mandatory field {1}'.format(fn, field))
+
+        check_fields_for_valid_utf8(fn, control)
+
+        source_match = re_field_source.match(control['Source'])
+        if not source_match:
+            raise Reject('{0}: Invalid Source field'.format(fn))
+        version_match = re_field_version.match(control['Version'])
+        if not version_match:
+            raise Reject('{0}: Invalid Version field'.format(fn))
+        version_without_epoch = version_match.group('without_epoch')
+
+        match = re_file_changes.match(fn)
+        if not match:
+            raise Reject('{0}: Does not match re_file_changes'.format(fn))
+        if match.group('package') != source_match.group('package'):
+            raise Reject('{0}: Filename does not match Source field'.format(fn))
+        if match.group('version') != version_without_epoch:
+            raise Reject('{0}: Filename does not match Version field'.format(fn))
+
+        for bn in changes.binary_names:
+            if not re_field_package.match(bn):
+                raise Reject('{0}: Invalid binary package name {1}'.format(fn, bn))
+
+        if 'source' in changes.architectures and changes.source is None:
+            raise Reject("Changes has architecture source, but no source found.")
+        if changes.source is not None and 'source' not in changes.architectures:
+            raise Reject("Upload includes source, but changes does not say so.")
+
+        try:
+            fix_maintainer(changes.changes['Maintainer'])
+        except ParseMaintError as e:
+            raise Reject('{0}: Failed to parse Maintainer field: {1}'.format(changes.filename, e))
+
+        try:
+            changed_by = changes.changes.get('Changed-By')
+            if changed_by is not None:
+                fix_maintainer(changed_by)
+        except ParseMaintError as e:
+            raise Reject('{0}: Failed to parse Changed-By field: {1}'.format(changes.filename, e))
+
+        if len(changes.files) == 0:
+            raise Reject("Changes includes no files.")
+
+        for bugnum in changes.closed_bugs:
+            if not re_isanum.match(bugnum):
+                raise Reject('{0}: "{1}" in Closes field is not a number'.format(changes.filename, bugnum))
+
+        return True
+
+class ExternalHashesCheck(Check):
+    """Checks hashes in .changes and .dsc against an external database."""
+    def check_single(self, session, f):
+        q = session.execute("SELECT size, md5sum, sha1sum, sha256sum FROM external_files WHERE filename LIKE '%%/%s'" % f.filename)
+        (ext_size, ext_md5sum, ext_sha1sum, ext_sha256sum) = q.fetchone() or (None, None, None, None)
+
+        if not ext_size:
+            return
+
+        if ext_size != f.size:
+            raise RejectStupidMaintainerException(f.filename, 'size', f.size, ext_size)
+
+        if ext_md5sum != f.md5sum:
+            raise RejectStupidMaintainerException(f.filename, 'md5sum', f.md5sum, ext_md5sum)
+
+        if ext_sha1sum != f.sha1sum:
+            raise RejectStupidMaintainerException(f.filename, 'sha1sum', f.sha1sum, ext_sha1sum)
+
+        if ext_sha256sum != f.sha256sum:
+            raise RejectStupidMaintainerException(f.filename, 'sha256sum', f.sha256sum, ext_sha256sum)
+
+    def check(self, upload):
+        cnf = Config()
+
+        if not cnf.use_extfiles:
+            return
+
+        session = upload.session
+        changes = upload.changes
+
+        for f in changes.files.itervalues():
+            self.check_single(session, f)
+        source = changes.source
+        if source is not None:
+            for f in source.files.itervalues():
+                self.check_single(session, f)
+
+class BinaryCheck(Check):
+    """Check binary packages for syntax errors."""
+    def check(self, upload):
+        for binary in upload.changes.binaries:
+            self.check_binary(upload, binary)
+
+        binary_names = set([ binary.control['Package'] for binary in upload.changes.binaries ])
+        for bn in binary_names:
+            if bn not in upload.changes.binary_names:
+                raise Reject('Package {0} is not mentioned in Binary field in changes'.format(bn))
+
+        return True
+
+    def check_binary(self, upload, binary):
+        fn = binary.hashed_file.filename
+        control = binary.control
+
+        for field in ('Package', 'Architecture', 'Version', 'Description'):
+            if field not in control:
+                raise Reject('{0}: Missing mandatory field {1}.'.format(fn, field))
+
+        check_fields_for_valid_utf8(fn, control)
+
+        # check fields
+
+        package = control['Package']
+        if not re_field_package.match(package):
+            raise Reject('{0}: Invalid Package field'.format(fn))
+
+        version = control['Version']
+        version_match = re_field_version.match(version)
+        if not version_match:
+            raise Reject('{0}: Invalid Version field'.format(fn))
+        version_without_epoch = version_match.group('without_epoch')
+
+        architecture = control['Architecture']
+        if architecture not in upload.changes.architectures:
+            raise Reject('{0}: Architecture not in Architecture field in changes file'.format(fn))
+        if architecture == 'source':
+            raise Reject('{0}: Architecture "source" invalid for binary packages'.format(fn))
+
+        source = control.get('Source')
+        if source is not None and not re_field_source.match(source):
+            raise Reject('{0}: Invalid Source field'.format(fn))
+
+        # check filename
+
+        match = re_file_binary.match(fn)
+        if package != match.group('package'):
+            raise Reject('{0}: filename does not match Package field'.format(fn))
+        if version_without_epoch != match.group('version'):
+            raise Reject('{0}: filename does not match Version field'.format(fn))
+        if architecture != match.group('architecture'):
+            raise Reject('{0}: filename does not match Architecture field'.format(fn))
+
+        # check dependency field syntax
+
+        for field in ('Breaks', 'Conflicts', 'Depends', 'Enhances', 'Pre-Depends',
+                      'Provides', 'Recommends', 'Replaces', 'Suggests'):
+            value = control.get(field)
+            if value is not None:
+                if value.strip() == '':
+                    raise Reject('{0}: empty {1} field'.format(fn, field))
+                try:
+                    apt_pkg.parse_depends(value)
+                except:
+                    raise Reject('{0}: APT could not parse {1} field'.format(fn, field))
+
+        for field in ('Built-Using',):
+            value = control.get(field)
+            if value is not None:
+                if value.strip() == '':
+                    raise Reject('{0}: empty {1} field'.format(fn, field))
+                try:
+                    apt_pkg.parse_src_depends(value)
+                except:
+                    raise Reject('{0}: APT could not parse {1} field'.format(fn, field))
+
+class BinaryTimestampCheck(Check):
+    """check timestamps of files in binary packages
+
+    Files in the near future cause ugly warnings and extreme time travel
+    can cause errors on extraction.
+    """
+    def check(self, upload):
+        cnf = Config()
+        future_cutoff = time.time() + cnf.find_i('Dinstall::FutureTimeTravelGrace', 24*3600)
+        past_cutoff = time.mktime(time.strptime(cnf.find('Dinstall::PastCutoffYear', '1975'), '%Y'))
+
+        class TarTime(object):
+            def __init__(self):
+                self.future_files = dict()
+                self.past_files = dict()
+            def callback(self, member, data):
+                if member.mtime > future_cutoff:
+                    self.future_files[member.name] = member.mtime
+                elif member.mtime < past_cutoff:
+                    self.past_files[member.name] = member.mtime
+
+        def format_reason(filename, direction, files):
+            reason = "{0}: has {1} file(s) with a timestamp too far in the {2}:\n".format(filename, len(files), direction)
+            for fn, ts in files.iteritems():
+                reason += "  {0} ({1})\n".format(fn, time.ctime(ts))
+            return reason
+
+        for binary in upload.changes.binaries:
+            filename = binary.hashed_file.filename
+            path = os.path.join(upload.directory, filename)
+            deb = apt_inst.DebFile(path)
+            tar = TarTime()
+            deb.control.go(tar.callback)
+            if tar.future_files:
+                raise Reject(format_reason(filename, 'future', tar.future_files))
+            if tar.past_files:
+                raise Reject(format_reason(filename, 'past', tar.past_files))
+
+class SourceCheck(Check):
+    """Check source package for syntax errors."""
+    def check_filename(self, control, filename, regex):
+        # In case we have an .orig.tar.*, we have to strip the Debian revision
+        # from the version number. So handle this special case first.
+        is_orig = True
+        match = re_file_orig.match(filename)
+        if not match:
+            is_orig = False
+            match = regex.match(filename)
+
+        if not match:
+            raise Reject('{0}: does not match regular expression for source filenames'.format(filename))
+        if match.group('package') != control['Source']:
+            raise Reject('{0}: filename does not match Source field'.format(filename))
+
+        version = control['Version']
+        if is_orig:
+            upstream_match = re_field_version_upstream.match(version)
+            if not upstream_match:
+                raise Reject('{0}: Source package includes upstream tarball, but version {1} has no Debian revision.'.format(filename, version))
+            version = upstream_match.group('upstream')
+        version_match = re_field_version.match(version)
+        version_without_epoch = version_match.group('without_epoch')
+        if match.group('version') != version_without_epoch:
+            raise Reject('{0}: filename does not match Version field'.format(filename))
+
+    def check(self, upload):
+        if upload.changes.source is None:
+            return True
+
+        changes = upload.changes.changes
+        source = upload.changes.source
+        control = source.dsc
+        dsc_fn = source._dsc_file.filename
+
+        check_fields_for_valid_utf8(dsc_fn, control)
+
+        # check fields
+        if not re_field_package.match(control['Source']):
+            raise Reject('{0}: Invalid Source field'.format(dsc_fn))
+        if control['Source'] != changes['Source']:
+            raise Reject('{0}: Source field does not match Source field in changes'.format(dsc_fn))
+        if control['Version'] != changes['Version']:
+            raise Reject('{0}: Version field does not match Version field in changes'.format(dsc_fn))
+
+        # check filenames
+        self.check_filename(control, dsc_fn, re_file_dsc)
+        for f in source.files.itervalues():
+            self.check_filename(control, f.filename, re_file_source)
+
+        # check dependency field syntax
+        for field in ('Build-Conflicts', 'Build-Conflicts-Indep', 'Build-Depends', 'Build-Depends-Arch', 'Build-Depends-Indep'):
+            value = control.get(field)
+            if value is not None:
+                if value.strip() == '':
+                    raise Reject('{0}: empty {1} field'.format(dsc_fn, field))
+                try:
+                    apt_pkg.parse_src_depends(value)
+                except Exception as e:
+                    raise Reject('{0}: APT could not parse {1} field: {2}'.format(dsc_fn, field, e))
+
+        rejects = utils.check_dsc_files(dsc_fn, control, source.files.keys())
+        if len(rejects) > 0:
+            raise Reject("\n".join(rejects))
+
+        return True
+
+class SingleDistributionCheck(Check):
+    """Check that the .changes targets only a single distribution."""
+    def check(self, upload):
+        if len(upload.changes.distributions) != 1:
+            raise Reject("Only uploads to a single distribution are allowed.")
+
+class ACLCheck(Check):
+    """Check the uploader is allowed to upload the packages in .changes"""
+
+    def _does_hijack(self, session, upload, suite):
+        # Try to catch hijacks.
+        # This doesn't work correctly. Uploads to experimental can still
+        # "hijack" binaries from unstable. Also one can hijack packages
+        # via buildds (but people who try this should not be DMs).
+        for binary_name in upload.changes.binary_names:
+            binaries = session.query(DBBinary).join(DBBinary.source) \
+                .filter(DBBinary.suites.contains(suite)) \
+                .filter(DBBinary.package == binary_name)
+            for binary in binaries:
+                if binary.source.source != upload.changes.changes['Source']:
+                    return True, binary.package, binary.source.source
+        return False, None, None
+
+    def _check_acl(self, session, upload, acl):
+        source_name = upload.changes.source_name
+
+        if acl.match_fingerprint and upload.fingerprint not in acl.fingerprints:
+            return None, None
+        if acl.match_keyring is not None and upload.fingerprint.keyring != acl.match_keyring:
+            return None, None
+
+        if not acl.allow_new:
+            if upload.new:
+                return False, "NEW uploads are not allowed"
+            for f in upload.changes.files.itervalues():
+                if f.section == 'byhand' or f.section.startswith("raw-"):
+                    return False, "BYHAND uploads are not allowed"
+        if not acl.allow_source and upload.changes.source is not None:
+            return False, "sourceful uploads are not allowed"
+        binaries = upload.changes.binaries
+        if len(binaries) != 0:
+            if not acl.allow_binary:
+                return False, "binary uploads are not allowed"
+            if upload.changes.source is None and not acl.allow_binary_only:
+                return False, "binary-only uploads are not allowed"
+            if not acl.allow_binary_all:
+                uploaded_arches = set(upload.changes.architectures)
+                uploaded_arches.discard('source')
+                allowed_arches = set(a.arch_string for a in acl.architectures)
+                forbidden_arches = uploaded_arches - allowed_arches
+                if len(forbidden_arches) != 0:
+                    return False, "uploads for architecture(s) {0} are not allowed".format(", ".join(forbidden_arches))
+        if not acl.allow_hijack:
+            for suite in upload.final_suites:
+                does_hijack, hijacked_binary, hijacked_from = self._does_hijack(session, upload, suite)
+                if does_hijack:
+                    return False, "hijacks are not allowed (binary={0}, other-source={1})".format(hijacked_binary, hijacked_from)
+
+        acl_per_source = session.query(ACLPerSource).filter_by(acl=acl, fingerprint=upload.fingerprint, source=source_name).first()
+        if acl.allow_per_source:
+            if acl_per_source is None:
+                return False, "not allowed to upload source package '{0}'".format(source_name)
+        if acl.deny_per_source and acl_per_source is not None:
+            return False, acl_per_source.reason or "forbidden to upload source package '{0}'".format(source_name)
+
+        return True, None
+
+    def check(self, upload):
+        session = upload.session
+        fingerprint = upload.fingerprint
+        keyring = fingerprint.keyring
+
+        if keyring is None:
+            raise Reject('No keyring for fingerprint {0}'.format(fingerprint.fingerprint))
+        if not keyring.active:
+            raise Reject('Keyring {0} is not active'.format(keyring.name))
+
+        acl = fingerprint.acl or keyring.acl
+        if acl is None:
+            raise Reject('No ACL for fingerprint {0}'.format(fingerprint.fingerprint))
+        result, reason = self._check_acl(session, upload, acl)
+        if not result:
+            raise RejectACL(acl, reason)
+
+        for acl in session.query(ACL).filter_by(is_global=True):
+            result, reason = self._check_acl(session, upload, acl)
+            if result is False:
+                raise RejectACL(acl, reason)
+
+        return True
+
+    def per_suite_check(self, upload, suite):
+        acls = suite.acls
+        if len(acls) != 0:
+            accept = False
+            for acl in acls:
+                result, reason = self._check_acl(upload.session, upload, acl)
+                if result is False:
+                    raise Reject(reason)
+                accept = accept or result
+            if not accept:
+                raise Reject('Not accepted by any per-suite acl (suite={0})'.format(suite.suite_name))
+        return True
+
+class TransitionCheck(Check):
+    """check for a transition"""
+    def check(self, upload):
+        if 'source' not in upload.changes.architectures:
+            return True
+
+        transitions = self.get_transitions()
+        if transitions is None:
+            return True
+
+        control = upload.changes.changes
+        source = re_field_source.match(control['Source']).group('package')
+
+        for trans in transitions:
+            t = transitions[trans]
+            source = t["source"]
+            expected = t["new"]
+
+            # Will be None if nothing is in testing.
+            current = get_source_in_suite(transition_source, "testing", upload.session)
+            if current is not None:
+                compare = apt_pkg.version_compare(current.version, expected)
+
+            if current is None or compare < 0:
+                # This is still valid, the current version in testing is older than
+                # the new version we wait for, or there is none in testing yet
+
+                # Check if the source we look at is affected by this.
+                if source in t['packages']:
+                    # The source is affected, let's reject it.
+
+                    rejectmsg = "{0}: part of the {1} transition.\n\n".format(source, trans)
+
+                    if current is not None:
+                        currentlymsg = "at version {0}".format(current.version)
+                    else:
+                        currentlymsg = "not present in testing"
+
+                    rejectmsg += "Transition description: {0}\n\n".format(t["reason"])
+
+                    rejectmsg += "\n".join(textwrap.wrap("""Your package
+is part of a testing transition designed to get {0} migrated (it is
+currently {1}, we need version {2}).  This transition is managed by the
+Release Team, and {3} is the Release-Team member responsible for it.
+Please mail debian-release@lists.debian.org or contact {3} directly if you
+need further assistance.  You might want to upload to experimental until this
+transition is done.""".format(source, currentlymsg, expected,t["rm"])))
+
+                    raise Reject(rejectmsg)
+
+        return True
+
+    def get_transitions(self):
+        cnf = Config()
+        path = cnf.get('Dinstall::ReleaseTransitions', '')
+        if path == '' or not os.path.exists(path):
+            return None
+
+        with open(path, 'r') as fd:
+            contents = fd.read()
+        try:
+            transitions = yaml.safe_load(contents)
+            return transitions
+        except yaml.YAMLError as msg:
+            utils.warn('Not checking transitions, the transitions file is broken: {0}'.format(msg))
+
+        return None
+
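A note on the data this check consumes: the transitions file named by
Dinstall::ReleaseTransitions is YAML, and the code above only relies on the
per-transition keys source, new, rm, reason and packages.  A hedged sketch of
the structure yaml.safe_load() is expected to return (all concrete values are
invented for illustration):

    # Hypothetical transitions data; only the keys accessed above are shown.
    transitions = {
        'ongoing-libexample': {                   # transition name, used in the reject message
            'source': 'libexample',               # source package driving the transition
            'new': '2.0-1',                       # version that must reach testing first
            'rm': 'Some Release Team Member',     # responsible Release Team member
            'reason': 'library transition to libexample2',
            'packages': ['libexample', 'example-tool'],  # affected source packages
        },
    }
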
+class NoSourceOnlyCheck(Check):
+    """Check for source-only upload
+
+    Source-only uploads are only allowed if Dinstall::AllowSourceOnlyUploads is
+    set. Otherwise they are rejected.
+    """
+    def check(self, upload):
+        if Config().find_b("Dinstall::AllowSourceOnlyUploads"):
+            return True
+        changes = upload.changes
+        if changes.source is not None and len(changes.binaries) == 0:
+            raise Reject('Source-only uploads are not allowed.')
+        return True
+
+class LintianCheck(Check):
+    """Check package using lintian"""
+    def check(self, upload):
+        changes = upload.changes
+
+        # Only check sourceful uploads.
+        if changes.source is None:
+            return True
+        # Only check uploads to unstable or experimental.
+        if 'unstable' not in changes.distributions and 'experimental' not in changes.distributions:
+            return True
+
+        cnf = Config()
+        if 'Dinstall::LintianTags' not in cnf:
+            return True
+        tagfile = cnf['Dinstall::LintianTags']
+
+        with open(tagfile, 'r') as sourcefile:
+            sourcecontent = sourcefile.read()
+        try:
+            lintiantags = yaml.safe_load(sourcecontent)['lintian']
+        except yaml.YAMLError as msg:
+            raise Exception('Could not read lintian tags file {0}, YAML error: {1}'.format(tagfile, msg))
+
+        fd, temp_filename = utils.temp_filename(mode=0o644)
+        temptagfile = os.fdopen(fd, 'w')
+        for tags in lintiantags.itervalues():
+            for tag in tags:
+                print >>temptagfile, tag
+        temptagfile.close()
+
+        changespath = os.path.join(upload.directory, changes.filename)
+        try:
+            cmd = []
+            result = 0
+
+            user = cnf.get('Dinstall::UnprivUser') or None
+            if user is not None:
+                cmd.extend(['sudo', '-H', '-u', user])
+
+            cmd.extend(['/usr/bin/lintian', '--show-overrides', '--tags-from-file', temp_filename, changespath])
+            output = daklib.daksubprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError as e:
+            result = e.returncode
+            output = e.output
+        finally:
+            os.unlink(temp_filename)
+
+        if result == 2:
+            utils.warn("lintian failed for %s [return code: %s]." % \
+                (changespath, result))
+            utils.warn(utils.prefix_multi_line_string(output, \
+                " [possible output:] "))
+
+        parsed_tags = lintian.parse_lintian_output(output)
+        rejects = list(lintian.generate_reject_messages(parsed_tags, lintiantags))
+        if len(rejects) != 0:
+            raise Reject('\n'.join(rejects))
+
+        return True
+
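Similarly, the Dinstall::LintianTags file read above is YAML with a single
top-level 'lintian' key mapping tag categories to lists of tag names; the
categories are passed through unchanged to lintian.generate_reject_messages().
A hedged sketch of the parsed structure (category and tag names are
placeholders):

    # Hypothetical parsed content of the lintian tags file.
    lintiantags = {
        'fatal':    ['some-always-reject-tag', 'another-always-reject-tag'],
        'nonfatal': ['some-overridable-tag'],
    }
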
+class SourceFormatCheck(Check):
+    """Check source format is allowed in the target suite"""
+    def per_suite_check(self, upload, suite):
+        source = upload.changes.source
+        session = upload.session
+        if source is None:
+            return True
+
+        source_format = source.dsc['Format']
+        query = session.query(SrcFormat).filter_by(format_name=source_format).filter(SrcFormat.suites.contains(suite))
+        if query.first() is None:
+            raise Reject('source format {0} is not allowed in suite {1}'.format(source_format, suite.suite_name))
+
+class SuiteArchitectureCheck(Check):
+    def per_suite_check(self, upload, suite):
+        session = upload.session
+        for arch in upload.changes.architectures:
+            query = session.query(Architecture).filter_by(arch_string=arch).filter(Architecture.suites.contains(suite))
+            if query.first() is None:
+                raise Reject('Architecture {0} is not allowed in suite {1}'.format(arch, suite.suite_name))
+
+        return True
+
+class VersionCheck(Check):
+    """Check version constraints"""
+    def _highest_source_version(self, session, source_name, suite):
+        db_source = session.query(DBSource).filter_by(source=source_name) \
+            .filter(DBSource.suites.contains(suite)).order_by(DBSource.version.desc()).first()
+        if db_source is None:
+            return None
+        else:
+            return db_source.version
+
+    def _highest_binary_version(self, session, binary_name, suite, architecture):
+        db_binary = session.query(DBBinary).filter_by(package=binary_name) \
+            .filter(DBBinary.suites.contains(suite)) \
+            .join(DBBinary.architecture) \
+            .filter(Architecture.arch_string.in_(['all', architecture])) \
+            .order_by(DBBinary.version.desc()).first()
+        if db_binary is None:
+            return None
+        else:
+            return db_binary.version
+
+    def _version_checks(self, upload, suite, other_suite, op, op_name):
+        session = upload.session
+
+        if upload.changes.source is not None:
+            source_name = upload.changes.source.dsc['Source']
+            source_version = upload.changes.source.dsc['Version']
+            v = self._highest_source_version(session, source_name, other_suite)
+            if v is not None and not op(version_compare(source_version, v)):
+                raise Reject("Version check failed:\n"
+                             "Your upload included the source package {0}, version {1},\n"
+                             "however {3} already has version {2}.\n"
+                             "Uploads to {5} must have a {4} version than present in {3}."
+                             .format(source_name, source_version, v, other_suite.suite_name, op_name, suite.suite_name))
+
+        for binary in upload.changes.binaries:
+            binary_name = binary.control['Package']
+            binary_version = binary.control['Version']
+            architecture = binary.control['Architecture']
+            v = self._highest_binary_version(session, binary_name, other_suite, architecture)
+            if v is not None and not op(version_compare(binary_version, v)):
+                raise Reject("Version check failed:\n"
+                             "Your upload included the binary package {0}, version {1}, for {2},\n"
+                             "however {4} already has version {3}.\n"
+                             "Uploads to {6} must have a {5} version than present in {4}."
+                             .format(binary_name, binary_version, architecture, v, other_suite.suite_name, op_name, suite.suite_name))
+
+    def per_suite_check(self, upload, suite):
+        session = upload.session
+
+        vc_newer = session.query(dbconn.VersionCheck).filter_by(suite=suite) \
+            .filter(dbconn.VersionCheck.check.in_(['MustBeNewerThan', 'Enhances']))
+        must_be_newer_than = [ vc.reference for vc in vc_newer ]
+        # Must be newer than old versions in `suite`
+        must_be_newer_than.append(suite)
+
+        for s in must_be_newer_than:
+            self._version_checks(upload, suite, s, lambda result: result > 0, 'higher')
+
+        vc_older = session.query(dbconn.VersionCheck).filter_by(suite=suite, check='MustBeOlderThan')
+        must_be_older_than = [ vc.reference for vc in vc_older ]
+
+        for s in must_be_older_than:
+            self._version_checks(upload, suite, s, lambda result: result < 0, 'lower')
+
+        return True
+
+    @property
+    def forcable(self):
+        return True
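Taken together, the checks above share a small interface: each Check subclass
implements check(upload) and/or per_suite_check(upload, suite) and signals
failure by raising Reject (or RejectACL).  A minimal hedged sketch of a further
check written against that assumed interface, purely for illustration:

    # Illustrative only; not part of this commit.
    class ChangedByCheck(Check):
        """example check: reject uploads whose .changes lacks a Changed-By field"""
        def check(self, upload):
            control = upload.changes.changes
            if 'Changed-By' not in control:
                raise Reject('{0}: Missing Changed-By field'.format(upload.changes.filename))
            return True
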
diff --git a/daklib/command.py b/daklib/command.py
new file mode 100644 (file)
index 0000000..c1f9c70
--- /dev/null
@@ -0,0 +1,325 @@
+"""module to handle command files
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2012, Ansgar Burchardt <ansgar@debian.org>
+@license: GPL-2+
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import apt_pkg
+import os
+import re
+import tempfile
+
+from daklib.config import Config
+from daklib.dbconn import *
+from daklib.gpg import SignedFile
+from daklib.regexes import re_field_package
+from daklib.textutils import fix_maintainer
+from daklib.utils import gpg_get_key_addresses, send_mail, TemplateSubst
+
+class CommandError(Exception):
+    pass
+
+class CommandFile(object):
+    def __init__(self, filename, data, log=None):
+        if log is None:
+            from daklib.daklog import Logger
+            log = Logger()
+        self.cc = []
+        self.result = []
+        self.log = log
+        self.filename = filename
+        self.data = data
+
+    def _check_replay(self, signed_file, session):
+        """check for replays
+
+        @note: Will commit changes to the database.
+
+        @type signed_file: L{daklib.gpg.SignedFile}
+
+        @param session: database session
+        """
+        # Mark commands file as seen to prevent replays.
+        signature_history = SignatureHistory.from_signed_file(signed_file)
+        session.add(signature_history)
+        session.commit()
+
+    def _quote_section(self, section):
+        lines = []
+        for l in str(section).splitlines():
+            lines.append("> {0}".format(l))
+        return "\n".join(lines)
+
+    def _evaluate_sections(self, sections, session):
+        session.rollback()
+        try:
+            while True:
+                sections.next()
+                section = sections.section
+                self.result.append(self._quote_section(section))
+
+                action = section.get('Action', None)
+                if action is None:
+                    raise CommandError('Encountered section without Action field')
+
+                if action == 'dm':
+                    self.action_dm(self.fingerprint, section, session)
+                elif action == 'dm-remove':
+                    self.action_dm_remove(self.fingerprint, section, session)
+                elif action == 'dm-migrate':
+                    self.action_dm_migrate(self.fingerprint, section, session)
+                elif action == 'break-the-archive':
+                    self.action_break_the_archive(self.fingerprint, section, session)
+                else:
+                    raise CommandError('Unknown action: {0}'.format(action))
+
+                self.result.append('')
+        except StopIteration:
+            pass
+        finally:
+            session.rollback()
+
+    def _notify_uploader(self):
+        cnf = Config()
+
+        bcc = 'X-DAK: dak process-command'
+        if 'Dinstall::Bcc' in cnf:
+            bcc = '{0}\nBcc: {1}'.format(bcc, cnf['Dinstall::Bcc'])
+
+        cc = set(fix_maintainer(address)[1] for address in self.cc)
+
+        subst = {
+            '__DAK_ADDRESS__': cnf['Dinstall::MyEmailAddress'],
+            '__MAINTAINER_TO__': fix_maintainer(self.uploader)[1],
+            '__CC__': ", ".join(cc),
+            '__BCC__': bcc,
+            '__RESULTS__': "\n".join(self.result),
+            '__FILENAME__': self.filename,
+            }
+
+        message = TemplateSubst(subst, os.path.join(cnf['Dir::Templates'], 'process-command.processed'))
+
+        send_mail(message)
+
+    def evaluate(self):
+        """evaluate commands file
+
+        @rtype:   bool
+        @returns: C{True} if the file was processed successfully,
+                  C{False} otherwise
+        """
+        result = True
+
+        session = DBConn().session()
+
+        keyrings = session.query(Keyring).filter_by(active=True).order_by(Keyring.priority)
+        keyring_files = [ k.keyring_name for k in keyrings ]
+
+        signed_file = SignedFile(self.data, keyring_files)
+        if not signed_file.valid:
+            self.log.log(['invalid signature', self.filename])
+            return False
+
+        self.fingerprint = session.query(Fingerprint).filter_by(fingerprint=signed_file.primary_fingerprint).one()
+        if self.fingerprint.keyring is None:
+            self.log.log(['signed by key in unknown keyring', self.filename])
+            return False
+        assert self.fingerprint.keyring.active
+
+        self.log.log(['processing', self.filename, 'signed-by={0}'.format(self.fingerprint.fingerprint)])
+
+        with tempfile.TemporaryFile() as fh:
+            fh.write(signed_file.contents)
+            fh.seek(0)
+            sections = apt_pkg.TagFile(fh)
+
+        self.uploader = None
+        addresses = gpg_get_key_addresses(self.fingerprint.fingerprint)
+        if len(addresses) > 0:
+            self.uploader = addresses[0]
+
+        try:
+            sections.next()
+            section = sections.section
+            if 'Uploader' in section:
+                self.uploader = section['Uploader']
+            # TODO: Verify first section has valid Archive field
+            if 'Archive' not in section:
+                raise CommandError('No Archive field in first section.')
+
+            # TODO: send mail when we detect a replay.
+            self._check_replay(signed_file, session)
+
+            self._evaluate_sections(sections, session)
+            self.result.append('')
+        except Exception as e:
+            self.log.log(['ERROR', e])
+            self.result.append("There was an error processing this section. No changes were committed.\nDetails:\n{0}".format(e))
+            result = False
+
+        self._notify_uploader()
+
+        session.close()
+
+        return result
+
+    def _split_packages(self, value):
+        names = value.split()
+        for name in names:
+            if not re_field_package.match(name):
+                raise CommandError('Invalid package name "{0}"'.format(name))
+        return names
+
+    def action_dm(self, fingerprint, section, session):
+        cnf = Config()
+
+        if 'Command::DM::AdminKeyrings' not in cnf \
+                or 'Command::DM::ACL' not in cnf \
+                or 'Command::DM::Keyrings' not in cnf:
+            raise CommandError('DM command is not configured for this archive.')
+
+        allowed_keyrings = cnf.value_list('Command::DM::AdminKeyrings')
+        if fingerprint.keyring.keyring_name not in allowed_keyrings:
+            raise CommandError('Key {0} is not allowed to set DM'.format(fingerprint.fingerprint))
+
+        acl_name = cnf.get('Command::DM::ACL', 'dm')
+        acl = session.query(ACL).filter_by(name=acl_name).one()
+
+        fpr_hash = section['Fingerprint'].translate(None, ' ')
+        fpr = session.query(Fingerprint).filter_by(fingerprint=fpr_hash).first()
+        if fpr is None:
+            raise CommandError('Unknown fingerprint {0}'.format(fpr_hash))
+        if fpr.keyring is None or fpr.keyring.keyring_name not in cnf.value_list('Command::DM::Keyrings'):
+            raise CommandError('Key {0} is not in DM keyring.'.format(fpr.fingerprint))
+        addresses = gpg_get_key_addresses(fpr.fingerprint)
+        if len(addresses) > 0:
+            self.cc.append(addresses[0])
+
+        self.log.log(['dm', 'fingerprint', fpr.fingerprint])
+        self.result.append('Fingerprint: {0}'.format(fpr.fingerprint))
+        if len(addresses) > 0:
+            self.log.log(['dm', 'uid', addresses[0]])
+            self.result.append('Uid: {0}'.format(addresses[0]))
+
+        for source in self._split_packages(section.get('Allow', '')):
+            # Check for existence of source package to catch typos
+            if session.query(DBSource).filter_by(source=source).first() is None:
+                raise CommandError('Tried to grant permissions for unknown source package: {0}'.format(source))
+
+            if session.query(ACLPerSource).filter_by(acl=acl, fingerprint=fpr, source=source).first() is None:
+                aps = ACLPerSource()
+                aps.acl = acl
+                aps.fingerprint = fpr
+                aps.source = source
+                aps.created_by = fingerprint
+                aps.reason = section.get('Reason')
+                session.add(aps)
+                self.log.log(['dm', 'allow', fpr.fingerprint, source])
+                self.result.append('Allowed: {0}'.format(source))
+            else:
+                self.result.append('Already-Allowed: {0}'.format(source))
+
+        session.flush()
+
+        for source in self._split_packages(section.get('Deny', '')):
+            count = session.query(ACLPerSource).filter_by(acl=acl, fingerprint=fpr, source=source).delete()
+            if count == 0:
+                raise CommandError('Tried to remove upload permissions for package {0}, '
+                                   'but no upload permissions were granted before.'.format(source))
+
+            self.log.log(['dm', 'deny', fpr.fingerprint, source])
+            self.result.append('Denied: {0}'.format(source))
+
+        session.commit()
+
+    def _action_dm_admin_common(self, fingerprint, section, session):
+        cnf = Config()
+
+        if 'Command::DM-Admin::AdminFingerprints' not in cnf \
+                or 'Command::DM::ACL' not in cnf:
+            raise CommandError('DM admin command is not configured for this archive.')
+
+        allowed_fingerprints = cnf.value_list('Command::DM-Admin::AdminFingerprints')
+        if fingerprint.fingerprint not in allowed_fingerprints:
+            raise CommandError('Key {0} is not allowed to admin DM'.format(fingerprint.fingerprint))
+
+    def action_dm_remove(self, fingerprint, section, session):
+        self._action_dm_admin_common(fingerprint, section, session)
+
+        cnf = Config()
+        acl_name = cnf.get('Command::DM::ACL', 'dm')
+        acl = session.query(ACL).filter_by(name=acl_name).one()
+
+        fpr_hash = section['Fingerprint'].translate(None, ' ')
+        fpr = session.query(Fingerprint).filter_by(fingerprint=fpr_hash).first()
+        if fpr is None:
+            self.result.append('Unknown fingerprint: {0}\nNo action taken.'.format(fpr_hash))
+            return
+
+        self.log.log(['dm-remove', fpr.fingerprint])
+
+        count = 0
+        for entry in session.query(ACLPerSource).filter_by(acl=acl, fingerprint=fpr):
+            self.log.log(['dm-remove', fpr.fingerprint, 'source={0}'.format(entry.source)])
+            count += 1
+            session.delete(entry)
+
+        self.result.append('Removed: {0}.\n{1} acl entries removed.'.format(fpr.fingerprint, count))
+
+        session.commit()
+
+    def action_dm_migrate(self, fingerprint, section, session):
+        self._action_dm_admin_common(fingerprint, section, session)
+        cnf = Config()
+        acl_name = cnf.get('Command::DM::ACL', 'dm')
+        acl = session.query(ACL).filter_by(name=acl_name).one()
+
+        fpr_hash_from = section['From'].translate(None, ' ')
+        fpr_from = session.query(Fingerprint).filter_by(fingerprint=fpr_hash_from).first()
+        if fpr_from is None:
+            self.result.append('Unknown fingerprint (From): {0}\nNo action taken.'.format(fpr_hash_from))
+            return
+
+        fpr_hash_to = section['To'].translate(None, ' ')
+        fpr_to = session.query(Fingerprint).filter_by(fingerprint=fpr_hash_to).first()
+        if fpr_to is None:
+            self.result.append('Unknown fingerprint (To): {0}\nNo action taken.'.format(fpr_hash_to))
+            return
+        if fpr_to.keyring is None or fpr_to.keyring.keyring_name not in cnf.value_list('Command::DM::Keyrings'):
+            self.result.append('Key (To) {0} is not in DM keyring.\nNo action taken.'.format(fpr_to.fingerprint))
+            return
+
+        self.log.log(['dm-migrate', 'from={0}'.format(fpr_hash_from), 'to={0}'.format(fpr_hash_to)])
+
+        count = 0
+        for entry in session.query(ACLPerSource).filter_by(acl=acl, fingerprint=fpr_from):
+            self.log.log(['dm-migrate', 'from={0}'.format(fpr_hash_from), 'to={0}'.format(fpr_hash_to), 'source={0}'.format(entry.source)])
+            entry.fingerprint = fpr_to
+            count += 1
+
+        self.result.append('Migrated {0} to {1}.\n{2} acl entries changed.'.format(fpr_hash_from, fpr_hash_to, count))
+
+        session.commit()
+
+    def action_break_the_archive(self, fingerprint, section, session):
+        name = 'Dave'
+        uid = fingerprint.uid
+        if uid is not None and uid.name is not None:
+            name = uid.name.split()[0]
+
+        self.result.append("DAK9000: I'm sorry, {0}. I'm afraid I can't do that.".format(name))
diff --git a/daklib/config.py b/daklib/config.py
index 7e3c8bac582baf895a8a3b7f2b08d39d5619566c..a6a7951e806d4654534a1d65631b4cf96413fe12 100755 (executable)
@@ -28,6 +28,7 @@ Config access class
 
 ################################################################################
 
+import grp
 import os
 import apt_pkg
 import socket
@@ -76,6 +77,19 @@ class Config(object):
         if conffile:
             apt_pkg.read_config_file_isc(self.Cnf, conffile)
 
+        # Read group-specific options
+        if 'ByGroup' in self.Cnf:
+            bygroup = self.Cnf.subtree('ByGroup')
+            groups = set([os.getgid()])
+            groups.update(os.getgroups())
+
+            for group in bygroup.list():
+                gid = grp.getgrnam(group).gr_gid
+                if gid in groups:
+                    if bygroup.get(group):
+                        apt_pkg.read_config_file_isc(self.Cnf, bygroup[group])
+                    break
+
         # Rebind some functions
         # TODO: Clean this up
         self.get = self.Cnf.get
@@ -83,9 +97,13 @@ class Config(object):
         self.value_list = self.Cnf.value_list
         self.find = self.Cnf.find
         self.find_b = self.Cnf.find_b
+        self.find_i = self.Cnf.find_i
 
     def has_key(self, name):
-        return self.Cnf.has_key(name)
+        return name in self.Cnf
+
+    def __contains__(self, name):
+        return name in self.Cnf
 
     def __getitem__(self, name):
         return self.Cnf[name]
@@ -115,7 +133,7 @@ class Config(object):
         """
         for field in [('db_revision',      None,       int),
                       ('defaultsuitename', 'unstable', str),
-                      ('exportpath',       '',         str)
+                      ('use_extfiles',     None,       int)
                       ]:
             setattr(self, 'get_%s' % field[0], lambda s=None, x=field[0], y=field[1], z=field[2]: self.get_db_value(x, y, z))
             setattr(Config, '%s' % field[0], property(fget=getattr(self, 'get_%s' % field[0])))
@@ -129,4 +147,3 @@ class Config(object):
             return get_suite(suitename)
 
     defaultsuite = property(get_defaultsuite)
-
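The ByGroup hunk above makes the configuration group-aware: the first listed
group the invoking user belongs to wins, and a non-empty value names an extra
ISC-style config file that is merged into the base configuration.  A hedged
sketch of such a stanza in dak.conf (group names and path are invented; the new
dak.conf-backports/dak.conf-dak files in this commit suggest one possible use):

    ByGroup
    {
      // first matching group wins; an empty value means "base config only"
      ftpmaster "";
      backports "/srv/ftp-master.debian.org/dak/config/debian/dak.conf-backports";
    };
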
diff --git a/daklib/contents.py b/daklib/contents.py
old mode 100755 (executable)
new mode 100644 (file)
index e808da6..f9c1feb
@@ -31,11 +31,10 @@ from daklib.filewriter import BinaryContentsFileWriter, SourceContentsFileWriter
 
 from multiprocessing import Pool
 from shutil import rmtree
-from subprocess import Popen, PIPE, check_call
 from tempfile import mkdtemp
 
+import daklib.daksubprocess
 import os.path
-import signal
 
 class BinaryContentsWriter(object):
     '''
@@ -65,15 +64,23 @@ class BinaryContentsWriter(object):
             'type':          self.overridetype.overridetype,
         }
 
-        sql = '''
-with
+        sql_create_temp = '''
+create temp table newest_binaries (
+    id integer primary key,
+    package text);
 
-newest_binaries as
-    (select distinct on (package) id, package from binaries
+create index newest_binaries_by_package on newest_binaries (package);
+
+insert into newest_binaries (id, package)
+    select distinct on (package) id, package from binaries
         where type = :type and
             (architecture = :arch_all or architecture = :arch) and
             id in (select bin from bin_associations where suite = :suite)
-        order by package, version desc),
+        order by package, version desc;'''
+        self.session.execute(sql_create_temp, params=params)
+
+        sql = '''
+with
 
 unique_override as
     (select o.package, s.section
@@ -115,6 +122,7 @@ select bc.file, string_agg(o.section || '/' || b.package, ',' order by b.package
         Returns a writer object.
         '''
         values = {
+            'archive':      self.suite.archive.path,
             'suite':        self.suite.suite_name,
             'component':    self.component.component_name,
             'debtype':      self.overridetype.overridetype,
@@ -165,16 +173,22 @@ class SourceContentsWriter(object):
             'component_id': self.component.component_id,
         }
 
-        sql = '''
-with
-  newest_sources as
-    (select distinct on (source) s.id, s.source from source s
-        join files f on f.id = s.file
-        join location l on l.id = f.location
+        sql_create_temp = '''
+create temp table newest_sources (
+    id integer primary key,
+    source text);
+
+create index sources_binaries_by_source on newest_sources (source);
+
+insert into newest_sources (id, source)
+    select distinct on (source) s.id, s.source from source s
+        join files_archive_map af on s.file = af.file_id
         where s.id in (select source from src_associations where suite = :suite_id)
-            and l.component = :component_id
-        order by source, version desc)
+            and af.component_id = :component_id
+        order by source, version desc;'''
+        self.session.execute(sql_create_temp, params=params)
 
+        sql = '''
 select sc.file, string_agg(s.source, ',' order by s.source) as pkglist
     from newest_sources s, src_contents sc
     where s.id = sc.source_id group by sc.file'''
@@ -208,6 +222,7 @@ select sc.file, string_agg(s.source, ',' order by s.source) as pkglist
         Returns a writer object.
         '''
         values = {
+            'archive':   self.suite.archive.path,
             'suite':     self.suite.suite_name,
             'component': self.component.component_name
         }
@@ -238,6 +253,7 @@ def binary_helper(suite_id, arch_id, overridetype_id, component_id):
         overridetype.overridetype, component.component_name]
     contents_writer = BinaryContentsWriter(suite, architecture, overridetype, component)
     contents_writer.write_file()
+    session.close()
     return log_message
 
 def source_helper(suite_id, component_id):
@@ -251,6 +267,7 @@ def source_helper(suite_id, component_id):
     log_message = [suite.suite_name, 'source', component.component_name]
     contents_writer = SourceContentsWriter(suite, component)
     contents_writer.write_file()
+    session.close()
     return log_message
 
 class ContentsWriter(object):
@@ -266,7 +283,7 @@ class ContentsWriter(object):
         class_.logger.log(result)
 
     @classmethod
-    def write_all(class_, logger, suite_names = [], component_names = [], force = False):
+    def write_all(class_, logger, archive_names = [], suite_names = [], component_names = [], force = False):
         '''
         Writes all Contents files for suites in list suite_names which defaults
         to all 'touchable' suites if not specified explicitely. Untouchable
@@ -275,13 +292,15 @@ class ContentsWriter(object):
         class_.logger = logger
         session = DBConn().session()
         suite_query = session.query(Suite)
+        if len(archive_names) > 0:
+            suite_query = suite_query.join(Suite.archive).filter(Archive.archive_name.in_(archive_names))
         if len(suite_names) > 0:
             suite_query = suite_query.filter(Suite.suite_name.in_(suite_names))
         component_query = session.query(Component)
         if len(component_names) > 0:
             component_query = component_query.filter(Component.component_name.in_(component_names))
         if not force:
-            suite_query = suite_query.filter_by(untouchable = False)
+            suite_query = suite_query.filter(Suite.untouchable == False)
         deb_id = get_override_type('deb', session).overridetype_id
         udeb_id = get_override_type('udeb', session).overridetype_id
         pool = Pool()
@@ -363,26 +382,21 @@ def binary_scan_helper(binary_id):
     scanner = BinaryContentsScanner(binary_id)
     scanner.scan()
 
-
-def subprocess_setup():
-    # Python installs a SIGPIPE handler by default. This is usually not what
-    # non-Python subprocesses expect.
-    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
 class UnpackedSource(object):
     '''
     UnpackedSource extracts a source package into a temporary location and
     gives you some convinient function for accessing it.
     '''
-    def __init__(self, dscfilename):
+    def __init__(self, dscfilename, tmpbasedir=None):
         '''
         The dscfilename is a name of a DSC file that will be extracted.
         '''
-        temp_directory = mkdtemp(dir = Config()['Dir::TempPath'])
+        basedir = tmpbasedir if tmpbasedir else Config()['Dir::TempPath']
+        temp_directory = mkdtemp(dir = basedir)
         self.root_directory = os.path.join(temp_directory, 'root')
         command = ('dpkg-source', '--no-copy', '--no-check', '-q', '-x',
             dscfilename, self.root_directory)
-        check_call(command, preexec_fn = subprocess_setup)
+        daklib.daksubprocess.check_call(command)
 
     def get_root_directory(self):
         '''
@@ -485,4 +499,3 @@ def source_scan_helper(source_id):
         scanner.scan()
     except Exception as e:
         print e
-
diff --git a/daklib/daksubprocess.py b/daklib/daksubprocess.py
new file mode 100644 (file)
index 0000000..ff1df77
--- /dev/null
@@ -0,0 +1,70 @@
+"""subprocess management for dak
+
+@copyright: 2013, Ansgar Burchardt <ansgar@debian.org>
+@license: GPL-2+
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+import signal
+import subprocess
+
+#
+def fix_signal_handlers():
+    """reset signal handlers to default action.
+
+    Python changes the signal handler to SIG_IGN for a few signals which
+    causes unexpected behaviour in child processes. This function resets
+    them to their default action.
+
+    Reference: http://bugs.python.org/issue1652
+    """
+    for signal_name in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
+        try:
+            signal_number = getattr(signal, signal_name)
+            signal.signal(signal_number, signal.SIG_DFL)
+        except AttributeError:
+            pass
+
+def _generate_preexec_fn(other_preexec_fn=None):
+    def preexec_fn():
+        fix_signal_handlers()
+        if other_preexec_fn is not None:
+            other_preexec_fn()
+    return preexec_fn
+
+def call(*args, **kwargs):
+    """wrapper around subprocess.call that fixes signal handling"""
+    preexec_fn = _generate_preexec_fn(kwargs.get('preexec_fn'))
+    kwargs['preexec_fn'] = preexec_fn
+    return subprocess.call(*args, **kwargs)
+
+def check_call(*args, **kwargs):
+    """wrapper around subprocess.check_call that fixes signal handling"""
+    preexec_fn = _generate_preexec_fn(kwargs.get('preexec_fn'))
+    kwargs['preexec_fn'] = preexec_fn
+    return subprocess.check_call(*args, **kwargs)
+
+def check_output(*args, **kwargs):
+    """wrapper around subprocess.check_output that fixes signal handling"""
+    preexec_fn = _generate_preexec_fn(kwargs.get('preexec_fn'))
+    kwargs['preexec_fn'] = preexec_fn
+    return subprocess.check_output(*args, **kwargs)
+
+def Popen(*args, **kwargs):
+    """wrapper around subprocess.Popen that fixes signal handling"""
+    preexec_fn = _generate_preexec_fn(kwargs.get('preexec_fn'))
+    kwargs['preexec_fn'] = preexec_fn
+    return subprocess.Popen(*args, **kwargs)
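As the contents.py and dbconn.py hunks in this commit show, callers simply swap
the stdlib entry points for these wrappers, which accept the same arguments as
their subprocess counterparts.  A short hedged usage sketch (file names
invented):

    import subprocess
    import daklib.daksubprocess

    # Same signature as subprocess.check_call; only preexec_fn is injected.
    daklib.daksubprocess.check_call(['dpkg-source', '--no-check', '-x', 'example_1.0-1.dsc'])

    # Popen works the same way, e.g. to stream a package's contents:
    dpkg = daklib.daksubprocess.Popen(['dpkg-deb', '--fsys-tarfile', 'example_1.0-1_amd64.deb'],
                                      stdout=subprocess.PIPE)
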
diff --git a/daklib/dbconn.py b/daklib/dbconn.py
old mode 100755 (executable)
new mode 100644 (file)
index c0801b4..7a1fcb2
 ################################################################################
 
 import apt_pkg
+import daklib.daksubprocess
 import os
 from os.path import normpath
 import re
 import psycopg2
+import subprocess
 import traceback
-import commands
-import signal
 
 try:
     # python >= 2.6
@@ -52,7 +52,6 @@ except:
 from datetime import datetime, timedelta
 from errno import ENOENT
 from tempfile import mkstemp, mkdtemp
-from subprocess import Popen, PIPE
 from tarfile import TarFile
 
 from inspect import getargspec
@@ -110,11 +109,11 @@ class DebVersion(UserDefinedType):
         return None
 
 sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version in ["0.5", "0.6", "0.7"]:
+if sa_major_version in ["0.5", "0.6", "0.7", "0.8"]:
     from sqlalchemy.databases import postgres
     postgres.ischema_names['debversion'] = DebVersion
 else:
-    raise Exception("dak only ported to SQLA versions 0.5 to 0.7.  See daklib/dbconn.py")
+    raise Exception("dak only ported to SQLA versions 0.5 to 0.8.  See daklib/dbconn.py")
 
 ################################################################################
 
@@ -311,7 +310,7 @@ class ORMObject(object):
         return object_session(self)
 
     def clone(self, session = None):
-        '''
+        """
         Clones the current object in a new session and returns the new clone. A
         fresh session is created if the optional session parameter is not
         provided. The function will fail if a session is provided and has
@@ -324,8 +323,8 @@ class ORMObject(object):
         WARNING: Only persistent (committed) objects can be cloned. Changes
         made to the original object that are not committed yet will get lost.
         The session of the new object will always be rolled back to avoid
-        ressource leaks.
-        '''
+        resource leaks.
+        """
 
         if self.session() is None:
             raise RuntimeError( \
@@ -369,6 +368,20 @@ validator = Validator()
 
 ################################################################################
 
+class ACL(ORMObject):
+    def __repr__(self):
+        return "<ACL {0}>".format(self.name)
+
+__all__.append('ACL')
+
+class ACLPerSource(ORMObject):
+    def __repr__(self):
+        return "<ACLPerSource acl={0} fingerprint={1} source={2} reason={3}>".format(self.acl.name, self.fingerprint.fingerprint, self.source, self.reason)
+
+__all__.append('ACLPerSource')
+
+################################################################################
+
 class Architecture(ORMObject):
     def __init__(self, arch_string = None, description = None):
         self.arch_string = arch_string
@@ -419,27 +432,6 @@ def get_architecture(architecture, session=None):
 
 __all__.append('get_architecture')
 
-# TODO: should be removed because the implementation is too trivial
-@session_wrapper
-def get_architecture_suites(architecture, session=None):
-    """
-    Returns list of Suite objects for given C{architecture} name
-
-    @type architecture: str
-    @param architecture: Architecture name to search for
-
-    @type session: Session
-    @param session: Optional SQL session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: list
-    @return: list of Suite objects for the given name (may be empty)
-    """
-
-    return get_architecture(architecture, session).suites
-
-__all__.append('get_architecture_suites')
-
 ################################################################################
 
 class Archive(object):
@@ -480,6 +472,19 @@ __all__.append('get_archive')
 
 ################################################################################
 
+class ArchiveFile(object):
+    def __init__(self, archive=None, component=None, file=None):
+        self.archive = archive
+        self.component = component
+        self.file = file
+    @property
+    def path(self):
+        return os.path.join(self.archive.path, 'pool', self.component.component_name, self.file.filename)
+
+__all__.append('ArchiveFile')
+
+################################################################################
+
 class BinContents(ORMObject):
     def __init__(self, file = None, binary = None):
         self.file = file
@@ -492,15 +497,10 @@ __all__.append('BinContents')
 
 ################################################################################
 
-def subprocess_setup():
-    # Python installs a SIGPIPE handler by default. This is usually not what
-    # non-Python subprocesses expect.
-    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
 class DBBinary(ORMObject):
     def __init__(self, package = None, source = None, version = None, \
         maintainer = None, architecture = None, poolfile = None, \
-        binarytype = 'deb'):
+        binarytype = 'deb', fingerprint=None):
         self.package = package
         self.source = source
         self.version = version
@@ -508,6 +508,7 @@ class DBBinary(ORMObject):
         self.architecture = architecture
         self.poolfile = poolfile
         self.binarytype = binarytype
+        self.fingerprint = fingerprint
 
     @property
     def pkid(self):
@@ -524,9 +525,6 @@ class DBBinary(ORMObject):
 
     metadata = association_proxy('key', 'value')
 
-    def get_component_name(self):
-        return self.poolfile.location.component.component_name
-
     def scan_contents(self):
         '''
         Yields the contents of the package. Only regular files are yielded and
@@ -535,8 +533,8 @@ class DBBinary(ORMObject):
         package does not contain any regular file.
         '''
         fullpath = self.poolfile.fullpath
-        dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE,
-            preexec_fn = subprocess_setup)
+        dpkg_cmd = ('dpkg-deb', '--fsys-tarfile', fullpath)
+        dpkg = daklib.daksubprocess.Popen(dpkg_cmd, stdout=subprocess.PIPE)
         tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
         for member in tar.getmembers():
             if not member.isdir():
@@ -574,7 +572,6 @@ class DBBinary(ORMObject):
         @rtype: dict
         @return: fields of the control section as a dictionary.
         '''
-        import apt_pkg
         stanza = self.read_control()
         return apt_pkg.TagSection(stanza)
 
@@ -625,73 +622,12 @@ def get_component_by_package_suite(package, suite_list, arch_list=[], session=No
     if binary is None:
         return None
     else:
-        return binary.get_component_name()
+        return binary.poolfile.component.component_name
 
 __all__.append('get_component_by_package_suite')
 
 ################################################################################
 
-class BinaryACL(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<BinaryACL %s>' % self.binary_acl_id
-
-__all__.append('BinaryACL')
-
-################################################################################
-
-class BinaryACLMap(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<BinaryACLMap %s>' % self.binary_acl_map_id
-
-__all__.append('BinaryACLMap')
-
-################################################################################
-
-MINIMAL_APT_CONF="""
-Dir
-{
-   ArchiveDir "%(archivepath)s";
-   OverrideDir "%(overridedir)s";
-   CacheDir "%(cachedir)s";
-};
-
-Default
-{
-   Packages::Compress ". bzip2 gzip";
-   Sources::Compress ". bzip2 gzip";
-   DeLinkLimit 0;
-   FileMode 0664;
-}
-
-bindirectory "incoming"
-{
-   Packages "Packages";
-   Contents " ";
-
-   BinOverride "override.sid.all3";
-   BinCacheDB "packages-accepted.db";
-
-   FileList "%(filelist)s";
-
-   PathPrefix "";
-   Packages::Extensions ".deb .udeb";
-};
-
-bindirectory "incoming/"
-{
-   Sources "Sources";
-   BinOverride "override.sid.all3";
-   SrcOverride "override.sid.all3.src";
-   FileList "%(filelist)s";
-};
-"""
-
 class BuildQueue(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -699,390 +635,8 @@ class BuildQueue(object):
     def __repr__(self):
         return '<BuildQueue %s>' % self.queue_name
 
-    def write_metadata(self, starttime, force=False):
-        # Do we write out metafiles?
-        if not (force or self.generate_metadata):
-            return
-
-        session = DBConn().session().object_session(self)
-
-        fl_fd = fl_name = ac_fd = ac_name = None
-        tempdir = None
-        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
-        startdir = os.getcwd()
-
-        try:
-            # Grab files we want to include
-            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
-            newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
-            # Write file list with newer files
-            (fl_fd, fl_name) = mkstemp()
-            for n in newer:
-                os.write(fl_fd, '%s\n' % n.fullpath)
-            os.close(fl_fd)
-
-            cnf = Config()
-
-            # Write minimal apt.conf
-            # TODO: Remove hardcoding from template
-            (ac_fd, ac_name) = mkstemp()
-            os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
-                                                'filelist': fl_name,
-                                                'cachedir': cnf["Dir::Cache"],
-                                                'overridedir': cnf["Dir::Override"],
-                                                })
-            os.close(ac_fd)
-
-            # Run apt-ftparchive generate
-            os.chdir(os.path.dirname(ac_name))
-            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
-
-            # Run apt-ftparchive release
-            # TODO: Eww - fix this
-            bname = os.path.basename(self.path)
-            os.chdir(self.path)
-            os.chdir('..')
-
-            # We have to remove the Release file otherwise it'll be included in the
-            # new one
-            try:
-                os.unlink(os.path.join(bname, 'Release'))
-            except OSError:
-                pass
-
-            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
-
-            # Crude hack with open and append, but this whole section is and should be redone.
-            if self.notautomatic:
-                release=open("Release", "a")
-                release.write("NotAutomatic: yes\n")
-                release.close()
-
-            # Sign if necessary
-            if self.signingkey:
-                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
-                if cnf.has_key("Dinstall::SigningPubKeyring"):
-                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
-
-                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
-
-            # Move the files if we got this far
-            os.rename('Release', os.path.join(bname, 'Release'))
-            if self.signingkey:
-                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
-
-        # Clean up any left behind files
-        finally:
-            os.chdir(startdir)
-            if fl_fd:
-                try:
-                    os.close(fl_fd)
-                except OSError:
-                    pass
-
-            if fl_name:
-                try:
-                    os.unlink(fl_name)
-                except OSError:
-                    pass
-
-            if ac_fd:
-                try:
-                    os.close(ac_fd)
-                except OSError:
-                    pass
-
-            if ac_name:
-                try:
-                    os.unlink(ac_name)
-                except OSError:
-                    pass
-
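The write_metadata() body above only substitutes into MINIMAL_APT_CONF; the template itself is defined earlier in dbconn.py and is not part of this hunk. A rough sketch of the kind of apt-ftparchive configuration the substitution keys imply (directives as documented in apt-ftparchive(1); everything else is invented for illustration):

MINIMAL_APT_CONF_SKETCH = """
Dir
{
  ArchiveDir "%(archivepath)s";
  OverrideDir "%(overridedir)s";
  CacheDir "%(cachedir)s";
};

bindirectory "incoming"
{
  Packages "Packages";
  FileList "%(filelist)s";
};
"""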
-    def clean_and_update(self, starttime, Logger, dryrun=False):
-        """WARNING: This routine commits for you"""
-        session = DBConn().session().object_session(self)
-
-        if self.generate_metadata and not dryrun:
-            self.write_metadata(starttime)
-
-        # Grab files older than our execution time
-        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
-        older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
-
-        for o in older:
-            killdb = False
-            try:
-                if dryrun:
-                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
-                else:
-                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
-                    os.unlink(o.fullpath)
-                    killdb = True
-            except OSError as e:
-                # If it wasn't there, don't worry
-                if e.errno == ENOENT:
-                    killdb = True
-                else:
-                    # TODO: Replace with proper logging call
-                    Logger.log(["E: Could not remove %s" % o.fullpath])
-
-            if killdb:
-                session.delete(o)
-
-        session.commit()
-
-        for f in os.listdir(self.path):
-            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
-                continue
-
-            if not self.contains_filename(f):
-                fp = os.path.join(self.path, f)
-                if dryrun:
-                    Logger.log(["I: Would remove unused link %s" % fp])
-                else:
-                    Logger.log(["I: Removing unused link %s" % fp])
-                    try:
-                        os.unlink(fp)
-                    except OSError:
-                        Logger.log(["E: Failed to unlink unreferenced file %s" % fp])
-
-    def contains_filename(self, filename):
-        """
-        @rtype: Boolean
-        @return: True if filename is supposed to be in the queue; False otherwise
-        """
-        session = DBConn().session().object_session(self)
-        if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
-            return True
-        elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
-            return True
-        return False
-
-    def add_file_from_pool(self, poolfile):
-        """Copies a file from the pool into the build queue.  Assumes that the PoolFile object is
-        attached to the same SQLAlchemy session as the Queue object is.
-
-        The caller is responsible for committing after calling this function."""
-        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-
-        # Check if we have a file of this name or this ID already
-        for f in self.queuefiles:
-            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
-               (f.poolfile is not None and f.poolfile.filename == poolfile_basename):
-                   # In this case, update the BuildQueueFile entry so we
-                   # don't remove it too early
-                   f.lastused = datetime.now()
-                   DBConn().session().object_session(poolfile).add(f)
-                   return f
-
-        # Prepare BuildQueueFile object
-        qf = BuildQueueFile()
-        qf.build_queue_id = self.queue_id
-        qf.filename = poolfile_basename
-
-        targetpath = poolfile.fullpath
-        queuepath = os.path.join(self.path, poolfile_basename)
-
-        try:
-            if self.copy_files:
-                # We need to copy instead of symlink
-                import utils
-                utils.copy(targetpath, queuepath)
-                # NULL in the fileid field implies a copy
-                qf.fileid = None
-            else:
-                os.symlink(targetpath, queuepath)
-                qf.fileid = poolfile.file_id
-        except FileExistsError:
-            if not poolfile.identical_to(queuepath):
-                raise
-        except OSError:
-            return None
-
-        # Get the same session as the PoolFile is using and add the qf to it
-        DBConn().session().object_session(poolfile).add(qf)
-
-        return qf
-
-    def add_changes_from_policy_queue(self, policyqueue, changes):
-        """
-        Copies a changes file from a policy queue together with its poolfiles.
-
-        @type policyqueue: PolicyQueue
-        @param policyqueue: policy queue to copy the changes from
-
-        @type changes: DBChange
-        @param changes: changes to copy to this build queue
-        """
-        for policyqueuefile in changes.files:
-            self.add_file_from_policy_queue(policyqueue, policyqueuefile)
-        for poolfile in changes.poolfiles:
-            self.add_file_from_pool(poolfile)
-
-    def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
-        """
-        Copies a file from a policy queue.
-        Assumes that the policyqueuefile is attached to the same SQLAlchemy
-        session as the Queue object is.  The caller is responsible for
-        committing after calling this function.
-
-        @type policyqueue: PolicyQueue
-        @param policyqueue: policy queue to copy the file from
-
-        @type policyqueuefile: ChangePendingFile
-        @param policyqueuefile: file to be added to the build queue
-        """
-        session = DBConn().session().object_session(policyqueuefile)
-
-        # Is the file already there?
-        try:
-            f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
-            f.lastused = datetime.now()
-            return f
-        except NoResultFound:
-            pass # continue below
-
-        # We have to add the file.
-        f = BuildQueuePolicyFile()
-        f.build_queue = self
-        f.file = policyqueuefile
-        f.filename = policyqueuefile.filename
-
-        source = os.path.join(policyqueue.path, policyqueuefile.filename)
-        target = f.fullpath
-        try:
-            # Always copy files from policy queues as they might move around.
-            import utils
-            utils.copy(source, target)
-        except FileExistsError:
-            if not policyqueuefile.identical_to(target):
-                raise
-        except OSError:
-            return None
-
-        session.add(f)
-        return f
-
 __all__.append('BuildQueue')
 
-@session_wrapper
-def get_build_queue(queuename, session=None):
-    """
-    Returns BuildQueue object for given C{queue name}, or None if it does not
-    exist.
-
-    @type queuename: string
-    @param queuename: The name of the queue
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: BuildQueue
-    @return: BuildQueue object for the given queue
-    """
-
-    q = session.query(BuildQueue).filter_by(queue_name=queuename)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_build_queue')
-
-################################################################################
-
-class BuildQueueFile(object):
-    """
-    BuildQueueFile represents a file in a build queue coming from a pool.
-    """
-
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
-
-    @property
-    def fullpath(self):
-        return os.path.join(self.buildqueue.path, self.filename)
-
-
-__all__.append('BuildQueueFile')
-
-################################################################################
-
-class BuildQueuePolicyFile(object):
-    """
-    BuildQueuePolicyFile represents a file in a build queue that comes from a
-    policy queue (and not a pool).
-    """
-
-    def __init__(self, *args, **kwargs):
-        pass
-
-    #@property
-    #def filename(self):
-    #    return self.file.filename
-
-    @property
-    def fullpath(self):
-        return os.path.join(self.build_queue.path, self.filename)
-
-__all__.append('BuildQueuePolicyFile')
-
-################################################################################
-
-class ChangePendingBinary(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
-
-__all__.append('ChangePendingBinary')
-
-################################################################################
-
-class ChangePendingFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingFile %s>' % self.change_pending_file_id
-
-    def identical_to(self, filename):
-        """
-        compare size and hash with the given file
-
-        @rtype: bool
-        @return: true if the given file has the same size and hash as this object; false otherwise
-        """
-        st = os.stat(filename)
-        if self.size != st.st_size:
-            return False
-
-        f = open(filename, "r")
-        sha256sum = apt_pkg.sha256sum(f)
-        if sha256sum != self.sha256sum:
-            return False
-
-        return True
-
-__all__.append('ChangePendingFile')
-
-################################################################################
-
-class ChangePendingSource(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<ChangePendingSource %s>' % self.change_pending_source_id
-
-__all__.append('ChangePendingSource')
-
 ################################################################################
 
 class Component(ORMObject):
@@ -1103,7 +657,7 @@ class Component(ORMObject):
 
     def properties(self):
         return ['component_name', 'component_id', 'description', \
-            'location_count', 'meets_dfsg', 'overrides_count']
+            'meets_dfsg', 'overrides_count']
 
     def not_null_constraints(self):
         return ['component_name']
@@ -1134,6 +688,34 @@ def get_component(component, session=None):
 
 __all__.append('get_component')
 
+@session_wrapper
+def get_mapped_component(component_name, session=None):
+    """get component after mappings
+
+    Evaluate component mappings from ComponentMappings in dak.conf for the
+    given component name.
+
+    @todo: ansgar wants to get rid of this. It's currently only used for
+           the security archive
+
+    @type  component_name: str
+    @param component_name: component name
+
+    @param session: database session
+
+    @rtype:  L{daklib.dbconn.Component} or C{None}
+    @return: component after applying maps or C{None}
+    """
+    cnf = Config()
+    for m in cnf.value_list("ComponentMappings"):
+        (src, dst) = m.split()
+        if component_name == src:
+            component_name = dst
+    component = session.query(Component).filter_by(component_name=component_name).first()
+    return component
+
+__all__.append('get_mapped_component')
+
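A minimal usage sketch for the helper above; the mapping shown is hypothetical, real entries live in the ComponentMappings list of dak.conf, one "<source> <target>" pair per entry:

# dak.conf (hypothetical entry):
#   ComponentMappings { "foo main"; };
component = get_mapped_component("foo")     # mapping applies, looks up "main"
component = get_mapped_component("main")    # no mapping, looks up "main" directly
# result is a Component object, or None if the target component does not exist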
 @session_wrapper
 def get_component_names(session=None):
     """
@@ -1411,26 +993,40 @@ __all__.append('ExternalOverride')
 ################################################################################
 
 class PoolFile(ORMObject):
-    def __init__(self, filename = None, location = None, filesize = -1, \
+    def __init__(self, filename = None, filesize = -1, \
         md5sum = None):
         self.filename = filename
-        self.location = location
         self.filesize = filesize
         self.md5sum = md5sum
 
     @property
     def fullpath(self):
-        return os.path.join(self.location.path, self.filename)
+        session = DBConn().session().object_session(self)
+        af = session.query(ArchiveFile).join(Archive) \
+                    .filter(ArchiveFile.file == self) \
+                    .order_by(Archive.tainted.desc()).first()
+        return af.path
+
+    @property
+    def component(self):
+        session = DBConn().session().object_session(self)
+        component_id = session.query(ArchiveFile.component_id).filter(ArchiveFile.file == self) \
+                              .group_by(ArchiveFile.component_id).one()
+        return session.query(Component).get(component_id)
+
+    @property
+    def basename(self):
+        return os.path.basename(self.filename)
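With locations gone, a pool file's path and component are now resolved through files_archive_map; a minimal sketch of the new accessors (filename invented, assumes an open session and an existing row):

pf = session.query(PoolFile).filter_by(filename='pool/main/d/dak/dak_1.0.dsc').first()
print(pf.fullpath)                    # path of one archive copy (the ordering above prefers tainted archives)
print(pf.component.component_name)    # component shared by the file's archive entries
print(pf.basename)                    # 'dak_1.0.dsc'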
 
     def is_valid(self, filesize = -1, md5sum = None):
         return self.filesize == long(filesize) and self.md5sum == md5sum
 
     def properties(self):
         return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
-            'sha256sum', 'location', 'source', 'binary', 'last_used']
+            'sha256sum', 'source', 'binary', 'last_used']
 
     def not_null_constraints(self):
-        return ['filename', 'md5sum', 'location']
+        return ['filename', 'md5sum']
 
     def identical_to(self, filename):
         """
@@ -1452,60 +1048,6 @@ class PoolFile(ORMObject):
 
 __all__.append('PoolFile')
 
-@session_wrapper
-def check_poolfile(filename, filesize, md5sum, location_id, session=None):
-    """
-    Returns a tuple:
-    (ValidFileFound [boolean], PoolFile object or None)
-
-    @type filename: string
-    @param filename: the filename of the file to check against the DB
-
-    @type filesize: int
-    @param filesize: the size of the file to check against the DB
-
-    @type md5sum: string
-    @param md5sum: the md5sum of the file to check against the DB
-
-    @type location_id: int
-    @param location_id: the id of the location to look in
-
-    @rtype: tuple
-    @return: Tuple of length 2.
-                 - If valid pool file found: (C{True}, C{PoolFile object})
-                 - If valid pool file not found:
-                     - (C{False}, C{None}) if no file found
-                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
-    """
-
-    poolfile = session.query(Location).get(location_id). \
-        files.filter_by(filename=filename).first()
-    valid = False
-    if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
-        valid = True
-
-    return (valid, poolfile)
-
-__all__.append('check_poolfile')
-
-# TODO: the implementation can trivially be inlined at the place where the
-# function is called
-@session_wrapper
-def get_poolfile_by_id(file_id, session=None):
-    """
-    Returns a PoolFile object or None for the given id
-
-    @type file_id: int
-    @param file_id: the id of the file to look for
-
-    @rtype: PoolFile or None
-    @return: either the PoolFile object or None
-    """
-
-    return session.query(PoolFile).get(file_id)
-
-__all__.append('get_poolfile_by_id')
-
 @session_wrapper
 def get_poolfile_like_name(filename, session=None):
     """
@@ -1525,39 +1067,6 @@ def get_poolfile_like_name(filename, session=None):
 
 __all__.append('get_poolfile_like_name')
 
-@session_wrapper
-def add_poolfile(filename, datadict, location_id, session=None):
-    """
-    Add a new file to the pool
-
-    @type filename: string
-    @param filename: filename
-
-    @type datadict: dict
-    @param datadict: dict with needed data
-
-    @type location_id: int
-    @param location_id: database id of the location
-
-    @rtype: PoolFile
-    @return: the PoolFile object created
-    """
-    poolfile = PoolFile()
-    poolfile.filename = filename
-    poolfile.filesize = datadict["size"]
-    poolfile.md5sum = datadict["md5sum"]
-    poolfile.sha1sum = datadict["sha1sum"]
-    poolfile.sha256sum = datadict["sha256sum"]
-    poolfile.location_id = location_id
-
-    session.add(poolfile)
-    # Flush to get a file id (NB: This is not a commit)
-    session.flush()
-
-    return poolfile
-
-__all__.append('add_poolfile')
-
 ################################################################################
 
 class Fingerprint(ORMObject):
@@ -1683,7 +1192,7 @@ class Keyring(object):
 
         k = os.popen(self.gpg_invocation % keyring, "r")
         key = None
-        signingkey = False
+        need_fingerprint = False
 
         for line in k:
             field = line.split(":")
@@ -1694,18 +1203,16 @@ class Keyring(object):
                 if "@" in addr:
                     self.keys[key]["email"] = addr
                     self.keys[key]["name"] = name
-                self.keys[key]["fingerprints"] = []
-                signingkey = True
-            elif key and field[0] == "sub" and len(field) >= 12:
-                signingkey = ("s" in field[11])
+                need_fingerprint = True
             elif key and field[0] == "uid":
                 (name, addr) = self.parse_address(field[9])
                 if "email" not in self.keys[key] and "@" in addr:
                     self.keys[key]["email"] = addr
                     self.keys[key]["name"] = name
-            elif signingkey and field[0] == "fpr":
-                self.keys[key]["fingerprints"].append(field[9])
+            elif need_fingerprint and field[0] == "fpr":
+                self.keys[key]["fingerprints"] = [field[9]]
                 self.fpr_lookup[field[9]] = key
+                need_fingerprint = False
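The loop above walks gpg's --with-colons output; a short sketch of the records it consumes (all values invented) and of the effect of the change, which keeps only the first fpr record after each pub:

sample = [
    "pub:u:4096:1:DEADBEEFDEADBEEF:1356994800:::u:::scESC:",
    "fpr:::::::::0123456789ABCDEF0123456789ABCDEF01234567:",
    "uid:u::::1356994800::HASH::Jane Developer <jane@debian.org>:",
]
for line in sample:
    field = line.split(":")
    # field[0] is the record type; field[9] carries the uid or the full
    # fingerprint, so only the primary fingerprint is recorded per key now.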
 
     def import_users_from_ldap(self, session):
         import ldap
@@ -1713,8 +1220,22 @@ class Keyring(object):
 
         LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
         LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
+        ca_cert_file = cnf.get('Import-LDAP-Fingerprints::CACertFile')
 
         l = ldap.open(LDAPServer)
+
+        if ca_cert_file:
+            # TODO: This should request a new context and use
+            # connection-specific options (i.e. "l.set_option(...)")
+
+            # Request a new TLS context. If there was already one, libldap
+            # would not change the TLS options (like which CAs to trust).
+            #l.set_option(ldap.OPT_X_TLS_NEWCTX, True)
+            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_HARD)
+            #ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, None)
+            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, ca_cert_file)
+            l.start_tls_s()
+
         l.simple_bind_s("","")
         Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
                "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
@@ -1822,17 +1343,6 @@ __all__.append('get_primary_keyring_path')
 
 ################################################################################
 
-class KeyringACLMap(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<KeyringACLMap %s>' % self.keyring_acl_map_id
-
-__all__.append('KeyringACLMap')
-
-################################################################################
-
 class DBChange(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1840,19 +1350,6 @@ class DBChange(object):
     def __repr__(self):
         return '<DBChange %s>' % self.changesname
 
-    def clean_from_queue(self):
-        session = DBConn().session().object_session(self)
-
-        # Remove changes_pool_files entries
-        self.poolfiles = []
-
-        # Remove changes_pending_files references
-        self.files = []
-
-        # Clear out of queue
-        self.in_queue = None
-        self.approved_for_id = None
-
 __all__.append('DBChange')
 
 @session_wrapper
@@ -1882,58 +1379,6 @@ __all__.append('get_dbchange')
 
 ################################################################################
 
-class Location(ORMObject):
-    def __init__(self, path = None, component = None):
-        self.path = path
-        self.component = component
-        # the column 'type' should go away, see comment at mapper
-        self.archive_type = 'pool'
-
-    def properties(self):
-        return ['path', 'location_id', 'archive_type', 'component', \
-            'files_count']
-
-    def not_null_constraints(self):
-        return ['path', 'archive_type']
-
-__all__.append('Location')
-
-@session_wrapper
-def get_location(location, component=None, archive=None, session=None):
-    """
-    Returns Location object for the given combination of location, component
-    and archive
-
-    @type location: string
-    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
-
-    @type component: string
-    @param component: the component name (if None, no restriction applied)
-
-    @type archive: string
-    @param archive: the archive name (if None, no restriction applied)
-
-    @rtype: Location / None
-    @return: Either a Location object or None if one can't be found
-    """
-
-    q = session.query(Location).filter_by(path=location)
-
-    if archive is not None:
-        q = q.join(Archive).filter_by(archive_name=archive)
-
-    if component is not None:
-        q = q.join(Component).filter_by(component_name=component)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_location')
-
-################################################################################
-
 class Maintainer(ORMObject):
     def __init__(self, name = None):
         self.name = name
@@ -2015,7 +1460,7 @@ class NewComment(object):
 __all__.append('NewComment')
 
 @session_wrapper
-def has_new_comment(package, version, session=None):
+def has_new_comment(policy_queue, package, version, session=None):
     """
     Returns true if the given combination of C{package}, C{version} has a comment.
 
@@ -2033,7 +1478,7 @@ def has_new_comment(package, version, session=None):
     @return: true/false
     """
 
-    q = session.query(NewComment)
+    q = session.query(NewComment).filter_by(policy_queue=policy_queue)
     q = q.filter_by(package=package)
     q = q.filter_by(version=version)
 
@@ -2042,7 +1487,7 @@ def has_new_comment(package, version, session=None):
 __all__.append('has_new_comment')
 
 @session_wrapper
-def get_new_comments(package=None, version=None, comment_id=None, session=None):
+def get_new_comments(policy_queue, package=None, version=None, comment_id=None, session=None):
     """
     Returns (possibly empty) list of NewComment objects for the given
     parameters
@@ -2064,7 +1509,7 @@ def get_new_comments(package=None, version=None, comment_id=None, session=None):
     @return: A (possibly empty) list of NewComment objects will be returned
     """
 
-    q = session.query(NewComment)
+    q = session.query(NewComment).filter_by(policy_queue=policy_queue)
     if package is not None: q = q.filter_by(package=package)
     if version is not None: q = q.filter_by(version=version)
     if comment_id is not None: q = q.filter_by(comment_id=comment_id)
@@ -2217,30 +1662,30 @@ def get_policy_queue(queuename, session=None):
 
 __all__.append('get_policy_queue')
 
-@session_wrapper
-def get_policy_queue_from_path(pathname, session=None):
-    """
-    Returns PolicyQueue object for given C{path name}
+################################################################################
 
-    @type pathname: string
-    @param pathname: The path
+class PolicyQueueUpload(object):
+    def __cmp__(self, other):
+        ret = cmp(self.changes.source, other.changes.source)
+        if ret == 0:
+            ret = apt_pkg.version_compare(self.changes.version, other.changes.version)
+        if ret == 0:
+            if self.source is not None and other.source is None:
+                ret = -1
+            elif self.source is None and other.source is not None:
+                ret = 1
+        if ret == 0:
+            ret = cmp(self.changes.changesname, other.changes.changesname)
+        return ret
+
+__all__.append('PolicyQueueUpload')
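The __cmp__ above gives queue uploads a stable ordering; a short usage sketch (policy_queue stands for any PolicyQueue instance; the 'uploads' backref is set up in the mapper further down):

for upload in sorted(policy_queue.uploads):
    # ordered by source name, then version, source uploads before binary-only
    # ones, then by .changes file name
    print(upload.changes.changesname)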
 
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
+################################################################################
 
-    @rtype: PolicyQueue
-    @return: PolicyQueue object for the given queue
-    """
+class PolicyQueueByhandFile(object):
+    pass
 
-    q = session.query(PolicyQueue).filter_by(path=pathname)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_policy_queue_from_path')
+__all__.append('PolicyQueueByhandFile')
 
 ################################################################################
 
@@ -2391,6 +1836,26 @@ __all__.append('get_sections')
 
 ################################################################################
 
+class SignatureHistory(ORMObject):
+    @classmethod
+    def from_signed_file(cls, signed_file):
+        """signature history entry from signed file
+
+        @type  signed_file: L{daklib.gpg.SignedFile}
+        @param signed_file: signed file
+
+        @rtype: L{SignatureHistory}
+        """
+        self = cls()
+        self.fingerprint = signed_file.primary_fingerprint
+        self.signature_timestamp = signed_file.signature_timestamp
+        self.contents_sha1 = signed_file.contents_sha1()
+        return self
+
+__all__.append('SignatureHistory')
+
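A minimal sketch of recording such an entry, assuming signed_file is a daklib.gpg.SignedFile (as in the docstring above) and session is an open SQLAlchemy session:

entry = SignatureHistory.from_signed_file(signed_file)
session.add(entry)
session.commit()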
+################################################################################
+
 class SrcContents(ORMObject):
     def __init__(self, file = None, source = None):
         self.file = file
@@ -2459,13 +1924,14 @@ class Dak822(Deb822):
 
 class DBSource(ORMObject):
     def __init__(self, source = None, version = None, maintainer = None, \
-        changedby = None, poolfile = None, install_date = None):
+        changedby = None, poolfile = None, install_date = None, fingerprint = None):
         self.source = source
         self.version = version
         self.maintainer = maintainer
         self.changedby = changedby
         self.poolfile = poolfile
         self.install_date = install_date
+        self.fingerprint = fingerprint
 
     @property
     def pkid(self):
@@ -2478,7 +1944,7 @@ class DBSource(ORMObject):
 
     def not_null_constraints(self):
         return ['source', 'version', 'install_date', 'maintainer', \
-            'changedby', 'poolfile', 'install_date']
+            'changedby', 'poolfile']
 
     def read_control_fields(self):
         '''
@@ -2493,9 +1959,6 @@ class DBSource(ORMObject):
 
     metadata = association_proxy('key', 'value')
 
-    def get_component_name(self):
-        return self.poolfile.location.component.component_name
-
     def scan_contents(self):
         '''
         Returns a set of names for non directories. The path names are
@@ -2679,213 +2142,6 @@ def import_metadata_into_db(obj, session=None):
 
 __all__.append('import_metadata_into_db')
 
-
-################################################################################
-
-def split_uploaders(uploaders_list):
-    '''
-    Split the Uploaders field into the individual uploaders and yield each of
-    them. Beware: email addresses might contain commas.
-    '''
-    import re
-    for uploader in re.sub(">[ ]*,", ">\t", uploaders_list).split("\t"):
-        yield uploader.strip()
-
-@session_wrapper
-def add_dsc_to_db(u, filename, session=None):
-    entry = u.pkg.files[filename]
-    source = DBSource()
-    pfs = []
-
-    source.source = u.pkg.dsc["source"]
-    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
-    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
-    # If Changed-By isn't available, fall back to maintainer
-    if u.pkg.changes.has_key("changed-by"):
-        source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
-    else:
-        source.changedby_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
-    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    source.install_date = datetime.now().date()
-
-    dsc_component = entry["component"]
-    dsc_location_id = entry["location id"]
-
-    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
-
-    # Set up a new poolfile if necessary
-    if not entry.has_key("files id") or not entry["files id"]:
-        filename = entry["pool name"] + filename
-        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
-        session.flush()
-        pfs.append(poolfile)
-        entry["files id"] = poolfile.file_id
-
-    source.poolfile_id = entry["files id"]
-    session.add(source)
-
-    suite_names = u.pkg.changes["distribution"].keys()
-    source.suites = session.query(Suite). \
-        filter(Suite.suite_name.in_(suite_names)).all()
-
-    # Add the source files to the DB (files and dsc_files)
-    dscfile = DSCFile()
-    dscfile.source_id = source.source_id
-    dscfile.poolfile_id = entry["files id"]
-    session.add(dscfile)
-
-    for dsc_file, dentry in u.pkg.dsc_files.items():
-        df = DSCFile()
-        df.source_id = source.source_id
-
-        # If the .orig tarball is already in the pool, its
-        # files id is stored in dsc_files by check_dsc().
-        files_id = dentry.get("files id", None)
-
-        # Find the entry in the files hash
-        # TODO: Bail out here properly
-        dfentry = None
-        for f, e in u.pkg.files.items():
-            if f == dsc_file:
-                dfentry = e
-                break
-
-        if files_id is None:
-            filename = dfentry["pool name"] + dsc_file
-
-            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
-            # FIXME: needs to check for -1/-2 and or handle exception
-            if found and obj is not None:
-                files_id = obj.file_id
-                pfs.append(obj)
-
-            # If still not found, add it
-            if files_id is None:
-                # HACK: Force sha1sum etc into dentry
-                dentry["sha1sum"] = dfentry["sha1sum"]
-                dentry["sha256sum"] = dfentry["sha256sum"]
-                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
-                pfs.append(poolfile)
-                files_id = poolfile.file_id
-        else:
-            poolfile = get_poolfile_by_id(files_id, session)
-            if poolfile is None:
-                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
-            pfs.append(poolfile)
-
-        df.poolfile_id = files_id
-        session.add(df)
-
-    # Add the src_uploaders to the DB
-    session.flush()
-    session.refresh(source)
-    source.uploaders = [source.maintainer]
-    if u.pkg.dsc.has_key("uploaders"):
-        for up in split_uploaders(u.pkg.dsc["uploaders"]):
-            source.uploaders.append(get_or_set_maintainer(up, session))
-
-    session.flush()
-
-    return source, dsc_component, dsc_location_id, pfs
-
-__all__.append('add_dsc_to_db')
-
-@session_wrapper
-def add_deb_to_db(u, filename, session=None):
-    """
-    Contrary to what you might expect, this routine deals with both
-    debs and udebs.  That info is in 'dbtype', whilst 'type' is
-    'deb' for both of them
-    """
-    cnf = Config()
-    entry = u.pkg.files[filename]
-
-    bin = DBBinary()
-    bin.package = entry["package"]
-    bin.version = entry["version"]
-    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
-    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
-    bin.binarytype = entry["dbtype"]
-
-    # Find poolfile id
-    filename = entry["pool name"] + filename
-    fullpath = os.path.join(cnf["Dir::Pool"], filename)
-    if not entry.get("location id", None):
-        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
-
-    if entry.get("files id", None):
-        poolfile = get_poolfile_by_id(entry["files id"])
-        bin.poolfile_id = entry["files id"]
-    else:
-        poolfile = add_poolfile(filename, entry, entry["location id"], session)
-        bin.poolfile_id = entry["files id"] = poolfile.file_id
-
-    # Find source id
-    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
-
-    # If we couldn't find anything and the upload contains Arch: source,
-    # fall back to trying the source package, source version uploaded
-    # This maintains backwards compatibility with previous dak behaviour
-    # and deals with slightly broken binary debs which don't properly
-    # declare their source package name
-    if len(bin_sources) == 0:
-        if u.pkg.changes["architecture"].has_key("source") \
-           and u.pkg.dsc.has_key("source") and u.pkg.dsc.has_key("version"):
-            bin_sources = get_sources_from_name(u.pkg.dsc["source"], u.pkg.dsc["version"], session=session)
-
-    # If we couldn't find a source here, we reject
-    # TODO: Fix this so that it doesn't kill process-upload and instead just
-    #       performs a reject.  To be honest, we should probably spot this
-    #       *much* earlier than here
-    if len(bin_sources) != 1:
-        raise NoSourceFieldError("Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                  (bin.package, bin.version, entry["architecture"],
-                                   filename, bin.binarytype, u.pkg.changes["fingerprint"]))
-
-    bin.source_id = bin_sources[0].source_id
-
-    if entry.has_key("built-using"):
-        for srcname, version in entry["built-using"]:
-            exsources = get_sources_from_name(srcname, version, session=session)
-            if len(exsources) != 1:
-                raise NoSourceFieldError("Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                          (srcname, version, bin.package, bin.version, entry["architecture"],
-                                           filename, bin.binarytype, u.pkg.changes["fingerprint"]))
-
-            bin.extra_sources.append(exsources[0])
-
-    # Add and flush object so it has an ID
-    session.add(bin)
-
-    suite_names = u.pkg.changes["distribution"].keys()
-    bin.suites = session.query(Suite). \
-        filter(Suite.suite_name.in_(suite_names)).all()
-
-    session.flush()
-
-    # Deal with contents - disabled for now
-    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
-    #if not contents:
-    #    print "REJECT\nCould not determine contents of package %s" % bin.package
-    #    session.rollback()
-    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
-
-    return bin, poolfile
-
-__all__.append('add_deb_to_db')
-
-################################################################################
-
-class SourceACL(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<SourceACL %s>' % self.source_acl_id
-
-__all__.append('SourceACL')
-
 ################################################################################
 
 class SrcFormat(object):
@@ -2997,6 +2253,10 @@ class Suite(ORMObject):
         else:
             return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()
 
+    @property
+    def path(self):
+        return os.path.join(self.archive.path, 'dists', self.suite_name)
+
 __all__.append('Suite')
 
 @session_wrapper
@@ -3133,17 +2393,6 @@ __all__.append('get_uid_from_fingerprint')
 
 ################################################################################
 
-class UploadBlock(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
-
-__all__.append('UploadBlock')
-
-################################################################################
-
 class MetadataKey(ORMObject):
     def __init__(self, key = None):
         self.key = key
@@ -3267,35 +2516,29 @@ class DBConn(object):
 
     def __setuptables(self):
         tables = (
+            'acl',
+            'acl_architecture_map',
+            'acl_fingerprint_map',
+            'acl_per_source',
             'architecture',
             'archive',
             'bin_associations',
             'bin_contents',
             'binaries',
             'binaries_metadata',
-            'binary_acl',
-            'binary_acl_map',
             'build_queue',
-            'build_queue_files',
-            'build_queue_policy_files',
             'changelogs_text',
             'changes',
             'component',
+            'component_suite',
             'config',
-            'changes_pending_binaries',
-            'changes_pending_files',
-            'changes_pending_source',
-            'changes_pending_files_map',
-            'changes_pending_source_files',
-            'changes_pool_files',
             'dsc_files',
             'external_overrides',
             'extra_src_references',
             'files',
+            'files_archive_map',
             'fingerprint',
             'keyrings',
-            'keyring_acl_map',
-            'location',
             'maintainer',
             'metadata_keys',
             'new_comments',
@@ -3303,21 +2546,24 @@ class DBConn(object):
             'override',
             'override_type',
             'policy_queue',
+            'policy_queue_upload',
+            'policy_queue_upload_binaries_map',
+            'policy_queue_byhand_file',
             'priority',
             'section',
+            'signature_history',
             'source',
-            'source_acl',
             'source_metadata',
             'src_associations',
             'src_contents',
             'src_format',
             'src_uploaders',
             'suite',
+            'suite_acl_map',
             'suite_architectures',
             'suite_build_queue_copy',
             'suite_src_formats',
             'uid',
-            'upload_blocks',
             'version_check',
         )
 
@@ -3327,7 +2573,6 @@ class DBConn(object):
             'any_associations_source',
             'bin_associations_binaries',
             'binaries_suite_arch',
-            'binfiles_suite_component_arch',
             'changelogs',
             'file_arch_suite',
             'newest_all_associations',
@@ -3361,21 +2606,33 @@ class DBConn(object):
                    backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
             extension = validator)
 
+        mapper(ACL, self.tbl_acl,
+               properties = dict(
+                architectures = relation(Architecture, secondary=self.tbl_acl_architecture_map, collection_class=set),
+                fingerprints = relation(Fingerprint, secondary=self.tbl_acl_fingerprint_map, collection_class=set),
+                match_keyring = relation(Keyring, primaryjoin=(self.tbl_acl.c.match_keyring_id == self.tbl_keyrings.c.id)),
+                per_source = relation(ACLPerSource, collection_class=set),
+                ))
+
+        mapper(ACLPerSource, self.tbl_acl_per_source,
+               properties = dict(
+                acl = relation(ACL),
+                fingerprint = relation(Fingerprint, primaryjoin=(self.tbl_acl_per_source.c.fingerprint_id == self.tbl_fingerprint.c.id)),
+                created_by = relation(Fingerprint, primaryjoin=(self.tbl_acl_per_source.c.created_by_id == self.tbl_fingerprint.c.id)),
+                ))
+
         mapper(Archive, self.tbl_archive,
                properties = dict(archive_id = self.tbl_archive.c.id,
                                  archive_name = self.tbl_archive.c.name))
 
-        mapper(BuildQueue, self.tbl_build_queue,
-               properties = dict(queue_id = self.tbl_build_queue.c.id))
-
-        mapper(BuildQueueFile, self.tbl_build_queue_files,
-               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
-                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))
+        mapper(ArchiveFile, self.tbl_files_archive_map,
+               properties = dict(archive = relation(Archive, backref='files'),
+                                 component = relation(Component),
+                                 file = relation(PoolFile, backref='archives')))
 
-        mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
-               properties = dict(
-                build_queue = relation(BuildQueue, backref='policy_queue_files'),
-                file = relation(ChangePendingFile, lazy='joined')))
+        mapper(BuildQueue, self.tbl_build_queue,
+               properties = dict(queue_id = self.tbl_build_queue.c.id,
+                                 suite = relation(Suite, primaryjoin=(self.tbl_build_queue.c.suite_id==self.tbl_suite.c.id))))
 
         mapper(DBBinary, self.tbl_binaries,
                properties = dict(binary_id = self.tbl_binaries.c.id,
@@ -3388,7 +2645,7 @@ class DBConn(object):
                                  arch_id = self.tbl_binaries.c.architecture,
                                  architecture = relation(Architecture),
                                  poolfile_id = self.tbl_binaries.c.file,
-                                 poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
+                                 poolfile = relation(PoolFile),
                                  binarytype = self.tbl_binaries.c.type,
                                  fingerprint_id = self.tbl_binaries.c.sig_fpr,
                                  fingerprint = relation(Fingerprint),
@@ -3401,14 +2658,6 @@ class DBConn(object):
                                      collection_class=attribute_mapped_collection('key'))),
                 extension = validator)
 
-        mapper(BinaryACL, self.tbl_binary_acl,
-               properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
-
-        mapper(BinaryACLMap, self.tbl_binary_acl_map,
-               properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
-                                 fingerprint = relation(Fingerprint, backref="binary_acl_map"),
-                                 architecture = relation(Architecture)))
-
         mapper(Component, self.tbl_component,
                properties = dict(component_id = self.tbl_component.c.id,
                                  component_name = self.tbl_component.c.name),
@@ -3433,13 +2682,7 @@ class DBConn(object):
 
         mapper(PoolFile, self.tbl_files,
                properties = dict(file_id = self.tbl_files.c.id,
-                                 filesize = self.tbl_files.c.size,
-                                 location_id = self.tbl_files.c.location,
-                                 location = relation(Location,
-                                     # using lazy='dynamic' in the back
-                                     # reference because we have A LOT of
-                                     # files in one location
-                                     backref=backref('files', lazy='dynamic'))),
+                                 filesize = self.tbl_files.c.size),
                 extension = validator)
 
         mapper(Fingerprint, self.tbl_fingerprint,
@@ -3448,19 +2691,16 @@ class DBConn(object):
                                  uid = relation(Uid),
                                  keyring_id = self.tbl_fingerprint.c.keyring,
                                  keyring = relation(Keyring),
-                                 source_acl = relation(SourceACL),
-                                 binary_acl = relation(BinaryACL)),
+                                 acl = relation(ACL)),
                extension = validator)
 
         mapper(Keyring, self.tbl_keyrings,
                properties = dict(keyring_name = self.tbl_keyrings.c.name,
-                                 keyring_id = self.tbl_keyrings.c.id))
+                                 keyring_id = self.tbl_keyrings.c.id,
+                                 acl = relation(ACL, primaryjoin=(self.tbl_keyrings.c.acl_id == self.tbl_acl.c.id))))
 
         mapper(DBChange, self.tbl_changes,
                properties = dict(change_id = self.tbl_changes.c.id,
-                                 poolfiles = relation(PoolFile,
-                                                      secondary=self.tbl_changes_pool_files,
-                                                      backref="changeslinks"),
                                  seen = self.tbl_changes.c.seen,
                                  source = self.tbl_changes.c.source,
                                  binaries = self.tbl_changes.c.binaries,
@@ -3470,54 +2710,7 @@ class DBConn(object):
                                  maintainer = self.tbl_changes.c.maintainer,
                                  changedby = self.tbl_changes.c.changedby,
                                  date = self.tbl_changes.c.date,
-                                 version = self.tbl_changes.c.version,
-                                 files = relation(ChangePendingFile,
-                                                  secondary=self.tbl_changes_pending_files_map,
-                                                  backref="changesfile"),
-                                 in_queue_id = self.tbl_changes.c.in_queue,
-                                 in_queue = relation(PolicyQueue,
-                                                     primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
-                                 approved_for_id = self.tbl_changes.c.approved_for))
-
-        mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
-               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
-
-        mapper(ChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
-                                 filename = self.tbl_changes_pending_files.c.filename,
-                                 size = self.tbl_changes_pending_files.c.size,
-                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
-                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
-                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
-
-        mapper(ChangePendingSource, self.tbl_changes_pending_source,
-               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
-                                 change = relation(DBChange),
-                                 maintainer = relation(Maintainer,
-                                                       primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
-                                 changedby = relation(Maintainer,
-                                                      primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
-                                 fingerprint = relation(Fingerprint),
-                                 source_files = relation(ChangePendingFile,
-                                                         secondary=self.tbl_changes_pending_source_files,
-                                                         backref="pending_sources")))
-
-
-        mapper(KeyringACLMap, self.tbl_keyring_acl_map,
-               properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
-                                 keyring = relation(Keyring, backref="keyring_acl_map"),
-                                 architecture = relation(Architecture)))
-
-        mapper(Location, self.tbl_location,
-               properties = dict(location_id = self.tbl_location.c.id,
-                                 component_id = self.tbl_location.c.component,
-                                 component = relation(Component, backref='location'),
-                                 archive_id = self.tbl_location.c.archive,
-                                 archive = relation(Archive),
-                                 # FIXME: the 'type' column is old cruft and
-                                 # should be removed in the future.
-                                 archive_type = self.tbl_location.c.type),
-               extension = validator)
+                                 version = self.tbl_changes.c.version))
 
         mapper(Maintainer, self.tbl_maintainer,
                properties = dict(maintainer_id = self.tbl_maintainer.c.id,
@@ -3528,7 +2721,8 @@ class DBConn(object):
                 extension = validator)
 
         mapper(NewComment, self.tbl_new_comments,
-               properties = dict(comment_id = self.tbl_new_comments.c.id))
+               properties = dict(comment_id = self.tbl_new_comments.c.id,
+                                 policy_queue = relation(PolicyQueue)))
 
         mapper(Override, self.tbl_override,
                properties = dict(suite_id = self.tbl_override.c.suite,
@@ -3553,7 +2747,23 @@ class DBConn(object):
                                  overridetype_id = self.tbl_override_type.c.id))
 
         mapper(PolicyQueue, self.tbl_policy_queue,
-               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id,
+                                 suite = relation(Suite, primaryjoin=(self.tbl_policy_queue.c.suite_id == self.tbl_suite.c.id))))
+
+        mapper(PolicyQueueUpload, self.tbl_policy_queue_upload,
+               properties = dict(
+                   changes = relation(DBChange),
+                   policy_queue = relation(PolicyQueue, backref='uploads'),
+                   target_suite = relation(Suite),
+                   source = relation(DBSource),
+                   binaries = relation(DBBinary, secondary=self.tbl_policy_queue_upload_binaries_map),
+                ))
+
+        mapper(PolicyQueueByhandFile, self.tbl_policy_queue_byhand_file,
+               properties = dict(
+                   upload = relation(PolicyQueueUpload, backref='byhand'),
+                   )
+               )
 
         mapper(Priority, self.tbl_priority,
                properties = dict(priority_id = self.tbl_priority.c.id))
@@ -3562,12 +2772,14 @@ class DBConn(object):
                properties = dict(section_id = self.tbl_section.c.id,
                                  section=self.tbl_section.c.section))
 
+        mapper(SignatureHistory, self.tbl_signature_history)
+
         mapper(DBSource, self.tbl_source,
                properties = dict(source_id = self.tbl_source.c.id,
                                  version = self.tbl_source.c.version,
                                  maintainer_id = self.tbl_source.c.maintainer,
                                  poolfile_id = self.tbl_source.c.file,
-                                 poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
+                                 poolfile = relation(PoolFile),
                                  fingerprint_id = self.tbl_source.c.sig_fpr,
                                  fingerprint = relation(Fingerprint),
                                  changedby_id = self.tbl_source.c.changedby,
@@ -3581,20 +2793,23 @@ class DBConn(object):
                                      collection_class=attribute_mapped_collection('key'))),
                extension = validator)
 
-        mapper(SourceACL, self.tbl_source_acl,
-               properties = dict(source_acl_id = self.tbl_source_acl.c.id))
-
         mapper(SrcFormat, self.tbl_src_format,
                properties = dict(src_format_id = self.tbl_src_format.c.id,
                                  format_name = self.tbl_src_format.c.format_name))
 
         mapper(Suite, self.tbl_suite,
                properties = dict(suite_id = self.tbl_suite.c.id,
-                                 policy_queue = relation(PolicyQueue),
+                                 policy_queue = relation(PolicyQueue, primaryjoin=(self.tbl_suite.c.policy_queue_id == self.tbl_policy_queue.c.id)),
+                                 new_queue = relation(PolicyQueue, primaryjoin=(self.tbl_suite.c.new_queue_id == self.tbl_policy_queue.c.id)),
                                  copy_queues = relation(BuildQueue,
                                      secondary=self.tbl_suite_build_queue_copy),
                                  srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
-                                     backref=backref('suites', lazy='dynamic'))),
+                                     backref=backref('suites', lazy='dynamic')),
+                                 archive = relation(Archive, backref='suites'),
+                                 acls = relation(ACL, secondary=self.tbl_suite_acl_map, collection_class=set),
+                                 components = relation(Component, secondary=self.tbl_component_suite,
+                                                   order_by=self.tbl_component.c.ordering,
+                                                   backref=backref('suites'))),
                 extension = validator)
 
         mapper(Uid, self.tbl_uid,
@@ -3602,11 +2817,6 @@ class DBConn(object):
                                  fingerprint = relation(Fingerprint)),
                extension = validator)
 
-        mapper(UploadBlock, self.tbl_upload_blocks,
-               properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
-                                 fingerprint = relation(Fingerprint, backref="uploadblocks"),
-                                 uid = relation(Uid, backref="uploadblocks")))
-
         mapper(BinContents, self.tbl_bin_contents,
             properties = dict(
                 binary = relation(DBBinary,
@@ -3670,7 +2880,7 @@ class DBConn(object):
             engine_args['pool_size'] = int(cnf['DB::PoolSize'])
         if cnf.has_key('DB::MaxOverflow'):
             engine_args['max_overflow'] = int(cnf['DB::MaxOverflow'])
-        if sa_major_version == '0.6' and cnf.has_key('DB::Unicode') and \
+        if sa_major_version != '0.5' and cnf.has_key('DB::Unicode') and \
             cnf['DB::Unicode'] == 'false':
             engine_args['use_native_unicode'] = False
 
@@ -3722,5 +2932,3 @@ class DBConn(object):
         return session
 
 __all__.append('DBConn')
-
-
old mode 100755 (executable)
new mode 100644 (file)
index 3b816ee..2f080e6
@@ -27,7 +27,7 @@ Helper code for file writing with optional compression.
 
 from daklib.config import Config
 
-from subprocess import check_call
+from daklib.daksubprocess import check_call
 
 import os, os.path
 
@@ -48,9 +48,7 @@ class BaseFileWriter(object):
         self.gzip = 'gzip' in compression
         self.bzip2 = 'bzip2' in compression
         self.xz = 'xz' in compression
-        root_dir = Config()['Dir::Root']
-        relative_dir = template % keywords
-        self.path = os.path.join(root_dir, relative_dir)
+        self.path = template % keywords
 
     def open(self):
         '''
@@ -102,9 +100,9 @@ class BinaryContentsFileWriter(BaseFileWriter):
         }
         flags.update(keywords)
         if flags['debtype'] == 'deb':
-            template = "dists/%(suite)s/%(component)s/Contents-%(architecture)s"
+            template = "%(archive)s/dists/%(suite)s/%(component)s/Contents-%(architecture)s"
         else: # udeb
-            template = "dists/%(suite)s/%(component)s/Contents-udeb-%(architecture)s"
+            template = "%(archive)s/dists/%(suite)s/%(component)s/Contents-udeb-%(architecture)s"
         BaseFileWriter.__init__(self, template, **flags)
 
 class SourceContentsFileWriter(BaseFileWriter):
@@ -117,7 +115,7 @@ class SourceContentsFileWriter(BaseFileWriter):
             'compression': ['gzip'],
         }
         flags.update(keywords)
-        template = "dists/%(suite)s/%(component)s/Contents-source"
+        template = "%(archive)s/dists/%(suite)s/%(component)s/Contents-source"
         BaseFileWriter.__init__(self, template, **flags)
 
 class PackagesFileWriter(BaseFileWriter):
@@ -131,9 +129,9 @@ class PackagesFileWriter(BaseFileWriter):
         }
         flags.update(keywords)
         if flags['debtype'] == 'deb':
-            template = "dists/%(suite)s/%(component)s/binary-%(architecture)s/Packages"
+            template = "%(archive)s/dists/%(suite)s/%(component)s/binary-%(architecture)s/Packages"
         else: # udeb
-            template = "dists/%(suite)s/%(component)s/debian-installer/binary-%(architecture)s/Packages"
+            template = "%(archive)s/dists/%(suite)s/%(component)s/debian-installer/binary-%(architecture)s/Packages"
         BaseFileWriter.__init__(self, template, **flags)
 
 class SourcesFileWriter(BaseFileWriter):
@@ -146,7 +144,7 @@ class SourcesFileWriter(BaseFileWriter):
             'compression': ['gzip', 'bzip2'],
         }
         flags.update(keywords)
-        template = "dists/%(suite)s/%(component)s/source/Sources"
+        template = "%(archive)s/dists/%(suite)s/%(component)s/source/Sources"
         BaseFileWriter.__init__(self, template, **flags)
 
 class TranslationFileWriter(BaseFileWriter):
@@ -160,5 +158,5 @@ class TranslationFileWriter(BaseFileWriter):
             'language':     'en',
         }
         flags.update(keywords)
-        template = "dists/%(suite)s/%(component)s/i18n/Translation-%(language)s"
+        template = "%(archive)s/dists/%(suite)s/%(component)s/i18n/Translation-%(language)s"
         super(TranslationFileWriter, self).__init__(template, **flags)
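For illustration, this is how one of the archive-prefixed templates expands now that BaseFileWriter no longer prepends Dir::Root; the keyword values are invented:

template = "%(archive)s/dists/%(suite)s/%(component)s/binary-%(architecture)s/Packages"
keywords = {
    'archive': '/srv/ftp-master.debian.org/ftp',   # hypothetical archive root
    'suite': 'unstable',
    'component': 'main',
    'architecture': 'amd64',
}
print template % keywords
# /srv/ftp-master.debian.org/ftp/dists/unstable/main/binary-amd64/Packages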
old mode 100755 (executable)
new mode 100644 (file)
diff --git a/daklib/fstransactions.py b/daklib/fstransactions.py
new file mode 100644 (file)
index 0000000..eb4874a
--- /dev/null
@@ -0,0 +1,223 @@
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Transactions for filesystem actions
+"""
+
+import errno
+import os
+import shutil
+
+class _FilesystemAction(object):
+    @property
+    def temporary_name(self):
+        raise NotImplementedError()
+
+    def check_for_temporary(self):
+        try:
+            if os.path.exists(self.temporary_name):
+                raise IOError("Temporary file '{0}' already exists.".format(self.temporary_name))
+        except NotImplementedError:
+            pass
+
+class _FilesystemCopyAction(_FilesystemAction):
+    def __init__(self, source, destination, link=True, symlink=False, mode=None):
+        self.destination = destination
+        self.need_cleanup = False
+
+        dirmode = 0o2755
+        if mode is not None:
+            dirmode = 0o2700 | mode
+            # Allow +x for group and others if they have +r.
+            if dirmode & 0o0040:
+                dirmode = dirmode | 0o0010
+            if dirmode & 0o0004:
+                dirmode = dirmode | 0o0001
+
+        self.check_for_temporary()
+        destdir = os.path.dirname(self.destination)
+        if not os.path.exists(destdir):
+            os.makedirs(destdir, dirmode)
+        if symlink:
+            os.symlink(source, self.destination)
+        elif link:
+            try:
+                os.link(source, self.destination)
+            except OSError:
+                shutil.copy2(source, self.destination)
+        else:
+            shutil.copy2(source, self.destination)
+
+        self.need_cleanup = True
+        if mode is not None:
+            os.chmod(self.destination, mode)
+
+    @property
+    def temporary_name(self):
+        return self.destination
+
+    def commit(self):
+        pass
+
+    def rollback(self):
+        if self.need_cleanup:
+            os.unlink(self.destination)
+            self.need_cleanup = False
+
+class _FilesystemUnlinkAction(_FilesystemAction):
+    def __init__(self, path):
+        self.path = path
+        self.need_cleanup = False
+
+        self.check_for_temporary()
+        os.rename(self.path, self.temporary_name)
+        self.need_cleanup = True
+
+    @property
+    def temporary_name(self):
+        return "{0}.dak-rm".format(self.path)
+
+    def commit(self):
+        if self.need_cleanup:
+            os.unlink(self.temporary_name)
+            self.need_cleanup = False
+
+    def rollback(self):
+        if self.need_cleanup:
+            os.rename(self.temporary_name, self.path)
+            self.need_cleanup = False
+
+class _FilesystemCreateAction(_FilesystemAction):
+    def __init__(self, path):
+        self.path = path
+        self.need_cleanup = True
+
+    @property
+    def temporary_name(self):
+        return self.path
+
+    def commit(self):
+        pass
+
+    def rollback(self):
+        if self.need_cleanup:
+            os.unlink(self.path)
+            self.need_cleanup = False
+
+class FilesystemTransaction(object):
+    """transactions for filesystem actions"""
+    def __init__(self):
+        self.actions = []
+
+    def copy(self, source, destination, link=False, symlink=False, mode=None):
+        """copy C{source} to C{destination}
+
+        @type  source: str
+        @param source: source file
+
+        @type  destination: str
+        @param destination: destination file
+
+        @type  link: bool
+        @param link: try hardlinking, falling back to copying
+
+        @type  symlink: bool
+        @param symlink: create a symlink instead of copying
+
+        @type  mode: int
+        @param mode: permissions to change C{destination} to
+        """
+        if isinstance(mode, str) or isinstance(mode, unicode):
+            mode = int(mode, 8)
+
+        self.actions.append(_FilesystemCopyAction(source, destination, link=link, symlink=symlink, mode=mode))
+
+    def move(self, source, destination, mode=None):
+        """move C{source} to C{destination}
+
+        @type  source: str
+        @param source: source file
+
+        @type  destination: str
+        @param destination: destination file
+
+        @type  mode: int
+        @param mode: permissions to change C{destination} to
+        """
+        self.copy(source, destination, link=True, mode=mode)
+        self.unlink(source)
+
+    def unlink(self, path):
+        """unlink C{path}
+
+        @type  path: str
+        @param path: file to unlink
+        """
+        self.actions.append(_FilesystemUnlinkAction(path))
+
+    def create(self, path, mode=None):
+        """create C{path} and return file handle
+
+        @type  path: str
+        @param path: file to create
+
+        @type  mode: int
+        @param mode: permissions for the new file
+
+        @return: file handle of the new file
+        """
+        if isinstance(mode, str) or isinstance(mode, unicode):
+            mode = int(mode, 8)
+
+        destdir = os.path.dirname(path)
+        if not os.path.exists(destdir):
+            os.makedirs(destdir, 0o2775)
+        if os.path.exists(path):
+            raise IOError("File '{0}' already exists.".format(path))
+        fh = open(path, 'w')
+        self.actions.append(_FilesystemCreateAction(path))
+        if mode is not None:
+            os.chmod(path, mode)
+        return fh
+
+    def commit(self):
+        """Commit all recorded actions."""
+        try:
+            for action in self.actions:
+                action.commit()
+        except:
+            self.rollback()
+            raise
+        finally:
+            self.actions = []
+
+    def rollback(self):
+        """Undo all recorded actions."""
+        try:
+            for action in self.actions:
+                action.rollback()
+        finally:
+            self.actions = []
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        if type is None:
+            self.commit()
+        else:
+            self.rollback()
+        return None
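Each action in the new module runs immediately and records how to undo itself; a minimal usage sketch (paths invented) could look like this:

from daklib.fstransactions import FilesystemTransaction

with FilesystemTransaction() as fs:
    fs.move('/srv/queue/hello_1.0-1.dsc',
            '/srv/pool/main/h/hello/hello_1.0-1.dsc', mode=0o644)
    fh = fs.create('/srv/pool/main/h/hello/.imported')  # hypothetical marker file
    fh.write('done\n')
    fh.close()
# leaving the with-block normally commits; an unhandled exception rolls back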
index 62bfe096510453c180acb4fda1f5b80af6581ffa..828bf64906278f86db0aa13547b91356c0ba6568 100644 (file)
@@ -19,6 +19,8 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
+import apt_pkg
+import datetime
 import errno
 import fcntl
 import os
@@ -68,7 +70,7 @@ class SignedFile(object):
     def __init__(self, data, keyrings, require_signature=True, gpg="/usr/bin/gpg"):
         """
         @param data: string containing the message
-        @param keyrings: seqeuence of keyrings
+        @param keyrings: sequence of keyrings
         @param require_signature: if True (the default), will raise an exception if no valid signature was found
         @param gpg: location of the gpg binary
         """
@@ -141,6 +143,17 @@ class SignedFile(object):
 
         return dict( (fd, "".join(read_lines[fd])) for fd in read_lines.keys() )
 
+    def _parse_date(self, value):
+        """parse date string in YYYY-MM-DD format
+
+        @rtype:   L{datetime.datetime}
+        @returns: datetime object for 0:00 on the given day
+        """
+        year, month, day = value.split('-')
+        date = datetime.date(int(year), int(month), int(day))
+        time = datetime.time(0, 0)
+        return datetime.datetime.combine(date, time)
+
     def _parse_status(self, line):
         fields = line.split()
         if fields[0] != "[GNUPG:]":
@@ -153,6 +166,7 @@ class SignedFile(object):
             self.valid = True
             self.fingerprint = fields[2]
             self.primary_fingerprint = fields[11]
+            self.signature_timestamp = self._parse_date(fields[3])
 
         if fields[1] == "BADARMOR":
             raise GpgException("Bad armor.")
@@ -190,4 +204,7 @@ class SignedFile(object):
         finally:
             os._exit(1)
 
+    def contents_sha1(self):
+        return apt_pkg.sha1sum(self.contents)
+
 # vim: set sw=4 et:
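The new signature_timestamp attribute comes from field 3 of GnuPG's VALIDSIG status line, which reports the signature creation date as YYYY-MM-DD; a sketch of the same parsing on a made-up date:

import datetime

def parse_date(value):
    # mirrors SignedFile._parse_date above
    year, month, day = value.split('-')
    return datetime.datetime.combine(datetime.date(int(year), int(month), int(day)),
                                     datetime.time(0, 0))

print parse_date('2013-10-27')   # 2013-10-27 00:00:00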
old mode 100755 (executable)
new mode 100644 (file)
index 320184e3934d19f6253bc713fe06da0e8f84cda0..5766371ce8382715937caffadb0a6b7682b679ce 100755 (executable)
@@ -42,14 +42,20 @@ def getSources(suite, component, session, timestamp = None):
     if timestamp:
         extra_cond = "AND extract(epoch from sa.created) > %d" % timestamp
     query = """
-        SELECT s.id, l.path, f.filename
+        SELECT s.id, archive.path || 'pool/', c.name || '/' || f.filename
             FROM source s
             JOIN src_associations sa
                 ON s.id = sa.source AND sa.suite = :suite %s
+            JOIN suite
+                ON sa.suite = suite.id
+            JOIN archive
+                ON suite.archive_id = archive.id
             JOIN files f
                 ON s.file = f.id
-            JOIN location l
-                ON f.location = l.id AND l.component = :component
+            JOIN files_archive_map fam
+                ON fam.file_id = f.id AND fam.component_id = :component
+            JOIN component c
+                ON fam.component_id = c.id
             ORDER BY filename
     """ % extra_cond
     args = { 'suite': suite.suite_id,
@@ -106,12 +112,16 @@ CREATE TEMP TABLE gf_candidates (
     source text);
 
 INSERT INTO gf_candidates (id, filename, path, architecture, src, source)
-    SELECT bc.id, f.filename, l.path, bc.architecture, bc.source as src, s.source
+    SELECT bc.id, c.name || '/' || f.filename, archive.path || 'pool/' , bc.architecture, bc.source as src, s.source
         FROM b_candidates bc
         JOIN source s ON bc.source = s.id
         JOIN files f ON bc.file = f.id
-        JOIN location l ON f.location = l.id
-        WHERE l.component = :component;
+        JOIN files_archive_map fam ON f.id = fam.file_id
+        JOIN component c ON fam.component_id = c.id
+        JOIN archive ON fam.archive_id = archive.id
+        JOIN suite ON suite.archive_id = archive.id
+
+        WHERE c.id = :component AND suite.id = :suite;
 
 WITH arch_any AS
 
diff --git a/daklib/metadata.py b/daklib/metadata.py
deleted file mode 100755 (executable)
index 793b073..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python
-"""
-Helper code for packages and sources generation.
-
-@contact: Debian FTPMaster <ftpmaster@debian.org>
-@copyright: 2011 Torsten Werner <twerner@debian.org>
-@copyright: 2011 Mark Hymers <mhy@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-################################################################################
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-from daklib.dbconn import *
-from daklib.config import Config
-
-from multiprocessing import Pool
-from subprocess import Popen, PIPE
-
-import os.path
-
-class MetadataScanner(object):
-    '''
-    MetadataScanner provides a threadsafe method scan() to scan the metadata of
-    a DBSource or DBBinary object depending on what is passed as dbclass'''
-
-    def __init__(self, dbclass, pkid, verbose=True):
-        '''
-        The argument binary_id is the id of the DBBinary object that
-
-        should be scanned.
-        '''
-        self.verbose = True
-        self.dbclass = dbclass
-        self.pkid = pkid
-
-    def scan(self, dummy_arg = None):
-        '''
-        This method does the actual scan and fills in the associated metadata
-        property. It commits any changes to the database. The argument dummy_arg
-        is ignored but needed by our threadpool implementation.
-        '''
-        obj = None
-        fullpath = 'UNKNOWN PATH'
-
-        session = DBConn().session()
-        try:
-            obj = session.query(self.dbclass).get(self.pkid)
-            fullpath = obj.poolfile.fullpath
-            import_metadata_into_db(obj, session=session)
-            if self.verbose:
-                print "Imported %s (%s)" % (self.pkid, fullpath)
-            session.commit()
-        except Exception as e:
-            print "Failed to import %s [id=%s; fullpath=%s]" % (self.dbclass.__name__, self.pkid, fullpath)
-            print "Exception: ", e
-            session.rollback()
-
-        session.close()
-
-    @classmethod
-    def scan_all(class_, scantype='source', limit = None):
-        '''
-        The class method scan_all() scans all sources using multiple threads.
-        The number of sources to be scanned can be limited with the limit
-        argument. Returns the number of processed and remaining files as a
-        dict.
-        '''
-        session = DBConn().session()
-        if scantype == 'source':
-            dbclass = DBSource
-            query = session.query(DBSource).filter(~DBSource.source_id.in_(session.query(SourceMetadata.source_id.distinct())))
-            t = 'sources'
-        else:
-            # Otherwise binary
-            dbclass = DBBinary
-            query = session.query(DBBinary).filter(~DBBinary.binary_id.in_(session.query(BinaryMetadata.binary_id.distinct())))
-            t = 'binaries'
-
-        remaining = query.count
-        if limit is not None:
-            query = query.limit(limit)
-        processed = query.count()
-        pool = Pool(processes=10)
-        for obj in query.yield_per(100):
-            pool.apply_async(scan_helper, (dbclass, obj.pkid, ))
-        pool.close()
-        pool.join()
-        remaining = remaining()
-        session.close()
-        return { 'processed': processed, 'remaining': remaining , 'type': t}
-
-def scan_helper(dbclass, source_id):
-    '''
-    This function runs in a subprocess.
-    '''
-    scanner = MetadataScanner(dbclass, source_id)
-    scanner.scan()
diff --git a/daklib/policy.py b/daklib/policy.py
new file mode 100644 (file)
index 0000000..aeed9a2
--- /dev/null
@@ -0,0 +1,312 @@
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""module to process policy queue uploads"""
+
+from .config import Config
+from .dbconn import BinaryMetadata, Component, MetadataKey, Override, OverrideType, Suite, get_mapped_component
+from .fstransactions import FilesystemTransaction
+from .regexes import re_file_changes, re_file_safe
+import daklib.utils as utils
+
+import errno
+import os
+import shutil
+import tempfile
+
+class UploadCopy(object):
+    """export a policy queue upload
+
+    This class can be used in a with-statement::
+
+       with UploadCopy(...) as copy:
+          ...
+
+    Doing so will provide a temporary copy of the upload in the directory
+    given by the C{directory} attribute.  The copy will be removed on leaving
+    the with-block.
+    """
+    def __init__(self, upload, group=None):
+        """initializer
+
+        @type  upload: L{daklib.dbconn.PolicyQueueUpload}
+        @param upload: upload to handle
+        """
+
+        self.directory = None
+        self.upload = upload
+        self.group = group
+
+    def export(self, directory, mode=None, symlink=True, ignore_existing=False):
+        """export a copy of the upload
+
+        @type  directory: str
+        @param directory: directory to export to
+
+        @type  mode: int
+        @param mode: permissions to use for the copied files
+
+        @type  symlink: bool
+        @param symlink: use symlinks instead of copying the files
+
+        @type  ignore_existing: bool
+        @param ignore_existing: ignore already existing files
+        """
+        with FilesystemTransaction() as fs:
+            source = self.upload.source
+            queue = self.upload.policy_queue
+
+            if source is not None:
+                for dsc_file in source.srcfiles:
+                    f = dsc_file.poolfile
+                    dst = os.path.join(directory, os.path.basename(f.filename))
+                    if not os.path.exists(dst) or not ignore_existing:
+                        fs.copy(f.fullpath, dst, mode=mode, symlink=symlink)
+
+            for binary in self.upload.binaries:
+                f = binary.poolfile
+                dst = os.path.join(directory, os.path.basename(f.filename))
+                if not os.path.exists(dst) or not ignore_existing:
+                    fs.copy(f.fullpath, dst, mode=mode, symlink=symlink)
+
+            # copy byhand files
+            for byhand in self.upload.byhand:
+                src = os.path.join(queue.path, byhand.filename)
+                dst = os.path.join(directory, byhand.filename)
+                if not os.path.exists(dst) or not ignore_existing:
+                    fs.copy(src, dst, mode=mode, symlink=symlink)
+
+            # copy .changes
+            src = os.path.join(queue.path, self.upload.changes.changesname)
+            dst = os.path.join(directory, self.upload.changes.changesname)
+            if not os.path.exists(dst) or not ignore_existing:
+                fs.copy(src, dst, mode=mode, symlink=symlink)
+
+    def __enter__(self):
+        assert self.directory is None
+
+        mode = 0o0700
+        symlink = True
+        if self.group is not None:
+            mode = 0o2750
+            symlink = False
+
+        cnf = Config()
+        self.directory = utils.temp_dirname(parent=cnf.get('Dir::TempPath'),
+                                            mode=mode,
+                                            group=self.group)
+        self.export(self.directory, symlink=symlink)
+        return self
+
+    def __exit__(self, *args):
+        if self.directory is not None:
+            shutil.rmtree(self.directory)
+            self.directory = None
+        return None
+
+class PolicyQueueUploadHandler(object):
+    """process uploads to policy queues
+
+    This class can be used to accept or reject uploads and to get a list of missing
+    overrides (for NEW processing).
+    """
+    def __init__(self, upload, session):
+        """initializer
+
+        @type  upload: L{daklib.dbconn.PolicyQueueUpload}
+        @param upload: upload to process
+
+        @param session: database session
+        """
+        self.upload = upload
+        self.session = session
+
+    @property
+    def _overridesuite(self):
+        overridesuite = self.upload.target_suite
+        if overridesuite.overridesuite is not None:
+            overridesuite = self.session.query(Suite).filter_by(suite_name=overridesuite.overridesuite).one()
+        return overridesuite
+
+    def _source_override(self, component_name):
+        package = self.upload.source.source
+        suite = self._overridesuite
+        component = get_mapped_component(component_name, self.session)
+        query = self.session.query(Override).filter_by(package=package, suite=suite) \
+            .join(OverrideType).filter(OverrideType.overridetype == 'dsc') \
+            .filter(Override.component == component)
+        return query.first()
+
+    def _binary_override(self, binary, component_name):
+        package = binary.package
+        suite = self._overridesuite
+        overridetype = binary.binarytype
+        component = get_mapped_component(component_name, self.session)
+        query = self.session.query(Override).filter_by(package=package, suite=suite) \
+            .join(OverrideType).filter(OverrideType.overridetype == overridetype) \
+            .filter(Override.component == component)
+        return query.first()
+
+    def _binary_metadata(self, binary, key):
+        metadata_key = self.session.query(MetadataKey).filter_by(key=key).first()
+        if metadata_key is None:
+            return None
+        metadata = self.session.query(BinaryMetadata).filter_by(binary=binary, key=metadata_key).first()
+        if metadata is None:
+            return None
+        return metadata.value
+
+    @property
+    def _changes_prefix(self):
+        changesname = self.upload.changes.changesname
+        assert changesname.endswith('.changes')
+        assert re_file_changes.match(changesname)
+        return changesname[0:-8]
+
+    def accept(self):
+        """mark upload as accepted"""
+        assert len(self.missing_overrides()) == 0
+
+        fn1 = 'ACCEPT.{0}'.format(self._changes_prefix)
+        fn = os.path.join(self.upload.policy_queue.path, 'COMMENTS', fn1)
+        try:
+            fh = os.open(fn, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644)
+            os.write(fh, 'OK\n')
+            os.close(fh)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                pass
+            else:
+                raise
+
+    def reject(self, reason):
+        """mark upload as rejected
+
+        @type  reason: str
+        @param reason: reason for the rejection
+        """
+        cnf = Config()
+
+        fn1 = 'REJECT.{0}'.format(self._changes_prefix)
+        assert re_file_safe.match(fn1)
+
+        fn = os.path.join(self.upload.policy_queue.path, 'COMMENTS', fn1)
+        try:
+            fh = os.open(fn, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644)
+            os.write(fh, 'NOTOK\n')
+            os.write(fh, 'From: {0} <{1}>\n\n'.format(utils.whoami(), cnf['Dinstall::MyAdminAddress']))
+            os.write(fh, reason)
+            os.close(fh)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                pass
+            else:
+                raise
+
+    def get_action(self):
+        """get current action
+
+        @rtype:  str
+        @return: string giving the current action, one of 'ACCEPT', 'ACCEPTED', 'REJECT'
+        """
+        changes_prefix = self._changes_prefix
+
+        for action in ('ACCEPT', 'ACCEPTED', 'REJECT'):
+            fn1 = '{0}.{1}'.format(action, changes_prefix)
+            fn = os.path.join(self.upload.policy_queue.path, 'COMMENTS', fn1)
+            if os.path.exists(fn):
+                return action
+
+        return None
+
+    def missing_overrides(self, hints=None):
+        """get missing override entries for the upload
+
+        @type  hints: list of dict
+        @param hints: suggested hints for new overrides in the same format as
+                      the return value
+
+        @return: list of dicts with the following keys:
+
+                 - package: package name
+                 - priority: default priority (from upload)
+                 - section: default section (from upload)
+                 - component: default component (from upload)
+                 - type: type of required override ('dsc', 'deb' or 'udeb')
+
+                 All values are strings.
+        """
+        # TODO: use Package-List field
+        missing = []
+        components = set()
+
+        if hints is None:
+            hints = []
+        hints_map = dict([ ((o['type'], o['package']), o) for o in hints ])
+
+        for binary in self.upload.binaries:
+            priority = self._binary_metadata(binary, 'Priority')
+            section = self._binary_metadata(binary, 'Section')
+            component = 'main'
+            if section.find('/') != -1:
+                component = section.split('/', 1)[0]
+            override = self._binary_override(binary, component)
+            if override is None and not any(o['package'] == binary.package and o['type'] == binary.binarytype for o in missing):
+                hint = hints_map.get((binary.binarytype, binary.package))
+                if hint is not None:
+                    missing.append(hint)
+                    component = hint['component']
+                else:
+                    missing.append(dict(
+                            package = binary.package,
+                            priority = priority,
+                            section = section,
+                            component = component,
+                            type = binary.binarytype,
+                            ))
+            components.add(component)
+
+        source = self.upload.source
+        source_component = '(unknown)'
+        for component, in self.session.query(Component.component_name).order_by(Component.ordering):
+            if component in components:
+                source_component = component
+                break
+            else:
+                if source is not None:
+                    if self._source_override(component) is not None:
+                        source_component = component
+                        break
+
+        if source is not None:
+            override = self._source_override(source_component)
+            if override is None:
+                hint = hints_map.get(('dsc', source.source))
+                if hint is not None:
+                    missing.append(hint)
+                else:
+                    section = 'misc'
+                    if component != 'main':
+                        section = "{0}/{1}".format(component, section)
+                    missing.append(dict(
+                            package = source.source,
+                            priority = 'extra',
+                            section = section,
+                            component = source_component,
+                            type = 'dsc',
+                            ))
+
+        return missing
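A hedged usage sketch of the new module: the upload query and the printed fields are illustrative assumptions, only the UploadCopy and PolicyQueueUploadHandler calls mirror the code above, and a configured dak environment plus a populated database are assumed.

from daklib.dbconn import DBConn, PolicyQueueUpload
from daklib.policy import UploadCopy, PolicyQueueUploadHandler

session = DBConn().session()
upload = session.query(PolicyQueueUpload).first()   # hypothetical selection

with UploadCopy(upload) as copy:
    print "temporary copy in", copy.directory

handler = PolicyQueueUploadHandler(upload, session)
for o in handler.missing_overrides():
    print o['package'], o['type'], o['component']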
index d0772276f20158f9ce3206d319ad3857222c5c8d..5a5c8f90f07db110f4a6150f1c7eff66fb9143ff 100755 (executable)
@@ -41,8 +41,6 @@ from types import *
 from sqlalchemy.sql.expression import desc
 from sqlalchemy.orm.exc import NoResultFound
 
-import yaml
-
 from dak_exceptions import *
 from changes import *
 from regexes import *
@@ -56,254 +54,60 @@ from textutils import fix_maintainer
 from lintian import parse_lintian_output, generate_reject_messages
 from contents import UnpackedSource
 
-###############################################################################
-
-def get_type(f, session):
-    """
-    Get the file type of C{f}
-
-    @type f: dict
-    @param f: file entry from Changes object
-
-    @type session: SQLA Session
-    @param session: SQL Alchemy session object
-
-    @rtype: string
-    @return: filetype
-
-    """
-    # Determine the type
-    if f.has_key("dbtype"):
-        file_type = f["dbtype"]
-    elif re_source_ext.match(f["type"]):
-        file_type = "dsc"
-    elif f['architecture'] == 'source' and f["type"] == 'unreadable':
-        utils.warn('unreadable source file (will continue and hope for the best)')
-        return f["type"]
-    else:
-        file_type = f["type"]
-        utils.fubar("invalid type (%s) for new.  Dazed, confused and sure as heck not continuing." % (file_type))
-
-    # Validate the override type
-    type_id = get_override_type(file_type, session)
-    if type_id is None:
-        utils.fubar("invalid type (%s) for new.  Say wha?" % (file_type))
-
-    return file_type
-
 ################################################################################
 
-# Determine what parts in a .changes are NEW
-
-def determine_new(filename, changes, files, warn=1, session = None, dsc = None, new = None):
-    """
-    Determine what parts in a C{changes} file are NEW.
-
-    @type filename: str
-    @param filename: changes filename
-
-    @type changes: Upload.Pkg.changes dict
-    @param changes: Changes dictionary
-
-    @type files: Upload.Pkg.files dict
-    @param files: Files dictionary
-
-    @type warn: bool
-    @param warn: Warn if overrides are added for (old)stable
-
-    @type dsc: Upload.Pkg.dsc dict
-    @param dsc: (optional); Dsc dictionary
-
-    @type new: dict
-    @param new: new packages as returned by a previous call to this function, but override information may have changed
-
-    @rtype: dict
-    @return: dictionary of NEW components.
-
-    """
-    # TODO: This should all use the database instead of parsing the changes
-    # file again
-    byhand = {}
-    if new is None:
-        new = {}
-
-    dbchg = get_dbchange(filename, session)
-    if dbchg is None:
-        print "Warning: cannot find changes file in database; won't check byhand"
-
-    # Try to get the Package-Set field from an included .dsc file (if possible).
-    if dsc:
-        for package, entry in build_package_list(dsc, session).items():
-            if package not in new:
-                new[package] = entry
-
-    # Build up a list of potentially new things
-    for name, f in files.items():
-        # Keep a record of byhand elements
-        if f["section"] == "byhand":
-            byhand[name] = 1
-            continue
-
-        pkg = f["package"]
-        priority = f["priority"]
-        section = f["section"]
-        file_type = get_type(f, session)
-        component = f["component"]
-
-        if file_type == "dsc":
-            priority = "source"
-
-        if not new.has_key(pkg):
-            new[pkg] = {}
-            new[pkg]["priority"] = priority
-            new[pkg]["section"] = section
-            new[pkg]["type"] = file_type
-            new[pkg]["component"] = component
-            new[pkg]["files"] = []
-        else:
-            old_type = new[pkg]["type"]
-            if old_type != file_type:
-                # source gets trumped by deb or udeb
-                if old_type == "dsc":
-                    new[pkg]["priority"] = priority
-                    new[pkg]["section"] = section
-                    new[pkg]["type"] = file_type
-                    new[pkg]["component"] = component
-
-        new[pkg]["files"].append(name)
-
-        if f.has_key("othercomponents"):
-            new[pkg]["othercomponents"] = f["othercomponents"]
-
-    # Fix up the list of target suites
-    cnf = Config()
-    for suite in changes["suite"].keys():
-        oldsuite = get_suite(suite, session)
-        if not oldsuite:
-            print "WARNING: Invalid suite %s found" % suite
-            continue
-
-        if oldsuite.overridesuite:
-            newsuite = get_suite(oldsuite.overridesuite, session)
-
-            if newsuite:
-                print "INFORMATION: Using overrides from suite %s instead of suite %s" % (
-                    oldsuite.overridesuite, suite)
-                del changes["suite"][suite]
-                changes["suite"][oldsuite.overridesuite] = 1
-            else:
-                print "WARNING: Told to use overridesuite %s for %s but it doesn't exist.  Bugger" % (
-                    oldsuite.overridesuite, suite)
-
-    # Check for unprocessed byhand files
-    if dbchg is not None:
-        for b in byhand.keys():
-            # Find the file entry in the database
-            found = False
-            for f in dbchg.files:
-                if f.filename == b:
-                    found = True
-                    # If it's processed, we can ignore it
-                    if f.processed:
-                        del byhand[b]
-                    break
-
-            if not found:
-                print "Warning: Couldn't find BYHAND item %s in the database; assuming unprocessed"
-
-    # Check for new stuff
-    for suite in changes["suite"].keys():
-        for pkg in new.keys():
-            ql = get_override(pkg, suite, new[pkg]["component"], new[pkg]["type"], session)
-            if len(ql) > 0:
-                for file_entry in new[pkg]["files"]:
-                    if files[file_entry].has_key("new"):
-                        del files[file_entry]["new"]
-                del new[pkg]
-
-    if warn:
-        for s in ['stable', 'oldstable']:
-            if changes["suite"].has_key(s):
-                print "WARNING: overrides will be added for %s!" % s
-        for pkg in new.keys():
-            if new[pkg].has_key("othercomponents"):
-                print "WARNING: %s already present in %s distribution." % (pkg, new[pkg]["othercomponents"])
-
-    return new, byhand
-
-################################################################################
+def check_valid(overrides, session):
+    """Check if section and priority for new overrides exist in database.
 
-def check_valid(new, session = None):
-    """
-    Check if section and priority for NEW packages exist in database.
     Additionally does sanity checks:
       - debian-installer packages have to be udeb (or source)
-      - non debian-installer packages can not be udeb
-      - source priority can only be assigned to dsc file types
-
-    @type new: dict
-    @param new: Dict of new packages with their section, priority and type.
-
-    """
-    for pkg in new.keys():
-        section_name = new[pkg]["section"]
-        priority_name = new[pkg]["priority"]
-        file_type = new[pkg]["type"]
-
-        section = get_section(section_name, session)
-        if section is None:
-            new[pkg]["section id"] = -1
-        else:
-            new[pkg]["section id"] = section.section_id
+      - non debian-installer packages cannot be udeb
 
-        priority = get_priority(priority_name, session)
-        if priority is None:
-            new[pkg]["priority id"] = -1
-        else:
-            new[pkg]["priority id"] = priority.priority_id
-
-        # Sanity checks
-        di = section_name.find("debian-installer") != -1
-
-        # If d-i, we must be udeb and vice-versa
-        if     (di and file_type not in ("udeb", "dsc")) or \
-           (not di and file_type == "udeb"):
-            new[pkg]["section id"] = -1
-
-        # If dsc we need to be source and vice-versa
-        if (priority == "source" and file_type != "dsc") or \
-           (priority != "source" and file_type == "dsc"):
-            new[pkg]["priority id"] = -1
-
-###############################################################################
+    @type  overrides: list of dict
+    @param overrides: list of overrides to check. Each override must be
+                      given as a dict with the following keys:
 
-# Used by Upload.check_timestamps
-class TarTime(object):
-    def __init__(self, future_cutoff, past_cutoff):
-        self.reset()
-        self.future_cutoff = future_cutoff
-        self.past_cutoff = past_cutoff
+                      - package: package name
+                      - priority
+                      - section
+                      - component
+                      - type: type of requested override ('dsc', 'deb' or 'udeb')
 
-    def reset(self):
-        self.future_files = {}
-        self.ancient_files = {}
+                      All values are strings.
 
-    def callback(self, member, data):
-        if member.mtime > self.future_cutoff:
-            self.future_files[Name] = member.mtime
-        if member.mtime < self.past_cutoff:
-            self.ancient_files[Name] = member.mtime
+    @rtype:  bool
+    @return: C{True} if all overrides are valid, C{False} if there is any
+             invalid override.
+    """
+    all_valid = True
+    for o in overrides:
+        o['valid'] = True
+        if session.query(Priority).filter_by(priority=o['priority']).first() is None:
+            o['valid'] = False
+        if session.query(Section).filter_by(section=o['section']).first() is None:
+            o['valid'] = False
+        if get_mapped_component(o['component'], session) is None:
+            o['valid'] = False
+        if o['type'] not in ('dsc', 'deb', 'udeb'):
+            raise Exception('Unknown override type {0}'.format(o['type']))
+        if o['type'] == 'udeb' and o['section'] != 'debian-installer':
+            o['valid'] = False
+        if o['section'] == 'debian-installer' and o['type'] not in ('dsc', 'udeb'):
+            o['valid'] = False
+        all_valid = all_valid and o['valid']
+    return all_valid
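An illustrative call: the override dicts follow the layout documented above, the package names are invented, and a database session from DBConn is assumed.

from daklib.dbconn import DBConn
from daklib.queue import check_valid

overrides = [
    {'package': 'hello', 'priority': 'optional', 'section': 'devel',
     'component': 'main', 'type': 'deb'},
    {'package': 'hello-udeb', 'priority': 'optional',
     'section': 'debian-installer', 'component': 'main', 'type': 'udeb'},
]
session = DBConn().session()
if not check_valid(overrides, session):
    print "invalid:", ", ".join(o['package'] for o in overrides if not o['valid'])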
 
 ###############################################################################
 
 def prod_maintainer(notes, upload):
     cnf = Config()
+    changes = upload.changes
+    whitelists = [ upload.target_suite.mail_whitelist ]
 
     # Here we prepare an editor and get them ready to prod...
     (fd, temp_filename) = utils.temp_filename()
     temp_file = os.fdopen(fd, 'w')
-    for note in notes:
-        temp_file.write(note.comment)
+    temp_file.write("\n\n=====\n\n".join([note.comment for note in notes]))
     temp_file.close()
     editor = os.environ.get("EDITOR","vi")
     answer = 'E'
@@ -326,13 +130,20 @@ def prod_maintainer(notes, upload):
     if answer == 'A':
         return
     elif answer == 'Q':
-        end()
-        sys.exit(0)
+        return 0
     # Otherwise, do the proding...
     user_email_address = utils.whoami() + " <%s>" % (
         cnf["Dinstall::MyAdminAddress"])
 
-    Subst = upload.Subst
+    changed_by = changes.changedby or changes.maintainer
+    maintainer = changes.maintainer
+    maintainer_to = utils.mail_addresses_for_upload(maintainer, changed_by, changes.fingerprint)
+
+    Subst = {
+        '__SOURCE__': upload.changes.source,
+        '__CHANGES_FILENAME__': upload.changes.changesname,
+        '__MAINTAINER_TO__': ", ".join(maintainer_to),
+        }
 
     Subst["__FROM_ADDRESS__"] = user_email_address
     Subst["__PROD_MESSAGE__"] = prod_message
@@ -342,7 +153,7 @@ def prod_maintainer(notes, upload):
         Subst,cnf["Dir::Templates"]+"/process-new.prod")
 
     # Send the prod mail
-    utils.send_mail(prod_mail_message)
+    utils.send_mail(prod_mail_message, whitelists=whitelists)
 
     print "Sent prodding message"
 
@@ -372,12 +183,12 @@ def edit_note(note, upload, session, trainee=False):
     if answer == 'A':
         return
     elif answer == 'Q':
-        end()
-        sys.exit(0)
+        return 0
 
     comment = NewComment()
-    comment.package = upload.pkg.changes["source"]
-    comment.version = upload.pkg.changes["version"]
+    comment.policy_queue = upload.policy_queue
+    comment.package = upload.changes.source
+    comment.version = upload.changes.version
     comment.comment = newnote
     comment.author  = utils.whoami()
     comment.trainee = trainee
@@ -388,7 +199,7 @@ def edit_note(note, upload, session, trainee=False):
 
 # FIXME: Should move into the database
 # suite names DMs can upload to
-dm_suites = ['unstable', 'experimental', 'squeeze-backports']
+dm_suites = ['unstable', 'experimental', 'squeeze-backports', 'squeeze-backports-sloppy', 'wheezy-backports']
 
 def get_newest_source(source, session):
     'returns the newest DBSource object in dm_suites'
@@ -437,48 +248,9 @@ class Upload(object):
 
     ###########################################################################
 
-    def reset (self):
-        """ Reset a number of internal variables."""
-
-        # Initialize the substitution template map
-        cnf = Config()
-        self.Subst = {}
-        self.Subst["__ADMIN_ADDRESS__"] = cnf["Dinstall::MyAdminAddress"]
-        if cnf.has_key("Dinstall::BugServer"):
-            self.Subst["__BUG_SERVER__"] = cnf["Dinstall::BugServer"]
-        self.Subst["__DISTRO__"] = cnf["Dinstall::MyDistribution"]
-        self.Subst["__DAK_ADDRESS__"] = cnf["Dinstall::MyEmailAddress"]
-
-        self.rejects = []
-        self.warnings = []
-        self.notes = []
-
-        self.later_check_files = []
-
-        self.pkg.reset()
-
-    def package_info(self):
-        """
-        Format various messages from this Upload to send to the maintainer.
-        """
-
-        msgs = (
-            ('Reject Reasons', self.rejects),
-            ('Warnings', self.warnings),
-            ('Notes', self.notes),
-        )
-
-        msg = ''
-        for title, messages in msgs:
-            if messages:
-                msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
-        msg += '\n\n'
-
-        return msg
-
-    ###########################################################################
     def update_subst(self):
         """ Set up the per-package template substitution mappings """
+        raise Exception('to be removed')
 
         cnf = Config()
 
@@ -534,119 +306,6 @@ class Upload(object):
         self.Subst["__VERSION__"] = self.pkg.changes.get("version", "Unknown")
         self.Subst["__SUITE__"] = ", ".join(self.pkg.changes["distribution"])
 
-    ###########################################################################
-    def load_changes(self, filename):
-        """
-        Load a changes file and setup a dictionary around it. Also checks for mandantory
-        fields  within.
-
-        @type filename: string
-        @param filename: Changes filename, full path.
-
-        @rtype: boolean
-        @return: whether the changes file was valid or not.  We may want to
-                 reject even if this is True (see what gets put in self.rejects).
-                 This is simply to prevent us even trying things later which will
-                 fail because we couldn't properly parse the file.
-        """
-        Cnf = Config()
-        self.pkg.changes_file = filename
-
-        # Parse the .changes field into a dictionary
-        try:
-            self.pkg.changes.update(parse_changes(filename))
-        except CantOpenError:
-            self.rejects.append("%s: can't read file." % (filename))
-            return False
-        except ParseChangesError as line:
-            self.rejects.append("%s: parse error, can't grok: %s." % (filename, line))
-            return False
-        except ChangesUnicodeError:
-            self.rejects.append("%s: changes file not proper utf-8" % (filename))
-            return False
-
-        # Parse the Files field from the .changes into another dictionary
-        try:
-            self.pkg.files.update(utils.build_file_list(self.pkg.changes))
-        except ParseChangesError as line:
-            self.rejects.append("%s: parse error, can't grok: %s." % (filename, line))
-            return False
-        except UnknownFormatError as format:
-            self.rejects.append("%s: unknown format '%s'." % (filename, format))
-            return False
-
-        # Check for mandatory fields
-        for i in ("distribution", "source", "binary", "architecture",
-                  "version", "maintainer", "files", "changes", "description"):
-            if not self.pkg.changes.has_key(i):
-                # Avoid undefined errors later
-                self.rejects.append("%s: Missing mandatory field `%s'." % (filename, i))
-                return False
-
-        # Strip a source version in brackets from the source field
-        if re_strip_srcver.search(self.pkg.changes["source"]):
-            self.pkg.changes["source"] = re_strip_srcver.sub('', self.pkg.changes["source"])
-
-        # Ensure the source field is a valid package name.
-        if not re_valid_pkg_name.match(self.pkg.changes["source"]):
-            self.rejects.append("%s: invalid source name '%s'." % (filename, self.pkg.changes["source"]))
-
-        # Split multi-value fields into a lower-level dictionary
-        for i in ("architecture", "distribution", "binary", "closes"):
-            o = self.pkg.changes.get(i, "")
-            if o != "":
-                del self.pkg.changes[i]
-
-            self.pkg.changes[i] = {}
-
-            for j in o.split():
-                self.pkg.changes[i][j] = 1
-
-        # Fix the Maintainer: field to be RFC822/2047 compatible
-        try:
-            (self.pkg.changes["maintainer822"],
-             self.pkg.changes["maintainer2047"],
-             self.pkg.changes["maintainername"],
-             self.pkg.changes["maintaineremail"]) = \
-                   fix_maintainer (self.pkg.changes["maintainer"])
-        except ParseMaintError as msg:
-            self.rejects.append("%s: Maintainer field ('%s') failed to parse: %s" \
-                   % (filename, self.pkg.changes["maintainer"], msg))
-
-        # ...likewise for the Changed-By: field if it exists.
-        try:
-            (self.pkg.changes["changedby822"],
-             self.pkg.changes["changedby2047"],
-             self.pkg.changes["changedbyname"],
-             self.pkg.changes["changedbyemail"]) = \
-                   fix_maintainer (self.pkg.changes.get("changed-by", ""))
-        except ParseMaintError as msg:
-            self.pkg.changes["changedby822"] = ""
-            self.pkg.changes["changedby2047"] = ""
-            self.pkg.changes["changedbyname"] = ""
-            self.pkg.changes["changedbyemail"] = ""
-
-            self.rejects.append("%s: Changed-By field ('%s') failed to parse: %s" \
-                   % (filename, self.pkg.changes["changed-by"], msg))
-
-        # Ensure all the values in Closes: are numbers
-        if self.pkg.changes.has_key("closes"):
-            for i in self.pkg.changes["closes"].keys():
-                if re_isanum.match (i) == None:
-                    self.rejects.append(("%s: `%s' from Closes field isn't a number." % (filename, i)))
-
-        # chopversion = no epoch; chopversion2 = no epoch and no revision (e.g. for .orig.tar.gz comparison)
-        self.pkg.changes["chopversion"] = re_no_epoch.sub('', self.pkg.changes["version"])
-        self.pkg.changes["chopversion2"] = re_no_revision.sub('', self.pkg.changes["chopversion"])
-
-        # Check the .changes is non-empty
-        if not self.pkg.files:
-            self.rejects.append("%s: nothing to do (Files field is empty)." % (os.path.basename(self.pkg.changes_file)))
-            return False
-
-        # Changes was syntactically valid even if we'll reject
-        return True
-
     ###########################################################################
 
     def check_distributions(self):
@@ -706,252 +365,8 @@ class Upload(object):
 
     ###########################################################################
 
-    def binary_file_checks(self, f, session):
-        cnf = Config()
-        entry = self.pkg.files[f]
-
-        # Extract package control information
-        deb_file = utils.open_file(f)
-        try:
-            control = apt_pkg.TagSection(utils.deb_extract_control(deb_file))
-        except:
-            self.rejects.append("%s: deb_extract_control() raised %s." % (f, sys.exc_info()[0]))
-            deb_file.close()
-            # Can't continue, none of the checks on control would work.
-            return
-
-        deb_file.close()
-
-        # Check for mandatory fields
-        for field in [ "Package", "Architecture", "Version", "Description" ]:
-            if field not in control:
-                # Can't continue
-                self.rejects.append("%s: No %s field in control." % (f, field))
-                return
-
-        # Ensure the package name matches the one give in the .changes
-        if not self.pkg.changes["binary"].has_key(control.find("Package", "")):
-            self.rejects.append("%s: control file lists name as `%s', which isn't in changes file." % (f, control.find("Package", "")))
-
-        # Validate the package field
-        package = control["Package"]
-        if not re_valid_pkg_name.match(package):
-            self.rejects.append("%s: invalid package name '%s'." % (f, package))
-
-        # Validate the version field
-        version = control["Version"]
-        if not re_valid_version.match(version):
-            self.rejects.append("%s: invalid version number '%s'." % (f, version))
-
-        # Ensure the architecture of the .deb is one we know about.
-        default_suite = cnf.get("Dinstall::DefaultSuite", "unstable")
-        architecture = control["Architecture"]
-        upload_suite = self.pkg.changes["distribution"].keys()[0]
-
-        if      architecture not in [a.arch_string for a in get_suite_architectures(default_suite, session = session)] \
-            and architecture not in [a.arch_string for a in get_suite_architectures(upload_suite, session = session)]:
-            self.rejects.append("Unknown architecture '%s'." % (architecture))
-
-        # Ensure the architecture of the .deb is one of the ones
-        # listed in the .changes.
-        if not self.pkg.changes["architecture"].has_key(architecture):
-            self.rejects.append("%s: control file lists arch as `%s', which isn't in changes file." % (f, architecture))
-
-        # Sanity-check the Depends field
-        depends = control.find("Depends")
-        if depends == '':
-            self.rejects.append("%s: Depends field is empty." % (f))
-
-        # Sanity-check the Provides field
-        provides = control.find("Provides")
-        if provides is not None:
-            provide = re_spacestrip.sub('', provides)
-            if provide == '':
-                self.rejects.append("%s: Provides field is empty." % (f))
-            prov_list = provide.split(",")
-            for prov in prov_list:
-                if not re_valid_pkg_name.match(prov):
-                    self.rejects.append("%s: Invalid Provides field content %s." % (f, prov))
-
-        # If there is a Built-Using field, we need to check we can find the
-        # exact source version
-        built_using = control.find("Built-Using")
-        if built_using is not None:
-            try:
-                entry["built-using"] = []
-                for dep in apt_pkg.parse_depends(built_using):
-                    bu_s, bu_v, bu_e = dep[0]
-                    # Check that it's an exact match dependency and we have
-                    # some form of version
-                    if bu_e != "=" or len(bu_v) < 1:
-                        self.rejects.append("%s: Built-Using contains non strict dependency (%s %s %s)" % (f, bu_s, bu_e, bu_v))
-                    else:
-                        # Find the source id for this version
-                        bu_so = get_sources_from_name(bu_s, version=bu_v, session = session)
-                        if len(bu_so) != 1:
-                            self.rejects.append("%s: Built-Using (%s = %s): Cannot find source package" % (f, bu_s, bu_v))
-                        else:
-                            entry["built-using"].append( (bu_so[0].source, bu_so[0].version, ) )
-
-            except ValueError as e:
-                self.rejects.append("%s: Cannot parse Built-Using field: %s" % (f, str(e)))
-
-
-        # Check the section & priority match those given in the .changes (non-fatal)
-        if control.find("Section") and entry["section"] != "" \
-           and entry["section"] != control.find("Section"):
-            self.warnings.append("%s control file lists section as `%s', but changes file has `%s'." % \
-                                (f, control.find("Section", ""), entry["section"]))
-        if control.find("Priority") and entry["priority"] != "" \
-           and entry["priority"] != control.find("Priority"):
-            self.warnings.append("%s control file lists priority as `%s', but changes file has `%s'." % \
-                                (f, control.find("Priority", ""), entry["priority"]))
-
-        entry["package"] = package
-        entry["architecture"] = architecture
-        entry["version"] = version
-        entry["maintainer"] = control.find("Maintainer", "")
-
-        if f.endswith(".udeb"):
-            self.pkg.files[f]["dbtype"] = "udeb"
-        elif f.endswith(".deb"):
-            self.pkg.files[f]["dbtype"] = "deb"
-        else:
-            self.rejects.append("%s is neither a .deb or a .udeb." % (f))
-
-        entry["source"] = control.find("Source", entry["package"])
-
-        # Get the source version
-        source = entry["source"]
-        source_version = ""
-
-        if source.find("(") != -1:
-            m = re_extract_src_version.match(source)
-            source = m.group(1)
-            source_version = m.group(2)
-
-        if not source_version:
-            source_version = self.pkg.files[f]["version"]
-
-        entry["source package"] = source
-        entry["source version"] = source_version
-
-        # Ensure the filename matches the contents of the .deb
-        m = re_isadeb.match(f)
-
-        #  package name
-        file_package = m.group(1)
-        if entry["package"] != file_package:
-            self.rejects.append("%s: package part of filename (%s) does not match package name in the %s (%s)." % \
-                                (f, file_package, entry["dbtype"], entry["package"]))
-        epochless_version = re_no_epoch.sub('', control.find("Version"))
-
-        #  version
-        file_version = m.group(2)
-        if epochless_version != file_version:
-            self.rejects.append("%s: version part of filename (%s) does not match package version in the %s (%s)." % \
-                                (f, file_version, entry["dbtype"], epochless_version))
-
-        #  architecture
-        file_architecture = m.group(3)
-        if entry["architecture"] != file_architecture:
-            self.rejects.append("%s: architecture part of filename (%s) does not match package architecture in the %s (%s)." % \
-                                (f, file_architecture, entry["dbtype"], entry["architecture"]))
-
-        # Check for existent source
-        source_version = entry["source version"]
-        source_package = entry["source package"]
-        if self.pkg.changes["architecture"].has_key("source"):
-            if source_version != self.pkg.changes["version"]:
-                self.rejects.append("source version (%s) for %s doesn't match changes version %s." % \
-                                    (source_version, f, self.pkg.changes["version"]))
-        else:
-            # Check in the SQL database
-            if not source_exists(source_package, source_version, suites = \
-                self.pkg.changes["distribution"].keys(), session = session):
-                # Check in one of the other directories
-                source_epochless_version = re_no_epoch.sub('', source_version)
-                dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
-
-                byhand_dir = get_policy_queue('byhand', session).path
-                new_dir = get_policy_queue('new', session).path
-
-                if os.path.exists(os.path.join(byhand_dir, dsc_filename)):
-                    entry["byhand"] = 1
-                elif os.path.exists(os.path.join(new_dir, dsc_filename)):
-                    entry["new"] = 1
-                else:
-                    dsc_file_exists = False
-                    # TODO: Don't hardcode this list: use all relevant queues
-                    #       The question is how to determine what is relevant
-                    for queue_name in ["embargoed", "unembargoed", "proposedupdates", "oldproposedupdates"]:
-                        queue = get_policy_queue(queue_name, session)
-                        if queue:
-                            if os.path.exists(os.path.join(queue.path, dsc_filename)):
-                                dsc_file_exists = True
-                                break
-
-                    if not dsc_file_exists:
-                        self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, f))
-
-        # Check the version and for file overwrites
-        self.check_binary_against_db(f, session)
-
-    def source_file_checks(self, f, session):
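-        """Run consistency checks on a source file (.dsc, diff or tarball):
-        package name and version against the .changes, and the .dsc signature.
-        Appends to self.rejects as appropriate."""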
-        entry = self.pkg.files[f]
-
-        m = re_issource.match(f)
-        if not m:
-            return
-
-        entry["package"] = m.group(1)
-        entry["version"] = m.group(2)
-        entry["type"] = m.group(3)
-
-        # Ensure the source package name matches the Source field in the .changes
-        if self.pkg.changes["source"] != entry["package"]:
-            self.rejects.append("%s: changes file doesn't say %s for Source" % (f, entry["package"]))
-
-        # Ensure the source version matches the version in the .changes file
-        if re_is_orig_source.match(f):
-            changes_version = self.pkg.changes["chopversion2"]
-        else:
-            changes_version = self.pkg.changes["chopversion"]
-
-        if changes_version != entry["version"]:
-            self.rejects.append("%s: should be %s according to changes file." % (f, changes_version))
-
-        # Ensure the .changes lists source in the Architecture field
-        if not self.pkg.changes["architecture"].has_key("source"):
-            self.rejects.append("%s: changes file doesn't list `source' in Architecture field." % (f))
-
-        # Check the signature of a .dsc file
-        if entry["type"] == "dsc":
-            # check_signature returns either:
-            #  (None, [list, of, rejects]) or (signature, [])
-            (self.pkg.dsc["fingerprint"], rejects) = utils.check_signature(f)
-            for j in rejects:
-                self.rejects.append(j)
-
-        entry["architecture"] = "source"
-
     def per_suite_file_checks(self, f, suite, session):
-        cnf = Config()
-        entry = self.pkg.files[f]
-
-        # Skip byhand
-        if entry.has_key("byhand"):
-            return
-
-        # Check we have fields we need to do these checks
-        oktogo = True
-        for m in ['component', 'package', 'priority', 'size', 'md5sum']:
-            if not entry.has_key(m):
-                self.rejects.append("file '%s' does not have field %s set" % (f, m))
-                oktogo = False
-
-        if not oktogo:
-            return
+        raise Exception('removed')
 
         # Handle component mappings
         for m in cnf.value_list("ComponentMappings"):
@@ -960,695 +375,12 @@ class Upload(object):
                 entry["original component"] = source
                 entry["component"] = dest
 
-        # Ensure the component is valid for the target suite
-        if entry["component"] not in get_component_names(session):
-            self.rejects.append("unknown component `%s' for suite `%s'." % (entry["component"], suite))
-            return
-
-        # Validate the component
-        if not get_component(entry["component"], session):
-            self.rejects.append("file '%s' has unknown component '%s'." % (f, entry["component"]))
-            return
-
-        # See if the package is NEW
-        if not self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), f, session):
-            entry["new"] = 1
-
-        # Validate the priority
-        if entry["priority"].find('/') != -1:
-            self.rejects.append("file '%s' has invalid priority '%s' [contains '/']." % (f, entry["priority"]))
-
-        # Determine the location
-        location = cnf["Dir::Pool"]
-        l = get_location(location, entry["component"], session=session)
-        if l is None:
-            self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s)" % entry["component"])
-            entry["location id"] = -1
-        else:
-            entry["location id"] = l.location_id
-
-        # Check the md5sum & size against existing files (if any)
-        entry["pool name"] = utils.poolify(self.pkg.changes["source"], entry["component"])
-
-        found, poolfile = check_poolfile(os.path.join(entry["pool name"], f),
-                                         entry["size"], entry["md5sum"], entry["location id"])
-
-        if found is None:
-            self.rejects.append("INTERNAL ERROR, get_files_id() returned multiple matches for %s." % (f))
-        elif found is False and poolfile is not None:
-            self.rejects.append("md5sum and/or size mismatch on existing copy of %s." % (f))
-        else:
-            if poolfile is None:
-                entry["files id"] = None
-            else:
-                entry["files id"] = poolfile.file_id
-
-        # Check for packages that have moved from one component to another
-        entry['suite'] = suite
-        arch_list = [entry["architecture"], 'all']
-        component = get_component_by_package_suite(self.pkg.files[f]['package'], \
-            [suite], arch_list = arch_list, session = session)
-        if component is not None:
-            entry["othercomponents"] = component
-
-    def check_files(self, action=True):
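-        """Copy the upload into the holding area (when action is set) and run
-        the per-file and per-suite checks, appending to self.rejects and
-        self.warnings as appropriate."""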
-        file_keys = self.pkg.files.keys()
-        holding = Holding()
-        cnf = Config()
-
-        if action:
-            cwd = os.getcwd()
-            os.chdir(self.pkg.directory)
-            for f in file_keys:
-                ret = holding.copy_to_holding(f)
-                if ret is not None:
-                    self.warnings.append('Could not copy %s to holding; will attempt to find in DB later' % f)
-
-            os.chdir(cwd)
-
-        # Check whether the changes file is already known to us
-        # [NB: this check must be done post-suite mapping]
-        base_filename = os.path.basename(self.pkg.changes_file)
-
-        session = DBConn().session()
-
-        try:
-            dbc = session.query(DBChange).filter_by(changesname=base_filename).one()
-            # if in the pool or in a queue other than unchecked, reject
-            if (dbc.in_queue is None) \
-                   or (dbc.in_queue is not None
-                       and dbc.in_queue.queue_name not in ["unchecked", "newstage"]):
-                self.rejects.append("%s file already known to dak" % base_filename)
-        except NoResultFound as e:
-            # not known, good
-            pass
-
-        has_binaries = False
-        has_source = False
-
-        for f, entry in self.pkg.files.items():
-            # Ensure the file does not already exist in one of the accepted directories
-            # TODO: Dynamically generate this list
-            for queue_name in [ "byhand", "new", "proposedupdates", "oldproposedupdates", "embargoed", "unembargoed" ]:
-                queue = get_policy_queue(queue_name, session)
-                if queue and os.path.exists(os.path.join(queue.path, f)):
-                    self.rejects.append("%s file already exists in the %s queue." % (f, queue_name))
-
-            if not re_taint_free.match(f):
-                self.rejects.append("!!WARNING!! tainted filename: '%s'." % (f))
-
-            # Check the file is readable
-            if os.access(f, os.R_OK) == 0:
-                # When running in -n, copy_to_holding() won't have
-                # generated the reject_message, so we need to.
-                if action:
-                    if os.path.exists(f):
-                        self.rejects.append("Can't read `%s'. [permission denied]" % (f))
-                    else:
-                        # Don't directly reject, mark to check later to deal with orig's
-                        # we can find in the pool
-                        self.later_check_files.append(f)
-                entry["type"] = "unreadable"
-                continue
-
-            # If it's byhand skip remaining checks
-            if entry["section"] == "byhand" or entry["section"][:4] == "raw-":
-                entry["byhand"] = 1
-                entry["type"] = "byhand"
-
-            # Checks for a binary package...
-            elif re_isadeb.match(f):
-                has_binaries = True
-                entry["type"] = "deb"
-
-                # This routine appends to self.rejects/warnings as appropriate
-                self.binary_file_checks(f, session)
-
-            # Checks for a source package...
-            elif re_issource.match(f):
-                has_source = True
-
-                # This routine appends to self.rejects/warnings as appropriate
-                self.source_file_checks(f, session)
-
-            # Not a binary or source package?  Assume byhand...
-            else:
-                entry["byhand"] = 1
-                entry["type"] = "byhand"
-
-            # Per-suite file checks
-            entry["oldfiles"] = {}
-            for suite in self.pkg.changes["distribution"].keys():
-                self.per_suite_file_checks(f, suite, session)
-
-        session.close()
-
-        # If the .changes file says it has source, it must have source.
-        if self.pkg.changes["architecture"].has_key("source"):
-            if not has_source:
-                self.rejects.append("no source found and Architecture line in changes mention source.")
-
-            if (not has_binaries) and (not cnf.find_b("Dinstall::AllowSourceOnlyUploads")):
-                self.rejects.append("source only uploads are not supported.")
-
-    ###########################################################################
-
-    def __dsc_filename(self):
-        """
-        Returns: (Status, Dsc_Filename)
-        where
-          Status: Boolean; True when there was no error, False otherwise
-          Dsc_Filename: String; name of the dsc file if Status is True, reason for the error otherwise
-        """
-        dsc_filename = None
-
-        # find the dsc
-        for name, entry in self.pkg.files.items():
-            if entry.has_key("type") and entry["type"] == "dsc":
-                if dsc_filename:
-                    return False, "cannot process a .changes file with multiple .dsc's."
-                else:
-                    dsc_filename = name
-
-        if not dsc_filename:
-            return False, "source uploads must contain a dsc file"
-
-        return True, dsc_filename
-
-    def load_dsc(self, action=True, signing_rules=1):
-        """
-        Find and load the dsc from self.pkg.files into self.dsc
-
-        Returns: (Status, Reason)
-        where
-          Status: Boolean; True when there was no error, False otherwise
-          Reason: String; When Status is False this describes the error
-        """
-
-        # find the dsc
-        (status, dsc_filename) = self.__dsc_filename()
-        if not status:
-            # If status is false, dsc_filename has the reason
-            return False, dsc_filename
-
-        try:
-            self.pkg.dsc.update(utils.parse_changes(dsc_filename, signing_rules=signing_rules, dsc_file=1))
-        except CantOpenError:
-            if not action:
-                return False, "%s: can't read file." % (dsc_filename)
-        except ParseChangesError as line:
-            return False, "%s: parse error, can't grok: %s." % (dsc_filename, line)
-        except InvalidDscError as line:
-            return False, "%s: syntax error on line %s." % (dsc_filename, line)
-        except ChangesUnicodeError:
-            return False, "%s: dsc file not proper utf-8." % (dsc_filename)
-
-        return True, None
-
-    ###########################################################################
-
-    def check_dsc(self, action=True, session=None):
-        """Returns bool indicating whether or not the source changes are valid"""
-        # Ensure there is source to check
-        if not self.pkg.changes["architecture"].has_key("source"):
-            return True
-
-        if session is None:
-            session = DBConn().session()
-
-        (status, reason) = self.load_dsc(action=action)
-        if not status:
-            self.rejects.append(reason)
-            return False
-        (status, dsc_filename) = self.__dsc_filename()
-        if not status:
-            # If status is false, dsc_filename has the reason
-            self.rejects.append(dsc_filename)
-            return False
-
-        # Build up the file list of files mentioned by the .dsc
-        try:
-            self.pkg.dsc_files.update(utils.build_file_list(self.pkg.dsc, is_a_dsc=1))
-        except NoFilesFieldError:
-            self.rejects.append("%s: no Files: field." % (dsc_filename))
-            return False
-        except UnknownFormatError as format:
-            self.rejects.append("%s: unknown format '%s'." % (dsc_filename, format))
-            return False
-        except ParseChangesError as line:
-            self.rejects.append("%s: parse error, can't grok: %s." % (dsc_filename, line))
-            return False
-
-        # Enforce mandatory fields
-        for i in ("format", "source", "version", "binary", "maintainer", "architecture", "files"):
-            if not self.pkg.dsc.has_key(i):
-                self.rejects.append("%s: missing mandatory field `%s'." % (dsc_filename, i))
-                return False
-
-        # Validate the source and version fields
-        if not re_valid_pkg_name.match(self.pkg.dsc["source"]):
-            self.rejects.append("%s: invalid source name '%s'." % (dsc_filename, self.pkg.dsc["source"]))
-        if not re_valid_version.match(self.pkg.dsc["version"]):
-            self.rejects.append("%s: invalid version number '%s'." % (dsc_filename, self.pkg.dsc["version"]))
-
-        # Only a limited set of source formats is allowed in each suite
-        for dist in self.pkg.changes["distribution"].keys():
-            suite = get_suite(dist, session=session)
-            if not suite:
-                self.rejects.append("%s: cannot find suite %s when checking source formats" % (dsc_filename, dist))
-                continue
-            allowed = [ x.format_name for x in suite.srcformats ]
-            if self.pkg.dsc["format"] not in allowed:
-                self.rejects.append("%s: source format '%s' not allowed in %s (accepted: %s) " % (dsc_filename, self.pkg.dsc["format"], dist, ", ".join(allowed)))
-
-        # Validate the Maintainer field
-        try:
-            # We ignore the return value
-            fix_maintainer(self.pkg.dsc["maintainer"])
-        except ParseMaintError as msg:
-            self.rejects.append("%s: Maintainer field ('%s') failed to parse: %s" \
-                                 % (dsc_filename, self.pkg.dsc["maintainer"], msg))
-
-        # Validate the build-depends field(s)
-        for field_name in [ "build-depends", "build-depends-indep" ]:
-            field = self.pkg.dsc.get(field_name)
-            if field:
-                # Have apt try to parse them...
-                try:
-                    apt_pkg.parse_src_depends(field)
-                except:
-                    self.rejects.append("%s: invalid %s field (can not be parsed by apt)." % (dsc_filename, field_name.title()))
-
-        # Ensure the version number in the .dsc matches the version number in the .changes
-        epochless_dsc_version = re_no_epoch.sub('', self.pkg.dsc["version"])
-        changes_version = self.pkg.files[dsc_filename]["version"]
-
-        if epochless_dsc_version != self.pkg.files[dsc_filename]["version"]:
-            self.rejects.append("version ('%s') in .dsc does not match version ('%s') in .changes." % (epochless_dsc_version, changes_version))
-
-        # Ensure the Files field contains only what's expected
-        self.rejects.extend(check_dsc_files(dsc_filename, self.pkg.dsc, self.pkg.dsc_files))
-
-        # Ensure source is newer than existing source in target suites
-        session = DBConn().session()
-        self.check_source_against_db(dsc_filename, session)
-        self.check_dsc_against_db(dsc_filename, session)
-
-        dbchg = get_dbchange(self.pkg.changes_file, session)
-
-        # Finally, check if we're missing any files
-        for f in self.later_check_files:
-            print 'XXX: %s' % f
-            # Check if we've already processed this file if we have a dbchg object
-            ok = False
-            if dbchg:
-                for pf in dbchg.files:
-                    if pf.filename == f and pf.processed:
-                        self.notes.append('%s was already processed so we can go ahead' % f)
-                        ok = True
-                        del self.pkg.files[f]
-            if not ok:
-                self.rejects.append("Could not find file %s references in changes" % f)
-
-        session.close()
-
-        return (len(self.rejects) == 0)
-
-    ###########################################################################
-
-    def get_changelog_versions(self, source_dir):
-        """Extracts a the source package and (optionally) grabs the
-        version history out of debian/changelog for the BTS."""
-
-        cnf = Config()
-
-        # Find the .dsc (again)
-        dsc_filename = None
-        for f in self.pkg.files.keys():
-            if self.pkg.files[f]["type"] == "dsc":
-                dsc_filename = f
-
-        # If there isn't one, we have nothing to do. (We have reject()ed the upload already)
-        if not dsc_filename:
-            return
-
-        # Create a symlink mirror of the source files in our temporary directory
-        for f in self.pkg.files.keys():
-            m = re_issource.match(f)
-            if m:
-                src = os.path.join(source_dir, f)
-                # If a file is missing for whatever reason, give up.
-                if not os.path.exists(src):
-                    return
-                ftype = m.group(3)
-                if re_is_orig_source.match(f) and self.pkg.orig_files.has_key(f) and \
-                   self.pkg.orig_files[f].has_key("path"):
-                    continue
-                dest = os.path.join(os.getcwd(), f)
-                os.symlink(src, dest)
-
-        # If the orig files are not a part of the upload, create symlinks to the
-        # existing copies.
-        for orig_file in self.pkg.orig_files.keys():
-            if not self.pkg.orig_files[orig_file].has_key("path"):
-                continue
-            dest = os.path.join(os.getcwd(), os.path.basename(orig_file))
-            os.symlink(self.pkg.orig_files[orig_file]["path"], dest)
-
-        # Extract the source
-        try:
-            unpacked = UnpackedSource(dsc_filename)
-        except Exception as e:
-            self.rejects.append("'dpkg-source -x' failed for %s. (%s)" % (dsc_filename, str(e)))
-            return
-
-        if not cnf.find("Dir::BTSVersionTrack"):
-            return
-
-        # Get the upstream version
-        upstr_version = re_no_epoch.sub('', self.pkg.dsc["version"])
-        if re_strip_revision.search(upstr_version):
-            upstr_version = re_strip_revision.sub('', upstr_version)
-
-        # Ensure the changelog file exists
-        changelog_file = unpacked.get_changelog_file()
-        if changelog_file is None:
-            self.rejects.append("%s: debian/changelog not found in extracted source." % (dsc_filename))
-            return
-
-        # Parse the changelog
-        self.pkg.dsc["bts changelog"] = ""
-        for line in changelog_file.readlines():
-            m = re_changelog_versions.match(line)
-            if m:
-                self.pkg.dsc["bts changelog"] += line
-        changelog_file.close()
-        unpacked.cleanup()
-
-        # Check we found at least one revision in the changelog
-        if not self.pkg.dsc["bts changelog"]:
-            self.rejects.append("%s: changelog format not recognised (empty version tree)." % (dsc_filename))
-
-    def check_source(self):
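-        """Unpack the source in a temporary directory to collect the changelog
-        version history, then clean the temporary tree up again."""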
-        # Bail out if:
-        #    a) there's no source
-        if not self.pkg.changes["architecture"].has_key("source"):
-            return
-
-        tmpdir = utils.temp_dirname()
-
-        # Move into the temporary directory
-        cwd = os.getcwd()
-        os.chdir(tmpdir)
-
-        # Get the changelog version history
-        self.get_changelog_versions(cwd)
-
-        # Move back and cleanup the temporary tree
-        os.chdir(cwd)
-
-        try:
-            shutil.rmtree(tmpdir)
-        except OSError as e:
-            if e.errno != errno.EACCES:
-                print "foobar"
-                utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
-
-            self.rejects.append("%s: source tree could not be cleanly removed." % (self.pkg.dsc["source"]))
-            # We probably have u-r or u-w directories so chmod everything
-            # and try again.
-            cmd = "chmod -R u+rwx %s" % (tmpdir)
-            result = os.system(cmd)
-            if result != 0:
-                utils.fubar("'%s' failed with result %s." % (cmd, result))
-            shutil.rmtree(tmpdir)
-        except Exception as e:
-            print "foobar2 (%s)" % e
-            utils.fubar("%s: couldn't remove tmp dir for source tree." % (self.pkg.dsc["source"]))
-
-    ###########################################################################
-    def ensure_hashes(self):
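-        """Copy the Checksums-* fields from the original .changes, reject any
-        unsupported hash fields and verify (or calculate) the known hashes for
-        the .changes and .dsc."""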
-        # Make sure we recognise the format of the Files: field in the .changes
-        format = self.pkg.changes.get("format", "0.0").split(".", 1)
-        if len(format) == 2:
-            format = int(format[0]), int(format[1])
-        else:
-            format = int(float(format[0])), 0
-
-        # We need to deal with the original changes blob, as the fields we need
-        # might not be in the changes dict serialised into the .dak anymore.
-        orig_changes = utils.parse_deb822(self.pkg.changes['filecontents'])
-
-        # Copy the checksums over to the current changes dict.  This will keep
-        # the existing modifications to it intact.
-        for field in orig_changes:
-            if field.startswith('checksums-'):
-                self.pkg.changes[field] = orig_changes[field]
-
-        # Check for unsupported hashes
-        for j in utils.check_hash_fields(".changes", self.pkg.changes):
-            self.rejects.append(j)
-
-        for j in utils.check_hash_fields(".dsc", self.pkg.dsc):
-            self.rejects.append(j)
-
-        # If the changes format version predates a given hash, we have to calculate
-        # that hash ourselves rather than require it to exist in the changes file
-        for hashname, hashfunc, version in utils.known_hashes:
-            # TODO: Move _ensure_changes_hash into this class
-            for j in utils._ensure_changes_hash(self.pkg.changes, format, version, self.pkg.files, hashname, hashfunc):
-                self.rejects.append(j)
-            if "source" in self.pkg.changes["architecture"]:
-                # TODO: Move _ensure_dsc_hash into this class
-                for j in utils._ensure_dsc_hash(self.pkg.dsc, self.pkg.dsc_files, hashname, hashfunc):
-                    self.rejects.append(j)
-
-    def check_hashes(self):
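-        """Verify the md5sums and sizes of the files listed in the .changes and
-        .dsc, then run ensure_hashes() for the remaining checksum fields."""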
-        for m in utils.check_hash(".changes", self.pkg.files, "md5", apt_pkg.md5sum):
-            self.rejects.append(m)
-
-        for m in utils.check_size(".changes", self.pkg.files):
-            self.rejects.append(m)
-
-        for m in utils.check_hash(".dsc", self.pkg.dsc_files, "md5", apt_pkg.md5sum):
-            self.rejects.append(m)
-
-        for m in utils.check_size(".dsc", self.pkg.dsc_files):
-            self.rejects.append(m)
-
-        self.ensure_hashes()
-
-    ###########################################################################
-
-    def ensure_orig(self, target_dir='.', session=None):
-        """
-        Ensures that all orig files mentioned in the changes file are present
-        in target_dir. If they do not exist, they are symlinked into place.
-
-        A list of the symlinks that were created is returned (so they
-        can be removed).
-        """
-
-        symlinked = []
-        cnf = Config()
-
-        for filename, entry in self.pkg.dsc_files.iteritems():
-            if not re_is_orig_source.match(filename):
-                # File is not an orig; ignore
-                continue
-
-            if os.path.exists(filename):
-                # File exists, no need to continue
-                continue
-
-            def symlink_if_valid(path):
-                f = utils.open_file(path)
-                md5sum = apt_pkg.md5sum(f)
-                f.close()
-
-                fingerprint = (os.stat(path)[stat.ST_SIZE], md5sum)
-                expected = (int(entry['size']), entry['md5sum'])
-
-                if fingerprint != expected:
-                    return False
-
-                dest = os.path.join(target_dir, filename)
-
-                os.symlink(path, dest)
-                symlinked.append(dest)
-
-                return True
-
-            session_ = session
-            if session is None:
-                session_ = DBConn().session()
-
-            found = False
-
-            # Look in the pool
-            for poolfile in get_poolfile_like_name('%s' % filename, session_):
-                poolfile_path = os.path.join(
-                    poolfile.location.path, poolfile.filename
-                )
-
-                if symlink_if_valid(poolfile_path):
-                    found = True
-                    break
-
-            if session is None:
-                session_.close()
-
-            if found:
-                continue
-
-            # Look in some other queues for the file
-            queue_names = ['new', 'byhand',
-                           'proposedupdates', 'oldproposedupdates',
-                           'embargoed', 'unembargoed']
-
-            for queue_name in queue_names:
-                queue = get_policy_queue(queue_name, session)
-                if not queue:
-                    continue
-
-                queuefile_path = os.path.join(queue.path, filename)
-
-                if not os.path.exists(queuefile_path):
-                    # Does not exist in this queue
-                    continue
-
-                if symlink_if_valid(queuefile_path):
-                    break
-
-        return symlinked
-
-    ###########################################################################
-
-    def check_lintian(self):
-        """
-        Extends self.rejects by checking the output of lintian against tags
-        specified in Dinstall::LintianTags.
-        """
-
-        cnf = Config()
-
-        # Don't reject binary uploads
-        if not self.pkg.changes['architecture'].has_key('source'):
-            return
-
-        # Only check some distributions
-        for dist in ('unstable', 'experimental'):
-            if dist in self.pkg.changes['distribution']:
-                break
-        else:
-            return
-
-        # If we do not have a tagfile, don't do anything
-        tagfile = cnf.get("Dinstall::LintianTags")
-        if not tagfile:
-            return
-
-        # Parse the yaml file
-        sourcefile = file(tagfile, 'r')
-        sourcecontent = sourcefile.read()
-        sourcefile.close()
-
-        try:
-            lintiantags = yaml.load(sourcecontent)['lintian']
-        except yaml.YAMLError as msg:
-            utils.fubar("Can not read the lintian tags file %s, YAML error: %s." % (tagfile, msg))
-            return
-
-        # Try to find all orig files mentioned in the .dsc
-        symlinked = self.ensure_orig()
-
-        # Setup the input file for lintian
-        fd, temp_filename = utils.temp_filename()
-        temptagfile = os.fdopen(fd, 'w')
-        for tags in lintiantags.values():
-            temptagfile.writelines(['%s\n' % x for x in tags])
-        temptagfile.close()
-
-        try:
-            cmd = "lintian --show-overrides --tags-from-file %s %s" % \
-                (temp_filename, self.pkg.changes_file)
-
-            result, output = commands.getstatusoutput(cmd)
-        finally:
-            # Remove our tempfile and any symlinks we created
-            os.unlink(temp_filename)
-
-            for symlink in symlinked:
-                os.unlink(symlink)
-
-        if result == 2:
-            utils.warn("lintian failed for %s [return code: %s]." % \
-                (self.pkg.changes_file, result))
-            utils.warn(utils.prefix_multi_line_string(output, \
-                " [possible output:] "))
-
-        def log(*txt):
-            if self.logger:
-                self.logger.log(
-                    [self.pkg.changes_file, "check_lintian"] + list(txt)
-                )
-
-        # Generate messages
-        parsed_tags = parse_lintian_output(output)
-        self.rejects.extend(
-            generate_reject_messages(parsed_tags, lintiantags, log=log)
-        )
-
-    ###########################################################################
-    def check_urgency(self):
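-        """For sourceful uploads, normalise the Urgency field, falling back to
-        the configured default when it is missing or invalid."""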
-        cnf = Config()
-        if self.pkg.changes["architecture"].has_key("source"):
-            if not self.pkg.changes.has_key("urgency"):
-                self.pkg.changes["urgency"] = cnf["Urgency::Default"]
-            self.pkg.changes["urgency"] = self.pkg.changes["urgency"].lower()
-            if self.pkg.changes["urgency"] not in cnf.value_list("Urgency::Valid"):
-                self.warnings.append("%s is not a valid urgency; it will be treated as %s by testing." % \
-                                     (self.pkg.changes["urgency"], cnf["Urgency::Default"]))
-                self.pkg.changes["urgency"] = cnf["Urgency::Default"]
-
     ###########################################################################
 
     # Sanity check the time stamps of files inside debs.
     # [Files in the near future cause ugly warnings and extreme time
     #  travel can cause errors on extraction]
 
-    def check_timestamps(self):
-        Cnf = Config()
-
-        future_cutoff = time.time() + int(Cnf["Dinstall::FutureTimeTravelGrace"])
-        past_cutoff = time.mktime(time.strptime(Cnf["Dinstall::PastCutoffYear"],"%Y"))
-        tar = TarTime(future_cutoff, past_cutoff)
-
-        for filename, entry in self.pkg.files.items():
-            if entry["type"] == "deb":
-                tar.reset()
-                try:
-                    deb = apt_inst.DebFile(filename)
-                    deb.control.go(tar.callback)
-
-                    future_files = tar.future_files.keys()
-                    if future_files:
-                        num_future_files = len(future_files)
-                        future_file = future_files[0]
-                        future_date = tar.future_files[future_file]
-                        self.rejects.append("%s: has %s file(s) with a time stamp too far into the future (e.g. %s [%s])."
-                               % (filename, num_future_files, future_file, time.ctime(future_date)))
-
-                    ancient_files = tar.ancient_files.keys()
-                    if ancient_files:
-                        num_ancient_files = len(ancient_files)
-                        ancient_file = ancient_files[0]
-                        ancient_date = tar.ancient_files[ancient_file]
-                        self.rejects.append("%s: has %s file(s) with a time stamp too ancient (e.g. %s [%s])."
-                               % (filename, num_ancient_files, ancient_file, time.ctime(ancient_date)))
-                except:
-                    self.rejects.append("%s: deb contents timestamp check failed [%s: %s]" % (filename, sys.exc_info()[0], sys.exc_info()[1]))
-
     def check_if_upload_is_sponsored(self, uid_email, uid_name):
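         """Return whether this upload appears to be sponsored, i.e. signed by
         someone other than the Maintainer or Changed-By."""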
         for key in "maintaineremail", "changedbyemail", "maintainername", "changedbyname":
             if not self.pkg.changes.has_key(key):
@@ -1674,164 +406,9 @@ class Upload(object):
 
         return sponsored
 
-
-    ###########################################################################
-    # check_signed_by_key checks
-    ###########################################################################
-
-    def check_signed_by_key(self):
-        """Ensure the .changes is signed by an authorized uploader."""
-        session = DBConn().session()
-
-        # First of all we check that the person has proper upload permissions
-        # and that this upload isn't blocked
-        fpr = get_fingerprint(self.pkg.changes['fingerprint'], session=session)
-
-        if fpr is None:
-            self.rejects.append("Cannot find fingerprint %s" % self.pkg.changes["fingerprint"])
-            return
-
-        # TODO: Check that import-keyring adds UIDs properly
-        if not fpr.uid:
-            self.rejects.append("Cannot find uid for fingerprint %s.  Please contact ftpmaster@debian.org" % fpr.fingerprint)
-            return
-
-        # Check that the fingerprint which uploaded has permission to do so
-        self.check_upload_permissions(fpr, session)
-
-        # Check that this package is not in a transition
-        self.check_transition(session)
-
-        session.close()
-
-
-    def check_upload_permissions(self, fpr, session):
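-        """Check that the signing fingerprint may upload the source and binary
-        packages in this upload, handling DM keys as a special case."""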
-        # Check any one-off upload blocks
-        self.check_upload_blocks(fpr, session)
-
-        # If the source_acl is None, source is never allowed
-        if fpr.source_acl is None:
-            if self.pkg.changes["architecture"].has_key("source"):
-                rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
-                rej += '\nPlease contact ftpmaster if you think this is incorrect'
-                self.rejects.append(rej)
-                return
-        # DM is a special case unfortunately, so we check it first
-        # (keys with no source access get more access than DMs in one
-        #  way; DMs can only upload for their packages whether source
-        #  or binary, whereas keys with no access might be able to
-        #  upload some binaries)
-        elif fpr.source_acl.access_level == 'dm':
-            self.check_dm_upload(fpr, session)
-        else:
-            # If not a DM, we allow full upload rights
-            uid_email = "%s@debian.org" % (fpr.uid.uid)
-            self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
-
-
-        # Check binary upload permissions
-        # By this point we know that DMs can't have got here unless they
-        # are allowed to deal with the package concerned so just apply
-        # normal checks
-        if fpr.binary_acl.access_level == 'full':
-            return
-
-        # Otherwise we're in the map case
-        tmparches = self.pkg.changes["architecture"].copy()
-        tmparches.pop('source', None)
-
-        for bam in fpr.binary_acl_map:
-            tmparches.pop(bam.architecture.arch_string, None)
-
-        if len(tmparches.keys()) > 0:
-            if fpr.binary_reject:
-                rej = "changes file contains files of architectures not permitted for fingerprint %s" % fpr.fingerprint
-                if len(tmparches.keys()) == 1:
-                    rej += "\n\narchitecture involved is: %s" % ",".join(tmparches.keys())
-                else:
-                    rej += "\n\narchitectures involved are: %s" % ",".join(tmparches.keys())
-                self.rejects.append(rej)
-            else:
-                # TODO: This is where we'll implement reject vs throw away binaries later
-                rej = "Uhm.  I'm meant to throw away the binaries now but that's not implemented yet"
-                rej += "\nPlease complain to ftpmaster@debian.org as this shouldn't have been turned on"
-                rej += "\nFingerprint: %s", (fpr.fingerprint)
-                self.rejects.append(rej)
-
-
-    def check_upload_blocks(self, fpr, session):
-        """Check whether any upload blocks apply to this source, source
-           version, uid / fpr combination"""
-
-        def block_rej_template(fb):
-            rej = 'Manual upload block in place for package %s' % fb.source
-            if fb.version is not None:
-                rej += ', version %s' % fb.version
-            return rej
-
-        for fb in session.query(UploadBlock).filter_by(source = self.pkg.changes['source']).all():
-            # version is None if the block applies to all versions
-            if fb.version is None or fb.version == self.pkg.changes['version']:
-                # Check both fpr and uid - either is enough to cause a reject
-                if fb.fpr is not None:
-                    if fb.fpr.fingerprint == fpr.fingerprint:
-                        self.rejects.append(block_rej_template(fb) + ' for fingerprint %s\nReason: %s' % (fpr.fingerprint, fb.reason))
-                if fb.uid is not None:
-                    if fb.uid == fpr.uid:
-                        self.rejects.append(block_rej_template(fb) + ' for uid %s\nReason: %s' % (fb.uid.uid, fb.reason))
-
-
     def check_dm_upload(self, fpr, session):
         # Quoth the GR (http://www.debian.org/vote/2007/vote_003):
         ## none of the uploaded packages are NEW
-        rej = False
-        for f in self.pkg.files.keys():
-            if self.pkg.files[f].has_key("byhand"):
-                self.rejects.append("%s may not upload BYHAND file %s" % (fpr.uid.uid, f))
-                rej = True
-            if self.pkg.files[f].has_key("new"):
-                self.rejects.append("%s may not upload NEW file %s" % (fpr.uid.uid, f))
-                rej = True
-
-        if rej:
-            return
-
-        r = get_newest_source(self.pkg.changes["source"], session)
-
-        if r is None:
-            rej = "Could not find existing source package %s in the DM allowed suites and this is a DM upload" % self.pkg.changes["source"]
-            self.rejects.append(rej)
-            return
-
-        if not r.dm_upload_allowed:
-            rej = "Source package %s does not have 'DM-Upload-Allowed: yes' in its most recent version (%s)" % (self.pkg.changes["source"], r.version)
-            self.rejects.append(rej)
-            return
-
-        ## the Maintainer: field of the uploaded .changes file corresponds with
-        ## the owner of the key used (ie, non-developer maintainers may not sponsor
-        ## uploads)
-        if self.check_if_upload_is_sponsored(fpr.uid.uid, fpr.uid.name):
-            self.rejects.append("%s (%s) is not authorised to sponsor uploads" % (fpr.uid.uid, fpr.fingerprint))
-
-        ## the most recent version of the package uploaded to unstable or
-        ## experimental lists the uploader in the Maintainer: or Uploaders: fields (ie,
-        ## non-developer maintainers cannot NMU or hijack packages)
-
-        # uploader includes the maintainer
-        accept = False
-        for uploader in r.uploaders:
-            (rfc822, rfc2047, name, email) = uploader.get_split_maintainer()
-            # Eww - I hope we never have two people with the same name in Debian
-            if email == fpr.uid.uid or name == fpr.uid.name:
-                accept = True
-                break
-
-        if not accept:
-            self.rejects.append("%s is not in Maintainer or Uploaders of source package %s" % (fpr.uid.uid, self.pkg.changes["source"]))
-            return
-
         ## none of the packages are being taken over from other source packages
         for b in self.pkg.changes["binary"].keys():
             for suite in self.pkg.changes["distribution"].keys():
@@ -1839,79 +416,6 @@ class Upload(object):
                     if s.source != self.pkg.changes["source"]:
                         self.rejects.append("%s may not hijack %s from source package %s in suite %s" % (fpr.uid.uid, b, s, suite))
 
-
-
-    def check_transition(self, session):
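-        """Reject sourceful uploads to unstable of packages that are part of an
-        ongoing release transition (Dinstall::ReleaseTransitions)."""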
-        cnf = Config()
-
-        sourcepkg = self.pkg.changes["source"]
-
-        # No sourceful upload -> no need to do anything else, direct return
-        # We only act on uploads to unstable, not experimental or those going to some
-        # proposed-updates queue
-        if "source" not in self.pkg.changes["architecture"] or \
-           "unstable" not in self.pkg.changes["distribution"]:
-            return
-
-        # Also, only check if a transitions file is defined and actually exists.
-        transpath = cnf.get("Dinstall::ReleaseTransitions", "")
-        if transpath == "" or not os.path.exists(transpath):
-            return
-
-        # Parse the yaml file
-        sourcefile = file(transpath, 'r')
-        sourcecontent = sourcefile.read()
-        try:
-            transitions = yaml.load(sourcecontent)
-        except yaml.YAMLError as msg:
-            # This shouldn't happen, there is a wrapper to edit the file which
-            # checks it, but we prefer to be safe rather than end up rejecting
-            # everything.
-            utils.warn("Not checking transitions, the transitions file is broken: %s." % (msg))
-            return
-
-        # Now look through all defined transitions
-        for trans in transitions:
-            t = transitions[trans]
-            source = t["source"]
-            expected = t["new"]
-
-            # Will be None if nothing is in testing.
-            current = get_source_in_suite(source, "testing", session)
-            if current is not None:
-                compare = apt_pkg.version_compare(current.version, expected)
-
-            if current is None or compare < 0:
-                # This is still valid, the current version in testing is older than
-                # the new version we wait for, or there is none in testing yet
-
-                # Check if the source we look at is affected by this.
-                if sourcepkg in t['packages']:
-                    # The source is affected, let's reject it.
-
-                    rejectmsg = "%s: part of the %s transition.\n\n" % (
-                        sourcepkg, trans)
-
-                    if current is not None:
-                        currentlymsg = "at version %s" % (current.version)
-                    else:
-                        currentlymsg = "not present in testing"
-
-                    rejectmsg += "Transition description: %s\n\n" % (t["reason"])
-
-                    rejectmsg += "\n".join(textwrap.wrap("""Your package
-is part of a testing transition designed to get %s migrated (it is
-currently %s, we need version %s).  This transition is managed by the
-Release Team, and %s is the Release-Team member responsible for it.
-Please mail debian-release@lists.debian.org or contact %s directly if you
-need further assistance.  You might want to upload to experimental until this
-transition is done."""
-                            % (source, currentlymsg, expected,t["rm"], t["rm"])))
-
-                    self.rejects.append(rejectmsg)
-                    return
-
     ###########################################################################
     # End check_signed_by_key checks
     ###########################################################################
@@ -1936,61 +440,6 @@ transition is done."""
 
     ###########################################################################
 
-    def close_bugs(self, summary, action):
-        """
-        Send mail to close bugs as instructed by the closes field in the changes file.
-        Also add a line to summary if any work was done.
-
-        @type summary: string
-        @param summary: summary text, as given by L{build_summaries}
-
-        @type action: bool
-        @param action: If set to false, no real action will be done.
-
-        @rtype: string
-        @return: summary. If action was taken, extended by the list of closed bugs.
-
-        """
-
-        template = os.path.join(Config()["Dir::Templates"], 'process-unchecked.bug-close')
-
-        bugs = self.pkg.changes["closes"].keys()
-
-        if not bugs:
-            return summary
-
-        bugs.sort()
-        summary += "Closing bugs: "
-        for bug in bugs:
-            summary += "%s " % (bug)
-            if action:
-                self.update_subst()
-                self.Subst["__BUG_NUMBER__"] = bug
-                if self.pkg.changes["distribution"].has_key("stable"):
-                    self.Subst["__STABLE_WARNING__"] = """
-Note that this package is not part of the released stable Debian
-distribution.  It may have dependencies on other unreleased software,
-or other instabilities.  Please take care if you wish to install it.
-The update will eventually make its way into the next released Debian
-distribution."""
-                else:
-                    self.Subst["__STABLE_WARNING__"] = ""
-                mail_message = utils.TemplateSubst(self.Subst, template)
-                utils.send_mail(mail_message)
-
-                # Clear up after ourselves
-                del self.Subst["__BUG_NUMBER__"]
-                del self.Subst["__STABLE_WARNING__"]
-
-        if action and self.logger:
-            self.logger.log(["closing bugs"] + bugs)
-
-        summary += "\n"
-
-        return summary
-
-    ###########################################################################
-
     def announce(self, short_summary, action):
         """
         Send an announce mail about a new upload.
@@ -2055,201 +504,6 @@ distribution."""
         return summary
 
     ###########################################################################
-    @session_wrapper
-    def accept (self, summary, short_summary, session=None):
-        """
-        Accept an upload.
-
-        This moves all files referenced from the .changes into the pool,
-        sends the accepted mail, announces to lists, closes bugs and
-        also checks for override disparities. If enabled it will write out
-        the version history for the BTS Version Tracking and will finally call
-        L{queue_build}.
-
-        @type summary: string
-        @param summary: Summary text
-
-        @type short_summary: string
-        @param short_summary: Short summary
-        """
-
-        cnf = Config()
-        stats = SummaryStats()
-
-        print "Installing."
-        self.logger.log(["installing changes", self.pkg.changes_file])
-
-        binaries = []
-        poolfiles = []
-
-        # Add the .dsc file to the DB first
-        for newfile, entry in self.pkg.files.items():
-            if entry["type"] == "dsc":
-                source, dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
-                for j in pfs:
-                    poolfiles.append(j)
-
-        # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
-        for newfile, entry in self.pkg.files.items():
-            if entry["type"] == "deb":
-                b, pf = add_deb_to_db(self, newfile, session)
-                binaries.append(b)
-                poolfiles.append(pf)
-
-        # If this is a sourceful diff only upload that is moving
-        # cross-component we need to copy the .orig files into the new
-        # component too for the same reasons as above.
-        # XXX: mhy: I think this should be in add_dsc_to_db
-        if self.pkg.changes["architecture"].has_key("source"):
-            for orig_file in self.pkg.orig_files.keys():
-                if not self.pkg.orig_files[orig_file].has_key("id"):
-                    continue # Skip if it's not in the pool
-                orig_file_id = self.pkg.orig_files[orig_file]["id"]
-                if self.pkg.orig_files[orig_file]["location"] == dsc_location_id:
-                    continue # Skip if the location didn't change
-
-                # Do the move
-                oldf = get_poolfile_by_id(orig_file_id, session)
-                old_filename = os.path.join(oldf.location.path, oldf.filename)
-                old_dat = {'size': oldf.filesize,   'md5sum': oldf.md5sum,
-                           'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
-
-                new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
-
-                # TODO: Care about size/md5sum collisions etc
-                (found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
-
-                # TODO: Uhm, what happens if newf isn't None - something has gone badly and we should cope
-                if newf is None:
-                    utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
-                    newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
-
-                    session.flush()
-
-                    # Don't reference the old file from this changes
-                    for p in poolfiles:
-                        if p.file_id == oldf.file_id:
-                            poolfiles.remove(p)
-
-                    poolfiles.append(newf)
-
-                    # Fix up the DSC references
-                    toremove = []
-
-                    for df in source.srcfiles:
-                        if df.poolfile.file_id == oldf.file_id:
-                            # Add a new DSC entry and mark the old one for deletion
-                            # Don't do it in the loop so we don't change the thing we're iterating over
-                            newdscf = DSCFile()
-                            newdscf.source_id = source.source_id
-                            newdscf.poolfile_id = newf.file_id
-                            session.add(newdscf)
-
-                            toremove.append(df)
-
-                    for df in toremove:
-                        session.delete(df)
-
-                    # Flush our changes
-                    session.flush()
-
-                    # Make sure that our source object is up-to-date
-                    session.expire(source)
-
-        # Add changelog information to the database
-        self.store_changelog()
-
-        # Install the files into the pool
-        for newfile, entry in self.pkg.files.items():
-            destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
-            utils.move(newfile, destination)
-            self.logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
-            stats.accept_bytes += float(entry["size"])
-
-        # Copy the .changes file across for suites which need it.
-        copy_changes = dict([(x.copychanges, '')
-                             for x in session.query(Suite).filter(Suite.suite_name.in_(self.pkg.changes["distribution"].keys())).all()
-                             if x.copychanges is not None])
-
-        for dest in copy_changes.keys():
-            utils.copy(self.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
-
-        # We're done - commit the database changes
-        session.commit()
-        # Our SQL session will automatically start a new transaction after
-        # the last commit
-
-        # Now ensure that the metadata has been added
-        # This has to be done after we copy the files into the pool
-        # For source if we have it:
-        if self.pkg.changes["architecture"].has_key("source"):
-            import_metadata_into_db(source, session)
-
-        # Now for any of our binaries
-        for b in binaries:
-            import_metadata_into_db(b, session)
-
-        session.commit()
-
-        # Move the .changes into the 'done' directory
-        ye, mo, da = time.gmtime()[0:3]
-        donedir = os.path.join(cnf["Dir::Done"], str(ye), "%0.2d" % mo, "%0.2d" % da)
-        if not os.path.isdir(donedir):
-            os.makedirs(donedir)
-
-        utils.move(self.pkg.changes_file,
-                   os.path.join(donedir, os.path.basename(self.pkg.changes_file)))
-
-        if self.pkg.changes["architecture"].has_key("source"):
-            UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
-
-        self.update_subst()
-        self.Subst["__SUMMARY__"] = summary
-        mail_message = utils.TemplateSubst(self.Subst,
-                                           os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
-        utils.send_mail(mail_message)
-        self.announce(short_summary, 1)
-
-        ## Helper stuff for DebBugs Version Tracking
-        if cnf.find("Dir::BTSVersionTrack"):
-            if self.pkg.changes["architecture"].has_key("source"):
-                (fd, temp_filename) = utils.temp_filename(cnf["Dir::BTSVersionTrack"], prefix=".")
-                version_history = os.fdopen(fd, 'w')
-                version_history.write(self.pkg.dsc["bts changelog"])
-                version_history.close()
-                filename = "%s/%s" % (cnf["Dir::BTSVersionTrack"],
-                                      self.pkg.changes_file[:-8]+".versions")
-                os.rename(temp_filename, filename)
-                os.chmod(filename, 0o644)
-
-            # Write out the binary -> source mapping.
-            (fd, temp_filename) = utils.temp_filename(cnf["Dir::BTSVersionTrack"], prefix=".")
-            debinfo = os.fdopen(fd, 'w')
-            for name, entry in sorted(self.pkg.files.items()):
-                if entry["type"] == "deb":
-                    line = " ".join([entry["package"], entry["version"],
-                                     entry["architecture"], entry["source package"],
-                                     entry["source version"]])
-                    debinfo.write(line+"\n")
-            debinfo.close()
-            filename = "%s/%s" % (cnf["Dir::BTSVersionTrack"],
-                                  self.pkg.changes_file[:-8]+".debinfo")
-            os.rename(temp_filename, filename)
-            os.chmod(filename, 0o644)
-
-        session.commit()
-
-        # Set up our copy queues (e.g. buildd queues)
-        for suite_name in self.pkg.changes["distribution"].keys():
-            suite = get_suite(suite_name, session)
-            for q in suite.copy_queues:
-                for f in poolfiles:
-                    q.add_file_from_pool(f)
-
-        session.commit()
-
-        # Finally...
-        stats.accept_count += 1
 
     def check_override(self):
         """
@@ -2280,250 +534,6 @@ distribution."""
         utils.send_mail(mail_message)
         del self.Subst["__SUMMARY__"]
 
-    ###########################################################################
-
-    def remove(self, from_dir=None):
-        """
-        Used (for instance) in p-u to remove the package from unchecked
-
-        Also removes the package from the holding area.
-        """
-        if from_dir is None:
-            from_dir = self.pkg.directory
-        h = Holding()
-
-        for f in self.pkg.files.keys():
-            os.unlink(os.path.join(from_dir, f))
-            if os.path.exists(os.path.join(h.holding_dir, f)):
-                os.unlink(os.path.join(h.holding_dir, f))
-
-        os.unlink(os.path.join(from_dir, self.pkg.changes_file))
-        if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
-            os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
-
-    ###########################################################################
-
-    def move_to_queue (self, queue):
-        """
-        Move files to a destination queue using the permissions in the table
-        """
-        h = Holding()
-        utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
-                   queue.path, perms=int(queue.change_perms, 8))
-        for f in self.pkg.files.keys():
-            utils.move(os.path.join(h.holding_dir, f), queue.path, perms=int(queue.perms, 8))
-
-    ###########################################################################
-
-    def force_reject(self, reject_files):
-        """
-        Forcefully move files from the current directory to the
-        reject directory.  If any file already exists in the reject
-        directory it will be moved to the morgue to make way for
-        the new file.
-
-        @type reject_files: dict
-        @param reject_files: file dictionary
-
-        """
-
-        cnf = Config()
-
-        for file_entry in reject_files:
-            # Skip any files which don't exist or which we don't have permission to copy.
-            if os.access(file_entry, os.R_OK) == 0:
-                continue
-
-            dest_file = os.path.join(cnf["Dir::Reject"], file_entry)
-
-            try:
-                dest_fd = os.open(dest_file, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o644)
-            except OSError as e:
-                # File exists?  Let's find a new name by adding a number
-                if e.errno == errno.EEXIST:
-                    try:
-                        dest_file = utils.find_next_free(dest_file, 255)
-                    except NoFreeFilenameError:
-                        # Something's either gone badly Pete Tong, or
-                        # someone is trying to exploit us.
-                        utils.warn("**WARNING** failed to find a free filename for %s in %s." % (file_entry, cnf["Dir::Reject"]))
-                        return
-
-                    # Make sure we really got it
-                    try:
-                        dest_fd = os.open(dest_file, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0o644)
-                    except OSError as e:
-                        # Likewise
-                        utils.warn("**WARNING** failed to claim %s in the reject directory." % (file_entry))
-                        return
-                else:
-                    raise
-            # If we got here, we own the destination file, so we can
-            # safely overwrite it.
-            utils.move(file_entry, dest_file, 1, perms=0o660)
-            os.close(dest_fd)
-
-    ###########################################################################
-    def do_reject (self, manual=0, reject_message="", notes=""):
-        """
-        Reject an upload. If called without a reject message or C{manual} is
-        true, spawn an editor so the user can write one.
-
-        @type manual: bool
-        @param manual: manual or automated rejection
-
-        @type reject_message: string
-        @param reject_message: A reject message
-
-        @return: 0
-
-        """
-        # If we weren't given a manual rejection message, spawn an
-        # editor so the user can add one in...
-        if manual and not reject_message:
-            (fd, temp_filename) = utils.temp_filename()
-            temp_file = os.fdopen(fd, 'w')
-            if len(notes) > 0:
-                for note in notes:
-                    temp_file.write("\nAuthor: %s\nVersion: %s\nTimestamp: %s\n\n%s" \
-                                    % (note.author, note.version, note.notedate, note.comment))
-            temp_file.close()
-            editor = os.environ.get("EDITOR","vi")
-            answer = 'E'
-            while answer == 'E':
-                os.system("%s %s" % (editor, temp_filename))
-                temp_fh = utils.open_file(temp_filename)
-                reject_message = "".join(temp_fh.readlines())
-                temp_fh.close()
-                print "Reject message:"
-                print utils.prefix_multi_line_string(reject_message,"  ",include_blank_lines=1)
-                prompt = "[R]eject, Edit, Abandon, Quit ?"
-                answer = "XXX"
-                while prompt.find(answer) == -1:
-                    answer = utils.our_raw_input(prompt)
-                    m = re_default_answer.search(prompt)
-                    if answer == "":
-                        answer = m.group(1)
-                    answer = answer[:1].upper()
-            os.unlink(temp_filename)
-            if answer == 'A':
-                return 1
-            elif answer == 'Q':
-                sys.exit(0)
-
-        print "Rejecting.\n"
-
-        cnf = Config()
-
-        reason_filename = self.pkg.changes_file[:-8] + ".reason"
-        reason_filename = os.path.join(cnf["Dir::Reject"], reason_filename)
-        changesfile = os.path.join(cnf["Dir::Reject"], self.pkg.changes_file)
-
-        # Move all the files into the reject directory
-        reject_files = self.pkg.files.keys() + [self.pkg.changes_file]
-        self.force_reject(reject_files)
-
-        # Change permissions of the .changes file to be world readable
-        try:
-            os.chmod(changesfile, os.stat(changesfile).st_mode | stat.S_IROTH)
-        except OSError as (errno, strerror):
-            # Ignore 'Operation not permitted' error.
-            if errno != 1:
-                raise
-
-        # If we fail here someone is probably trying to exploit the race
-        # so let's just raise an exception ...
-        if os.path.exists(reason_filename):
-            os.unlink(reason_filename)
-        reason_fd = os.open(reason_filename, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0o644)
-
-        rej_template = os.path.join(cnf["Dir::Templates"], "queue.rejected")
-
-        self.update_subst()
-        if not manual:
-            self.Subst["__REJECTOR_ADDRESS__"] = cnf["Dinstall::MyEmailAddress"]
-            self.Subst["__MANUAL_REJECT_MESSAGE__"] = ""
-            self.Subst["__CC__"] = "X-DAK-Rejection: automatic (moo)"
-            os.write(reason_fd, reject_message)
-            reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
-        else:
-            # Build up the rejection email
-            user_email_address = utils.whoami() + " <%s>" % (cnf["Dinstall::MyAdminAddress"])
-            self.Subst["__REJECTOR_ADDRESS__"] = user_email_address
-            self.Subst["__MANUAL_REJECT_MESSAGE__"] = reject_message
-            self.Subst["__REJECT_MESSAGE__"] = ""
-            self.Subst["__CC__"] = "Cc: " + cnf["Dinstall::MyEmailAddress"]
-            reject_mail_message = utils.TemplateSubst(self.Subst, rej_template)
-            # Write the rejection email out as the <foo>.reason file
-            os.write(reason_fd, reject_mail_message)
-
-        del self.Subst["__REJECTOR_ADDRESS__"]
-        del self.Subst["__MANUAL_REJECT_MESSAGE__"]
-        del self.Subst["__CC__"]
-
-        os.close(reason_fd)
-
-        # Send the rejection mail
-        utils.send_mail(reject_mail_message)
-
-        if self.logger:
-            self.logger.log(["rejected", self.pkg.changes_file])
-
-        stats = SummaryStats()
-        stats.reject_count += 1
-        return 0
-
-    ################################################################################
-    def in_override_p(self, package, component, suite, binary_type, filename, session):
-        """
-        Check if a package already has override entries in the DB
-
-        @type package: string
-        @param package: package name
-
-        @type component: string
-        @param component: database id of the component
-
-        @type suite: int
-        @param suite: database id of the suite
-
-        @type binary_type: string
-        @param binary_type: type of the package
-
-        @type filename: string
-        @param filename: filename we check
-
-        @return: the database result. But noone cares anyway.
-
-        """
-
-        cnf = Config()
-
-        if binary_type == "": # must be source
-            file_type = "dsc"
-        else:
-            file_type = binary_type
-
-        # Override suite name; used for example with proposed-updates
-        oldsuite = get_suite(suite, session)
-        if (not oldsuite is None) and oldsuite.overridesuite:
-            suite = oldsuite.overridesuite
-
-        result = get_override(package, suite, component, file_type, session)
-
-        # If checking for a source package fall back on the binary override type
-        if file_type == "dsc" and len(result) < 1:
-            result = get_override(package, suite, component, ['deb', 'udeb'], session)
-
-        # Remember the section and priority so we can check them later if appropriate
-        if len(result) > 0:
-            result = result[0]
-            self.pkg.files[filename]["override section"] = result.section.section
-            self.pkg.files[filename]["override priority"] = result.priority.priority
-            return result
-
-        return None
-
     ################################################################################
     def get_anyversion(self, sv_list, suite):
         """
@@ -2631,220 +641,7 @@ distribution."""
                         self.rejects.append("%s: old version (%s) in %s <= new version (%s) targeted at %s." % (filename, existent_version, suite, new_version, target_suite))
 
     ################################################################################
-    def check_binary_against_db(self, filename, session):
-        # Ensure version is sane
-        self.cross_suite_version_check( \
-            get_suite_version_by_package(self.pkg.files[filename]["package"], \
-                self.pkg.files[filename]["architecture"], session),
-            filename, self.pkg.files[filename]["version"], sourceful=False)
-
-        # Check for any existing copies of the file
-        q = session.query(DBBinary).filter_by(package=self.pkg.files[filename]["package"])
-        q = q.filter_by(version=self.pkg.files[filename]["version"])
-        q = q.join(Architecture).filter_by(arch_string=self.pkg.files[filename]["architecture"])
-
-        if q.count() > 0:
-            self.rejects.append("%s: can not overwrite existing copy already in the archive." % filename)
-
-    ################################################################################
-
-    def check_source_against_db(self, filename, session):
-        source = self.pkg.dsc.get("source")
-        version = self.pkg.dsc.get("version")
-
-        # Ensure version is sane
-        self.cross_suite_version_check( \
-            get_suite_version_by_source(source, session), filename, version,
-            sourceful=True)
-
-    ################################################################################
-    def check_dsc_against_db(self, filename, session):
-        """
-
-        @warning: NB: this function can remove entries from the 'files' index [if
-         the orig tarball is a duplicate of the one in the archive]; if
-         you're iterating over 'files' and call this function as part of
-         the loop, be sure to add a check to the top of the loop to
-         ensure you haven't just tried to dereference the deleted entry.
 
-        """
-
-        Cnf = Config()
-        self.pkg.orig_files = {} # XXX: do we need to clear it?
-        orig_files = self.pkg.orig_files
-
-        # Try and find all files mentioned in the .dsc.  This has
-        # to work harder to cope with the multiple possible
-        # locations of an .orig.tar.gz.
-        # The ordering on the select is needed to pick the newest orig
-        # when it exists in multiple places.
-        for dsc_name, dsc_entry in self.pkg.dsc_files.items():
-            found = None
-            if self.pkg.files.has_key(dsc_name):
-                actual_md5 = self.pkg.files[dsc_name]["md5sum"]
-                actual_size = int(self.pkg.files[dsc_name]["size"])
-                found = "%s in incoming" % (dsc_name)
-
-                # Check the file does not already exist in the archive
-                ql = get_poolfile_like_name(dsc_name, session)
-
-                # Strip out anything that isn't '%s' or '/%s$'
-                for i in ql:
-                    if not i.filename.endswith(dsc_name):
-                        ql.remove(i)
-
-                # "[dak] has not broken them.  [dak] has fixed a
-                # brokenness.  Your crappy hack exploited a bug in
-                # the old dinstall.
-                #
-                # "(Come on!  I thought it was always obvious that
-                # one just doesn't release different files with
-                # the same name and version.)"
-                #                        -- ajk@ on d-devel@l.d.o
-
-                if len(ql) > 0:
-                    # Ignore exact matches for .orig.tar.gz
-                    match = 0
-                    if re_is_orig_source.match(dsc_name):
-                        for i in ql:
-                            if self.pkg.files.has_key(dsc_name) and \
-                               int(self.pkg.files[dsc_name]["size"]) == int(i.filesize) and \
-                               self.pkg.files[dsc_name]["md5sum"] == i.md5sum:
-                                self.warnings.append("ignoring %s, since it's already in the archive." % (dsc_name))
-                                # TODO: Don't delete the entry, just mark it as not needed
-                                # This would fix the stupidity of changing something we often iterate over
-                                # whilst we're doing it
-                                del self.pkg.files[dsc_name]
-                                dsc_entry["files id"] = i.file_id
-                                if not orig_files.has_key(dsc_name):
-                                    orig_files[dsc_name] = {}
-                                orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
-                                match = 1
-
-                                # Don't bitch that we couldn't find this file later
-                                try:
-                                    self.later_check_files.remove(dsc_name)
-                                except ValueError:
-                                    pass
-
-
-                    if not match:
-                        self.rejects.append("can not overwrite existing copy of '%s' already in the archive." % (dsc_name))
-
-            elif re_is_orig_source.match(dsc_name):
-                # Check in the pool
-                ql = get_poolfile_like_name(dsc_name, session)
-
-                # Strip out anything that isn't '%s' or '/%s$'
-                # TODO: Shouldn't we just search for things which end with our string explicitly in the SQL?
-                for i in ql:
-                    if not i.filename.endswith(dsc_name):
-                        ql.remove(i)
-
-                if len(ql) > 0:
-                    # Unfortunately, we may get more than one match here if,
-                    # for example, the package was in potato but had an -sa
-                    # upload in woody.  So we need to choose the right one.
-
-                    # default to something sane in case we don't match any or have only one
-                    x = ql[0]
-
-                    if len(ql) > 1:
-                        for i in ql:
-                            old_file = os.path.join(i.location.path, i.filename)
-                            old_file_fh = utils.open_file(old_file)
-                            actual_md5 = apt_pkg.md5sum(old_file_fh)
-                            old_file_fh.close()
-                            actual_size = os.stat(old_file)[stat.ST_SIZE]
-                            if actual_md5 == dsc_entry["md5sum"] and actual_size == int(dsc_entry["size"]):
-                                x = i
-
-                    old_file = os.path.join(i.location.path, i.filename)
-                    old_file_fh = utils.open_file(old_file)
-                    actual_md5 = apt_pkg.md5sum(old_file_fh)
-                    old_file_fh.close()
-                    actual_size = os.stat(old_file)[stat.ST_SIZE]
-                    found = old_file
-                    suite_type = x.location.archive_type
-                    # need this for updating dsc_files in install()
-                    dsc_entry["files id"] = x.file_id
-                    # See install() in process-accepted...
-                    if not orig_files.has_key(dsc_name):
-                        orig_files[dsc_name] = {}
-                    orig_files[dsc_name]["id"] = x.file_id
-                    orig_files[dsc_name]["path"] = old_file
-                    orig_files[dsc_name]["location"] = x.location.location_id
-                else:
-                    # TODO: Determine queue list dynamically
-                    # Not there? Check the queue directories...
-                    for queue_name in [ "byhand", "new", "proposedupdates", "oldproposedupdates", "embargoed", "unembargoed" ]:
-                        queue = get_policy_queue(queue_name, session)
-                        if not queue:
-                            continue
-
-                        in_otherdir = os.path.join(queue.path, dsc_name)
-
-                        if os.path.exists(in_otherdir):
-                            in_otherdir_fh = utils.open_file(in_otherdir)
-                            actual_md5 = apt_pkg.md5sum(in_otherdir_fh)
-                            in_otherdir_fh.close()
-                            actual_size = os.stat(in_otherdir)[stat.ST_SIZE]
-                            found = in_otherdir
-                            if not orig_files.has_key(dsc_name):
-                                orig_files[dsc_name] = {}
-                            orig_files[dsc_name]["path"] = in_otherdir
-
-                    if not found:
-                        self.rejects.append("%s refers to %s, but I can't find it in the queue or in the pool." % (filename, dsc_name))
-                        continue
-            else:
-                self.rejects.append("%s refers to %s, but I can't find it in the queue." % (filename, dsc_name))
-                continue
-            if actual_md5 != dsc_entry["md5sum"]:
-                self.rejects.append("md5sum for %s doesn't match %s." % (found, filename))
-            if actual_size != int(dsc_entry["size"]):
-                self.rejects.append("size for %s doesn't match %s." % (found, filename))
-
-    ################################################################################
-    # This is used by process-new and process-holding to recheck a changes file
-    # at the time we're running.  It mainly wraps various other internal functions
-    # and is similar to accepted_checks - these should probably be tidied up
-    # and combined
-    def recheck(self, session):
-        cnf = Config()
-        for f in self.pkg.files.keys():
-            # The .orig.tar.gz can disappear out from under us is it's a
-            # duplicate of one in the archive.
-            if not self.pkg.files.has_key(f):
-                continue
-
-            entry = self.pkg.files[f]
-
-            # Check that the source still exists
-            if entry["type"] == "deb":
-                source_version = entry["source version"]
-                source_package = entry["source package"]
-                if not self.pkg.changes["architecture"].has_key("source") \
-                   and not source_exists(source_package, source_version, \
-                    suites = self.pkg.changes["distribution"].keys(), session = session):
-                    source_epochless_version = re_no_epoch.sub('', source_version)
-                    dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
-                    found = False
-                    for queue_name in ["embargoed", "unembargoed", "newstage"]:
-                        queue = get_policy_queue(queue_name, session)
-                        if queue and os.path.exists(os.path.join(queue.path, dsc_filename)):
-                            found = True
-                    if not found:
-                        self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, f))
-
-            # Version and file overwrite checks
-            if entry["type"] == "deb":
-                self.check_binary_against_db(f, session)
-            elif entry["type"] == "dsc":
-                self.check_source_against_db(f, session)
-                self.check_dsc_against_db(f, session)
-
-    ################################################################################
     def accepted_checks(self, overwrite_checks, session):
         # Recheck anything that relies on the database; since that's not
         # frozen between accept and our run time when called from p-a.
@@ -2854,12 +651,6 @@ distribution."""
         propogate={}
         nopropogate={}
 
-        # Find the .dsc (again)
-        dsc_filename = None
-        for f in self.pkg.files.keys():
-            if self.pkg.files[f]["type"] == "dsc":
-                dsc_filename = f
-
         for checkfile in self.pkg.files.keys():
             # The .orig.tar.gz can disappear out from under us if it's a
             # duplicate of one in the archive.
@@ -2868,24 +659,6 @@ distribution."""
 
             entry = self.pkg.files[checkfile]
 
-            # Check that the source still exists
-            if entry["type"] == "deb":
-                source_version = entry["source version"]
-                source_package = entry["source package"]
-                if not self.pkg.changes["architecture"].has_key("source") \
-                   and not source_exists(source_package, source_version, \
-                    suites = self.pkg.changes["distribution"].keys(), \
-                    session = session):
-                    self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, checkfile))
-
-            # Version and file overwrite checks
-            if overwrite_checks:
-                if entry["type"] == "deb":
-                    self.check_binary_against_db(checkfile, session)
-                elif entry["type"] == "dsc":
-                    self.check_source_against_db(checkfile, session)
-                    self.check_dsc_against_db(dsc_filename, session)
-
             # propogate in the case it is in the override tables:
             for suite in self.pkg.changes.get("propdistribution", {}).keys():
                 if self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile, session):
@@ -2903,60 +676,3 @@ distribution."""
             for suite in self.pkg.changes["distribution"].keys():
                 if not self.in_override_p(entry["package"], entry["component"], suite, entry.get("dbtype",""), checkfile, session):
                     self.rejects.append("%s is NEW for %s." % (checkfile, suite))
-
-    ################################################################################
-    # If any file of an upload has a recent mtime then chances are good
-    # the file is still being uploaded.
-
-    def upload_too_new(self):
-        cnf = Config()
-        too_new = False
-        # Move back to the original directory to get accurate time stamps
-        cwd = os.getcwd()
-        os.chdir(self.pkg.directory)
-        file_list = self.pkg.files.keys()
-        file_list.extend(self.pkg.dsc_files.keys())
-        file_list.append(self.pkg.changes_file)
-        for f in file_list:
-            try:
-                last_modified = time.time()-os.path.getmtime(f)
-                if last_modified < int(cnf["Dinstall::SkipTime"]):
-                    too_new = True
-                    break
-            except:
-                pass
-
-        os.chdir(cwd)
-        return too_new
-
-    def store_changelog(self):
-
-        # Skip binary-only upload if it is not a bin-NMU
-        if not self.pkg.changes['architecture'].has_key('source'):
-            from daklib.regexes import re_bin_only_nmu
-            if not re_bin_only_nmu.search(self.pkg.changes['version']):
-                return
-
-        session = DBConn().session()
-
-        # Check if upload already has a changelog entry
-        query = """SELECT changelog_id FROM changes WHERE source = :source
-                   AND version = :version AND architecture = :architecture AND changelog_id != 0"""
-        if session.execute(query, {'source': self.pkg.changes['source'], \
-                                   'version': self.pkg.changes['version'], \
-                                   'architecture': " ".join(self.pkg.changes['architecture'].keys())}).rowcount:
-            session.commit()
-            return
-
-        # Add current changelog text into changelogs_text table, return created ID
-        query = "INSERT INTO changelogs_text (changelog) VALUES (:changelog) RETURNING id"
-        ID = session.execute(query, {'changelog': self.pkg.changes['changes']}).fetchone()[0]
-
-        # Link ID to the upload available in changes table
-        query = """UPDATE changes SET changelog_id = :id WHERE source = :source
-                   AND version = :version AND architecture = :architecture"""
-        session.execute(query, {'id': ID, 'source': self.pkg.changes['source'], \
-                                'version': self.pkg.changes['version'], \
-                                'architecture': " ".join(self.pkg.changes['architecture'].keys())})
-
-        session.commit()
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
index 8c9a7fa..95cf352
@@ -90,10 +90,6 @@ re_spacestrip = re.compile('(\s)')
 # From import_archive.py
 re_arch_from_filename = re.compile(r"binary-[^/]+")
 
-# From import_ldap_fingerprints.py
-re_gpg_fingerprint = re.compile(r"^\s+Key fingerprint = (.*)$", re.MULTILINE)
-re_debian_address = re.compile(r"^.*<(.*)@debian\.org>$", re.MULTILINE)
-
 # From new_security_install.py
 re_taint_free = re.compile(r"^['/;\-\+\.~\s\w]+$")
 
@@ -131,3 +127,53 @@ re_includeinrelease = re.compile (r"(Translation-[a-zA-Z_]+\.(?:bz2|xz)|Contents
 
 # in generate_index_diffs
 re_includeinpdiff = re.compile(r"(Translation-[a-zA-Z_]+\.(?:bz2|xz))")
+
+
+######################################################################
+# Patterns matching filenames                                        #
+######################################################################
+
+# Match safe filenames
+re_file_safe = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9_.:~+-]*$')
+
+# Prefix of binary and source filenames
+_re_file_prefix = r'^(?P<package>[a-z0-9][a-z0-9.+-]+)_(?P<version>[A-Za-z0-9.:~+-]+?)'
+
+# Match binary packages
+# Groups: package, version, architecture, type
+re_file_binary = re.compile(_re_file_prefix + r'_(?P<architecture>[a-z0-9-]+)\.(?P<type>u?deb)$')
+
+# Match changes files
+# Groups: package, version, suffix
+re_file_changes = re.compile(_re_file_prefix + r'_(?P<suffix>[a-zA-Z0-9+-]+)\.changes$')
+
+# Match dsc files
+# Groups: package, version
+re_file_dsc = re.compile(_re_file_prefix + r'\.dsc$')
+
+# Match other source files
+# Groups: package, version
+re_file_source = re.compile(_re_file_prefix + r'(?:(?:\.orig(?:-[a-zA-Z0-9-]+)?|\.debian)?\.tar\.(?:bz2|gz|xz)|\.diff\.gz)$')
+
+# Match upstream tarball
+# Groups: package, version
+re_file_orig = re.compile(_re_file_prefix + r'\.orig(?:-[a-zA-Z0-9-]+)?\.tar\.(?:bz2|gz|xz)')
+
+######################################################################
+# Patterns matching fields                                           #
+######################################################################
+
+# Match package name
+re_field_package = re.compile(r'^[a-z0-9][a-z0-9.+-]+$')
+
+# Match version
+# Groups: without-epoch
+re_field_version = re.compile(r'^(?:[0-9]+:)?(?P<without_epoch>[A-Za-z0-9.:~+-]+)$')
+
+# Extract upstream version
+# Groups: upstream
+re_field_version_upstream = re.compile(r'^(?:[0-9]+:)?(?P<upstream>.*)-[^-]*$')
+
+# Match source field
+# Groups: package, version
+re_field_source = re.compile(r'^(?P<package>[a-z0-9][a-z0-9.+-]+)(?:\s*\((?P<version>[A-Za-z0-9.:~+-]+)\))?$')
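
A minimal sketch of how the new filename and field patterns are used; the filenames and version below are made up:

    from daklib.regexes import re_file_binary, re_file_changes, re_field_source

    m = re_file_binary.match('dak_1.0-8_amd64.deb')
    print m.group('package'), m.group('version'), m.group('architecture'), m.group('type')
    # dak 1.0-8 amd64 deb

    m = re_file_changes.match('dak_1.0-8_amd64.changes')
    print m.group('package'), m.group('suffix')
    # dak amd64

    m = re_field_source.match('dak (1.0-8)')
    print m.group('package'), m.group('version')
    # dak 1.0-8
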
old mode 100755 (executable)
new mode 100644 (file)
index 03df5d8590657c9273b5eb1e7fac05db2a0ce19c..b4646759a81b77dae865bc5e950c727e15778b18 100644 (file)
@@ -34,6 +34,8 @@ def force_to_utf8(s):
     Forces a string to UTF-8.  If the string isn't already UTF-8,
     it's assumed to be ISO-8859-1.
     """
+    if isinstance(s, unicode):
+        return s
     try:
         unicode(s, 'utf-8')
         return s
@@ -111,3 +113,8 @@ def fix_maintainer(maintainer):
     return (rfc822_maint, rfc2047_maint, name, email)
 
 ################################################################################
+
+def split_uploaders(field):
+    import re
+    for u in re.sub(">[ ]*,", ">\t", field).split("\t"):
+        yield u.strip()
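
split_uploaders() splits an Uploaders-style field only on commas that follow an address, so commas inside a name survive. A minimal sketch with made-up addresses:

    from daklib.textutils import split_uploaders

    field = "Jane Doe <jane@example.org>, Smith, John <john@example.org>"
    print list(split_uploaders(field))
    # ['Jane Doe <jane@example.org>', 'Smith, John <john@example.org>']
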
diff --git a/daklib/upload.py b/daklib/upload.py
new file mode 100644 (file)
index 0000000..cabec15
--- /dev/null
@@ -0,0 +1,554 @@
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""module to handle uploads not yet installed to the archive
+
+This module provides classes to handle uploads not yet installed to the
+archive.  Central is the L{Changes} class which represents a changes file.
+It provides methods to access the included binary and source packages.
+"""
+
+import apt_inst
+import apt_pkg
+import os
+import re
+
+from daklib.gpg import SignedFile
+from daklib.regexes import *
+
+class InvalidChangesException(Exception):
+    pass
+
+class InvalidBinaryException(Exception):
+    pass
+
+class InvalidSourceException(Exception):
+    pass
+
+class InvalidHashException(Exception):
+    def __init__(self, filename, hash_name, expected, actual):
+        self.filename = filename
+        self.hash_name = hash_name
+        self.expected = expected
+        self.actual = actual
+    def __str__(self):
+        return ("Invalid {0} hash for {1}:\n"
+                "According to the control file the {0} hash should be {2},\n"
+                "but {1} has {3}.\n"
+                "\n"
+                "If you did not include {1} in you upload, a different version\n"
+                "might already be known to the archive software.") \
+                .format(self.hash_name, self.filename, self.expected, self.actual)
+
+class InvalidFilenameException(Exception):
+    def __init__(self, filename):
+        self.filename = filename
+    def __str__(self):
+        return "Invalid filename '{0}'.".format(self.filename)
+
+class HashedFile(object):
+    """file with checksums
+    """
+    def __init__(self, filename, size, md5sum, sha1sum, sha256sum, section=None, priority=None):
+        self.filename = filename
+        """name of the file
+        @type: str
+        """
+
+        self.size = size
+        """size in bytes
+        @type: long
+        """
+
+        self.md5sum = md5sum
+        """MD5 hash in hexdigits
+        @type: str
+        """
+
+        self.sha1sum = sha1sum
+        """SHA1 hash in hexdigits
+        @type: str
+        """
+
+        self.sha256sum = sha256sum
+        """SHA256 hash in hexdigits
+        @type: str
+        """
+
+        self.section = section
+        """section or C{None}
+        @type: str or C{None}
+        """
+
+        self.priority = priority
+        """priority or C{None}
+        @type: str or C{None}
+        """
+
+    @classmethod
+    def from_file(cls, directory, filename, section=None, priority=None):
+        """create with values for an existing file
+
+        Create a C{HashedFile} object that refers to an already existing file.
+
+        @type  directory: str
+        @param directory: directory the file is located in
+
+        @type  filename: str
+        @param filename: filename
+
+        @type  section: str or C{None}
+        @param section: optional section as given in .changes files
+
+        @type  priority: str or C{None}
+        @param priority: optional priority as given in .changes files
+
+        @rtype:  L{HashedFile}
+        @return: C{HashedFile} object for the given file
+        """
+        path = os.path.join(directory, filename)
+        size = os.stat(path).st_size
+        with open(path, 'r') as fh:
+            hashes = apt_pkg.Hashes(fh)
+        return cls(filename, size, hashes.md5, hashes.sha1, hashes.sha256, section, priority)
+
+    def check(self, directory):
+        """Validate hashes
+
+        Check if size and hashes match the expected value.
+
+        @type  directory: str
+        @param directory: directory the file is located in
+
+        @raise InvalidHashException: hash mismatch
+        """
+        path = os.path.join(directory, self.filename)
+        fh = open(path, 'r')
+
+        size = os.stat(path).st_size
+        if size != self.size:
+            raise InvalidHashException(self.filename, 'size', self.size, size)
+
+        md5sum = apt_pkg.md5sum(fh)
+        if md5sum != self.md5sum:
+            raise InvalidHashException(self.filename, 'md5sum', self.md5sum, md5sum)
+
+        fh.seek(0)
+        sha1sum = apt_pkg.sha1sum(fh)
+        if sha1sum != self.sha1sum:
+            raise InvalidHashException(self.filename, 'sha1sum', self.sha1sum, sha1sum)
+
+        fh.seek(0)
+        sha256sum = apt_pkg.sha256sum(fh)
+        if sha256sum != self.sha256sum:
+            raise InvalidHashException(self.filename, 'sha256sum', self.sha256sum, sha256sum)
+
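
A minimal usage sketch for HashedFile; the queue directory and filename are hypothetical. from_file() records size and checksums of an existing file, and check() re-validates them later:

    hashed = HashedFile.from_file('/srv/queue/unchecked', 'dak_1.0-8_amd64.deb')
    print hashed.size, hashed.md5sum, hashed.sha1sum, hashed.sha256sum
    hashed.check('/srv/queue/unchecked')   # raises InvalidHashException on any mismatch
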
+def parse_file_list(control, has_priority_and_section):
+    """Parse Files and Checksums-* fields
+
+    @type  control: dict-like
+    @param control: control file to take fields from
+
+    @type  has_priority_and_section: bool
+    @param has_priority_and_section: Files field includes section and priority
+                                     (as in .changes)
+
+    @raise InvalidChangesException: missing fields or other grave errors
+
+    @rtype:  dict
+    @return: dict mapping filenames to L{daklib.upload.HashedFile} objects
+    """
+    entries = {}
+
+    for line in control.get("Files", "").split('\n'):
+        if len(line) == 0:
+            continue
+
+        if has_priority_and_section:
+            (md5sum, size, section, priority, filename) = line.split()
+            entry = dict(md5sum=md5sum, size=long(size), section=section, priority=priority, filename=filename)
+        else:
+            (md5sum, size, filename) = line.split()
+            entry = dict(md5sum=md5sum, size=long(size), filename=filename)
+
+        entries[filename] = entry
+
+    for line in control.get("Checksums-Sha1", "").split('\n'):
+        if len(line) == 0:
+            continue
+        (sha1sum, size, filename) = line.split()
+        entry = entries.get(filename, None)
+        if entry is None:
+            raise InvalidChangesException('{0} is listed in Checksums-Sha1, but not in Files.'.format(filename))
+        if entry is not None and entry.get('size', None) != long(size):
+            raise InvalidChangesException('Size for {0} in Files and Checksums-Sha1 fields differ.'.format(filename))
+        entry['sha1sum'] = sha1sum
+
+    for line in control.get("Checksums-Sha256", "").split('\n'):
+        if len(line) == 0:
+            continue
+        (sha256sum, size, filename) = line.split()
+        entry = entries.get(filename, None)
+        if entry is None:
+            raise InvalidChangesException('{0} is listed in Checksums-Sha256, but not in Files.'.format(filename))
+        if entry is not None and entry.get('size', None) != long(size):
+            raise InvalidChangesException('Size for {0} in Files and Checksums-Sha256 fields differ.'.format(filename))
+        entry['sha256sum'] = sha256sum
+
+    files = {}
+    for entry in entries.itervalues():
+        filename = entry['filename']
+        if 'size' not in entry:
+            raise InvalidChangesException('No size for {0}.'.format(filename))
+        if 'md5sum' not in entry:
+            raise InvalidChangesException('No md5sum for {0}.'.format(filename))
+        if 'sha1sum' not in entry:
+            raise InvalidChangesException('No sha1sum for {0}.'.format(filename))
+        if 'sha256sum' not in entry:
+            raise InvalidChangesException('No sha256sum for {0}.'.format(filename))
+        if not re_file_safe.match(filename):
+            raise InvalidChangesException("{0}: References file with unsafe filename {1}.".format(self.filename, filename))
+        f = files[filename] = HashedFile(**entry)
+
+    return files
+
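
parse_file_list() works on any dict-like control mapping. A minimal sketch; the checksums are just placeholders (the empty-string digests) and the size is arbitrary:

    control = {
        'Files':            ' d41d8cd98f00b204e9800998ecf8427e 1234 misc optional dak_1.0-8_amd64.deb',
        'Checksums-Sha1':   ' da39a3ee5e6b4b0d3255bfef95601890afd80709 1234 dak_1.0-8_amd64.deb',
        'Checksums-Sha256': ' e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 1234 dak_1.0-8_amd64.deb',
    }
    files = parse_file_list(control, True)
    f = files['dak_1.0-8_amd64.deb']
    print f.size, f.section, f.priority   # 1234 misc optional
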
+class Changes(object):
+    """Representation of a .changes file
+    """
+    def __init__(self, directory, filename, keyrings, require_signature=True):
+        if not re_file_safe.match(filename):
+            raise InvalidChangesException('{0}: unsafe filename'.format(filename))
+
+        self.directory = directory
+        """directory the .changes is located in
+        @type: str
+        """
+
+        self.filename = filename
+        """name of the .changes file
+        @type: str
+        """
+
+        data = open(self.path).read()
+        self._signed_file = SignedFile(data, keyrings, require_signature)
+        self.changes = apt_pkg.TagSection(self._signed_file.contents)
+        """dict to access fields of the .changes file
+        @type: dict-like
+        """
+
+        self._binaries = None
+        self._source = None
+        self._files = None
+        self._keyrings = keyrings
+        self._require_signature = require_signature
+
+    @property
+    def path(self):
+        """path to the .changes file
+        @type: str
+        """
+        return os.path.join(self.directory, self.filename)
+
+    @property
+    def primary_fingerprint(self):
+        """fingerprint of the key used for signing the .changes file
+        @type: str
+        """
+        return self._signed_file.primary_fingerprint
+
+    @property
+    def valid_signature(self):
+        """C{True} if the .changes has a valid signature
+        @type: bool
+        """
+        return self._signed_file.valid
+
+    @property
+    def architectures(self):
+        """list of architectures included in the upload
+        @type: list of str
+        """
+        return self.changes.get('Architecture', '').split()
+
+    @property
+    def distributions(self):
+        """list of target distributions for the upload
+        @type: list of str
+        """
+        return self.changes['Distribution'].split()
+
+    @property
+    def source(self):
+        """included source or C{None}
+        @type: L{daklib.upload.Source} or C{None}
+        """
+        if self._source is None:
+            source_files = []
+            for f in self.files.itervalues():
+                if re_file_dsc.match(f.filename) or re_file_source.match(f.filename):
+                    source_files.append(f)
+            if len(source_files) > 0:
+                self._source = Source(self.directory, source_files, self._keyrings, self._require_signature)
+        return self._source
+
+    @property
+    def sourceful(self):
+        """C{True} if the upload includes source
+        @type: bool
+        """
+        return "source" in self.architectures
+
+    @property
+    def source_name(self):
+        """source package name
+        @type: str
+        """
+        return re_field_source.match(self.changes['Source']).group('package')
+
+    @property
+    def binaries(self):
+        """included binary packages
+        @type: list of L{daklib.upload.Binary}
+        """
+        if self._binaries is None:
+            binaries = []
+            for f in self.files.itervalues():
+                if re_file_binary.match(f.filename):
+                    binaries.append(Binary(self.directory, f))
+            self._binaries = binaries
+        return self._binaries
+
+    @property
+    def byhand_files(self):
+        """included byhand files
+        @type: list of L{daklib.upload.HashedFile}
+        """
+        byhand = []
+
+        for f in self.files.itervalues():
+            if re_file_dsc.match(f.filename) or re_file_source.match(f.filename) or re_file_binary.match(f.filename):
+                continue
+            if f.section != 'byhand' and f.section[:4] != 'raw-':
+                raise InvalidChangesException("{0}: {1} looks like a byhand package, but is in section {2}".format(self.filename, f.filename, f.section))
+            byhand.append(f)
+
+        return byhand
+
+    @property
+    def binary_names(self):
+        """names of included binary packages
+        @type: list of str
+        """
+        return self.changes['Binary'].split()
+
+    @property
+    def closed_bugs(self):
+        """bugs closed by this upload
+        @type: list of str
+        """
+        return self.changes.get('Closes', '').split()
+
+    @property
+    def files(self):
+        """dict mapping filenames to L{daklib.upload.HashedFile} objects
+        @type: dict
+        """
+        if self._files is None:
+            self._files = parse_file_list(self.changes, True)
+        return self._files
+
+    @property
+    def bytes(self):
+        """total size of files included in this upload in bytes
+        @type: number
+        """
+        count = 0
+        for f in self.files.itervalues():
+            count += f.size
+        return count
+
+    def __cmp__(self, other):
+        """compare two changes files
+
+        We sort by source name and version first.  If these are identical,
+        we sort changes that include source before those without source (so
+        that sourceful uploads get processed first), and finally fall back
+        to the filename (this should really never happen).
+
+        @rtype:  number
+        @return: n where n < 0 if self < other, n = 0 if self == other, n > 0 if self > other
+        """
+        ret = cmp(self.changes.get('Source'), other.changes.get('Source'))
+
+        if ret == 0:
+            # compare version
+            ret = apt_pkg.version_compare(self.changes.get('Version', ''), other.changes.get('Version', ''))
+
+        if ret == 0:
+            # sort changes with source before changes without source
+            if 'source' in self.architectures and 'source' not in other.architectures:
+                ret = -1
+            elif 'source' not in self.architectures and 'source' in other.architectures:
+                ret = 1
+            else:
+                ret = 0
+
+        if ret == 0:
+            # fall back to filename
+            ret = cmp(self.filename, other.filename)
+
+        return ret
+
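
Because Changes defines __cmp__, queue-processing code can order pending uploads with a plain sort; a one-line sketch, assuming uploads is a list of Changes objects:

    uploads.sort()   # source name, then version; sourceful uploads before binary-only ones
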
+class Binary(object):
+    """Representation of a binary package
+    """
+    def __init__(self, directory, hashed_file):
+        self.hashed_file = hashed_file
+        """file object for the .deb
+        @type: HashedFile
+        """
+
+        path = os.path.join(directory, hashed_file.filename)
+        data = apt_inst.DebFile(path).control.extractdata("control")
+
+        self.control = apt_pkg.TagSection(data)
+        """dict to access fields in DEBIAN/control
+        @type: dict-like
+        """
+
+    @classmethod
+    def from_file(cls, directory, filename):
+        hashed_file = HashedFile.from_file(directory, filename)
+        return cls(directory, hashed_file)
+
+    @property
+    def source(self):
+        """get tuple with source package name and version
+        @type: tuple of str
+        """
+        source = self.control.get("Source", None)
+        if source is None:
+            return (self.control["Package"], self.control["Version"])
+        match = re_field_source.match(source)
+        if not match:
+            raise InvalidBinaryException('{0}: Invalid Source field.'.format(self.hashed_file.filename))
+        version = match.group('version')
+        if version is None:
+            version = self.control['Version']
+        return (match.group('package'), version)
+
+    @property
+    def type(self):
+        """package type ('deb' or 'udeb')
+        @type: str
+        """
+        match = re_file_binary.match(self.hashed_file.filename)
+        if not match:
+            raise InvalidBinaryException('{0}: Does not match re_file_binary'.format(self.hashed_file.filename))
+        return match.group('type')
+
+    @property
+    def component(self):
+        """component name
+        @type: str
+        """
+        fields = self.control['Section'].split('/')
+        if len(fields) > 1:
+            return fields[0]
+        return "main"
+
+class Source(object):
+    """Representation of a source package
+    """
+    def __init__(self, directory, hashed_files, keyrings, require_signature=True):
+        self.hashed_files = hashed_files
+        """list of source files (including the .dsc itself)
+        @type: list of L{HashedFile}
+        """
+
+        self._dsc_file = None
+        for f in hashed_files:
+            if re_file_dsc.match(f.filename):
+                if self._dsc_file is not None:
+                    raise InvalidSourceException("Multiple .dsc found ({0} and {1})".format(self._dsc_file.filename, f.filename))
+                else:
+                    self._dsc_file = f
+
+        # make sure the hash for the dsc is valid before we use it
+        self._dsc_file.check(directory)
+
+        dsc_file_path = os.path.join(directory, self._dsc_file.filename)
+        data = open(dsc_file_path, 'r').read()
+        self._signed_file = SignedFile(data, keyrings, require_signature)
+        self.dsc = apt_pkg.TagSection(self._signed_file.contents)
+        """dict to access fields in the .dsc file
+        @type: dict-like
+        """
+
+        self._files = None
+
+    @classmethod
+    def from_file(cls, directory, filename, keyrings, require_signature=True):
+        hashed_file = HashedFile.from_file(directory, filename)
+        return cls(directory, [hashed_file], keyrings, require_signature)
+
+    @property
+    def files(self):
+        """dict mapping filenames to L{HashedFile} objects for additional source files
+
+        This list does not include the .dsc itself.
+
+        @type: dict
+        """
+        if self._files is None:
+            self._files = parse_file_list(self.dsc, False)
+        return self._files
+
+    @property
+    def primary_fingerprint(self):
+        """fingerprint of the key used to sign the .dsc
+        @type: str
+        """
+        return self._signed_file.primary_fingerprint
+
+    @property
+    def valid_signature(self):
+        """C{True} if the .dsc has a valid signature
+        @type: bool
+        """
+        return self._signed_file.valid
+
+    @property
+    def component(self):
+        """guessed component name
+
+        Might be wrong. Don't rely on this.
+
+        @type: str
+        """
+        if 'Section' not in self.dsc:
+            return 'main'
+        fields = self.dsc['Section'].split('/')
+        if len(fields) > 1:
+            return fields[0]
+        return "main"
+
+    @property
+    def filename(self):
+        """filename of .dsc file
+        @type: str
+        """
+        return self._dsc_file.filename
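
Taken together, the new module lets a queue tool open a single .changes file and walk its contents. A minimal, hedged sketch: the directory, filename and keyring path are made up, and the upload is assumed to be signed with a key in that keyring:

    from daklib.upload import Changes

    upload = Changes('/srv/queue/unchecked', 'dak_1.0-8_amd64.changes',
                     keyrings=['/srv/keyrings/upload-keyring.gpg'])
    print upload.source_name, upload.distributions, upload.sourceful
    for binary in upload.binaries:
        print binary.control['Package'], binary.type, binary.component
    if upload.source is not None:
        print upload.source.filename, upload.source.component
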
index 83f556b27d0f2c8f5010b346f6c7eed0aedeacdb..fbe3b1a025c55ecaaeb2d881c81a708987154a9e 100755 (executable)
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
 import commands
+import datetime
 import email.Header
 import os
 import pwd
+import grp
 import select
 import socket
 import shutil
@@ -39,10 +41,15 @@ import time
 import re
 import email as modemail
 import subprocess
+import ldap
 
+import daklib.config as config
+import daklib.daksubprocess
 from dbconn import DBConn, get_architecture, get_component, get_suite, \
                    get_override_type, Keyring, session_wrapper, \
-                   get_active_keyring_paths, get_primary_keyring_path
+                   get_active_keyring_paths, get_primary_keyring_path, \
+                   get_suite_architectures, get_or_set_metadatakey, DBSource, \
+                   Component, Override, OverrideType
 from sqlalchemy import desc
 from dak_exceptions import *
 from gpg import SignedFile
@@ -50,7 +57,7 @@ from textutils import fix_maintainer
 from regexes import re_html_escaping, html_escaping, re_single_line_field, \
                     re_multi_line_field, re_srchasver, re_taint_free, \
                     re_gpg_uid, re_re_mark, re_whitespace_comment, re_issource, \
-                    re_is_orig_source
+                    re_is_orig_source, re_build_dep_arch
 
 from formats import parse_format, validate_changes_format
 from srcformats import get_format_from_string
@@ -59,7 +66,6 @@ from collections import defaultdict
 ################################################################################
 
 default_config = "/etc/dak/dak.conf"     #: default dak config, defines host properties
-default_apt_config = "/etc/dak/apt.conf" #: default apt config, not normally used
 
 alias_cache = None        #: Cache for email alias checks
 key_uid_email_cache = {}  #: Cache for email addresses from gpg key uids
@@ -72,7 +78,7 @@ known_hashes = [("sha1", apt_pkg.sha1sum, (1, 8)),
 # code in lenny's Python. This also affects commands.getoutput and
 # commands.getstatus.
 def dak_getstatusoutput(cmd):
-    pipe = subprocess.Popen(cmd, shell=True, universal_newlines=True,
+    pipe = daklib.daksubprocess.Popen(cmd, shell=True, universal_newlines=True,
         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
 
     output = pipe.stdout.read()
@@ -366,7 +372,7 @@ def check_size(where, files):
 
 ################################################################################
 
-def check_dsc_files(dsc_filename, dsc=None, dsc_files=None):
+def check_dsc_files(dsc_filename, dsc, dsc_files):
     """
     Verify that the files listed in the Files field of the .dsc are
     those expected given the announced Format.
@@ -385,13 +391,6 @@ def check_dsc_files(dsc_filename, dsc=None, dsc_files=None):
     """
     rejmsg = []
 
-    # Parse the file if needed
-    if dsc is None:
-        dsc = parse_changes(dsc_filename, signing_rules=1, dsc_file=1);
-
-    if dsc_files is None:
-        dsc_files = build_file_list(dsc, is_a_dsc=1)
-
     # Ensure .dsc lists proper set of source files according to the format
     # announced
     has = defaultdict(lambda: 0)
@@ -406,7 +405,7 @@ def check_dsc_files(dsc_filename, dsc=None, dsc_files=None):
         (r'orig-.+\.tar\.(gz|bz2|xz)', ('more_orig_tar',)),
     )
 
-    for f in dsc_files.keys():
+    for f in dsc_files:
         m = re_issource.match(f)
         if not m:
             rejmsg.append("%s: %s in Files field not recognised as source."
@@ -605,8 +604,24 @@ def build_package_list(dsc, session = None):
 
 ################################################################################
 
-def send_mail (message, filename=""):
-    """sendmail wrapper, takes _either_ a message string or a file as arguments"""
+def send_mail (message, filename="", whitelists=None):
+    """sendmail wrapper, takes _either_ a message string or a file as arguments
+
+    @type  whitelists: list of (str or None)
+    @param whitelists: path to whitelists. C{None} or an empty list whitelists
+                       everything, otherwise an address is whitelisted if it is
+                       included in any of the lists.
+                       In addition a global whitelist can be specified in
+                       Dinstall::MailWhiteList.
+    """
+
+    maildir = Cnf.get('Dir::Mail')
+    if maildir:
+        path = os.path.join(maildir, datetime.datetime.now().isoformat())
+        path = find_next_free(path)
+        fh = open(path, 'w')
+        print >>fh, message,
+        fh.close()
 
     # Check whether we're supposed to be sending mail
     if Cnf.has_key("Dinstall::Options::No-Mail") and Cnf["Dinstall::Options::No-Mail"]:
@@ -618,23 +633,24 @@ def send_mail (message, filename=""):
         os.write (fd, message)
         os.close (fd)
 
-    if Cnf.has_key("Dinstall::MailWhiteList") and \
-           Cnf["Dinstall::MailWhiteList"] != "":
+    if whitelists is None or None in whitelists:
+        whitelists = []
+    if Cnf.get('Dinstall::MailWhiteList', ''):
+        whitelists.append(Cnf['Dinstall::MailWhiteList'])
+    if len(whitelists) != 0:
         message_in = open_file(filename)
         message_raw = modemail.message_from_file(message_in)
         message_in.close();
 
         whitelist = [];
-        whitelist_in = open_file(Cnf["Dinstall::MailWhiteList"])
-        try:
+        for path in whitelists:
+          with open_file(path, 'r') as whitelist_in:
             for line in whitelist_in:
                 if not re_whitespace_comment.match(line):
                     if re_re_mark.match(line):
                         whitelist.append(re.compile(re_re_mark.sub("", line.strip(), 1)))
                     else:
                         whitelist.append(re.compile(re.escape(line.strip())))
-        finally:
-            whitelist_in.close()
 
         # Fields to check.
         fields = ["To", "Bcc", "Cc"]
@@ -651,7 +667,7 @@ def send_mail (message, filename=""):
                             mail_whitelisted = 1
                             break
                     if not mail_whitelisted:
-                        print "Skipping %s since it's not in %s" % (item, Cnf["Dinstall::MailWhiteList"])
+                        print "Skipping {0} since it's not whitelisted".format(item)
                         continue
                     match.append(item)
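
With the extended signature a caller can restrict recipients per mail: an address must then appear in one of the given files or in the global Dinstall::MailWhiteList. A minimal call sketch; the message variable and whitelist path are hypothetical:

    send_mail(mail_message, whitelists=['/srv/dak/mail-whitelist'])
    # whitelists=None (the default) applies only the global whitelist, if one is configured
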
 
@@ -691,13 +707,11 @@ def send_mail (message, filename=""):
 
 ################################################################################
 
-def poolify (source, component):
-    if component:
-        component += '/'
+def poolify (source, component=None):
     if source[:3] == "lib":
-        return component + source[:4] + '/' + source + '/'
+        return source[:4] + '/' + source + '/'
     else:
-        return component + source[:1] + '/' + source + '/'
+        return source[:1] + '/' + source + '/'
 
 ################################################################################
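
A quick sketch of the new poolify() behaviour, which no longer prepends the component (package names are illustrative):

    print poolify('dak')      # d/dak/
    print poolify('libdak')   # libd/libdak/
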
 
@@ -766,7 +780,7 @@ def which_conf_file ():
         homedir = os.getenv("HOME")
         confpath = os.path.join(homedir, "/etc/dak.conf")
         if os.path.exists(confpath):
-            apt_pkg.ReadConfigFileISC(Cnf,confpath)
+            apt_pkg.read_config_file_isc(Cnf,confpath)
 
     # We are still in here, so there is no local config file or we do
     # not allow local files. Do the normal stuff.
@@ -775,20 +789,6 @@ def which_conf_file ():
 
     return default_config
 
-def which_apt_conf_file ():
-    res = socket.getfqdn()
-    # In case we allow local config files per user, try if one exists
-    if Cnf.find_b("Config::" + res + "::AllowLocalConfig"):
-        homedir = os.getenv("HOME")
-        confpath = os.path.join(homedir, "/etc/dak.conf")
-        if os.path.exists(confpath):
-            apt_pkg.ReadConfigFileISC(Cnf,default_config)
-
-    if Cnf.get("Config::" + res + "::AptConfig"):
-        return Cnf["Config::" + res + "::AptConfig"]
-    else:
-        return default_apt_config
-
 def which_alias_file():
     hostname = socket.getfqdn()
     aliasfn = '/var/lib/misc/'+hostname+'/forward-alias'
@@ -1064,43 +1064,6 @@ def parse_args(Options):
 
 ################################################################################
 
-# Inspired(tm) by Bryn Keller's print_exc_plus (See
-# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52215)
-
-def print_exc():
-    tb = sys.exc_info()[2]
-    while tb.tb_next:
-        tb = tb.tb_next
-    stack = []
-    frame = tb.tb_frame
-    while frame:
-        stack.append(frame)
-        frame = frame.f_back
-    stack.reverse()
-    traceback.print_exc()
-    for frame in stack:
-        print "\nFrame %s in %s at line %s" % (frame.f_code.co_name,
-                                             frame.f_code.co_filename,
-                                             frame.f_lineno)
-        for key, value in frame.f_locals.items():
-            print "\t%20s = " % key,
-            try:
-                print value
-            except:
-                print "<unable to print>"
-
-################################################################################
-
-def try_with_debug(function):
-    try:
-        function()
-    except SystemExit:
-        raise
-    except:
-        print_exc()
-
-################################################################################
-
 def arch_compare_sw (a, b):
     """
     Function for use in sorting lists of architectures.
@@ -1424,50 +1387,65 @@ def gpg_get_key_addresses(fingerprint):
     addresses = key_uid_email_cache.get(fingerprint)
     if addresses != None:
         return addresses
-    addresses = set()
+    addresses = list()
     cmd = "gpg --no-default-keyring %s --fingerprint %s" \
                 % (gpg_keyring_args(), fingerprint)
     (result, output) = commands.getstatusoutput(cmd)
     if result == 0:
         for l in output.split('\n'):
             m = re_gpg_uid.match(l)
-            if m:
-                addresses.add(m.group(1))
+            if not m:
+                continue
+            address = m.group(1)
+            if address.endswith('@debian.org'):
+                # prefer @debian.org addresses
+                # TODO: maybe not hardcode the domain
+                addresses.insert(0, address)
+            else:
+                addresses.append(m.group(1))
     key_uid_email_cache[fingerprint] = addresses
     return addresses
 
 ################################################################################
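
A standalone sketch of the new ordering (the uids below are hypothetical; in the function above they come from the parsed gpg output): @debian.org addresses are moved to the front, so callers that take the first entry prefer them.

uids = ["joe@example.org", "joe@debian.org"]
addresses = []
for address in uids:
    if address.endswith('@debian.org'):
        addresses.insert(0, address)  # preferred address goes to the front
    else:
        addresses.append(address)
assert addresses == ["joe@debian.org", "joe@example.org"]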
 
-# Inspired(tm) by http://www.zopelabs.com/cookbook/1022242603
-
-def wrap(paragraph, max_length, prefix=""):
-    line = ""
-    s = ""
-    have_started = 0
-    words = paragraph.split()
+def get_logins_from_ldap(fingerprint='*'):
+    """retrieve login from LDAP linked to a given fingerprint"""
+
+    LDAPDn = Cnf['Import-LDAP-Fingerprints::LDAPDn']
+    LDAPServer = Cnf['Import-LDAP-Fingerprints::LDAPServer']
+    l = ldap.open(LDAPServer)
+    l.simple_bind_s('','')
+    Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
+                       '(keyfingerprint=%s)' % fingerprint,
+                       ['uid', 'keyfingerprint'])
+    login = {}
+    for elem in Attrs:
+        login[elem[1]['keyFingerPrint'][0]] = elem[1]['uid'][0]
+    return login
 
-    for word in words:
-        word_size = len(word)
-        if word_size > max_length:
-            if have_started:
-                s += line + '\n' + prefix
-            s += word + '\n' + prefix
-        else:
-            if have_started:
-                new_length = len(line) + word_size + 1
-                if new_length > max_length:
-                    s += line + '\n' + prefix
-                    line = word
-                else:
-                    line += ' ' + word
-            else:
-                line = word
-        have_started = 1
-
-    if have_started:
-        s += line
+################################################################################
 
-    return s
+def get_users_from_ldap():
+    """retrieve login and user names from LDAP"""
+
+    LDAPDn = Cnf['Import-LDAP-Fingerprints::LDAPDn']
+    LDAPServer = Cnf['Import-LDAP-Fingerprints::LDAPServer']
+    l = ldap.open(LDAPServer)
+    l.simple_bind_s('','')
+    Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
+                       '(uid=*)', ['uid', 'cn', 'mn', 'sn'])
+    users = {}
+    for elem in Attrs:
+        elem = elem[1]
+        name = []
+        for k in ('cn', 'mn', 'sn'):
+            try:
+                if elem[k][0] != '-':
+                    name.append(elem[k][0])
+            except KeyError:
+                pass
+        users[' '.join(name)] = elem['uid'][0]
+    return users
 
 ################################################################################
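
A small sketch of how the ldap.search_s() results are flattened into dictionaries (the Attrs value below is made up; the real one comes from the directory server configured in Import-LDAP-Fingerprints):

Attrs = [('uid=joe,ou=users,dc=example,dc=org',
          {'keyFingerPrint': ['0123456789ABCDEF'], 'uid': ['joe']})]
login = {}
for elem in Attrs:
    login[elem[1]['keyFingerPrint'][0]] = elem[1]['uid'][0]
assert login == {'0123456789ABCDEF': 'joe'}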
 
@@ -1484,31 +1462,70 @@ def clean_symlink (src, dest, root):
 
 ################################################################################
 
-def temp_filename(directory=None, prefix="dak", suffix=""):
+def temp_filename(directory=None, prefix="dak", suffix="", mode=None, group=None):
     """
     Return a secure and unique filename by pre-creating it.
-    If 'directory' is non-null, it will be the directory the file is pre-created in.
-    If 'prefix' is non-null, the filename will be prefixed with it, default is dak.
-    If 'suffix' is non-null, the filename will end with it.
 
-    Returns a pair (fd, name).
+    @type directory: str
+    @param directory: If non-null it will be the directory the file is pre-created in.
+
+    @type prefix: str
+    @param prefix: The filename will be prefixed with this string
+
+    @type suffix: str
+    @param suffix: The filename will end with this string
+
+    @type mode: str
+    @param mode: If set the file will get chmodded to those permissions
+
+    @type group: str
+    @param group: If set the file will get chgrped to the specified group.
+
+    @rtype: tuple
+    @return: Returns a pair (fd, name)
     """
 
-    return tempfile.mkstemp(suffix, prefix, directory)
+    (tfd, tfname) = tempfile.mkstemp(suffix, prefix, directory)
+    if mode:
+        os.chmod(tfname, mode)
+    if group:
+        gid = grp.getgrnam(group).gr_gid
+        os.chown(tfname, -1, gid)
+    return (tfd, tfname)
 
 ################################################################################
 
-def temp_dirname(parent=None, prefix="dak", suffix=""):
+def temp_dirname(parent=None, prefix="dak", suffix="", mode=None, group=None):
     """
     Return a secure and unique directory by pre-creating it.
-    If 'parent' is non-null, it will be the directory the directory is pre-created in.
-    If 'prefix' is non-null, the filename will be prefixed with it, default is dak.
-    If 'suffix' is non-null, the filename will end with it.
 
-    Returns a pathname to the new directory
+    @type parent: str
+    @param parent: If non-null it will be the directory the directory is pre-created in.
+
+    @type prefix: str
+    @param prefix: The filename will be prefixed with this string
+
+    @type suffix: str
+    @param suffix: The filename will end with this string
+
+    @type mode: str
+    @param mode: If set the file will get chmodded to those permissions
+
+    @type group: str
+    @param group: If set the file will get chgrped to the specified group.
+
+    @rtype: str
+    @return: Returns the path to the newly created directory
+
     """
 
-    return tempfile.mkdtemp(suffix, prefix, parent)
+    tfname = tempfile.mkdtemp(suffix, prefix, parent)
+    if mode:
+        os.chmod(tfname, mode)
+    if group:
+        gid = grp.getgrnam(group).gr_gid
+        os.chown(tfname, -1, gid)
+    return tfname
 
 ################################################################################
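
A short sketch of what the new optional arguments do once the file exists (the group name is hypothetical, and the chown step is commented out because it needs that group to exist locally):

import grp
import os
import tempfile

(fd, name) = tempfile.mkstemp(".txt", "dak", None)
os.chmod(name, 0o644)                      # effect of mode=0o644
# gid = grp.getgrnam("ftpmaster").gr_gid   # effect of group="ftpmaster"
# os.chown(name, -1, gid)
os.close(fd)
os.unlink(name)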
 
@@ -1545,14 +1562,7 @@ def get_changes_files(from_dir):
 
 ################################################################################
 
-apt_pkg.init()
-
-Cnf = apt_pkg.Configuration()
-if not os.getenv("DAK_TEST"):
-    apt_pkg.read_config_file_isc(Cnf,default_config)
-
-if which_conf_file() != default_config:
-    apt_pkg.read_config_file_isc(Cnf,which_conf_file())
+Cnf = config.Config().Cnf
 
 ################################################################################
 
@@ -1610,7 +1620,6 @@ def get_packages_from_ftp(root, suite, component, architecture):
 
     @rtype: TagFile
     @return: apt_pkg class containing package data
-
     """
     filename = "%s/dists/%s/%s/binary-%s/Packages.gz" % (root, suite, component, architecture)
     (fd, temp_file) = temp_filename()
@@ -1623,7 +1632,7 @@ def get_packages_from_ftp(root, suite, component, architecture):
         if (result != 0):
             fubar("Gunzip invocation failed!\n%s\n" % (output), result)
     packages = open_file(temp_file)
-    Packages = apt_pkg.ParseTagFile(packages)
+    Packages = apt_pkg.TagFile(packages)
     os.unlink(temp_file)
     return Packages
 
@@ -1632,3 +1641,237 @@ def get_packages_from_ftp(root, suite, component, architecture):
 def deb_extract_control(fh):
     """extract DEBIAN/control from a binary package"""
     return apt_inst.DebFile(fh).control.extractdata("control")
+
+################################################################################
+
+def mail_addresses_for_upload(maintainer, changed_by, fingerprint):
+    """mail addresses to contact for an upload
+
+    @type  maintainer: str
+    @param maintainer: Maintainer field of the .changes file
+
+    @type  changed_by: str
+    @param changed_by: Changed-By field of the .changes file
+
+    @type  fingerprint: str
+    @param fingerprint: fingerprint of the key used to sign the upload
+
+    @rtype:  list of str
+    @return: list of RFC 2047-encoded mail addresses to contact regarding
+             this upload
+    """
+    addresses = [maintainer]
+    if changed_by != maintainer:
+        addresses.append(changed_by)
+
+    fpr_addresses = gpg_get_key_addresses(fingerprint)
+    if len(fpr_addresses) > 0 and fix_maintainer(changed_by)[3] not in fpr_addresses and fix_maintainer(maintainer)[3] not in fpr_addresses:
+        addresses.append(fpr_addresses[0])
+
+    encoded_addresses = [ fix_maintainer(e)[1] for e in addresses ]
+    return encoded_addresses
+
+################################################################################
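+
A condensed sketch of the selection logic above (the addresses and key uids are hypothetical; in the real function fix_maintainer() and gpg_get_key_addresses() supply them):

maintainer = "Jane Doe <jane@example.org>"
changed_by = "John Doe <john@example.org>"
fpr_addresses = ["sponsor@debian.org"]   # uids of the signing key

addresses = [maintainer]
if changed_by != maintainer:
    addresses.append(changed_by)
# neither the maintainer nor the uploader owns the signing key,
# so the key's preferred uid is contacted as well
if fpr_addresses and "jane@example.org" not in fpr_addresses \
        and "john@example.org" not in fpr_addresses:
    addresses.append(fpr_addresses[0])
assert addresses[-1] == "sponsor@debian.org"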
+
+def call_editor(text="", suffix=".txt"):
+    """run editor and return the result as a string
+
+    @type  text: str
+    @param text: initial text
+
+    @type  suffix: str
+    @param suffix: extension for temporary file
+
+    @rtype:  str
+    @return: string with the edited text
+    """
+    editor = os.environ.get('VISUAL', os.environ.get('EDITOR', 'vi'))
+    tmp = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
+    try:
+        print >>tmp, text,
+        tmp.close()
+        daklib.daksubprocess.check_call([editor, tmp.name])
+        return open(tmp.name, 'r').read()
+    finally:
+        os.unlink(tmp.name)
+
+################################################################################
+
+def check_reverse_depends(removals, suite, arches=None, session=None, cruft=False):
+    dbsuite = get_suite(suite, session)
+    overridesuite = dbsuite
+    if dbsuite.overridesuite is not None:
+        overridesuite = get_suite(dbsuite.overridesuite, session)
+    dep_problem = 0
+    p2c = {}
+    all_broken = {}
+    if arches:
+        all_arches = set(arches)
+    else:
+        all_arches = set([x.arch_string for x in get_suite_architectures(suite)])
+    all_arches -= set(["source", "all"])
+    metakey_d = get_or_set_metadatakey("Depends", session)
+    metakey_p = get_or_set_metadatakey("Provides", session)
+    params = {
+        'suite_id':     dbsuite.suite_id,
+        'metakey_d_id': metakey_d.key_id,
+        'metakey_p_id': metakey_p.key_id,
+    }
+    for architecture in all_arches | set(['all']):
+        deps = {}
+        sources = {}
+        virtual_packages = {}
+        params['arch_id'] = get_architecture(architecture, session).arch_id
+
+        statement = '''
+            SELECT b.id, b.package, s.source, c.name as component,
+                (SELECT bmd.value FROM binaries_metadata bmd WHERE bmd.bin_id = b.id AND bmd.key_id = :metakey_d_id) AS depends,
+                (SELECT bmp.value FROM binaries_metadata bmp WHERE bmp.bin_id = b.id AND bmp.key_id = :metakey_p_id) AS provides
+                FROM binaries b
+                JOIN bin_associations ba ON b.id = ba.bin AND ba.suite = :suite_id
+                JOIN source s ON b.source = s.id
+                JOIN files_archive_map af ON b.file = af.file_id
+                JOIN component c ON af.component_id = c.id
+                WHERE b.architecture = :arch_id'''
+        query = session.query('id', 'package', 'source', 'component', 'depends', 'provides'). \
+            from_statement(statement).params(params)
+        for binary_id, package, source, component, depends, provides in query:
+            sources[package] = source
+            p2c[package] = component
+            if depends is not None:
+                deps[package] = depends
+            # Maintain a counter for each virtual package.  If a
+            # Provides: exists, set the counter to 0 and count all
+            # provides by a package not in the list for removal.
+            # If the counter stays 0 at the end, we know that only
+            # the to-be-removed packages provided this virtual
+            # package.
+            if provides is not None:
+                for virtual_pkg in provides.split(","):
+                    virtual_pkg = virtual_pkg.strip()
+                    if virtual_pkg == package: continue
+                    if not virtual_packages.has_key(virtual_pkg):
+                        virtual_packages[virtual_pkg] = 0
+                    if package not in removals:
+                        virtual_packages[virtual_pkg] += 1
+
+        # If a virtual package is only provided by the to-be-removed
+        # packages, treat the virtual package as to-be-removed too.
+        for virtual_pkg in virtual_packages.keys():
+            if virtual_packages[virtual_pkg] == 0:
+                removals.append(virtual_pkg)
+
+        # Check binary dependencies (Depends)
+        for package in deps.keys():
+            if package in removals: continue
+            parsed_dep = []
+            try:
+                parsed_dep += apt_pkg.parse_depends(deps[package])
+            except ValueError as e:
+                print "Error for package %s: %s" % (package, e)
+            for dep in parsed_dep:
+                # Check for partial breakage.  If a package has an ORed
+                # dependency, there is only a dependency problem if all
+                # packages in the ORed depends will be removed.
+                unsat = 0
+                for dep_package, _, _ in dep:
+                    if dep_package in removals:
+                        unsat += 1
+                if unsat == len(dep):
+                    component = p2c[package]
+                    source = sources[package]
+                    if component != "main":
+                        source = "%s/%s" % (source, component)
+                    all_broken.setdefault(source, {}).setdefault(package, set()).add(architecture)
+                    dep_problem = 1
+
+    if all_broken:
+        if cruft:
+            print "  - broken Depends:"
+        else:
+            print "# Broken Depends:"
+        for source, bindict in sorted(all_broken.items()):
+            lines = []
+            for binary, arches in sorted(bindict.items()):
+                if arches == all_arches or 'all' in arches:
+                    lines.append(binary)
+                else:
+                    lines.append('%s [%s]' % (binary, ' '.join(sorted(arches))))
+            if cruft:
+                print '    %s: %s' % (source, lines[0])
+            else:
+                print '%s: %s' % (source, lines[0])
+            for line in lines[1:]:
+                if cruft:
+                    print '    ' + ' ' * (len(source) + 2) + line
+                else:
+                    print ' ' * (len(source) + 2) + line
+        if not cruft:
+            print
+
+    # Check source dependencies (Build-Depends and Build-Depends-Indep)
+    all_broken.clear()
+    metakey_bd = get_or_set_metadatakey("Build-Depends", session)
+    metakey_bdi = get_or_set_metadatakey("Build-Depends-Indep", session)
+    params = {
+        'suite_id':    dbsuite.suite_id,
+        'metakey_ids': (metakey_bd.key_id, metakey_bdi.key_id),
+    }
+    statement = '''
+        SELECT s.id, s.source, string_agg(sm.value, ', ') as build_dep
+           FROM source s
+           JOIN source_metadata sm ON s.id = sm.src_id
+           WHERE s.id in
+               (SELECT source FROM src_associations
+                   WHERE suite = :suite_id)
+               AND sm.key_id in :metakey_ids
+           GROUP BY s.id, s.source'''
+    query = session.query('id', 'source', 'build_dep').from_statement(statement). \
+        params(params)
+    for source_id, source, build_dep in query:
+        if source in removals: continue
+        parsed_dep = []
+        if build_dep is not None:
+            # Remove [arch] information since we want to see breakage on all arches
+            build_dep = re_build_dep_arch.sub("", build_dep)
+            try:
+                parsed_dep += apt_pkg.parse_depends(build_dep)
+            except ValueError as e:
+                print "Error for source %s: %s" % (source, e)
+        for dep in parsed_dep:
+            unsat = 0
+            for dep_package, _, _ in dep:
+                if dep_package in removals:
+                    unsat += 1
+            if unsat == len(dep):
+                component, = session.query(Component.component_name) \
+                    .join(Component.overrides) \
+                    .filter(Override.suite == overridesuite) \
+                    .filter(Override.package == re.sub('/(contrib|non-free)$', '', source)) \
+                    .join(Override.overridetype).filter(OverrideType.overridetype == 'dsc') \
+                    .first()
+                if component != "main":
+                    source = "%s/%s" % (source, component)
+                all_broken.setdefault(source, set()).add(pp_deps(dep))
+                dep_problem = 1
+
+    if all_broken:
+        if cruft:
+            print "  - broken Build-Depends:"
+        else:
+            print "# Broken Build-Depends:"
+        for source, bdeps in sorted(all_broken.items()):
+            bdeps = sorted(bdeps)
+            if cruft:
+                print '    %s: %s' % (source, bdeps[0])
+            else:
+                print '%s: %s' % (source, bdeps[0])
+            for bdep in bdeps[1:]:
+                if cruft:
+                    print '    ' + ' ' * (len(source) + 2) + bdep
+                else:
+                    print ' ' * (len(source) + 2) + bdep
+        if not cruft:
+            print
+
+    return dep_problem
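
A standalone sketch of the virtual-package bookkeeping described in the comments above (package names are made up): a counter that is still zero at the end means only to-be-removed packages provided the virtual package, so it is treated as removed as well.

removals = ["exim4"]
provides = {"exim4": ["mail-transport-agent"], "apache2": ["httpd"]}

virtual_packages = {}
for package, virtuals in provides.items():
    for virtual_pkg in virtuals:
        virtual_packages.setdefault(virtual_pkg, 0)
        if package not in removals:
            virtual_packages[virtual_pkg] += 1

removals += [v for v, n in virtual_packages.items() if n == 0]
assert "mail-transport-agent" in removals
assert "httpd" not in removals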
index 683de835037510f4c78f061fcde76022293bcc7e..cda93b010f16fd71975fdfc6391d76a58b369dfc 100644 (file)
@@ -5,5 +5,4 @@ set -e
 if [ "$1" = "configure" ]; then
       # Default (blank) files so that programs at least run --help and stuff
       touch /etc/dak/dak.conf
-      touch /etc/dak/apt.conf
 fi
index 7fabb4ee281b245a20afbf1f54cde999ab7d3b95..0e9dbd3377d0a64b1a90906528a2e6f88255dd8e 100644 (file)
@@ -108,14 +108,15 @@ Dinstall
     //// option is set.
     // BXANotify "true";
 
-    //// FutureTimeTravelGrace (required): specifies how many seconds into the
+    //// FutureTimeTravelGrace (optional): specifies how many seconds into the
     //// future timestamps are allowed to be inside a deb before being rejected.
-    //// 28800 = 8 hours
-    FutureTimeTravelGrace 28800;
+    //// Defaults to 86400 (24 hours) if not specified.
+    FutureTimeTravelGrace 86400;
 
-    //// PastCutoffYear (required): specifies the cut-off year which is used when
+    //// PastCutoffYear (optional): specifies the cut-off year which is used when
     //// deciding whether or not to reject packages based on the file timestamp.
-    PastCutoffYear "1984";
+    //// Defaults to "1975" if not specified.
+    PastCutoffYear "1975";
 
     //// SkipTime (required): an integer value which is the number of seconds
     //// that a file must be older than (via its last modified timestamp)
index 8ad674c352f448f161c90b4c803b813f3133b5c4..60be47784500982af5e3a3cc5ac58e6dc459ae8d 100644 (file)
@@ -5,7 +5,7 @@ o Please be careful: dak sends out lots of emails and if not
   configured properly will happily send them to lots of people who
   probably didn't want those emails.
 
-o Don't use the debian dak.conf, apt.conf, cron.* etc. as starting
+o Don't use the debian dak.conf, cron.* etc. as starting
   points for your own configuration files, they're highly Debian
   specific.  Start from scratch and refer to the security.debian.org
   config files (-security) as they're a better example for a private
@@ -19,14 +19,14 @@ Generic and generally useful
 
 o To process queue/:
 
-  * dak process-unchecked - processes queue/unchecked
-  * dak process-accepted - move files from queue/accepted into the pool (and database)
-  * dak process-new - allows ftp administrator to processes queue/new and queue/byhand
+  * dak process-upload - processes queue/unchecked
+  * dak process-new - allows ftp administrator to process queue/new and queue/byhand
+  * dak process-policy - processes policy queues (including new and byhand)
 
 o To generate indices files:
 
   * dak dominate          - removes obsolete packages from suites
-  * dak generate-filelist - generates file lists for apt-ftparchive
+  * dak generate-packages-sources2 - generates Packages and Sources files
   * dak generate-releases - generates Release
 
 o To clean things up:
@@ -51,7 +51,6 @@ o dak init-archive - initializes a projectb database from an existing archive
 Generic but not overly useful (in normal use)
 ---------------------------------------------
 
-o dak dot-dak-decode - dumps info in .dak files
 o dak import-users-from-passwd - sync PostgreSQL users with system users
 o dak cruft-report - check for obsolete or duplicated packages
 o dak init-dirs - directory creation in the initial setup of an archive
@@ -79,8 +78,6 @@ Very Incomplete or otherwise not generally useful
 -------------------------------------------------
 
 o dak init-db - currently only initializes a DB from a dak.conf config file
-o dak compare-suites - looks for version descrepancies that shouldn't exist in many
-                      archives
 o dak check-overrides - override cruft checker that doesn't work well with New Incoming
 
 Scripts invoked by other scripts
@@ -91,29 +88,4 @@ o dak examine-package - invoked by 'dak process-new' to "check" NEW packages
                        How do I get started?
                        =====================
 
-[Very incomplete - FIXME]
-
-o Write your own dak.conf and apt.conf files.  dak looks for those
-  config files in /etc/dak/.  /etc/dak/dak.conf can define
-  alternative configuration files with Config::host::DakConfig and
-  Config::host::AptConfig (where "host" is the fully qualified domain
-  name of your machine).
-o Create a PostgreSQL database on the host given in dak.conf's DB::Host
-  with the name specified in DB::Name.
-o Run 'dak init-dirs': this will create all directories which are specified in
-  dak.conf and apt.conf.
-o If you have an existing archive:
-   * Run 'dak init-archive'
-  otherwise:
-   * Create the table structure.  init_pool.sql contains all SQL statements
-     which are needed for this.  After changing all occurences of "projectb"
-     to the name of your database (as defined in DB::Name) you can run:
-         psql <DB::Name> < init_pool.sql
-   * Create the following groups in postgres: ftpmaster ftpteam ftptrainee
-   * Run 'dak update-db' to upgrade the database schema.
-   * Run 'dak init-db': it will populate your database with the values from
-     dak.conf and apt.conf.
-   * Run 'psql <DB::Name> < add_constraints.sql'.
-o Copy all templates from the "templates" directory to to the directory
-  specified in Dir::Templates, and adapt them to your distribution.
-
+Please refer to setup/README for instructions on how to set up dak.
index 2b159a8f0915840f1889504a2b91bcda9a7aa6ac..ff6810f7e3475d32ced1003aaaad0549bffe0697 100644 (file)
@@ -347,3 +347,35 @@ Canadians: This is a lighthouse. Your call.
 
 <mhy> I wish they wouldnt leave biscuits out, thats just tempting. Damnit.
 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+<Ganneff> WARNING 2012-08-27 20:16:19 Got unexpected notice from ChanServ on OFTC:
+<Ganneff>         IrcMsg(prefix="ChanServ!services@services.oftc.net", command="NOTICE",
+<Ganneff>         args=('dak', '[#debian-ftp] "hello, have you tried rebooting? Didn\'t
+<Ganneff>         work? Have you tried ignoring it?"')).
+<Ganneff> hihi
+<DktrKranz> actually, we could patch that notice to be "Have you tried to turn it off and on again", a-la-IT-Crowd
+<Ganneff> yeah well, but we dont want them to turn the archive off and on again
+<Ganneff> thats MY job, damn you.
+<Ganneff> i turn off cron
+<Ganneff> and forget to turn it back on
+<Ganneff> me mine my job
+<DktrKranz> may I call you Roy then?
+<Ganneff> may i kick you?
+<Ganneff> im not a geek. im married and have a son. nono geek no.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+<taffit> Hi, http://ftp-master.debian.org/deferred.html is a bit outated
+<Ganneff> yes, DktrKranz broke the code.
+<DktrKranz> nope
+<DktrKranz> it's dak's fault!
+<Ganneff> never ever.
+<Ganneff> must be you
+<jcristau> bad dak
+<Ganneff> rule 1: dak is never at fault.
+<Ganneff> rule 2: ftpmasters are always right. may cancel rule 1.
+<Ganneff> rule 3: see above.
+<ansgar> The bot is innocent!
+<DktrKranz> rule 0: there are no rules!
+* DktrKranz hides
index 7b9c4a68b54f3fb2a4f5c28177d8e8d634060ce0..47667d6743881d54441d6e52057adf9d4fd02317 100644 (file)
@@ -3,11 +3,11 @@ Rough Guide to doing Stable Point Releases in Debian
 
 - sudo to dak
 - bash:
-suite=oldstable
-suitename=lenny
-pusuite=oldstable-proposed-updates
-oldrev=5.0.8
-newrev=5.0.9
+suite=stable
+suitename=wheezy
+pusuite=proposed-updates
+oldrev=7.1
+newrev=7.2
 export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
 . $SCRIPTVARS
 . "${configdir}/common"
@@ -26,18 +26,30 @@ dak control-suite -l ${suite} > ${suite}.list
   edit ${pusuite}.list (and later the Changelog too)
 - bash:
 dak make-changelog -s ${pusuite} -b ${suite} | cat - ${ftpdir}/dists/${suite}/ChangeLog | sponge ${ftpdir}/dists/${suite}/ChangeLog
-cat ${pusuite}.list | dak control-suite --add ${suite}
+dak control-suite --add ${suite} < ${pusuite}.list
 dak control-suite --remove ${pusuite} < ${pusuite}.list
 
+- clean up *.changes from proposed-updates:
+# Be careful if uploads were not included in the point release.
+pumorguedir="${base}/morgue/queues/$(date +%Y/%m)"
+mkdir -p "${pumorguedir}"
+cd ${ftpdir}/dists/${pusuite}
+mv -t "${pumorguedir}" -n -- *.changes
+
+- -r0 additions?
+  For example new d-i.
+
 - sync with the stable RM if any propup is needed; if so, do it:
 e.g. cat /srv/release.debian.org/www/squeeze/6.0/6.0.4/propup.unstable | dak control-suite --add unstable
 Note: unstable first, then testing
 - ask rms if they have RMs to do.
+- check output from cruft report:
+dak cruft-report -s ${suite}
 - and then check if they have a d-i update. if so, bash:
 # set dioldver to "empty" if there is no old to remove
-diver=20090123lenny9
-dioldver=20090123lenny8
+diver=20130613+deb7u1
 dak copy-installer -s ${pusuite} -d ${suite} ${diver}
+dioldver=20130613
 cd $ftpdir/dists/${suite}/main
 if [ "${dioldver}" != "empty" ]; then
     for iarch in $(dak admin s-a list-arch ${suite}); do
@@ -50,6 +62,14 @@ if [ "${dioldver}" != "empty" ]; then
 fi
 cd $ftpdir/dists/${suite}
 
+- Updates for win32-loader?
+cd ${ftpdir}/tools/win32-loader
+if [ -d ${suitename}-proposed-updates ]; then
+    rm -r ${suite}
+    mv ${suitename}-proposed-updates ${suite}
+fi
+cd ${ftpdir}
+
 - Update version number in README, README.html and dists/README,
   Clean up dists/stable/ChangeLog (add header, basically). bash:
   $EDITOR ChangeLog ../README ../../README*
@@ -78,20 +98,33 @@ dak generate-filelist -s ${suite}
 - Let SRM see if all is ok
 
 - then:
-dak generate-packages-sources -s ${suite} ; dak contents generate -f -s ${suite}
+if [ "${suitename}" = "squeeze" ]; then
+  dak generate-packages-sources -s ${suite} && dak contents generate -f -s ${suite} -a ftp-master
+else
+  dak generate-packages-sources2 --force -s ${suite} && dak contents generate -f -s ${suite} -a ftp-master
+fi
 
-cd $ftpdir/dists/${suite}
-for carch in $(dak admin s-a list-arch ${suite}); do
-  echo doing ${carch}
-  cp $base/dak/templates/contents Contents-${carch}.new;
-  zcat {main,contrib,non-free}/Contents-${carch}.gz | ~joerg/mergecontents.pl | sort >> Contents-${carch}.new;
-  gzip -9v Contents-${carch}.new;
-  mv Contents-${carch}.new.gz Contents-${carch}.gz;
-done
+- For squeeze: merge Contents
+if [ "${suitename}" = "squeeze" ]; then
+  cd $ftpdir/dists/${suite}
+  for carch in $(dak admin s-a list-arch ${suite}); do
+    echo doing ${carch}
+    cp $base/dak/templates/contents Contents-${carch}.new;
+    zcat {main,contrib,non-free}/Contents-${carch}.gz | ~joerg/mergecontents.pl | sort >> Contents-${carch}.new;
+    gzip -9v Contents-${carch}.new;
+    mv Contents-${carch}.new.gz Contents-${carch}.gz;
+  done
+  rm {main,contrib,non-free}/Contents-*
+fi
 
-rm {main,contrib,non-free}/Contents-*
+- For wheezy: update main/i18n/Index
+if [ "${suitename}" = "wheezy" ]; then
+  ${scriptsdir}/generate-i18n-Index "${ftpdir}/dists/${suite}"
+fi
 
+- Generate Releases:
 dak generate-releases -f -s ${suite}
+rm ${ftpdir}/dists/${suite}/InRelease
 
 - have the SRMs sign it and put the signature in.
 - Check if a mirror push is needed or next dinstall is enough. for a push
diff --git a/docs/talks/DebConf9/Makefile b/docs/talks/DebConf9/Makefile
deleted file mode 100644 (file)
index 39f2321..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-# $Id: $
-
-LATEX = latex
-PDFLATEX = pdflatex
-DVIPS = dvips
-BIBTEX = bibtex
-PDFVIEW = xpdf -fullscreen
-
-SRC := $(shell egrep -l '^[^%]*\\begin\{document\}' *.tex)
-DVI = $(SRC:%.tex=%.dvi)
-PDF = $(SRC:%.tex=%.pdf)
-PS  = $(SRC:%.tex=%.ps)
-
-all: pdf
-
-$(PDF): %.pdf : %.tex
-       # call two time because of toc etc
-       @$(PDFLATEX) $<
-       @$(PDFLATEX) $<
-       @$(PDFLATEX) $<
-
-show:
-       $(PDFVIEW) $(PDF)
-
-pdf: $(PDF)
-
-clean:
-       -rm -f $(DVI) $(PDF) $(DVI:%.dvi=%.aux) $(DVI:%.dvi=%.log) $(DVI:%.dvi=%.out) $(DVI:%.dvi=%.toc) $(DVI:%.dvi=%.nav) $(DVI:%.dvi=%.snm)
diff --git a/docs/talks/DebConf9/background.jpg b/docs/talks/DebConf9/background.jpg
deleted file mode 100644 (file)
index d20a9c5..0000000
Binary files a/docs/talks/DebConf9/background.jpg and /dev/null differ
diff --git a/docs/talks/DebConf9/ftpmaster.pdf b/docs/talks/DebConf9/ftpmaster.pdf
deleted file mode 100644 (file)
index af4e916..0000000
Binary files a/docs/talks/DebConf9/ftpmaster.pdf and /dev/null differ
diff --git a/docs/talks/DebConf9/ftpmaster.tex b/docs/talks/DebConf9/ftpmaster.tex
deleted file mode 100644 (file)
index 30a5a3e..0000000
+++ /dev/null
@@ -1,432 +0,0 @@
-\documentclass{beamer}
-%\documentclass[draft]{beamer}
-
-\usepackage[german]{babel}
-\usepackage[latin1]{inputenc}
-\usepackage{times}
-\usepackage[T1]{fontenc}
-\usepackage{url}
-
-\mode<presentation>{
-  \usetheme{Madrid}
-  \hypersetup{pdfpagemode=FullScreen}
-  \usecolortheme{albatross}
-  \useinnertheme[shadow]{rounded}
-  \usefonttheme{serif}
-  \usefonttheme{structurebold}
-% \beamerdefaultoverlayspecification{<+-| alert@+>}
-%  \setbeamercovered{transparent}
-}
-
-\title[FTPTeam]{The Debian FTP Team}
-\subtitle{Masters, Assistants, Trainees \\ \alert{Motto: Fuck it! What could possibly go wrong?}}
-
-\author[Joerg Jaspert]{Joerg Jaspert \\ \texttt{joerg@debian.org}}
-\date{Cáceres, July 2009}
-
-\institute[DebConf9]{Debian Conference 2009}
-
-% \AtBeginSection[]
-% {
-%   \begin{frame}<beamer>{Outline}
-%     \tableofcontents[currentsection] %,currentsubsection]
-%   \end{frame}
-% }
-
-
-\begin{document}
-
-\setbeamertemplate{navigation symbols}{}
-{\setbeamertemplate{background}{\includegraphics[width=\paperwidth]{background.jpg}}
-
-\begin{frame}
-  \titlepage
-\end{frame}
-
-\begin{frame}[allowframebreaks,allowdisplaybreaks]{Outline}
-\tiny
-  \tableofcontents
-\end{frame}
-
-\section{History}
-\subsection{Software / Hosting}
-\begin{frame}{History - Scripts}{\alert{Motto: Fuck it! What could possibly go wrong?}}
-  \begin{itemize}
-  \item Ian Murdock/Jackson makeshift scripts
-  \item Monolithic perl dinstall written by Guy Maor
-    \begin{itemize}
-    \item Not exactly secure: upload, shipit
-    \item Not even using PGP either.
-    \end{itemize}
-  \item Katie: a rewrite in python. Known as DAK.
-  \end{itemize}
-\end{frame}
-
-\begin{frame}{History - Hosting}{\alert{Oh fuck, what just went wrong?}}
-  \begin{itemize}
-  \item Under a desk @ cmich.edu
-  \item First master.d.o by Bdale @ HP
-
-{\tiny It was an HP Vectra 486 tower system with 66mhz CPU upgrade, an
-  Adaptec 1740A EISA SCSI controller and two disk drives.  A 330Mb
-  root disk and a 660Mb disk to hold the archive.  Both were 5.25"
-  full-height drives.  All the pieces came off pallets of materials
-  heading out for scrap. Before it left Bdale the archive disk got
-  swapped out for a 1.3Gb drive, also 5.25" full height scsi and a
-  cast-off heading for scrap.
-
-  The first machine to host .debian.org using Debian.}
-
-\pause
-
-  \item Moved to I-Connect
-  \item Then to Novare for several years
-  \item Then to Visi.Net, as 'ftp-master.d.o'
-  \item Relocated to above.net
-  \item Then to the HP DC in Ft. Collins
-  \item Currently at Brown.edu
-  \end{itemize}
-
-\end{frame}
-
-
-\subsection{Archive}
-\begin{frame}{History - Archive}{\alert{That guy impressed me and I am not easily impressed. Wow. BLUE Hair }}
-  \begin{itemize}
-    \item Architectures / Releases:
-      \begin{description}
-        \item[Bo] 1: i386
-        \item[Hamm] 2: + m68k
-        \item[Slink] 4: + sparc, alpha
-        \item[Potato] 6: + powerpc, arm
-        \item[Woody] 11: + mips, mipsel, hppa, ia64, s390
-        \item[Sarge] 11 (unofficial amd64 release)
-        \item[Etch] 11: + amd64, - m68k (there is a semi-official etch-m68k release)
-        \item[Lenny] 12: + armel
-        \item[Squeeze] ?: + kfreebsd-i386, kfreebsd-amd64, - arm, ...
-      \end{description}
-
-    \item  Proposed Architectures:
-      \begin{itemize}
-      \item avr32
-      \item sh\{3,4\}
-      \end{itemize}
-    \end{itemize}
-\end{frame}
-
-\begin{frame}{History - Archive size}{\alert{Ganneff - if it goes wrong, we make it ``Fuck it what do we care?'', but dont tell anyone.}}
-  \begin{itemize}
-  \item Releases:
-    \begin{description}
-    \item[Buzz] 229Mb
-    \item[Rex] 306Mb
-    \item[Bo] 1.2Gb
-    \item[Hamm] 2.1Gb
-    \item[Slink] 4.1Gb
-    \item[Potato] 6.7Gb
-    \item[Woody] 27Gb
-    \item[Sarge] 57Gb
-    \item[Etch] 82Gb
-    \item[Lenny] 125Gb
-    \item[squeeze+X] 1Tb?
-    \end{description}
-
-\pause
-
-  \item Daily pushes:
-    \begin{description}
-    \item[2005] 200Mb up to 2Gb, average 1Gb. Once a day.
-    \item[2009] 1Gb up to 6Gb, average 2Gb. 4 times a day.
-    \end{description}
-  \end{itemize}
-\end{frame}
-
-\subsection{Team}
-\begin{frame}{History - Team}{\alert{sgran - the world B.G. (Before Ganneff)}}
-  \begin{itemize}
-  \item Ian Murdock
-  \item + later Ian Jackson, Bruce Perens, Bdale Garbee
-  \item Guy Maor takes over (moved to dinstall) [somewhere 1995/1996]
-  \item Richard Braakman and James Troup push in [July 1998] % <elmo> so 1998-07-09, looks like a good date for Richard + Me
-  \item James recruits Antti-Juhani Kaijanaho and Gergely Madarasz [Dec 1999]
-  \item Michael Beattie tricks his way in [August 2000]
-  \item Richard becomes inactive [August 2000]
-  \item Work on katie starts (named da-katie) [2000]
-  \item Anthony Towns joins [January 2001]
-  \item Guy becomes inactive [February 2001]
-  \item Ryan Murray is added [June 2001]
-  \item Randall Donald joins [November 2001]
-  \item Daniel Silverstone follows [August 2003]
-  \end{itemize}
-\end{frame}
-
-% Help from elmo for the dates:
-
-% From: Guy Maor <maor@debian.org>
-% Subject: Re: ftpmaster-y stuff
-% To: James Troup <james@nocrew.org>
-% Date: 21 Nov 1999 17:03:08 -0800
-
-% James Troup <james@nocrew.org> writes:
-
-% > I've started being a bit pro-active with ftpmaster stuff, e.g. getting
-% > Gorgo and Ibid on board and announcing it and stuff.  I'm doing this
-% > on the basis that, you don't mind, and if you did you'd say.  Of
-% > course this fails a bit miserably if you don't [say, that is], so
-% > please do tell me if I'm overstepping my bounds...
-
-% I trust you and Richard completely with the ftpmaster stuff.  You have
-% the authority to do anything as far as I'm concerned.
-
-% ------------------------------------------------------------------------
-
-% Guy
-
-% From: Richard Braakman <dark@xs4all.nl>
-% Subject: Re: New ftpmaster (?) Michael Beattie (aka Omnic)
-% To: James Troup <james@nocrew.org>
-% Date: Wed, 16 Aug 2000 13:27:29 +0200
-
-% On Sun, Aug 06, 2000 at 10:29:27PM +0100, James Troup wrote:
-% > person... Omnic is excessively keen and seems to have lots of free
-% > time... unless you have any objections? I'll ask him to join the team
-% > in a couple of days.
-
-% No.  In fact I'm almost writing my retirement notice now -- I'm just working
-% through the current pile of mail before sending anything.
-
-
-\begin{frame}{History - Team}{\alert{mhy - commit it (RE: team motto)}}
-  \begin{itemize}
-  \item Team restructured, Assistants role added [March 2005]
-  \item Joerg Jaspert and Jeroen van Wolffelaar join as Assistants [March 2005]
-  \item Kalle Kivimaa and Thomas Viehmann join as Assistants [February 2008]
-  \item Joerg Jaspert promoted to FTP Master [April 2008]
-  \item Anthony Towns and James Troup leave [April/May 2008]
-  \item Mark Hymers joins as Assistant [July 2008]
-  \item Frank Lichtenheld joins as Assistant [December 2008]
-  \item Thomas Viehmann resigned [December 2008]
-  \item Mike O'Connor joins as Assistant [January 2009]
-  \item Ryan Murray becomes inactive and leaves [March 2009]
-  \item Mark Hymers promoted to FTP Master [March 2009]
-  \item Kalle Kivimaa leaves [July 2009]
-  \end{itemize}
-\end{frame}
-
-\subsection{Present}
-\begin{frame}{The Team}{\alert{Me fail English? That's unpossible.}}
-  FTP Masters
-  \begin{itemize}
-  \item Joerg Jaspert - Ganneff
-  \item Mark Hymers - mhy
-  \end{itemize}
-
-  FTP Assistants
-  \begin{itemize}
-  \item Frank Lichtenheld - djpig
-  \item Mike O'Connor - stew
-  \end{itemize}
-
-  FTP Trainee
-  \begin{itemize}
-  \item Barry deFreese - bdefreese
-  \item You?!
-  \end{itemize}
-
-  Since April 2008 we almost always had people in the FTP Trainee role.
-\end{frame}
-
-\section{The roles}
-\subsection{FTP Master}
-\begin{frame}{FTP Master}{\alert{Don't you kids take anything. I'm watching you. I've got eye implants in the back of my head.}}
-The FTP Master role, unix group \alert{debadmin}, is ...
- \begin{block}{(Lets cite Matthew Garrett)}
-   ... responsible for maintaining the infrastructure
-   required to support the archive. This takes the form of the scripts used
-   for processing uploaded packages, but also the flow of packages between
-   distributions.
- \end{block}
- \begin{itemize}
- \item Keep the archive running
- \item Keep the archive legal
- \item Support the teams that depend on it (Release, Security, soon backports/volatile)
- \item Keep the archive uptodate with the requirements of the project
- \end{itemize}
-\end{frame}
-
-\subsection{FTP Assistant}
-\begin{frame}{FTP Assistant}{\alert{Ganneff - see topic. our motto. yeah.}}
-The FTP Assistant role, group \alert{ftpteam}, created 2005, allows
-additions of people to the FTP Team without having to hand out full FTP
-Master rights.
-
-It allows
-\begin{itemize}
-\item to process NEW,
-\item to handle overrides,
-\item to remove packages.
-\end{itemize}
-
-Thus the most common actions can be done but the possible damage that
-can happen by accident is limited.
-\end{frame}
-
-\subsection{FTP Trainee}
-\begin{frame}{FTP Trainee}{\alert{Kids, you tried your best and you failed miserably. The lesson is, never try. \tiny \\ Hello i18n cabal. :)}}
-The FTP Trainee role, group \alert{ftptrainee}, was created in 2008 to
-allow easy training and testing of future team members.
-
-\begin{itemize}
-\item Initially called ``slave\_NEW''
-
-\pause
-
-\item 6 people trained so far
-\item 2 promoted to Assistants
-\end{itemize}
-
-Trainees can look at NEW and do the usual package checks, but they can
-not actually accept or reject a package. Instead they leave a note,
-which an Assistant or Master reads and acts on.
-\end{frame}
-
-\section{The job}
-\subsection{Archives}
-\begin{frame}{Archives}{\alert{You can't keep blaming yourself. Just blame yourself once, and move on.}}
-  \begin{itemize}
-  \item The main archive, ftp.debian.org
-  \item The security archive, security.debian.org
-  \item (Soon) The volatile archive, volatile.debian.org (integrated
-    into main archive)
-  \item (Soon) The backports.org archive, www.backports.org (-> backports.debian.org)
-  \end{itemize}
-\end{frame}
-
-\subsection{Removals and Overrides}
-\begin{frame}{Removals and Overrides}{\alert{mhy - I was upset to discover (via grepping my logs) that I actually said that quote in one of the subtitles verbatim}}
-  \begin{itemize}
-  \item Removals
-    \begin{itemize}
-    \item (Semi)-Automagic via cruft-report
-    \item Manual, with help from a nice html page
-    \end{itemize}
-  \item Overrides
-    \begin{itemize}
-    \item Priority / Section
-    \item Override disparity
-    \item Currently by mail, should be changed to using BTS
-    \end{itemize}
-  \end{itemize}
-\end{frame}
-
-\subsection{NEW}
-\begin{frame}{NEW}{\alert{mhy - Ganneff airlines: departing from a window near you}}
-
-NEW checking is about three things. In order of priority:
-\begin{itemize}
-\item trying to keep the archive legal,
-\item trying to keep the package namespace sane,
-\item trying to reduce the number of bugs in Debian.
-\end{itemize}
-
-A small list of most common checks
-\begin{itemize}
-\item Check the source for license problems
-  \begin{itemize}
-  \item incompatibility
-  \item non-free license terms
-  \end{itemize}
-\item (sane) package renames / splits
-\item Policy violation
-\item source for everything included?
-\item ...
-\end{itemize}
-\end{frame}
-
-\subsection{Code}
-\begin{frame}{Code}{\alert{Oh, so they have Internet on computers now!}}
-git clone https://ftp-master.debian.org/git/dak.git
-
-\begin{itemize}
-\item Python
-\item and some Shell
-\item and some Perl
-\item at least 9 years old
-\item at minimum 40 different authors
-\end{itemize}
-\end{frame}
-
-\section{How to help / join}
-
-\subsection{QA Team}
-\begin{frame}{Join the Army, err, QA Team}{\alert{mhy - oh god. Ganneff has been on TV? I'm surprised the sets didn't all explode}}
-  \begin{itemize}
-  \item Do QA work
-  \item File removal bugs for unmaintained, outdated or otherwise no longer needed packages
-  \end{itemize}
-\end{frame}
-
-\subsection{Bug triage}
-\begin{frame}{Bug triage}{\alert{I am so smart! I am so smart! S-M-R-T! I mean S-M-A-R-T...}}
-  \begin{itemize}
-  \item Retitle bugs to fit our removal format
-  \item Close bugs that don't apply (anymore)
-  \item Make sure removals can happen (rev-deps anyone?)
-  \end{itemize}
-\end{frame}
-
-\subsection{Write Code}
-\begin{frame}{Write Code}{\alert{I'm normally not a praying man, but if you're up there, please save me Superman.}}
-
-\end{frame}
-
-\subsection{Join the team}
-\begin{frame}{Join the team}{\alert{Hi, my name's Moe. Or as the ladies like to refer to me, 'hey you in the bushes'}}
-Join us. Mail \href{mailto:ftpmaster@debian.org}{ftpmaster@debian.org}.
-
-Join us. Mail \href{mailto:ftpmaster@debian.org}{ftpmaster@debian.org}.
-
-Join us. Mail \href{mailto:ftpmaster@debian.org}{ftpmaster@debian.org}.
-
-Join us. Mail \href{mailto:ftpmaster@debian.org}{ftpmaster@debian.org} NOW.
-\end{frame}
-
-\section{Final}
-
-\subsection{The future}
-\begin{frame}{The future}{\alert{Facts are meaningless. You could use facts to prove anything that's even remotely true!}}
-  \begin{itemize}
-  \item Replace database code in dak with sqlalchemy
-  \item source v3
-  \item Split long description out of Packages files, make it nothing
-    than another translation
-  \item debtags integration, sections death
-  \item tdebs
-  \item ddebs
-  \item lintian autorejects
-  \item autosigning
-  \item binary throw away
-  \item ...
-  \end{itemize}
-\end{frame}
-
-
-\begin{frame}{Thank you for sleeping through this talk}{\alert{Ganneff - (trust full, when had i been that drunk?)}}
-  \begin{center}
-  Joerg Jaspert \\
-  \href{mailto:ftpmaster@debian.org}{ftpmaster@debian.org}
-
-  \href{http://ftp-master.debian.org/}{http://ftp-master.debian.org/}
-
-  \href{https://ftp-master.debian.org/git/dak.git}{https://ftp-master.debian.org/git/dak.git}
-
-
-
-  \vfill
-
-
-  \end{center}
-\end{frame}
-
-
-\end{document}
diff --git a/scripts/debian/byhand-debian-faq b/scripts/debian/byhand-debian-faq
new file mode 100755 (executable)
index 0000000..b436dc7
--- /dev/null
@@ -0,0 +1,64 @@
+#! /bin/bash
+#
+# byhand-debian-faq - automatic BYHAND handling for debian-faq
+#
+# This file is part of dak.
+#
+# Copyright 2013, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+set -u
+
+export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
+. ${SCRIPTVARS}
+
+scriptname=byhand-debian-faq
+
+if [ $# -lt 4 ]; then
+    echo "usage: ${scriptname} <byhand-file> <version> <arch> <changes-file>" >&2
+    exit 1
+fi
+
+byhand_path="${1}"
+byhand_file="${byhand_path##*/}"
+version="${2}"
+architecture="${3}"
+changes="${4}"
+
+suite="$(awk '/^Distribution:/ { print $2 }' < "${changes}")"
+case "${suite}" in
+    unstable|sid)
+        : # okay
+        ;;
+    *)
+        echo "$0: not targeted at unstable" >&2
+        exit 1
+        ;;
+esac
+
+if [[ ! "${byhand_file}" =~ ^debian-faq\.en\.(html\.tar|txt|ps|pdf)\.gz$ ]]; then
+    echo "$0: unknown file '${byhand_file}'" >&2
+    exit 1
+fi
+
+target="${ftpdir}/doc/FAQ/"
+if [ ! -d "${target}" ]; then
+    mkdir -p "${target}"
+fi
+
+cp --remove-destination "${byhand_path}" "${target}"
+chmod 0644 "${target}/${byhand_file}"
index af03af4c8f3d3bfb12fbeab67139ae3cedc90791..3ba2f0057310ebcceac24592c2226c30c9301cf6 100755 (executable)
@@ -20,8 +20,9 @@ error() {
 }
 
 # Check validity of version number
-# Expected are: YYYYMMDD, YYYYMMDD.x, YYYYMMDD<suite>x, YYYYMMDD+<suite>x and the +b[0-9] on the end
-if ! echo "$VERSION" | grep -Eq "^[0-9]{8}(|(\.|\+?[a-z]+)[0-9]+)(\+b[0-9])?$"; then
+# Expected are: YYYYMMDD, YYYYMMDD.x, YYYYMMDD<suite>x, YYYYMMDD+<suite>x,
+# YYYYMMDD+debXuZ and the +b[0-9] on the end
+if ! echo "$VERSION" | grep -Eq "^[0-9]{8}((\.|\+?[a-z]+|\+deb[0-9]+u)[0-9]+)?(\+b[0-9])?$"; then
        error "Invalid version: '$VERSION'"
 fi
 
@@ -32,7 +33,7 @@ case $SUITE in
     "")
        error "Error: unable to determine suite from Changes file"
        ;;
-    unstable|sid)
+    unstable|sid|*-proposed-updates)
        : # nothing to do
        ;;
     *)
@@ -55,7 +56,7 @@ if [ -d "$TARGET/$VERSION" ]; then
 fi
 
 # We know the VERSION is sane by here, we just need to make sure we escape the + in +b1 (if any)
-# It needs 'g' as well as we may have +$DIST+b[0-9]
+# It needs 'g' as well as we may have +$DIST+b[0-9] or +debXuZ+bY
 VERSIONREGEXP="$(echo $VERSION | sed 's@+@\\\+@g')"
 
 # We know all data to be in ./installer-<arch>/<version>; see if there's
index d0ef0a7e24b2d83cebd15488ab86d373a9c51020..3afc8d52882aee9032c8fee8ea566824686ebd19 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 set -u
 set -e
@@ -11,7 +11,8 @@ fi
 export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
 . $SCRIPTVARS
 
-WIN32_LOADER_FILE="$1" # win32-loader_${VERSION}_${ARCH}{.exe,txt}
+WIN32_LOADER_PATH="$1" # win32-loader_${VERSION}_${ARCH}{.exe,txt}
+WIN32_LOADER_FILE="${WIN32_LOADER_PATH##*/}"
 VERSION="$2"
 ARCH="$3"
 CHANGES="$4"   # Changes file for the upload
@@ -32,6 +33,16 @@ case $SUITE in
            ;;
 esac
 
+case "${WIN32_LOADER_FILE}" in
+    win32-loader_*.exe|win32-loader_*.txt)
+        : # okay
+        ;;
+    *)
+        echo "Error: invalid filename for byhand-win32-loader"
+        exit 1
+        ;;
+esac
+
 # This must end with /
 TARGET="${ftpdir}/tools/win32-loader/${SUITE}/"
 
@@ -47,7 +58,7 @@ fi
 # Put said file into the tools directory
 # Use --remove-destination to avoid problems with the fact that the target may
 # be a hardlink and in that case we'd update multiple suites together
-cp --remove-destination "$WIN32_LOADER_FILE" "${TARGET}${TARGET_FILENAME}"
+cp --remove-destination "$WIN32_LOADER_PATH" "${TARGET}${TARGET_FILENAME}"
 
 # Chmod it correctly
 chmod 0644 "${TARGET}${TARGET_FILENAME}"
index 74494ad5c054178e95a584f4fe73472bd65491ed..71391154a9a223985fd2eec5e777f0e6d8562d16 100755 (executable)
@@ -81,7 +81,7 @@ elif [ ! -d "$PACKAGES_LISTS_DIR" ]; then
 fi
 
 #STABLE="squeeze"
-TESTING="wheezy"
+TESTING="jessie"
 UNSTABLE="sid"
 
 # Original SHA256SUMS, generated by i18n.debian.net
diff --git a/scripts/debian/dm-monitor b/scripts/debian/dm-monitor
deleted file mode 100755 (executable)
index 92a945d..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/bin/sh
-
-echo "Known debian maintainers:"
-
-eval $(dak admin config db-shell)
-
-psql --html <<EOF
-  SELECT uid.uid, uid.name, f.fingerprint
-    FROM uid LEFT OUTER JOIN fingerprint f ON (uid.id = f.uid) 
-   WHERE uid.uid LIKE '%@%'
-ORDER BY uid.uid;
-EOF
-
-echo "Debian maintainers not able to update any packages:"
-
-psql --html <<EOF
-  SELECT uid.uid, uid.name
-    FROM uid 
-   WHERE uid.uid LIKE 'dm:%'
-     AND uid.uid NOT IN (
-              SELECT u.uid
-                FROM src_uploaders su JOIN source s ON (su.source = s.id)
-                     JOIN src_associations sa ON (s.id = sa.source)
-                     JOIN maintainer m ON (su.maintainer = m.id)
-                     JOIN uid u ON 
-                      (m.name LIKE u.name || ' <%>' OR
-                       m.name LIKE '% <' || substring(u.uid FROM 4) || '>')
-               WHERE u.uid LIKE '%@%' AND sa.suite = 5
-         )
-ORDER BY uid.uid;
-EOF
-
-echo "Packages debian maintainers may update:"
-
-psql --html <<EOF
-  SELECT s.source, space_separated_list(s.version), u.uid
-    FROM src_uploaders su JOIN source s ON (su.source = s.id) 
-         JOIN src_associations sa ON (s.id = sa.source)
-         JOIN maintainer m ON (su.maintainer = m.id)
-         JOIN uid u ON (m.name LIKE u.name || ' <%>' OR
-                m.name LIKE '% <' || substring(u.uid FROM 4) || '>')
-   WHERE s.dm_upload_allowed = 't' GROUP BY s.source, s.version, u.uid
-ORDER BY u.uid, s.source, s.version;
-EOF
-
-echo "Source packages in the pool uploaded by debian maintainers:"
-
-psql --html <<EOF
-  SELECT s.source, s.version, s.install_date, u.uid
-    FROM source s JOIN fingerprint f ON (s.sig_fpr = f.id) 
-         JOIN uid u ON (f.uid = u.id)
-   WHERE u.uid LIKE '%@%'
-ORDER BY u.uid, s.source, s.version;
-EOF
-
-echo "Binary packages in the pool uploaded by debian maintainers:"
-
-psql --html <<EOF
-  SELECT b.package, b.version, a.arch_string AS arch, u.uid
-    FROM binaries b JOIN architecture a ON (b.architecture = a.id)
-         JOIN fingerprint f ON (b.sig_fpr = f.id) 
-         JOIN uid u ON (f.uid = u.id)
-   WHERE u.uid LIKE '%@%'
-ORDER BY u.uid, b.package, b.version;
-EOF
-
-echo "Recorded Uploaders:"
-
-psql --html <<EOF
-  SELECT s.source, s.version, m.name
-    FROM src_uploaders su JOIN source s ON (su.source = s.id) 
-         JOIN maintainer m ON (su.maintainer = m.id)
-ORDER BY m.name, s.source, s.version;
-EOF
-
-echo "Keys without a recorded uid:"
-
-psql --html <<EOF
-  SELECT *
-    FROM fingerprint f
-   WHERE f.uid IS NULL;
-EOF
-
index 4e2587faea2292368940066134ee5230a0a4ddb9..a5d54dc374caabe128a8e159554cb2b25f8cbe1e 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 # Original written from Jeroen van Wolffelaar <jeroen@debian.org>
 
index 5a0b853eafdef9093be6c1c9948e61819d1a1ecd..24ab88edb0ca54993467655046f25a6801047769 100755 (executable)
@@ -43,6 +43,11 @@ case "${IMPORTSUITE}" in
         INPUTFILE="/srv/release.debian.org/sets/squeeze-updates/current"
         DO_CHANGELOG="false"
         ;;
+    wheezy-updates)
+        # What file we look at.
+        INPUTFILE="/srv/release.debian.org/sets/wheezy-updates/current"
+        DO_CHANGELOG="false"
+        ;;
     *)
         echo "You are so wrong here that I can't even believe it. Sod off."
         exit 42
@@ -59,7 +64,7 @@ if [ "x${DO_CHANGELOG}x" = "xtruex" ]; then
     BRITNEY=" --britney"
 fi
 
-cat ${INPUTFILE} | dak control-suite --set ${IMPORTSUITE} ${BRITNEY}
+dak control-suite --set ${IMPORTSUITE} ${BRITNEY} < ${INPUTFILE}
 
 if [ "x${DO_CHANGELOG}x" = "xtruex" ]; then
     NOW=$(date "+%Y%m%d%H%M")
index 4fb040ea81ce8123b337bc22a5658dbf1951f4b7..21d07caab096d9c302f0c16a95095a7e745d3be4 100755 (executable)
@@ -51,7 +51,22 @@ function log () {
         echo "$(date +"%b %d %H:%M:%S") $(hostname -s) ${prefix}[$$]: $@"
 }
 
-export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
+case "$(hostname)" in
+    franck)
+       SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
+       archive=ftp-master
+        ;;
+    chopin)
+       SCRIPTVARS=/srv/security-master.debian.org/dak/config/debian-security/vars
+       archive=security-master
+       ;;
+    *)
+       echo "Unknown host $(hostname)" >&2
+       exit 1
+       ;;
+esac
+
+export SCRIPTVARS
 . $SCRIPTVARS
 
 function byebye_lock() {
@@ -113,7 +128,7 @@ if [ "$(hostname -s)" != "stabile" ]; then
     # on the other side should contain (one line, no #)
 # command="rsync --server -lHogDtpRe.Lsf --remove-source-files . /srv/morgue.debian.org/sync/ftp-master",
 # no-port-forwarding,no-X11-forwarding,no-agent-forwarding,from="ftp-master.debian.org" ssh-rsa...
-    rsync -aHq -e "ssh -o Batchmode=yes -o ConnectTimeout=30 -o SetupTimeout=30 " --remove-source-files --from0 --files-from=${LISTFILE} $base/morgue/ morgue-sync:/srv/morgue.debian.org/sync/ftp-master
+    rsync -aHq -e "ssh -o Batchmode=yes -o ConnectTimeout=30 -o SetupTimeout=30 " --remove-source-files --from0 --files-from=${LISTFILE} $base/morgue/ morgue-sync:/srv/morgue.debian.org/sync/$archive
 
     # And remove empty subdirs. To remove entire hierarchies we probably should run this
     # in a loop, but why bother? They'll be gone in a few days then, so meh.
index 7981e51ac89507f239a0de5cc0467122cadcca32..4d1c436310729d2f6bea62011a0d5a4f51c8d0e5 100644 (file)
@@ -1,3 +1,3 @@
-VRULE:1234691928#632a5b:lenny release
-VRULE:1281102258#0b19c1:squeeze freeze
 VRULE:1296908977#0b19c1:squeeze release
+VRULE:1341078720#00ff00:wheezy freeze
+VRULE:1367689920#00ff00:wheezy release
diff --git a/scripts/debian/sync-dd b/scripts/debian/sync-dd
new file mode 100755 (executable)
index 0000000..30fb1d9
--- /dev/null
@@ -0,0 +1,112 @@
+#! /bin/bash
+
+# Copyright (C) 2011,2013, Joerg Jaspert <joerg@debian.org>
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+set -e
+set -u
+set -E
+
+export LANG=C
+export LC_ALL=C
+
+export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
+. $SCRIPTVARS
+
+usage() {
+  echo "usage: $0 <lock> <host1> <host2> sync|pool"
+  echo
+  echo "sync dd-accessible copy of the archive"
+  echo
+  echo "arguments:"
+  echo "  lock:      file used for locking"
+  echo "  host1:     hostname for syncing /srv/ftp-master.debian.org"
+  echo "  host2:     hostname for syncing /srv/ftp.debian.org"
+  echo "  sync|pool: sync excludes ftp/, pool syncs ftp/ too"
+  exit ${1:-0}
+}
+
+if [ $# -ne 4 ]; then
+  usage 1
+fi
+
+lockfile="${lockdir}/${1}"
+host1="${2}"
+host2="${3}"
+mode="${4}"
+
+# extra options for rsync of /srv/ftp-master.debian.org
+extra1=""
+
+case "${mode}" in
+    pool|sync)
+       ;;
+    *)
+       echo "Unknown mode ${mode}." >&2
+       exit 1
+       ;;
+esac
+
+cleanup() {
+  rm -f "${lockfile}"
+}
+trap cleanup EXIT TERM HUP INT QUIT
+
+# Also, NEVER use --delete-excluded!
+if lockfile -r3 "${lockfile}"; then
+    rsync -aH -B8192 \
+       ${extra1} \
+        --exclude "/.nobackup" \
+        --exclude "/backup/*.xz" \
+       --exclude "/backup/dump*" \
+        --exclude "/build-queues/" \
+       --exclude "/database/*.db" \
+       --exclude ".da-backup.trace" \
+        --exclude "/export/changelogs/tmp*/" \
+        --exclude "/ftp" \
+       --exclude "lost+found" \
+       --exclude "/lock/" \
+        --exclude "/mirror" \
+       --exclude "/morgue/" \
+        --exclude "/queue/bts_version_track/" \
+       --exclude "/queue/unchecked/" \
+       --exclude "/s3kr1t" \
+       --exclude "/scripts/s3kr1t" \
+       --exclude "/tmp/" \
+       --delete --delete-after \
+       --timeout 3600 \
+       -e 'ssh -o ConnectTimeout=30 -o SetupTimeout=30' \
+       /srv/ftp-master.debian.org/ "${host1}:/srv/ftp-master.debian.org/"
+   # command for the remote side:
+   # command="rsync --server -lHogDtpre.iLsf -B8192 --timeout=3600 --delete-after . /srv/ftp-master.debian.org/"
+
+   rsync -aH -B8192 \
+       --exclude "/.nobackup" \
+       --exclude mirror \
+       --exclude rsync/ \
+       --exclude lost+found \
+       --exclude .da-backup.trace \
+       --exclude web-users/ \
+       --delete --delete-after \
+       --timeout 3600 \
+       -e 'ssh -o ConnectTimeout=30 -o SetupTimeout=30' \
+       /srv/ftp.debian.org/ "${host2}:/srv/ftp.debian.org/"
+   # command for the remote side:
+   # command="rsync --server -lHogDtpre.iLsf -B8192 --timeout=3600 --delete-after . /srv/ftp.debian.org/"
+else
+    echo "Couldn't get the lock, not syncing"
+    exit 0
+fi
index 2a89da403cb1483c502992a4352e8968ba8712e5..6c4ece0cf24c151d5f38ec4380350114cdfd4337 100755 (executable)
@@ -1,4 +1,6 @@
-#!/bin/sh -e
+#!/bin/bash
+
+set -e
 
 export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
 . $SCRIPTVARS
diff --git a/scripts/debian/update-buildd-archive b/scripts/debian/update-buildd-archive
new file mode 100755 (executable)
index 0000000..a11b009
--- /dev/null
@@ -0,0 +1,68 @@
+#! /bin/bash
+#
+# Copyright 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+set -e
+set -u
+
+usage() {
+  echo "usage: $0 <source> <target>"
+  echo
+  echo "Update a minimalistic mirror for buildd archives."
+  exit ${1:-0}
+}
+
+if [ $# -ne 2 ]; then
+  usage 1
+fi
+
+source="${1}"
+dest="${2}"
+
+if [ ! -d "${source}/dists" -o ! -d "${source}/pool" ]; then
+  echo "${source}: does not look like a Debian archive" >&2
+  exit 1
+fi
+
+if [ ! -d "${dest}" ]; then
+  echo "${dest}: destination does not exist or is not a directory" >&2
+  exit 1
+fi
+
+# Make sure ${dest}/dists exists to avoid a special case later
+if [ ! -d "${dest}/dists" ]; then
+  mkdir "${dest}/dists"
+fi
+for olddir in dists.new dists.old; do
+  if [ -e "${dest}/${olddir}" ]; then
+    echo "Removing old ${olddir}..."
+    rm -r "${dest}/${olddir}"
+  fi
+done
+
+# Make sure ${dest}/pool exists
+if [ ! -e "${dest}/pool" ]; then
+  # Files are only removed from the build queues once they are no longer
+  # referenced. Having a symlink should thus not cause problems.
+  ln -s "${source}/pool" "${dest}/pool"
+fi
+
+# Finally copy dists/ to dists.new/, rename it and remove old version
+cp -a "${source}/dists" "${dest}/dists.new"
+mv "${dest}/dists" "${dest}/dists.old"
+mv "${dest}/dists.new" "${dest}/dists"
+rm -r "${dest}/dists.old"
index 3c3dcaf3d0dddadfb1aa9edcae51df779b6e0f15..950c8177ef209eca8ed88fb0d1c2650e385bada0 100755 (executable)
@@ -1,8 +1,10 @@
-#!/bin/sh
+#!/bin/bash
 #
 # Fetches latest copy of mailing-lists.txt
 # Michael Beattie <mjb@debian.org>
 
+set -e
+
 export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
 . $SCRIPTVARS
 
index 3d1fb31e6709f72747dc0bbaf1e3358315803161..a711d8a624c82c23e63451ae0c4ce223f040531b 100755 (executable)
@@ -1,8 +1,10 @@
-#!/bin/sh
+#!/bin/bash
 #
 # Very Very hackish script...  don't laugh.
 # Michael Beattie <mjb@debian.org>
 
+set -e
+
 export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
 . $SCRIPTVARS
 
index 76551ff344ee6cf5b2adb042b5655dc0410b685b..62d234cecc7d3da3703ee608aaabe2ff3ab30efe 100755 (executable)
@@ -3,6 +3,8 @@
 # Fetches latest copy of pseudo-packages
 # Joerg Jaspert <joerg@debian.org>
 
+set -e
+
 export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
 . $SCRIPTVARS
 
index 4b9acfb4cbd8dd0864728f966683284e3bb5e257..140ead9192b88a07faa830c632f86f98b9755e9f 100644 (file)
@@ -8,8 +8,8 @@ and the following packages for dak itself:
    binutils-multiarch python-yaml less python-ldap python-pyrss2gen python-rrdtool
    symlinks python-debian
 
-(the schema assumes at least postgresql 9.0; ftpmaster in Debian currently uses
-the squeeze postgresql 9.0 backport)
+(the schema assumes at least postgresql 9.1; ftpmaster in Debian currently uses
+the squeeze postgresql 9.1 backport)
 
 The following roles are assumed to exist:
  * dak: database superuser: needs to be an actual user
@@ -26,7 +26,7 @@ Set up the dak user:
 Create postgres roles and database:
 # sudo -u postgres psql
   CREATE USER dak CREATEROLE;
-  CREATE ROLE ftpmaster;
+  CREATE ROLE ftpmaster WITH ROLE dak;
   CREATE ROLE ftpteam WITH ROLE ftpmaster;
   CREATE ROLE ftptrainee WITH ROLE ftpmaster, ftpteam;
 
@@ -173,35 +173,14 @@ Add overrides, Edit overrides, Check, Manual reject, Note edit, Prod, [S]kip, Qu
 ACCEPT
 -----------------------------------------------------------------------
 
-At this stage, the package has been ACCEPTed from NEW into NEWSTAGE.
-We now need to finally ACCEPT it into the pool:
+At this stage, the package has been marked as ACCEPTed from NEW.
+We now need to process the NEW policy queue:
 
-# cd /srv/dak/queue/newstage
-# dak process-upload *.changes
+# dak process-policy new
 
 -----------------------------------------------------------------------
-hello_2.6-1_amd64.changes
-ACCEPT
-
-
-hello_2.6-1.debian.tar.gz
-  to main/h/hello/hello_2.6-1.debian.tar.gz
-hello_2.6-1.dsc
-  to main/h/hello/hello_2.6-1.dsc
-hello_2.6-1_amd64.deb
-  to main/h/hello/hello_2.6-1_amd64.deb
-hello_2.6.orig.tar.gz
-  to main/h/hello/hello_2.6.orig.tar.gz
-
-
-Override entries for your package:
-hello_2.6-1.dsc - optional devel
-hello_2.6-1_amd64.deb - optional devel
-
-Announcing to debian-devel-changes@lists.debian.org
-[A]ccept, Skip, Quit ?A
-Installing.
-Installed 1 package set, 646 KB.
+Processing changes file: hello_2.6-1_amd64.changes
+  ACCEPT
 -----------------------------------------------------------------------
 
 We can now see that dak knows about the package:
index b4c265fea96281ef285e089c7b22d78909706f70..55ff1c3ff6b990f9d78682adc55e6cec372ff3c3 100755 (executable)
@@ -14,6 +14,7 @@ debug
 devel
 doc
 editors
+education
 electronics
 embedded
 fonts
@@ -26,6 +27,7 @@ hamradio
 haskell
 httpd
 interpreters
+introspection
 java
 kde
 kernel
@@ -35,6 +37,7 @@ lisp
 localization
 mail
 math
+metapackages
 misc
 net
 news
index 35aff97d1b15169eae66f9d0df40094978182f03..3b6b27add119181e48cf008d11a518e530afda67 100644 (file)
@@ -28,8 +28,6 @@ Dinstall
    MyAdminAddress "dak@__DAKFQDN__";
    MyDistribution "TestDakInstance";
 
-   FutureTimeTravelGrace 28800; // 8 hours
-   PastCutoffYear "1984";
    SkipTime 10;
 
    DefaultSuite "unstable";
@@ -37,6 +35,7 @@ Dinstall
 
 Dir
 {
+  Base "__DAKBASE__";
   Root "__DAKBASE__/ftp/";
   Pool "__DAKBASE__/ftp/pool/";
   Templates "__DAKBASE__/templates/";
index 4ad32d9132b8a6153320d158364e3e7f3c9ffc56..b38eb42adc298ab4a4dfc104520f2cbfec7f757a 100644 (file)
@@ -29,8 +29,6 @@ Misc
  o __BUG_NUMBER__
  o __CONTROL_MESSAGE__
  o __MANUAL_REJECT_MESSAGE__
- o __SHORT_SUMMARY__
  o __SUMMARY__
- o __STABLE_WARNING__
  o __SUITE__
 
diff --git a/templates/process-accepted.install b/templates/process-accepted.install
deleted file mode 100644 (file)
index c807c78..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-From: __DAK_ADDRESS__
-To: __MAINTAINER_TO__
-__BCC__
-X-Debian: DAK
-X-Debian-Package: __SOURCE__
-Precedence: bulk
-MIME-Version: 1.0
-Content-Type: text/plain; charset="utf-8"
-Content-Transfer-Encoding: 8bit
-Subject: __CHANGES_FILENAME__ INSTALLED into __SUITE__
-
-__REJECT_MESSAGE__
-Installing:
-__SUMMARY__
-
-Thank you for your contribution to __DISTRO__.
diff --git a/templates/process-command.processed b/templates/process-command.processed
new file mode 100644 (file)
index 0000000..3fc6206
--- /dev/null
@@ -0,0 +1,13 @@
+From: __DAK_ADDRESS__
+To: __MAINTAINER_TO__
+Cc: __CC__
+__BCC__
+X-Debian: DAK
+Precedence: bulk
+Auto-Submitted: auto-generated
+MIME-Version: 1.0
+Content-Type: text/plain; charset="utf-8"
+Content-Transfer-Encoding: 8bit
+Subject: Results of processing __FILENAME__
+
+__RESULTS__
index 8fafaa9cb04d0aa52d30ebacd9fb957e91ec7c71..56acef1f91d01837ee805c94962173a89817b7eb 100644 (file)
@@ -4,6 +4,7 @@ To: bxa@ftp-master.debian.org
 __BCC__
 X-Debian: DAK
 Precedence: junk
+Auto-Submitted: auto-generated
 MIME-Version: 1.0
 Content-Type: text/plain; charset="utf-8"
 Content-Transfer-Encoding: 8bit
index 09b5e0c9e8b45b3b5150df2316669cade581fa94..78dfc464a5bad569fdc36655ffa542a8d45ff6da 100644 (file)
@@ -4,13 +4,16 @@ __BCC__
 X-Debian: DAK
 X-Debian-Package: __SOURCE__
 Precedence: bulk
+Auto-Submitted: auto-generated
 MIME-Version: 1.0
 Content-Type: text/plain; charset="utf-8"
 Content-Transfer-Encoding: 8bit
 Subject: __CHANGES_FILENAME__ ACCEPTED into __SUITE__
 
-__REJECT_MESSAGE__
+__WARNINGS__
+
 Accepted:
-__SUMMARY__
+
+__FILE_CONTENTS__
 
 Thank you for your contribution to __DISTRO__.
index 7297c91c87d7a5a641f74db7d38c697c04156ac4..95f8699870f201bac80037cb792b09473e99cec7 100644 (file)
@@ -9,6 +9,3 @@ Content-Transfer-Encoding: 8bit
 Subject: Accepted __SOURCE__ __VERSION__ (__ARCHITECTURE__)
 
 __FILE_CONTENTS__
-
-Accepted:
-__SHORT_SUMMARY__
index 8ab731728a054cf5f389531a5a4b8cbd9c793a92..535ce34790a7b429651677ad96d4fac35efa746a 100644 (file)
@@ -12,10 +12,7 @@ Source: __SOURCE__
 Source-Version: __VERSION__
 
 We believe that the bug you reported is fixed in the latest version of
-__SOURCE__, which is due to be installed in the __DISTRO__ FTP archive:
-
-__SHORT_SUMMARY__
-__STABLE_WARNING__
+__SOURCE__, which is due to be installed in the __DISTRO__ FTP archive.
 
 A summary of the changes between this version and the previous one is
 attached.
diff --git a/templates/process-unchecked.bug-experimental-fixed b/templates/process-unchecked.bug-experimental-fixed
deleted file mode 100644 (file)
index 34c5ca5..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-From: __MAINTAINER_FROM__
-To: control@__BUG_SERVER__
-Cc: __MAINTAINER_TO__
-__BCC__
-X-Debian: DAK
-X-Debian-Package: __SOURCE__
-MIME-Version: 1.0
-Content-Type: text/plain; charset="utf-8"
-Content-Transfer-Encoding: 8bit
-Subject: Fixed in upload of __SOURCE__ __VERSION__ to experimental
-
-__CONTROL_MESSAGE__
-quit
-
-This message was generated automatically in response to an
-upload to the experimental distribution.  The .changes file follows.
-
-__FILE_CONTENTS__
diff --git a/templates/process-unchecked.bug-nmu-fixed b/templates/process-unchecked.bug-nmu-fixed
deleted file mode 100644 (file)
index 45f6c73..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-From: __MAINTAINER_FROM__
-To: control@__BUG_SERVER__
-Cc: __MAINTAINER_TO__
-__BCC__
-X-Debian: DAK
-X-Debian-Package: __SOURCE__
-MIME-Version: 1.0
-Content-Type: text/plain; charset="utf-8"
-Content-Transfer-Encoding: 8bit
-Subject: Fixed in NMU of __SOURCE__ __VERSION__
-
-__CONTROL_MESSAGE__
-quit
-
-This message was generated automatically in response to a
-non-maintainer upload.  The .changes file follows.
-
-__FILE_CONTENTS__
index 6c3162fa2bb7287076fdd600428da3107df44fc3..4974b2f1bf637aa31714feef6b1421e1efb63dc6 100644 (file)
@@ -4,16 +4,14 @@ __BCC__
 X-Debian: DAK
 X-Debian-Package: __SOURCE__
 Precedence: bulk
+Auto-Submitted: auto-generated
 MIME-Version: 1.0
 Content-Type: text/plain; charset="utf-8"
 Content-Transfer-Encoding: 8bit
 Subject: __CHANGES_FILENAME__ is NEW
 
-__SUMMARY__
+__WARNINGS__
 
 Your package contains new components which require manual editing of
 the override file.  It is ok otherwise, so please be patient.  New
 packages are usually added to the override file about once a week.
-
-You may have gotten the distribution wrong.  You'll get warnings above
-if files already exist in other distributions.
index b1dd84bdbad9c67387cadd8582d9b1e063523a2d..ab542fe0827439e0f5d0ddb7f7968ad048dfcf81 100644 (file)
@@ -4,6 +4,7 @@ __BCC__
 X-Debian: DAK
 X-Debian-Package: __SOURCE__
 Precedence: junk
+Auto-Submitted: auto-generated
 MIME-Version: 1.0
 Content-Type: text/plain; charset="utf-8"
 Content-Transfer-Encoding: 8bit
index 40957d7c5cc53a1637cfd0837cfa61e60124833c..d8edb371575962cbb4843c5e584a6fbe5358d997 100644 (file)
@@ -5,6 +5,7 @@ __CC__
 X-Debian: DAK
 X-Debian-Package: __SOURCE__
 Precedence: bulk
+Auto-Submitted: auto-generated
 MIME-Version: 1.0
 Content-Type: text/plain; charset="utf-8"
 Content-Transfer-Encoding: 8bit
diff --git a/templates/reject-proposed-updates.rejected b/templates/reject-proposed-updates.rejected
deleted file mode 100644 (file)
index 8b86cb5..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-From: __DAK_ADDRESS__
-To: __MAINTAINER_TO__
-__CC__
-__BCC__
-Reply-To: __STABLE_MAIL__
-X-Debian: DAK
-X-Debian-Package: __SOURCE__
-Precedence: bulk
-MIME-Version: 1.0
-Content-Type: text/plain; charset="utf-8"
-Content-Transfer-Encoding: 8bit
-Subject: __CHANGES_FILENAME__ REJECTED from proposed-updates
-
-Your package was rejected by an ftp master on behalf of
-__STABLE_REJECTOR__, if you have any questions or
-comments regarding this rejection, please address them to 
-__STABLE_REJECTOR__ by replying to this mail.
-
-The reason given for rejection was:
-
-__MANUAL_REJECT_MESSAGE__
-
-Please see:
-
-   __MORE_INFO_URL__
-
-for more details.
-
-===
-
-Your rejected .changes files is in queue/REJECT/; the other files
-have been removed from proposed-updates and will be auto-cleaned as
-normal.
index fc229ed67249ff706e764d640ecea352480c9c9a..d3cd09082f86ed2d2c795a008cbd4a6aee604d53 100644 (file)
@@ -17,9 +17,8 @@ database and may (or may not) still be in the pool; this is not a bug.
 The package(s) will be physically removed automatically when no suite
 references them (and in the case of source, when no binary references
 it).  Please also remember that the changes have been done on the
-master archive (__MASTER_ARCHIVE__) and will not propagate to any
-mirrors (__PRIMARY_MIRROR__ included) until the next cron.daily run at the
-earliest.
+master archive and will not propagate to any mirrors (__PRIMARY_MIRROR__
+included) until the next dinstall run at the earliest.
 
 Packages are usually not removed from testing by hand. Testing tracks
 unstable and will automatically remove packages which were removed
index 0dcc1b41d816bad557642fbf3ee0016d59f8e56a..e097a756e498bc69baa9e14f7abfe0746fc8a42a 100644 (file)
@@ -17,9 +17,8 @@ database and may (or may not) still be in the pool; this is not a bug.
 The package(s) will be physically removed automatically when no suite
 references them (and in the case of source, when no binary references
 it).  Please also remember that the changes have been done on the
-master archive (__MASTER_ARCHIVE__) and will not propagate to any
-mirrors (__PRIMARY_MIRROR__ included) until the next cron.daily run at the
-earliest.
+master archive and will not propagate to any mirrors (__PRIMARY_MIRROR__
+included) until the next dinstall run at the earliest.
 
 Packages are usually not removed from testing by hand. Testing tracks
 unstable and will automatically remove packages which were removed
@@ -27,9 +26,9 @@ from unstable when removing them from testing causes no dependency
 problems. The release team can force a removal from testing if it is
 really needed, please contact them if this should be the case.
 
-We try to close Bugs which have been reported against this package
-automatically.  But please check all old bugs, if they where closed
-correctly or should have been re-assign to another package.
+We try to close bugs which have been reported against this package
+automatically. But please check all old bugs, if they were closed
+correctly or should have been re-assigned to another package.
 
 Thank you for reporting the bug, which will now be closed.  If you
 have further comments please address them to __BUG_NUMBER__@__BUG_SERVER__.
diff --git a/templates/security-install.advisory b/templates/security-install.advisory
deleted file mode 100644 (file)
index 9036bd0..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-From: __DAK_ADDRESS__
-To: __WHOAMI__ <dak@security.debian.org>
-__BCC__
-X-Debian-Package: __SOURCE__
-Subject: Template Advisory __ADVISORY__
-
-------------------------------------------------------------------------
-Debian Security Advisory __ADVISORY__                  security@debian.org
-http://www.debian.org/security/                         __WHOAMI__
-__DATE__                   http://www.debian.org/security/faq
-------------------------------------------------------------------------
-
-Package        : __PACKAGE__
-Vulnerability  : XXX
-Problem type   : local/remote XXX
-Debian-specific: XXX
-CVE Id(s)      : XXX
-CERT advisory  : XXX
-BugTraq ID     : XXX
-Debian Bug     : XXX
-
-Several local/remote vulnerabilities have been discovered in...
-The Common
-Vulnerabilities and Exposures project identifies the following problems:
-
-[single issue]
-Foo discovered that
-
-
-[single issue]
-For the old stable distribution (etch), this problem has been fixed in version XXX
-__PACKAGE__
-
-For the stable distribution (lenny), this problem has been fixed in version XXX
-__PACKAGE__
-
-For the unstable distribution (sid), this problem has been fixed in
-version XXX
-
-[multiple issues]
-For the old stable distribution (etch), these problems have been fixed in version
-__PACKAGE__
-
-For the stable distribution (lenny), these problems have been fixed in version
-__PACKAGE__
-
-For the unstable distribution (sid), these problems have been fixed in
-version XXX
-
-We recommend that you upgrade your __PACKAGE__ package.
-
-Upgrade instructions
---------------------
-
-wget url
-        will fetch the file for you
-dpkg -i file.deb
-        will install the referenced file.
-
-If you are using the apt-get package manager, use the line for
-sources.list as given below:
-
-apt-get update
-        will update the internal database
-apt-get upgrade
-        will install corrected packages
-
-You may use an automated update by adding the resources from the
-footer to the proper configuration.
-
-
-Debian GNU/Linux 4.0 alias etch
--------------------------------
-
-Debian GNU/Linux 5.0 alias lenny
---------------------------------
-
-__ADVISORY_TEXT__
-
-
-  These files will probably be moved into the stable distribution on
-  its next update.
-
----------------------------------------------------------------------------------
-For apt-get: deb http://security.debian.org/ stable/updates main
-For dpkg-ftp: ftp://security.debian.org/debian-security dists/stable/updates/main
-Mailing list: debian-security-announce@lists.debian.org
-Package info: `apt-cache show <pkg>' and http://packages.debian.org/<pkg>
index 4e064a3f6518d42751e53a6651372e7d1f775d4e..eb9d8f0ac9776bb8a73f8b0b7cf6b645857de997 100644 (file)
@@ -3,6 +3,7 @@ To: __TRANSITION_EMAIL__
 __BCC__
 X-Debian: DAK
 Precedence: bulk
+Auto-Submitted: auto-generated
 MIME-Version: 1.0
 Content-Type: text/plain; charset="utf-8"
 Content-Transfer-Encoding: 8bit
index f258770983edbc6a7cc32459fda49c5b6e864833..73679b571c0fe0461b8d687daf6c967a0cab89b7 100755 (executable)
@@ -60,13 +60,6 @@ class PackageTestCase(DBDakTestCase):
         architectures = get_suite_architectures('lenny', skipall = True, session = self.session)
         self.assertEqual(3, len(architectures))
         self.assertTrue(self.arch['all'] not in architectures)
-        # check the function get_architecture_suites()
-        suites = get_architecture_suites('i386', self.session)
-        self.assertEqual(3, len(suites))
-        self.assertTrue(self.suite['lenny'] in suites)
-        suites = get_architecture_suites('kfreebsd-i386', self.session)
-        self.assertEqual(2, len(suites))
-        self.assertTrue(self.suite['lenny'] not in suites)
         # check overrides
         self.assertEqual(0, self.suite['lenny'].overrides.count())
 
diff --git a/tests/fixtures/packages/.gitignore b/tests/fixtures/packages/.gitignore
new file mode 100644 (file)
index 0000000..258ed43
--- /dev/null
@@ -0,0 +1,8 @@
+/*.changes
+/*.deb
+/*.diff.gz
+/*.dsc
+/*.tar.gz
+/stamp-*
+
+/*/debian/files
diff --git a/tests/fixtures/packages/Makefile b/tests/fixtures/packages/Makefile
new file mode 100644 (file)
index 0000000..0efd18c
--- /dev/null
@@ -0,0 +1,31 @@
+export GNUPGHOME = $(CURDIR)/gpg
+SHELL = /bin/bash
+
+TAR = nonfree-package_0.1 package_0.1 package-built-using_0.1
+PACKAGES = $(TAR) package_0.1-2 package_0.1-3
+
+all: packages
+
+tarballs: stamp-tarballs
+stamp-tarballs:
+       set -e; for t in $(TAR); do \
+          if [ ! -f $$t.orig.tar.gz ]; then \
+           tar -czf $$t.orig.tar.gz --exclude=debian $${t/_/-}; \
+         fi; \
+       done
+       touch $@
+
+packages: stamp-packages
+stamp-packages: stamp-tarballs
+       set -e; for p in $(PACKAGES); do \
+         (cd $${p/_/-}; dpkg-buildpackage); \
+       done
+       touch $@
+
+clean:
+       set -e; for p in $(PACKAGES); do \
+         make -C $${p/_/-} -f debian/rules clean; \
+       done
+       rm -f *.tar.gz *.dsc *.changes *.diff.gz *.deb
+       rm -f gpg/*~
+       rm -f stamp-*
diff --git a/tests/fixtures/packages/gpg/pubring.gpg b/tests/fixtures/packages/gpg/pubring.gpg
new file mode 100644 (file)
index 0000000..469c3f3
Binary files /dev/null and b/tests/fixtures/packages/gpg/pubring.gpg differ
diff --git a/tests/fixtures/packages/gpg/random_seed b/tests/fixtures/packages/gpg/random_seed
new file mode 100644 (file)
index 0000000..2f6cb5e
Binary files /dev/null and b/tests/fixtures/packages/gpg/random_seed differ
diff --git a/tests/fixtures/packages/gpg/secring.gpg b/tests/fixtures/packages/gpg/secring.gpg
new file mode 100644 (file)
index 0000000..e781f1b
Binary files /dev/null and b/tests/fixtures/packages/gpg/secring.gpg differ
diff --git a/tests/fixtures/packages/gpg/trustdb.gpg b/tests/fixtures/packages/gpg/trustdb.gpg
new file mode 100644 (file)
index 0000000..d0ac562
Binary files /dev/null and b/tests/fixtures/packages/gpg/trustdb.gpg differ
diff --git a/tests/fixtures/packages/nonfree-package-0.1/debian/changelog b/tests/fixtures/packages/nonfree-package-0.1/debian/changelog
new file mode 100644 (file)
index 0000000..ec3f4a1
--- /dev/null
@@ -0,0 +1,5 @@
+nonfree-package (0.1-1) unstable; urgency=low
+
+  * Initial release.
+
+ -- A Maintainer <maint@example.com>  Fri, 08 Jun 2012 18:10:01 +0200
diff --git a/tests/fixtures/packages/nonfree-package-0.1/debian/compat b/tests/fixtures/packages/nonfree-package-0.1/debian/compat
new file mode 100644 (file)
index 0000000..7f8f011
--- /dev/null
@@ -0,0 +1 @@
+7
diff --git a/tests/fixtures/packages/nonfree-package-0.1/debian/control b/tests/fixtures/packages/nonfree-package-0.1/debian/control
new file mode 100644 (file)
index 0000000..f83f69c
--- /dev/null
@@ -0,0 +1,11 @@
+Source: nonfree-package
+Section: non-free/misc
+Priority: extra
+Maintainer: A Maintainer <maint@example.com>
+Build-Depends: debhelper (>= 7)
+
+Package: nonfree-package
+Architecture: all
+Depends: ${misc:Depends}
+Description: a package
+ a package
diff --git a/tests/fixtures/packages/nonfree-package-0.1/debian/nonfree-package.install b/tests/fixtures/packages/nonfree-package-0.1/debian/nonfree-package.install
new file mode 100644 (file)
index 0000000..af6371d
--- /dev/null
@@ -0,0 +1 @@
+some-file usr/share/nonfree-package
diff --git a/tests/fixtures/packages/nonfree-package-0.1/debian/rules b/tests/fixtures/packages/nonfree-package-0.1/debian/rules
new file mode 100755 (executable)
index 0000000..78c7615
--- /dev/null
@@ -0,0 +1,3 @@
+#! /usr/bin/make -f
+%:
+       dh $@
diff --git a/tests/fixtures/packages/nonfree-package-0.1/some-file b/tests/fixtures/packages/nonfree-package-0.1/some-file
new file mode 100644 (file)
index 0000000..083d0c2
--- /dev/null
@@ -0,0 +1 @@
+some-text
diff --git a/tests/fixtures/packages/package-0.1/debian/changelog b/tests/fixtures/packages/package-0.1/debian/changelog
new file mode 100644 (file)
index 0000000..fbb1609
--- /dev/null
@@ -0,0 +1,5 @@
+package (0.1-1) unstable; urgency=low
+
+  * Initial release.
+
+ -- A Maintainer <maint@example.com>  Fri, 08 Jun 2012 18:10:01 +0200
diff --git a/tests/fixtures/packages/package-0.1/debian/compat b/tests/fixtures/packages/package-0.1/debian/compat
new file mode 100644 (file)
index 0000000..7f8f011
--- /dev/null
@@ -0,0 +1 @@
+7
diff --git a/tests/fixtures/packages/package-0.1/debian/control b/tests/fixtures/packages/package-0.1/debian/control
new file mode 100644 (file)
index 0000000..9ccf58a
--- /dev/null
@@ -0,0 +1,11 @@
+Source: package
+Section: misc
+Priority: extra
+Maintainer: A Maintainer <maint@example.com>
+Build-Depends: debhelper (>= 7)
+
+Package: package
+Architecture: all
+Depends: ${misc:Depends}
+Description: a package
+ a package
diff --git a/tests/fixtures/packages/package-0.1/debian/package.install b/tests/fixtures/packages/package-0.1/debian/package.install
new file mode 100644 (file)
index 0000000..fb44f77
--- /dev/null
@@ -0,0 +1 @@
+some-file usr/share/apackage
diff --git a/tests/fixtures/packages/package-0.1/debian/rules b/tests/fixtures/packages/package-0.1/debian/rules
new file mode 100755 (executable)
index 0000000..78c7615
--- /dev/null
@@ -0,0 +1,3 @@
+#! /usr/bin/make -f
+%:
+       dh $@
diff --git a/tests/fixtures/packages/package-0.1/some-file b/tests/fixtures/packages/package-0.1/some-file
new file mode 100644 (file)
index 0000000..083d0c2
--- /dev/null
@@ -0,0 +1 @@
+some-text
diff --git a/tests/fixtures/packages/package-built-using-0.1/debian/changelog b/tests/fixtures/packages/package-built-using-0.1/debian/changelog
new file mode 100644 (file)
index 0000000..744ddf6
--- /dev/null
@@ -0,0 +1,5 @@
+package-built-using (0.1-1) unstable; urgency=low
+
+  * Initial release.
+
+ -- A Maintainer <maint@example.com>  Fri, 08 Jun 2012 18:10:01 +0200
diff --git a/tests/fixtures/packages/package-built-using-0.1/debian/compat b/tests/fixtures/packages/package-built-using-0.1/debian/compat
new file mode 100644 (file)
index 0000000..7f8f011
--- /dev/null
@@ -0,0 +1 @@
+7
diff --git a/tests/fixtures/packages/package-built-using-0.1/debian/control b/tests/fixtures/packages/package-built-using-0.1/debian/control
new file mode 100644 (file)
index 0000000..b51cc53
--- /dev/null
@@ -0,0 +1,12 @@
+Source: package-built-using
+Section: misc
+Priority: extra
+Maintainer: A Maintainer <maint@example.com>
+Build-Depends: debhelper (>= 7)
+
+Package: package-built-using
+Architecture: all
+Depends: ${misc:Depends}
+Built-Using: package (= 0.1-1)
+Description: a package
+ a package
diff --git a/tests/fixtures/packages/package-built-using-0.1/debian/package-built-using.install b/tests/fixtures/packages/package-built-using-0.1/debian/package-built-using.install
new file mode 100644 (file)
index 0000000..fb44f77
--- /dev/null
@@ -0,0 +1 @@
+some-file usr/share/apackage
diff --git a/tests/fixtures/packages/package-built-using-0.1/debian/rules b/tests/fixtures/packages/package-built-using-0.1/debian/rules
new file mode 100755 (executable)
index 0000000..78c7615
--- /dev/null
@@ -0,0 +1,3 @@
+#! /usr/bin/make -f
+%:
+       dh $@
diff --git a/tests/fixtures/packages/package-built-using-0.1/some-file b/tests/fixtures/packages/package-built-using-0.1/some-file
new file mode 100644 (file)
index 0000000..083d0c2
--- /dev/null
@@ -0,0 +1 @@
+some-text
diff --git a/tests/test_daklib_fstransactions.py b/tests/test_daklib_fstransactions.py
new file mode 100755 (executable)
index 0000000..41a109b
--- /dev/null
@@ -0,0 +1,119 @@
+#! /usr/bin/env python
+#
+# Copyright (C) 2012, Ansgar Burchardt <ansgar@debian.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from base_test import DakTestCase
+from daklib.fstransactions import FilesystemTransaction
+
+from unittest import main
+
+import os
+import shutil
+import tempfile
+
+
+class TemporaryDirectory:
+    def __init__(self):
+        self.directory = None
+    def __str__(self):
+        return self.directory
+    def filename(self, suffix):
+        return os.path.join(self.directory, suffix)
+    def __enter__(self):
+        self.directory = tempfile.mkdtemp()
+        return self
+    def __exit__(self, *args):
+        if self.directory is not None:
+            shutil.rmtree(self.directory)
+            self.directory = None
+        return None
+
+class FilesystemTransactionTestCase(DakTestCase):
+    def _copy_a_b(self, tmp, fs, **kwargs):
+        fs.copy(tmp.filename('a'), tmp.filename('b'), **kwargs)
+
+    def _write_to_a(self, tmp):
+        with open(tmp.filename('a'), 'w') as fh:
+            print >>fh, 'a'
+
+    def test_copy_non_existing(self):
+        def copy():
+            with TemporaryDirectory() as t:
+                with FilesystemTransaction() as fs:
+                    self._copy_a_b(t, fs)
+
+        self.assertRaises(IOError, copy)
+
+    def test_copy_existing_and_commit(self):
+        with TemporaryDirectory() as t:
+            self._write_to_a(t)
+
+            with FilesystemTransaction() as fs:
+                self._copy_a_b(t, fs)
+                self.assert_(os.path.exists(t.filename('a')))
+                self.assert_(os.path.exists(t.filename('b')))
+
+            self.assert_(os.path.exists(t.filename('a')))
+            self.assert_(os.path.exists(t.filename('b')))
+
+    def test_copy_existing_and_rollback(self):
+        with TemporaryDirectory() as t:
+            self._write_to_a(t)
+
+            class TestException(Exception):
+                pass
+            try:
+                with FilesystemTransaction() as fs:
+                    self._copy_a_b(t, fs)
+                    self.assert_(os.path.exists(t.filename('a')))
+                    self.assert_(os.path.exists(t.filename('b')))
+                    raise TestException()
+            except TestException:
+                pass
+
+            self.assert_(os.path.exists(t.filename('a')))
+            self.assert_(not os.path.exists(t.filename('b')))
+
+    def test_unlink_and_commit(self):
+        with TemporaryDirectory() as t:
+            self._write_to_a(t)
+            a = t.filename('a')
+            with FilesystemTransaction() as fs:
+                self.assert_(os.path.exists(a))
+                fs.unlink(a)
+                self.assert_(not os.path.exists(a))
+            self.assert_(not os.path.exists(a))
+
+    def test_unlink_and_rollback(self):
+        with TemporaryDirectory() as t:
+            self._write_to_a(t)
+            a = t.filename('a')
+            class TestException(Exception):
+                pass
+
+            try:
+                with FilesystemTransaction() as fs:
+                    self.assert_(os.path.exists(a))
+                    fs.unlink(a)
+                    self.assert_(not os.path.exists(a))
+                    raise TestException()
+            except TestException:
+                pass
+            self.assert_(os.path.exists(a))
+
+if __name__ == '__main__':
+    main()
index fe6bd91e984d4224a0cfcb37c2d5eca462c9f7cd..51812da03918b650bb57222cfe7b2b90612e5834 100755 (executable)
@@ -2,7 +2,7 @@
 
 from base_test import DakTestCase
 
-from daklib.dbconn import split_uploaders
+from daklib.textutils import split_uploaders
 
 import unittest
 
index 4f94796d8afd2b79be661e9011b7d0b8f6d46dc2..da8a1d6d23b1b44029c3fb2407fd71d21ff721cd 100644 (file)
@@ -69,8 +69,6 @@ $statusdelay = 30;
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-amd64-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-armhf-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-armel-keyring.gpg",
-              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-arm-keyring.gpg",
-              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-hppa-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-hurd-i386-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-i386-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-ia64-keyring.gpg",
@@ -84,10 +82,10 @@ $statusdelay = 30;
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-sparc-keyring.gpg");
 
 # our log file
-$logfile = "$queued_dir/log";
+$logfile = "$queued_dir/run/log";
 
 # our pid file
-$pidfile = "$queued_dir/pid";
+$pidfile = "$queued_dir/run/pid";
 
 # upload method (ssh, copy, ftp)
 $upload_method = "copy";
index bbbfd9d60b4e5266fa3a30fac0162ecc1c18f4ee..32ee8fe9f27059edc6927a510706208d4c5292a6 100644 (file)
@@ -53,7 +53,7 @@ $valid_files = '(\.changes|\.tar\.(?:gz|bz2|xz)|\.dsc|\.u?deb|diff\.gz|\.sh)$';
 $chmod_on_target = 0;
 
 # Do an md5sum check after upload?
-$check_md5sum = 1;
+$check_md5sum = 0;
 
 # name of the status file or named pipe in the incoming dir
 $statusfile = "$incoming/status";
@@ -63,13 +63,11 @@ $statusfile = "$incoming/status";
 $statusdelay = 30;
 
 # names of the keyring files
-@keyrings = ( "/srv/backports-master.debian.org/keyrings/keyring.gpg",
+@keyrings = ( "/srv/keyring.debian.org/keyrings/debian-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/debian-maintainers.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-alpha-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-amd64-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-armel-keyring.gpg",
-              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-arm-keyring.gpg",
-              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-hppa-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-hurd-i386-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-i386-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-ia64-keyring.gpg",
@@ -82,22 +80,22 @@ $statusdelay = 30;
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-sparc-keyring.gpg");
 
 # our log file
-$logfile = "$queued_dir/log";
+$logfile = "$queued_dir/run/log";
 
 # our pid file
-$pidfile = "$queued_dir/pid";
+$pidfile = "$queued_dir/run/pid";
 
 # upload method (ssh, copy, ftp)
-$upload_method = "copy";
+$upload_method = "ftp";
 
 # name of target host (ignored on copy method)
-$target = "localhost";
+$target = "ftp-master.debian.org";
 
 # login name on target host (for ssh, always 'ftp' for ftp, ignored for copy)
-$targetlogin = "queue";
+$targetlogin = "ftp";
 
 # incoming on target host
-$targetdir = "/srv/backports-master.debian.org/queue/unchecked/";
+$targetdir = "/pub/UploadQueue/";
 
 # incoming/delayed on target host
 $targetdir_delayed = "/srv/upload.debian.org/DEFERRED/%d-day";
index fd77e80bc3c6ee9ad261b62c1eb65a523d8bc94b..fc0ce0e224b0372e32083df5ae4a182f7e96058d 100644 (file)
@@ -66,9 +66,8 @@ $statusdelay = 30;
 @keyrings = ( "/srv/keyring.debian.org/keyrings/debian-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-alpha-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-amd64-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-armhf-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-armel-keyring.gpg",
-              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-arm-keyring.gpg",
-              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-hppa-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-hurd-i386-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-i386-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-ia64-keyring.gpg",
@@ -78,13 +77,14 @@ $statusdelay = 30;
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-mips-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-powerpc-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-s390-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-s390x-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-sparc-keyring.gpg");
 
 # our log file
-$logfile = "$queued_dir/log";
+$logfile = "$queued_dir/run/log";
 
 # our pid file
-$pidfile = "$queued_dir/pid";
+$pidfile = "$queued_dir/run/pid";
 
 # upload method (ssh, copy, ftp)
 $upload_method = "ftp";
diff --git a/tools/debianqueued-0.9/config-security-disembargo b/tools/debianqueued-0.9/config-security-disembargo
new file mode 100644 (file)
index 0000000..c449198
--- /dev/null
@@ -0,0 +1,157 @@
+#
+# example configuration file for debianqueued
+#
+
+# set to != 0 for debugging output (to log file)
+$debug = 0;
+
+# various programs:
+# -----------------
+$gpg       = "/usr/bin/gpg";
+$ssh       = "/usr/bin/ssh";
+$scp       = "/usr/bin/scp";
+$ssh_agent = "/usr/bin/ssh-agent";
+$ssh_add   = "/usr/bin/ssh-add";
+$md5sum    = "/usr/bin/md5sum";
+$mail      = "/usr/sbin/sendmail";
+$mkfifo    = "/usr/bin/mkfifo";
+$tar       = "/bin/tar"; # must be GNU tar!
+$gzip      = "/bin/gzip";
+$ar        = "/usr/bin/ar"; # must support p option, optional
+$ls        = "/bin/ls";
+$cp        = "/bin/cp";
+$chmod     = "/bin/chmod";
+
+# binaries whose existence should be tested before each queue run
+#@test_binaries = ();
+
+# general options to ssh/scp
+$ssh_options = "-o'BatchMode yes' -o'FallBackToRsh no' ".
+               "-o'ForwardAgent no' -o'ForwardX11 no' ".
+               "-o'PasswordAuthentication no' -o'StrictHostKeyChecking yes'";
+
+# ssh key file to use for connects to master (empty: default ~/.ssh/identity)
+$ssh_key_file = "";
+
+# the incoming dir we live in
+$incoming = "/srv/queued/UploadQueue/OpenSecurityUploadQueue/";
+
+# the delayed incoming directories
+$incoming_delayed = "/srv/queued/UploadQueue/DELAYED/%d-day";
+
+# maximum delay directory, -1 for no delayed directory,
+# incoming_delayed and target_delayed need to exist.
+$max_delayed = -1;
+
+# files not to delete in $incoming (regexp)
+$keep_files = '(status|\.message|README)$';
+
+# file patterns that aren't deleted right away
+$valid_files = '(\.changes|\.tar\.(?:gz|bz2|xz)|\.dsc|\.u?deb|diff\.gz|\.sh)$';
+
+# Change files to mode 644 locally (after md5 check) or only on master?
+$chmod_on_target = 0;
+
+# Do an md5sum check?
+$check_md5sum = 0;
+
+# name of the status file or named pipe in the incoming dir
+$statusfile = "$incoming/status";
+
+# if 0, status file implemented as FIFO; if > 0, status file is plain
+# file and updated with a delay of this many seconds
+$statusdelay = 30;
+
+# names of the keyring files
+@keyrings = ( "/srv/keyring.debian.org/keyrings/debian-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-alpha-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-amd64-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-armhf-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-armel-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-hurd-i386-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-i386-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-ia64-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-kfreebsd-amd64-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-kfreebsd-i386-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-mipsel-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-mips-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-powerpc-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-s390-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-s390x-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-sparc-keyring.gpg");
+
+# our log file
+$logfile = "$queued_dir/run/log";
+
+# our pid file
+$pidfile = "$queued_dir/run/pid";
+
+# upload method (ssh, copy, ftp)
+$upload_method = "copy";
+
+# name of target host (ignored on copy method)
+$target = "localhost";
+
+# login name on target host (for ssh, always 'ftp' for ftp, ignored for copy)
+$targetlogin = "copy";
+
+# incoming on target host
+$targetdir = "/srv/security-master.debian.org/queue/unchecked-disembargo";
+
+# incoming/delayed on target host
+$targetdir_delayed = "/srv/queued/DEFERRED/%d-day";
+
+# select FTP debugging
+$ftpdebug = 0;
+
+# FTP timeout
+$ftptimeout = 900;
+
+# max. number of tries to upload
+$max_upload_retries = 8;
+
+# delay after first failed upload
+$upload_delay_1 = 30*60; # 30 min.
+
+# delay between successive failed uploads
+$upload_delay_2 = 4*60*60; # 4 hours
+
+# packages that must go to nonus.debian.org and thus are rejected here
+#@nonus_packages = qw(gpg-rsaidea);
+
+# timings:
+# --------
+#   time between two queue checks
+$queue_delay = 5*60; # 5 min.
+#   when are stray files deleted?
+$stray_remove_timeout = 24*60*60; # 1 day
+#   delay before reporting problems with a .changes file (not
+#   immediately for to-be-continued uploads)
+$problem_report_timeout = 30*60; # 30 min.
+#   delay before reporting that a .changes file is missing (not
+#   immediately for to-be-continued uploads)
+$no_changes_timeout = 30*60; # 30 min.
+#   when are .changes with persistent problems removed?
+$bad_changes_timeout = 2*24*60*60; # 2 days
+#   how long may a remote operation (ssh/scp) take?
+$remote_timeout = 3*60*60; # 3 hours
+
+# mail address of maintainer
+$maintainer_mail = "ftpmaster\@debian.org";
+
+# to override the TO address of ALL outgoing mail, set this value.
+$overridemail = "dak\@security.debian.org";
+
+# logfile rotating:
+# -----------------
+#    how often to rotate (in days)
+$log_age = 7;
+#    how many old logs to keep
+$log_keep = 4;
+#    send summary mail when rotating logs?
+$mail_summary = 1;
+#    write summary to file when rotating logs? (no if name empty)
+$summary_file = "$queued_dir/summary";
+
+# don't remove this, Perl needs it!
+1;
diff --git a/tools/debianqueued-0.9/config-security-embargoed b/tools/debianqueued-0.9/config-security-embargoed
new file mode 100644 (file)
index 0000000..f9808fe
--- /dev/null
@@ -0,0 +1,158 @@
+#
+# example configuration file for debianqueued
+#
+
+# set to != 0 for debugging output (to log file)
+$debug = 0;
+
+# various programs:
+# -----------------
+$gpg       = "/usr/bin/gpg";
+$ssh       = "/usr/bin/ssh";
+$scp       = "/usr/bin/scp";
+$ssh_agent = "/usr/bin/ssh-agent";
+$ssh_add   = "/usr/bin/ssh-add";
+$md5sum    = "/usr/bin/md5sum";
+$mail      = "/usr/sbin/sendmail";
+$mkfifo    = "/usr/bin/mkfifo";
+$tar       = "/bin/tar"; # must be GNU tar!
+$gzip      = "/bin/gzip";
+$ar        = "/usr/bin/ar"; # must support p option, optional
+$ls        = "/bin/ls";
+$cp        = "/bin/cp";
+$chmod     = "/bin/chmod";
+
+# binaries whose existence should be tested before each queue run
+#@test_binaries = ();
+
+# general options to ssh/scp
+$ssh_options = "-o'BatchMode yes' -o'FallBackToRsh no' ".
+               "-o'ForwardAgent no' -o'ForwardX11 no' ".
+               "-o'PasswordAuthentication no' -o'StrictHostKeyChecking yes'";
+
+# ssh key file to use for connects to master (empty: default ~/.ssh/identity)
+$ssh_key_file = "";
+
+# the incoming dir we live in
+$incoming = "/srv/queued/UploadQueue/SecurityUploadQueue/";
+
+# the delayed incoming directories
+$incoming_delayed = "/srv/queued/UploadQueue/DELAYED/%d-day";
+
+# maximum delay directory, -1 for no delayed directory,
+# incoming_delayed and target_delayed need to exist.
+$max_delayed = -1;
+
+# files not to delete in $incoming (regexp)
+$keep_files = '(status|\.message|README)$';
+
+# file patterns that aren't deleted right away
+$valid_files = '(\.changes|\.tar\.(?:gz|bz2|xz)|\.dsc|\.u?deb|diff\.gz|\.sh)$';
+
+# Change files to mode 644 locally (after md5 check) or only on master?
+$chmod_on_target = 0;
+
+# Do an md5sum check?
+$check_md5sum = 0;
+
+# name of the status file or named pipe in the incoming dir
+$statusfile = "$incoming/status";
+
+# if 0, status file implemented as FIFO; if > 0, status file is plain
+# file and updated with a delay of this many seconds
+$statusdelay = 30;
+
+# names of the keyring files
+@keyrings = ( "/srv/keyring.debian.org/keyrings/debian-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-alpha-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-amd64-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-armhf-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-armel-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-hurd-i386-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-i386-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-ia64-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-kfreebsd-amd64-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-kfreebsd-i386-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-mipsel-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-mips-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-powerpc-keyring.gpg",
+             "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-s390-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-s390x-keyring.gpg",
+              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-sparc-keyring.gpg");
+
+# our log file
+$logfile = "$queued_dir/run/log";
+
+# our pid file
+$pidfile = "$queued_dir/run/pid";
+
+# upload method (ssh, copy, ftp)
+$upload_method = "copy";
+
+# name of target host (ignored on copy method)
+$target = "localhost";
+
+# login name on target host (for ssh, always 'ftp' for ftp, ignored for copy)
+$targetlogin = "queue";
+
+# incoming on target host
+$targetdir = "/srv/security-master.debian.org/queue/unchecked/";
+
+# incoming/delayed on target host
+$targetdir_delayed = "/srv/queued/DEFERRED/%d-day";
+
+# select FTP debugging
+$ftpdebug = 0;
+
+# FTP timeout
+$ftptimeout = 900;
+
+# max. number of tries to upload
+$max_upload_retries = 8;
+
+# delay after first failed upload
+$upload_delay_1 = 30*60; # 30 min.
+
+# delay between successive failed uploads
+$upload_delay_2 = 4*60*60; # 4 hours
+
+# packages that must go to nonus.debian.org and thus are rejected here
+#@nonus_packages = qw(gpg-rsaidea);
+
+# timings:
+# --------
+#   time between two queue checks
+$queue_delay = 5*60; # 5 min.
+#   when are stray files deleted?
+$stray_remove_timeout = 24*60*60; # 1 day
+#   delay before reporting problems with a .changes file (not
+#   immediately for to-be-continued uploads)
+$problem_report_timeout = 30*60; # 30 min.
+#   delay before reporting that a .changes file is missing (not
+#   immediately for to-be-continued uploads)
+$no_changes_timeout = 30*60; # 30 min.
+#   when are .changes with persistent problems removed?
+$bad_changes_timeout = 2*24*60*60; # 2 days
+#   how long may a remote operation (ssh/scp) take?
+$remote_timeout = 3*60*60; # 3 hours
+
+# mail address of maintainer
+$maintainer_mail = "ftpmaster\@debian.org";
+
+# to override the TO address of ALL outgoing mail, set this value.
+$overridemail = "dak\@security.debian.org";
+
+
+# logfile rotating:
+# -----------------
+#    how often to rotate (in days)
+$log_age = 7;
+#    how many old logs to keep
+$log_keep = 4;
+#    send summary mail when rotating logs?
+$mail_summary = 1;
+#    write summary to file when rotating logs? (no if name empty)
+$summary_file = "$queued_dir/summary";
+
+# don't remove this, Perl needs it!
+1;
index 47f456da0b14feaef9f2d635ff743448bcf74607..e1f997255f4959e2c22adc7b53e21485e93aef23 100644 (file)
@@ -69,8 +69,6 @@ $statusdelay = 30;
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-amd64-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-armel-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-armhf-keyring.gpg",
-              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-arm-keyring.gpg",
-              "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-hppa-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-hurd-i386-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-i386-keyring.gpg",
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-ia64-keyring.gpg",
@@ -84,10 +82,10 @@ $statusdelay = 30;
               "/srv/keyring.debian.org/keyrings/buildd-keyrings/buildd-sparc-keyring.gpg");
 
 # our log file
-$logfile = "$queued_dir/log";
+$logfile = "$queued_dir/run/log";
 
 # our pid file
-$pidfile = "$queued_dir/pid";
+$pidfile = "$queued_dir/run/pid";
 
 # upload method (ssh, copy, ftp)
 $upload_method = "ftp";
index 2a45d5d88628226df990801451328b7154452de5..46ce787f64386c2246d09261f5e206fb388c278f 100755 (executable)
@@ -14,6 +14,7 @@
 #
 
 require 5.002;
+no lib '.';
 use strict;
 use POSIX;
 use POSIX qw( strftime sys_stat_h sys_wait_h signal_h );
@@ -23,8 +24,10 @@ use Socket qw( PF_INET AF_INET SOCK_STREAM );
 use Config;
 use Sys::Hostname;
 use File::Copy;
+use Digest::MD5;
 
 setlocale(&POSIX::LC_ALL, "C");
+$ENV{"LC_ALL"} = "C";
 
 # ---------------------------------------------------------------------------
 #                                                              configuration
@@ -44,16 +47,11 @@ $junk = $conf::valid_files;
 $junk = $conf::max_upload_retries;
 $junk = $conf::upload_delay_1;
 $junk = $conf::upload_delay_2;
-$junk = $conf::ar;
-$junk = $conf::gzip;
-$junk = $conf::cp;
 $junk = $conf::check_md5sum;
 
 #$junk = $conf::ls;
-$junk         = $conf::chmod;
 $junk         = $conf::ftpdebug;
 $junk         = $conf::ftptimeout;
-$junk         = $conf::no_changes_timeout;
 $junk         = @conf::nonus_packages;
 $junk         = @conf::test_binaries;
 $junk         = @conf::maintainer_mail;
@@ -69,6 +67,8 @@ package main;
 ($main::hostname, undef, undef, undef, undef) = gethostbyname(hostname());
 
 my %packages = ();
+my $re_file_safe_prefix = qr/\A([a-zA-Z0-9][a-zA-Z0-9_.:~+-]*)/s;
+my $re_file_safe = qr/$re_file_safe_prefix\z/s;
 
 # extract -r and -k args
 $main::arg = "";
@@ -79,7 +79,7 @@ if ( @ARGV == 1 && $ARGV[0] =~ /^-[rk]$/ ) {
 
 # test for another instance of the queued already running
 my ( $pid, $delayed_dirs, $adelayedcore );
-if ( open( PIDFILE, "<$conf::pidfile" ) ) {
+if ( open( PIDFILE, "<", $conf::pidfile ) ) {
   chomp( $pid = <PIDFILE> );
   close(PIDFILE);
   if ( !$pid ) {
@@ -179,7 +179,7 @@ do {
 # check if all programs exist
 my $prg;
 foreach $prg ( $conf::gpg, $conf::ssh, $conf::scp, $conf::ssh_agent,
-               $conf::ssh_add, $conf::md5sum, $conf::mail, $conf::mkfifo )
+               $conf::ssh_add, $conf::mail, $conf::mkfifo )
 {
   die "Required program $prg doesn't exist or isn't executable\n"
     if !-x $prg;
@@ -231,14 +231,10 @@ sub ftp_code();
 sub ftp_error();
 sub ssh_cmd($);
 sub scp_cmd(@);
-sub local_cmd($;$);
 sub check_alive(;$);
 sub check_incoming_writable();
 sub rm(@);
 sub md5sum($);
-sub is_debian_file($);
-sub get_maintainer($);
-sub debian_file_stem($);
 sub msg($@);
 sub debug(@);
 sub init_mail(;$);
@@ -316,7 +312,7 @@ chdir($conf::incoming)
 $SIG{"HUP"} = "IGNORE";
 
 # open logfile, make it unbuffered
-open( LOG, ">>$conf::logfile" )
+open( LOG, ">>", $conf::logfile )
   or die "Cannot open my logfile $conf::logfile: $!\n";
 chmod( 0644, $conf::logfile )
   or die "Cannot set modes of $conf::logfile: $!\n";
@@ -326,11 +322,11 @@ sleep(1);
 $SIG{"HUP"} = \&close_log;
 
 # redirect stdin, ... to /dev/null
-open( STDIN, "</dev/null" )
+open( STDIN, "<", "/dev/null" )
   or die "$main::progname: Can't redirect stdin to /dev/null: $!\n";
-open( STDOUT, ">&LOG" )
+open( STDOUT, ">&", \*LOG )
   or die "$main::progname: Can't redirect stdout to $conf::logfile: $!\n";
-open( STDERR, ">&LOG" )
+open( STDERR, ">&", \*LOG )
   or die "$main::progname: Can't redirect stderr to $conf::logfile: $!\n";
 
 # ok, from this point usually no "die" anymore, stderr is gone!
@@ -358,7 +354,7 @@ END {
 }
 
 # write the pid file
-open( PIDFILE, ">$conf::pidfile" )
+open( PIDFILE, ">", $conf::pidfile )
   or msg( "log", "Can't open $conf::pidfile: $!\n" );
 printf PIDFILE "%5d\n", $$;
 close(PIDFILE);
@@ -387,7 +383,7 @@ while (1) {
 
   # ping target only if there is the possibility that we'll contact it (but
   # also don't wait too long).
-  my @have_changes = <*.changes *.commands>;
+  my @have_changes = <*.changes *.commands *.dak-commands>;
   for ( my $delayed_dirs = 0 ;
         $delayed_dirs <= $conf::max_delayed ;
         $delayed_dirs++ )
@@ -487,9 +483,10 @@ sub check_dir() {
            return
          );
 
-    # look for *.commands files but not in delayed queues
+    # look for *.commands and *.dak-commands files but not in delayed queues
     if ( $adelay == -1 ) {
       foreach $file (<*.commands>) {
+        next unless $file =~ /$re_file_safe/;
         init_mail($file);
         block_signals();
         process_commands($file);
@@ -498,6 +495,16 @@ sub check_dir() {
         write_status_file() if $conf::statusdelay;
         finish_mail();
       } ## end foreach $file (<*.commands>)
+         foreach $file (<*.dak-commands>) {
+               next unless $file =~ /$re_file_safe/;
+               init_mail($file);
+               block_signals();
+               process_dak_commands($file);
+               unblock_signals();
+               $main::dstat = "c";
+               write_status_file() if $conf::statusdelay;
+               finish_mail();
+         }
     } ## end if ( $adelay == -1 )
     opendir( INC, "." )
       or (
@@ -513,6 +520,7 @@ sub check_dir() {
     @changes = grep /\.changes$/, @files;
     push( @keep_files, @changes );    # .changes files aren't stray
     foreach $file (@changes) {
+      next unless $file =~ /$re_file_safe/;
       init_mail($file);
 
       # wrap in an eval to allow jumpbacks to here with die in case
@@ -553,91 +561,12 @@ sub check_dir() {
       my ( $maint, $pattern, @job_files );
       if (    $file =~ /^junk-for-writable-test/
            || $file !~ m,$conf::valid_files,
+           || $file !~ /$re_file_safe/
            || $age >= $conf::stray_remove_timeout )
       {
         msg( "log",
              "Deleted stray file ${main::current_incoming_short}/$file\n" )
           if rm($file);
-      } elsif (
-        $age > $conf::no_changes_timeout
-        && is_debian_file($file)
-        &&
-
-        # not already reported
-          !( $stats[ST_MODE] & S_ISGID )
-        && ( $pattern   = debian_file_stem($file) )
-        && ( @job_files = glob($pattern) )
-        &&
-
-        # If a .changes is in the list, it has the same stem as the
-        # found file (probably a .orig.tar.gz). Don't report in this
-        # case.
-        !( grep( /\.changes$/, @job_files ) )
-              )
-      {
-        $maint = get_maintainer($file);
-
-        # Don't send a mail if this looks like the recompilation of a
-        # package for a non-i386 arch. For those, the maintainer field is
-        # useless :-(
-        if ( !grep( /(\.dsc|_(i386|all)\.deb)$/, @job_files ) ) {
-          msg( "log", "Found an upload without .changes and with no ",
-               ".dsc file\n" );
-          msg( "log",
-               "Not sending a report, because probably ",
-               "recompilation job\n" );
-        } elsif ($maint) {
-          init_mail();
-          $main::mail_addr = $maint;
-          $main::mail_addr = $1 if $main::mail_addr =~ /<([^>]*)>/;
-          $main::mail_subject =
-            "Incomplete upload found in " . "Debian upload queue";
-          msg(
-               "mail",
-               "Probably you are the uploader of the following "
-                 . "file(s) in\n"
-             );
-          msg( "mail", "the Debian upload queue directory:\n  " );
-          msg( "mail", join( "\n  ", @job_files ), "\n" );
-          msg(
-               "mail",
-               "This looks like an upload, but a .changes file "
-                 . "is missing, so the job\n"
-             );
-          msg( "mail", "cannot be processed.\n\n" );
-          msg(
-               "mail",
-               "If no .changes file arrives within ",
-               print_time( $conf::stray_remove_timeout - $age ),
-               ", the files will be deleted.\n\n"
-             );
-          msg(
-               "mail",
-               "If you didn't upload those files, please just "
-                 . "ignore this message.\n"
-             );
-          finish_mail();
-          msg(
-               "log",
-               "Sending problem report for an upload without a "
-                 . ".changes\n"
-             );
-          msg( "log", "Maintainer: $maint\n" );
-        } else {
-          msg(
-               "log",
-               "Found an upload without .changes, but can't "
-                 . "find a maintainer address\n"
-             );
-        } ## end else [ if ( !grep( /(\.dsc|_(i386|all)\.deb)$/...
-        msg( "log", "Files: @job_files\n" );
-
-        # remember we already have sent a mail regarding this file
-        foreach (@job_files) {
-          my @st = stat($_);
-          next if !@st;    # file may have disappeared in the meantime
-          chmod +( $st[ST_MODE] |= S_ISGID ), $_;
-        }
       } else {
         debug(
 "found stray file ${main::current_incoming_short}/$file, deleting in ",
@@ -660,7 +589,7 @@ sub get_filelist_from_known_good_changes($) {
   my (@filenames);
 
   # parse the .changes file
-  open( CHANGES, "<$changes" )
+  open( CHANGES, "<", $changes )
     or die "$changes: $!\n";
 outer_loop: while (<CHANGES>) {
     if (/^Files:/i) {
@@ -671,7 +600,7 @@ outer_loop: while (<CHANGES>) {
 
         # forbid shell meta chars in the name, we pass it to a
         # subshell several times...
-        $field[5] =~ /^([a-zA-Z0-9.+_:@=%-][~a-zA-Z0-9.+_:@=%-]*)/;
+        $field[5] =~ /$re_file_safe/;
         if ( $1 ne $field[5] ) {
           msg( "log", "found suspicious filename $field[5]\n" );
           next;
@@ -702,17 +631,34 @@ sub process_changes($\@) {
   format_status_str( $main::current_changes,
                      "$main::current_incoming_short/$changes" );
   $main::dstat = "c";
+  $main::mail_addr = "";
   write_status_file() if $conf::statusdelay;
 
   @$keep_list = ();
   msg( "log", "processing ${main::current_incoming_short}/$changes\n" );
 
+  # run PGP on the file to check the signature
+  if ( !( $signator = pgp_check($changes) ) ) {
+    msg(
+       "log,mail",
+       "$main::current_incoming_short/$changes has bad PGP/GnuPG signature!\n"
+    );
+    goto remove_only_changes;
+  } elsif ( $signator eq "LOCAL ERROR" ) {
+
+    # An error has happened when starting pgp... Don't process the file,
+    # but also don't delete it
+    debug(
+"Can't PGP/GnuPG check $main::current_incoming_short/$changes -- don't process it for now"
+    );
+    return;
+  } ## end elsif ( $signator eq "LOCAL ERROR")
+
   # parse the .changes file
-  open( CHANGES, "<$changes" )
+  open( CHANGES, "<", $changes )
     or die "Cannot open ${main::current_incoming_short}/$changes: $!\n";
   $pgplines        = 0;
   $extralines      = 0;
-  $main::mail_addr = "";
   @files           = ();
 outer_loop: while (<CHANGES>) {
     if (/^---+(BEGIN|END) PGP .*---+$/) {
@@ -738,7 +684,7 @@ outer_loop: while (<CHANGES>) {
 
         # forbid shell meta chars in the name, we pass it to a
         # subshell several times...
-        $field[5] =~ /^([a-zA-Z0-9.+_:@=%-][~a-zA-Z0-9.+_:@=%-]*)/;
+        $field[5] =~ /$re_file_safe/;
         if ( $1 ne $field[5] ) {
           msg( "log", "found suspicious filename $field[5]\n" );
           msg(
@@ -859,7 +805,7 @@ outer_loop: while (<CHANGES>) {
   $failure_file = $changes . ".failures";
   $retries = $last_retry = 0;
   if ( -f $failure_file ) {
-    open( FAILS, "<$failure_file" )
+    open( FAILS, "<", $failure_file )
       or die "Cannot open $main::current_incoming_short/$failure_file: $!\n";
     my $line = <FAILS>;
     close(FAILS);
@@ -868,39 +814,6 @@ outer_loop: while (<CHANGES>) {
     push( @$keep_list, $failure_file );
   } ## end if ( -f $failure_file )
 
-  # run PGP on the file to check the signature
-  if ( !( $signator = pgp_check($changes) ) ) {
-    msg(
-       "log,mail",
-       "$main::current_incoming_short/$changes has bad PGP/GnuPG signature!\n"
-    );
-    msg( "log", "(uploader $main::mail_addr)\n" );
-  remove_only_changes:
-    msg(
-      "log,mail",
-"Removing $main::current_incoming_short/$changes, but keeping its associated ",
-      "files for now.\n"
-    );
-    rm($changes);
-
-    # Set SGID bit on associated files, so that the test for Debian files
-    # without a .changes doesn't consider them.
-    foreach (@filenames) {
-      my @st = stat($_);
-      next if !@st;    # file may have disappeared in the meantime
-      chmod +( $st[ST_MODE] |= S_ISGID ), $_;
-    }
-    return;
-  } elsif ( $signator eq "LOCAL ERROR" ) {
-
-    # An error has appened when starting pgp... Don't process the file,
-    # but also don't delete it
-    debug(
-"Can't PGP/GnuPG check $main::current_incoming_short/$changes -- don't process it for now"
-    );
-    return;
-  } ## end elsif ( $signator eq "LOCAL ERROR")
-
   die "Cannot stat ${main::current_incoming_short}/$changes (??): $!\n"
     if !( @changes_stats = stat($changes) );
 
@@ -1008,9 +921,7 @@ outer_loop: while (<CHANGES>) {
     return;
   } ## end if ( $retries > 0 && (...
 
-  if ( $conf::upload_method eq "ftp" ) {
-    return if !ftp_open();
-  }
+  return if !ftp_open();
 
   # check if the job is already present on target
   # (moved to here, to avoid bothering target as long as there are errors in
@@ -1056,7 +967,7 @@ outer_loop: while (<CHANGES>) {
       rm( $changes, @filenames, $failure_file );
     } else {
       $last_retry = time;
-      if ( open( FAILS, ">$failure_file" ) ) {
+      if ( open( FAILS, ">", $failure_file ) ) {
         print FAILS "$retries $last_retry\n";
         close(FAILS);
         chmod( 0600, $failure_file )
@@ -1087,6 +998,17 @@ outer_loop: while (<CHANGES>) {
   msg( "log",
        "$changes processed successfully (uploader $main::mail_addr)\n" );
 
+  return;
+
+  remove_only_changes:
+  msg(
+    "log,mail",
+    "Removing $main::current_incoming_short/$changes, but keeping its "
+    . "associated files for now.\n"
+    );
+  rm($changes);
+  return;
+
   # Check for files that have the same stem as the .changes (and weren't
   # mentioned there) and delete them. It happens often enough that people
   # upload a .orig.tar.gz where it isn't needed and also not in the
@@ -1117,6 +1039,57 @@ outer_loop: while (<CHANGES>) {
   #}
 } ## end sub process_changes($\@)
 
+#
+# process one .dak-commands file
+#
+sub process_dak_commands {
+  my $commands = shift;
+
+  msg("log", "processing ${main::current_incoming_short}/$commands\n");
+
+  # TODO: get mail address from signed contents
+  # and NOT implement a third parser for armored PGP...
+  $main::mail_addr = undef;
+
+  # check signature
+  my $signator = pgp_check($commands);
+  if (!$signator) {
+       msg("log,mail",
+           "$main::current_incoming_short/$commands has bad PGP/GnuPG signature!\n");
+       msg("log,mail",
+               "Removing $main::current_incoming_short/$commands\n");
+       rm($commands);
+       return;
+  }
+  elsif ($signator eq 'LOCAL ERROR') {
+       debug("Can't check signature for $main::current_incoming_short/$commands -- don't process it for now");
+       return;
+  }
+  msg("log,mail", "(PGP/GnuPG signature by $signator)\n");
+
+  return if !ftp_open();
+
+  # check target
+  my @filenames = ($commands);
+  if (my $ls_l = is_on_target($commands, @filenames)) {
+       msg("log,mail", "$main::current_incoming_short/$commands is already present on target host:\n");
+       msg("log,mail", "$ls_l\n");
+       msg("log,mail", "Job $commands removed.\n");
+       rm($commands);
+       return;
+  }
+
+  if (!copy_to_target($commands)) {
+       msg("log,mail", "$commands couldn't be uploaded to target.\n");
+       msg("log,mail", "Giving up and removing it.\n");
+       rm($commands);
+       return;
+  }
+
+  rm($commands);
+  msg("mail", "$commands uploaded successfully to $conf::target\n");
+}
+
 #
 # process one .commands file
 #
@@ -1128,17 +1101,35 @@ sub process_commands($) {
 
   format_status_str( $main::current_changes, $commands );
   $main::dstat = "c";
+  $main::mail_addr = "";
   write_status_file() if $conf::statusdelay;
 
   msg( "log", "processing $main::current_incoming_short/$commands\n" );
 
+  # run PGP on the file to check the signature
+  if ( !( $signator = pgp_check($commands) ) ) {
+    msg(
+      "log,mail",
+      "$main::current_incoming_short/$commands has bad PGP/GnuPG signature!\n"
+    );
+    goto remove;
+  } elsif ( $signator eq "LOCAL ERROR" ) {
+
+    # An error has happened when starting pgp... Don't process the file,
+    # but also don't delete it
+    debug(
+"Can't PGP/GnuPG check $main::current_incoming_short/$commands -- don't process it for now"
+    );
+    return;
+  } ## end elsif ( $signator eq "LOCAL ERROR")
+  msg( "log", "(PGP/GnuPG signature by $signator)\n" );
+
   # parse the .commands file
-  if ( !open( COMMANDS, "<$commands" ) ) {
+  if ( !open( COMMANDS, "<", $commands ) ) {
     msg( "log", "Cannot open $main::current_incoming_short/$commands: $!\n" );
     return;
   }
   $pgplines        = 0;
-  $main::mail_addr = "";
   @cmds            = ();
 outer_loop: while (<COMMANDS>) {
     if (/^---+(BEGIN|END) PGP .*---+$/) {
@@ -1187,27 +1178,6 @@ outer_loop: while (<COMMANDS>) {
     goto remove;
   } ## end if ( $pgplines < 3 )
 
-  # run PGP on the file to check the signature
-  if ( !( $signator = pgp_check($commands) ) ) {
-    msg(
-      "log,mail",
-      "$main::current_incoming_short/$commands has bad PGP/GnuPG signature!\n"
-    );
-  remove:
-    msg( "log,mail", "Removing $main::current_incoming_short/$commands\n" );
-    rm($commands);
-    return;
-  } elsif ( $signator eq "LOCAL ERROR" ) {
-
-    # An error has appened when starting pgp... Don't process the file,
-    # but also don't delete it
-    debug(
-"Can't PGP/GnuPG check $main::current_incoming_short/$commands -- don't process it for now"
-    );
-    return;
-  } ## end elsif ( $signator eq "LOCAL ERROR")
-  msg( "log", "(PGP/GnuPG signature by $signator)\n" );
-
   # now process commands
   msg(
     "mail",
@@ -1349,7 +1319,7 @@ outer_loop: while (<COMMANDS>) {
       } elsif ( $conf::upload_method ne "copy" ) {
         msg( "mail,log", "cancel not available\n" );
       } elsif (
-          $word[1] !~ m,^[a-zA-Z0-9.+_:@=%-][~a-zA-Z0-9.+_:@=%-]*\.changes$, )
+          $word[1] !~ m,$re_file_safe_prefix\.changes\z, )
       {
         msg( "mail,log",
           "argument to cancel must be one .changes filename without path\n" );
@@ -1388,6 +1358,12 @@ outer_loop: while (<COMMANDS>) {
   rm($commands);
   msg( "log",
        "-- End of $main::current_incoming_short/$commands processing\n" );
+  return;
+
+  remove:
+  msg("log,mail", "Removing $main::current_incoming_short/$commands\n");
+  rm($commands);
+  return;
 } ## end sub process_commands($)
 
 sub age_delayed_queues() {
@@ -1504,9 +1480,14 @@ sub copy_to_target(@) {
       goto err if !$rv;
     }
   } else {
-    ( $msgs, $stat ) =
-      local_cmd( "$conf::cp @files $main::current_targetdir", 'NOCD' );
-    goto err if $stat;
+    for my $file (@files) {
+      eval { File::Copy::copy($file, $main::current_targetdir) };
+      if ($@) {
+        $stat = 1;
+        $msgs = $@;
+        goto err;
+      }
+    }
   }
 
   # check md5sums or sizes on target against our own
@@ -1545,9 +1526,14 @@ sub copy_to_target(@) {
         } ## end foreach $file (@files)
       } ## end if ( !$have_md5sums )
     } else {
-      ( $msgs, $stat ) = local_cmd("$conf::md5sum @files");
-      goto err if $stat;
-      @md5sum = split( "\n", $msgs );
+      for my $file (@files) {
+        my $md5 = eval { md5sum("$main::current_targetdir/$file") };
+        if ($@) {
+          $msgs = $@;
+          goto err;
+        }
+        push @md5sum, "$md5 $file" if $md5;
+      }
     }
 
     @expected_files = @files;
@@ -1593,8 +1579,12 @@ sub copy_to_target(@) {
         goto err if !$rv;
       } ## end foreach $file (@files)
     } else {
-      ( $msgs, $stat ) = local_cmd("$conf::chmod 644 @files");
-      goto err if $stat;
+      for my $file (@files) {
+        unless (chmod 0644, "$main::current_targetdir/$file") {
+          $msgs = "Could not chmod $file: $!";
+          goto err;
+        }
+      }
     }
   } ## end if ($conf::chmod_on_target)
 
@@ -1611,7 +1601,7 @@ err:
 
   # If "permission denied" was among the errors, test if the incoming is
   # writable at all.
-  if ( $msgs =~ /(permission denied|read-?only file)/i ) {
+  if ( $msgs && $msgs =~ /(permission denied|read-?only file)/i ) {
     if ( !check_incoming_writable() ) {
       msg( "log,mail", "(The incoming directory seems to be ",
            "unwritable.)\n" );
@@ -1647,30 +1637,78 @@ sub pgp_check($) {
   my $output = "";
   my $signator;
   my $found = 0;
-  my $stat;
+  my $stat = 1;
   local (*PIPE);
+  local $_;
+
+  if ($file =~ /$re_file_safe/) {
+    $file = $1;
+  } else {
+    msg( "log", "Tainted filename, skipping: $file\n" );
+    return "LOCAL ERROR";
+  }
+
+  # check the file has only one clear-signed section
+  my $fh;
+  unless (open $fh, "<", $file) {
+         msg("log,mail", "Could not open $file\n");
+         return "";
+  }
+  unless (<$fh> eq "-----BEGIN PGP SIGNED MESSAGE-----\n") {
+         msg("log,mail", "$file: does not start with a clearsigned message\n");
+         return "";
+  }
+  my $pgplines = 1;
+  while (<$fh>) {
+         if (/\A- /) {
+                 msg("log,mail", "$file: dash-escaped messages are not accepted\n");
+                 return "";
+         }
+         elsif ($_ eq "-----BEGIN PGP SIGNATURE-----\n"
+                    || $_ eq "-----END PGP SIGNATURE-----\n") {
+                 $pgplines++;
+         }
+         elsif (/\A--/) {
+                 msg("log,mail", "$file: unexpected OpenPGP armor\n");
+                 return "";
+         }
+         elsif ($pgplines > 3 && /\S/) {
+                 msg("log,mail", "$file: found text after end of signature\n");
+                 return "";
+         }
+  }
+  if ($pgplines != 3) {
+         msg("log,mail", "$file: doesn't seem to be a valid clearsigned OpenPGP message\n");
+         return "";
+  }
+  close $fh;
 
-  $stat = 1;
   if ( -x $conf::gpg ) {
-    debug(   "executing $conf::gpg --no-options --batch "
-           . "--no-default-keyring --always-trust "
-           . "--keyring "
-           . join( " --keyring ", @conf::keyrings )
-           . " --verify '$file'" );
-    if (
-         !open( PIPE,
-                    "$conf::gpg --no-options --batch "
-                  . "--no-default-keyring --always-trust "
-                  . "--keyring "
-                  . join( " --keyring ", @conf::keyrings )
-                  . " --verify '$file'"
-                  . " 2>&1 |"
-              )
-       )
-    {
-      msg( "log", "Can't open pipe to $conf::gpg: $!\n" );
+    my @command = ("$conf::gpg", "--no-options", "--batch", "--no-tty",
+                   "--trust-model", "always", "--no-default-keyring",
+                  (map +("--keyring" => $_), @conf::keyrings),
+                  "--verify", "-");
+    debug(   "executing " . join(" ", @command) );
+
+    my $child = open(PIPE, "-|");
+    if (!defined($child)) {
+      msg("log", "Can't open pipe to $conf::gpg: $!\n");
       return "LOCAL ERROR";
-    } ## end if ( !open( PIPE, "$conf::gpg --no-options --batch "...
+    }
+    if ($child == 0) {
+      unless (open(STDERR, ">&", \*STDOUT)) {
+        print "Could not redirect STDERR.";
+       exit(-1);
+      }
+      unless (open(STDIN, "<", $file)) {
+        print "Could not open $file: $!";
+       exit(-1);
+      }
+      { exec(@command) }; # BLOCK avoids warning about likely unreachable code
+      print "Could not exec gpg: $!";
+      exit(-1);
+    }
+
     $output .= $_ while (<PIPE>);
     close(PIPE);
     $stat = $?;
@@ -1743,7 +1781,7 @@ sub fork_statusd() {
 
     # open the FIFO for writing; this blocks until someone (probably ftpd)
     # opens it for reading
-    open( STATFIFO, ">$conf::statusfile" )
+    open( STATFIFO, ">", $conf::statusfile )
       or die "Cannot open $conf::statusfile\n";
     select(STATFIFO);
 
@@ -1789,7 +1827,7 @@ sub write_status_file() {
 
   return if !$conf::statusfile;
 
-  open( STATFILE, ">$conf::statusfile" )
+  open( STATFILE, ">", $conf::statusfile )
     or ( msg( "log", "Could not open $conf::statusfile: $!\n" ), return );
   my $oldsel = select(STATFILE);
 
@@ -1896,6 +1934,7 @@ sub send_status() {
 # open FTP connection to target host if not already open
 #
 sub ftp_open() {
+  return 1 unless $conf::upload_method eq "ftp";
 
   if ($main::FTP_chan) {
 
@@ -2028,19 +2067,6 @@ sub scp_cmd(@) {
   return ( $msg, $stat );
 } ## end sub scp_cmd(@)
 
-sub local_cmd($;$) {
-  my $cmd  = shift;
-  my $nocd = shift;
-  my ( $msg, $stat );
-
-  my $ecmd = ( $nocd ? "" : "cd $main::current_targetdir; " ) . $cmd;
-  debug("executing $ecmd");
-  $msg  = `($ecmd) 2>&1`;
-  $stat = $?;
-  return ( $msg, $stat );
-
-} ## end sub local_cmd($;$)
-
 #
 # check if target is alive (code stolen from Net::Ping.pm)
 #
@@ -2096,7 +2122,7 @@ sub check_incoming_writable() {
     my $file = "junk-for-writable-test-" . format_time();
     $file =~ s/[ :.]/-/g;
     local (*F);
-    open( F, ">$file" );
+    open( F, ">", $file );
     close(F);
     my $rv;
     ( $rv, $msg ) = ftp_cmd( "put", $file );
@@ -2105,8 +2131,10 @@ sub check_incoming_writable() {
     unlink $file;
     ftp_cmd( "delete", $file );
   } elsif ( $conf::upload_method eq "copy" ) {
-    ( $msg, $stat ) =
-      local_cmd( "rm -f $testfile; touch $testfile; " . "rm -f $testfile" );
+    unless(POSIX::access($main::current_targetdir, &POSIX::W_OK)) {
+      $msg = "No write access: $!";
+      $stat = 1;
+    }
   }
   chomp($msg);
   debug("exit status: $stat, output was: $msg");
@@ -2145,106 +2173,14 @@ sub rm(@) {
 #
 sub md5sum($) {
   my $file = shift;
-  my $line;
-
-  chomp( $line = `$conf::md5sum $file` );
-  debug( "md5sum($file): ",
-           $? ? "exit status $?"
-         : $line =~ /^(\S+)/ ? $1
-         :                     "match failed" );
-  return $? ? "" : $line =~ /^(\S+)/ ? $1 : "";
-} ## end sub md5sum($)
-
-#
-# check if a file probably belongs to a Debian upload
-#
-sub is_debian_file($) {
-  my $file = shift;
-  return $file =~ /\.(deb|dsc|(diff|tar)\.gz)$/
-    && $file !~ /\.orig\.tar\.gz/;
-}
-
-#
-# try to extract maintainer email address from some a non-.changes file
-# return "" if not possible
-#
-sub get_maintainer($) {
-  my $file       = shift;
-  my $maintainer = "";
-  local (*F);
-
-  if ( $file =~ /\.diff\.gz$/ ) {
-
-    # parse a diff
-    open( F, "$conf::gzip -dc '$file' 2>/dev/null |" ) or return "";
-    while (<F>) {
-
-      # look for header line of a file */debian/control
-      last if m,^\+\+\+\s+[^/]+/debian/control(\s+|$),;
-    }
-    while (<F>) {
-      last if /^---/;   # end of control file patch, no Maintainer: found
-                        # inside control file patch look for Maintainer: field
-      $maintainer = $1, last if /^\+Maintainer:\s*(.*)$/i;
-    }
-    while (<F>) { }     # read to end of file to avoid broken pipe
-    close(F) or return "";
-  } elsif ( $file =~ /\.(deb|dsc|tar\.gz)$/ ) {
-    if ( $file =~ /\.deb$/ && $conf::ar ) {
-
-      # extract control.tar.gz from .deb with ar, then let tar extract
-      # the control file itself
-      open( F,
-                "($conf::ar p '$file' control.tar.gz | "
-              . "$conf::tar -xOf - "
-              . "--use-compress-program $conf::gzip "
-              . "control) 2>/dev/null |"
-          ) or return "";
-    } elsif ( $file =~ /\.dsc$/ ) {
-
-      # just do a plain grep
-      debug("get_maint: .dsc, no cmd");
-      open( F, "<$file" ) or return "";
-    } elsif ( $file =~ /\.tar\.gz$/ ) {
-
-      # let tar extract a file */debian/control
-      open( F,
-                "$conf::tar -xOf '$file' "
-              . "--use-compress-program $conf::gzip "
-              . "\\*/debian/control 2>&1 |"
-          ) or return "";
-    } else {
-      return "";
-    }
-    while (<F>) {
-      $maintainer = $1, last if /^Maintainer:\s*(.*)$/i;
-    }
-    close(F) or return "";
-  } ## end elsif ( $file =~ /\.(deb|dsc|tar\.gz)$/)
-
-  return $maintainer;
-} ## end sub get_maintainer($)
-
-#
-# return a pattern that matches all files that probably belong to one job
-#
-sub debian_file_stem($) {
-  my $file = shift;
-  my ( $pkg, $version );
-
-  # strip file suffix
-  $file =~ s,\.(deb|dsc|changes|(orig\.)?tar\.gz|diff\.gz)$,,;
+  my $md5 = Digest::MD5->new;
 
-  # if not is *_* (name_version), can't derive a stem and return just
-  # the file's name
-  return $file if !( $file =~ /^([^_]+)_([^_]+)/ );
-  ( $pkg, $version ) = ( $1, $2 );
+  open my $fh, "<", $file or return "";
+  $md5->addfile($fh);
+  close $fh;
 
-  # strip Debian revision from version
-  $version =~ s/^(.*)-[\d.+-]+$/$1/;
-
-  return "${pkg}_${version}*";
-} ## end sub debian_file_stem($)
+  return $md5->hexdigest;
+} ## end sub md5sum($)
 
 #
 # output a messages to several destinations
@@ -2357,6 +2293,8 @@ Subject: $subject
 Date: $date
 X-Debian: DAK
 X-DAK: DAK
+Precedence: bulk
+Auto-Submitted: auto-generated
 __MESSAGE__
 
   if ( length $package ) {
@@ -2446,16 +2384,16 @@ sub close_log($) {
   close(STDOUT);
   close(STDERR);
 
-  open( LOG, ">>$conf::logfile" )
+  open( LOG, ">>", $conf::logfile )
     or die "Cannot open my logfile $conf::logfile: $!\n";
   chmod( 0644, $conf::logfile )
     or msg( "log", "Cannot set modes of $conf::logfile: $!\n" );
   select( ( select(LOG), $| = 1 )[0] );
 
-  open( STDOUT, ">&LOG" )
+  open( STDOUT, ">&", \*LOG )
     or msg( "log",
       "$main::progname: Can't redirect stdout to " . "$conf::logfile: $!\n" );
-  open( STDERR, ">&LOG" )
+  open( STDERR, ">&", \*LOG )
     or msg( "log",
       "$main::progname: Can't redirect stderr to " . "$conf::logfile: $!\n" );
   msg( "log", "Restart after SIGHUP\n" );
diff --git a/tools/obsolete_lintian_tags.pl b/tools/obsolete_lintian_tags.pl
new file mode 100755 (executable)
index 0000000..eeb0647
--- /dev/null
+++ b/tools/obsolete_lintian_tags.pl
@@ -0,0 +1,48 @@
+#!/usr/bin/perl
+#
+# Generates a list of obsolete lintian autoreject tags
+# (C) 2012 Niels Thykier <nthykier@debian.org>
+# (C) 2012 Luca Falavigna <dktrkranz@debian.org>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# version 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+# 02111-1307 USA
+
+
+use strict;
+use warnings;
+
+BEGIN {
+    $ENV{'LINTIAN_ROOT'} = '/usr/share/lintian'
+        unless defined $ENV{'LINTIAN_ROOT'};
+};
+
+use Getopt::Long;
+use lib "$ENV{'LINTIAN_ROOT'}/lib";
+use Lintian::Profile;
+
+my $profile = Lintian::Profile->new ('debian');
+my @lintian_tags = (sort $profile->tags(1));
+my $autoreject_tags = '../config/debian/lintian.tags';
+
+open (LINTIAN, $autoreject_tags) or die ('Could not open lintian tags file.');
+foreach my $tag (<LINTIAN>) {
+    if ($tag =~ m/\s+- \S+/) {
+        $tag =~ s/\s+- //;
+        chomp $tag;
+        print "$tag\n" if not grep (/^$tag$/i, @lintian_tags);
+    }
+}
+close (LINTIAN);
+
+exit 0;
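
For comparison, the membership test in the script above could also be done with a single hash lookup per line instead of grepping the whole tag list, which avoids treating tag names as regular expression patterns. A small sketch under that assumption; the tag list here is a placeholder rather than one loaded from Lintian::Profile, and the relative path mirrors the script above.

use strict;
use warnings;

# placeholder tag list; the script above obtains this from Lintian::Profile
my @lintian_tags = qw(known-tag-one known-tag-two);
my %known = map { lc($_) => 1 } @lintian_tags;

open(my $fh, '<', '../config/debian/lintian.tags')
    or die "Could not open lintian tags file: $!";
while (my $line = <$fh>) {
    # lines of interest look like "    - tag-name"
    next unless $line =~ /\s+- (\S+)/;
    print "$1\n" unless $known{ lc $1 };   # case-insensitive, like the /i grep
}
close($fh);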