git.decadent.org.uk Git - dak.git/commitdiff
merge from ftp-master
author    Mike O'Connor <stew@vireo.org>
          Thu, 5 Nov 2009 00:53:31 +0000 (19:53 -0500)
committer Mike O'Connor <stew@vireo.org>
          Thu, 5 Nov 2009 00:53:31 +0000 (19:53 -0500)
Signed-off-by: Mike O'Connor <stew@vireo.org>
86 files changed:
config/backports.org/cron.hourly
config/debian/common
config/debian/cron.dinstall
config/debian/cron.hourly
config/debian/cron.unchecked
config/debian/cron.weekly
config/debian/dinstall.functions [new file with mode: 0644]
config/debian/dinstall.variables [new file with mode: 0644]
config/debian/lintian.tags
dak/clean_suites.py
dak/dak.py
dak/dakdb/update22.py [new file with mode: 0755]
dak/dakdb/update23.py [changed mode: 0644->0755]
dak/dakdb/update25.py [new file with mode: 0644]
dak/generate_filelist.py [new file with mode: 0755]
dak/import_known_changes.py
dak/ls.py
dak/make_suite_file_list.py
dak/new_security_install.py
dak/process_accepted.py [deleted file]
dak/process_new.py
dak/process_unchecked.py [deleted file]
dak/process_upload.py [new file with mode: 0755]
dak/queue_report.py
dak/test/001/1.dsc [deleted file]
dak/test/001/2.dsc [deleted file]
dak/test/001/3.dsc [deleted file]
dak/test/001/4.dsc [deleted file]
dak/test/001/5.dsc [deleted file]
dak/test/001/6.dsc [deleted file]
dak/test/001/test.py [deleted file]
dak/test/002/empty.changes [deleted file]
dak/test/002/test.py [deleted file]
dak/test/003/krb5_1.2.2-4_m68k.changes [deleted file]
dak/test/003/test.py [deleted file]
dak/test/004/test.py [deleted file]
dak/test/005/bogus-post.changes [deleted file]
dak/test/005/bogus-pre.changes [deleted file]
dak/test/005/test.py [deleted file]
dak/test/005/valid.changes [deleted file]
dak/test/006/test.py [deleted file]
dak/update_db.py
daklib/binary.py [changed mode: 0755->0644]
daklib/changes.py [changed mode: 0755->0644]
daklib/config.py [changed mode: 0755->0644]
daklib/dak_exceptions.py [changed mode: 0755->0644]
daklib/daklog.py [changed mode: 0755->0644]
daklib/dbconn.py [changed mode: 0755->0644]
daklib/holding.py [changed mode: 0755->0644]
daklib/lintian.py [new file with mode: 0644]
daklib/queue.py [changed mode: 0755->0644]
daklib/queue_install.py [new file with mode: 0644]
daklib/regexes.py [changed mode: 0755->0644]
daklib/singleton.py [deleted file]
daklib/summarystats.py [changed mode: 0755->0644]
daklib/textutils.py [changed mode: 0755->0644]
daklib/urgencylog.py [changed mode: 0755->0644]
daklib/utils.py [changed mode: 0755->0644]
scripts/debian/copyoverrides [deleted file]
scripts/debian/mkchecksums [deleted file]
scripts/debian/mkfilesindices [deleted file]
scripts/debian/mklslar [deleted file]
scripts/debian/mkmaintainers [deleted file]
tests/base_test.py [new file with mode: 0644]
tests/fixtures/changes/1.changes [new file with mode: 0644]
tests/fixtures/changes/2.changes [new file with mode: 0644]
tests/fixtures/changes/bogus-post.changes [new file with mode: 0644]
tests/fixtures/changes/bogus-pre.changes [new file with mode: 0644]
tests/fixtures/changes/valid.changes [new file with mode: 0644]
tests/fixtures/dak.conf [new file with mode: 0644]
tests/fixtures/dsc/1.dsc [new file with mode: 0644]
tests/fixtures/dsc/2.dsc [new file with mode: 0644]
tests/fixtures/dsc/3.dsc [new file with mode: 0644]
tests/fixtures/dsc/4.dsc [new file with mode: 0644]
tests/fixtures/dsc/5.dsc [new file with mode: 0644]
tests/fixtures/dsc/6.dsc [new file with mode: 0644]
tests/fixtures/dsc/7.dsc [new file with mode: 0644]
tests/test_extract_component_from_section.py [new file with mode: 0755]
tests/test_fix_maintainer.py [new file with mode: 0755]
tests/test_formats.py
tests/test_imports.py [new file with mode: 0755]
tests/test_lintian.py [new file with mode: 0755]
tests/test_parse_changes.py [new file with mode: 0755]
tests/test_process_gpgv_output.py [new file with mode: 0755]
tests/test_regexes.py
tests/test_srcformats.py

diff --git a/config/backports.org/cron.hourly b/config/backports.org/cron.hourly
index 45980065ee1822b7c8c6bdf4ba9b86811c9503ec..b5e0646270f135ece609196817b1adeb369e6ac5 100755 (executable)
@@ -53,6 +53,7 @@ symlinks -d -r $ftpdir
 
 cd $masterdir
 dak make-suite-file-list
+dak generate-filelist
 
 # Generate override files
 cd $overridedir
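
The new generate-filelist runs alongside the old make-suite-file-list while the apt-ftparchive configuration is migrated. For context only: apt-ftparchive consumes such pre-generated lists via the FileList/SourceFileList directives of a Tree section; an illustrative fragment (paths invented, not part of this commit):

    tree "dists/lenny-backports" {
       FileList "/srv/backports.org/database/dists/lenny-backports_$(SECTION)_binary-$(ARCH).list";
       SourceFileList "/srv/backports.org/database/dists/lenny-backports_$(SECTION)_source.list";
       Sections "main contrib non-free";
       Architectures "alpha amd64 arm i386 source";
    };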
diff --git a/config/debian/common b/config/debian/common
index 599f8dc55a02c517cfc3e39852a253b9c1161064..7cd759a9af32bbb9909918d4d94e3ed78dd93c34 100644 (file)
@@ -43,26 +43,6 @@ function make_buildd_dir () {
        find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
 }
 
-# move accepted NEW packages from stagedir into queue/accepted
-function acceptnew () {
-    cd $newstage
-    for file in $(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs); do
-        sed '1,/Files:/d' "${file}" | sed '/BEGIN PGP SIGNATURE/,$d' \
-            | while read notused1 notused2 notused3 notused4 NAME; do
-            if [ -z "${NAME}" ]; then
-                # Sometimes there is a newline after the Files:, ignore it
-                continue
-            fi
-            if [ -f "${NAME}" ]; then
-                mv --target-directory="${accepted}" "${NAME}"
-            else
-                log_error "Error, couldn't find file ${NAME} to move to ${accepted}"
-            fi
-        done
-        mv --target-directory="${accepted}"  "${file}" "${file%%.changes}.dak"
-    done
-}
-
 # Do the unchecked processing, in case we have files.
 function do_unchecked () {
     cd $unchecked
@@ -73,7 +53,7 @@ function do_unchecked () {
     UNCHECKED_WITHOUT_LOCK=${UNCHECKED_WITHOUT_LOCK:-""}
 
     echo "$timestamp": ${changes:-"Nothing to do"}  >> $report
-    dak process-unchecked -a ${UNCHECKED_WITHOUT_LOCK} -d "$unchecked" >> $report
+    dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$unchecked" >> $report
 }
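
process-upload (new in this commit) folds the old two-pass flow into one: the deleted dak/process_accepted.py and dak/process_unchecked.py in the file list above, and the dak.py hunk below, show the same consolidation. At the cron level, assuming the flags keep their old meanings:

    # before: two passes via queue/accepted
    dak process-unchecked -a ${UNCHECKED_WITHOUT_LOCK} -d "$unchecked"   # unchecked -> accepted
    dak process-accepted -pa -d "$accepted"                             # accepted  -> pool
    # after: one pass straight from queue/unchecked
    dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$unchecked"     # unchecked -> pool/policy queues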
 
 function sync_debbugs () {
diff --git a/config/debian/cron.dinstall b/config/debian/cron.dinstall
index 200c7a646986cbf7b189a1d28a7c2dee3be5773a..71840f5a397e5e8f39c5e4152c3a33437b5c39da 100755 (executable)
@@ -44,417 +44,8 @@ export SCRIPTVARS=/srv/ftp.debian.org/dak/config/debian/vars
 # common functions are "outsourced"
 . "${configdir}/common"
 
-# Timestamp. Used for dinstall stat graphs
-function ts() {
-        echo "Archive maintenance timestamp ($1): $(date +%H:%M:%S)"
-}
-
-# Cleanup actions
-function cleanup() {
-       rm -f ${LOCK_DAILY}
-       rm -f ${LOCK_ACCEPTED}
-}
-
-# If we error out this one is called, *FOLLOWED* by cleanup above
-function onerror() {
-    ERRDATE=$(date "+%Y.%m.%d-%H:%M:%S")
-
-    subject="ATTENTION ATTENTION!"
-    if [ "${error}" = "false" ]; then
-        subject="${subject} (continued)"
-    else
-        subject="${subject} (interrupted)"
-    fi
-    subject="${subject} dinstall error at ${ERRDATE} in ${STAGEFILE} - (Be quiet, Brain, or I'll stab you with a Q-tip)"
-
-    cat "${STAGEFILE}.log" | mail -s "${subject}" -a "X-Debian: DAK" cron@ftp-master.debian.org
-}
-
-########################################################################
-# the actual dinstall functions follow                                 #
-########################################################################
-
-# Setup the notice file to tell bad mirrors they used the wrong time
-function notice() {
-    rm -f "$NOTICE"
-    cat > "$NOTICE" <<EOF
-Packages are currently being installed and indices rebuilt.
-Maintenance is automatic, starting at 01|07|13|19:52 UTC,
-and ending about an hour later.  This file is then removed.
-
-You should not mirror the archive during this period. If you find this
-file on a Debian mirror please have a nice talk with the admin. They
-are doing something wrong.
-EOF
-}
-
-# pushing merkels QA user, part one
-function merkel1() {
-    log "Telling merkels QA user that we start dinstall"
-    ssh -2 -i ~dak/.ssh/push_merkel_qa  -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 qa@merkel.debian.org sleep 1
-}
-
-# Create the postgres dump files
-function pgdump_pre() {
-    log "Creating pre-daily-cron-job backup of projectb database..."
-    pg_dump projectb > $base/backup/dump_pre_$(date +%Y.%m.%d-%H:%M:%S)
-}
-
-function pgdump_post() {
-    log "Creating post-daily-cron-job backup of projectb database..."
-    cd $base/backup
-    POSTDUMP=$(date +%Y.%m.%d-%H:%M:%S)
-    pg_dump projectb > $base/backup/dump_$POSTDUMP
-    pg_dumpall --globals-only > $base/backup/dumpall_$POSTDUMP
-    ln -sf $base/backup/dump_$POSTDUMP current
-    ln -sf $base/backup/dumpall_$POSTDUMP currentall
-}
-
-# Load the dak-dev projectb
-function pgdakdev() {
-    cd $base/backup
-    echo "drop database projectb" | psql -p 5433 template1
-       cat currentall | psql -p 5433 template1
-    createdb -p 5433 -T template0 projectb
-    fgrep -v '\connect' current | psql -p 5433 projectb
-}
-
-# Updating various files
-function updates() {
-    log "Updating Bugs docu, Mirror list and mailing-lists.txt"
-    cd $configdir
-    $scriptsdir/update-bugdoctxt
-    $scriptsdir/update-mirrorlists
-    $scriptsdir/update-mailingliststxt
-    $scriptsdir/update-pseudopackages.sh
-}
-
-# Process (oldstable)-proposed-updates "NEW" queue
-function punew_do() {
-    cd "${queuedir}/${1}"
-    date -u -R >> REPORT
-    dak process-new -a -C COMMENTS >> REPORT || true
-    echo >> REPORT
-}
-function punew() {
-    log "Doing automated p-u-new processing"
-    punew_do "$1"
-}
-function opunew() {
-    log "Doing automated o-p-u-new processing"
-    punew_do "$1"
-}
-
-# The first i18n one, syncing new descriptions
-function i18n1() {
-    log "Synchronizing i18n package descriptions"
-    # First sync their newest data
-    cd ${scriptdir}/i18nsync
-    rsync -aq --delete --delete-after ddtp-sync:/does/not/matter . || true
-
-    # Now check if we still know about the packages for which they created the files
-    # is the timestamp signed by us?
-    if $(gpgv --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg timestamp.gpg timestamp); then
-        # now read it. As its signed by us we are sure the content is what we expect, no need
-        # to do more here. And we only test -d a directory on it anyway.
-        TSTAMP=$(cat timestamp)
-        # do we have the dir still?
-        if [ -d ${scriptdir}/i18n/${TSTAMP} ]; then
-            # Lets check!
-            if ${scriptsdir}/ddtp-i18n-check.sh . ${scriptdir}/i18n/${TSTAMP}; then
-                # Yay, worked, lets copy around
-                for dir in squeeze sid; do
-                    if [ -d dists/${dir}/ ]; then
-                        cd dists/${dir}/main/i18n
-                        rsync -aq --delete --delete-after  . ${ftpdir}/dists/${dir}/main/i18n/.
-                    fi
-                    cd ${scriptdir}/i18nsync
-                done
-            else
-                echo "ARRRR, bad guys, wrong files, ARRR"
-                echo "Arf, Arf, Arf, bad guys, wrong files, arf, arf, arf" | mail -s "Don't you kids take anything. I'm watching you. I've got eye implants in the back of my head." debian-l10n-devel@lists.alioth.debian.org
-            fi
-        else
-            echo "ARRRR, missing the timestamp ${TSTAMP} directory, not updating i18n, ARRR"
-            echo "Arf, Arf, Arf, missing the timestamp ${TSTAMP} directory, not updating i18n, arf, arf, arf" | mail -s "Lisa, if you don't like your job you don't strike. You just go in every day and do it really half-assed. That's the American way." debian-l10n-devel@lists.alioth.debian.org
-        fi
-    else
-        echo "ARRRRRRR, could not verify our timestamp signature, ARRR. Don't mess with our files, i18n guys, ARRRRR."
-        echo "Arf, Arf, Arf, could not verify our timestamp signature, arf. Don't mess with our files, i18n guys, arf, arf, arf" | mail -s "You can't keep blaming yourself. Just blame yourself once, and move on." debian-l10n-devel@lists.alioth.debian.org
-    fi
-}
-
-# Process the accepted queue
-function accepted() {
-    log "Processing queue/accepted"
-    rm -f "$accepted/REPORT"
-    dak process-accepted -pa -d "$accepted" > "$accepted/REPORT"
-    cat "$accepted/REPORT" | mail -s "Install for $(date +"%D - %R")" ftpmaster@ftp-master.debian.org
-    chgrp debadmin "$accepted/REPORT"
-    chmod 664 "$accepted/REPORT"
-}
-
-function cruft() {
-    log "Checking for cruft in overrides"
-    dak check-overrides
-}
-
-function msfl() {
-    log "Generating suite file lists for apt-ftparchive"
-    dak make-suite-file-list
-}
-
-function fingerprints() {
-    log "Updating fingerprints"
-    dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg
-
-    OUTFILE=$(mktemp)
-    dak import-keyring --generate-users "%s" /srv/keyring.debian.org/keyrings/debian-maintainers.gpg >"${OUTFILE}"
-
-    if [ -s "${OUTFILE}" ]; then
-        /usr/sbin/sendmail -odq -oi -t -f envelope@ftp-master.debian.org <<EOF
-From: Debian FTP Masters <ftpmaster@ftp-master.debian.org>
-To: <debian-project@lists.debian.org>
-Subject: Debian Maintainers Keyring changes
-Content-Type: text/plain; charset=utf-8
-MIME-Version: 1.0
-
-The following changes to the debian-maintainers keyring have just been activated:
-
-$(cat $OUTFILE)
-
-Debian distribution maintenance software,
-on behalf of the Keyring maintainers
-
-EOF
-    fi
-    rm -f "$OUTFILE"
-}
-
-function overrides() {
-    log "Writing overrides into text files"
-    cd $overridedir
-    dak make-overrides
-
-    # FIXME
-    rm -f override.sid.all3
-    for i in main contrib non-free main.debian-installer; do cat override.sid.$i >> override.sid.all3; done
-}
-
-function mpfm() {
-    log "Generating package / file mapping"
-    dak make-pkg-file-mapping | bzip2 -9 > $base/ftp/indices/package-file.map.bz2
-}
-
-function packages() {
-    log "Generating Packages and Sources files"
-    cd $configdir
-    GZIP='--rsyncable' ; export GZIP
-    apt-ftparchive generate apt.conf
-}
-
-function pdiff() {
-    log "Generating pdiff files"
-    dak generate-index-diffs
-}
-
-function release() {
-    log "Generating Release files"
-    dak generate-releases
-}
-
-function dakcleanup() {
-    log "Cleanup old packages/files"
-    dak clean-suites -m 10000
-    dak clean-queues
-}
-
-function buildd() {
-    # Needs to be rebuilt, as files have moved.  Due to unaccepts, we need to
-    # update this before wanna-build is updated.
-    log "Regenerating wanna-build/buildd information"
-    psql projectb -A -t -q -c "SELECT filename FROM queue_build WHERE suite = 5 AND queue = 0 AND in_queue = true AND filename ~ 'd(sc|eb)$'" > $dbdir/dists/unstable_accepted.list
-    symlinks -d /srv/incoming.debian.org/buildd > /dev/null
-    apt-ftparchive generate apt.conf.buildd
-}
-
-function buildd_dir() {
-    # Rebuilt the buildd dir to avoid long times of 403
-    log "Regenerating the buildd incoming dir"
-    STAMP=$(date "+%Y%m%d%H%M")
-    make_buildd_dir
-}
-
-function scripts() {
-    log "Running various scripts from $scriptsdir"
-    cd $scriptsdir
-    ./mkmaintainers
-    ./copyoverrides
-    ./mklslar
-    ./mkfilesindices
-    ./mkchecksums
-}
-
-function mirror() {
-    echo "Regenerating \"public\" mirror/ hardlink fun"
-    cd ${mirrordir}
-    rsync -aH --link-dest ${ftpdir} --exclude Archive_Maintenance_In_Progress --delete --delete-after --ignore-errors ${ftpdir}/. .
-}
-
-function wb() {
-    log "Trigger daily wanna-build run"
-    ssh -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 wbadm@buildd /org/wanna-build/trigger.daily || echo "W-B trigger.daily failed" | mail -s "W-B Daily trigger failed" ftpmaster@ftp-master.debian.org
-}
-
-function expire() {
-    log "Expiring old database dumps..."
-    cd $base/backup
-    $scriptsdir/expire_dumps -d . -p -f "dump_*"
-}
-
-function transitionsclean() {
-    log "Removing out of date transitions..."
-    cd $base
-    dak transitions -c -a
-}
-
-function reports() {
-    # Send a report on NEW/BYHAND packages
-    log "Nagging ftpteam about NEW/BYHAND packages"
-    dak queue-report | mail -e -s "NEW and BYHAND on $(date +%D)" ftpmaster@ftp-master.debian.org
-    # and one on crufty packages
-    log "Sending information about crufty packages"
-    dak cruft-report > $webdir/cruft-report-daily.txt
-    dak cruft-report -s experimental >> $webdir/cruft-report-daily.txt
-    cat $webdir/cruft-report-daily.txt | mail -e -s "Debian archive cruft report for $(date +%D)" ftpmaster@ftp-master.debian.org
-}
-
-function dm() {
-    log "Updating DM html page"
-    $scriptsdir/dm-monitor >$webdir/dm-uploaders.html
-}
-
-function bts() {
-    log "Categorizing uncategorized bugs filed against ftp.debian.org"
-    dak bts-categorize
-}
-
-function merkel2() {
-    # Push dak@merkel so it syncs the projectb there. Returns immediately, the sync runs detached
-    log "Trigger merkel/flotows projectb sync"
-    ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_projectb dak@merkel.debian.org sleep 1
-    # Also trigger flotow, the ftpmaster test box
-    ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_flotow_projectb dak@flotow.debconf.org sleep 1
-}
-
-function merkel3() {
-    # Push dak@merkel to tell it to sync the dd accessible parts. Returns immediately, the sync runs detached
-    log "Trigger merkels dd accessible parts sync"
-    ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_ddaccess dak@merkel.debian.org sleep 1
-}
-
-function mirrorpush() {
-    log "Starting the mirrorpush"
-    date -u > /srv/ftp.debian.org/web/mirrorstart
-    echo "Using dak v1" >> /srv/ftp.debian.org/web/mirrorstart
-    echo "Running on host $(hostname -f)" >> /srv/ftp.debian.org/web/mirrorstart
-    sudo -H -u archvsync /home/archvsync/runmirrors > ~dak/runmirrors.log 2>&1 &
-}
-
-function i18n2() {
-    log "Exporting package data foo for i18n project"
-    STAMP=$(date "+%Y%m%d%H%M")
-    mkdir -p ${scriptdir}/i18n/${STAMP}
-    cd ${scriptdir}/i18n/${STAMP}
-    dak control-suite -l stable > lenny
-    dak control-suite -l testing > squeeze
-    dak control-suite -l unstable > sid
-    echo "${STAMP}" > timestamp
-    gpg --secret-keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 55BE302B --detach-sign -o timestamp.gpg timestamp
-    rm -f md5sum
-    md5sum * > md5sum
-    cd ${webdir}/
-    ln -sfT ${scriptdir}/i18n/${STAMP} i18n
-
-    cd ${scriptdir}
-    find ./i18n -mindepth 1 -maxdepth 1 -mtime +2 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
-}
-
-function stats() {
-    log "Updating stats data"
-    cd $configdir
-    $scriptsdir/update-ftpstats $base/log/* > $base/misc/ftpstats.data
-    R --slave --vanilla < $base/misc/ftpstats.R
-    dak stats arch-space > $webdir/arch-space
-    dak stats pkg-nums > $webdir/pkg-nums
-}
-
-function aptftpcleanup() {
-    log "Clean up apt-ftparchive's databases"
-    cd $configdir
-    apt-ftparchive -q clean apt.conf
-}
-
-function compress() {
-    log "Compress old psql backups"
-    cd $base/backup/
-    find -maxdepth 1 -mindepth 1 -type f -name 'dump_pre_*' -mtime +2 -print0 | xargs -0 --no-run-if-empty rm
-
-    find -maxdepth 1 -mindepth 1 -type f -name 'dump_*' \! -name '*.bz2' \! -name '*.gz' -mmin +720 |
-    while read dumpname; do
-        echo "Compressing $dumpname"
-        bzip2 -9fv "$dumpname"
-    done
-    find -maxdepth 1 -mindepth 1 -type f -name "dumpall_*" \! -name '*.bz2' \! -name '*.gz' -mmin +720 |
-    while read dumpname; do
-        echo "Compressing $dumpname"
-        bzip2 -9fv "$dumpname"
-    done
-    finddup -l -d $base/backup
-}
-
-function logstats() {
-    $masterdir/tools/logs.py "$1"
-}
-
-# save timestamp when we start
-function savetimestamp() {
-       NOW=`date "+%Y.%m.%d-%H:%M:%S"`
-       echo ${NOW} > "${dbdir}/dinstallstart"
-}
-
-function maillogfile() {
-    cat "$LOGFILE" | mail -s "Log for dinstall run of ${NOW}" cron@ftp-master.debian.org
-}
-
-function renamelogfile() {
-    if [ -f "${dbdir}/dinstallstart" ]; then
-        NOW=$(cat "${dbdir}/dinstallstart")
-#        maillogfile
-        mv "$LOGFILE" "$logdir/dinstall_${NOW}.log"
-        logstats "$logdir/dinstall_${NOW}.log"
-        bzip2 -9 "$logdir/dinstall_${NOW}.log"
-    else
-        error "Problem, I don't know when dinstall started, unable to do log statistics."
-        NOW=`date "+%Y.%m.%d-%H:%M:%S"`
-#        maillogfile
-        mv "$LOGFILE" "$logdir/dinstall_${NOW}.log"
-        bzip2 -9 "$logdir/dinstall_${NOW}.log"
-    fi
-}
-
-function testingsourcelist() {
-    dak ls -s testing -f heidi -r .| egrep 'source$' > ${webdir}/testing.list
-}
-
-# do a last run of process-unchecked before dinstall is on.
-function process_unchecked() {
-    log "Processing the unchecked queue"
-    acceptnew
-    UNCHECKED_WITHOUT_LOCK="-p"
-    do_unchecked
-    sync_debbugs
-}
+# source the dinstall functions
+. "${configdir}/dinstall.functions"
 
 ########################################################################
 ########################################################################
@@ -521,6 +112,9 @@ function stage() {
     # Make sure we are always at the same place.
     cd ${configdir}
 
+    # We always use the same umask. If a function wants to do different, fine, but we reset.
+    umask 022
+
     touch "${STAGEFILE}"
 
     if [ -n "${TIME}" ]; then
@@ -548,53 +142,19 @@ LOGFILE="$logdir/dinstall.log"
 
 exec >> "$LOGFILE" 2>&1
 
-# usually we are not using debug logs. Set to 1 if you want them.
-DEBUG=0
-
-# our name
-PROGRAM="dinstall"
-
-# where do we want mails to go? For example log entries made with error()
-if [ "x$(hostname -s)x" != "xriesx" ]; then
-    # Not our ftpmaster host
-    MAILTO=${MAILTO:-"root"}
-else
-    # Yay, ftpmaster
-    MAILTO=${MAILTO:-"ftpmaster@debian.org"}
-fi
+# And now source our default config
+. "${configdir}/dinstall.variables"
 
-# How many logfiles to keep
-LOGROTATE=${LOGROTATE:-400}
+# Make sure we start out with a sane umask setting
+umask 022
 
-# Marker for dinstall start
-DINSTALLSTART="${lockdir}/dinstallstart"
-# Marker for dinstall end
-DINSTALLEND="${lockdir}/dinstallend"
+# And use one locale, no matter what the caller has set
+export LANG=C
+export LC_ALL=C
 
 touch "${DINSTALLSTART}"
 ts "startup"
 
-# Tell everyone we are doing some work
-NOTICE="$ftpdir/Archive_Maintenance_In_Progress"
-
-# lock cron.unchecked (it immediately exits when this exists)
-LOCK_DAILY="$lockdir/daily.lock"
-
-# Lock cron.unchecked from doing work
-LOCK_ACCEPTED="$lockdir/unchecked.lock"
-
-# Lock process-new from doing work
-LOCK_NEW="$lockdir/processnew.lock"
-
-# This file is simply used to indicate to britney whether or not
-# the Packages file updates completed sucessfully.  It's not a lock
-# from our point of view
-LOCK_BRITNEY="$lockdir/britney.lock"
-
-# If this file exists we exit immediately after the currently running
-# function is done
-LOCK_STOP="$lockdir/archive.stop"
-
 lockfile -l 3600 "${LOCK_DAILY}"
 trap onerror ERR
 trap cleanup EXIT TERM HUP INT QUIT
@@ -609,14 +169,6 @@ GO=(
 )
 stage $GO
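
Each dinstall step is described by a GO array of NAME=value pairs and run through stage(), whose body is outside this hunk. A sketch of how such a helper can bind the array via bash indirect expansion -- illustrative, not the actual stage() from this commit:

    function stage() {
        # Bind the caller's global GO array (FUNC=, TIME=, ARGS=, ERR=) as locals.
        local A='GO[@]'
        local "${!A}"
        ${FUNC} ${ARGS}                    # run the stage function
        [ -n "${TIME}" ] && ts "${TIME}"   # timestamp for the dinstall stat graphs
    }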
 
-GO=(
-    FUNC="notice"
-    TIME=""
-    ARGS=""
-    ERR="false"
-)
-stage $GO
-
 GO=(
     FUNC="merkel1"
     TIME="init"
@@ -647,7 +199,8 @@ GO=(
     ARGS="p-u-new"
     ERR=""
 )
-stage $GO
+### TODO: policy-new
+#stage $GO
 
 GO=(
     FUNC="opunew"
@@ -655,7 +208,8 @@ GO=(
     ARGS="o-p-u-new"
     ERR=""
 )
-stage $GO
+### TODO: policy-new
+#stage $GO
 
 GO=(
     FUNC="i18n1"
@@ -670,29 +224,12 @@ lockfile "$LOCK_NEW"
 
 GO=(
     FUNC="process_unchecked"
-    TIME=""
-    ARGS=""
-    ERR=""
-)
-stage $GO
-
-
-GO=(
-    FUNC="accepted"
-    TIME="accepted"
+    TIME="unchecked"
     ARGS=""
     ERR=""
 )
 stage $GO
 
-GO=(
-    FUNC="buildd_dir"
-    TIME="buildd_dir"
-    ARGS=""
-    ERR="false"
-)
-stage $GO
-
 GO=(
     FUNC="cruft"
     TIME="cruft"
@@ -712,6 +249,14 @@ GO=(
 )
 stage $GO
 
+GO=(
+    FUNC="filelist"
+    TIME="generate-filelist"
+    ARGS=""
+    ERR=""
+)
+stage $GO
+
 GO=(
     FUNC="fingerprints"
     TIME="import-keyring"
@@ -766,7 +311,8 @@ GO=(
     ARGS=""
     ERR=""
 )
-stage $GO
+### TODO: clean-* fixup
+#stage $GO
 
 GO=(
     FUNC="buildd"
@@ -800,7 +346,6 @@ GO=(
 )
 stage $GO &
 
-rm -f "${NOTICE}"
 rm -f "${LOCK_DAILY}"
 
 ts "locked part finished"
diff --git a/config/debian/cron.hourly b/config/debian/cron.hourly
index 649445c293177aeae55c26cbe5629e94a2842dea..24955023e4add65bb5da426ca25b5ec85f0ef633 100755 (executable)
@@ -11,10 +11,10 @@ date -u > $ftpdir/project/trace/ftp-master.debian.org
 echo "Using dak v1" >> $ftpdir/project/trace/ftp-master.debian.org
 echo "Running on host: $(hostname -f)" >> $ftpdir/project/trace/ftp-master.debian.org
 dak import-users-from-passwd
-dak queue-report -n > $webdir/new.html
-dak queue-report -8 -d accepted,new,byhand,proposedupdates,oldproposedupdates
+#dak queue-report -n > $webdir/new.html
+#dak queue-report -8 -d accepted,new,byhand,proposedupdates,oldproposedupdates
 dak show-deferred > ${webdir}/deferred.html
-cd $queuedir/new ; dak show-new *.changes > /dev/null
+#cd $queuedir/new ; dak show-new *.changes > /dev/null
 $base/dak/tools/queue_rss.py -q $queuedir/new -o $webdir/rss/ -d $base/misc
 $base/dak/tools/removals.pl > $webdir/rss/removals.rss
 
diff --git a/config/debian/cron.unchecked b/config/debian/cron.unchecked
index b3c078e30d3925ba565782627e34462e67656b5d..507fc5843ac8089803028c7f964daed859620e82 100755 (executable)
@@ -59,7 +59,7 @@ cleanup() {
 function do_buildd () {
     if lockfile -r3 $NOTICE; then
         LOCKDAILY="YES"
-        psql projectb -A -t -q -c "SELECT filename FROM queue_build WHERE queue = 0 AND suite = 5 AND in_queue = true AND filename ~ 'd(sc|eb)$'" > $dbdir/dists/unstable_accepted.list
+        psql projectb -A -t -q -c "SELECT build_queue.path || '/' || build_queue_files.filename FROM build_queue_files LEFT JOIN build_queue ON (build_queue.id = build_queue_files.build_queue_id) WHERE queue_name = 'buildd' AND filename ~ 'd(sc|eb)$';" > $dbdir/dists/unstable_accepted.list
         cd $overridedir
         dak make-overrides &>/dev/null
         rm -f override.sid.all3 override.sid.all3.src
@@ -79,9 +79,9 @@ function do_buildd () {
 # the actual unchecked functions follow                                #
 ########################################################################
 
-lockfile -r3 "$LOCK_NEW"
-acceptnew
-rm -f "$LOCK_NEW"
+#lockfile -r3 "$LOCK_NEW"
+#acceptnew
+#rm -f "$LOCK_NEW"
 
 # only run one cron.unchecked
 lockfile -r3 $LOCKFILE || exit 0
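
The rewritten buildd query reflects the schema split introduced by update22.py (shown at the end of this page): the old queue/queue_build tables become build_queue plus build_queue_files, and full paths are now assembled from build_queue.path. The same query, reformatted with aliases for readability:

    SELECT bq.path || '/' || bqf.filename
      FROM build_queue_files bqf
      LEFT JOIN build_queue bq ON (bq.id = bqf.build_queue_id)
     WHERE bq.queue_name = 'buildd'
       AND bqf.filename ~ 'd(sc|eb)$';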
diff --git a/config/debian/cron.weekly b/config/debian/cron.weekly
index 34f0c64a4a535aa04da929ef51c4f408ddd0e371..efb264f70a64fd3c410b42b5c870d7940c33d902 100755 (executable)
@@ -36,7 +36,7 @@ echo "Splitting queue/done"
 dak split-done > /dev/null
 
 # Vacuum the database
-echo "VACUUM; VACUUM ANALYZE;" | psql --no-psqlrc projectb 2>&1 | grep -v "^NOTICE:  Skipping.*only table owner can VACUUM it$"
+echo "VACUUM; VACUUM ANALYZE;" | psql --no-psqlrc projectb 2>&1
 
 # Do git cleanup stuff
 echo "Doing git stuff"
diff --git a/config/debian/dinstall.functions b/config/debian/dinstall.functions
new file mode 100644 (file)
index 0000000..57c67c7
--- /dev/null
@@ -0,0 +1,579 @@
+# Timestamp. Used for dinstall stat graphs
+function ts() {
+        echo "Archive maintenance timestamp ($1): $(date +%H:%M:%S)"
+}
+
+# Cleanup actions
+function cleanup() {
+       rm -f ${LOCK_DAILY}
+       rm -f ${LOCK_ACCEPTED}
+}
+
+# If we error out this one is called, *FOLLOWED* by cleanup above
+function onerror() {
+    ERRDATE=$(date "+%Y.%m.%d-%H:%M:%S")
+
+    subject="ATTENTION ATTENTION!"
+    if [ "${error}" = "false" ]; then
+        subject="${subject} (continued)"
+    else
+        subject="${subject} (interrupted)"
+    fi
+    subject="${subject} dinstall error at ${ERRDATE} in ${STAGEFILE} - (Be quiet, Brain, or I'll stab you with a Q-tip)"
+
+    cat "${STAGEFILE}.log" | mail -s "${subject}" -a "X-Debian: DAK" cron@ftp-master.debian.org
+}
+
+########################################################################
+# the actual dinstall functions follow                                 #
+########################################################################
+
+# pushing merkels QA user, part one
+function merkel1() {
+    log "Telling merkels QA user that we start dinstall"
+    ssh -2 -i ~dak/.ssh/push_merkel_qa  -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 qa@merkel.debian.org sleep 1
+}
+
+# Create the postgres dump files
+function pgdump_pre() {
+    log "Creating pre-daily-cron-job backup of projectb database..."
+    pg_dump projectb > $base/backup/dump_pre_$(date +%Y.%m.%d-%H:%M:%S)
+}
+
+function pgdump_post() {
+    log "Creating post-daily-cron-job backup of projectb database..."
+    cd $base/backup
+    POSTDUMP=$(date +%Y.%m.%d-%H:%M:%S)
+    pg_dump projectb > $base/backup/dump_$POSTDUMP
+    pg_dumpall --globals-only > $base/backup/dumpall_$POSTDUMP
+    ln -sf $base/backup/dump_$POSTDUMP current
+    ln -sf $base/backup/dumpall_$POSTDUMP currentall
+}
+
+# Load the dak-dev projectb
+function pgdakdev() {
+    cd $base/backup
+    echo "drop database projectb" | psql -p 5433 template1
+       cat currentall | psql -p 5433 template1
+    createdb -p 5433 -T template0 projectb
+    fgrep -v '\connect' current | psql -p 5433 projectb
+}
+
+# Updating various files
+function updates() {
+    log "Updating Bugs docu, Mirror list and mailing-lists.txt"
+    cd $configdir
+    $scriptsdir/update-bugdoctxt
+    $scriptsdir/update-mirrorlists
+    $scriptsdir/update-mailingliststxt
+    $scriptsdir/update-pseudopackages.sh
+}
+
+# Process (oldstable)-proposed-updates "NEW" queue
+function punew_do() {
+    cd "${queuedir}/${1}"
+    date -u -R >> REPORT
+    dak process-new -a -C COMMENTS >> REPORT || true
+    echo >> REPORT
+}
+function punew() {
+    log "Doing automated p-u-new processing"
+    punew_do "$1"
+}
+function opunew() {
+    log "Doing automated o-p-u-new processing"
+    punew_do "$1"
+}
+
+# The first i18n one, syncing new descriptions
+function i18n1() {
+    log "Synchronizing i18n package descriptions"
+    # First sync their newest data
+    cd ${scriptdir}/i18nsync
+    rsync -aq --delete --delete-after ddtp-sync:/does/not/matter . || true
+
+    # Now check if we still know about the packages for which they created the files
+    # is the timestamp signed by us?
+    if $(gpgv --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg timestamp.gpg timestamp); then
+        # now read it. As its signed by us we are sure the content is what we expect, no need
+        # to do more here. And we only test -d a directory on it anyway.
+        TSTAMP=$(cat timestamp)
+        # do we have the dir still?
+        if [ -d ${scriptdir}/i18n/${TSTAMP} ]; then
+            # Lets check!
+            if ${scriptsdir}/ddtp-i18n-check.sh . ${scriptdir}/i18n/${TSTAMP}; then
+                # Yay, worked, lets copy around
+                for dir in squeeze sid; do
+                    if [ -d dists/${dir}/ ]; then
+                        cd dists/${dir}/main/i18n
+                        rsync -aq --delete --delete-after  . ${ftpdir}/dists/${dir}/main/i18n/.
+                    fi
+                    cd ${scriptdir}/i18nsync
+                done
+            else
+                echo "ARRRR, bad guys, wrong files, ARRR"
+                echo "Arf, Arf, Arf, bad guys, wrong files, arf, arf, arf" | mail -s "Don't you kids take anything. I'm watching you. I've got eye implants in the back of my head." debian-l10n-devel@lists.alioth.debian.org
+            fi
+        else
+            echo "ARRRR, missing the timestamp ${TSTAMP} directory, not updating i18n, ARRR"
+            echo "Arf, Arf, Arf, missing the timestamp ${TSTAMP} directory, not updating i18n, arf, arf, arf" | mail -s "Lisa, if you don't like your job you don't strike. You just go in every day and do it really half-assed. That's the American way." debian-l10n-devel@lists.alioth.debian.org
+        fi
+    else
+        echo "ARRRRRRR, could not verify our timestamp signature, ARRR. Don't mess with our files, i18n guys, ARRRRR."
+        echo "Arf, Arf, Arf, could not verify our timestamp signature, arf. Don't mess with our files, i18n guys, arf, arf, arf" | mail -s "You can't keep blaming yourself. Just blame yourself once, and move on." debian-l10n-devel@lists.alioth.debian.org
+    fi
+}
+
+function cruft() {
+    log "Checking for cruft in overrides"
+    dak check-overrides
+}
+
+function msfl() {
+    log "Generating suite file lists for apt-ftparchive"
+    dak make-suite-file-list
+}
+
+function filelist() {
+    log "Generating file lists for apt-ftparchive"
+    dak generate-filelist
+}
+
+function fingerprints() {
+    log "Not updating fingerprints - scripts needs checking"
+
+#    log "Updating fingerprints"
+#    dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg
+
+#    OUTFILE=$(mktemp)
+#    dak import-keyring --generate-users "%s" /srv/keyring.debian.org/keyrings/debian-maintainers.gpg >"${OUTFILE}"
+
+#    if [ -s "${OUTFILE}" ]; then
+#        /usr/sbin/sendmail -odq -oi -t -f envelope@ftp-master.debian.org <<EOF
+#From: Debian FTP Masters <ftpmaster@ftp-master.debian.org>
+#To: <debian-project@lists.debian.org>
+#Subject: Debian Maintainers Keyring changes
+#Content-Type: text/plain; charset=utf-8
+#MIME-Version: 1.0
+#
+#The following changes to the debian-maintainers keyring have just been activated:
+#
+#$(cat $OUTFILE)
+#
+#Debian distribution maintenance software,
+#on behalf of the Keyring maintainers
+#
+#EOF
+#    fi
+#    rm -f "$OUTFILE"
+}
+
+function overrides() {
+    log "Writing overrides into text files"
+    cd $overridedir
+    dak make-overrides
+
+    # FIXME
+    rm -f override.sid.all3
+    for i in main contrib non-free main.debian-installer; do cat override.sid.$i >> override.sid.all3; done
+}
+
+function mpfm() {
+    log "Generating package / file mapping"
+    dak make-pkg-file-mapping | bzip2 -9 > $base/ftp/indices/package-file.map.bz2
+}
+
+function packages() {
+    log "Generating Packages and Sources files"
+    cd $configdir
+    GZIP='--rsyncable' ; export GZIP
+    apt-ftparchive generate apt.conf
+}
+
+function pdiff() {
+    log "Generating pdiff files"
+    dak generate-index-diffs
+}
+
+function release() {
+    log "Generating Release files"
+    dak generate-releases
+}
+
+function dakcleanup() {
+    log "Cleanup old packages/files"
+    dak clean-suites -m 10000
+    dak clean-queues
+}
+
+function buildd() {
+    # Needs to be rebuilt, as files have moved.  Due to unaccepts, we need to
+    # update this before wanna-build is updated.
+    log "Regenerating wanna-build/buildd information"
+    psql projectb -A -t -q -c "SELECT build_queue.path || '/' || build_queue_files.filename FROM build_queue_files LEFT JOIN build_queue ON (build_queue.id = build_queue_files.build_queue_id) WHERE queue_name = 'buildd' AND filename ~ 'd(sc|eb)$'" > $dbdir/dists/unstable_accepted.list
+    symlinks -d /srv/incoming.debian.org/buildd > /dev/null
+    apt-ftparchive generate apt.conf.buildd
+}
+
+function buildd_dir() {
+    # Rebuild the buildd dir to avoid long periods of 403s
+    log "Regenerating the buildd incoming dir"
+    STAMP=$(date "+%Y%m%d%H%M")
+    make_buildd_dir
+}
+
+function mklslar() {
+    cd $ftpdir
+
+    FILENAME=ls-lR
+
+    log "Removing any core files ..."
+    find -type f -name core -print0 | xargs -0r rm -v
+
+    log "Checking permissions on files in the FTP tree ..."
+    find -type f \( \! -perm -444 -o -perm +002 \) -ls
+    find -type d \( \! -perm -555 -o -perm +002 \) -ls
+
+    log "Checking symlinks ..."
+    symlinks -rd .
+
+    log "Creating recursive directory listing ... "
+    rm -f .${FILENAME}.new
+    TZ=UTC ls -lR > .${FILENAME}.new
+
+    if [ -r ${FILENAME}.gz ] ; then
+        mv -f ${FILENAME}.gz ${FILENAME}.old.gz
+        mv -f .${FILENAME}.new ${FILENAME}
+        rm -f ${FILENAME}.patch.gz
+        zcat ${FILENAME}.old.gz | diff -u - ${FILENAME} | gzip --rsyncable -9cfn - >${FILENAME}.patch.gz
+        rm -f ${FILENAME}.old.gz
+    else
+        mv -f .${FILENAME}.new ${FILENAME}
+    fi
+
+    gzip --rsyncable -9cfN ${FILENAME} >${FILENAME}.gz
+    rm -f ${FILENAME}
+}
+
+function mkmaintainers() {
+    log -n 'Creating Maintainers index ... '
+
+    cd $indices
+    dak make-maintainers ${scriptdir}/masterfiles/pseudo-packages.maintainers | \
+        sed -e "s/~[^  ]*\([   ]\)/\1/"  | awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}' > .new-maintainers
+
+    set +e
+    cmp .new-maintainers Maintainers >/dev/null
+    rc=$?
+    set -e
+    if [ $rc = 1 ] || [ ! -f Maintainers ] ; then
+           log -n "installing Maintainers ... "
+           mv -f .new-maintainers Maintainers
+           gzip --rsyncable -9v <Maintainers >.new-maintainers.gz
+           mv -f .new-maintainers.gz Maintainers.gz
+    elif [ $rc = 0 ] ; then
+           log '(same as before)'
+           rm -f .new-maintainers
+    else
+           log cmp returned $rc
+           false
+    fi
+}
+
+function copyoverrides() {
+    log 'Copying override files into public view ...'
+
+    for f in $copyoverrides ; do
+           cd $overridedir
+           chmod g+w override.$f
+
+           cd $indices
+           rm -f .newover-$f.gz
+           pc="`gzip 2>&1 -9nv <$overridedir/override.$f >.newover-$f.gz`"
+           set +e
+           nf=override.$f.gz
+           cmp -s .newover-$f.gz $nf
+           rc=$?
+           set -e
+        if [ $rc = 0 ]; then
+                   rm -f .newover-$f.gz
+           elif [ $rc = 1 -o ! -f $nf ]; then
+                   log "   installing new $nf $pc"
+                   mv -f .newover-$f.gz $nf
+                   chmod g+w $nf
+           else
+                   log $? $pc
+                   exit 1
+           fi
+    done
+}
+
+function mkfilesindices() {
+    umask 002
+    cd $base/ftp/indices/files/components
+
+    ARCHLIST=$(tempfile)
+
+    log "Querying projectb..."
+    echo 'SELECT l.path, f.filename, a.arch_string FROM location l JOIN files f ON (f.location = l.id) LEFT OUTER JOIN (binaries b JOIN architecture a ON (b.architecture = a.id)) ON (f.id = b.file)' | psql projectb -At | sed 's/|//;s,^/srv/ftp.debian.org/ftp,.,' | sort >$ARCHLIST
+
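+    # Helper: for each path read, also emit every parent directory (each once).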
+    includedirs () {
+        perl -ne 'print; while (m,/[^/]+$,) { $_=$`; print $_ . "\n" unless $d{$_}++; }'
+    }
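+    # Helper: emit ./pool/ paths first, holding all other lines back until the end.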
+    poolfirst () {
+        perl -e '@nonpool=(); while (<>) { if (m,^\./pool/,) { print; } else { push @nonpool, $_; } } print for (@nonpool);'
+    }
+
+    log "Generating sources list
+    (
+        sed -n 's/|$//p' $ARCHLIST
+        cd $base/ftp
+        find ./dists -maxdepth 1 \! -type d
+        find ./dists \! -type d | grep "/source/"
+    ) | sort -u | gzip --rsyncable -9 > source.list.gz
+
+    log "Generating arch lists
+
+    ARCHES=$( (<$ARCHLIST sed -n 's/^.*|//p'; echo amd64) | grep . | grep -v all | sort -u)
+    for a in $ARCHES; do
+        (sed -n "s/|$a$//p" $ARCHLIST
+            sed -n 's/|all$//p' $ARCHLIST
+
+            cd $base/ftp
+            find ./dists -maxdepth 1 \! -type d
+            find ./dists \! -type d | grep -E "(proposed-updates.*_$a.changes$|/main/disks-$a/|/main/installer-$a/|/Contents-$a|/binary-$a/)"
+        ) | sort -u | gzip --rsyncable -9 > arch-$a.list.gz
+    done
+
+    log "Generating suite lists"
+
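+    # All pool files referenced by suite $1: source files first, then binaries.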
+    suite_list () {
+        printf 'SELECT DISTINCT l.path, f.filename FROM (SELECT sa.source AS source FROM src_associations sa WHERE sa.suite = %d UNION SELECT b.source AS source FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) WHERE ba.suite = %d) s JOIN dsc_files df ON (s.source = df.source) JOIN files f ON (df.file = f.id) JOIN location l ON (f.location = l.id)\n' $1 $1 | psql -F' ' -A -t projectb
+
+        printf 'SELECT l.path, f.filename FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) JOIN files f ON (b.file = f.id) JOIN location l ON (f.location = l.id) WHERE ba.suite = %d\n' $1 | psql -F' ' -A -t projectb
+    }
+
+    printf 'SELECT id, suite_name FROM suite\n' | psql -F' ' -At projectb |
+    while read id suite; do
+        [ -e $base/ftp/dists/$suite ] || continue
+        (
+            (cd $base/ftp
+                distname=$(cd dists; readlink $suite || echo $suite)
+                find ./dists/$distname \! -type d
+                for distdir in ./dists/*; do
+                    [ "$(readlink $distdir)" != "$distname" ] || echo $distdir
+                done
+            )
+            suite_list $id | tr -d ' ' | sed 's,^/srv/ftp.debian.org/ftp,.,'
+        ) | sort -u | gzip --rsyncable -9 > suite-${suite}.list.gz
+    done
+
+    log "Finding everything on the ftp site to generate sundries"
+
+    (cd $base/ftp; find . \! -type d \! -name 'Archive_Maintenance_In_Progress' | sort) >$ARCHLIST
+
+    rm -f sundries.list
+    zcat *.list.gz | cat - *.list | sort -u |
+    diff - $ARCHLIST | sed -n 's/^> //p' > sundries.list
+
+    log "Generating files list"
+
+    for a in $ARCHES; do
+        (echo ./project/trace; zcat arch-$a.list.gz source.list.gz) |
+        cat - sundries.list dists.list project.list docs.list indices.list |
+        sort -u | poolfirst > ../arch-$a.files
+    done
+
+    (cd $base/ftp/
+           for dist in sid squeeze; do
+                   find ./dists/$dist/main/i18n/ \! -type d | sort -u | gzip --rsyncable -9 > $base/ftp/indices/files/components/translation-$dist.list.gz
+           done
+    )
+
+    (cat ../arch-i386.files ../arch-amd64.files; zcat suite-oldstable.list.gz suite-proposed-updates.list.gz ; zcat translation-sid.list.gz ; zcat translation-squeeze.list.gz) |
+    sort -u | poolfirst > ../typical.files
+
+    rm -f $ARCHLIST
+    log "Done!"
+}
+
+function mkchecksums() {
+    dsynclist=$dbdir/dsync.list
+    md5list=$indices/md5sums
+
+    log -n "Creating md5 / dsync index file ... "
+
+    cd "$ftpdir"
+    ${bindir}/dsync-flist -q generate $dsynclist --exclude $dsynclist --md5
+    ${bindir}/dsync-flist -q md5sums $dsynclist | gzip -9n --rsyncable > ${md5list}.gz
+    ${bindir}/dsync-flist -q link-dups $dsynclist || true
+}
+
+function scripts() {
+    log "Running various scripts from $scriptsdir"
+    mkmaintainers
+    copyoverrides
+    mklslar
+    mkfilesindices
+    mkchecksums
+}
+
+function mirror() {
+    log "Regenerating \"public\" mirror/ hardlink fun"
+    cd ${mirrordir}
+    rsync -aH --link-dest ${ftpdir} --delete --delete-after --ignore-errors ${ftpdir}/. .
+}
+
+function wb() {
+    log "Trigger daily wanna-build run"
+    ssh -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 wbadm@buildd /org/wanna-build/trigger.daily || echo "W-B trigger.daily failed" | mail -s "W-B Daily trigger failed" ftpmaster@ftp-master.debian.org
+}
+
+function expire() {
+    log "Expiring old database dumps..."
+    cd $base/backup
+    $scriptsdir/expire_dumps -d . -p -f "dump_*"
+}
+
+function transitionsclean() {
+    log "Removing out of date transitions..."
+    cd $base
+    dak transitions -c -a
+}
+
+function reports() {
+    # Send a report on NEW/BYHAND packages
+    log "Nagging ftpteam about NEW/BYHAND packages"
+    dak queue-report | mail -e -s "NEW and BYHAND on $(date +%D)" ftpmaster@ftp-master.debian.org
+    # and one on crufty packages
+    log "Sending information about crufty packages"
+    dak cruft-report > $webdir/cruft-report-daily.txt
+    dak cruft-report -s experimental >> $webdir/cruft-report-daily.txt
+    cat $webdir/cruft-report-daily.txt | mail -e -s "Debian archive cruft report for $(date +%D)" ftpmaster@ftp-master.debian.org
+}
+
+function dm() {
+    log "Updating DM html page"
+    $scriptsdir/dm-monitor >$webdir/dm-uploaders.html
+}
+
+function bts() {
+    log "Categorizing uncategorized bugs filed against ftp.debian.org"
+    dak bts-categorize
+}
+
+function merkel2() {
+    # Push dak@merkel so it syncs the projectb there. Returns immediately, the sync runs detached
+    log "Trigger merkel/flotows projectb sync"
+    ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_projectb dak@merkel.debian.org sleep 1
+    # Also trigger flotow, the ftpmaster test box
+    ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_flotow_projectb dak@flotow.debconf.org sleep 1
+}
+
+function merkel3() {
+    # Push dak@merkel to tell it to sync the dd accessible parts. Returns immediately, the sync runs detached
+    log "Trigger merkels dd accessible parts sync"
+    ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_ddaccess dak@merkel.debian.org sleep 1
+}
+
+function mirrorpush() {
+    log "Starting the mirrorpush"
+    date -u > /srv/ftp.debian.org/web/mirrorstart
+    echo "Using dak v1" >> /srv/ftp.debian.org/web/mirrorstart
+    echo "Running on host $(hostname -f)" >> /srv/ftp.debian.org/web/mirrorstart
+    sudo -H -u archvsync /home/archvsync/runmirrors > ~dak/runmirrors.log 2>&1 &
+}
+
+function i18n2() {
+    log "Exporting package data foo for i18n project"
+    STAMP=$(date "+%Y%m%d%H%M")
+    mkdir -p ${scriptdir}/i18n/${STAMP}
+    cd ${scriptdir}/i18n/${STAMP}
+    dak control-suite -l stable > lenny
+    dak control-suite -l testing > squeeze
+    dak control-suite -l unstable > sid
+    echo "${STAMP}" > timestamp
+    gpg --secret-keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 55BE302B --detach-sign -o timestamp.gpg timestamp
+    rm -f md5sum
+    md5sum * > md5sum
+    cd ${webdir}/
+    ln -sfT ${scriptdir}/i18n/${STAMP} i18n
+
+    cd ${scriptdir}
+    find ./i18n -mindepth 1 -maxdepth 1 -mtime +2 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
+}
+
+function stats() {
+    log "Updating stats data"
+    cd $configdir
+    $scriptsdir/update-ftpstats $base/log/* > $base/misc/ftpstats.data
+    R --slave --vanilla < $base/misc/ftpstats.R
+    dak stats arch-space > $webdir/arch-space
+    dak stats pkg-nums > $webdir/pkg-nums
+}
+
+function aptftpcleanup() {
+    log "Clean up apt-ftparchive's databases"
+    cd $configdir
+    apt-ftparchive -q clean apt.conf
+}
+
+function compress() {
+    log "Compress old psql backups"
+    cd $base/backup/
+    find -maxdepth 1 -mindepth 1 -type f -name 'dump_pre_*' -mtime +2 -print0 | xargs -0 --no-run-if-empty rm
+
+    find -maxdepth 1 -mindepth 1 -type f -name 'dump_*' \! -name '*.bz2' \! -name '*.gz' -mmin +720 |
+    while read dumpname; do
+        echo "Compressing $dumpname"
+        bzip2 -9fv "$dumpname"
+    done
+    find -maxdepth 1 -mindepth 1 -type f -name "dumpall_*" \! -name '*.bz2' \! -name '*.gz' -mmin +720 |
+    while read dumpname; do
+        echo "Compressing $dumpname"
+        bzip2 -9fv "$dumpname"
+    done
+    finddup -l -d $base/backup
+}
+
+function logstats() {
+    $masterdir/tools/logs.py "$1"
+}
+
+# save timestamp when we start
+function savetimestamp() {
+       NOW=`date "+%Y.%m.%d-%H:%M:%S"`
+       echo ${NOW} > "${dbdir}/dinstallstart"
+}
+
+function maillogfile() {
+    cat "$LOGFILE" | mail -s "Log for dinstall run of ${NOW}" cron@ftp-master.debian.org
+}
+
+function renamelogfile() {
+    if [ -f "${dbdir}/dinstallstart" ]; then
+        NOW=$(cat "${dbdir}/dinstallstart")
+#        maillogfile
+        mv "$LOGFILE" "$logdir/dinstall_${NOW}.log"
+        logstats "$logdir/dinstall_${NOW}.log"
+        bzip2 -9 "$logdir/dinstall_${NOW}.log"
+    else
+        error "Problem, I don't know when dinstall started, unable to do log statistics."
+        NOW=`date "+%Y.%m.%d-%H:%M:%S"`
+#        maillogfile
+        mv "$LOGFILE" "$logdir/dinstall_${NOW}.log"
+        bzip2 -9 "$logdir/dinstall_${NOW}.log"
+    fi
+}
+
+function testingsourcelist() {
+    dak ls -s testing -f heidi -r .| egrep 'source$' > ${webdir}/testing.list
+}
+
+# do a last run of process-unchecked before dinstall is on.
+function process_unchecked() {
+    log "Processing the unchecked queue"
+    UNCHECKED_WITHOUT_LOCK="-p"
+    do_unchecked
+    sync_debbugs
+}
diff --git a/config/debian/dinstall.variables b/config/debian/dinstall.variables
new file mode 100644 (file)
index 0000000..be5b382
--- /dev/null
@@ -0,0 +1,40 @@
+# usually we are not using debug logs. Set to 1 if you want them.
+DEBUG=0
+
+# our name
+PROGRAM="dinstall"
+
+# where do we want mails to go? For example log entries made with error()
+if [ "x$(hostname -s)x" != "xriesx" ]; then
+    # Not our ftpmaster host
+    MAILTO=${MAILTO:-"root"}
+else
+    # Yay, ftpmaster
+    MAILTO=${MAILTO:-"ftpmaster@debian.org"}
+fi
+
+# How many logfiles to keep
+LOGROTATE=${LOGROTATE:-400}
+
+# Marker for dinstall start
+DINSTALLSTART="${lockdir}/dinstallstart"
+# Marker for dinstall end
+DINSTALLEND="${lockdir}/dinstallend"
+
+# lock cron.unchecked (it immediately exits when this exists)
+LOCK_DAILY="$lockdir/daily.lock"
+
+# Lock cron.unchecked from doing work
+LOCK_ACCEPTED="$lockdir/unchecked.lock"
+
+# Lock process-new from doing work
+LOCK_NEW="$lockdir/processnew.lock"
+
+# This file is simply used to indicate to britney whether or not
+# the Packages file updates completed successfully.  It's not a lock
+# from our point of view
+LOCK_BRITNEY="$lockdir/britney.lock"
+
+# If this file exists we exit immediately after the currently running
+# function is done
+LOCK_STOP="$lockdir/archive.stop"
diff --git a/config/debian/lintian.tags b/config/debian/lintian.tags
index bf489144599b2392f0bdcc11492af22caf8359fd..6a11a9ecd8a36ef19846b2a38338fa27b6f9b27f 100644 (file)
@@ -1,5 +1,5 @@
 lintian:
-  warning:
+  nonfatal:
     - statically-linked-binary
     - arch-independent-package-contains-binary-or-object
     - arch-dependent-file-in-usr-share
@@ -11,11 +11,26 @@ lintian:
     - usr-share-doc-symlink-without-dependency
     - mknod-in-maintainer-script
     - package-contains-info-dir-file
-    - copyright-lists-upstream-authors-with-dh_make-boilerplate
-  error:
+    - binary-or-shlib-defines-rpath
+    - non-etc-file-marked-as-conffile
+    - embedded-zlib
+    - no-shlibs-control-file
+    - copyright-contains-dh_make-todo-boilerplate
+    - preinst-interpreter-without-predepends
+    - control-interpreter-without-depends
+  fatal:
+    - debian-control-file-uses-obsolete-national-encoding
+    - malformed-deb-archive
+    - bad-package-name
+    - no-architecture-field
+    - package-contains-ancient-file
+    - forbidden-postrm-interpreter
+    - control-interpreter-in-usr-local
+    - package-uses-local-diversion
     - wrong-file-owner-uid-or-gid
     - bad-relation
     - FSSTND-dir-in-usr
+    - FSSTND-dir-in-var
     - binary-in-etc
     - missing-dependency-on-perlapi
     - section-is-dh_make-template
@@ -61,7 +76,6 @@ lintian:
     - uploader-address-is-on-localhost
     - no-source-field
     - source-field-does-not-match-pkg-name
-    - section-is-dh_make-template
     - build-depends-on-essential-package-without-using-version
     - depends-on-build-essential-package-without-using-version
     - build-depends-on-build-essential
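
The renamed nonfatal/fatal sections are consumed by the new daklib/lintian.py (its hunk is not shown on this page). A hypothetical sketch of the classification this split enables -- parsing and names invented for illustration, not dak's actual API:

    import yaml

    def classify_lintian_tags(tags_path, lintian_output):
        """Hypothetical helper: bucket emitted lintian tags as fatal/nonfatal."""
        with open(tags_path) as fh:
            tags = yaml.safe_load(fh)["lintian"]
        fatal, nonfatal = set(tags["fatal"]), set(tags["nonfatal"])
        hits = {"fatal": [], "nonfatal": []}
        for line in lintian_output.splitlines():
            # lintian lines look like "E: package: tag-name extra detail"
            parts = line.split(": ")
            if len(parts) < 3:
                continue
            tag = parts[2].split()[0]
            if tag in fatal:
                hits["fatal"].append(tag)
            elif tag in nonfatal:
                hits["nonfatal"].append(tag)
        return hits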
diff --git a/dak/clean_suites.py b/dak/clean_suites.py
index 72a1d5a8a545bc4695065e37706b9aa2c7a56721..99f0c8b4629162018a54936baf381814a4edd3da 100755 (executable)
@@ -164,6 +164,7 @@ SELECT id, filename FROM files f
   WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id)
     AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id)
     AND NOT EXISTS (SELECT 1 FROM changes_pool_files cpf WHERE cpf.fileid = f.id)
+    AND NOT EXISTS (SELECT 1 FROM queue_files qf WHERE qf.id = f.id)
     AND last_used IS NULL
     ORDER BY filename""")
 
@@ -337,7 +338,7 @@ def clean_queue_build(now_date, delete_date, max_delete, session):
     our_delete_date = now_date - timedelta(seconds = int(cnf["Clean-Suites::QueueBuildStayOfExecution"]))
     count = 0
 
-    for qf in session.query(QueueBuild).filter(QueueBuild.last_used <= our_delete_date):
+    for qf in session.query(BuildQueueFile).filter(BuildQueueFile.last_used <= our_delete_date):
         if not os.path.exists(qf.filename):
             utils.warn("%s (from queue_build) doesn't exist." % (qf.filename))
             continue
diff --git a/dak/dak.py b/dak/dak.py
index e424836f750b1a1f683d027ade063de8359ea5f4..cd42c3ed4bcaa70bd76bd17268c322c3bf43c8b0 100755 (executable)
@@ -33,6 +33,7 @@ G{importgraph}
 
 ################################################################################
 
+import os
 import sys
 import traceback
 import daklib.utils
@@ -66,15 +67,15 @@ def init():
 
         ("process-new",
          "Process NEW and BYHAND packages"),
-        ("process-unchecked",
+        ("process-upload",
          "Process packages in queue/unchecked"),
-        ("process-accepted",
-         "Install packages into the pool"),
 
         ("make-suite-file-list",
          "Generate lists of packages per suite for apt-ftparchive"),
         ("make-pkg-file-mapping",
          "Generate package <-> file mapping"),
+        ("generate-filelist",
+         "Generate file lists for apt-ftparchive"),
         ("generate-releases",
          "Generate Release files"),
         ("contents",
@@ -221,4 +222,6 @@ def main():
 ################################################################################
 
 if __name__ == "__main__":
+    os.environ['LANG'] = 'C'
+    os.environ['LC_ALL'] = 'C'
     main()
diff --git a/dak/dakdb/update22.py b/dak/dakdb/update22.py
new file mode 100755 (executable)
index 0000000..b6fbbb4
--- /dev/null
@@ -0,0 +1,240 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Clean up queue SQL
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009  Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+
+################################################################################
+
+import psycopg2
+import time
+import os
+import datetime
+import traceback
+
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+################################################################################
+
+def do_update(self):
+    print "Splitting up queues and fixing general design mistakes"
+
+    try:
+        c = self.db.cursor()
+
+        cnf = Config()
+
+        print "Adding build_queue table"
+        c.execute("""CREATE TABLE build_queue (
+                            id          SERIAL PRIMARY KEY,
+                            queue_name  TEXT NOT NULL UNIQUE,
+                            path        TEXT NOT NULL,
+                            copy_files  BOOL DEFAULT FALSE NOT NULL)""")
+
+        print "Adding policy_queue table"
+        c.execute("""CREATE TABLE policy_queue (
+                            id           SERIAL PRIMARY KEY,
+                            queue_name   TEXT NOT NULL UNIQUE,
+                            path         TEXT NOT NULL,
+                            perms        CHAR(4) NOT NULL DEFAULT '0660' CHECK (perms SIMILAR TO '[0-7][0-7][0-7][0-7]'),
+                            change_perms CHAR(4) NOT NULL DEFAULT '0660' CHECK (change_perms SIMILAR TO '[0-7][0-7][0-7][0-7]')
+                            )""")
+
+        print "Copying queues"
+        queues = {}
+        c.execute("""SELECT queue.id, queue.queue_name, queue.path, queue.copy_pool_files FROM queue""")
+
+        for q in c.fetchall():
+            queues[q[0]] = q[1]
+            if q[1] in ['accepted', 'buildd']:
+                # Move to build_queue_table
+                c.execute("""INSERT INTO build_queue (queue_name, path, copy_files)
+                                   VALUES ('%s', '%s', '%s')""" % (q[1], q[2], q[3]))
+
+            else:
+                # Move to policy_queue_table
+                c.execute("""INSERT INTO policy_queue (queue_name, path)
+                                   VALUES ('%s', '%s')""" % (q[1], q[2]))
+
+
+        print "Fixing up build_queue_files"
+        c.execute("""ALTER TABLE queue_files DROP CONSTRAINT queue_files_queueid_fkey""")
+        c.execute("""ALTER TABLE queue_files RENAME TO build_queue_files""")
+        c.execute("""ALTER TABLE build_queue_files RENAME COLUMN queueid TO build_queue_id""")
+
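+        # The renamed column still holds old queue ids; remap them to the new
+        # build_queue rows by matching queue_name through the old queue table.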
+        c.execute("""UPDATE build_queue_files
+                        SET build_queue_id = (SELECT build_queue.id FROM build_queue
+                                               WHERE build_queue.queue_name =
+                                                (SELECT queue.queue_name FROM queue
+                                                  WHERE queue.id = build_queue_files.build_queue_id))""")
+
+        c.execute("""ALTER TABLE build_queue_files
+                       ADD CONSTRAINT build_queue_files_build_queue_id_fkey
+                       FOREIGN KEY (build_queue_id)
+                       REFERENCES build_queue(id)
+                       ON DELETE CASCADE""")
+
+
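+        # Same name-based remap for suite.policy_queue_id, this time
+        # pointing it at the new policy_queue table.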
+        c.execute("""ALTER TABLE suite DROP CONSTRAINT suite_policy_queue_id_fkey""")
+
+        c.execute("""UPDATE suite
+    SET policy_queue_id = (SELECT policy_queue.id FROM policy_queue
+                             WHERE policy_queue.queue_name =
+                              (SELECT queue.queue_name FROM queue
+                               WHERE queue.id = suite.policy_queue_id))""")
+
+        c.execute("""ALTER TABLE suite
+                       ADD CONSTRAINT suite_policy_queue_fkey
+                       FOREIGN KEY (policy_queue_id)
+                       REFERENCES policy_queue (id)
+                       ON DELETE RESTRICT""")
+
+        c.execute("""ALTER TABLE known_changes DROP CONSTRAINT known_changes_approved_for_fkey""")
+        c.execute("""ALTER TABLE known_changes DROP CONSTRAINT known_changes_in_queue_fkey""")
+
+        c.execute("""UPDATE known_changes
+    SET in_queue = (SELECT policy_queue.id FROM policy_queue
+                             WHERE policy_queue.queue_name =
+                              (SELECT queue.queue_name FROM queue
+                               WHERE queue.id = known_changes.in_queue))""")
+
+        c.execute("""ALTER TABLE known_changes
+                       ADD CONSTRAINT known_changes_in_queue_fkey
+                       FOREIGN KEY (in_queue)
+                       REFERENCES policy_queue (id)
+                       ON DELETE RESTRICT""")
+
+
+
+        c.execute("""UPDATE known_changes
+    SET approved_for = (SELECT policy_queue.id FROM policy_queue
+                               WHERE policy_queue.queue_name =
+                                (SELECT queue.queue_name FROM queue
+                                  WHERE queue.id = known_changes.approved_for))""")
+
+        c.execute("""ALTER TABLE known_changes
+                       ADD CONSTRAINT known_changes_approved_for_fkey
+                       FOREIGN KEY (approved_for)
+                       REFERENCES policy_queue (id)
+                       ON DELETE RESTRICT""")
+
+        c.execute("""ALTER TABLE suite_queue_copy RENAME TO suite_build_queue_copy""")
+
+        c.execute("""ALTER TABLE suite_build_queue_copy DROP CONSTRAINT suite_queue_copy_queue_fkey""")
+
+        c.execute("""ALTER TABLE suite_build_queue_copy RENAME COLUMN queue TO build_queue_id""")
+
+        c.execute("""UPDATE suite_build_queue_copy
+    SET build_queue_id = (SELECT build_queue.id FROM build_queue
+                                 WHERE build_queue.queue_name =
+                                (SELECT queue.queue_name FROM queue
+                                  WHERE queue.id = suite_build_queue_copy.build_queue_id))""")
+
+        c.execute("""ALTER TABLE suite_build_queue_copy
+                       ADD CONSTRAINT suite_build_queue_copy_build_queue_id_fkey
+                       FOREIGN KEY (build_queue_id)
+                       REFERENCES build_queue (id)
+                       ON DELETE RESTRICT""")
+
+        c.execute("""DROP TABLE changes_pending_files""")
+
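+        # Recreate changes_pending_files with size and checksum columns;
+        # changes_pending_files_map below links pending files to changes
+        # many-to-many.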
+        c.execute("""CREATE TABLE changes_pending_files (
+                            id             SERIAL PRIMARY KEY,
+                            filename       TEXT NOT NULL UNIQUE,
+                            size           BIGINT NOT NULL,
+                            md5sum         TEXT NOT NULL,
+                            sha1sum        TEXT NOT NULL,
+                            sha256sum      TEXT NOT NULL )""")
+
+        c.execute("""CREATE TABLE changes_pending_files_map (
+                            file_id        INT4 NOT NULL REFERENCES changes_pending_files (id),
+                            change_id      INT4 NOT NULL REFERENCES known_changes (id),
+
+                            PRIMARY KEY (file_id, change_id))""")
+
+        c.execute("""CREATE TABLE changes_pending_source (
+                            id             SERIAL PRIMARY KEY,
+                            change_id      INT4 NOT NULL REFERENCES known_changes (id),
+                            source         TEXT NOT NULL,
+                            version        DEBVERSION NOT NULL,
+                            maintainer_id  INT4 NOT NULL REFERENCES maintainer (id),
+                            changedby_id   INT4 NOT NULL REFERENCES maintainer (id),
+                            sig_fpr        INT4 NOT NULL REFERENCES fingerprint (id),
+                            dm_upload_allowed BOOL NOT NULL DEFAULT FALSE )""")
+
+        c.execute("""CREATE TABLE changes_pending_source_files (
+                            pending_source_id INT4 REFERENCES changes_pending_source (id) NOT NULL,
+                            pending_file_id   INT4 REFERENCES changes_pending_files (id) NOT NULL,
+
+                            PRIMARY KEY (pending_source_id, pending_file_id) )""")
+
+        c.execute("""CREATE TABLE changes_pending_binaries (
+                            id                 SERIAL PRIMARY KEY,
+                            change_id          INT4 NOT NULL REFERENCES known_changes (id),
+                            package            TEXT NOT NULL,
+                            version            DEBVERSION NOT NULL,
+                            architecture_id    INT4 REFERENCES architecture (id) NOT NULL,
+                            source_id          INT4 REFERENCES source (id),
+                            pending_source_id  INT4 REFERENCES changes_pending_source (id),
+                            pending_file_id    INT4 REFERENCES changes_pending_files (id),
+
+                            UNIQUE (package, version, architecture_id),
+                            CHECK (source_id IS NOT NULL or pending_source_id IS NOT NULL ) )""")
+
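+        # Only now that every foreign key has been repointed can the old
+        # combined queue table go away.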
+        print "Getting rid of old queue table"
+        c.execute("""DROP TABLE queue""")
+
+        print "Sorting out permission columns"
+        c.execute("""UPDATE policy_queue SET perms = '0664' WHERE queue_name IN ('proposedupdates', 'oldproposedupdates')""")
+
+        print "Moving known_changes table"
+        c.execute("""ALTER TABLE known_changes RENAME TO changes""")
+
+        print "Sorting out permissions"
+
+        for t in ['build_queue', 'policy_queue', 'build_queue_files',
+                  'changes_pending_binaries', 'changes_pending_source_files',
+                  'changes_pending_source', 'changes_pending_files',
+                  'changes_pool_files', 'suite_build_queue_copy']:
+            c.execute("GRANT SELECT ON %s TO public" % t)
+            c.execute("GRANT ALL ON %s TO ftpmaster" % t)
+
+        for s in ['queue_files_id_seq', 'build_queue_id_seq',
+                  'changes_pending_source_id_seq',
+                  'changes_pending_binaries_id_seq',
+                  'changes_pending_files_id_seq',
+                  'known_changes_id_seq',
+                  'policy_queue_id_seq']:
+            c.execute("GRANT USAGE ON %s TO ftpmaster" % s)
+
+        print "Committing"
+        c.execute("UPDATE config SET value = '22' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.InternalError, msg:
+        self.db.rollback()
+        raise DBUpdateError, "Unable to apply queue split update 22, rollback issued. Error message : %s" % (str(msg))
old mode 100644 (file)
new mode 100755 (executable)
index a61deb6..48abf01
@@ -1,11 +1,10 @@
 #!/usr/bin/env python
-# coding=utf8
 
 """
-Adding a trainee field to the process-new notes
+Add view for new generate_filelist command.
 
 @contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2009  Mike O'Connor <stew@debian.org>
+@copyright: 2009  Torsten Werner <twerner@debian.org>
 @license: GNU General Public License version 2 or later
 """
 
@@ -23,246 +22,43 @@ Adding a trainee field to the process-new notes
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
-################################################################################
-
-
-################################################################################
-
 import psycopg2
-import time
-from daklib.dak_exceptions import DBUpdateError
-
-################################################################################
-
-def suites():
-    """
-    return a list of suites to operate on
-    """
-    if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
-        suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
-    else:
-        suites = [ 'unstable', 'testing' ]
-#            suites = Config().SubTree("Suite").List()
-
-    return suites
-
-def arches(cursor, suite):
-    """
-    return a list of archs to operate on
-    """
-    arch_list = []
-    cursor.execute("""SELECT s.architecture, a.arch_string
-    FROM suite_architectures s
-    JOIN architecture a ON (s.architecture=a.id)
-    WHERE suite = :suite""", {'suite' : suite })
-
-    while True:
-        r = cursor.fetchone()
-        if not r:
-            break
-
-        if r[1] != "source" and r[1] != "all":
-            arch_list.append((r[0], r[1]))
-
-    return arch_list
 
 def do_update(self):
-    """
-    Adding contents table as first step to maybe, finally getting rid
-    of apt-ftparchive
-    """
-
-    print __doc__
+    print "Add views for generate_filelist to database."
 
     try:
         c = self.db.cursor()
 
-        c.execute("""CREATE TABLE pending_bin_contents (
-        id serial NOT NULL,
-        package text NOT NULL,
-        version debversion NOT NULL,
-        arch int NOT NULL,
-        filename text NOT NULL,
-        type int NOT NULL,
-        PRIMARY KEY(id))""" );
-
-        c.execute("""CREATE TABLE deb_contents (
-        filename text,
-        section text,
-        package text,
-        binary_id integer,
-        arch integer,
-        suite integer)""" )
-
-        c.execute("""CREATE TABLE udeb_contents (
-        filename text,
-        section text,
-        package text,
-        binary_id integer,
-        suite integer,
-        arch integer)""" )
-
-        c.execute("""ALTER TABLE ONLY deb_contents
-        ADD CONSTRAINT deb_contents_arch_fkey
-        FOREIGN KEY (arch) REFERENCES architecture(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""ALTER TABLE ONLY udeb_contents
-        ADD CONSTRAINT udeb_contents_arch_fkey
-        FOREIGN KEY (arch) REFERENCES architecture(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""ALTER TABLE ONLY deb_contents
-        ADD CONSTRAINT deb_contents_pkey
-        PRIMARY KEY (filename,package,arch,suite);""")
-
-        c.execute("""ALTER TABLE ONLY udeb_contents
-        ADD CONSTRAINT udeb_contents_pkey
-        PRIMARY KEY (filename,package,arch,suite);""")
-
-        c.execute("""ALTER TABLE ONLY deb_contents
-        ADD CONSTRAINT deb_contents_suite_fkey
-        FOREIGN KEY (suite) REFERENCES suite(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""ALTER TABLE ONLY udeb_contents
-        ADD CONSTRAINT udeb_contents_suite_fkey
-        FOREIGN KEY (suite) REFERENCES suite(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""ALTER TABLE ONLY deb_contents
-        ADD CONSTRAINT deb_contents_binary_fkey
-        FOREIGN KEY (binary_id) REFERENCES binaries(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""ALTER TABLE ONLY udeb_contents
-        ADD CONSTRAINT udeb_contents_binary_fkey
-        FOREIGN KEY (binary_id) REFERENCES binaries(id)
-        ON DELETE CASCADE;""")
-
-        c.execute("""CREATE INDEX ind_deb_contents_binary ON deb_contents(binary_id);""" )
-
-
-        suites = self.suites()
-
-        for suite in [i.lower() for i in suites]:
-            suite_id = DBConn().get_suite_id(suite)
-            arch_list = arches(c, suite_id)
-            arch_list = arches(c, suite_id)
-
-            for (arch_id,arch_str) in arch_list:
-                c.execute( "CREATE INDEX ind_deb_contents_%s_%s ON deb_contents (arch,suite) WHERE (arch=2 OR arch=%d) AND suite=$d"%(arch_str,suite,arch_id,suite_id) )
-
-            for section, sname in [("debian-installer","main"),
-                                  ("non-free/debian-installer", "nonfree")]:
-                c.execute( "CREATE INDEX ind_udeb_contents_%s_%s ON udeb_contents (section,suite) WHERE section=%s AND suite=$d"%(sname,suite,section,suite_id) )
-
-
-        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_bin_a() RETURNS trigger AS  $$
-    event = TD["event"]
-    if event == "DELETE" or event == "UPDATE":
-
-        plpy.execute(plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 and suite=$2",
-                                  ["int","int"]),
-                                  [TD["old"]["bin"], TD["old"]["suite"]])
-
-    if event == "INSERT" or event == "UPDATE":
-
-       content_data = plpy.execute(plpy.prepare(
-            """SELECT s.section, b.package, b.architecture, ot.type
-            FROM override o
-            JOIN override_type ot on o.type=ot.id
-            JOIN binaries b on b.package=o.package
-            JOIN files f on b.file=f.id
-            JOIN location l on l.id=f.location
-            JOIN section s on s.id=o.section
-            WHERE b.id=$1
-            AND o.suite=$2
-            """,
-            ["int", "int"]),
-            [TD["new"]["bin"], TD["new"]["suite"]])[0]
-
-       tablename="%s_contents" % content_data['type']
-
-       plpy.execute(plpy.prepare("""DELETE FROM %s
-                   WHERE package=$1 and arch=$2 and suite=$3""" % tablename,
-                   ['text','int','int']),
-                   [content_data['package'],
-                   content_data['architecture'],
-                   TD["new"]["suite"]])
-
-       filenames = plpy.execute(plpy.prepare(
-           "SELECT bc.file FROM bin_contents bc where bc.binary_id=$1",
-           ["int"]),
-           [TD["new"]["bin"]])
-
-       for filename in filenames:
-           plpy.execute(plpy.prepare(
-               """INSERT INTO %s
-                   (filename,section,package,binary_id,arch,suite)
-                   VALUES($1,$2,$3,$4,$5,$6)""" % tablename,
-               ["text","text","text","int","int","int"]),
-               [filename["file"],
-                content_data["section"],
-                content_data["package"],
-                TD["new"]["bin"],
-                content_data["architecture"],
-                TD["new"]["suite"]] )
-$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
-""")
-
-
-        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_override() RETURNS trigger AS  $$
-    event = TD["event"]
-    if event == "UPDATE":
-
-        otype = plpy.execute(plpy.prepare("SELECT type from override_type where id=$1",["int"]),[TD["new"]["type"]] )[0];
-        if otype["type"].endswith("deb"):
-            section = plpy.execute(plpy.prepare("SELECT section from section where id=$1",["int"]),[TD["new"]["section"]] )[0];
-
-            table_name = "%s_contents" % otype["type"]
-            plpy.execute(plpy.prepare("UPDATE %s set section=$1 where package=$2 and suite=$3" % table_name,
-                                      ["text","text","int"]),
-                                      [section["section"],
-                                      TD["new"]["package"],
-                                      TD["new"]["suite"]])
-
-$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
-""")
-
-        c.execute("""CREATE OR REPLACE FUNCTION update_contents_for_override()
-                      RETURNS trigger AS  $$
-    event = TD["event"]
-    if event == "UPDATE" or event == "INSERT":
-        row = TD["new"]
-        r = plpy.execute(plpy.prepare( """SELECT 1 from suite_architectures sa
-                  JOIN binaries b ON b.architecture = sa.architecture
-                  WHERE b.id = $1 and sa.suite = $2""",
-                ["int", "int"]),
-                [row["bin"], row["suite"]])
-        if not len(r):
-            plpy.error("Illegal architecture for this suite")
-
-$$ LANGUAGE plpythonu VOLATILE;""")
-
-        c.execute( """CREATE TRIGGER illegal_suite_arch_bin_associations_trigger
-                      BEFORE INSERT OR UPDATE ON bin_associations
-                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
-
-        c.execute( """CREATE TRIGGER bin_associations_contents_trigger
-                      AFTER INSERT OR UPDATE OR DELETE ON bin_associations
-                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_bin_a();""")
-        c.execute("""CREATE TRIGGER override_contents_trigger
-                      AFTER UPDATE ON override
-                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
-
-
-        c.execute( "CREATE INDEX ind_deb_contents_name ON deb_contents(package);");
-        c.execute( "CREATE INDEX ind_udeb_contents_name ON udeb_contents(package);");
-
+        print "Drop old views."
+        c.execute("DROP VIEW IF EXISTS binfiles_suite_component_arch CASCADE")
+        c.execute("DROP VIEW IF EXISTS srcfiles_suite_component CASCADE")
+
+        print "Create new views."
+        c.execute("""
+CREATE VIEW binfiles_suite_component_arch AS
+  SELECT files.filename, binaries.type, location.path, location.component,
+         bin_associations.suite, binaries.architecture
+    FROM binaries
+    JOIN bin_associations ON binaries.id = bin_associations.bin
+    JOIN files ON binaries.file = files.id
+    JOIN location ON files.location = location.id;
+           """)
+        c.execute("""
+CREATE VIEW srcfiles_suite_component AS
+  SELECT files.filename, location.path, location.component,
+         src_associations.suite
+    FROM source
+    JOIN src_associations ON source.id = src_associations.source
+    JOIN files ON source.file = files.id
+    JOIN location ON files.location = location.id;
+           """)
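+        # Sketch: 'dak generate-filelist' (added later in this commit)
+        # reads these views with queries like
+        #   SELECT path, filename FROM srcfiles_suite_component
+        #    WHERE suite = :suite AND component = :component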
+
+        print "Committing"
+        c.execute("UPDATE config SET value = '23' WHERE name = 'db_revision'")
         self.db.commit()
 
-    except psycopg2.ProgrammingError, msg:
+    except psycopg2.InternalError, msg:
         self.db.rollback()
-        raise DBUpdateError, "Unable to apply process-new update 14, rollback issued. Error message : %s" % (str(msg))
+        raise DBUpdateError, "Database error, rollback issued. Error message : %s" % (str(msg))
 
diff --git a/dak/dakdb/update25.py b/dak/dakdb/update25.py
new file mode 100644 (file)
index 0000000..a61deb6
--- /dev/null
@@ -0,0 +1,268 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Add contents tables as a first step towards getting rid of apt-ftparchive
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009  Mike O'Connor <stew@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+
+################################################################################
+
+import psycopg2
+import time
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+from daklib.dbconn import DBConn
+from daklib import utils
+
+################################################################################
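+
+# Assumed value: suites() below was lifted from the contents machinery,
+# where the option prefix is "Contents"; it is otherwise undefined here.
+options_prefix = "Contents"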
+
+def suites():
+    """
+    return a list of suites to operate on
+    """
+    if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
+        suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
+    else:
+        suites = [ 'unstable', 'testing' ]
+#            suites = Config().SubTree("Suite").List()
+
+    return suites
+
+def arches(cursor, suite):
+    """
+    return a list of archs to operate on
+    """
+    arch_list = []
+    cursor.execute("""SELECT s.architecture, a.arch_string
+    FROM suite_architectures s
+    JOIN architecture a ON (s.architecture=a.id)
+    WHERE suite = %(suite)s""", {'suite' : suite })
+
+    while True:
+        r = cursor.fetchone()
+        if not r:
+            break
+
+        if r[1] != "source" and r[1] != "all":
+            arch_list.append((r[0], r[1]))
+
+    return arch_list
+
+def do_update(self):
+    """
+    Adding contents table as first step to maybe, finally getting rid
+    of apt-ftparchive
+    """
+
+    print __doc__
+
+    try:
+        c = self.db.cursor()
+
+        c.execute("""CREATE TABLE pending_bin_contents (
+        id serial NOT NULL,
+        package text NOT NULL,
+        version debversion NOT NULL,
+        arch int NOT NULL,
+        filename text NOT NULL,
+        type int NOT NULL,
+        PRIMARY KEY(id))""" );
+
+        c.execute("""CREATE TABLE deb_contents (
+        filename text,
+        section text,
+        package text,
+        binary_id integer,
+        arch integer,
+        suite integer)""" )
+
+        c.execute("""CREATE TABLE udeb_contents (
+        filename text,
+        section text,
+        package text,
+        binary_id integer,
+        suite integer,
+        arch integer)""" )
+
+        c.execute("""ALTER TABLE ONLY deb_contents
+        ADD CONSTRAINT deb_contents_arch_fkey
+        FOREIGN KEY (arch) REFERENCES architecture(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""ALTER TABLE ONLY udeb_contents
+        ADD CONSTRAINT udeb_contents_arch_fkey
+        FOREIGN KEY (arch) REFERENCES architecture(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""ALTER TABLE ONLY deb_contents
+        ADD CONSTRAINT deb_contents_pkey
+        PRIMARY KEY (filename,package,arch,suite);""")
+
+        c.execute("""ALTER TABLE ONLY udeb_contents
+        ADD CONSTRAINT udeb_contents_pkey
+        PRIMARY KEY (filename,package,arch,suite);""")
+
+        c.execute("""ALTER TABLE ONLY deb_contents
+        ADD CONSTRAINT deb_contents_suite_fkey
+        FOREIGN KEY (suite) REFERENCES suite(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""ALTER TABLE ONLY udeb_contents
+        ADD CONSTRAINT udeb_contents_suite_fkey
+        FOREIGN KEY (suite) REFERENCES suite(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""ALTER TABLE ONLY deb_contents
+        ADD CONSTRAINT deb_contents_binary_fkey
+        FOREIGN KEY (binary_id) REFERENCES binaries(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""ALTER TABLE ONLY udeb_contents
+        ADD CONSTRAINT udeb_contents_binary_fkey
+        FOREIGN KEY (binary_id) REFERENCES binaries(id)
+        ON DELETE CASCADE;""")
+
+        c.execute("""CREATE INDEX ind_deb_contents_binary ON deb_contents(binary_id);""" )
+
+
+        suite_list = suites()
+
+        for suite in [i.lower() for i in suite_list]:
+            suite_id = DBConn().get_suite_id(suite)
+            arch_list = arches(c, suite_id)
+
+            for (arch_id,arch_str) in arch_list:
+                c.execute( "CREATE INDEX ind_deb_contents_%s_%s ON deb_contents (arch,suite) WHERE (arch=2 OR arch=%d) AND suite=%d" % (arch_str, suite, arch_id, suite_id) )
+
+            for section, sname in [("debian-installer","main"),
+                                  ("non-free/debian-installer", "nonfree")]:
+                c.execute( "CREATE INDEX ind_udeb_contents_%s_%s ON udeb_contents (section,suite) WHERE section='%s' AND suite=%d" % (sname, suite, section, suite_id) )
+
+
+        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_bin_a() RETURNS trigger AS  $$
+    event = TD["event"]
+    if event == "DELETE" or event == "UPDATE":
+
+        plpy.execute(plpy.prepare("DELETE FROM deb_contents WHERE binary_id=$1 and suite=$2",
+                                  ["int","int"]),
+                                  [TD["old"]["bin"], TD["old"]["suite"]])
+
+    if event == "INSERT" or event == "UPDATE":
+
+       content_data = plpy.execute(plpy.prepare(
+            """SELECT s.section, b.package, b.architecture, ot.type
+            FROM override o
+            JOIN override_type ot on o.type=ot.id
+            JOIN binaries b on b.package=o.package
+            JOIN files f on b.file=f.id
+            JOIN location l on l.id=f.location
+            JOIN section s on s.id=o.section
+            WHERE b.id=$1
+            AND o.suite=$2
+            """,
+            ["int", "int"]),
+            [TD["new"]["bin"], TD["new"]["suite"]])[0]
+
+       tablename="%s_contents" % content_data['type']
+
+       plpy.execute(plpy.prepare("""DELETE FROM %s
+                   WHERE package=$1 and arch=$2 and suite=$3""" % tablename,
+                   ['text','int','int']),
+                   [content_data['package'],
+                   content_data['architecture'],
+                   TD["new"]["suite"]])
+
+       filenames = plpy.execute(plpy.prepare(
+           "SELECT bc.file FROM bin_contents bc where bc.binary_id=$1",
+           ["int"]),
+           [TD["new"]["bin"]])
+
+       for filename in filenames:
+           plpy.execute(plpy.prepare(
+               """INSERT INTO %s
+                   (filename,section,package,binary_id,arch,suite)
+                   VALUES($1,$2,$3,$4,$5,$6)""" % tablename,
+               ["text","text","text","int","int","int"]),
+               [filename["file"],
+                content_data["section"],
+                content_data["package"],
+                TD["new"]["bin"],
+                content_data["architecture"],
+                TD["new"]["suite"]] )
+$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
+""")
+
+
+        c.execute( """CREATE OR REPLACE FUNCTION update_contents_for_override() RETURNS trigger AS  $$
+    event = TD["event"]
+    if event == "UPDATE":
+
+        otype = plpy.execute(plpy.prepare("SELECT type from override_type where id=$1",["int"]),[TD["new"]["type"]] )[0];
+        if otype["type"].endswith("deb"):
+            section = plpy.execute(plpy.prepare("SELECT section from section where id=$1",["int"]),[TD["new"]["section"]] )[0];
+
+            table_name = "%s_contents" % otype["type"]
+            plpy.execute(plpy.prepare("UPDATE %s set section=$1 where package=$2 and suite=$3" % table_name,
+                                      ["text","text","int"]),
+                                      [section["section"],
+                                      TD["new"]["package"],
+                                      TD["new"]["suite"]])
+
+$$ LANGUAGE plpythonu VOLATILE SECURITY DEFINER;
+""")
+
+        c.execute("""CREATE OR REPLACE FUNCTION check_illegal_suite_arch()
+                      RETURNS trigger AS  $$
+    event = TD["event"]
+    if event == "UPDATE" or event == "INSERT":
+        row = TD["new"]
+        r = plpy.execute(plpy.prepare( """SELECT 1 from suite_architectures sa
+                  JOIN binaries b ON b.architecture = sa.architecture
+                  WHERE b.id = $1 and sa.suite = $2""",
+                ["int", "int"]),
+                [row["bin"], row["suite"]])
+        if not len(r):
+            plpy.error("Illegal architecture for this suite")
+
+$$ LANGUAGE plpythonu VOLATILE;""")
+
+        c.execute( """CREATE TRIGGER illegal_suite_arch_bin_associations_trigger
+                      BEFORE INSERT OR UPDATE ON bin_associations
+                      FOR EACH ROW EXECUTE PROCEDURE check_illegal_suite_arch();""")
+
+        c.execute( """CREATE TRIGGER bin_associations_contents_trigger
+                      AFTER INSERT OR UPDATE OR DELETE ON bin_associations
+                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_bin_a();""")
+        c.execute("""CREATE TRIGGER override_contents_trigger
+                      AFTER UPDATE ON override
+                      FOR EACH ROW EXECUTE PROCEDURE update_contents_for_override();""")
+
+
+        c.execute( "CREATE INDEX ind_deb_contents_name ON deb_contents(package);");
+        c.execute( "CREATE INDEX ind_udeb_contents_name ON udeb_contents(package);");
+
+        self.db.commit()
+
+    except psycopg2.ProgrammingError, msg:
+        self.db.rollback()
+        raise DBUpdateError, "Unable to apply contents update 25, rollback issued. Error message : %s" % (str(msg))
+
diff --git a/dak/generate_filelist.py b/dak/generate_filelist.py
new file mode 100755 (executable)
index 0000000..d0a6459
--- /dev/null
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+
+"""
+Generate file lists for apt-ftparchive.
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009  Torsten Werner <twerner@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+from daklib.dbconn import *
+from daklib.config import Config
+from daklib import utils
+import apt_pkg, os, sys
+
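+# Helper: run one of the filelist queries below and glue path and
+# filename together into full pathnames.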
+def fetch(query, args, session):
+    return [path + filename for (path, filename) in \
+        session.execute(query, args).fetchall()]
+
+def getSources(suite, component, session):
+    query = """
+        SELECT path, filename
+            FROM srcfiles_suite_component
+            WHERE suite = :suite AND component = :component
+            ORDER BY filename
+    """
+    args = { 'suite': suite.suite_id,
+             'component': component.component_id }
+    return fetch(query, args, session)
+
+def getBinaries(suite, component, architecture, type, session):
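+    # architecture = 2 is assumed to be the pseudo-architecture 'all',
+    # whose packages are wanted in every per-architecture list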
+    query = """
+        SELECT path, filename
+            FROM binfiles_suite_component_arch
+            WHERE suite = :suite AND component = :component AND type = :type AND
+                  (architecture = :architecture OR architecture = 2)
+            ORDER BY filename
+    """
+    args = { 'suite': suite.suite_id,
+             'component': component.component_id,
+             'architecture': architecture.arch_id,
+             'type': type }
+    return fetch(query, args, session)
+
+def listPath(suite, component, architecture = None, type = None):
+    """opens the list file for writing and returns the file object"""
+    suffixMap = { 'deb': "binary-",
+                  'udeb': "debian-installer_binary-" }
+    if architecture:
+        suffix = suffixMap[type] + architecture.arch_string
+    else:
+        suffix = "source"
+    filename = "%s_%s_%s.list" % \
+        (suite.suite_name, component.component_name, suffix)
+    pathname = os.path.join(Config()["Dir::Lists"], filename)
+    return utils.open_file(pathname, "w")
+
+def writeSourceList(suite, component, session):
+    file = listPath(suite, component)
+    for filename in getSources(suite, component, session):
+        file.write(filename + '\n')
+    file.close()
+
+def writeBinaryList(suite, component, architecture, type, session):
+    file = listPath(suite, component, architecture, type)
+    for filename in getBinaries(suite, component, architecture, type, session):
+        file.write(filename + '\n')
+    file.close()
+
+def usage():
+    print """Usage: dak generate_filelist [OPTIONS]
+Create filename lists for apt-ftparchive.
+
+  -s, --suite=SUITE            act on this suite
+  -c, --component=COMPONENT    act on this component
+  -a, --architecture=ARCH      act on this architecture
+  -h, --help                   show this help and exit
+
+ARCH, COMPONENT and SUITE can be comma (or space) separated lists, e.g.
+    --suite=testing,unstable"""
+    sys.exit()
+
+def main():
+    cnf = Config()
+    Arguments = [('h', "help",         "Filelist::Options::Help"),
+                 ('s', "suite",        "Filelist::Options::Suite", "HasArg"),
+                 ('c', "component",    "Filelist::Options::Component", "HasArg"),
+                 ('a', "architecture", "Filelist::Options::Architecture", "HasArg")]
+    query_suites = DBConn().session().query(Suite)
+    suites = [suite.suite_name for suite in query_suites.all()]
+    if not cnf.has_key('Filelist::Options::Suite'):
+        cnf['Filelist::Options::Suite'] = ','.join(suites)
+    # we can ask the database for components if 'mixed' is gone
+    if not cnf.has_key('Filelist::Options::Component'):
+        cnf['Filelist::Options::Component'] = 'main,contrib,non-free'
+    query_architectures = DBConn().session().query(Architecture)
+    architectures = \
+        [architecture.arch_string for architecture in query_architectures.all()]
+    if not cnf.has_key('Filelist::Options::Architecture'):
+        cnf['Filelist::Options::Architecture'] = ','.join(architectures)
+    cnf['Filelist::Options::Help'] = ''
+    apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
+    Options = cnf.SubTree("Filelist::Options")
+    if Options['Help']:
+        usage()
+    session = DBConn().session()
+    suite_arch = session.query(SuiteArchitecture)
+    for suite_name in utils.split_args(Options['Suite']):
+        suite = query_suites.filter_by(suite_name = suite_name).one()
+        join = suite_arch.filter_by(suite_id = suite.suite_id)
+        for component_name in utils.split_args(Options['Component']):
+            component = session.query(Component).\
+                filter_by(component_name = component_name).one()
+            for architecture_name in utils.split_args(Options['Architecture']):
+                architecture = query_architectures.\
+                    filter_by(arch_string = architecture_name).one()
+                try:
+                    join.filter_by(arch_id = architecture.arch_id).one()
+                    if architecture_name == 'source':
+                        writeSourceList(suite, component, session)
+                    elif architecture_name != 'all':
+                        writeBinaryList(suite, component, architecture, 'deb', session)
+                        writeBinaryList(suite, component, architecture, 'udeb', session)
+                except:
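+                    # this suite/architecture combination does not exist; skip it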
+                    pass
+    # this script doesn't change the database
+    session.rollback()
+
+if __name__ == '__main__':
+    main()
+
index cdb1d3afd4a150bd8d59f0676a16f8fc697cef71..c8d5bf96c921a575bc738b9fec24fc6ee08447f3 100755 (executable)
@@ -32,7 +32,7 @@ import sys
 import os
 import logging
 import threading
-from daklib.dbconn import DBConn,get_knownchange
+from daklib.dbconn import DBConn, get_dbchange
 from daklib.config import Config
 import apt_pkg
 from daklib.dak_exceptions import DBUpdateError, InvalidDscError, ChangesUnicodeError
@@ -218,7 +218,7 @@ class ChangesGenerator(threading.Thread):
                                 continue
                             count += 1
 
-                            if not get_knownchange(changesfile, self.session):
+                            if not get_dbchange(changesfile, self.session):
                                 to_import = ChangesToImport(dirpath, changesfile, count)
                                 if self.die:
                                     return
index 4eb8e8c8da9b4625442a635cdb6ab68310df5649..544d366ce2e34d114380423420f1d152c569857c 100755 (executable)
--- a/dak/ls.py
+++ b/dak/ls.py
@@ -91,7 +91,7 @@ def main ():
     session = DBConn().session()
 
     # If cron.daily is running; warn the user that our output might seem strange
-    if os.path.exists(os.path.join(cnf["Dir::Root"], "Archive_Maintenance_In_Progress")):
+    if os.path.exists(os.path.join(cnf["Dir::Lock"], "daily.lock")):
         utils.warn("Archive maintenance is in progress; database inconsistencies are possible.")
 
     # Handle buildd maintenance helper options
index 349a4ae09115f4530e31ae3af0338be8f47696ef..8c13100998eb6b651d86358e720e654eb73dbc8d 100755 (executable)
@@ -361,7 +361,9 @@ SELECT s.id, s.source, 'source', s.version, l.path, f.filename, c.name, f.id,
                                    suite=suite, filetype = filetype)
     cleanup(packages, session)
     session.commit()
-    write_filelists(packages, dislocated_files, session)
+
+    # has been replaced by 'dak generate-filelist':
+    #write_filelists(packages, dislocated_files, session)
 
 ################################################################################
 
index 55a487499bce737630740778a2bf0afcc2de1380..23b765f6fb680498fd75c73110ae8ddc37eebdb7 100755 (executable)
@@ -25,7 +25,7 @@ import apt_pkg, os, sys, pwd, time, commands
 from daklib import queue
 from daklib import daklog
 from daklib import utils
-from daklib.dbconn import DBConn, get_or_set_queue, get_suite_architectures
+from daklib.dbconn import DBConn, get_build_queue, get_suite_architectures
 from daklib.regexes import re_taint_free
 
 Cnf = None
@@ -474,6 +474,7 @@ def _do_Approve():
     # 3. run dak make-suite-file-list / apt-ftparchve / dak generate-releases
     print "Updating file lists for apt-ftparchive..."
     spawn("dak make-suite-file-list")
+    spawn("dak generate-filelist")
     print "Updating Packages and Sources files..."
     spawn("/org/security.debian.org/dak/config/debian-security/map.sh")
     spawn("apt-ftparchive generate %s" % (utils.which_apt_conf_file()))
@@ -495,8 +496,8 @@ def _do_Disembargo():
     session = DBConn().session()
 
     dest = Cnf["Dir::Queue::Unembargoed"]
-    emb_q = get_or_set_queue("embargoed", session)
-    une_q = get_or_set_queue("unembargoed", session)
+    emb_q = get_build_queue("embargoed", session)
+    une_q = get_build_queue("unembargoed", session)
 
     for c in changes:
         print "Disembargoing %s" % (c)
diff --git a/dak/process_accepted.py b/dak/process_accepted.py
deleted file mode 100755 (executable)
index 7b78f08..0000000
+++ /dev/null
@@ -1,713 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Installs Debian packages from queue/accepted into the pool
-
-@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2000, 2001, 2002, 2003, 2004, 2006  James Troup <james@nocrew.org>
-@copyright: 2009  Joerg Jaspert <joerg@debian.org>
-@license: GNU General Public License version 2 or later
-
-"""
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-###############################################################################
-
-#    Cartman: "I'm trying to make the best of a bad situation, I don't
-#              need to hear crap from a bunch of hippy freaks living in
-#              denial.  Screw you guys, I'm going home."
-#
-#    Kyle: "But Cartman, we're trying to..."
-#
-#    Cartman: "uhh.. screw you guys... home."
-
-###############################################################################
-
-import errno
-import fcntl
-import os
-import sys
-from datetime import datetime
-import apt_pkg
-
-from daklib import daklog
-from daklib.queue import *
-from daklib import utils
-from daklib.dbconn import *
-from daklib.dak_exceptions import *
-from daklib.regexes import re_default_answer, re_issource, re_fdnic
-from daklib.urgencylog import UrgencyLog
-from daklib.summarystats import SummaryStats
-from daklib.config import Config
-
-###############################################################################
-
-Options = None
-Logger = None
-
-###############################################################################
-
-def init():
-    global Options
-
-    # Initialize config and connection to db
-    cnf = Config()
-    DBConn()
-
-    Arguments = [('a',"automatic","Dinstall::Options::Automatic"),
-                 ('h',"help","Dinstall::Options::Help"),
-                 ('n',"no-action","Dinstall::Options::No-Action"),
-                 ('p',"no-lock", "Dinstall::Options::No-Lock"),
-                 ('s',"no-mail", "Dinstall::Options::No-Mail"),
-                 ('d',"directory", "Dinstall::Options::Directory", "HasArg")]
-
-    for i in ["automatic", "help", "no-action", "no-lock", "no-mail",
-              "version", "directory"]:
-        if not cnf.has_key("Dinstall::Options::%s" % (i)):
-            cnf["Dinstall::Options::%s" % (i)] = ""
-
-    changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
-    Options = cnf.SubTree("Dinstall::Options")
-
-    if Options["Help"]:
-        usage()
-
-    # If we have a directory flag, use it to find our files
-    if cnf["Dinstall::Options::Directory"] != "":
-        # Note that we clobber the list of files we were given in this case
-        # so warn if the user has done both
-        if len(changes_files) > 0:
-            utils.warn("Directory provided so ignoring files given on command line")
-
-        changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"])
-
-    return changes_files
-
-###############################################################################
-
-def usage (exit_code=0):
-    print """Usage: dak process-accepted [OPTION]... [CHANGES]...
-  -a, --automatic           automatic run
-  -h, --help                show this help and exit.
-  -n, --no-action           don't do anything
-  -p, --no-lock             don't check lockfile !! for cron.daily only !!
-  -s, --no-mail             don't send any mail
-  -V, --version             display the version number and exit"""
-    sys.exit(exit_code)
-
-###############################################################################
-
-def action (u, stable_queue=None, log_urgency=True, session=None):
-    (summary, short_summary) = u.build_summaries()
-    pi = u.package_info()
-
-    (prompt, answer) = ("", "XXX")
-    if Options["No-Action"] or Options["Automatic"]:
-        answer = 'S'
-
-    if len(u.rejects) > 0:
-        print "REJECT\n" + pi
-        prompt = "[R]eject, Skip, Quit ?"
-        if Options["Automatic"]:
-            answer = 'R'
-    else:
-        print "INSTALL to " + ", ".join(u.pkg.changes["distribution"].keys())
-        print pi + summary,
-        prompt = "[I]nstall, Skip, Quit ?"
-        if Options["Automatic"]:
-            answer = 'I'
-
-    while prompt.find(answer) == -1:
-        answer = utils.our_raw_input(prompt)
-        m = re_default_answer.match(prompt)
-        if answer == "":
-            answer = m.group(1)
-        answer = answer[:1].upper()
-
-    if answer == 'R':
-        u.do_unaccept()
-        Logger.log(["unaccepted", u.pkg.changes_file])
-    elif answer == 'I':
-        if stable_queue:
-            stable_install(u, summary, short_summary, stable_queue, log_urgency)
-        else:
-            install(u, session, log_urgency)
-    elif answer == 'Q':
-        sys.exit(0)
-
-
-###############################################################################
-def add_poolfile(filename, datadict, location_id, session):
-    poolfile = PoolFile()
-    poolfile.filename = filename
-    poolfile.filesize = datadict["size"]
-    poolfile.md5sum = datadict["md5sum"]
-    poolfile.sha1sum = datadict["sha1sum"]
-    poolfile.sha256sum = datadict["sha256sum"]
-    poolfile.location_id = location_id
-
-    session.add(poolfile)
-    # Flush to get a file id (NB: This is not a commit)
-    session.flush()
-
-    return poolfile
-
-def add_dsc_to_db(u, filename, session):
-    entry = u.pkg.files[filename]
-    source = DBSource()
-
-    source.source = u.pkg.dsc["source"]
-    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
-    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
-    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
-    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    source.install_date = datetime.now().date()
-
-    dsc_component = entry["component"]
-    dsc_location_id = entry["location id"]
-
-    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
-
-    # Set up a new poolfile if necessary
-    if not entry.has_key("files id") or not entry["files id"]:
-        filename = entry["pool name"] + filename
-        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
-        entry["files id"] = poolfile.file_id
-
-    source.poolfile_id = entry["files id"]
-    session.add(source)
-    session.flush()
-
-    for suite_name in u.pkg.changes["distribution"].keys():
-        sa = SrcAssociation()
-        sa.source_id = source.source_id
-        sa.suite_id = get_suite(suite_name).suite_id
-        session.add(sa)
-
-    session.flush()
-
-    # Add the source files to the DB (files and dsc_files)
-    dscfile = DSCFile()
-    dscfile.source_id = source.source_id
-    dscfile.poolfile_id = entry["files id"]
-    session.add(dscfile)
-
-    for dsc_file, dentry in u.pkg.dsc_files.items():
-        df = DSCFile()
-        df.source_id = source.source_id
-
-        # If the .orig tarball is already in the pool, it's
-        # files id is stored in dsc_files by check_dsc().
-        files_id = dentry.get("files id", None)
-
-        # Find the entry in the files hash
-        # TODO: Bail out here properly
-        dfentry = None
-        for f, e in u.pkg.files.items():
-            if f == dsc_file:
-                dfentry = e
-                break
-
-        if files_id is None:
-            filename = dfentry["pool name"] + dsc_file
-
-            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
-            # FIXME: needs to check for -1/-2 and or handle exception
-            if found and obj is not None:
-                files_id = obj.file_id
-
-            # If still not found, add it
-            if files_id is None:
-                # HACK: Force sha1sum etc into dentry
-                dentry["sha1sum"] = dfentry["sha1sum"]
-                dentry["sha256sum"] = dfentry["sha256sum"]
-                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
-                files_id = poolfile.file_id
-
-        df.poolfile_id = files_id
-        session.add(df)
-
-    session.flush()
-
-    # Add the src_uploaders to the DB
-    uploader_ids = [source.maintainer_id]
-    if u.pkg.dsc.has_key("uploaders"):
-        for up in u.pkg.dsc["uploaders"].split(","):
-            up = up.strip()
-            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
-
-    added_ids = {}
-    for up in uploader_ids:
-        if added_ids.has_key(up):
-            utils.warn("Already saw uploader %s for source %s" % (up, source.source))
-            continue
-
-        added_ids[u]=1
-
-        su = SrcUploader()
-        su.maintainer_id = up
-        su.source_id = source.source_id
-        session.add(su)
-
-    session.flush()
-
-    return dsc_component, dsc_location_id
-
-def add_deb_to_db(u, filename, session):
-    """
-    Contrary to what you might expect, this routine deals with both
-    debs and udebs.  That info is in 'dbtype', whilst 'type' is
-    'deb' for both of them
-    """
-    cnf = Config()
-    entry = u.pkg.files[filename]
-
-    bin = DBBinary()
-    bin.package = entry["package"]
-    bin.version = entry["version"]
-    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
-    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
-    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
-    bin.binarytype = entry["dbtype"]
-
-    # Find poolfile id
-    filename = entry["pool name"] + filename
-    fullpath = os.path.join(cnf["Dir::Pool"], filename)
-    if not entry.get("location id", None):
-        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], utils.where_am_i(), session).location_id
-
-    if not entry.get("files id", None):
-        poolfile = add_poolfile(filename, entry, entry["location id"], session)
-        entry["files id"] = poolfile.file_id
-
-    bin.poolfile_id = entry["files id"]
-
-    # Find source id
-    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
-    if len(bin_sources) != 1:
-        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
-                                  (bin.package, bin.version, bin.architecture.arch_string,
-                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
-
-    bin.source_id = bin_sources[0].source_id
-
-    # Add and flush object so it has an ID
-    session.add(bin)
-    session.flush()
-
-    # Add BinAssociations
-    for suite_name in u.pkg.changes["distribution"].keys():
-        ba = BinAssociation()
-        ba.binary_id = bin.binary_id
-        suite = get_suite(suite_name)
-        ba.suite_id = suite.suite_id
-
-        component_id = bin.poolfile.location.component_id;
-        component_id = bin.poolfile.location.component_id;
-
-        contents = copy_temporary_contents(bin os.path.basename(filename), None, session)
-        if not contents:
-            print "REJECT\nCould not determine contents of package %s" % bin.package
-            session.rollback()
-            raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
-
-                                                     
-        session.add(ba)
-
-
-    session.flush()
-
-
-
-def install(u, session, log_urgency=True):
-    cnf = Config()
-    summarystats = SummaryStats()
-
-    print "Installing."
-
-    Logger.log(["installing changes", u.pkg.changes_file])
-
-    # Ensure that we have all the hashes we need below.
-    u.ensure_hashes()
-    if len(u.rejects) > 0:
-        # There were errors.  Print them and SKIP the changes.
-        for msg in u.rejects:
-            utils.warn(msg)
-        return
-
-    # Add the .dsc file to the DB first
-    for newfile, entry in u.pkg.files.items():
-        if entry["type"] == "dsc":
-            dsc_component, dsc_location_id = add_dsc_to_db(u, newfile, session)
-
-    # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
-    for newfile, entry in u.pkg.files.items():
-        if entry["type"] == "deb":
-            add_deb_to_db(u, newfile, session)
-
-    # If this is a sourceful diff only upload that is moving
-    # cross-component we need to copy the .orig files into the new
-    # component too for the same reasons as above.
-    if u.pkg.changes["architecture"].has_key("source"):
-        for orig_file in u.pkg.orig_files.keys():
-            if not u.pkg.orig_files[orig_file].has_key("id"):
-                continue # Skip if it's not in the pool
-            orig_file_id = u.pkg.orig_files[orig_file]["id"]
-            if u.pkg.orig_files[orig_file]["location"] == dsc_location_id:
-                continue # Skip if the location didn't change
-
-            # Do the move
-            oldf = get_poolfile_by_id(orig_file_id, session)
-            old_filename = os.path.join(oldf.location.path, oldf.filename)
-            old_dat = {'size': oldf.filesize,   'md5sum': oldf.md5sum,
-                       'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
-
-            new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
-
-            # TODO: Care about size/md5sum collisions etc
-            (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session)
-
-            if newf is None:
-                utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
-                newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
-
-                # TODO: Check that there's only 1 here
-                source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0]
-                dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
-                dscf.poolfile_id = newf.file_id
-                session.add(dscf)
-                session.flush()
-
-    # Install the files into the pool
-    for newfile, entry in u.pkg.files.items():
-        destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
-        utils.move(newfile, destination)
-        Logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
-        summarystats.accept_bytes += float(entry["size"])
-
-    # Copy the .changes file across for suite which need it.
-    copy_changes = {}
-    copy_dot_dak = {}
-    for suite_name in u.pkg.changes["distribution"].keys():
-        if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)):
-            copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = ""
-        # and the .dak file...
-        if cnf.has_key("Suite::%s::CopyDotDak" % (suite_name)):
-            copy_dot_dak[cnf["Suite::%s::CopyDotDak" % (suite_name)]] = ""
-
-    for dest in copy_changes.keys():
-        utils.copy(u.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
-
-    for dest in copy_dot_dak.keys():
-        utils.copy(u.pkg.changes_file[:-8]+".dak", dest)
-
-    # We're done - commit the database changes
-    session.commit()
-
-    # Move the .changes into the 'done' directory
-    utils.move(u.pkg.changes_file,
-               os.path.join(cnf["Dir::Queue::Done"], os.path.basename(u.pkg.changes_file)))
-
-    # Remove the .dak file
-    os.unlink(u.pkg.changes_file[:-8] + ".dak")
-
-    if u.pkg.changes["architecture"].has_key("source") and log_urgency:
-        UrgencyLog().log(u.pkg.dsc["source"], u.pkg.dsc["version"], u.pkg.changes["urgency"])
-
-    # Our SQL session will automatically start a new transaction after
-    # the last commit
-
-    # Undo the work done in queue.py(accept) to help auto-building
-    # from accepted.
-    now_date = datetime.now()
-
-    for suite_name in u.pkg.changes["distribution"].keys():
-        if suite_name not in cnf.ValueList("Dinstall::QueueBuildSuites"):
-            continue
-
-        suite = get_suite(suite_name, session)
-        dest_dir = cnf["Dir::QueueBuild"]
-
-        if cnf.FindB("Dinstall::SecurityQueueBuild"):
-            dest_dir = os.path.join(dest_dir, suite_name)
-
-        for newfile, entry in u.pkg.files.items():
-            dest = os.path.join(dest_dir, newfile)
-
-            qb = get_queue_build(dest, suite.suite_id, session)
-
-            # Remove it from the list of packages for later processing by apt-ftparchive
-            if qb:
-                qb.last_used = now_date
-                qb.in_queue = False
-                session.add(qb)
-
-            if not cnf.FindB("Dinstall::SecurityQueueBuild"):
-                # Update the symlink to point to the new location in the pool
-                pool_location = utils.poolify(u.pkg.changes["source"], entry["component"])
-                src = os.path.join(cnf["Dir::Pool"], pool_location, os.path.basename(newfile))
-                if os.path.islink(dest):
-                    os.unlink(dest)
-                os.symlink(src, dest)
-
-        # Update last_used on any non-uploaded .orig symlink
-        for orig_file in u.pkg.orig_files.keys():
-            # Determine the .orig.tar.gz file name
-            if not u.pkg.orig_files[orig_file].has_key("id"):
-                continue # Skip files not in the pool
-            # XXX: do we really want to update the orig_files dict here
-            # instead of using a temporary variable?
-            u.pkg.orig_files[orig_file]["path"] = os.path.join(dest_dir, orig_file)
-
-            # Remove it from the list of packages for later processing by apt-ftparchive
-            qb = get_queue_build(u.pkg.orig_files[orig_file]["path"], suite.suite_id, session)
-            if qb:
-                qb.in_queue = False
-                qb.last_used = now_date
-                session.add(qb)
-
-    session.commit()
-
-    # Finally...
-    summarystats.accept_count += 1
-
-################################################################################
-
-def stable_install(u, session, summary, short_summary, fromsuite_name="proposed-updates"):
-    summarystats = SummaryStats()
-
-    fromsuite_name = fromsuite_name.lower()
-    tosuite_name = "Stable"
-    if fromsuite_name == "oldstable-proposed-updates":
-        tosuite_name = "OldStable"
-
-    print "Installing from %s to %s." % (fromsuite_name, tosuite_name)
-
-    fromsuite = get_suite(fromsuite_name)
-    tosuite = get_suite(tosuite_name)
-
-    # Add the source to stable (and remove it from proposed-updates)
-    for newfile, entry in u.pkg.files.items():
-        if entry["type"] == "dsc":
-            package = u.pkg.dsc["source"]
-            # NB: not files[file]["version"], that has no epoch
-            version = u.pkg.dsc["version"]
-
-            source = get_sources_from_name(package, version, session)
-            if len(source) < 1:
-                utils.fubar("[INTERNAL ERROR] couldn't find '%s' (%s) in source table." % (package, version))
-            source = source[0]
-
-            # Remove from old suite
-            old = session.query(SrcAssociation).filter_by(source_id = source.source_id)
-            old = old.filter_by(suite_id = fromsuite.suite_id)
-            old.delete()
-
-            # Add to new suite
-            new = SrcAssociation()
-            new.source_id = source.source_id
-            new.suite_id = tosuite.suite_id
-            session.add(new)
-
-    # Add the binaries to stable (and remove it/them from proposed-updates)
-    for newfile, entry in u.pkg.files.items():
-        if entry["type"] == "deb":
-            package = entry["package"]
-            version = entry["version"]
-            architecture = entry["architecture"]
-
-            binary = get_binaries_from_name(package, version, [architecture, 'all'])
-
-            if len(binary) < 1:
-                utils.fubar("[INTERNAL ERROR] couldn't find '%s' (%s for %s architecture) in binaries table." % (package, version, architecture))
-            binary = binary[0]
-
-            # Remove from old suite
-            old = session.query(BinAssociation).filter_by(binary_id = binary.binary_id)
-            old = old.filter_by(suite_id = fromsuite.suite_id)
-            old.delete()
-
-            # Add to new suite
-            new = BinAssociation()
-            new.binary_id = binary.binary_id
-            new.suite_id = tosuite.suite_id
-            session.add(new)
-
-    session.commit()
-
-    utils.move(u.pkg.changes_file,
-               os.path.join(cnf["Dir::Morgue"], 'process-accepted', os.path.basename(u.pkg.changes_file)))
-
-    ## Update the Stable ChangeLog file
-    # TODO: URGH - Use a proper tmp file
-    new_changelog_filename = cnf["Dir::Root"] + cnf["Suite::%s::ChangeLogBase" % (tosuite.suite_name)] + ".ChangeLog"
-    changelog_filename = cnf["Dir::Root"] + cnf["Suite::%s::ChangeLogBase" % (tosuite.suite_name)] + "ChangeLog"
-    if os.path.exists(new_changelog_filename):
-        os.unlink(new_changelog_filename)
-
-    new_changelog = utils.open_file(new_changelog_filename, 'w')
-    for newfile, entry in u.pkg.files.items():
-        if entry["type"] == "deb":
-            new_changelog.write("%s/%s/binary-%s/%s\n" % (tosuite.suite_name,
-                                                          entry["component"],
-                                                          entry["architecture"],
-                                                          newfile))
-        elif re_issource.match(newfile):
-            new_changelog.write("%s/%s/source/%s\n" % (tosuite.suite_name,
-                                                       entry["component"],
-                                                       newfile))
-        else:
-            new_changelog.write("%s\n" % (newfile))
-
-    chop_changes = re_fdnic.sub("\n", u.pkg.changes["changes"])
-    new_changelog.write(chop_changes + '\n\n')
-
-    if os.access(changelog_filename, os.R_OK):
-        changelog = utils.open_file(changelog_filename)
-        new_changelog.write(changelog.read())
-
-    new_changelog.close()
-
-    if os.access(changelog_filename, os.R_OK):
-        os.unlink(changelog_filename)
-    utils.move(new_changelog_filename, changelog_filename)
-
-    summarystats.accept_count += 1
-
-    if not Options["No-Mail"] and u.pkg.changes["architecture"].has_key("source"):
-        u.Subst["__SUITE__"] = " into %s" % (tosuite)
-        u.Subst["__SUMMARY__"] = summary
-        u.Subst["__BCC__"] = "X-DAK: dak process-accepted"
-
-        if cnf.has_key("Dinstall::Bcc"):
-            u.Subst["__BCC__"] += "\nBcc: %s" % (cnf["Dinstall::Bcc"])
-
-        template = os.path.join(cnf["Dir::Templates"], 'process-accepted.install')
-
-        mail_message = utils.TemplateSubst(u.Subst, template)
-        utils.send_mail(mail_message)
-        u.announce(short_summary, True)
-
-    # Finally remove the .dak file
-    dot_dak_file = os.path.join(cnf["Suite::%s::CopyDotDak" % (fromsuite.suite_name)],
-                                os.path.basename(u.pkg.changes_file[:-8]+".dak"))
-    os.unlink(dot_dak_file)
-
-################################################################################
-
-def process_it(changes_file, stable_queue, log_urgency, session):
-    cnf = Config()
-    u = Upload()
-
-    overwrite_checks = True
-
-    # Absolutize the filename to avoid the requirement of being in the
-    # same directory as the .changes file.
-    cfile = os.path.abspath(changes_file)
-
-    # And since handling of installs to stable munges with the CWD,
-    # save and restore it.
-    u.prevdir = os.getcwd()
-
-    if stable_queue:
-        old = cfile
-        cfile = os.path.basename(old)
-        os.chdir(cnf["Suite::%s::CopyDotDak" % (stable_queue)])
-        # overwrite_checks should not be performed if installing to stable
-        overwrite_checks = False
-
-    u.pkg.load_dot_dak(cfile)
-    u.update_subst()
-
-    if stable_queue:
-        u.pkg.changes_file = old
-
-    u.accepted_checks(overwrite_checks, session)
-    action(u, stable_queue, log_urgency, session)
-
-    # Restore CWD
-    os.chdir(u.prevdir)
-
-###############################################################################
-
-def main():
-    global Logger
-
-    cnf = Config()
-    summarystats = SummaryStats()
-    changes_files = init()
-    log_urgency = False
-    stable_queue = None
-
-    # -n/--dry-run invalidates some other options which would involve things happening
-    if Options["No-Action"]:
-        Options["Automatic"] = ""
-
-    # Check that we aren't going to clash with the daily cron job
-
-    if not Options["No-Action"] and os.path.exists("%s/Archive_Maintenance_In_Progress" % (cnf["Dir::Root"])) and not Options["No-Lock"]:
-        utils.fubar("Archive maintenance in progress.  Try again later.")
-
-    # If running from within proposed-updates; assume an install to stable
-    queue = ""
-    if os.getenv('PWD', '').find('oldstable-proposed-updates') != -1:
-        stable_queue = "Oldstable-Proposed-Updates"
-    elif os.getenv('PWD', '').find('proposed-updates') != -1:
-        stable_queue = "Proposed-Updates"
-
-    # Obtain lock if not in no-action mode and initialize the log
-    if not Options["No-Action"]:
-        lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
-        try:
-            fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
-        except IOError, e:
-            if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN':
-                utils.fubar("Couldn't obtain lock; assuming another 'dak process-accepted' is already running.")
-            else:
-                raise
-        Logger = daklog.Logger(cnf, "process-accepted")
-        if not stable_queue and cnf.get("Dir::UrgencyLog"):
-            # Initialise UrgencyLog()
-            log_urgency = True
-            UrgencyLog()
-
-    # Sort the .changes files so that we process sourceful ones first
-    changes_files.sort(utils.changes_compare)
-
-
-    # Process the changes files
-    for changes_file in changes_files:
-        print "\n" + changes_file
-        session = DBConn().session()
-        process_it(changes_file, stable_queue, log_urgency, session)
-        session.close()
-
-    if summarystats.accept_count:
-        sets = "set"
-        if summarystats.accept_count > 1:
-            sets = "sets"
-        sys.stderr.write("Installed %d package %s, %s.\n" % (summarystats.accept_count, sets,
-                                                             utils.size_type(int(summarystats.accept_bytes))))
-        Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
-
-    if not Options["No-Action"]:
-        Logger.close()
-        if log_urgency:
-            UrgencyLog().close()
-
-###############################################################################
-
-if __name__ == '__main__':
-    main()
index bec55df55c64b19a05b68bc6817cb2016866e229..e052af49a1909ee1d7f249251dffab4afa9d3be2 100755 (executable)
@@ -821,7 +821,7 @@ def _accept(upload):
     if Options["No-Action"]:
         return
     (summary, short_summary) = upload.build_summaries()
-    upload.accept(summary, short_summary, targetdir=Config()["Dir::Queue::Newstage"])
+    upload.accept(summary, short_summary, targetqueue)
     os.unlink(upload.pkg.changes_file[:-8]+".dak")
 
 def do_accept(upload):
@@ -832,7 +832,7 @@ def do_accept(upload):
 
         if cnf.FindB("Dinstall::SecurityQueueHandling"):
             upload.dump_vars(cnf["Dir::Queue::Embargoed"])
-            upload.move_to_dir(cnf["Dir::Queue::Embargoed"])
+            upload.move_to_queue(get_policy_queue('embargoed'))
             upload.queue_build("embargoed", cnf["Dir::Queue::Embargoed"])
             # Check for override disparities
             upload.Subst["__SUMMARY__"] = summary
@@ -898,6 +898,10 @@ def end():
 def main():
     global Options, Logger, Sections, Priorities
 
+    print "NO NEW PROCESSING CURRENTLY AVAILABLE"
+    print "(Go and do something more interesting)"
+    sys.exit(0)
+
     cnf = Config()
     session = DBConn().session()
 
diff --git a/dak/process_unchecked.py b/dak/process_unchecked.py
deleted file mode 100755 (executable)
index 8a3e49d..0000000
+++ /dev/null
@@ -1,593 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Checks Debian packages from Incoming
-@contact: Debian FTP Master <ftpmaster@debian.org>
-@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006  James Troup <james@nocrew.org>
-@copyright: 2009  Joerg Jaspert <joerg@debian.org>
-@copyright: 2009  Mark Hymers <mhy@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-# Originally based on dinstall by Guy Maor <maor@debian.org>
-
-################################################################################
-
-# Computer games don't affect kids. I mean if Pacman affected our generation as
-# kids, we'd all run around in a darkened room munching pills and listening to
-# repetitive music.
-#         -- Unknown
-
-################################################################################
-
-import errno
-import fcntl
-import os
-import sys
-import traceback
-import apt_pkg
-
-from daklib.dbconn import *
-from daklib import daklog
-from daklib.queue import *
-from daklib import utils
-from daklib.textutils import fix_maintainer
-from daklib.dak_exceptions import *
-from daklib.regexes import re_default_answer
-from daklib.summarystats import SummaryStats
-from daklib.holding import Holding
-from daklib.config import Config
-
-from types import *
-
-################################################################################
-
-
-################################################################################
-
-# Globals
-Options = None
-Logger = None
-
-###############################################################################
-
-def init():
-    global Options
-
-    apt_pkg.init()
-    cnf = Config()
-
-    Arguments = [('a',"automatic","Dinstall::Options::Automatic"),
-                 ('h',"help","Dinstall::Options::Help"),
-                 ('n',"no-action","Dinstall::Options::No-Action"),
-                 ('p',"no-lock", "Dinstall::Options::No-Lock"),
-                 ('s',"no-mail", "Dinstall::Options::No-Mail"),
-                 ('d',"directory", "Dinstall::Options::Directory", "HasArg")]
-
-    for i in ["automatic", "help", "no-action", "no-lock", "no-mail",
-              "override-distribution", "version", "directory"]:
-        cnf["Dinstall::Options::%s" % (i)] = ""
-
-    changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
-    Options = cnf.SubTree("Dinstall::Options")
-
-    if Options["Help"]:
-        usage()
-
-    # If we have a directory flag, use it to find our files
-    if cnf["Dinstall::Options::Directory"] != "":
-        # Note that we clobber the list of files we were given in this case
-        # so warn if the user has done both
-        if len(changes_files) > 0:
-            utils.warn("Directory provided so ignoring files given on command line")
-
-        changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"])
-
-    return changes_files
-
-################################################################################
-
-def usage (exit_code=0):
-    print """Usage: dak process-unchecked [OPTION]... [CHANGES]...
-  -a, --automatic           automatic run
-  -h, --help                show this help and exit.
-  -n, --no-action           don't do anything
-  -p, --no-lock             don't check lockfile !! for cron.daily only !!
-  -s, --no-mail             don't send any mail
-  -V, --version             display the version number and exit"""
-    sys.exit(exit_code)
-
-################################################################################
-
-def action(u):
-    cnf = Config()
-
-    # changes["distribution"] may not exist in corner cases
-    # (e.g. unreadable changes files)
-    if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], DictType):
-        u.pkg.changes["distribution"] = {}
-
-    (summary, short_summary) = u.build_summaries()
-
-    # q-unapproved hax0ring
-    queue_info = {
-         "New": { "is": is_new, "process": acknowledge_new },
-         "Autobyhand" : { "is" : is_autobyhand, "process": do_autobyhand },
-         "Byhand" : { "is": is_byhand, "process": do_byhand },
-         "OldStableUpdate" : { "is": is_oldstableupdate,
-                               "process": do_oldstableupdate },
-         "StableUpdate" : { "is": is_stableupdate, "process": do_stableupdate },
-         "Unembargo" : { "is": is_unembargo, "process": queue_unembargo },
-         "Embargo" : { "is": is_embargo, "process": queue_embargo },
-    }
-
-    queues = [ "New", "Autobyhand", "Byhand" ]
-    if cnf.FindB("Dinstall::SecurityQueueHandling"):
-        queues += [ "Unembargo", "Embargo" ]
-    else:
-        queues += [ "OldStableUpdate", "StableUpdate" ]
-
-    (prompt, answer) = ("", "XXX")
-    if Options["No-Action"] or Options["Automatic"]:
-        answer = 'S'
-
-    queuekey = ''
-
-    pi = u.package_info()
-
-    if len(u.rejects) > 0:
-        if u.upload_too_new():
-            print "SKIP (too new)\n" + pi,
-            prompt = "[S]kip, Quit ?"
-        else:
-            print "REJECT\n" + pi
-            prompt = "[R]eject, Skip, Quit ?"
-            if Options["Automatic"]:
-                answer = 'R'
-    else:
-        qu = None
-        for q in queues:
-            if queue_info[q]["is"](u):
-                qu = q
-                break
-        if qu:
-            print "%s for %s\n%s%s" % ( qu.upper(), ", ".join(u.pkg.changes["distribution"].keys()), pi, summary)
-            queuekey = qu[0].upper()
-            if queuekey in "RQSA":
-                queuekey = "D"
-                prompt = "[D]ivert, Skip, Quit ?"
-            else:
-                prompt = "[%s]%s, Skip, Quit ?" % (queuekey, qu[1:].lower())
-            if Options["Automatic"]:
-                answer = queuekey
-        else:
-            print "ACCEPT\n" + pi + summary,
-            prompt = "[A]ccept, Skip, Quit ?"
-            if Options["Automatic"]:
-                answer = 'A'
-
-    while prompt.find(answer) == -1:
-        answer = utils.our_raw_input(prompt)
-        m = re_default_answer.match(prompt)
-        if answer == "":
-            answer = m.group(1)
-        answer = answer[:1].upper()
-
-    if answer == 'R':
-        os.chdir(u.pkg.directory)
-        u.do_reject(0, pi)
-    elif answer == 'A':
-        u.pkg.add_known_changes( "Accepted" )
-        u.accept(summary, short_summary)
-        u.check_override()
-        u.remove()
-    elif answer == queuekey:
-        u.pkg.add_known_changes( qu )
-        queue_info[qu]["process"](u, summary, short_summary)
-        u.remove()
-    elif answer == 'Q':
-        sys.exit(0)
-
-################################################################################
-
-def package_to_suite(u, suite):
-    if not u.pkg.changes["distribution"].has_key(suite):
-        return False
-
-    ret = True
-
-    if not u.pkg.changes["architecture"].has_key("source"):
-        s = DBConn().session()
-        q = s.query(SrcAssociation.sa_id)
-        q = q.join(Suite).filter_by(suite_name=suite)
-        q = q.join(DBSource).filter_by(source=u.pkg.changes['source'])
-        q = q.filter_by(version=u.pkg.changes['version']).limit(1)
-
-        # NB: Careful, this logic isn't what you would think it is
-        # Source is already in {old-,}proposed-updates so no need to hold
-        # Instead, we don't move to the holding area, we just do an ACCEPT
-        if q.count() > 0:
-            ret = False
-
-        s.close()
-
-    return ret
-
-def package_to_queue(u, summary, short_summary, queue, perms=0660, build=True, announce=None):
-    cnf = Config()
-    dir = cnf["Dir::Queue::%s" % queue]
-
-    print "Moving to %s holding area" % queue.upper()
-    Logger.log(["Moving to %s" % queue, u.pkg.changes_file])
-
-    u.pkg.write_dot_dak(dir)
-    u.move_to_dir(dir, perms=perms)
-    if build:
-        get_or_set_queue(queue.lower()).autobuild_upload(u.pkg, dir)
-
-    # Check for override disparities
-    u.check_override()
-
-    # Send accept mail, announce to lists and close bugs
-    if announce and not cnf["Dinstall::Options::No-Mail"]:
-        template = os.path.join(cnf["Dir::Templates"], announce)
-        u.update_subst()
-        u.Subst["__SUITE__"] = ""
-        mail_message = utils.TemplateSubst(u.Subst, template)
-        utils.send_mail(mail_message)
-        u.announce(short_summary, True)
-
-################################################################################
-
-def is_unembargo(u):
-    session = DBConn().session()
-    cnf = Config()
-
-    q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes)
-    if q.rowcount > 0:
-        session.close()
-        return True
-
-    oldcwd = os.getcwd()
-    os.chdir(cnf["Dir::Queue::Disembargo"])
-    disdir = os.getcwd()
-    os.chdir(oldcwd)
-
-    ret = False
-
-    if u.pkg.directory == disdir:
-        if u.pkg.changes["architecture"].has_key("source"):
-            if not Options["No-Action"]:
-                session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes)
-                session.commit()
-
-            ret = True
-
-    session.close()
-
-    return ret
-
-def queue_unembargo(u, summary, short_summary):
-    return package_to_queue(u, summary, short_summary, "Unembargoed",
-                            perms=0660, build=True, announce='process-unchecked.accepted')
-
-################################################################################
-
-def is_embargo(u):
-    # if embargoed queues are enabled always embargo
-    return True
-
-def queue_embargo(u, summary, short_summary):
-    return package_to_queue(u, summary, short_summary, "Unembargoed",
-                            perms=0660, build=True, announce='process-unchecked.accepted')
-
-################################################################################
-
-def is_stableupdate(u):
-    return package_to_suite(u, 'proposed-updates')
-
-def do_stableupdate(u, summary, short_summary):
-    return package_to_queue(u, summary, short_summary, "ProposedUpdates",
-                            perms=0664, build=False, announce=None)
-
-################################################################################
-
-def is_oldstableupdate(u):
-    return package_to_suite(u, 'oldstable-proposed-updates')
-
-def do_oldstableupdate(u, summary, short_summary):
-    return package_to_queue(u, summary, short_summary, "OldProposedUpdates",
-                            perms=0664, build=False, announce=None)
-
-################################################################################
-
-def is_autobyhand(u):
-    cnf = Config()
-
-    all_auto = 1
-    any_auto = 0
-    for f in u.pkg.files.keys():
-        if u.pkg.files[f].has_key("byhand"):
-            any_auto = 1
-
-            # filename is of form "PKG_VER_ARCH.EXT" where PKG, VER and ARCH
-            # don't contain underscores, and ARCH doesn't contain dots.
-            # further VER matches the .changes Version:, and ARCH should be in
-            # the .changes Architecture: list.
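-            # For example (hypothetical upload): "dpkg_1.15.5_amd64.deb"
-            # would split into pckg="dpkg", ver="1.15.5" and
-            # archext="amd64.deb"; archext then splits into arch="amd64"
-            # and ext="deb".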
-            if f.count("_") < 2:
-                all_auto = 0
-                continue
-
-            (pckg, ver, archext) = f.split("_", 2)
-            if archext.count(".") < 1 or u.pkg.changes["version"] != ver:
-                all_auto = 0
-                continue
-
-            ABH = cnf.SubTree("AutomaticByHandPackages")
-            if not ABH.has_key(pckg) or \
-              ABH["%s::Source" % (pckg)] != u.pkg.changes["source"]:
-                print "not match %s %s" % (pckg, u.pkg.changes["source"])
-                all_auto = 0
-                continue
-
-            (arch, ext) = archext.split(".", 1)
-            if arch not in u.pkg.changes["architecture"]:
-                all_auto = 0
-                continue
-
-            u.pkg.files[f]["byhand-arch"] = arch
-            u.pkg.files[f]["byhand-script"] = ABH["%s::Script" % (pckg)]
-
-    return any_auto and all_auto
-
-def do_autobyhand(u, summary, short_summary):
-    print "Attempting AUTOBYHAND."
-    byhandleft = True
-    for f, entry in u.pkg.files.items():
-        byhandfile = f
-
-        if not entry.has_key("byhand"):
-            continue
-
-        if not entry.has_key("byhand-script"):
-            byhandleft = True
-            continue
-
-        os.system("ls -l %s" % byhandfile)
-
-        result = os.system("%s %s %s %s %s" % (
-                entry["byhand-script"],
-                byhandfile,
-                u.pkg.changes["version"],
-                entry["byhand-arch"],
-                os.path.abspath(u.pkg.changes_file)))
-
-        if result == 0:
-            os.unlink(byhandfile)
-            del u.pkg.files[f]
-        else:
-            print "Error processing %s, left as byhand." % (f)
-            byhandleft = True
-
-    if byhandleft:
-        do_byhand(u, summary, short_summary)
-    else:
-        u.accept(summary, short_summary)
-        u.check_override()
-        # XXX: We seem to be missing a u.remove() here
-        #      This might explain why we get byhand leftovers in unchecked - mhy
-
-################################################################################
-
-def is_byhand(u):
-    for f in u.pkg.files.keys():
-        if u.pkg.files[f].has_key("byhand"):
-            return True
-    return False
-
-def do_byhand(u, summary, short_summary):
-    return package_to_queue(u, summary, short_summary, "Byhand",
-                            perms=0660, build=False, announce=None)
-
-################################################################################
-
-def is_new(u):
-    for f in u.pkg.files.keys():
-        if u.pkg.files[f].has_key("new"):
-            return True
-    return False
-
-def acknowledge_new(u, summary, short_summary):
-    cnf = Config()
-
-    print "Moving to NEW holding area."
-    Logger.log(["Moving to new", u.pkg.changes_file])
-
-    u.pkg.write_dot_dak(cnf["Dir::Queue::New"])
-    u.move_to_dir(cnf["Dir::Queue::New"], perms=0640, changesperms=0644)
-
-    if not Options["No-Mail"]:
-        print "Sending new ack."
-        template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
-        u.update_subst()
-        u.Subst["__SUMMARY__"] = summary
-        new_ack_message = utils.TemplateSubst(u.Subst, template)
-        utils.send_mail(new_ack_message)
-
-################################################################################
-
-# reprocess is necessary for the case of foo_1.2-1 and foo_1.2-2 in
-# Incoming. -1 will reference the .orig.tar.gz, but -2 will not.
-# Upload.check_dsc_against_db() can find the .orig.tar.gz but it will
-# not have processed it during its checks of -2.  If -1 has been
-# deleted or otherwise not checked by 'dak process-unchecked', the
-# .orig.tar.gz will not have been checked at all.  To get round this,
-# we force the .orig.tar.gz into the .changes structure and reprocess
-# the .changes file.
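-#
-# As a rough sketch (hypothetical helper names, not the real API), the
-# reprocessing loop in process_it() behaves like:
-#
-#     while u.reprocess:
-#         u.reprocess = 0
-#         u.check_files(...)                 # may find a pool .orig.tar.gz
-#         if orig_found_only_in_pool(u):
-#             force_orig_into_changes(u)     # add it to u.pkg.files
-#             u.reprocess = 1                # and re-run the checks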
-
-def process_it(changes_file):
-    global Logger
-
-    cnf = Config()
-
-    holding = Holding()
-
-    u = Upload()
-    u.pkg.changes_file = changes_file
-    u.pkg.directory = os.getcwd()
-    u.logger = Logger
-    origchanges = os.path.join(u.pkg.directory, u.pkg.changes_file)
-
-    # Some defaults in case we can't fully process the .changes file
-    u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"]
-    u.pkg.changes["changedby2047"] = cnf["Dinstall::MyEmailAddress"]
-
-    # debian-{devel-,}-changes@lists.debian.org toggles write access based on this header
-    bcc = "X-DAK: dak process-unchecked"
-    if cnf.has_key("Dinstall::Bcc"):
-        u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
-    else:
-        u.Subst["__BCC__"] = bcc
-
-    # Remember where we are so we can come back after cd-ing into the
-    # holding directory.  TODO: Fix this stupid hack
-    u.prevdir = os.getcwd()
-
-    # TODO: Figure out something better for this (or whether it's even
-    #       necessary - it seems to have been for use when we were
-    #       still doing the is_unchecked check; reprocess = 2)
-    u.reprocess = 1
-
-    try:
-        # If this is the Real Thing(tm), copy things into a private
-        # holding directory first to avoid replaceable file races.
-        if not Options["No-Action"]:
-            os.chdir(cnf["Dir::Queue::Holding"])
-
-            # Absolutize the filename to avoid the requirement of being in the
-            # same directory as the .changes file.
-            holding.copy_to_holding(origchanges)
-
-            # Relativize the filename so we use the copy in holding
-            # rather than the original...
-            changespath = os.path.basename(u.pkg.changes_file)
-        else:
-            changespath = origchanges
-
-        (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changespath)
-
-        if u.pkg.changes["fingerprint"]:
-            valid_changes_p = u.load_changes(changespath)
-        else:
-            valid_changes_p = False
-            u.rejects.extend(rejects)
-
-        if valid_changes_p:
-            while u.reprocess:
-                u.check_distributions()
-                u.check_files(not Options["No-Action"])
-                valid_dsc_p = u.check_dsc(not Options["No-Action"])
-                if valid_dsc_p and not Options["No-Action"]:
-                    u.check_source()
-                    u.check_lintian()
-                u.check_hashes()
-                u.check_urgency()
-                u.check_timestamps()
-                u.check_signed_by_key()
-
-        action(u)
-
-    except (SystemExit, KeyboardInterrupt):
-        raise
-
-    except:
-        print "ERROR"
-        traceback.print_exc(file=sys.stderr)
-
-    # Restore previous WD
-    os.chdir(u.prevdir)
-
-###############################################################################
-
-def main():
-    global Options, Logger
-
-    cnf = Config()
-    changes_files = init()
-
-    # -n/--dry-run invalidates some other options which would involve things happening
-    if Options["No-Action"]:
-        Options["Automatic"] = ""
-
-    # Initialize our Holding singleton
-    holding = Holding()
-
-    # Ensure all the arguments we were given are .changes files
-    for f in changes_files[:]:
-        if not f.endswith(".changes"):
-            utils.warn("Ignoring '%s' because it's not a .changes file." % (f))
-            changes_files.remove(f)
-
-    if changes_files == []:
-        if cnf["Dinstall::Options::Directory"] == "":
-            utils.fubar("Need at least one .changes file as an argument.")
-        else:
-            sys.exit(0)
-
-    # Check that we aren't going to clash with the daily cron job
-    if not Options["No-Action"] and os.path.exists("%s/daily.lock" % (cnf["Dir::Lock"])) and not Options["No-Lock"]:
-        utils.fubar("Archive maintenance in progress.  Try again later.")
-
-    # Obtain lock if not in no-action mode and initialize the log
-    if not Options["No-Action"]:
-        lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
-        try:
-            fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
-        except IOError, e:
-            if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN':
-                utils.fubar("Couldn't obtain lock; assuming another 'dak process-unchecked' is already running.")
-            else:
-                raise
-        Logger = daklog.Logger(cnf, "process-unchecked")
-
-    # Sort the .changes files so that we process sourceful ones first
-    changes_files.sort(utils.changes_compare)
-
-    # Process the changes files
-    for changes_file in changes_files:
-        print "\n" + changes_file
-        try:
-            process_it (changes_file)
-        finally:
-            if not Options["No-Action"]:
-                holding.clean()
-
-    accept_count = SummaryStats().accept_count
-    accept_bytes = SummaryStats().accept_bytes
-
-    if accept_count:
-        sets = "set"
-        if accept_count > 1:
-            sets = "sets"
-        print "Accepted %d package %s, %s." % (accept_count, sets, utils.size_type(int(accept_bytes)))
-        Logger.log(["total",accept_count,accept_bytes])
-
-    if not Options["No-Action"]:
-        Logger.close()
-
-################################################################################
-
-if __name__ == '__main__':
-    main()
diff --git a/dak/process_upload.py b/dak/process_upload.py
new file mode 100755 (executable)
index 0000000..155ba1d
--- /dev/null
@@ -0,0 +1,492 @@
+#!/usr/bin/env python
+
+"""
+Checks Debian packages from Incoming
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006  James Troup <james@nocrew.org>
+@copyright: 2009  Joerg Jaspert <joerg@debian.org>
+@copyright: 2009  Mark Hymers <mhy@debian.org>
+@copyright: 2009  Frank Lichtenheld <djpig@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+# based on process-unchecked and process-accepted
+
+## pu|pa: locking (daily.lock)
+## pu|pa: parse arguments -> list of changes files
+## pa: initialize urgency log
+## pu|pa: sort changes list
+
+## foreach changes:
+###  pa: load dak file
+##   pu: copy CHG to tempdir
+##   pu: check CHG signature
+##   pu: parse changes file
+##   pu: checks:
+##     pu: check distribution (mappings, rejects)
+##     pu: copy FILES to tempdir
+##     pu: check whether CHG already exists in CopyChanges
+##     pu: check whether FILES already exist in one of the policy queues
+##     for deb in FILES:
+##       pu: extract control information
+##       pu: various checks on control information
+##       pu|pa: search for source (in CHG, projectb, policy queues)
+##       pu|pa: check whether "Version" fulfills target suite requirements/suite propagation
+##       pu|pa: check whether deb already exists in the pool
+##     for src in FILES:
+##       pu: various checks on filenames and CHG consistency
+##       pu: if isdsc: check signature
+##     for file in FILES:
+##       pu: various checks
+##       pu: NEW?
+##       //pu: check whether file already exists in the pool
+##       pu: store what "Component" the package is currently in
+##     pu: check whether we found everything we were looking for in CHG
+##     pu: check the DSC:
+##       pu: check whether we need and have ONE DSC
+##       pu: parse the DSC
+##       pu: various checks //maybe drop some of them in favor of lintian
+##       pu|pa: check whether "Version" fulfills target suite requirements/suite propagation
+##       pu: check whether DSC_FILES is consistent with "Format"
+##       for src in DSC_FILES:
+##         pu|pa: check whether file already exists in the pool (with special handling for .orig.tar.gz)
+##     pu: create new tempdir
+##     pu: create symlink mirror of source
+##     pu: unpack source
+##     pu: extract changelog information for BTS
+##     //pu: create missing .orig symlink
+##     pu: check with lintian
+##     for file in FILES:
+##       pu: check checksums and sizes
+##     for file in DSC_FILES:
+##       pu: check checksums and sizes
+##     pu: CHG: check urgency
+##     for deb in FILES:
+##       pu: extract contents list and check for dubious timestamps
+##     pu: check that the uploader is actually allowed to upload the package
+###  pa: install:
+###    if stable_install:
+###      pa: remove from p-u
+###      pa: add to stable
+###      pa: move CHG to morgue
+###      pa: append data to ChangeLog
+###      pa: send mail
+###      pa: remove .dak file
+###    else:
+###      pa: add dsc to db:
+###        for file in DSC_FILES:
+###          pa: add file to file
+###          pa: add file to dsc_files
+###        pa: create source entry
+###        pa: update source associations
+###        pa: update src_uploaders
+###      for deb in FILES:
+###        pa: add deb to db:
+###          pa: add file to file
+###          pa: find source entry
+###          pa: create binaries entry
+###          pa: update binary associations
+###      pa: .orig component move
+###      pa: move files to pool
+###      pa: save CHG
+###      pa: move CHG to done/
+###      pa: change entry in queue_build
+##   pu: use dispatch table to choose target queue:
+##     if NEW:
+##       pu: write .dak file
+##       pu: move to NEW
+##       pu: send mail
+##     elsif AUTOBYHAND:
+##       pu: run autobyhand script
+##       pu: if stuff left, do byhand or accept
+##     elsif targetqueue in (oldstable, stable, embargo, unembargo):
+##       pu: write .dak file
+##       pu: check overrides
+##       pu: move to queue
+##       pu: send mail
+##     else:
+##       pu: write .dak file
+##       pu: move to ACCEPTED
+##       pu: send mails
+##       pu: create files for BTS
+##       pu: create entry in queue_build
+##       pu: check overrides
+
+# Integrity checks
+## GPG
+## Parsing changes (check for duplicates)
+## Parse dsc
+## file list checks
+
+# New check layout (TODO: Implement)
+## Permission checks
+### suite mappings
+### ACLs
+### version checks (suite)
+### override checks
+
+## Source checks
+### copy orig
+### unpack
+### BTS changelog
+### src contents
+### lintian
+### urgency log
+
+## Binary checks
+### timestamps
+### control checks
+### src relation check
+### contents
+
+## Database insertion (? copy from stuff)
+### BYHAND / NEW / Policy queues
+### Pool
+
+## Queue builds
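+
+## Illustrative sketch only (hypothetical names): the "dispatch table"
+## mentioned above pairs an "is" predicate with a "process" handler per
+## target queue; QueueInfo (from daklib.queue_install) used by action()
+## below follows this shape:
+##
+##     ExampleQueueInfo = {
+##         "new":        {"is": is_new,        "process": acknowledge_new},
+##         "autobyhand": {"is": is_autobyhand, "process": do_autobyhand},
+##         "byhand":     {"is": is_byhand,     "process": do_byhand},
+##     }
+##
+##     def example_determine_target(u):
+##         for q, info in ExampleQueueInfo.items():
+##             if info["is"](u):
+##                 return q
+##         return None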
+
+from errno import EACCES, EAGAIN
+import fcntl
+import os
+import sys
+import traceback
+import apt_pkg
+from sqlalchemy.orm.exc import NoResultFound
+
+from daklib import daklog
+from daklib.queue import *
+from daklib.queue_install import *
+from daklib import utils
+from daklib.dbconn import *
+from daklib.urgencylog import UrgencyLog
+from daklib.summarystats import SummaryStats
+from daklib.holding import Holding
+from daklib.config import Config
+
+###############################################################################
+
+Options = None
+Logger = None
+
+###############################################################################
+
+def usage (exit_code=0):
+    print """Usage: dak process-upload [OPTION]... [CHANGES]...
+  -a, --automatic           automatic run
+  -h, --help                show this help and exit.
+  -n, --no-action           don't do anything
+  -p, --no-lock             don't check lockfile !! for cron.daily only !!
+  -s, --no-mail             don't send any mail
+  -V, --version             display the version number and exit"""
+    sys.exit(exit_code)
+
+###############################################################################
+
+def action(u, session):
+    cnf = Config()
+    holding = Holding()
+
+    # changes["distribution"] may not exist in corner cases
+    # (e.g. unreadable changes files)
+    if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], dict):
+        u.pkg.changes["distribution"] = {}
+
+    (summary, short_summary) = u.build_summaries()
+
+    (prompt, answer) = ("", "XXX")
+    if Options["No-Action"] or Options["Automatic"]:
+        answer = 'S'
+
+    queuekey = ''
+
+    pi = u.package_info()
+
+    try:
+        chg = session.query(DBChange).filter_by(changesname=os.path.basename(u.pkg.changes_file)).one()
+    except NoResultFound, e:
+        chg = None
+
+    if len(u.rejects) > 0:
+        if u.upload_too_new():
+            print "SKIP (too new)\n" + pi,
+            prompt = "[S]kip, Quit ?"
+        else:
+            print "REJECT\n" + pi
+            prompt = "[R]eject, Skip, Quit ?"
+            if Options["Automatic"]:
+                answer = 'R'
+    else:
+        # Are we headed for NEW / BYHAND / AUTOBYHAND?
+        # Note that policy queues are no longer handled here
+        qu = determine_target(u)
+        if qu:
+            print "%s for %s\n%s%s" % ( qu.upper(), ", ".join(u.pkg.changes["distribution"].keys()), pi, summary)
+            queuekey = qu[0].upper()
+            if queuekey in "RQSA":
+                queuekey = "D"
+                prompt = "[D]ivert, Skip, Quit ?"
+            else:
+                prompt = "[%s]%s, Skip, Quit ?" % (queuekey, qu[1:].lower())
+            if Options["Automatic"]:
+                answer = queuekey
+        else:
+            # Does suite have a policy_queue configured
+            divert = False
+            for s in u.pkg.changes["distribution"].keys():
+                suite = get_suite(s, session)
+                if suite.policy_queue:
+                    if not chg or chg.approved_for_id != suite.policy_queue.policy_queue_id:
+                        # This routine will check whether the upload is a binary
+                        # upload when the source is already in the target suite.  If
+                        # so, we skip the policy queue, otherwise we go there.
+                        divert = package_to_suite(u, suite.suite_name, session=session)
+                        if divert:
+                            print "%s for %s\n%s%s" % ( suite.policy_queue.queue_name.upper(),
+                                                        ", ".join(u.pkg.changes["distribution"].keys()),
+                                                        pi, summary)
+                            queuekey = "P"
+                            prompt = "[P]olicy, Skip, Quit ?"
+                            policyqueue = suite.policy_queue
+                            if Options["Automatic"]:
+                                answer = 'P'
+                            break
+
+            if not divert:
+                print "ACCEPT\n" + pi + summary,
+                prompt = "[A]ccept, Skip, Quit ?"
+                if Options["Automatic"]:
+                    answer = 'A'
+
+    while prompt.find(answer) == -1:
+        answer = utils.our_raw_input(prompt)
+        m = re_default_answer.match(prompt)
+        if answer == "":
+            answer = m.group(1)
+        answer = answer[:1].upper()
+
+    if answer == 'R':
+        os.chdir(u.pkg.directory)
+        u.do_reject(0, pi)
+    elif answer == 'A':
+        if not chg:
+            chg = u.pkg.add_known_changes(holding.holding_dir, session)
+        u.accept(summary, short_summary, session)
+        u.check_override()
+        session.commit()
+        u.remove()
+    elif answer == 'P':
+        if not chg:
+            chg = u.pkg.add_known_changes(holding.holding_dir, session)
+        package_to_queue(u, summary, short_summary, policyqueue, chg, session)
+        session.commit()
+        u.remove()
+    elif answer == queuekey:
+        if not chg:
+            chg = u.pkg.add_known_changes(holding.holding_dir, session)
+        QueueInfo[qu]["process"](u, summary, short_summary, chg, session)
+        session.commit()
+        u.remove()
+    elif answer == 'Q':
+        sys.exit(0)
+
+    session.commit()
+
+###############################################################################
+
+def cleanup():
+    h = Holding()
+    if not Options["No-Action"]:
+        h.clean()
+
+def process_it(changes_file, session):
+    global Logger
+
+    Logger.log(["Processing changes file", changes_file])
+
+    cnf = Config()
+
+    holding = Holding()
+
+    # TODO: Actually implement using pending* tables so that we don't lose track
+    #       of what is where
+
+    u = Upload()
+    u.pkg.changes_file = changes_file
+    u.pkg.directory = os.getcwd()
+    u.logger = Logger
+    origchanges = os.path.abspath(u.pkg.changes_file)
+
+    # Some defaults in case we can't fully process the .changes file
+    u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"]
+    u.pkg.changes["changedby2047"] = cnf["Dinstall::MyEmailAddress"]
+
+    # debian-{devel-,}-changes@lists.debian.org toggles write access based on this header
+    bcc = "X-DAK: dak process-upload"
+    if cnf.has_key("Dinstall::Bcc"):
+        u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"])
+    else:
+        u.Subst["__BCC__"] = bcc
+
+    # Remember where we are so we can come back after cd-ing into the
+    # holding directory.  TODO: Fix this stupid hack
+    u.prevdir = os.getcwd()
+
+    try:
+        # If this is the Real Thing(tm), copy things into a private
+        # holding directory first to avoid replaceable file races.
+        if not Options["No-Action"]:
+            os.chdir(cnf["Dir::Queue::Holding"])
+
+            # Absolutize the filename to avoid the requirement of being in the
+            # same directory as the .changes file.
+            holding.copy_to_holding(origchanges)
+
+            # Relativize the filename so we use the copy in holding
+            # rather than the original...
+            changespath = os.path.basename(u.pkg.changes_file)
+        else:
+            changespath = origchanges
+
+        (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changespath)
+
+        if u.pkg.changes["fingerprint"]:
+            valid_changes_p = u.load_changes(changespath)
+        else:
+            valid_changes_p = False
+            u.rejects.extend(rejects)
+
+        if valid_changes_p:
+            u.check_distributions()
+            u.check_files(not Options["No-Action"])
+            valid_dsc_p = u.check_dsc(not Options["No-Action"])
+            if valid_dsc_p and not Options["No-Action"]:
+                u.check_source()
+                u.check_lintian()
+            u.check_hashes()
+            u.check_urgency()
+            u.check_timestamps()
+            u.check_signed_by_key()
+
+        action(u, session)
+
+    except (SystemExit, KeyboardInterrupt):
+        cleanup()
+        raise
+
+    except:
+        print "ERROR"
+        traceback.print_exc(file=sys.stderr)
+
+    cleanup()
+    # Restore previous WD
+    os.chdir(u.prevdir)
+
+###############################################################################
+
+def main():
+    global Options, Logger
+
+    cnf = Config()
+    summarystats = SummaryStats()
+    log_urgency = False
+
+    DBConn()
+
+    Arguments = [('a',"automatic","Dinstall::Options::Automatic"),
+                 ('h',"help","Dinstall::Options::Help"),
+                 ('n',"no-action","Dinstall::Options::No-Action"),
+                 ('p',"no-lock", "Dinstall::Options::No-Lock"),
+                 ('s',"no-mail", "Dinstall::Options::No-Mail"),
+                 ('d',"directory", "Dinstall::Options::Directory", "HasArg")]
+
+    for i in ["automatic", "help", "no-action", "no-lock", "no-mail",
+              "version", "directory"]:
+        if not cnf.has_key("Dinstall::Options::%s" % (i)):
+            cnf["Dinstall::Options::%s" % (i)] = ""
+
+    changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
+    Options = cnf.SubTree("Dinstall::Options")
+
+    if Options["Help"]:
+        usage()
+
+    # -n/--dry-run invalidates some other options which would involve things happening
+    if Options["No-Action"]:
+        Options["Automatic"] = ""
+
+    # Check that we aren't going to clash with the daily cron job
+    if not Options["No-Action"] and os.path.exists("%s/daily.lock" % (cnf["Dir::Lock"])) and not Options["No-Lock"]:
+        utils.fubar("Archive maintenance in progress.  Try again later.")
+
+    # Obtain lock if not in no-action mode and initialize the log
+    if not Options["No-Action"]:
+        lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
+        try:
+            fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+        except IOError, e:
+            if e.errno in (EACCES, EAGAIN):
+                utils.fubar("Couldn't obtain lock; assuming another 'dak process-upload' is already running.")
+            else:
+                raise
+        if cnf.get("Dir::UrgencyLog"):
+            # Initialise UrgencyLog()
+            log_urgency = True
+            UrgencyLog()
+
+    Logger = daklog.Logger(cnf, "process-upload", Options["No-Action"])
+
+    # If we have a directory flag, use it to find our files
+    if cnf["Dinstall::Options::Directory"] != "":
+        # Note that we clobber the list of files we were given in this case
+        # so warn if the user has done both
+        if len(changes_files) > 0:
+            utils.warn("Directory provided so ignoring files given on command line")
+
+        changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"])
+        Logger.log(["Using changes files from directory", cnf["Dinstall::Options::Directory"], len(changes_files)])
+    elif not len(changes_files) > 0:
+        utils.fubar("No changes files given and no directory specified")
+    else:
+        Logger.log(["Using changes files from command-line", len(changes_files)])
+
+    # Sort the .changes files so that we process sourceful ones first
+    changes_files.sort(utils.changes_compare)
+
+    # Process the changes files
+    for changes_file in changes_files:
+        print "\n" + changes_file
+        session = DBConn().session()
+        process_it(changes_file, session)
+        session.close()
+
+    if summarystats.accept_count:
+        sets = "set"
+        if summarystats.accept_count > 1:
+            sets = "sets"
+        print "Installed %d package %s, %s." % (summarystats.accept_count, sets,
+                                                utils.size_type(int(summarystats.accept_bytes)))
+        Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
+
+    if not Options["No-Action"]:
+        if log_urgency:
+            UrgencyLog().close()
+    Logger.close()
+
+###############################################################################
+
+if __name__ == '__main__':
+    main()
index 8e338e526181c9d5d16dd4737ae7b36e6dce725e..c9013a5239c956eb54c5c60e9ed656ca6aa9fe2d 100755 (executable)
@@ -39,7 +39,7 @@ import glob, os, stat, sys, time
 import apt_pkg
 
 from daklib import utils
-from daklib.changes import Changes
+from daklib.queue import Upload
 from daklib.dbconn import DBConn, has_new_comment
 from daklib.textutils import fix_maintainer
 from daklib.dak_exceptions import *
@@ -301,9 +301,9 @@ def process_changes_files(changes_files, type, log):
     # Read in all the .changes files
     for filename in changes_files:
         try:
-            c = Changes()
-            c.load_dot_dak(filename)
-            cache[filename] = copy(c.changes)
+            u = Upload()
+            u.load_changes(filename)
+            cache[filename] = copy(u.pkg.changes)
             cache[filename]["filename"] = filename
         except Exception, e:
             print "WARNING: Exception %s" % e
diff --git a/dak/test/001/1.dsc b/dak/test/001/1.dsc
deleted file mode 100644 (file)
index dfdd92f..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
------BEGIN PGP SIGNED MESSAGE-----
-Hash: SHA1
-
-Format: 1.0
-Source: amaya
-Version: 3.2.1-1
-Binary: amaya
-Maintainer: Steve Dunham <dunham@debian.org>
-Architecture: any
-Standards-Version: 2.4.0.0
-Files: 
- 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
- da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
-
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v1.0.2 (GNU/Linux)
-Comment: For info see http://www.gnupg.org
-
-iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
-rhYnRmVuNMa8oYSvL4hl/Yw=
-=EFAA
------END PGP SIGNATURE-----
diff --git a/dak/test/001/2.dsc b/dak/test/001/2.dsc
deleted file mode 100644 (file)
index a6c9d85..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
------BEGIN PGP SIGNED MESSAGE-----
-Hash: SHA1
-
-Format: 1.0
-Source: amaya
-Version: 3.2.1-1
-Binary: amaya
-Maintainer: Steve Dunham <dunham@debian.org>
-Architecture: any
-Standards-Version: 2.4.0.0
-Files: 
- 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
- da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v1.0.2 (GNU/Linux)
-Comment: For info see http://www.gnupg.org
-
-iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
-rhYnRmVuNMa8oYSvL4hl/Yw=
-=EFAA
------END PGP SIGNATURE-----
diff --git a/dak/test/001/3.dsc b/dak/test/001/3.dsc
deleted file mode 100644 (file)
index 211340e..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
------BEGIN PGP SIGNED MESSAGE-----
-Hash: SHA1
-Format: 1.0
-Source: amaya
-Version: 3.2.1-1
-Binary: amaya
-Maintainer: Steve Dunham <dunham@debian.org>
-Architecture: any
-Standards-Version: 2.4.0.0
-Files: 
- 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
- da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
-
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v1.0.2 (GNU/Linux)
-Comment: For info see http://www.gnupg.org
-
-iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
-rhYnRmVuNMa8oYSvL4hl/Yw=
-=EFAA
------END PGP SIGNATURE-----
diff --git a/dak/test/001/4.dsc b/dak/test/001/4.dsc
deleted file mode 100644 (file)
index 91e361f..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN PGP SIGNED MESSAGE-----
-Hash: SHA1
-Format: 1.0
-Source: amaya
-Version: 3.2.1-1
-Binary: amaya
-Maintainer: Steve Dunham <dunham@debian.org>
-Architecture: any
-Standards-Version: 2.4.0.0
-Files: 
- 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
- da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v1.0.2 (GNU/Linux)
-Comment: For info see http://www.gnupg.org
-iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
-rhYnRmVuNMa8oYSvL4hl/Yw=
-=EFAA
------END PGP SIGNATURE-----
diff --git a/dak/test/001/5.dsc b/dak/test/001/5.dsc
deleted file mode 100644 (file)
index db9d8d3..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
------BEGIN PGP SIGNED MESSAGE-----
-Hash: SHA1
-
-Format: 1.0
-Source: amaya
-Version: 3.2.1-1
-Binary: amaya
-Maintainer: Steve Dunham <dunham@debian.org>
-Architecture: any
-Standards-Version: 2.4.0.0
-Files: 
- 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
- da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
-
-
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v1.0.2 (GNU/Linux)
-Comment: For info see http://www.gnupg.org
-
-iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
-rhYnRmVuNMa8oYSvL4hl/Yw=
-=EFAA
------END PGP SIGNATURE-----
diff --git a/dak/test/001/6.dsc b/dak/test/001/6.dsc
deleted file mode 100644 (file)
index ae36d64..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
------BEGIN PGP SIGNED MESSAGE-----
-Hash: SHA1
-
-
-Format: 1.0
-Source: amaya
-Version: 3.2.1-1
-Binary: amaya
-Maintainer: Steve Dunham <dunham@debian.org>
-Architecture: any
-Standards-Version: 2.4.0.0
-Files: 
- 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
- da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
-
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v1.0.2 (GNU/Linux)
-Comment: For info see http://www.gnupg.org
-
-iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
-rhYnRmVuNMa8oYSvL4hl/Yw=
-=EFAA
------END PGP SIGNATURE-----
diff --git a/dak/test/001/test.py b/dak/test/001/test.py
deleted file mode 100644 (file)
index 8238c20..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-
-# Check utils.parse_changes()'s .dsc file validation
-# Copyright (C) 2000, 2006  James Troup <james@nocrew.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-import os, sys
-
-sys.path.append(os.path.abspath('../../'))
-
-import utils
-
-################################################################################
-
-def fail(message):
-    sys.stderr.write("%s\n" % (message))
-    sys.exit(1)
-
-################################################################################
-
-def main ():
-    # Valid .dsc
-    utils.parse_changes('1.dsc',1)
-
-    # Missing blank line before signature body
-    try:
-        utils.parse_changes('2.dsc',1)
-    except utils.invalid_dsc_format_exc, line:
-        if line != 14:
-            fail("Incorrect line number ('%s') for test #2." % (line))
-    else:
-        fail("Test #2 wasn't recognised as invalid.")
-
-    # Missing blank line after signature header
-    try:
-        utils.parse_changes('3.dsc',1)
-    except utils.invalid_dsc_format_exc, line:
-        if line != 14:
-            fail("Incorrect line number ('%s') for test #3." % (line))
-    else:
-        fail("Test #3 wasn't recognised as invalid.")
-
-    # No blank lines at all
-    try:
-        utils.parse_changes('4.dsc',1)
-    except utils.invalid_dsc_format_exc, line:
-        if line != 19:
-            fail("Incorrect line number ('%s') for test #4." % (line))
-    else:
-        fail("Test #4 wasn't recognised as invalid.")
-
-    # Extra blank line before signature body
-    try:
-        utils.parse_changes('5.dsc',1)
-    except utils.invalid_dsc_format_exc, line:
-        if line != 15:
-            fail("Incorrect line number ('%s') for test #5." % (line))
-    else:
-        fail("Test #5 wasn't recognised as invalid.")
-
-    # Extra blank line after signature header
-    try:
-        utils.parse_changes('6.dsc',1)
-    except utils.invalid_dsc_format_exc, line:
-        if line != 5:
-            fail("Incorrect line number ('%s') for test #6." % (line))
-    else:
-        fail("Test #6 wasn't recognised as invalid.")
-
-    # Valid .dsc ; ignoring errors
-    utils.parse_changes('1.dsc', 0)
-
-    # Invalid .dsc ; ignoring errors
-    utils.parse_changes('2.dsc', 0)
-
-################################################################################
-
-if __name__ == '__main__':
-    main()
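
The six fixtures above probe the blank-line layout that parse_changes() enforces around the inline PGP armour. For reference, the calling convention the deleted test exercised: the second argument toggles strict whitespace checking, and layout errors surface as invalid_dsc_format_exc carrying the offending line number. A minimal usage sketch, grounded in the test's own calls:

    import utils

    # Strict whitespace on: a malformed armour layout raises with the
    # offending line number (e.g. 2.dsc lacks the blank line before the
    # signature body).
    try:
        utils.parse_changes('2.dsc', 1)
    except utils.invalid_dsc_format_exc, line:
        print "rejected at line %s" % (line)

    # Strict whitespace off: the same file parses without complaint.
    changes = utils.parse_changes('2.dsc', 0)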
diff --git a/dak/test/002/empty.changes b/dak/test/002/empty.changes
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/dak/test/002/test.py b/dak/test/002/test.py
deleted file mode 100644 (file)
index 919a70a..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-
-# Check utils.parse_changes()'s handling of empty files
-# Copyright (C) 2000, 2006  James Troup <james@nocrew.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-import os, sys
-
-sys.path.append(os.path.abspath('../../'))
-
-import utils
-
-################################################################################
-
-def fail(message):
-    sys.stderr.write("%s\n" % (message))
-    sys.exit(1)
-
-################################################################################
-
-def main ():
-    # Empty .changes file; should raise a 'parse error' exception.
-    try:
-        utils.parse_changes('empty.changes', 0)
-    except utils.changes_parse_error_exc, line:
-        if line != "[Empty changes file]":
-            fail("Returned exception with unexcpected error message `%s'." % (line))
-    else:
-        fail("Didn't raise a 'parse error' exception for a zero-length .changes file.")
-
-################################################################################
-
-if __name__ == '__main__':
-    main()
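
A hedged sketch of the guard this test pinned down; the exception text is taken from the test itself, while the helper name is hypothetical and not dak's code:

    import utils

    # Hypothetical helper illustrating the guard: a zero-length .changes
    # file must raise changes_parse_error_exc, never parse successfully.
    def read_changes_or_fail(filename):
        contents = open(filename).read()
        if contents.strip() == "":
            raise utils.changes_parse_error_exc, "[Empty changes file]"
        return contents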
diff --git a/dak/test/003/krb5_1.2.2-4_m68k.changes b/dak/test/003/krb5_1.2.2-4_m68k.changes
deleted file mode 100644 (file)
index 9d264c1..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
------BEGIN PGP SIGNED MESSAGE-----
-
-Format: 1.7
-Date: Fri, 20 Apr 2001 02:47:21 -0400
-Source: krb5
-Binary: krb5-kdc krb5-doc krb5-rsh-server libkrb5-dev libkrb53 krb5-ftpd
- krb5-clients krb5-user libkadm54 krb5-telnetd krb5-admin-server
-Architecture: m68k
-Version: 1.2.2-4
-Distribution: unstable
-Urgency: low
-Maintainer: buildd m68k user account <buildd@ax.westfalen.de>
-Changed-By: Sam Hartman <hartmans@debian.org>
-Description: 
- krb5-admin-server - Mit Kerberos master server (kadmind)
- krb5-clients - Secure replacements for ftp, telnet and rsh using MIT Kerberos
- krb5-ftpd  - Secure FTP server supporting MIT Kerberos
- krb5-kdc   - Mit Kerberos key server (KDC)
- krb5-rsh-server - Secure replacements for rshd and rlogind  using MIT Kerberos
- krb5-telnetd - Secure telnet server supporting MIT Kerberos
- krb5-user  - Basic programs to authenticate using MIT Kerberos
- libkadm54  - MIT Kerberos administration runtime libraries
- libkrb5-dev - Headers and development libraries for MIT Kerberos
- libkrb53   - MIT Kerberos runtime libraries
-Closes: 94407
-Changes: 
- krb5 (1.2.2-4) unstable; urgency=low
- .
-   * Fix shared libraries to build with gcc not ld to properly include
-     -lgcc symbols, closes: #94407
-Files: 
- 563dac1cdd3ba922f9301fe074fbfc80 65836 non-us/main optional libkadm54_1.2.2-4_m68k.deb
- bb620f589c17ab0ebea1aa6e10ca52ad 272198 non-us/main optional libkrb53_1.2.2-4_m68k.deb
- 40af6e64b3030a179e0de25bd95c95e9 143264 non-us/main optional krb5-user_1.2.2-4_m68k.deb
- ffe4e5e7b2cab162dc608d56278276cf 141870 non-us/main optional krb5-clients_1.2.2-4_m68k.deb
- 4fe01d1acb4b82ce0b8b72652a9a15ae 54592 non-us/main optional krb5-rsh-server_1.2.2-4_m68k.deb
- b3c8c617ea72008a33b869b75d2485bf 41292 non-us/main optional krb5-ftpd_1.2.2-4_m68k.deb
- 5908f8f60fe536d7bfc1ef3fdd9d74cc 42090 non-us/main optional krb5-telnetd_1.2.2-4_m68k.deb
- 650ea769009a312396e56503d0059ebc 160236 non-us/main optional krb5-kdc_1.2.2-4_m68k.deb
- 399c9de4e9d7d0b0f5626793808a4391 160392 non-us/main optional krb5-admin-server_1.2.2-4_m68k.deb
- 6f962fe530c3187e986268b4e4d27de9 398662 non-us/main optional libkrb5-dev_1.2.2-4_m68k.deb
-
------BEGIN PGP SIGNATURE-----
-Version: 2.6.3i
-Charset: noconv
-
-iQCVAwUBOvVPPm547I3m3eHJAQHyaQP+M7RXVEqZ2/xHiPzaPcZRJ4q7o0zbMaU8
-qG/Mi6kuR1EhRNMjMH4Cp6ctbhRDHK5FR/8v7UkOd+ETDAhiw7eqJnLC60EZxZ/H
-CiOs8JklAXDERkQ3i7EYybv46Gxx91pIs2nE4xVKnG16d/wFELWMBLY6skF1B2/g
-zZju3cuFCCE=
-=Vm59
------END PGP SIGNATURE-----
-
-
diff --git a/dak/test/003/test.py b/dak/test/003/test.py
deleted file mode 100755 (executable)
index ce07c11..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-
-# Check utils.parse_changes()'s handling of multi-line fields
-# Copyright (C) 2000, 2006  James Troup <james@nocrew.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-# The deal here is that for the first 6 months of dak's
-# implementation it had been misparsing multi-line fields in .changes
-# files; specifically multi-line fields where there _is_ data on the
-# first line. So, for example:
-
-# Foo: bar baz
-#  bat bant
-
-# Became "foo: bar bazbat bant" rather than "foo: bar baz\nbat bant"
-
-################################################################################
-
-import os, sys
-
-sys.path.append(os.path.abspath('../../'))
-
-import utils
-
-################################################################################
-
-def fail(message):
-    sys.stderr.write("%s\n" % (message))
-    sys.exit(1)
-
-################################################################################
-
-def main ():
-    # Valid .changes file with a multi-line Binary: field
-    try:
-        changes = utils.parse_changes('krb5_1.2.2-4_m68k.changes', 0)
-    except utils.changes_parse_error_exc, line:
-        fail("parse_changes() returned an exception with error message `%s'." % (line))
-
-    o = changes.get("binary", "")
-    if o != "":
-        del changes["binary"]
-    changes["binary"] = {}
-    for j in o.split():
-        changes["binary"][j] = 1
-
-    if not changes["binary"].has_key("krb5-ftpd"):
-        fail("parse_changes() is broken; 'krb5-ftpd' is not in the Binary: dictionary.")
-
-################################################################################
-
-if __name__ == '__main__':
-    main()
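
The comment block above documents the continuation-line bug; a minimal sketch of the correct folding behaviour for the stated "Foo: bar baz / bat bant" example (illustrative only, not dak's parser):

    # Sketch: continuation lines (leading whitespace) must be joined to the
    # field with a newline, not concatenated directly onto the first line.
    def parse_multiline(lines):
        fields = {}
        current = None
        for line in lines:
            if line[:1].isspace() and current:
                # "Foo: bar baz" + " bat bant" -> "bar baz\nbat bant"
                fields[current] += "\n" + line.strip()
            elif ":" in line:
                (current, value) = line.split(":", 1)
                current = current.lower()
                fields[current] = value.strip()
        return fields

    assert parse_multiline(["Foo: bar baz", " bat bant"]) == {"foo": "bar baz\nbat bant"}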
diff --git a/dak/test/004/test.py b/dak/test/004/test.py
deleted file mode 100755 (executable)
index 4aa6b48..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python
-
-# Check utils.extract_component_from_section()
-# Copyright (C) 2000, 2006  James Troup <james@nocrew.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-import os, sys
-
-sys.path.append(os.path.abspath('../../'))
-
-import utils
-
-################################################################################
-
-def fail(message):
-    sys.stderr.write("%s\n" % (message))
-    sys.exit(1)
-
-################################################################################
-
-# prefix: non-US
-# component: main, contrib, non-free
-# section: games, admin, libs, [...]
-
-# [1] Order is as above.
-# [2] Prefix is optional for the default archive, but mandatory when
-#     uploads are going anywhere else.
-# [3] Default component is main and may be omitted.
-# [4] Section is optional.
-# [5] Prefix is case insensitive
-# [6] Everything else is case sensitive.
-
-def test(input, output):
-    result = utils.extract_component_from_section(input)
-    if result != output:
-        fail ("%s -> %r [should have been %r]" % (input, result, output))
-
-def main ():
-    # Err, whoops?  should probably be "utils", "main"...
-    input = "main/utils"; output = ("main/utils", "main")
-    test (input, output)
-
-
-    # Validate #3
-    input = "utils"; output = ("utils", "main")
-    test (input, output)
-
-    input = "non-free/libs"; output = ("non-free/libs", "non-free")
-    test (input, output)
-
-    input = "contrib/net"; output = ("contrib/net", "contrib")
-    test (input, output)
-
-
-    # Validate #3 with a prefix
-    input = "non-US"; output = ("non-US", "non-US/main")
-    test (input, output)
-
-
-    # Validate #4
-    input = "main"; output = ("main", "main")
-    test (input, output)
-
-    input = "contrib"; output = ("contrib", "contrib")
-    test (input, output)
-
-    input = "non-free"; output = ("non-free", "non-free")
-    test (input, output)
-
-
-    # Validate #4 with a prefix
-    input = "non-US/main"; output = ("non-US/main", "non-US/main")
-    test (input, output)
-
-    input = "non-US/contrib"; output = ("non-US/contrib", "non-US/contrib")
-    test (input, output)
-
-    input = "non-US/non-free"; output = ("non-US/non-free", "non-US/non-free")
-    test (input, output)
-
-
-    # Validate #5
-    input = "non-us"; output = ("non-us", "non-US/main")
-    test (input, output)
-
-    input = "non-us/contrib"; output = ("non-us/contrib", "non-US/contrib")
-    test (input, output)
-
-
-    # Validate #6 (section)
-    input = "utIls"; output = ("utIls", "main")
-    test (input, output)
-
-    # Others..
-    input = "non-US/libs"; output = ("non-US/libs", "non-US/main")
-    test (input, output)
-    input = "non-US/main/libs"; output = ("non-US/main/libs", "non-US/main")
-    test (input, output)
-    input = "non-US/contrib/libs"; output = ("non-US/contrib/libs", "non-US/contrib")
-    test (input, output)
-    input = "non-US/non-free/libs"; output = ("non-US/non-free/libs", "non-US/non-free")
-    test (input, output)
-
-################################################################################
-
-if __name__ == '__main__':
-    main()
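
The rules [1]-[6] spelled out above translate into roughly the following logic; this is a sketch that reproduces the expectations in the deleted test, while the real implementation is utils.extract_component_from_section() in daklib:

    # Sketch of rules [1]-[6]; returns (section, component) like the real code.
    def extract_component(section):
        component = "main"                  # rule [3]: main may be omitted
        parts = section.split('/')
        if parts[0].lower() == "non-us":    # rule [5]: prefix is case insensitive
            if len(parts) > 1 and parts[1] in ("main", "contrib", "non-free"):
                component = "non-US/%s" % (parts[1])
            else:
                component = "non-US/main"
        elif parts[0] in ("contrib", "non-free"):
            component = parts[0]
        return (section, component)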
diff --git a/dak/test/005/bogus-post.changes b/dak/test/005/bogus-post.changes
deleted file mode 100644 (file)
index 95e5a1f..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
------BEGIN PGP SIGNED MESSAGE-----
-Hash: SHA1
-
-Format: 1.7
-Date: Tue,  9 Sep 2003 01:16:01 +0100
-Source: gawk
-Binary: gawk
-Architecture: source i386
-Version: 1:3.1.3-2
-Distribution: unstable
-Urgency: low
-Maintainer: James Troup <james@nocrew.org>
-Changed-By: James Troup <james@nocrew.org>
-Description: 
- gawk       - GNU awk, a pattern scanning and processing language
-Closes: 204699 204701
-Changes: 
- gawk (1:3.1.3-2) unstable; urgency=low
- .
-   * debian/control (Standards-Version): bump to 3.6.1.0.
- .
-   * 02_fix-ascii.dpatch: new patch from upstream to fix [[:ascii:]].
-     Thanks to <vle@gmx.net> for reporting the bug and forwarding it
-     upstream.  Closes: #204701
- .
-   * 03_fix-high-char-ranges.dpatch: new patch from upstream to fix
-     [\x80-\xff].  Thanks to <vle@gmx.net> for reporting the bug and
-     forwarding it upstream.  Closes: #204699
-Files: 
- 0e6542c48bcc9d9586fc8ebe4e7242a4 561 interpreters optional gawk_3.1.3-2.dsc
- 50a29dce4a2c6e2ac38069eb7c41d9c4 8302 interpreters optional gawk_3.1.3-2.diff.gz
- 5a255c7b421ac699804212e10205f22d 871114 interpreters optional gawk_3.1.3-2_i386.deb
-
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v1.0.6 (GNU/Linux)
-
-iEYEARECAAYFAj9dHWsACgkQgD/uEicUG7DUnACglndvU4LCA0/k36Qp873N0Sau
-fCwAoMdgIOUBcUfMqXvVnxdW03ev5bNB
-=O7Gh
------END PGP SIGNATURE-----
-You: have been 0wned
diff --git a/dak/test/005/bogus-pre.changes b/dak/test/005/bogus-pre.changes
deleted file mode 100644 (file)
index 0234d8b..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-You: have been 0wned
------BEGIN PGP SIGNED MESSAGE-----
-Hash: SHA1
-
-Format: 1.7
-Date: Tue,  9 Sep 2003 01:16:01 +0100
-Source: gawk
-Binary: gawk
-Architecture: source i386
-Version: 1:3.1.3-2
-Distribution: unstable
-Urgency: low
-Maintainer: James Troup <james@nocrew.org>
-Changed-By: James Troup <james@nocrew.org>
-Description: 
- gawk       - GNU awk, a pattern scanning and processing language
-Closes: 204699 204701
-Changes: 
- gawk (1:3.1.3-2) unstable; urgency=low
- .
-   * debian/control (Standards-Version): bump to 3.6.1.0.
- .
-   * 02_fix-ascii.dpatch: new patch from upstream to fix [[:ascii:]].
-     Thanks to <vle@gmx.net> for reporting the bug and forwarding it
-     upstream.  Closes: #204701
- .
-   * 03_fix-high-char-ranges.dpatch: new patch from upstream to fix
-     [\x80-\xff].  Thanks to <vle@gmx.net> for reporting the bug and
-     forwarding it upstream.  Closes: #204699
-Files: 
- 0e6542c48bcc9d9586fc8ebe4e7242a4 561 interpreters optional gawk_3.1.3-2.dsc
- 50a29dce4a2c6e2ac38069eb7c41d9c4 8302 interpreters optional gawk_3.1.3-2.diff.gz
- 5a255c7b421ac699804212e10205f22d 871114 interpreters optional gawk_3.1.3-2_i386.deb
-
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v1.0.6 (GNU/Linux)
-
-iEYEARECAAYFAj9dHWsACgkQgD/uEicUG7DUnACglndvU4LCA0/k36Qp873N0Sau
-fCwAoMdgIOUBcUfMqXvVnxdW03ev5bNB
-=O7Gh
------END PGP SIGNATURE-----
diff --git a/dak/test/005/test.py b/dak/test/005/test.py
deleted file mode 100755 (executable)
index b5d3bbc..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python
-
-# Check utils.parse_changes() correctly ignores data outside the signed area
-# Copyright (C) 2004, 2006  James Troup <james@nocrew.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-import os, sys
-
-sys.path.append(os.path.abspath('../../'))
-
-import utils
-
-################################################################################
-
-def fail(message):
-    sys.stderr.write("%s\n" % (message))
-    sys.exit(1)
-
-################################################################################
-
-def main ():
-    for file in [ "valid", "bogus-pre", "bogus-post" ]:
-        for strict_whitespace in [ 0, 1 ]:
-            try:
-                changes = utils.parse_changes("%s.changes" % (file), strict_whitespace)
-            except utils.changes_parse_error_exc, line:
-                fail("%s[%s]: parse_changes() returned an exception with error message `%s'." % (file, strict_whitespace, line))
-            oh_dear = changes.get("you")
-            if oh_dear:
-                fail("%s[%s]: parsed and accepted unsigned data!" % (file, strict_whitespace))
-
-################################################################################
-
-if __name__ == '__main__':
-    main()
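
What this test pins down is that parse_changes() only trusts the armoured region, so the "You: have been 0wned" lines in the bogus fixtures never reach the parsed dictionary. A hedged sketch of that clamping, illustrative and not dak's code:

    # Keep only lines inside the PGP signed-message armour; anything
    # before BEGIN PGP SIGNED MESSAGE or after the signature is dropped.
    def signed_region(lines):
        inside = False
        kept = []
        for line in lines:
            if line.startswith("-----BEGIN PGP SIGNED MESSAGE-----"):
                inside = True
                continue
            if line.startswith("-----BEGIN PGP SIGNATURE-----"):
                break
            if inside:
                kept.append(line)
        return kept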
diff --git a/dak/test/005/valid.changes b/dak/test/005/valid.changes
deleted file mode 100644 (file)
index 0e77d27..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
------BEGIN PGP SIGNED MESSAGE-----
-Hash: SHA1
-
-Format: 1.7
-Date: Tue,  9 Sep 2003 01:16:01 +0100
-Source: gawk
-Binary: gawk
-Architecture: source i386
-Version: 1:3.1.3-2
-Distribution: unstable
-Urgency: low
-Maintainer: James Troup <james@nocrew.org>
-Changed-By: James Troup <james@nocrew.org>
-Description: 
- gawk       - GNU awk, a pattern scanning and processing language
-Closes: 204699 204701
-Changes: 
- gawk (1:3.1.3-2) unstable; urgency=low
- .
-   * debian/control (Standards-Version): bump to 3.6.1.0.
- .
-   * 02_fix-ascii.dpatch: new patch from upstream to fix [[:ascii:]].
-     Thanks to <vle@gmx.net> for reporting the bug and forwarding it
-     upstream.  Closes: #204701
- .
-   * 03_fix-high-char-ranges.dpatch: new patch from upstream to fix
-     [\x80-\xff].  Thanks to <vle@gmx.net> for reporting the bug and
-     forwarding it upstream.  Closes: #204699
-Files: 
- 0e6542c48bcc9d9586fc8ebe4e7242a4 561 interpreters optional gawk_3.1.3-2.dsc
- 50a29dce4a2c6e2ac38069eb7c41d9c4 8302 interpreters optional gawk_3.1.3-2.diff.gz
- 5a255c7b421ac699804212e10205f22d 871114 interpreters optional gawk_3.1.3-2_i386.deb
-
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v1.0.6 (GNU/Linux)
-
-iEYEARECAAYFAj9dHWsACgkQgD/uEicUG7DUnACglndvU4LCA0/k36Qp873N0Sau
-fCwAoMdgIOUBcUfMqXvVnxdW03ev5bNB
-=O7Gh
------END PGP SIGNATURE-----
diff --git a/dak/test/006/test.py b/dak/test/006/test.py
deleted file mode 100755 (executable)
index 51a3317..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Test textutils.fix_maintainer()
-# Copyright (C) 2004, 2006  James Troup <james@nocrew.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-import os, sys
-
-sys.path.append(os.path.abspath('../../'))
-
-import textutils
-
-################################################################################
-
-def fail(message):
-    sys.stderr.write("%s\n" % (message))
-    sys.exit(1)
-
-################################################################################
-
-def check_valid(s, xa, xb, xc, xd):
-    (a, b, c, d) = textutils.fix_maintainer(s)
-    if a != xa:
-        fail("rfc822_maint: %s (returned) != %s (expected [From: '%s']" % (a, xa, s))
-    if b != xb:
-        fail("rfc2047_maint: %s (returned) != %s (expected [From: '%s']" % (b, xb, s))
-    if c != xc:
-        fail("name: %s (returned) != %s (expected [From: '%s']" % (c, xc, s))
-    if d != xd:
-        fail("email: %s (returned) != %s (expected [From: '%s']" % (d, xd, s))
-
-def check_invalid(s):
-    try:
-        textutils.fix_maintainer(s)
-        fail("%s was parsed successfully but is expected to be invalid." % (s))
-    except utils.ParseMaintError, unused:
-        pass
-
-def main ():
-    # Check Valid UTF-8 maintainer field
-    s = "Noèl Köthe <noel@debian.org>"
-    xa = "Noèl Köthe <noel@debian.org>"
-    xb = "=?utf-8?b?Tm/DqGwgS8O2dGhl?= <noel@debian.org>"
-    xc = "Noèl Köthe"
-    xd = "noel@debian.org"
-    check_valid(s, xa, xb, xc, xd)
-
-    # Check valid ISO-8859-1 maintainer field
-    s = "Noèl Köthe <noel@debian.org>"
-    xa = "Noèl Köthe <noel@debian.org>"
-    xb = "=?iso-8859-1?q?No=E8l_K=F6the?= <noel@debian.org>"
-    xc = "Noèl Köthe"
-    xd = "noel@debian.org"
-    check_valid(s, xa, xb, xc, xd)
-
-    # Check valid ASCII maintainer field
-    s = "James Troup <james@nocrew.org>"
-    xa = "James Troup <james@nocrew.org>"
-    xb = "James Troup <james@nocrew.org>"
-    xc = "James Troup"
-    xd = "james@nocrew.org"
-    check_valid(s, xa, xb, xc, xd)
-
-    # Check "Debian vs RFC822" fixup of names with '.' or ',' in them
-    s = "James J. Troup <james@nocrew.org>"
-    xa = "james@nocrew.org (James J. Troup)"
-    xb = "james@nocrew.org (James J. Troup)"
-    xc = "James J. Troup"
-    xd = "james@nocrew.org"
-    check_valid(s, xa, xb, xc, xd)
-    s = "James J, Troup <james@nocrew.org>"
-    xa = "james@nocrew.org (James J, Troup)"
-    xb = "james@nocrew.org (James J, Troup)"
-    xc = "James J, Troup"
-    xd = "james@nocrew.org"
-    check_valid(s, xa, xb, xc, xd)
-
-    # Check just-email form
-    s = "james@nocrew.org"
-    xa = " <james@nocrew.org>"
-    xb = " <james@nocrew.org>"
-    xc = ""
-    xd = "james@nocrew.org"
-    check_valid(s, xa, xb, xc, xd)
-
-    # Check bracketed just-email form
-    s = "<james@nocrew.org>"
-    xa = " <james@nocrew.org>"
-    xb = " <james@nocrew.org>"
-    xc = ""
-    xd = "james@nocrew.org"
-    check_valid(s, xa, xb, xc, xd)
-
-    # Check Krazy quoted-string local part email address
-    s = "Cris van Pelt <\"Cris van Pelt\"@tribe.eu.org>"
-    xa = "Cris van Pelt <\"Cris van Pelt\"@tribe.eu.org>"
-    xb = "Cris van Pelt <\"Cris van Pelt\"@tribe.eu.org>"
-    xc = "Cris van Pelt"
-    xd = "\"Cris van Pelt\"@tribe.eu.org"
-    check_valid(s, xa, xb, xc, xd)
-
-    # Check empty string
-    s = xa = xb = xc = xd = ""
-    check_valid(s, xa, xb, xc, xd)
-
-    # Check for missing email address
-    check_invalid("James Troup")
-    # Check for invalid email address
-    check_invalid("James Troup <james@nocrew.org")
-
-################################################################################
-
-if __name__ == '__main__':
-    main()
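
The '.'/',' cases above exist because a bare RFC822 phrase may not contain those characters; a sketch of just that fixup, illustrative only, since the real textutils.fix_maintainer() returns a 4-tuple and also derives the RFC2047 form:

    # "Debian vs RFC822" fixup exercised by the test above.
    def rfc822_maint(name, email):
        if name.find(',') != -1 or name.find('.') != -1:
            # "James J. Troup <james@nocrew.org>" is not valid RFC822, so
            # flip to the comment form: "james@nocrew.org (James J. Troup)"
            return "%s (%s)" % (email, name)
        return "%s <%s>" % (name, email)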
index 4e7704e42a57883e2f2643d83a4630ead71e8309..49a6b584d1fc59ea76f5b2cefde9380b4a6452ca 100755 (executable)
@@ -39,12 +39,13 @@ import time
 import errno
 
 from daklib import utils
+from daklib.config import Config
 from daklib.dak_exceptions import DBUpdateError
 
 ################################################################################
 
 Cnf = None
-required_database_schema = 21
+required_database_schema = 23
 
 ################################################################################
 
@@ -104,12 +105,13 @@ Updates dak's database schema to the latest version. You should disable crontab
     def update_db(self):
         # Ok, try and find the configuration table
         print "Determining dak database revision ..."
+        cnf = Config()
 
         try:
             # Build a connect string
-            connect_str = "dbname=%s"% (Cnf["DB::Name"])
-            if Cnf["DB::Host"] != '': connect_str += " host=%s" % (Cnf["DB::Host"])
-            if Cnf["DB::Port"] != '-1': connect_str += " port=%d" % (int(Cnf["DB::Port"]))
+            connect_str = "dbname=%s"% (cnf["DB::Name"])
+            if cnf["DB::Host"] != '': connect_str += " host=%s" % (cnf["DB::Host"])
+            if cnf["DB::Port"] != '-1': connect_str += " port=%d" % (int(cnf["DB::Port"]))
 
             self.db = psycopg2.connect(connect_str)
 
@@ -133,22 +135,22 @@ Updates dak's database schema to the latest version. You should disable crontab
             self.update_db_to_zero()
             database_revision = 0
 
-        print "dak database schema at " + str(database_revision)
-        print "dak version requires schema " + str(required_database_schema)
+        print "dak database schema at %d" % database_revision
+        print "dak version requires schema %d"  % required_database_schema
 
         if database_revision == required_database_schema:
             print "no updates required"
             sys.exit(0)
 
         for i in range (database_revision, required_database_schema):
-            print "updating database schema from " + str(database_revision) + " to " + str(i+1)
+            print "updating database schema from %d to %d" % (database_revision, i+1)
             try:
                 dakdb = __import__("dakdb", globals(), locals(), ['update'+str(i+1)])
                 update_module = getattr(dakdb, "update"+str(i+1))
                 update_module.do_update(self)
             except DBUpdateError, e:
                 # Seems the update did not work.
-                print "Was unable to update database schema from %s to %s." % (str(database_revision), str(i+1))
+                print "Was unable to update database schema from %d to %d." % (database_revision, i+1)
                 print "The error message received was %s" % (e)
                 utils.fubar("DB Schema upgrade failed")
             database_revision += 1
@@ -156,33 +158,30 @@ Updates dak's database schema to the latest version. You should disable crontab
 ################################################################################
 
     def init (self):
-        global Cnf
-
-        Cnf = utils.get_conf()
+        cnf = Config()
         arguments = [('h', "help", "Update-DB::Options::Help")]
         for i in [ "help" ]:
-            if not Cnf.has_key("Update-DB::Options::%s" % (i)):
-                Cnf["Update-DB::Options::%s" % (i)] = ""
+            if not cnf.has_key("Update-DB::Options::%s" % (i)):
+                cnf["Update-DB::Options::%s" % (i)] = ""
 
-        arguments = apt_pkg.ParseCommandLine(Cnf, arguments, sys.argv)
+        arguments = apt_pkg.ParseCommandLine(cnf.Cnf, arguments, sys.argv)
 
-        options = Cnf.SubTree("Update-DB::Options")
+        options = cnf.SubTree("Update-DB::Options")
         if options["Help"]:
             self.usage()
         elif arguments:
             utils.warn("dak update-db takes no arguments.")
             self.usage(exit_code=1)
 
-
-        self.update_db()
-
         try:
-            lock_fd = os.open(Cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
+            lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT)
             fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
         except IOError, e:
             if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN':
                 utils.fubar("Couldn't obtain lock; assuming another 'dak process-unchecked' is already running.")
 
+        self.update_db()
+
 
 ################################################################################
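
The update loop above expects each dakdb/updateNN.py module to expose a do_update(self) callable. A hedged sketch of that contract, derived from the loop; the DDL statement and revision number here are illustrative, not taken from a real update module:

    import psycopg2
    from daklib.dak_exceptions import DBUpdateError

    def do_update(self):
        """Apply one schema revision; 'self' is the update_db command object."""
        try:
            c = self.db.cursor()
            c.execute("ALTER TABLE example ADD COLUMN example_col TEXT")  # hypothetical DDL
            c.execute("UPDATE config SET value = '24' WHERE name = 'db_revision'")
            self.db.commit()
        except psycopg2.ProgrammingError, msg:
            self.db.rollback()
            raise DBUpdateError, "Unable to apply update, rollback issued: %s" % (str(msg))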
 
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
index fd09cb7..c1f8f5b
@@ -177,17 +177,9 @@ class Changes(object):
 
         return summary
 
+    @session_wrapper
     def remove_known_changes(self, session=None):
-        if session is None:
-            session = DBConn().session()
-            privatetrans = True
-
-        session.delete(get_knownchange(self.changes_file, session))
-
-        if privatetrans:
-            session.commit()
-            session.close()
-
+        session.delete(get_dbchange(self.changes_file, session))
 
     def mark_missing_fields(self):
         """add "missing" in fields which we will require for the known_changes table"""
@@ -195,180 +187,46 @@ class Changes(object):
             if (not self.changes.has_key(key)) or (not self.changes[key]):
                 self.changes[key]='missing'
 
+    @session_wrapper
     def add_known_changes(self, dirpath, session=None):
         """add "missing" in fields which we will require for the known_changes table"""
         cnf = Config()
-        privatetrans = False
-        if session is None:
-            session = DBConn().session()
-            privatetrans = True
 
         changesfile = os.path.join(dirpath, self.changes_file)
         filetime = datetime.datetime.fromtimestamp(os.path.getctime(changesfile))
 
         self.mark_missing_fields()
 
+        multivalues = {}
+        for key in ("distribution", "architecture", "binary"):
+            if isinstance(self.changes[key], dict):
+                multivalues[key] = " ".join(self.changes[key].keys())
+            else:
+                multivalues[key] = self.changes[key]
+
+        # TODO: Use ORM
         session.execute(
-            """INSERT INTO known_changes
+            """INSERT INTO changes
               (changesname, seen, source, binaries, architecture, version,
               distribution, urgency, maintainer, fingerprint, changedby, date)
               VALUES (:changesfile,:filetime,:source,:binary, :architecture,
               :version,:distribution,:urgency,:maintainer,:fingerprint,:changedby,:date)""",
-              { 'changesfile':self.changes_file,
-                'filetime':filetime,
-                'source':self.changes["source"],
-                'binary':self.changes["binary"],
-                'architecture':self.changes["architecture"],
-                'version':self.changes["version"],
-                'distribution':self.changes["distribution"],
-                'urgency':self.changes["urgency"],
-                'maintainer':self.changes["maintainer"],
-                'fingerprint':self.changes["fingerprint"],
-                'changedby':self.changes["changed-by"],
-                'date':self.changes["date"]} )
-
-        if privatetrans:
-            session.commit()
-            session.close()
-
-    def load_dot_dak(self, changesfile):
-        """
-        Update ourself by reading a previously created cPickle .dak dumpfile.
-        """
-
-        self.changes_file = changesfile
-        dump_filename = self.changes_file[:-8]+".dak"
-        dump_file = open_file(dump_filename)
-
-        p = Unpickler(dump_file)
-
-        self.changes.update(p.load())
-        self.dsc.update(p.load())
-        self.files.update(p.load())
-        self.dsc_files.update(p.load())
-
-        next_obj = p.load()
-        if isinstance(next_obj, dict):
-            self.orig_files.update(next_obj)
-        else:
-            # Auto-convert old dak files to new format supporting
-            # multiple tarballs
-            orig_tar_gz = None
-            for dsc_file in self.dsc_files.keys():
-                if dsc_file.endswith(".orig.tar.gz"):
-                    orig_tar_gz = dsc_file
-            self.orig_files[orig_tar_gz] = {}
-            if next_obj != None:
-                self.orig_files[orig_tar_gz]["id"] = next_obj
-            next_obj = p.load()
-            if next_obj != None and next_obj != "":
-                self.orig_files[orig_tar_gz]["location"] = next_obj
-            if len(self.orig_files[orig_tar_gz]) == 0:
-                del self.orig_files[orig_tar_gz]
-
-        dump_file.close()
-
-    def sanitised_files(self):
-        ret = {}
-        for name, entry in self.files.items():
-            ret[name] = {}
-            for i in CHANGESFIELDS_FILES:
-                if entry.has_key(i):
-                    ret[name][i] = entry[i]
-
-        return ret
-
-    def sanitised_changes(self):
-        ret = {}
-        # Mandatory changes fields
-        for i in CHANGESFIELDS_MANDATORY:
-            ret[i] = self.changes[i]
-
-        # Optional changes fields
-        for i in CHANGESFIELDS_OPTIONAL:
-            if self.changes.has_key(i):
-                ret[i] = self.changes[i]
-
-        return ret
-
-    def sanitised_dsc(self):
-        ret = {}
-        for i in CHANGESFIELDS_DSC:
-            if self.dsc.has_key(i):
-                ret[i] = self.dsc[i]
-
-        return ret
-
-    def sanitised_dsc_files(self):
-        ret = {}
-        for name, entry in self.dsc_files.items():
-            ret[name] = {}
-            # Mandatory dsc_files fields
-            for i in CHANGESFIELDS_DSCFILES_MANDATORY:
-                ret[name][i] = entry[i]
-
-            # Optional dsc_files fields
-            for i in CHANGESFIELDS_DSCFILES_OPTIONAL:
-                if entry.has_key(i):
-                    ret[name][i] = entry[i]
-
-        return ret
-
-    def sanitised_orig_files(self):
-        ret = {}
-        for name, entry in self.orig_files.items():
-            ret[name] = {}
-            # Optional orig_files fields
-            for i in CHANGESFIELDS_ORIGFILES:
-                if entry.has_key(i):
-                    ret[name][i] = entry[i]
-
-        return ret
-
-    def write_dot_dak(self, dest_dir):
-        """
-        Dump ourself into a cPickle file.
-
-        @type dest_dir: string
-        @param dest_dir: Path where the dumpfile should be stored
-
-        @note: This could just dump the dictionaries as is, but I'd like to avoid this so
-               there's some idea of what process-accepted & process-new use from
-               process-unchecked. (JT)
-
-        """
-
-        dump_filename = os.path.join(dest_dir, self.changes_file[:-8] + ".dak")
-        dump_file = open_file(dump_filename, 'w')
-
-        try:
-            os.chmod(dump_filename, 0664)
-        except OSError, e:
-            # chmod may fail when the dumpfile is not owned by the user
-            # invoking dak (like e.g. when NEW is processed by a member
-            # of ftpteam)
-            if e.errno == EPERM:
-                perms = stat.S_IMODE(os.stat(dump_filename)[stat.ST_MODE])
-                # security precaution, should never happen unless a weird
-                # umask is set anywhere
-                if perms & stat.S_IWOTH:
-                    fubar("%s is world writable and chmod failed." % \
-                        (dump_filename,))
-                # ignore the failed chmod otherwise as the file should
-                # already have the right privileges and is just, at worst,
-                # unreadable for world
-            else:
-                raise
-
-        p = Pickler(dump_file, 1)
-
-        p.dump(self.sanitised_changes())
-        p.dump(self.sanitised_dsc())
-        p.dump(self.sanitised_files())
-        p.dump(self.sanitised_dsc_files())
-        p.dump(self.sanitised_orig_files())
-
-        dump_file.close()
+              { 'changesfile':  self.changes_file,
+                'filetime':     filetime,
+                'source':       self.changes["source"],
+                'binary':       multivalues["binary"],
+                'architecture': multivalues["architecture"],
+                'version':      self.changes["version"],
+                'distribution': multivalues["distribution"],
+                'urgency':      self.changes["urgency"],
+                'maintainer':   self.changes["maintainer"],
+                'fingerprint':  self.changes["fingerprint"],
+                'changedby':    self.changes["changed-by"],
+                'date':         self.changes["date"]} )
+
+        session.commit()
+
+        return session.query(DBChange).filter_by(changesname = self.changes_file).one()
 
     def unknown_files_fields(self, name):
         return sorted(list( set(self.files[name].keys()) -
old mode 100755 (executable)
new mode 100644 (file)
index 2f24cd3..2d0b8e8
@@ -32,25 +32,27 @@ import os
 import apt_pkg
 import socket
 
-from singleton import Singleton
-
 ################################################################################
 
 default_config = "/etc/dak/dak.conf" #: default dak config, defines host properties
 
 def which_conf_file():
-    if os.getenv("DAK_CONFIG"):
-        return os.getenv("DAK_CONFIG")
-    else:
-        return default_config
+    return os.getenv("DAK_CONFIG", default_config)
 
-class Config(Singleton):
+class Config(object):
     """
     A Config object holds information about the DAK
     configuration; all instances share one state (Borg pattern)
     """
+
+    __shared_state = {}
+
     def __init__(self, *args, **kwargs):
-        super(Config, self).__init__(*args, **kwargs)
+        self.__dict__ = self.__shared_state
+
+        if not getattr(self, 'initialised', False):
+            self.initialised = True
+            self._readconf()
 
     def _readconf(self):
         apt_pkg.init()
@@ -74,9 +76,6 @@ class Config(Singleton):
         self.Find = self.Cnf.Find
         self.FindB = self.Cnf.FindB
 
-    def _startup(self, *args, **kwargs):
-        self._readconf()
-
     def has_key(self, name):
         return self.Cnf.has_key(name)
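
Config now uses the shared-state ("Borg") pattern instead of the Singleton base class: every instance assigns its __dict__ to one class-level dict, so construction is cheap and _readconf() runs only once, guarded by the 'initialised' flag. In practice:

    from daklib.config import Config

    a = Config()
    b = Config()
    # Both instances share one state, so the apt_pkg configuration is
    # parsed once and visible everywhere:
    assert a.Cnf is b.Cnf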
 
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
index 921f1da..d05dd15
@@ -37,7 +37,7 @@ import os
 import re
 import psycopg2
 import traceback
-import datetime
+from datetime import datetime
 
 from inspect import getargspec
 
@@ -53,7 +53,6 @@ from sqlalchemy.orm.exc import NoResultFound
 # Only import Config until Queue stuff is changed to store its config
 # in the database
 from config import Config
-from singleton import Singleton
 from textutils import fix_maintainer
 
 ################################################################################
@@ -125,6 +124,8 @@ def session_wrapper(fn):
 
     return wrapped
 
+__all__.append('session_wrapper')
+
 ################################################################################
 
 class Architecture(object):
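
session_wrapper is now exported so other modules (changes.py above already uses it) can share the session convention visible throughout this file: if the caller passes no session, the wrapper opens a private one and cleans it up afterwards. A rough sketch, inferred from usage; the real decorator also inspects the wrapped function's signature via getargspec:

    def session_wrapper_sketch(fn):
        def wrapped(*args, **kwargs):
            private = kwargs.get('session') is None
            if private:
                kwargs['session'] = DBConn().session()
            try:
                return fn(*args, **kwargs)
            finally:
                # Simplified: the real wrapper decides between commit and
                # flush before closing a privately opened session.
                if private:
                    kwargs['session'].close()
        return wrapped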
@@ -430,6 +431,132 @@ __all__.append('BinaryACLMap')
 
 ################################################################################
 
+class BuildQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueue %s>' % self.queue_name
+
+    def add_file_from_pool(self, poolfile):
+        """Copies a file into the pool.  Assumes that the PoolFile object is
+        attached to the same SQLAlchemy session as the Queue object is.
+
+        The caller is responsible for committing after calling this function."""
+        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
+
+        # Check if we have a file of this name or this ID already
+        for f in self.queuefiles:
+            if f.fileid is not None and f.fileid == poolfile.file_id or \
+               f.poolfile.filename == poolfile_basename:
+                   # In this case, update the BuildQueueFile entry so we
+                   # don't remove it too early
+                   f.lastused = datetime.now()
+                   DBConn().session().object_session(poolfile).add(f)
+                   return f
+
+        # Prepare BuildQueueFile object
+        qf = BuildQueueFile()
+        qf.build_queue_id = self.queue_id
+        qf.lastused = datetime.now()
+        qf.filename = poolfile_basename
+
+        targetpath = poolfile.fullpath
+        queuepath = os.path.join(self.path, poolfile_basename)
+
+        try:
+            if self.copy_files:
+                # We need to copy instead of symlink
+                import utils
+                utils.copy(targetpath, queuepath)
+                # NULL in the fileid field implies a copy
+                qf.fileid = None
+            else:
+                os.symlink(targetpath, queuepath)
+                qf.fileid = poolfile.file_id
+        except OSError:
+            return None
+
+        # Get the same session as the PoolFile is using and add the qf to it
+        DBConn().session().object_session(poolfile).add(qf)
+
+        return qf
+
+
+__all__.append('BuildQueue')
+
+@session_wrapper
+def get_build_queue(queuename, session=None):
+    """
+    Returns BuildQueue object for given C{queue name}, or None if no such
+    queue exists.
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: BuildQueue
+    @return: BuildQueue object for the given queue
+    """
+
+    q = session.query(BuildQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_build_queue')
+
+################################################################################
+
+class BuildQueueFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<BuildQueueFile %s (%s)>' % (self.filename, self.queue_id)
+
+__all__.append('BuildQueueFile')
+
+################################################################################
+
+class ChangePendingBinary(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
+
+__all__.append('ChangePendingBinary')
+
+################################################################################
+
+class ChangePendingFile(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingFile %s>' % self.change_pending_file_id
+
+__all__.append('ChangePendingFile')
+
+################################################################################
+
+class ChangePendingSource(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<ChangePendingSource %s>' % self.change_pending_source_id
+
+__all__.append('ChangePendingSource')
+
+################################################################################
+
 class Component(object):
     def __init__(self, *args, **kwargs):
         pass
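
Since add_file_from_pool() never commits, callers pair it with an explicit commit. A hedged usage sketch; the queue name and file id are made up:

    session = DBConn().session()
    queue = get_build_queue('buildd', session)        # hypothetical queue name
    poolfile = get_poolfile_by_id(1, session)         # hypothetical file id
    if queue.add_file_from_pool(poolfile) is None:
        print "symlink/copy into the queue directory failed"
    session.commit()    # the method leaves this to the caller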
@@ -855,6 +982,39 @@ def get_poolfile_like_name(filename, session=None):
 
 __all__.append('get_poolfile_like_name')
 
+@session_wrapper
+def add_poolfile(filename, datadict, location_id, session=None):
+    """
+    Add a new file to the pool
+
+    @type filename: string
+    @param filename: filename
+
+    @type datadict: dict
+    @param datadict: dict with needed data
+
+    @type location_id: int
+    @param location_id: database id of the location
+
+    @rtype: PoolFile
+    @return: the PoolFile object created
+    """
+    poolfile = PoolFile()
+    poolfile.filename = filename
+    poolfile.filesize = datadict["size"]
+    poolfile.md5sum = datadict["md5sum"]
+    poolfile.sha1sum = datadict["sha1sum"]
+    poolfile.sha256sum = datadict["sha256sum"]
+    poolfile.location_id = location_id
+
+    session.add(poolfile)
+    # Flush to get a file id (NB: This is not a commit)
+    session.flush()
+
+    return poolfile
+
+__all__.append('add_poolfile')
+
 ################################################################################
 
 class Fingerprint(object):
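
add_poolfile() flushes instead of committing so the caller gets a usable file id while the whole upload stays in one transaction. A hedged usage sketch; the checksums and location id are placeholders:

    session = DBConn().session()
    datadict = {'size': 4817, 'md5sum': '...', 'sha1sum': '...', 'sha256sum': '...'}
    pf = add_poolfile('pool/main/a/amaya/amaya_3.2.1-1.diff.gz', datadict, 1, session)
    print pf.file_id    # valid already, thanks to session.flush()
    session.commit()    # the insert only becomes permanent here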
@@ -1094,19 +1254,19 @@ __all__.append('KeyringACLMap')
 
 ################################################################################
 
-class KnownChange(object):
+class DBChange(object):
     def __init__(self, *args, **kwargs):
         pass
 
     def __repr__(self):
-        return '<KnownChange %s>' % self.changesname
+        return '<DBChange %s>' % self.changesname
 
-__all__.append('KnownChange')
+__all__.append('DBChange')
 
 @session_wrapper
-def get_knownchange(filename, session=None):
+def get_dbchange(filename, session=None):
     """
-    returns knownchange object for given C{filename}.
+    returns DBChange object for given C{filename}.
 
     @type filename: string
     @param filename: the name of the .changes file
@@ -1119,25 +1279,14 @@ def get_knownchange(filename, session=None):
     @return: DBChange object for the given name (None if not present)
 
     """
-    q = session.query(KnownChange).filter_by(changesname=filename)
+    q = session.query(DBChange).filter_by(changesname=filename)
 
     try:
         return q.one()
     except NoResultFound:
         return None
 
-__all__.append('get_knownchange')
-
-################################################################################
-
-class KnownChangePendingFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<KnownChangePendingFile %s>' % self.known_change_pending_file_id
-
-__all__.append('KnownChangePendingFile')
+__all__.append('get_dbchange')
 
 ################################################################################
 
@@ -1524,6 +1673,42 @@ __all__.append('insert_pending_content_paths')
 
 ################################################################################
 
+class PolicyQueue(object):
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def __repr__(self):
+        return '<PolicyQueue %s>' % self.queue_name
+
+__all__.append('PolicyQueue')
+
+@session_wrapper
+def get_policy_queue(queuename, session=None):
+    """
+    Returns PolicyQueue object for given C{queue name}
+
+    @type queuename: string
+    @param queuename: The name of the queue
+
+    @type session: Session
+    @param session: Optional SQLA session object (a temporary one will be
+    generated if not supplied)
+
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given queue
+    """
+
+    q = session.query(PolicyQueue).filter_by(queue_name=queuename)
+
+    try:
+        return q.one()
+    except NoResultFound:
+        return None
+
+__all__.append('get_policy_queue')
+
+################################################################################
+
 class Priority(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1594,99 +1779,6 @@ __all__.append('get_priorities')
 
 ################################################################################
 
-class Queue(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<Queue %s>' % self.queue_name
-
-    def add_file_from_pool(self, poolfile):
-        """Copies a file into the pool.  Assumes that the PoolFile object is
-        attached to the same SQLAlchemy session as the Queue object is.
-
-        The caller is responsible for committing after calling this function."""
-        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-
-        # Check if we have a file of this name or this ID already
-        for f in self.queuefiles:
-            if f.fileid is not None and f.fileid == poolfile.file_id or \
-               f.poolfile.filename == poolfile_basename:
-                   # In this case, update the QueueFile entry so we
-                   # don't remove it too early
-                   f.lastused = datetime.now()
-                   DBConn().session().object_session(pf).add(f)
-                   return f
-
-        # Prepare QueueFile object
-        qf = QueueFile()
-        qf.queue_id = self.queue_id
-        qf.lastused = datetime.now()
-        qf.filename = dest
-
-        targetpath = qf.fullpath
-        queuepath = os.path.join(self.path, poolfile_basename)
-
-        try:
-            if self.copy_pool_files:
-                # We need to copy instead of symlink
-                import utils
-                utils.copy(targetfile, queuepath)
-                # NULL in the fileid field implies a copy
-                qf.fileid = None
-            else:
-                os.symlink(targetfile, queuepath)
-                qf.fileid = poolfile.file_id
-        except OSError:
-            return None
-
-        # Get the same session as the PoolFile is using and add the qf to it
-        DBConn().session().object_session(poolfile).add(qf)
-
-        return qf
-
-
-__all__.append('Queue')
-
-@session_wrapper
-def get_queue(queuename, session=None):
-    """
-    Returns Queue object for given C{queue name}, creating it if it does not
-    exist.
-
-    @type queuename: string
-    @param queuename: The name of the queue
-
-    @type session: Session
-    @param session: Optional SQLA session object (a temporary one will be
-    generated if not supplied)
-
-    @rtype: Queue
-    @return: Queue object for the given queue
-    """
-
-    q = session.query(Queue).filter_by(queue_name=queuename)
-
-    try:
-        return q.one()
-    except NoResultFound:
-        return None
-
-__all__.append('get_queue')
-
-################################################################################
-
-class QueueFile(object):
-    def __init__(self, *args, **kwargs):
-        pass
-
-    def __repr__(self):
-        return '<QueueFile %s (%s)>' % (self.filename, self.queue_id)
-
-__all__.append('QueueFile')
-
-################################################################################
-
 class Section(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -1917,6 +2009,186 @@ __all__.append('get_source_in_suite')
 
 ################################################################################
 
+@session_wrapper
+def add_dsc_to_db(u, filename, session=None):
+    entry = u.pkg.files[filename]
+    source = DBSource()
+    pfs = []
+
+    source.source = u.pkg.dsc["source"]
+    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
+    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
+    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    source.install_date = datetime.now().date()
+
+    dsc_component = entry["component"]
+    dsc_location_id = entry["location id"]
+
+    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
+
+    # Set up a new poolfile if necessary
+    if not entry.has_key("files id") or not entry["files id"]:
+        filename = entry["pool name"] + filename
+        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
+        session.flush()
+        pfs.append(poolfile)
+        entry["files id"] = poolfile.file_id
+
+    source.poolfile_id = entry["files id"]
+    session.add(source)
+    session.flush()
+
+    for suite_name in u.pkg.changes["distribution"].keys():
+        sa = SrcAssociation()
+        sa.source_id = source.source_id
+        sa.suite_id = get_suite(suite_name).suite_id
+        session.add(sa)
+
+    session.flush()
+
+    # Add the source files to the DB (files and dsc_files)
+    dscfile = DSCFile()
+    dscfile.source_id = source.source_id
+    dscfile.poolfile_id = entry["files id"]
+    session.add(dscfile)
+
+    for dsc_file, dentry in u.pkg.dsc_files.items():
+        df = DSCFile()
+        df.source_id = source.source_id
+
+        # If the .orig tarball is already in the pool, it's
+        # files id is stored in dsc_files by check_dsc().
+        files_id = dentry.get("files id", None)
+
+        # Find the entry in the files hash
+        # TODO: Bail out here properly
+        dfentry = None
+        for f, e in u.pkg.files.items():
+            if f == dsc_file:
+                dfentry = e
+                break
+
+        if files_id is None:
+            filename = dfentry["pool name"] + dsc_file
+
+            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
+            # FIXME: needs to check for -1/-2 and or handle exception
+            if found and obj is not None:
+                files_id = obj.file_id
+                pfs.append(obj)
+
+            # If still not found, add it
+            if files_id is None:
+                # HACK: Force sha1sum etc into dentry
+                dentry["sha1sum"] = dfentry["sha1sum"]
+                dentry["sha256sum"] = dfentry["sha256sum"]
+                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
+                pfs.append(poolfile)
+                files_id = poolfile.file_id
+        else:
+            poolfile = get_poolfile_by_id(files_id, session)
+            if poolfile is None:
+                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
+            pfs.append(poolfile)
+
+        df.poolfile_id = files_id
+        session.add(df)
+
+    session.flush()
+
+    # Add the src_uploaders to the DB
+    uploader_ids = [source.maintainer_id]
+    if u.pkg.dsc.has_key("uploaders"):
+        for up in u.pkg.dsc["uploaders"].split(","):
+            up = up.strip()
+            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
+
+    added_ids = {}
+    for up in uploader_ids:
+        if added_ids.has_key(up):
+            utils.warn("Already saw uploader %s for source %s" % (up, source.source))
+            continue
+
+        added_ids[up] = 1
+
+        su = SrcUploader()
+        su.maintainer_id = up
+        su.source_id = source.source_id
+        session.add(su)
+
+    session.flush()
+
+    return dsc_component, dsc_location_id, pfs
+
+__all__.append('add_dsc_to_db')
+
+@session_wrapper
+def add_deb_to_db(u, filename, session=None):
+    """
+    Contrary to what you might expect, this routine deals with both
+    debs and udebs.  That info is in 'dbtype', whilst 'type' is
+    'deb' for both of them
+    """
+    cnf = Config()
+    entry = u.pkg.files[filename]
+
+    bin = DBBinary()
+    bin.package = entry["package"]
+    bin.version = entry["version"]
+    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
+    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
+    bin.binarytype = entry["dbtype"]
+
+    # Find poolfile id
+    filename = entry["pool name"] + filename
+    fullpath = os.path.join(cnf["Dir::Pool"], filename)
+    if not entry.get("location id", None):
+        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
+
+    if entry.get("files id", None):
+        bin.poolfile_id = entry["files id"]
+        poolfile = get_poolfile_by_id(bin.poolfile_id, session)
+    else:
+        poolfile = add_poolfile(filename, entry, entry["location id"], session)
+        bin.poolfile_id = entry["files id"] = poolfile.file_id
+
+    # Find source id
+    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+    if len(bin_sources) != 1:
+        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+                                  (bin.package, bin.version, entry["architecture"],
+                                   filename, bin.binarytype, u.pkg.changes["fingerprint"])
+
+    bin.source_id = bin_sources[0].source_id
+
+    # Add and flush object so it has an ID
+    session.add(bin)
+    session.flush()
+
+    # Add BinAssociations
+    for suite_name in u.pkg.changes["distribution"].keys():
+        ba = BinAssociation()
+        ba.binary_id = bin.binary_id
+        ba.suite_id = get_suite(suite_name, session).suite_id
+        session.add(ba)
+
+    session.flush()
+
+    # Deal with contents - disabled for now
+    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
+    #if not contents:
+    #    print "REJECT\nCould not determine contents of package %s" % bin.package
+    #    session.rollback()
+    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
+
+    return poolfile
+
+__all__.append('add_deb_to_db')
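
As the docstring notes, 'type' stays "deb" for both flavours and only 'dbtype' differs; two hypothetical file entries illustrating the split:

    entry_deb  = {"type": "deb", "dbtype": "deb"}    # regular binary package
    entry_udeb = {"type": "deb", "dbtype": "udeb"}   # debian-installer component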
+
+################################################################################
+
 class SourceACL(object):
     def __init__(self, *args, **kwargs):
         pass
@@ -2258,63 +2530,72 @@ __all__.append('UploadBlock')
 
 ################################################################################
 
-class DBConn(Singleton):
+class DBConn(object):
     """
     database module init.
     """
+    __shared_state = {}
+
     def __init__(self, *args, **kwargs):
-        super(DBConn, self).__init__(*args, **kwargs)
+        self.__dict__ = self.__shared_state
 
-    def _startup(self, *args, **kwargs):
-        self.debug = False
-        if kwargs.has_key('debug'):
-            self.debug = True
-        self.__createconn()
+        if not getattr(self, 'initialised', False):
+            self.initialised = True
+            self.debug = kwargs.has_key('debug')
+            self.__createconn()
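
DBConn now uses the Borg (shared-state) idiom in place of the deleted Singleton base class, as do Holding, SummaryStats and UrgencyLog further down. A minimal standalone sketch of the pattern:

    class Borg(object):
        __shared_state = {}

        def __init__(self):
            # Every instance shares one __dict__, so attributes set through
            # any instance are visible through all the others.
            self.__dict__ = self.__shared_state

    a, b = Borg(), Borg()
    a.value = 42
    assert b.value == 42   # state is shared
    assert a is not b      # instances stay distinct, unlike a true singleton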
 
     def __setuptables(self):
-        self.tbl_architecture = Table('architecture', self.db_meta, autoload=True)
-        self.tbl_archive = Table('archive', self.db_meta, autoload=True)
-        self.tbl_bin_contents = Table('bin_contents', self.db_meta, autoload=True)
-        self.tbl_bin_associations = Table('bin_associations', self.db_meta, autoload=True)
-        self.tbl_binaries = Table('binaries', self.db_meta, autoload=True)
-        self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True)
-        self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True)
-        self.tbl_component = Table('component', self.db_meta, autoload=True)
-        self.tbl_config = Table('config', self.db_meta, autoload=True)
-        self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True)
-        self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True)
-        self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True)
-        self.tbl_changes_pending_files = Table('changes_pending_files', self.db_meta, autoload=True)
-        self.tbl_changes_pool_files = Table('changes_pool_files', self.db_meta, autoload=True)
-        self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True)
-        self.tbl_deb_contents = Table('deb_contents', self.db_meta, autoload=True)
-        self.tbl_files = Table('files', self.db_meta, autoload=True)
-        self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True)
-        self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True)
-        self.tbl_known_changes = Table('known_changes', self.db_meta, autoload=True)
-        self.tbl_keyring_acl_map = Table('keyring_acl_map', self.db_meta, autoload=True)
-        self.tbl_location = Table('location', self.db_meta, autoload=True)
-        self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True)
-        self.tbl_new_comments = Table('new_comments', self.db_meta, autoload=True)
-        self.tbl_override = Table('override', self.db_meta, autoload=True)
-        self.tbl_override_type = Table('override_type', self.db_meta, autoload=True)
-        self.tbl_pending_bin_contents = Table('pending_bin_contents', self.db_meta, autoload=True)
-        self.tbl_priority = Table('priority', self.db_meta, autoload=True)
-        self.tbl_queue = Table('queue', self.db_meta, autoload=True)
-        self.tbl_queue_files = Table('queue_files', self.db_meta, autoload=True)
-        self.tbl_section = Table('section', self.db_meta, autoload=True)
-        self.tbl_source = Table('source', self.db_meta, autoload=True)
-        self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True)
-        self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True)
-        self.tbl_src_format = Table('src_format', self.db_meta, autoload=True)
-        self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True)
-        self.tbl_suite = Table('suite', self.db_meta, autoload=True)
-        self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True)
-        self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True)
-        self.tbl_suite_queue_copy = Table('suite_queue_copy', self.db_meta, autoload=True)
-        self.tbl_udeb_contents = Table('udeb_contents', self.db_meta, autoload=True)
-        self.tbl_uid = Table('uid', self.db_meta, autoload=True)
-        self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True)
+        tables = (
+            'architecture',
+            'archive',
+            'bin_associations',
+            'binaries',
+            'binary_acl',
+            'binary_acl_map',
+            'bin_contents',
+            'build_queue',
+            'build_queue_files',
+            'component',
+            'config',
+            'changes_pending_binaries',
+            'changes_pending_files',
+            'changes_pending_files_map',
+            'changes_pending_source',
+            'changes_pending_source_files',
+            'changes_pool_files',
+            'deb_contents',
+            'dsc_files',
+            'files',
+            'fingerprint',
+            'keyrings',
+            'changes',
+            'keyring_acl_map',
+            'location',
+            'maintainer',
+            'new_comments',
+            'override',
+            'override_type',
+            'pending_bin_contents',
+            'policy_queue',
+            'priority',
+            'section',
+            'source',
+            'source_acl',
+            'src_associations',
+            'src_format',
+            'src_uploaders',
+            'suite',
+            'suite_architectures',
+            'suite_src_formats',
+            'suite_build_queue_copy',
+            'udeb_contents',
+            'uid',
+            'upload_blocks',
+        )
+
+        for table_name in tables:
+            table = Table(table_name, self.db_meta, autoload=True)
+            setattr(self, 'tbl_%s' % table_name, table)
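
Each name in the tuple becomes a reflected tbl_<name> attribute; the loop replaces the forty-odd hand-written Table() lines it supersedes. A standalone reflection sketch with SQLAlchemy (the connection string is hypothetical):

    from sqlalchemy import MetaData, Table, create_engine

    engine = create_engine('postgres:///projectb')       # hypothetical DSN
    db_meta = MetaData(bind=engine)
    tbl_files = Table('files', db_meta, autoload=True)   # columns reflected from the DB
    print [c.name for c in tbl_files.columns]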
 
     def __setupmappers(self):
         mapper(Architecture, self.tbl_architecture,
@@ -2415,11 +2696,36 @@ class DBConn(Singleton):
                properties = dict(keyring_name = self.tbl_keyrings.c.name,
                                  keyring_id = self.tbl_keyrings.c.id))
 
-        mapper(KnownChange, self.tbl_known_changes,
-               properties = dict(known_change_id = self.tbl_known_changes.c.id,
+        mapper(DBChange, self.tbl_changes,
+               properties = dict(change_id = self.tbl_changes.c.id,
                                  poolfiles = relation(PoolFile,
                                                       secondary=self.tbl_changes_pool_files,
                                                       backref="changeslinks"),
+                                 files = relation(ChangePendingFile,
+                                                  secondary=self.tbl_changes_pending_files_map,
+                                                  backref="changesfile"),
+                                 in_queue_id = self.tbl_changes.c.in_queue,
+                                 in_queue = relation(PolicyQueue,
+                                                     primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
+                                 approved_for_id = self.tbl_changes.c.approved_for))
+
+        mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
+               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
+
+        mapper(ChangePendingFile, self.tbl_changes_pending_files,
+               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+
+        mapper(ChangePendingSource, self.tbl_changes_pending_source,
+               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
+                                 change = relation(DBChange),
+                                 maintainer = relation(Maintainer,
+                                                       primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
+                                 changedby = relation(Maintainer,
+                                                      primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
+                                 fingerprint = relation(Fingerprint),
+                                 source_files = relation(ChangePendingFile,
+                                                         secondary=self.tbl_changes_pending_source_files,
+                                                         backref="pending_sources")))
-                                 files = relation(KnownChangePendingFile, backref="changesfile")))
 
-        mapper(KnownChangePendingFile, self.tbl_changes_pending_files,
@@ -2461,16 +2767,12 @@ class DBConn(Singleton):
                properties = dict(overridetype = self.tbl_override_type.c.type,
                                  overridetype_id = self.tbl_override_type.c.id))
 
+        mapper(PolicyQueue, self.tbl_policy_queue,
+               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+
         mapper(Priority, self.tbl_priority,
                properties = dict(priority_id = self.tbl_priority.c.id))
 
-        mapper(Queue, self.tbl_queue,
-               properties = dict(queue_id = self.tbl_queue.c.id))
-
-        mapper(QueueFile, self.tbl_queue_files,
-               properties = dict(queue = relation(Queue, backref='queuefiles'),
-                                 poolfile = relation(PoolFile, backref='queueinstances')))
-
         mapper(Section, self.tbl_section,
                properties = dict(section_id = self.tbl_section.c.id,
                                  section=self.tbl_section.c.section))
@@ -2519,8 +2821,8 @@ class DBConn(Singleton):
 
         mapper(Suite, self.tbl_suite,
                properties = dict(suite_id = self.tbl_suite.c.id,
-                                 policy_queue = relation(Queue),
-                                 copy_queues = relation(Queue, secondary=self.tbl_suite_queue_copy)))
+                                 policy_queue = relation(PolicyQueue),
+                                 copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy)))
 
         mapper(SuiteArchitecture, self.tbl_suite_architectures,
                properties = dict(suite_id = self.tbl_suite_architectures.c.suite,
old mode 100755 (executable)
new mode 100644 (file)
index 0c472d1..b637738
@@ -30,19 +30,22 @@ import os
 from errno import ENOENT, EEXIST, EACCES
 import shutil
 
-from singleton import Singleton
 from config import Config
 from utils import fubar
 
 ###############################################################################
 
-class Holding(Singleton):
+class Holding(object):
+    __shared_state = {}
+
     def __init__(self, *args, **kwargs):
-        super(Holding, self).__init__(*args, **kwargs)
+        self.__dict__ = self.__shared_state
 
-    def _startup(self):
-        self.in_holding = {}
-        self.holding_dir = Config()["Dir::Queue::Holding"]
+        if not getattr(self, 'initialised', False):
+            self.initialised = True
+
+            self.in_holding = {}
+            self.holding_dir = Config()["Dir::Queue::Holding"]
 
     def copy_to_holding(self, filename):
         base_filename = os.path.basename(filename)
diff --git a/daklib/lintian.py b/daklib/lintian.py
new file mode 100644 (file)
index 0000000..3d1afc8
--- /dev/null
@@ -0,0 +1,58 @@
+from regexes import re_parse_lintian
+
+def parse_lintian_output(output):
+    """
+    Parses Lintian output and returns a generator with the data.
+
+    >>> [sorted(tag.items()) for tag in parse_lintian_output('W: pkgname: some-tag path/to/file')]
+    [[('description', 'path/to/file'), ('level', 'W'), ('package', 'pkgname'), ('tag', 'some-tag')]]
+    """
+
+    for line in output.split('\n'):
+        m = re_parse_lintian.match(line)
+        if m:
+            yield m.groupdict()
+
+def generate_reject_messages(parsed_tags, tag_definitions, log=lambda *args: args):
+    """
+    Generates package reject messages by comparing parsed lintian output with
+    tag definitions. Returns a generator containing the reject messages.
+    """
+
+    tags = set()
+    for values in tag_definitions.values():
+        for tag_name in values:
+            tags.add(tag_name)
+
+    for tag in parsed_tags:
+        tag_name = tag['tag']
+
+        if tag_name not in tags:
+            continue
+
+        # Was tag overridden?
+        if tag['level'] == 'O':
+
+            if tag_name in tag_definitions['nonfatal']:
+                # Overriding this tag is allowed.
+                pass
+
+            elif tag_name in tag_definitions['fatal']:
+                # Overriding this tag is NOT allowed.
+
+                log('ftpmaster does not allow tag to be overridable', tag_name)
+                yield "%(package)s: Overriden tag %(tag)s found, but this " \
+                    "tag may not be overridden." % tag
+
+        else:
+            # Tag is known and not overridden; reject
+            yield "%(package)s: lintian output: '%(tag)s %(description)s', " \
+                "automatically rejected package." % tag
+
+            # Now tell if they *might* override it.
+            if tag_name in tag_definitions['nonfatal']:
+                log("auto rejecting", "overridable", tag_name)
+                yield "%(package)s: If you have a good reason, you may " \
+                   "override this lintian tag." % tag
+            else:
+                log("auto rejecting", "not overridable", tag_name)
old mode 100755 (executable)
new mode 100644 (file)
index 1694deb..a91bcdf
@@ -38,6 +38,8 @@ import commands
 import shutil
 import textwrap
 from types import *
+from sqlalchemy.sql.expression import desc
+from sqlalchemy.orm.exc import NoResultFound
 
 import yaml
 
@@ -46,11 +48,13 @@ from changes import *
 from regexes import *
 from config import Config
 from holding import Holding
+from urgencylog import UrgencyLog
 from dbconn import *
 from summarystats import SummaryStats
 from utils import parse_changes, check_dsc_files
 from textutils import fix_maintainer
 from binary import Binary
+from lintian import parse_lintian_output, generate_reject_messages
 
 ###############################################################################
 
@@ -285,6 +289,7 @@ class Upload(object):
         for title, messages in msgs:
             if messages:
                 msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
+        msg += '\n'
 
         return msg
 
@@ -434,12 +439,6 @@ class Upload(object):
         self.pkg.changes["chopversion"] = re_no_epoch.sub('', self.pkg.changes["version"])
         self.pkg.changes["chopversion2"] = re_no_revision.sub('', self.pkg.changes["chopversion"])
 
-        # Check there isn't already a changes file of the same name in one
-        # of the queue directories.
         base_filename = os.path.basename(filename)
-        if get_knownchange(base_filename):
-            self.rejects.append("%s: a file with this name already exists." % (base_filename))
-
         # Check the .changes is non-empty
         if not self.pkg.files:
             self.rejects.append("%s: nothing to do (Files field is empty)." % (base_filename))
@@ -722,7 +721,6 @@ class Upload(object):
     def per_suite_file_checks(self, f, suite, session):
         cnf = Config()
         entry = self.pkg.files[f]
-        archive = utils.where_am_i()
 
         # Skip byhand
         if entry.has_key("byhand"):
@@ -766,9 +764,9 @@ class Upload(object):
 
         # Determine the location
         location = cnf["Dir::Pool"]
-        l = get_location(location, entry["component"], archive, session)
+        l = get_location(location, entry["component"], session=session)
         if l is None:
-            self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %s, Archive: %s)" % (entry["component"], archive))
+            self.rejects.append("[INTERNAL ERROR] couldn't determine location (Component: %)" % entry["component"])
             entry["location id"] = -1
         else:
             entry["location id"] = l.location_id
@@ -796,17 +794,11 @@ class Upload(object):
             entry["othercomponents"] = res.fetchone()[0]
 
     def check_files(self, action=True):
-        archive = utils.where_am_i()
         file_keys = self.pkg.files.keys()
         holding = Holding()
         cnf = Config()
 
-        # XXX: As far as I can tell, this can no longer happen - see
-        #      comments by AJ in old revisions - mhy
-        # if reprocess is 2 we've already done this and we're checking
-        # things again for the new .orig.tar.gz.
-        # [Yes, I'm fully aware of how disgusting this is]
-        if action and self.reprocess < 2:
+        if action:
             cwd = os.getcwd()
             os.chdir(self.pkg.directory)
             for f in file_keys:
@@ -817,36 +809,31 @@ class Upload(object):
 
             os.chdir(cwd)
 
-        # Check there isn't already a .changes or .dak file of the same name in
-        # the proposed-updates "CopyChanges" or "CopyDotDak" storage directories.
+        # check we already know the changes file
         # [NB: this check must be done post-suite mapping]
         base_filename = os.path.basename(self.pkg.changes_file)
-        dot_dak_filename = base_filename[:-8] + ".dak"
 
-        for suite in self.pkg.changes["distribution"].keys():
-            copychanges = "Suite::%s::CopyChanges" % (suite)
-            if cnf.has_key(copychanges) and \
-                   os.path.exists(os.path.join(cnf[copychanges], base_filename)):
-                self.rejects.append("%s: a file with this name already exists in %s" \
-                           % (base_filename, cnf[copychanges]))
-
-            copy_dot_dak = "Suite::%s::CopyDotDak" % (suite)
-            if cnf.has_key(copy_dot_dak) and \
-                   os.path.exists(os.path.join(cnf[copy_dot_dak], dot_dak_filename)):
-                self.rejects.append("%s: a file with this name already exists in %s" \
-                           % (dot_dak_filename, Cnf[copy_dot_dak]))
-
-        self.reprocess = 0
+        session = DBConn().session()
+
+        try:
+            dbc = session.query(DBChange).filter_by(changesname=base_filename).one()
+            # if in the pool or in a queue other than unchecked, reject
+            if dbc.in_queue is None or dbc.in_queue.queue_name != 'unchecked':
+                self.rejects.append("%s file already known to dak" % base_filename)
+        except NoResultFound, e:
+            # not known, good
+            pass
+
         has_binaries = False
         has_source = False
 
-        session = DBConn().session()
-
         for f, entry in self.pkg.files.items():
             # Ensure the file does not already exist in one of the accepted directories
-            for d in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+            for d in [ "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
                 if not cnf.has_key("Dir::Queue::%s" % (d)): continue
-                if os.path.exists(cnf["Dir::Queue::%s" % (d) ] + '/' + f):
+                if os.path.exists(os.path.join(cnf["Dir::Queue::%s" % (d) ], f)):
                     self.rejects.append("%s file already exists in the %s directory." % (f, d))
 
             if not re_taint_free.match(f):
@@ -1084,15 +1071,10 @@ class Upload(object):
             self.rejects.append("%s: changelog format not recognised (empty version tree)." % (dsc_filename))
 
     def check_source(self):
-        # XXX: I'm fairly sure reprocess == 2 can never happen
-        #      AJT disabled the is_incoming check years ago - mhy
-        #      We should probably scrap or rethink the whole reprocess thing
         # Bail out if:
         #    a) there's no source
-        # or b) reprocess is 2 - we will do this check next time when orig
-        #       tarball is in 'files'
-        # or c) the orig files are MIA
+        # or b) the orig files are MIA
-        if not self.pkg.changes["architecture"].has_key("source") or self.reprocess == 2 \
+        if not self.pkg.changes["architecture"].has_key("source") \
            or len(self.pkg.orig_files) == 0:
             return
 
@@ -1266,6 +1248,11 @@ class Upload(object):
     ###########################################################################
 
     def check_lintian(self):
+        """
+        Extends self.rejects by checking the output of lintian against tags
+        specified in Dinstall::LintianTags.
+        """
+
         cnf = Config()
 
         # Don't reject binary uploads
@@ -1273,24 +1260,22 @@ class Upload(object):
             return
 
         # Only check some distributions
-        valid_dist = False
         for dist in ('unstable', 'experimental'):
             if dist in self.pkg.changes['distribution']:
-                valid_dist = True
                 break
-
-        if not valid_dist:
+        else:
             return
 
+        # If we do not have a tagfile, don't do anything
         tagfile = cnf.get("Dinstall::LintianTags")
         if tagfile is None:
-            # We don't have a tagfile, so just don't do anything.
             return
 
         # Parse the yaml file
         sourcefile = file(tagfile, 'r')
         sourcecontent = sourcefile.read()
         sourcefile.close()
+
         try:
             lintiantags = yaml.load(sourcecontent)['lintian']
         except yaml.YAMLError, msg:
@@ -1300,78 +1285,42 @@ class Upload(object):
         # Try and find all orig mentioned in the .dsc
         symlinked = self.ensure_orig()
 
-        # Now setup the input file for lintian. lintian wants "one tag per line" only,
-        # so put it together like it. We put all types of tags in one file and then sort
-        # through lintians output later to see if its a fatal tag we detected, or not.
-        # So we only run lintian once on all tags, even if we might reject on some, but not
-        # reject on others.
-        # Additionally build up a set of tags
-        tags = set()
-        (fd, temp_filename) = utils.temp_filename()
+        # Setup the input file for lintian
+        fd, temp_filename = utils.temp_filename()
         temptagfile = os.fdopen(fd, 'w')
-        for tagtype in lintiantags:
-            for tag in lintiantags[tagtype]:
-                temptagfile.write("%s\n" % tag)
-                tags.add(tag)
+        for tags in lintiantags.values():
+            temptagfile.writelines(['%s\n' % x for x in tags])
         temptagfile.close()
 
-        # So now we should look at running lintian at the .changes file, capturing output
-        # to then parse it.
-        command = "lintian --show-overrides --tags-from-file %s %s" % (temp_filename, self.pkg.changes_file)
-        (result, output) = commands.getstatusoutput(command)
+        try:
+            cmd = "lintian --show-overrides --tags-from-file %s %s" % \
+                (temp_filename, self.pkg.changes_file)
 
-        # We are done with lintian, remove our tempfile and any symlinks we created
-        os.unlink(temp_filename)
-        for symlink in symlinked:
-            os.unlink(symlink)
+            result, output = commands.getstatusoutput(cmd)
+        finally:
+            # Remove our tempfile and any symlinks we created
+            os.unlink(temp_filename)
 
-        if (result == 2):
-            utils.warn("lintian failed for %s [return code: %s]." % (self.pkg.changes_file, result))
-            utils.warn(utils.prefix_multi_line_string(output, " [possible output:] "))
+            for symlink in symlinked:
+                os.unlink(symlink)
 
-        if len(output) == 0:
-            return
+        if result == 2:
+            utils.warn("lintian failed for %s [return code: %s]." % \
+                (self.pkg.changes_file, result))
+            utils.warn(utils.prefix_multi_line_string(output, \
+                " [possible output:] "))
 
         def log(*txt):
             if self.logger:
-                self.logger.log([self.pkg.changes_file, "check_lintian"] + list(txt))
-
-        # We have output of lintian, this package isn't clean. Lets parse it and see if we
-        # are having a victim for a reject.
-        # W: tzdata: binary-without-manpage usr/sbin/tzconfig
-        for line in output.split('\n'):
-            m = re_parse_lintian.match(line)
-            if m is None:
-                continue
-
-            etype = m.group(1)
-            epackage = m.group(2)
-            etag = m.group(3)
-            etext = m.group(4)
-
-            # So lets check if we know the tag at all.
-            if etag not in tags:
-                continue
+                self.logger.log(
+                    [self.pkg.changes_file, "check_lintian"] + list(txt)
+                )
 
-            if etype == 'O':
-                # We know it and it is overriden. Check that override is allowed.
-                if etag in lintiantags['warning']:
-                    # The tag is overriden, and it is allowed to be overriden.
-                    # Don't add a reject message.
-                    pass
-                elif etag in lintiantags['error']:
-                    # The tag is overriden - but is not allowed to be
-                    self.rejects.append("%s: Overriden tag %s found, but this tag may not be overwritten." % (epackage, etag))
-                    log("ftpmaster does not allow tag to be overridable", etag)
-            else:
-                # Tag is known, it is not overriden, direct reject.
-                self.rejects.append("%s: Found lintian output: '%s %s', automatically rejected package." % (epackage, etag, etext))
-                # Now tell if they *might* override it.
-                if etag in lintiantags['warning']:
-                    log("auto rejecting", "overridable", etag)
-                    self.rejects.append("%s: If you have a good reason, you may override this lintian tag." % (epackage))
-                else:
-                    log("auto rejecting", "not overridable", etag)
+        # Generate messages
+        parsed_tags = parse_lintian_output(output)
+        self.rejects.extend(
+            generate_reject_messages(parsed_tags, lintiantags, log=log)
+        )
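
For reference, the tag file parsed above needs the 'fatal'/'nonfatal' keys that generate_reject_messages consumes; a hypothetical Dinstall::LintianTags file and its parsed form:

    # lintian:
    #   fatal:
    #     - binary-in-etc
    #   nonfatal:
    #     - ancient-standards-version
    lintiantags = {'fatal': ['binary-in-etc'],
                   'nonfatal': ['ancient-standards-version']}   # parsed form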
 
     ###########################################################################
     def check_urgency(self):
@@ -1493,7 +1442,7 @@ class Upload(object):
         #  or binary, whereas keys with no access might be able to
         #  upload some binaries)
         if fpr.source_acl.access_level == 'dm':
-            self.check_dm_source_upload(fpr, session)
+            self.check_dm_upload(fpr, session)
         else:
             # Check source-based permissions for other types
             if self.pkg.changes["architecture"].has_key("source"):
@@ -1837,13 +1786,13 @@ distribution."""
         return summary
 
     ###########################################################################
-
-    def accept (self, summary, short_summary, targetdir=None):
+    @session_wrapper
+    def accept (self, summary, short_summary, session=None):
         """
         Accept an upload.
 
-        This moves all files referenced from the .changes into the I{accepted}
-        queue, sends the accepted mail, announces to lists, closes bugs and
+        This moves all files referenced from the .changes into the pool,
+        sends the accepted mail, announces to lists, closes bugs and
         also checks for override disparities. If enabled it will write out
         the version history for the BTS Version Tracking and will finally call
         L{queue_build}.
@@ -1853,31 +1802,90 @@ distribution."""
 
         @type short_summary: string
         @param short_summary: Short summary
-
         """
 
         cnf = Config()
         stats = SummaryStats()
 
-        accepttemplate = os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted')
-
-        if targetdir is None:
-            targetdir = cnf["Dir::Queue::Accepted"]
+        print "Installing."
+        self.logger.log(["installing changes", self.pkg.changes_file])
 
-        print "Accepting."
-        if self.logger:
-            self.logger.log(["Accepting changes", self.pkg.changes_file])
+        poolfiles = []
 
-        self.pkg.write_dot_dak(targetdir)
+        # Add the .dsc file to the DB first
+        for newfile, entry in self.pkg.files.items():
+            if entry["type"] == "dsc":
+                dsc_component, dsc_location_id, pfs = add_dsc_to_db(self, newfile, session)
+                poolfiles.extend(pfs)
 
-        # Move all the files into the accepted directory
-        utils.move(self.pkg.changes_file, targetdir)
+        # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
+        for newfile, entry in self.pkg.files.items():
+            if entry["type"] == "deb":
+                poolfiles.append(add_deb_to_db(self, newfile, session))
 
-        for name, entry in sorted(self.pkg.files.items()):
-            utils.move(name, targetdir)
+        # If this is a sourceful, diff-only upload that is moving
+        # cross-component, we need to copy the .orig files into the new
+        # component too, so that the source stays complete within it.
+        if self.pkg.changes["architecture"].has_key("source"):
+            for orig_file in self.pkg.orig_files.keys():
+                if not self.pkg.orig_files[orig_file].has_key("id"):
+                    continue # Skip if it's not in the pool
+                orig_file_id = self.pkg.orig_files[orig_file]["id"]
+                if self.pkg.orig_files[orig_file]["location"] == dsc_location_id:
+                    continue # Skip if the location didn't change
+
+                # Do the move
+                oldf = get_poolfile_by_id(orig_file_id, session)
+                old_filename = os.path.join(oldf.location.path, oldf.filename)
+                old_dat = {'size': oldf.filesize,   'md5sum': oldf.md5sum,
+                           'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
+
+                new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
+
+                # TODO: Care about size/md5sum collisions etc
+                (found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
+
+                if newf is None:
+                    utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
+                    newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
+
+                    # TODO: Check that there's only 1 here
+                    source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0]
+                    dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
+                    dscf.poolfile_id = newf.file_id
+                    session.add(dscf)
+                    session.flush()
+
+                    poolfiles.append(newf)
+
+        # Install the files into the pool
+        for newfile, entry in self.pkg.files.items():
+            destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
+            utils.move(newfile, destination)
+            self.logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
             stats.accept_bytes += float(entry["size"])
 
-        stats.accept_count += 1
+        # Copy the .changes file across for suites which need it.
+        copy_changes = {}
+        for suite_name in self.pkg.changes["distribution"].keys():
+            if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)):
+                copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = ""
+
+        for dest in copy_changes.keys():
+            utils.copy(self.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
+
+        # We're done - commit the database changes
+        session.commit()
+        # Our SQL session will automatically start a new transaction after
+        # the last commit
+
+        # Move the .changes into the 'done' directory
+        utils.move(self.pkg.changes_file,
+                   os.path.join(cnf["Dir::Queue::Done"], os.path.basename(self.pkg.changes_file)))
+
+        if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
+            UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
 
         # Send accept mail, announce to lists, close bugs and check for
         # override disparities
@@ -1885,7 +1893,8 @@ distribution."""
             self.update_subst()
             self.Subst["__SUITE__"] = ""
             self.Subst["__SUMMARY__"] = summary
-            mail_message = utils.TemplateSubst(self.Subst, accepttemplate)
+            mail_message = utils.TemplateSubst(self.Subst,
+                                               os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
             utils.send_mail(mail_message)
             self.announce(short_summary, 1)
 
@@ -1923,13 +1932,19 @@ distribution."""
             os.rename(temp_filename, filename)
             os.chmod(filename, 0644)
 
-        # This routine returns None on success or an error on failure
-        # TODO: Replace queue copying using the new queue.add_file_from_pool routine
-        #       and by looking up which queues in suite.copy_queues
-        #res = get_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"])
-        #if res:
-        #    utils.fubar(res)
+        session.commit()
+
+        # Set up our copy queues (e.g. buildd queues)
+        for suite_name in self.pkg.changes["distribution"].keys():
+            suite = get_suite(suite_name, session)
+            for q in suite.copy_queues:
+                for f in poolfiles:
+                    q.add_file_from_pool(f)
 
+        session.commit()
+
+        # Finally...
+        stats.accept_count += 1
 
     def check_override(self):
         """
@@ -1968,25 +1983,33 @@ distribution."""
     def remove(self, from_dir=None):
         """
         Used (for instance) in p-u to remove the package from unchecked
+
+        Also removes the package from the holding area.
         """
         if from_dir is None:
-            os.chdir(self.pkg.directory)
-        else:
-            os.chdir(from_dir)
+            from_dir = self.pkg.directory
+        h = Holding()
 
         for f in self.pkg.files.keys():
-            os.unlink(f)
-        os.unlink(self.pkg.changes_file)
+            os.unlink(os.path.join(from_dir, f))
+            if os.path.exists(os.path.join(h.holding_dir, f)):
+                os.unlink(os.path.join(h.holding_dir, f))
+
+        os.unlink(os.path.join(from_dir, self.pkg.changes_file))
+        if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
+            os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
 
     ###########################################################################
 
-    def move_to_dir (self, dest, perms=0660, changesperms=0664):
+    def move_to_queue (self, queue):
         """
-        Move files to dest with certain perms/changesperms
+        Move files to a destination queue using the permissions in the table
         """
-        utils.move(self.pkg.changes_file, dest, perms=changesperms)
+        h = Holding()
+        utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
+                   queue.path, perms=int(queue.change_perms, 8))
         for f in self.pkg.files.keys():
-            utils.move(f, dest, perms=perms)
+            utils.move(os.path.join(h.holding_dir, f), queue.path, perms=int(queue.perms, 8))
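
The queue table stores its modes as strings, so int(s, 8) recovers the octal values; for hypothetical row values:

    assert int("0660", 8) == 0660   # queue.perms: files
    assert int("0664", 8) == 0664   # queue.change_perms: the .changes itself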
 
     ###########################################################################
 
@@ -2307,8 +2330,6 @@ distribution."""
     ################################################################################
 
     def check_source_against_db(self, filename, session):
-        """
-        """
         source = self.pkg.dsc.get("source")
         version = self.pkg.dsc.get("version")
 
@@ -2377,6 +2398,7 @@ distribution."""
                                 # This would fix the stupidity of changing something we often iterate over
                                 # whilst we're doing it
                                 del self.pkg.files[dsc_name]
+                                dsc_entry["files id"] = i.file_id
                                 if not orig_files.has_key(dsc_name):
                                     orig_files[dsc_name] = {}
                                 orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
diff --git a/daklib/queue_install.py b/daklib/queue_install.py
new file mode 100644 (file)
index 0000000..3283e1e
--- /dev/null
@@ -0,0 +1,267 @@
+#!/usr/bin/env python
+# vim:set et sw=4:
+
+"""
+Utility functions for process-upload
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006  James Troup <james@nocrew.org>
+@copyright: 2009  Joerg Jaspert <joerg@debian.org>
+@copyright: 2009  Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+import os
+
+from daklib import utils
+from daklib.dbconn import *
+from daklib.config import Config
+
+################################################################################
+
+def package_to_suite(u, suite_name, session):
+    if not u.pkg.changes["distribution"].has_key(suite_name):
+        return False
+
+    ret = True
+
+    if not u.pkg.changes["architecture"].has_key("source"):
+        q = session.query(SrcAssociation.sa_id)
+        q = q.join(Suite).filter_by(suite_name=suite_name)
+        q = q.join(DBSource).filter_by(source=u.pkg.changes['source'])
+        q = q.filter_by(version=u.pkg.changes['version']).limit(1)
+
+        # NB: Careful, this logic isn't what you would think it is
+        # Source is already in the target suite so no need to go to policy
+        # Instead, we don't move to the policy area, we just do an ACCEPT
+        if q.count() > 0:
+            ret = False
+
+    return ret
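
Given the inverted logic flagged in the NB above, a caller reads the result like this (a sketch; u and session as elsewhere in this diff):

    if package_to_suite(u, 'proposed-updates', session):
        pass   # not yet in the suite: send it through the policy queue
    else:
        pass   # already in the suite: plain ACCEPT, skip the policy queue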
+
+def package_to_queue(u, summary, short_summary, queue, chg, session, announce=None):
+    cnf = Config()
+
+    print "Moving to %s policy queue" % queue.queue_name.upper()
+    u.logger.log(["Moving to %s" % queue.queue_name, u.pkg.changes_file])
+
+    u.move_to_queue(queue)
+    chg.in_queue_id = queue.policy_queue_id
+    session.add(chg)
+    session.commit()
+
+    # Check for override disparities
+    u.check_override()
+
+    # Send accept mail, announce to lists and close bugs
+    if announce and not cnf["Dinstall::Options::No-Mail"]:
+        template = os.path.join(cnf["Dir::Templates"], announce)
+        u.update_subst()
+        u.Subst["__SUITE__"] = ""
+        mail_message = utils.TemplateSubst(u.Subst, template)
+        utils.send_mail(mail_message)
+        u.announce(short_summary, True)
+
+################################################################################
+
+# TODO: This logic needs to be replaced with policy queues before we upgrade
+# security master
+
+#def is_unembargo(u):
+#    session = DBConn().session()
+#    cnf = Config()
+#
+#    q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes)
+#    if q.rowcount > 0:
+#        session.close()
+#        return True
+#
+#    oldcwd = os.getcwd()
+#    os.chdir(cnf["Dir::Queue::Disembargo"])
+#    disdir = os.getcwd()
+#    os.chdir(oldcwd)
+#
+#    ret = False
+#
+#    if u.pkg.directory == disdir:
+#        if u.pkg.changes["architecture"].has_key("source"):
+#            session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes)
+#            session.commit()
+#
+#            ret = True
+#
+#    session.close()
+#
+#    return ret
+#
+#def queue_unembargo(u, summary, short_summary, session=None):
+#    return package_to_queue(u, summary, short_summary, "Unembargoed",
+#                            perms=0660, build=True, announce='process-unchecked.accepted')
+#
+#################################################################################
+#
+#def is_embargo(u):
+#    # if embargoed queues are enabled always embargo
+#    return True
+#
+#def queue_embargo(u, summary, short_summary, session=None):
+#    return package_to_queue(u, summary, short_summary, "Unembargoed",
+#                            perms=0660, build=True, announce='process-unchecked.accepted')
+
+################################################################################
+
+def is_autobyhand(u):
+    cnf = Config()
+
+    all_auto = 1
+    any_auto = 0
+    for f in u.pkg.files.keys():
+        if u.pkg.files[f].has_key("byhand"):
+            any_auto = 1
+
+            # filename is of form "PKG_VER_ARCH.EXT" where PKG, VER and ARCH
+            # don't contain underscores, and ARCH doesn't contain dots.
+            # further VER matches the .changes Version:, and ARCH should be in
+            # the .changes Architecture: list.
+            if f.count("_") < 2:
+                all_auto = 0
+                continue
+
+            (pckg, ver, archext) = f.split("_", 2)
+            if archext.count(".") < 1 or u.pkg.changes["version"] != ver:
+                all_auto = 0
+                continue
+
+            ABH = cnf.SubTree("AutomaticByHandPackages")
+            if not ABH.has_key(pckg) or \
+              ABH["%s::Source" % (pckg)] != u.pkg.changes["source"]:
+                print "not match %s %s" % (pckg, u.pkg.changes["source"])
+                all_auto = 0
+                continue
+
+            (arch, ext) = archext.split(".", 1)
+            if arch not in u.pkg.changes["architecture"]:
+                all_auto = 0
+                continue
+
+            u.pkg.files[f]["byhand-arch"] = arch
+            u.pkg.files[f]["byhand-script"] = ABH["%s::Script" % (pckg)]
+
+    return any_auto and all_auto
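
The PKG_VER_ARCH.EXT convention checked above, traced for a hypothetical byhand file:

    f = "debian-installer-images_20091105_amd64.tar.gz"   # hypothetical
    pckg, ver, archext = f.split("_", 2)
    arch, ext = archext.split(".", 1)
    # pckg == 'debian-installer-images', ver == '20091105',
    # arch == 'amd64', ext == 'tar.gz'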
+
+def do_autobyhand(u, summary, short_summary, chg, session):
+    print "Attempting AUTOBYHAND."
+    byhandleft = True
+    for f, entry in u.pkg.files.items():
+        byhandfile = f
+
+        if not entry.has_key("byhand"):
+            continue
+
+        if not entry.has_key("byhand-script"):
+            byhandleft = True
+            continue
+
+        os.system("ls -l %s" % byhandfile)
+
+        result = os.system("%s %s %s %s %s" % (
+                entry["byhand-script"],
+                byhandfile,
+                u.pkg.changes["version"],
+                entry["byhand-arch"],
+                os.path.abspath(u.pkg.changes_file)))
+
+        if result == 0:
+            os.unlink(byhandfile)
+            del u.pkg.files[f]
+        else:
+            print "Error processing %s, left as byhand." % (f)
+            byhandleft = True
+
+    if byhandleft:
+        do_byhand(u, summary, short_summary, chg, session)
+    else:
+        u.accept(summary, short_summary, session)
+        u.check_override()
+
+################################################################################
+
+def is_byhand(u):
+    for f in u.pkg.files.keys():
+        if u.pkg.files[f].has_key("byhand"):
+            return True
+    return False
+
+def do_byhand(u, summary, short_summary, chg, session):
+    return package_to_queue(u, summary, short_summary,
+                            get_policy_queue('byhand'), chg, session,
+                            announce=None)
+
+################################################################################
+
+def is_new(u):
+    for f in u.pkg.files.keys():
+        if u.pkg.files[f].has_key("new"):
+            return True
+    return False
+
+def acknowledge_new(u, summary, short_summary, chg, session):
+    cnf = Config()
+
+    print "Moving to NEW queue."
+    u.logger.log(["Moving to new", u.pkg.changes_file])
+
+    q = get_policy_queue('new', session)
+
+    u.move_to_queue(q)
+    chg.in_queue_id = q.policy_queue_id
+    session.add(chg)
+    session.commit()
+
+    if not cnf["Dinstall::Options::No-Mail"]:
+        print "Sending new ack."
+        template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
+        u.update_subst()
+        u.Subst["__SUMMARY__"] = summary
+        new_ack_message = utils.TemplateSubst(u.Subst, template)
+        utils.send_mail(new_ack_message)
+
+################################################################################
+
+# q-unapproved hax0ring
+QueueInfo = {
+    "new": { "is": is_new, "process": acknowledge_new },
+    "autobyhand" : { "is" : is_autobyhand, "process": do_autobyhand },
+    "byhand" : { "is": is_byhand, "process": do_byhand },
+}
+
+def determine_target(u):
+    cnf = Config()
+
+    # Statically handled queues
+    target = None
+
+    for q in ["new", "autobyhand", "byhand"]:
+        if QueueInfo[q]["is"](u):
+            target = q
+            break
+
+    return target
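
process-upload is expected to drive the table roughly like this (a sketch; the fall-through to accept() happens in the caller, not in this file):

    target = determine_target(u)
    if target is not None:
        QueueInfo[target]["process"](u, summary, short_summary, chg, session)
    else:
        u.accept(summary, short_summary, session)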
+
+###############################################################################
+
old mode 100755 (executable)
new mode 100644 (file)
index 6be9997..9040e21
@@ -112,4 +112,4 @@ re_user_mails = re.compile(r"^(pub|uid):[^rdin].*<(.*@.*)>.*$", re.MULTILINE);
 re_user_name = re.compile(r"^pub:.*:(.*)<.*$", re.MULTILINE);
 re_re_mark = re.compile(r'^RE:')
 
-re_parse_lintian = re.compile(r"^(W|E|O): (.*?): ([^ ]*) ?(.*)$")
+re_parse_lintian = re.compile(r"^(?P<level>W|E|O): (?P<package>.*?): (?P<tag>[^ ]*) ?(?P<description>.*)$")
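
With named groups, match results can feed daklib.lintian's groupdict()-based parser directly; for illustration:

    m = re_parse_lintian.match("W: pkgname: some-tag path/to/file")
    print m.groupdict()
    # {'level': 'W', 'package': 'pkgname', 'tag': 'some-tag',
    #  'description': 'path/to/file'}   (key order may vary)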
diff --git a/daklib/singleton.py b/daklib/singleton.py
deleted file mode 100644 (file)
index 535a25a..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# vim:set et ts=4 sw=4:
-
-"""
-Singleton pattern code
-
-Inspiration for this very simple ABC was taken from various documents /
-tutorials / mailing lists.  This may not be thread safe but given that
-(as I write) large chunks of dak aren't even type-safe, I'll live with
-it for now
-
-@contact: Debian FTPMaster <ftpmaster@debian.org>
-@copyright: 2008  Mark Hymers <mhy@debian.org>
-@license: GNU General Public License version 2 or later
-"""
-
-################################################################################
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-################################################################################
-
-# < sgran> NCommander: in SQL, it's better to join than to repeat information
-# < tomv_w> that makes SQL the opposite to Debian mailing lists!
-
-################################################################################
-
-"""
-This class set implements objects that may need to be instantiated multiple
-times, but we don't want the overhead of actually creating and init'ing
-them more than once.  It also saves us using globals all over the place
-"""
-
-class Singleton(object):
-    """This is the ABC for other dak Singleton classes"""
-    __single = None
-    def __new__(cls, *args, **kwargs):
-        # Check to see if a __single exists already for this class
-        # Compare class types instead of just looking for None so
-        # that subclasses will create their own __single objects
-        if cls != type(cls.__single):
-            cls.__single = object.__new__(cls, *args, **kwargs)
-            cls.__single._startup(*args, **kwargs)
-        return cls.__single
-
-    def __init__(self, *args, **kwargs):
-        if type(self) == "Singleton":
-            raise NotImplementedError("Singleton is an ABC")
-
-    def _startup(self):
-        """
-        _startup is a private method used instead of __init__ due to the way
-        we instantiate this object
-        """
-        raise NotImplementedError("Singleton is an ABC")
-
old mode 100755 (executable)
new mode 100644 (file)
index 86300cc..60702c3
@@ -26,16 +26,15 @@ Simple summary class for dak
 
 ###############################################################################
 
-from singleton import Singleton
+class SummaryStats(object):
+    __shared_state = {}
 
-###############################################################################
-
-class SummaryStats(Singleton):
     def __init__(self, *args, **kwargs):
-        super(SummaryStats, self).__init__(*args, **kwargs)
+        self.__dict__ = self.__shared_state
 
-    def _startup(self):
-        self.reset_accept()
+        if not getattr(self, 'initialised', False):
+            self.initialised = True
+            self.reset_accept()
 
     def reset_accept(self):
         self.accept_count = 0
old mode 100755 (executable)
new mode 100644 (file)
old mode 100755 (executable)
new mode 100644 (file)
index fb2e7fa..7d67905
@@ -29,33 +29,35 @@ Urgency Logger class for dak
 import os
 import time
 
-from singleton import Singleton
 from config import Config
 from utils import warn, open_file, move
 
 ###############################################################################
 
-class UrgencyLog(Singleton):
+class UrgencyLog(object):
     "Urgency Logger object"
+
+    __shared_state = {}
+
     def __init__(self, *args, **kwargs):
-        super(UrgencyLog, self).__init__(*args, **kwargs)
+        self.__dict__ = self.__shared_state
 
-    def _startup(self):
-        "Initialize a new Urgency Logger object"
+        if not getattr(self, 'initialised', False):
+            self.initialised = True
 
-        self.timestamp = time.strftime("%Y%m%d%H%M%S")
+            self.timestamp = time.strftime("%Y%m%d%H%M%S")
 
-        # Create the log directory if it doesn't exist
-        self.log_dir = Config()["Dir::UrgencyLog"]
+            # Create the log directory if it doesn't exist
+            self.log_dir = Config()["Dir::UrgencyLog"]
 
-        if not os.path.exists(self.log_dir) or not os.access(self.log_dir, os.W_OK):
-            warn("UrgencyLog directory %s does not exist or is not writeable, using /srv/ftp.debian.org/tmp/ instead" % (self.log_dir))
-            self.log_dir = '/srv/ftp.debian.org/tmp/'
+            if not os.path.exists(self.log_dir) or not os.access(self.log_dir, os.W_OK):
+                warn("UrgencyLog directory %s does not exist or is not writeable, using /srv/ftp.debian.org/tmp/ instead" % (self.log_dir))
+                self.log_dir = '/srv/ftp.debian.org/tmp/'
 
-        # Open the logfile
-        self.log_filename = "%s/.install-urgencies-%s.new" % (self.log_dir, self.timestamp)
-        self.log_file = open_file(self.log_filename, 'w')
-        self.writes = 0
+            # Open the logfile
+            self.log_filename = "%s/.install-urgencies-%s.new" % (self.log_dir, self.timestamp)
+            self.log_file = open_file(self.log_filename, 'w')
+            self.writes = 0
 
     def log(self, source, version, urgency):
         "Log an event"
old mode 100755 (executable)
new mode 100644 (file)
index c3e4dbb..b740175
@@ -714,24 +714,23 @@ def where_am_i ():
         return res[0]
 
 def which_conf_file ():
-    if os.getenv("DAK_CONFIG"):
-        print(os.getenv("DAK_CONFIG"))
-        return os.getenv("DAK_CONFIG")
-    else:
-        res = socket.gethostbyaddr(socket.gethostname())
-        # In case we allow local config files per user, try if one exists
-        if Cnf.FindB("Config::" + res[0] + "::AllowLocalConfig"):
-            homedir = os.getenv("HOME")
-            confpath = os.path.join(homedir, "/etc/dak.conf")
-            if os.path.exists(confpath):
-                apt_pkg.ReadConfigFileISC(Cnf,default_config)
-
-        # We are still in here, so there is no local config file or we do
-        # not allow local files. Do the normal stuff.
-        if Cnf.get("Config::" + res[0] + "::DakConfig"):
-            return Cnf["Config::" + res[0] + "::DakConfig"]
-        else:
-            return default_config
+    if os.getenv('DAK_CONFIG'):
+        return os.getenv('DAK_CONFIG')
+
+    res = socket.gethostbyaddr(socket.gethostname())
+    # In case we allow local config files per user, try if one exists
+    if Cnf.FindB("Config::" + res[0] + "::AllowLocalConfig"):
+        homedir = os.getenv("HOME")
+        confpath = os.path.join(homedir, "/etc/dak.conf")
+        if os.path.exists(confpath):
+            apt_pkg.ReadConfigFileISC(Cnf,default_config)
+
+    # We are still in here, so there is no local config file or we do
+    # not allow local files. Do the normal stuff.
+    if Cnf.get("Config::" + res[0] + "::DakConfig"):
+        return Cnf["Config::" + res[0] + "::DakConfig"]
+
+    return default_config
 
 def which_apt_conf_file ():
     res = socket.gethostbyaddr(socket.gethostname())
@@ -1506,7 +1505,8 @@ def get_changes_files(from_dir):
 apt_pkg.init()
 
 Cnf = apt_pkg.newConfiguration()
-apt_pkg.ReadConfigFileISC(Cnf,default_config)
+if not os.getenv("DAK_TEST"):
+    apt_pkg.ReadConfigFileISC(Cnf,default_config)
 
 if which_conf_file() != default_config:
     apt_pkg.ReadConfigFileISC(Cnf,which_conf_file())
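
Both environment switches must be set before daklib.utils is imported, because the module-level config read above runs at import time. A hypothetical test bootstrap:

    import os
    os.environ["DAK_TEST"] = "1"                      # skip the default config
    os.environ["DAK_CONFIG"] = "/tmp/dak-test.conf"   # hypothetical substitute; must exist
    from daklib import utils                          # now reads DAK_CONFIG instead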
diff --git a/scripts/debian/copyoverrides b/scripts/debian/copyoverrides
deleted file mode 100755 (executable)
index a90db62..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#! /bin/sh
-
-set -e
-. $SCRIPTVARS
-echo 'Copying override files into public view ...'
-
-for f in $copyoverrides ; do
-       cd $overridedir
-       chmod g+w override.$f
-
-       cd $indices
-       rm -f .newover-$f.gz
-       pc="`gzip 2>&1 -9nv <$overridedir/override.$f >.newover-$f.gz`"
-       set +e
-       nf=override.$f.gz
-       cmp -s .newover-$f.gz $nf
-       rc=$?
-       set -e
-        if [ $rc = 0 ]; then
-               rm -f .newover-$f.gz
-       elif [ $rc = 1 -o ! -f $nf ]; then
-               echo "   installing new $nf $pc"
-               mv -f .newover-$f.gz $nf
-               chmod g+w $nf
-       else
-               echo $? $pc
-               exit 1
-       fi
-done
diff --git a/scripts/debian/mkchecksums b/scripts/debian/mkchecksums
deleted file mode 100755 (executable)
index f733e89..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-# Update the md5sums file
-
-set -e
-. $SCRIPTVARS
-
-dsynclist=$dbdir/dsync.list
-md5list=$indices/md5sums
-
-echo -n "Creating md5 / dsync index file ... "
-
-cd "$ftpdir"
-${bindir}/dsync-flist -q generate $dsynclist --exclude $dsynclist --md5
-${bindir}/dsync-flist -q md5sums $dsynclist | gzip -9n > ${md5list}.gz
-${bindir}/dsync-flist -q link-dups $dsynclist || true
diff --git a/scripts/debian/mkfilesindices b/scripts/debian/mkfilesindices
deleted file mode 100755 (executable)
index c16fde6..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh -e
-
-export SCRIPTVARS=/srv/ftp.debian.org/dak/config/debian/vars
-. $SCRIPTVARS
-umask 002
-
-cd $base/ftp/indices/files/components
-
-ARCHLIST=$(tempfile)
-
-echo "Querying projectb..."
-
-echo 'SELECT l.path, f.filename, a.arch_string FROM location l JOIN files f ON (f.location = l.id) LEFT OUTER JOIN (binaries b JOIN architecture a ON (b.architecture = a.id)) ON (f.id = b.file)' | psql projectb -At | sed 's/|//;s,^/srv/ftp.debian.org/ftp,.,' | sort >$ARCHLIST
-
-includedirs () {
-    perl -ne 'print; while (m,/[^/]+$,) { $_=$`; print $_ . "\n" unless $d{$_}++; }'
-}
-
-poolfirst () {
-    perl -e '@nonpool=(); while (<>) { if (m,^\./pool/,) { print; } else { push @nonpool, $_; } } print for (@nonpool);'
-}
-
-echo "Generating sources list..."
-
-(
-  sed -n 's/|$//p' $ARCHLIST
-  cd $base/ftp
-  find ./dists -maxdepth 1 \! -type d
-  find ./dists \! -type d | grep "/source/"
-) | sort -u | gzip --rsyncable -9 > source.list.gz
-
-echo "Generating arch lists..."
-
-ARCHES=$( (<$ARCHLIST sed -n 's/^.*|//p'; echo amd64) | grep . | grep -v all | sort -u)
-for a in $ARCHES; do
-  (sed -n "s/|$a$//p" $ARCHLIST
-   sed -n 's/|all$//p' $ARCHLIST
-
-   cd $base/ftp
-   find ./dists -maxdepth 1 \! -type d
-   find ./dists \! -type d | grep -E "(proposed-updates.*_$a.changes$|/main/disks-$a/|/main/installer-$a/|/Contents-$a|/binary-$a/)"
-  ) | sort -u | gzip --rsyncable -9 > arch-$a.list.gz
-done
-
-echo "Generating suite lists..."
-
-suite_list () {
-    printf 'SELECT DISTINCT l.path, f.filename FROM (SELECT sa.source AS source FROM src_associations sa WHERE sa.suite = %d UNION SELECT b.source AS source FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) WHERE ba.suite = %d) s JOIN dsc_files df ON (s.source = df.source) JOIN files f ON (df.file = f.id) JOIN location l ON (f.location = l.id)\n' $1 $1 | psql -F' ' -A -t projectb
-
-    printf 'SELECT l.path, f.filename FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) JOIN files f ON (b.file = f.id) JOIN location l ON (f.location = l.id) WHERE ba.suite = %d\n' $1 | psql -F' ' -A -t projectb
-}
-
-printf 'SELECT id, suite_name FROM suite\n' | psql -F' ' -At projectb |
-  while read id suite; do
-    [ -e $base/ftp/dists/$suite ] || continue
-    (
-     (cd $base/ftp
-      distname=$(cd dists; readlink $suite || echo $suite)
-      find ./dists/$distname \! -type d
-      for distdir in ./dists/*; do 
-        [ "$(readlink $distdir)" != "$distname" ] || echo $distdir
-      done
-     )
-     suite_list $id | tr -d ' ' | sed 's,^/srv/ftp.debian.org/ftp,.,'
-    ) | sort -u | gzip --rsyncable -9 > suite-${suite}.list.gz
-  done
-
-echo "Finding everything on the ftp site to generate sundries $(date +"%X")..."
-
-(cd $base/ftp; find . \! -type d \! -name 'Archive_Maintenance_In_Progress' | sort) >$ARCHLIST
-
-rm -f sundries.list
-zcat *.list.gz | cat - *.list | sort -u | 
-  diff - $ARCHLIST | sed -n 's/^> //p' > sundries.list
-
-echo "Generating files list $(date +"%X")..."
-
-for a in $ARCHES; do
-  (echo ./project/trace; zcat arch-$a.list.gz source.list.gz) | 
-    cat - sundries.list dists.list project.list docs.list indices.list |
-    sort -u | poolfirst > ../arch-$a.files
-done
-
-(cd $base/ftp/
-       for dist in sid squeeze; do
-               find ./dists/$dist/main/i18n/ \! -type d | sort -u | gzip --rsyncable -9 > $base/ftp/indices/files/components/translation-$dist.list.gz
-       done
-)
-
-(cat ../arch-i386.files ../arch-amd64.files; zcat suite-oldstable.list.gz suite-proposed-updates.list.gz ; zcat translation-sid.list.gz ; zcat translation-squeeze.list.gz) |
-   sort -u | poolfirst > ../typical.files
-
-rm -f $ARCHLIST
-
-echo "Done!"
diff --git a/scripts/debian/mklslar b/scripts/debian/mklslar
deleted file mode 100755 (executable)
index 231f7f8..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# Update the ls-lR.
-
-set -e
-. $SCRIPTVARS
-
-cd $ftpdir
-
-filename=ls-lR
-
-echo "Removing any core files ..."
-find -type f -name core -print0 | xargs -0r rm -v
-
-echo "Checking permissions on files in the FTP tree ..."
-find -type f \( \! -perm -444 -o -perm +002 \) -ls
-find -type d \( \! -perm -555 -o -perm +002 \) -ls
-
-echo "Checking symlinks ..."
-symlinks -rd .
-
-echo "Creating recursive directory listing ... "
-rm -f .$filename.new
-TZ=UTC ls -lR | grep -v Archive_Maintenance_In_Progress > .$filename.new
-
-if [ -r ${filename}.gz ] ; then
-  mv -f ${filename}.gz $filename.old.gz
-  mv -f .$filename.new $filename
-  rm -f $filename.patch.gz
-  zcat $filename.old.gz | diff -u - $filename | gzip --rsyncable -9cfn - >$filename.patch.gz
-  rm -f $filename.old.gz
-else
-  mv -f .$filename.new $filename
-fi
-
-gzip --rsyncable -9cfN $filename >$filename.gz
-rm -f $filename
diff --git a/scripts/debian/mkmaintainers b/scripts/debian/mkmaintainers
deleted file mode 100755 (executable)
index 41e8727..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#! /bin/sh
-
-echo
-echo -n 'Creating Maintainers index ... '
-
-set -e
-. $SCRIPTVARS
-cd $base/misc/
-
-cd $indices
-dak make-maintainers ${scriptdir}/masterfiles/pseudo-packages.maintainers | sed -e "s/~[^  ]*\([   ]\)/\1/"  | awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}' > .new-maintainers
-
-set +e
-cmp .new-maintainers Maintainers >/dev/null
-rc=$?
-set -e
-if [ $rc = 1 ] || [ ! -f Maintainers ] ; then
-       echo -n "installing Maintainers ... "
-       mv -f .new-maintainers Maintainers
-       gzip --rsyncable -9v <Maintainers >.new-maintainers.gz
-       mv -f .new-maintainers.gz Maintainers.gz
-elif [ $rc = 0 ] ; then
-       echo '(same as before)'
-       rm -f .new-maintainers
-else
-       echo cmp returned $rc
-       false
-fi
diff --git a/tests/base_test.py b/tests/base_test.py
new file mode 100644 (file)
index 0000000..d99ce41
--- /dev/null
@@ -0,0 +1,20 @@
+import os
+import sys
+import unittest
+
+from os.path import abspath, dirname, join
+
+DAK_ROOT_DIR = dirname(dirname(abspath(__file__)))
+
+class DakTestCase(unittest.TestCase):
+    def setUp(self):
+        pass
+
+def fixture(*dirs):
+    return join(DAK_ROOT_DIR, 'tests', 'fixtures', *dirs)
+
+os.environ['DAK_TEST'] = '1'
+os.environ['DAK_CONFIG'] = fixture('dak.conf')
+
+if DAK_ROOT_DIR not in sys.path:
+    sys.path.insert(0, DAK_ROOT_DIR)
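base_test.py centralises what every test module previously did by hand: it computes the repository root, exposes a fixture() helper, and sets DAK_TEST and DAK_CONFIG before daklib is imported, so daklib.utils loads the fixture configuration instead of the system one. A hypothetical test module built on it:

    from base_test import DakTestCase, fixture

    import unittest

    class ExampleTestCase(DakTestCase):
        def test_fixture_path(self):
            # fixture() resolves names relative to tests/fixtures/.
            self.assert_(fixture('dak.conf').endswith('/tests/fixtures/dak.conf'))

    if __name__ == '__main__':
        unittest.main()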
diff --git a/tests/fixtures/changes/1.changes b/tests/fixtures/changes/1.changes
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/fixtures/changes/2.changes b/tests/fixtures/changes/2.changes
new file mode 100644 (file)
index 0000000..9d264c1
--- /dev/null
@@ -0,0 +1,54 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+
+Format: 1.7
+Date: Fri, 20 Apr 2001 02:47:21 -0400
+Source: krb5
+Binary: krb5-kdc krb5-doc krb5-rsh-server libkrb5-dev libkrb53 krb5-ftpd
+ krb5-clients krb5-user libkadm54 krb5-telnetd krb5-admin-server
+Architecture: m68k
+Version: 1.2.2-4
+Distribution: unstable
+Urgency: low
+Maintainer: buildd m68k user account <buildd@ax.westfalen.de>
+Changed-By: Sam Hartman <hartmans@debian.org>
+Description: 
+ krb5-admin-server - Mit Kerberos master server (kadmind)
+ krb5-clients - Secure replacements for ftp, telnet and rsh using MIT Kerberos
+ krb5-ftpd  - Secure FTP server supporting MIT Kerberos
+ krb5-kdc   - Mit Kerberos key server (KDC)
+ krb5-rsh-server - Secure replacements for rshd and rlogind  using MIT Kerberos
+ krb5-telnetd - Secure telnet server supporting MIT Kerberos
+ krb5-user  - Basic programs to authenticate using MIT Kerberos
+ libkadm54  - MIT Kerberos administration runtime libraries
+ libkrb5-dev - Headers and development libraries for MIT Kerberos
+ libkrb53   - MIT Kerberos runtime libraries
+Closes: 94407
+Changes: 
+ krb5 (1.2.2-4) unstable; urgency=low
+ .
+   * Fix shared libraries to build with gcc not ld to properly include
+     -lgcc symbols, closes: #94407
+Files: 
+ 563dac1cdd3ba922f9301fe074fbfc80 65836 non-us/main optional libkadm54_1.2.2-4_m68k.deb
+ bb620f589c17ab0ebea1aa6e10ca52ad 272198 non-us/main optional libkrb53_1.2.2-4_m68k.deb
+ 40af6e64b3030a179e0de25bd95c95e9 143264 non-us/main optional krb5-user_1.2.2-4_m68k.deb
+ ffe4e5e7b2cab162dc608d56278276cf 141870 non-us/main optional krb5-clients_1.2.2-4_m68k.deb
+ 4fe01d1acb4b82ce0b8b72652a9a15ae 54592 non-us/main optional krb5-rsh-server_1.2.2-4_m68k.deb
+ b3c8c617ea72008a33b869b75d2485bf 41292 non-us/main optional krb5-ftpd_1.2.2-4_m68k.deb
+ 5908f8f60fe536d7bfc1ef3fdd9d74cc 42090 non-us/main optional krb5-telnetd_1.2.2-4_m68k.deb
+ 650ea769009a312396e56503d0059ebc 160236 non-us/main optional krb5-kdc_1.2.2-4_m68k.deb
+ 399c9de4e9d7d0b0f5626793808a4391 160392 non-us/main optional krb5-admin-server_1.2.2-4_m68k.deb
+ 6f962fe530c3187e986268b4e4d27de9 398662 non-us/main optional libkrb5-dev_1.2.2-4_m68k.deb
+
+-----BEGIN PGP SIGNATURE-----
+Version: 2.6.3i
+Charset: noconv
+
+iQCVAwUBOvVPPm547I3m3eHJAQHyaQP+M7RXVEqZ2/xHiPzaPcZRJ4q7o0zbMaU8
+qG/Mi6kuR1EhRNMjMH4Cp6ctbhRDHK5FR/8v7UkOd+ETDAhiw7eqJnLC60EZxZ/H
+CiOs8JklAXDERkQ3i7EYybv46Gxx91pIs2nE4xVKnG16d/wFELWMBLY6skF1B2/g
+zZju3cuFCCE=
+=Vm59
+-----END PGP SIGNATURE-----
+
+
diff --git a/tests/fixtures/changes/bogus-post.changes b/tests/fixtures/changes/bogus-post.changes
new file mode 100644 (file)
index 0000000..95e5a1f
--- /dev/null
@@ -0,0 +1,41 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+Format: 1.7
+Date: Tue,  9 Sep 2003 01:16:01 +0100
+Source: gawk
+Binary: gawk
+Architecture: source i386
+Version: 1:3.1.3-2
+Distribution: unstable
+Urgency: low
+Maintainer: James Troup <james@nocrew.org>
+Changed-By: James Troup <james@nocrew.org>
+Description: 
+ gawk       - GNU awk, a pattern scanning and processing language
+Closes: 204699 204701
+Changes: 
+ gawk (1:3.1.3-2) unstable; urgency=low
+ .
+   * debian/control (Standards-Version): bump to 3.6.1.0.
+ .
+   * 02_fix-ascii.dpatch: new patch from upstream to fix [[:ascii:]].
+     Thanks to <vle@gmx.net> for reporting the bug and forwarding it
+     upstream.  Closes: #204701
+ .
+   * 03_fix-high-char-ranges.dpatch: new patch from upstream to fix
+     [\x80-\xff].  Thanks to <vle@gmx.net> for reporting the bug and
+     forwarding it upstream.  Closes: #204699
+Files: 
+ 0e6542c48bcc9d9586fc8ebe4e7242a4 561 interpreters optional gawk_3.1.3-2.dsc
+ 50a29dce4a2c6e2ac38069eb7c41d9c4 8302 interpreters optional gawk_3.1.3-2.diff.gz
+ 5a255c7b421ac699804212e10205f22d 871114 interpreters optional gawk_3.1.3-2_i386.deb
+
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.0.6 (GNU/Linux)
+
+iEYEARECAAYFAj9dHWsACgkQgD/uEicUG7DUnACglndvU4LCA0/k36Qp873N0Sau
+fCwAoMdgIOUBcUfMqXvVnxdW03ev5bNB
+=O7Gh
+-----END PGP SIGNATURE-----
+You: have been 0wned
diff --git a/tests/fixtures/changes/bogus-pre.changes b/tests/fixtures/changes/bogus-pre.changes
new file mode 100644 (file)
index 0000000..0234d8b
--- /dev/null
@@ -0,0 +1,41 @@
+You: have been 0wned
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+Format: 1.7
+Date: Tue,  9 Sep 2003 01:16:01 +0100
+Source: gawk
+Binary: gawk
+Architecture: source i386
+Version: 1:3.1.3-2
+Distribution: unstable
+Urgency: low
+Maintainer: James Troup <james@nocrew.org>
+Changed-By: James Troup <james@nocrew.org>
+Description: 
+ gawk       - GNU awk, a pattern scanning and processing language
+Closes: 204699 204701
+Changes: 
+ gawk (1:3.1.3-2) unstable; urgency=low
+ .
+   * debian/control (Standards-Version): bump to 3.6.1.0.
+ .
+   * 02_fix-ascii.dpatch: new patch from upstream to fix [[:ascii:]].
+     Thanks to <vle@gmx.net> for reporting the bug and forwarding it
+     upstream.  Closes: #204701
+ .
+   * 03_fix-high-char-ranges.dpatch: new patch from upstream to fix
+     [\x80-\xff].  Thanks to <vle@gmx.net> for reporting the bug and
+     forwarding it upstream.  Closes: #204699
+Files: 
+ 0e6542c48bcc9d9586fc8ebe4e7242a4 561 interpreters optional gawk_3.1.3-2.dsc
+ 50a29dce4a2c6e2ac38069eb7c41d9c4 8302 interpreters optional gawk_3.1.3-2.diff.gz
+ 5a255c7b421ac699804212e10205f22d 871114 interpreters optional gawk_3.1.3-2_i386.deb
+
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.0.6 (GNU/Linux)
+
+iEYEARECAAYFAj9dHWsACgkQgD/uEicUG7DUnACglndvU4LCA0/k36Qp873N0Sau
+fCwAoMdgIOUBcUfMqXvVnxdW03ev5bNB
+=O7Gh
+-----END PGP SIGNATURE-----
diff --git a/tests/fixtures/changes/valid.changes b/tests/fixtures/changes/valid.changes
new file mode 100644 (file)
index 0000000..0e77d27
--- /dev/null
@@ -0,0 +1,40 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+Format: 1.7
+Date: Tue,  9 Sep 2003 01:16:01 +0100
+Source: gawk
+Binary: gawk
+Architecture: source i386
+Version: 1:3.1.3-2
+Distribution: unstable
+Urgency: low
+Maintainer: James Troup <james@nocrew.org>
+Changed-By: James Troup <james@nocrew.org>
+Description: 
+ gawk       - GNU awk, a pattern scanning and processing language
+Closes: 204699 204701
+Changes: 
+ gawk (1:3.1.3-2) unstable; urgency=low
+ .
+   * debian/control (Standards-Version): bump to 3.6.1.0.
+ .
+   * 02_fix-ascii.dpatch: new patch from upstream to fix [[:ascii:]].
+     Thanks to <vle@gmx.net> for reporting the bug and forwarding it
+     upstream.  Closes: #204701
+ .
+   * 03_fix-high-char-ranges.dpatch: new patch from upstream to fix
+     [\x80-\xff].  Thanks to <vle@gmx.net> for reporting the bug and
+     forwarding it upstream.  Closes: #204699
+Files: 
+ 0e6542c48bcc9d9586fc8ebe4e7242a4 561 interpreters optional gawk_3.1.3-2.dsc
+ 50a29dce4a2c6e2ac38069eb7c41d9c4 8302 interpreters optional gawk_3.1.3-2.diff.gz
+ 5a255c7b421ac699804212e10205f22d 871114 interpreters optional gawk_3.1.3-2_i386.deb
+
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.0.6 (GNU/Linux)
+
+iEYEARECAAYFAj9dHWsACgkQgD/uEicUG7DUnACglndvU4LCA0/k36Qp873N0Sau
+fCwAoMdgIOUBcUfMqXvVnxdW03ev5bNB
+=O7Gh
+-----END PGP SIGNATURE-----
diff --git a/tests/fixtures/dak.conf b/tests/fixtures/dak.conf
new file mode 100644 (file)
index 0000000..a738840
--- /dev/null
@@ -0,0 +1,19 @@
+// For extract_component_from_section tests
+
+Component
+{
+  main
+  {
+       Description "Main";
+  };
+
+  contrib
+  {
+       Description "Contrib";
+  };
+
+  non-free
+  {
+       Description "Software that fails to meet the DFSG";
+  };
+};
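The fixture uses apt's ISC-style configuration syntax, the same format utils.py loads via apt_pkg.ReadConfigFileISC(). A quick way to poke at it interactively, using the same API calls as utils.py above:

    import apt_pkg

    apt_pkg.init()
    cnf = apt_pkg.newConfiguration()
    apt_pkg.ReadConfigFileISC(cnf, 'tests/fixtures/dak.conf')
    print cnf['Component::non-free::Description']
    # -> Software that fails to meet the DFSG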
diff --git a/tests/fixtures/dsc/1.dsc b/tests/fixtures/dsc/1.dsc
new file mode 100644 (file)
index 0000000..dfdd92f
--- /dev/null
@@ -0,0 +1,22 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+Format: 1.0
+Source: amaya
+Version: 3.2.1-1
+Binary: amaya
+Maintainer: Steve Dunham <dunham@debian.org>
+Architecture: any
+Standards-Version: 2.4.0.0
+Files: 
+ 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
+ da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
+
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.0.2 (GNU/Linux)
+Comment: For info see http://www.gnupg.org
+
+iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
+rhYnRmVuNMa8oYSvL4hl/Yw=
+=EFAA
+-----END PGP SIGNATURE-----
diff --git a/tests/fixtures/dsc/2.dsc b/tests/fixtures/dsc/2.dsc
new file mode 100644 (file)
index 0000000..a6c9d85
--- /dev/null
@@ -0,0 +1,21 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+Format: 1.0
+Source: amaya
+Version: 3.2.1-1
+Binary: amaya
+Maintainer: Steve Dunham <dunham@debian.org>
+Architecture: any
+Standards-Version: 2.4.0.0
+Files: 
+ 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
+ da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.0.2 (GNU/Linux)
+Comment: For info see http://www.gnupg.org
+
+iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
+rhYnRmVuNMa8oYSvL4hl/Yw=
+=EFAA
+-----END PGP SIGNATURE-----
diff --git a/tests/fixtures/dsc/3.dsc b/tests/fixtures/dsc/3.dsc
new file mode 100644 (file)
index 0000000..211340e
--- /dev/null
@@ -0,0 +1,21 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+Format: 1.0
+Source: amaya
+Version: 3.2.1-1
+Binary: amaya
+Maintainer: Steve Dunham <dunham@debian.org>
+Architecture: any
+Standards-Version: 2.4.0.0
+Files: 
+ 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
+ da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
+
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.0.2 (GNU/Linux)
+Comment: For info see http://www.gnupg.org
+
+iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
+rhYnRmVuNMa8oYSvL4hl/Yw=
+=EFAA
+-----END PGP SIGNATURE-----
diff --git a/tests/fixtures/dsc/4.dsc b/tests/fixtures/dsc/4.dsc
new file mode 100644 (file)
index 0000000..91e361f
--- /dev/null
@@ -0,0 +1,19 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+Format: 1.0
+Source: amaya
+Version: 3.2.1-1
+Binary: amaya
+Maintainer: Steve Dunham <dunham@debian.org>
+Architecture: any
+Standards-Version: 2.4.0.0
+Files: 
+ 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
+ da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.0.2 (GNU/Linux)
+Comment: For info see http://www.gnupg.org
+iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
+rhYnRmVuNMa8oYSvL4hl/Yw=
+=EFAA
+-----END PGP SIGNATURE-----
diff --git a/tests/fixtures/dsc/5.dsc b/tests/fixtures/dsc/5.dsc
new file mode 100644 (file)
index 0000000..db9d8d3
--- /dev/null
@@ -0,0 +1,23 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+Format: 1.0
+Source: amaya
+Version: 3.2.1-1
+Binary: amaya
+Maintainer: Steve Dunham <dunham@debian.org>
+Architecture: any
+Standards-Version: 2.4.0.0
+Files: 
+ 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
+ da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
+
+
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.0.2 (GNU/Linux)
+Comment: For info see http://www.gnupg.org
+
+iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
+rhYnRmVuNMa8oYSvL4hl/Yw=
+=EFAA
+-----END PGP SIGNATURE-----
diff --git a/tests/fixtures/dsc/6.dsc b/tests/fixtures/dsc/6.dsc
new file mode 100644 (file)
index 0000000..ae36d64
--- /dev/null
@@ -0,0 +1,23 @@
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+
+Format: 1.0
+Source: amaya
+Version: 3.2.1-1
+Binary: amaya
+Maintainer: Steve Dunham <dunham@debian.org>
+Architecture: any
+Standards-Version: 2.4.0.0
+Files: 
+ 07f95f92b7cb0f12f7cf65ee5c5fbde2 4532418 amaya_3.2.1.orig.tar.gz
+ da06b390946745d9efaf9e7df8e05092 4817 amaya_3.2.1-1.diff.gz
+
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.0.2 (GNU/Linux)
+Comment: For info see http://www.gnupg.org
+
+iD8DBQE5j091iPgEjVqvb1kRAvFtAJ0asUAaac6ebfR3YeaH16HjL7F3GwCfV+AQ
+rhYnRmVuNMa8oYSvL4hl/Yw=
+=EFAA
+-----END PGP SIGNATURE-----
diff --git a/tests/fixtures/dsc/7.dsc b/tests/fixtures/dsc/7.dsc
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/test_extract_component_from_section.py b/tests/test_extract_component_from_section.py
new file mode 100755 (executable)
index 0000000..3493f41
--- /dev/null
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+from base_test import DakTestCase
+
+import unittest
+
+from daklib.utils import extract_component_from_section
+
+class ExtractComponentTestCase(DakTestCase):
+    """
+    prefix: non-US
+    component: main, contrib, non-free
+    section: games, admin, libs, [...]
+
+    [1] Order is as above.
+    [2] Prefix is optional for the default archive, but mandatory when
+        uploads are going anywhere else.
+    [3] Default component is main and may be omitted.
+    [4] Section is optional.
+    [5] Prefix is case insensitive.
+    [6] Everything else is case sensitive.
+    """
+
+    def assertExtract(self, input, output):
+        self.assertEqual(
+            extract_component_from_section(input)[1],
+            output,
+        )
+
+    def test_1(self):
+        # Validate #3
+        self.assertExtract('utils', 'main')
+
+    def test_2(self):
+        # Err, whoops?  should probably be 'utils', 'main'...
+        self.assertExtract('main/utils', 'main')
+
+    def test_3(self):
+        self.assertExtract('non-free/libs', 'non-free')
+
+    def test_4(self):
+        self.assertExtract('contrib/net', 'contrib')
+
+    def test_5(self):
+        # Validate #4
+        self.assertExtract('main', 'main')
+
+    def test_6(self):
+        self.assertExtract('contrib', 'contrib')
+
+    def test_7(self):
+        self.assertExtract('non-free', 'non-free')
+
+    def test_8(self):
+        # Validate #6 (section)
+        self.assertExtract('utIls', 'main')
+
+if __name__ == '__main__':
+    unittest.main()
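The docstring pins down the section grammar, while the assertions only check the component half of the return value. A rough sketch of the behaviour these tests encode (not the actual daklib.utils implementation, which also honours the non-US prefix and the configured Component list):

    KNOWN_COMPONENTS = ('main', 'contrib', 'non-free')

    def extract_component_from_section(section):
        # Component defaults to 'main' unless the section carries an
        # explicit 'component/section' prefix or is itself a component.
        component = 'main'
        if '/' in section:
            component = section.split('/', 1)[0]
        elif section in KNOWN_COMPONENTS:
            component = section
        return section, component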
diff --git a/tests/test_fix_maintainer.py b/tests/test_fix_maintainer.py
new file mode 100755 (executable)
index 0000000..203fbfc
--- /dev/null
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from base_test import DakTestCase
+
+import unittest
+
+from daklib.textutils import fix_maintainer
+from daklib.dak_exceptions import ParseMaintError
+
+class FixMaintainerTestCase(DakTestCase):
+    def assertValid(self, input, a, b, c, d):
+        a_, b_, c_, d_ = fix_maintainer(input)
+
+        self.assertEqual(a, a_)
+        self.assertEqual(b, b_)
+        self.assertEqual(c, c_)
+        self.assertEqual(d, d_)
+
+    def assertNotValid(self, input):
+        self.assertRaises(ParseMaintError, lambda: fix_maintainer(input))
+
+    def testUTF8Maintainer(self):
+        # Check Valid UTF-8 maintainer field
+        self.assertValid(
+            "Noèl Köthe <noel@debian.org>",
+            "Noèl Köthe <noel@debian.org>",
+            "=?utf-8?b?Tm/DqGwgS8O2dGhl?= <noel@debian.org>",
+            "Noèl Köthe",
+            "noel@debian.org",
+        )
+
+    def testASCII(self):
+        # Check valid ASCII maintainer field
+        self.assertValid(
+            "James Troup <james@nocrew.org>",
+            "James Troup <james@nocrew.org>",
+            "James Troup <james@nocrew.org>",
+            "James Troup",
+            "james@nocrew.org",
+        )
+
+    def testRFC822(self):
+        # Check "Debian vs RFC822" fixup of names with '.' or ',' in them
+        self.assertValid(
+            "James J. Troup <james@nocrew.org>",
+            "james@nocrew.org (James J. Troup)",
+            "james@nocrew.org (James J. Troup)",
+            "James J. Troup",
+            "james@nocrew.org",
+        )
+
+    def testSimple(self):
+        self.assertValid(
+            "James J, Troup <james@nocrew.org>",
+            "james@nocrew.org (James J, Troup)",
+            "james@nocrew.org (James J, Troup)",
+            "James J, Troup",
+            "james@nocrew.org",
+        )
+
+    def testJustEmail(self):
+        # Check just-email form
+        self.assertValid(
+            "james@nocrew.org",
+            " <james@nocrew.org>",
+            " <james@nocrew.org>",
+            "",
+            "james@nocrew.org",
+        )
+
+    def testBracketedEmail(self):
+        # Check bracketed just-email form
+        self.assertValid(
+            "<james@nocrew.org>",
+            " <james@nocrew.org>",
+            " <james@nocrew.org>",
+            "",
+            "james@nocrew.org",
+        )
+
+    def testKrazy(self):
+        # Check Krazy quoted-string local part email address
+        self.assertValid(
+            "Cris van Pelt <\"Cris van Pelt\"@tribe.eu.org>",
+            "Cris van Pelt <\"Cris van Pelt\"@tribe.eu.org>",
+            "Cris van Pelt <\"Cris van Pelt\"@tribe.eu.org>",
+            "Cris van Pelt",
+            "\"Cris van Pelt\"@tribe.eu.org",
+        )
+
+    def testEmptyString(self):
+        # Check empty string
+        self.assertValid("", "", "", "", "")
+
+    def testMissingEmailAddress(self):
+        # Check for missing email address
+        self.assertNotValid("James Troup")
+
+    def testInvalidEmail(self):
+        # Check for invalid email address
+        self.assertNotValid("James Troup <james@nocrew.org")
+
+if __name__ == '__main__':
+    unittest.main()
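fix_maintainer() returns a four-tuple, unpacked by assertValid() in this order: the RFC 822 form, the RFC 2047-encoded form, the bare name, and the bare email. Taking one of the cases above:

    from daklib.textutils import fix_maintainer

    rfc822, rfc2047, name, email = fix_maintainer(
        "James J. Troup <james@nocrew.org>")
    # rfc822 == rfc2047 == "james@nocrew.org (James J. Troup)"
    # name   == "James J. Troup"
    # email  == "james@nocrew.org"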
index 1ae6860aa60659ba890e9f00c31b1c02a9945ac9..a897eeb0b7df98a92b0c7b9acde9039363cd2fba 100755 (executable)
@@ -1,14 +1,13 @@
 #!/usr/bin/env python
 
-import unittest
+from base_test import DakTestCase
 
-import os, sys
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+import unittest
 
 from daklib.formats import parse_format, validate_changes_format
 from daklib.dak_exceptions import UnknownFormatError
 
-class ParseFormatTestCase(unittest.TestCase):
+class ParseFormatTestCase(DakTestCase):
     def assertParse(self, format, expected):
         self.assertEqual(parse_format(format), expected)
 
@@ -30,7 +29,7 @@ class ParseFormatTestCase(unittest.TestCase):
         self.assertParse('1.2 (three)', (1, 2, 'three'))
         self.assertParseFail('0.0 ()')
 
-class ValidateChangesFormat(unittest.TestCase):
+class ValidateChangesFormat(DakTestCase):
     def assertValid(self, changes, field='files'):
         validate_changes_format(changes, field)
 
@@ -56,3 +55,6 @@ class ValidateChangesFormat(unittest.TestCase):
     def testFilesField(self):
         self.assertInvalid((1, 7), field='notfiles')
         self.assertValid((1, 8), field='notfiles')
+
+if __name__ == '__main__':
+    unittest.main()
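parse_format() splits a Format: value into a tuple: per the assertions, '1.2 (three)' becomes (1, 2, 'three') and an empty variant such as '0.0 ()' is rejected. A minimal sketch of a parser with that shape (an assumption, not daklib's actual code, which raises UnknownFormatError):

    import re

    def parse_format(txt):
        # '1.0' -> (1, 0); '3.0 (quilt)' -> (3, 0, 'quilt').
        m = re.match(r'^(\d+)\.(\d+)(?:\s+\((\S+)\))?$', txt)
        if m is None:
            raise ValueError('unable to parse format %r' % txt)
        major, minor, variant = m.groups()
        if variant is None:
            return int(major), int(minor)
        return int(major), int(minor), variant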
diff --git a/tests/test_imports.py b/tests/test_imports.py
new file mode 100755 (executable)
index 0000000..2b35ab0
--- /dev/null
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+from base_test import DakTestCase, DAK_ROOT_DIR
+
+import glob
+import unittest
+
+from os.path import join, basename, splitext
+
+class ImportTestCase(DakTestCase):
+    for filename in glob.glob(join(DAK_ROOT_DIR, 'dak', '*.py')):
+        cmd, ext = splitext(basename(filename))
+
+        def test_fn(self, cmd=cmd):
+            __import__('dak', fromlist=[cmd])
+
+        locals()['test_importing_%s' % cmd] = test_fn
+
+if __name__ == '__main__':
+    unittest.main()
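The class-body loop is the dynamic-test idiom: it runs once at class-definition time, the cmd=cmd default argument freezes each loop value into its own test method, and locals() binds that method under a readable name. The same idiom in miniature:

    import unittest

    class GeneratedTests(unittest.TestCase):
        for value in ('a', 'b'):
            def test_fn(self, value=value):
                # The default argument captures the loop variable
                # at definition time, not at call time.
                self.assertEqual(value * 2, value + value)
            locals()['test_double_%s' % value] = test_fn

    if __name__ == '__main__':
        unittest.main()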
diff --git a/tests/test_lintian.py b/tests/test_lintian.py
new file mode 100755 (executable)
index 0000000..f4ed98e
--- /dev/null
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+
+from base_test import DakTestCase
+
+import unittest
+
+from daklib.lintian import parse_lintian_output, generate_reject_messages
+
+class ParseLintianTestCase(DakTestCase):
+    def assertParse(self, output, expected):
+        self.assertEqual(
+            list(parse_lintian_output(output)),
+            expected,
+        )
+
+    def testSimple(self):
+        self.assertParse(
+            'W: pkgname: some-tag path/to/file', [{
+                'level': 'W',
+                'package': 'pkgname',
+                'tag': 'some-tag',
+                'description': 'path/to/file',
+            }],
+        )
+
+        self.assertParse('', [])
+        self.assertParse('\n\n', [])
+        self.assertParse('dummy error test', [])
+
+    def testBinaryNoDescription(self):
+        self.assertParse(
+            'W: pkgname: some-tag', [{
+                'level': 'W',
+                'package': 'pkgname',
+                'tag': 'some-tag',
+                'description': '',
+            }],
+        )
+
+    def testSourceNoDescription(self):
+        self.assertParse(
+            'W: pkgname source: some-tag', [{
+                'level': 'W',
+                'package': 'pkgname source',
+                'tag': 'some-tag',
+                'description': '',
+            }]
+        )
+
+    def testSource(self):
+        self.assertParse(
+            'W: pkgname source: some-tag path/to/file', [{
+                'level': 'W',
+                'package': 'pkgname source',
+                'tag': 'some-tag',
+                'description': 'path/to/file',
+            }]
+        )
+
+class GenerateRejectMessages(DakTestCase):
+    def assertNumReject(self, input, defs, num):
+        msgs = list(generate_reject_messages(input, defs))
+        self.assertEqual(len(msgs), num)
+
+    def testUnknownTag(self):
+        self.assertNumReject([
+            {
+                'level': 'W',
+                'package': 'pkgname',
+                'tag': 'unknown-tag',
+                'description': '',
+            }
+            ], {'fatal': ['known-tag'], 'nonfatal': []},
+            0,
+        )
+
+    def testFatalTags(self):
+        self.assertNumReject([
+            {
+                'level': 'W',
+                'package': 'pkgname',
+                'tag': 'fatal-tag-1',
+                'description': '',
+            },
+            {
+                'level': 'W',
+                'package': 'pkgname',
+                'tag': 'fatal-tag-2',
+                'description': '',
+            },
+            ], {'fatal': ['fatal-tag-1', 'fatal-tag-2'], 'nonfatal': []},
+            2,
+        )
+
+    def testMixture(self):
+        self.assertNumReject([
+            {
+                'level': 'W',
+                'package': 'pkgname',
+                'tag': 'fatal-tag',
+                'description': '',
+            },
+            {
+                'level': 'W',
+                'package': 'pkgname',
+                'tag': 'unknown-tag',
+                'description': '',
+            },
+            ], {'fatal': ['fatal-tag'], 'nonfatal': []},
+            1,
+        )
+
+    def testOverridable(self):
+        self.assertNumReject([
+            {
+                'level': 'W',
+                'package': 'pkgname',
+                'tag': 'non-fatal-tag',
+                'description': '',
+            },
+            ], {'fatal': [], 'nonfatal': ['non-fatal-tag']},
+            1 + 1, # We add an extra 'reject' hint message
+        )
+
+    def testOverrideAllowed(self):
+        self.assertNumReject([
+                {'level': 'O',
+                'package': 'pkgname',
+                'tag': 'non-fatal-tag',
+                'description': ''},
+            ], {'fatal': [], 'nonfatal': ['non-fatal-tag']},
+            0,
+        )
+
+    def testOverrideNotAllowed(self):
+        self.assertNumReject([
+            {
+                'level': 'O',
+                'package': 'pkgname',
+                'tag': 'fatal-tag',
+                'description': '',
+            },
+            ], {'fatal': ['fatal-tag'], 'nonfatal': []},
+            1,
+        )
+
+if __name__ == '__main__':
+    unittest.main()
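Taken together, these cases spell out the rejection policy: tags unknown to the definitions are ignored, fatal tags always reject (even when lintian marks them overridden with 'O:'), and nonfatal tags reject unless overridden, plus one extra hint message whenever something was overridable. A sketch that satisfies exactly these counts (the real generate_reject_messages differs in wording):

    def generate_reject_messages(parsed_tags, tag_definitions):
        overridable = False
        for tag in parsed_tags:
            if tag['tag'] in tag_definitions['nonfatal']:
                if tag['level'] != 'O':
                    yield 'lintian output: %(level)s: %(package)s: %(tag)s' % tag
                    overridable = True
            elif tag['tag'] in tag_definitions['fatal']:
                # Fatal tags cannot be overridden.
                yield 'lintian output: %(level)s: %(package)s: %(tag)s' % tag
        if overridable:
            yield 'these tags could be overridden in the package'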
diff --git a/tests/test_parse_changes.py b/tests/test_parse_changes.py
new file mode 100755 (executable)
index 0000000..2de4b8a
--- /dev/null
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+from base_test import DakTestCase, fixture
+
+import unittest
+
+from daklib.utils import parse_changes
+from daklib.dak_exceptions import InvalidDscError, ParseChangesError
+
+class ParseChangesTestCase(DakTestCase):
+    def assertParse(self, filename, *args):
+        return parse_changes(fixture(filename), *args)
+
+    def assertFails(self, filename, line=None, *args):
+        try:
+            self.assertParse(filename, *args)
+            self.fail('%s was not recognised as invalid' % filename)
+        except ParseChangesError:
+            pass
+        except InvalidDscError, actual_line:
+            if line is not None:
+                self.assertEqual(actual_line, line)
+
+class ParseDscTestCase(ParseChangesTestCase):
+    def test_1(self):
+        self.assertParse('dsc/1.dsc')
+
+    def test_1_ignoreErrors(self):
+        # Valid .dsc ; ignoring errors
+        self.assertParse('dsc/1.dsc', 0)
+
+    def test_2(self):
+        # Missing blank line before signature body
+        self.assertParse('dsc/2.dsc')
+
+    def test_2_ignoreErrors(self):
+        # Invalid .dsc ; ignoring errors
+        self.assertParse('dsc/2.dsc', 0)
+
+    def test_3(self):
+        # Missing blank line after signature header
+        self.assertParse('dsc/3.dsc')
+
+    def test_4(self):
+        # No blank lines at all
+        self.assertParse('dsc/4.dsc')
+
+    def test_5(self):
+        # Extra blank line before signature body
+        self.assertParse('dsc/5.dsc')
+
+    def test_6(self):
+        # Extra blank line after signature header
+        self.assertParse('dsc/6.dsc')
+
+class ParseChangesTestCase(ParseChangesTestCase):
+    def test_1(self):
+        # Empty changes
+        self.assertFails('changes/1.changes', line=5)
+
+    def test_2(self):
+        changes = self.assertParse('changes/2.changes', 0)
+
+        binaries = changes['binary']
+
+        self.assert_('krb5-ftpd' in binaries.split())
+
+    def test_3(self):
+        for filename in ('valid', 'bogus-pre', 'bogus-post'):
+            for strict_whitespace in (0, 1):
+                changes = self.assertParse(
+                    'changes/%s.changes' % filename,
+                    strict_whitespace,
+                )
+                self.failIf(changes.get('you'))
+
+if __name__ == '__main__':
+    unittest.main()
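parse_changes() takes a path plus a strict-whitespace flag, and the bogus-pre/bogus-post fixtures verify that junk outside the PGP-signed block never leaks into the parsed fields (hence the check that no 'you' key appears). Field names come back lower-cased, as test_2 relies on:

    from daklib.utils import parse_changes

    changes = parse_changes('tests/fixtures/changes/2.changes', 0)
    print changes['source'], changes['version']
    # -> krb5 1.2.2-4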
diff --git a/tests/test_process_gpgv_output.py b/tests/test_process_gpgv_output.py
new file mode 100755 (executable)
index 0000000..ea1fb33
--- /dev/null
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+from base_test import DakTestCase
+
+import unittest
+
+from daklib.utils import process_gpgv_output
+
+class ProcessGPGVOutputTestCase(DakTestCase):
+    def assertParse(self, input, output):
+        self.assertEqual(process_gpgv_output(input)[0], output)
+
+    def assertNotParse(self, input):
+        ret = process_gpgv_output(input)
+        self.assertNotEqual(len(ret[1]), 0)
+
+    ##
+
+    def testEmpty(self):
+        self.assertParse('', {})
+
+    def testBroken(self):
+        self.assertNotParse('foo')
+        self.assertNotParse('  foo  ')
+        self.assertNotParse('[PREFIXPG:] KEY VAL1 VAL2 VAL3')
+
+    def testSimple(self):
+        self.assertParse(
+            '[GNUPG:] KEY VAL1 VAL2 VAL3',
+            {'KEY': ['VAL1', 'VAL2', 'VAL3']},
+        )
+
+    def testNoKeys(self):
+        self.assertParse('[GNUPG:] KEY', {'KEY': []})
+
+    def testDuplicate(self):
+        self.assertNotParse('[GNUPG:] TEST_KEY\n[GNUPG:] TEST_KEY')
+        self.assertNotParse('[GNUPG:] KEY VAL1\n[GNUPG:] KEY VAL2')
+
+    def testDuplicateSpecial(self):
+        # NODATA and friends are special
+        for special in ('NODATA', 'SIGEXPIRED', 'KEYEXPIRED'):
+            self.assertParse(
+                '[GNUPG:] %s\n[GNUPG:] %s' % (special, special),
+                {special: []},
+            )
+
+if __name__ == '__main__':
+    unittest.main()
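These tests amount to a contract for process_gpgv_output(): each '[GNUPG:] KEYWORD ARGS...' status line becomes a keyword-to-arguments entry, malformed lines and repeated keywords produce entries in a parallel error list, and a short whitelist (NODATA, SIGEXPIRED, KEYEXPIRED) may legitimately repeat. A sketch satisfying that contract (the daklib version differs in its error wording):

    def process_gpgv_output(status):
        # Returns (keywords, errors); errors is a list of complaints.
        keywords, errors = {}, []
        for line in status.split('\n'):
            line = line.strip()
            if not line:
                continue
            parts = line.split()
            if parts[0] != '[GNUPG:]':
                errors.append('unparseable line: %s' % line)
                continue
            keyword, args = parts[1], parts[2:]
            if keyword in keywords and keyword not in \
                    ('NODATA', 'SIGEXPIRED', 'KEYEXPIRED'):
                errors.append('duplicate status token: %s' % keyword)
                continue
            keywords[keyword] = args
        return keywords, errors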
index 766e73e49070a86fe798498aee38d9d9d29669dd..bde17275acd2c2509a3107254cbfc977880abaad 100755 (executable)
@@ -1,13 +1,10 @@
 #!/usr/bin/env python
 
-import unittest
-
-import os, sys
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+from base_test import DakTestCase
 
 from daklib import regexes
 
-class re_single_line_field(unittest.TestCase):
+class re_single_line_field(DakTestCase):
     MATCH = regexes.re_single_line_field.match
 
     def testSimple(self):
@@ -32,31 +29,51 @@ class re_single_line_field(unittest.TestCase):
         self.assertEqual(self.MATCH('Foo::bar').groups(), ('Foo', ':bar'))
         self.assertEqual(self.MATCH('Foo: :bar').groups(), ('Foo', ':bar'))
 
-class re_parse_lintian(unittest.TestCase):
+class re_parse_lintian(DakTestCase):
     MATCH = regexes.re_parse_lintian.match
 
     def testBinary(self):
         self.assertEqual(
-            self.MATCH('W: pkgname: some-tag path/to/file').groups(),
-            ('W', 'pkgname', 'some-tag', 'path/to/file')
+            self.MATCH('W: pkgname: some-tag path/to/file').groupdict(),
+            {
+                'level': 'W',
+                'package': 'pkgname',
+                'tag': 'some-tag',
+                'description': 'path/to/file',
+            }
         )
 
     def testBinaryNoDescription(self):
         self.assertEqual(
-            self.MATCH('W: pkgname: some-tag').groups(),
-            ('W', 'pkgname', 'some-tag', '')
+            self.MATCH('W: pkgname: some-tag').groupdict(),
+            {
+                'level': 'W',
+                'package': 'pkgname',
+                'tag': 'some-tag',
+                'description': '',
+            }
         )
 
     def testSourceNoDescription(self):
         self.assertEqual(
-            self.MATCH('W: pkgname source: some-tag').groups(),
-            ('W', 'pkgname source', 'some-tag', '')
+            self.MATCH('W: pkgname source: some-tag').groupdict(),
+            {
+                'level': 'W',
+                'package': 'pkgname source',
+                'tag': 'some-tag',
+                'description': '',
+            }
         )
 
     def testSource(self):
         self.assertEqual(
-            self.MATCH('W: pkgname source: some-tag path/to/file').groups(),
-            ('W', 'pkgname source', 'some-tag', 'path/to/file')
+            self.MATCH('W: pkgname source: some-tag path/to/file').groupdict(),
+            {
+                'level': 'W',
+                'package': 'pkgname source',
+                'tag': 'some-tag',
+                'description': 'path/to/file',
+            }
         )
 
 if __name__ == '__main__':
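Switching these assertions from groups() to groupdict() implies that re_parse_lintian now uses named groups. A pattern compatible with all four cases above (an assumption; the exact regex lives in daklib/regexes.py):

    import re

    re_parse_lintian = re.compile(
        r'^(?P<level>[EWIO]): (?P<package>.*?): '
        r'(?P<tag>[^ ]*) ?(?P<description>.*)$'
    )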
index 4ecaf8b7fcc83925238f2473ed7961714158eee2..fa6f3b386c34aa3706d62b359f2755dc31ca70c1 100755 (executable)
@@ -1,17 +1,13 @@
 #!/usr/bin/env python
 
-import unittest
-
-import os, sys
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-from collections import defaultdict
+from base_test import DakTestCase
 
 from daklib import srcformats
+from collections import defaultdict
 from daklib.formats import parse_format
 from daklib.dak_exceptions import UnknownFormatError
 
-class SourceFormatTestCase(unittest.TestCase):
+class SourceFormatTestCase(DakTestCase):
     def get_rejects(self, has_vars):
         has = defaultdict(lambda: 0)
         has.update(has_vars)
@@ -104,7 +100,7 @@ class FormatTreeQuiltTestCase(SourceFormatTestCase):
             'native_tar': 1,
         })
 
-class FormatFromStringTestCase(unittest.TestCase):
+class FormatFromStringTestCase(DakTestCase):
     def assertFormat(self, txt, klass):
         self.assertEqual(srcformats.get_format_from_string(txt), klass)