git.decadent.org.uk Git - dak.git/commitdiff
Merge remote branch 'buxy/pu/debianqueued-fix' into merge
author     Joerg Jaspert <joerg@debian.org>
           Tue, 17 Nov 2009 16:56:02 +0000 (17:56 +0100)
committer  Joerg Jaspert <joerg@debian.org>
           Tue, 17 Nov 2009 16:56:02 +0000 (17:56 +0100)
* buxy/pu/debianqueued-fix:
  debianqueued: avoid removing .tar.bz2 files too early

Signed-off-by: Joerg Jaspert <joerg@debian.org>
27 files changed:
config/backports.org/cron.hourly
config/debian/common
config/debian/cron.buildd [deleted file]
config/debian/cron.dinstall
config/debian/cron.hourly
config/debian/cron.unchecked
config/debian/dak.conf
config/debian/dinstall.functions
config/debian/dinstall.variables
config/debian/lintian.tags
dak/check_overrides.py
dak/clean_suites.py
dak/dak.py
dak/dakdb/update24.py [new file with mode: 0755]
dak/generate_filelist.py
dak/import_known_changes.py
dak/import_new_files.py [new file with mode: 0755]
dak/make_suite_file_list.py
dak/manage_build_queues.py [new file with mode: 0755]
dak/new_security_install.py
dak/process_new.py
dak/process_upload.py
dak/queue_report.py
dak/update_db.py
daklib/changes.py
daklib/dbconn.py
daklib/queue.py [changed mode: 0644->0755]

diff --git a/config/backports.org/cron.hourly b/config/backports.org/cron.hourly
index 45980065ee1822b7c8c6bdf4ba9b86811c9503ec..b5e0646270f135ece609196817b1adeb369e6ac5 100755 (executable)
@@ -53,6 +53,7 @@ symlinks -d -r $ftpdir
 
 cd $masterdir
 dak make-suite-file-list
+dak generate-filelist
 
 # Generate override files
 cd $overridedir
diff --git a/config/debian/common b/config/debian/common
index 7cd759a9af32bbb9909918d4d94e3ed78dd93c34..8aa05a48a0e6a1097ac853244c30ce3d231cbf49 100644 (file)
@@ -25,22 +25,30 @@ function debug () {
     fi
 }
 
+function wbtrigger() {
+    MODE=${1:-"often"}
+    SSHOPT="-o BatchMode=yes -o ConnectTimeout=30 -o SetupTimeout=240"
+    if lockfile -r 3 -l 3600 "${LOCK_BUILDD}"; then
+        if [ "x${MODE}x" = "xdailyx" ]; then
+            ssh ${SSHOPT} wbadm@buildd /org/wanna-build/trigger.daily || echo "W-B trigger.daily failed" | mail -s "W-B Daily trigger failed" ftpmaster@ftp-master.debian.org
+        elif [ "x${MODE}x" = "xoftenx" ]; then
+            ssh -q -q ${SSHOPT} wbadm@buildd /org/wanna-build/trigger.often
+        else
+            log_error "Unknown wb trigger mode called"
+        fi
+    fi
+    rm -f "${LOCK_BUILDD}"
+}
+
 # used by cron.dinstall *and* cron.unchecked.
 function make_buildd_dir () {
-       cd $configdir
-       apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate apt.conf.buildd
-
-       cd  ${incoming}
-       rm -f buildd/Release*
-       apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="Debian" -o APT::FTPArchive::Release::Label="Debian" -o APT::FTPArchive::Release::Description="buildd incoming" -o APT::FTPArchive::Release::Architectures="${archs}" release buildd > Release
-       gpg --secret-keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 55BE302B --detach-sign -o Release.gpg Release
-       mv Release* buildd/.
+    dak manage-build-queues -a
 
-       cd ${incoming}
-       mkdir -p tree/${STAMP}
-       cp -al ${incoming}/buildd/. tree/${STAMP}/
-       ln -sfT tree/${STAMP} ${incoming}/builddweb
-       find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
+    cd ${incoming}
+    mkdir -p tree/${STAMP}
+    cp -al ${incoming}/buildd/. tree/${STAMP}/
+    ln -sfT tree/${STAMP} ${incoming}/builddweb
+    find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
 }
 
 # Do the unchecked processing, in case we have files.
@@ -56,6 +64,19 @@ function do_unchecked () {
     dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$unchecked" >> $report
 }
 
+# Do the newstage processing, in case we have files.
+function do_newstage () {
+    cd $newstage
+
+    changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs)
+    report=$queuedir/REPORT
+    timestamp=$(date "+%Y-%m-%d %H:%M")
+    UNCHECKED_WITHOUT_LOCK=${UNCHECKED_WITHOUT_LOCK:-""}
+
+    echo "$timestamp": ${changes:-"Nothing to do in newstage"}  >> $report
+    dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$newstage" >> $report
+}
+
 function sync_debbugs () {
     # sync with debbugs
     echo "--" >> $report
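
For context: the new wbtrigger() helper serializes wanna-build triggers through ${LOCK_BUILDD} using procmail's lockfile(1), where -r 3 allows three acquisition retries and -l 3600 treats a lock older than an hour as stale and breaks it. A rough Python equivalent of that locking protocol (hypothetical helper, not part of dak):

    import errno
    import os
    import time

    def acquire_stale_breaking_lock(path, retries=3, stale_after=3600):
        # Mimics "lockfile -r 3 -l 3600": retry a few times, removing a lock
        # file that has sat around for longer than stale_after seconds.
        for _ in range(retries + 1):
            try:
                fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
                os.close(fd)
                return True                     # lock acquired
            except OSError, e:
                if e.errno != errno.EEXIST:
                    raise
                if time.time() - os.stat(path).st_mtime > stale_after:
                    os.unlink(path)             # break the stale lock
                else:
                    time.sleep(8)               # lockfile(1)'s default sleep
        return False

Note that wbtrigger() runs rm -f "${LOCK_BUILDD}" even when lockfile failed, so a run that lost the race can delete a lock a concurrent run still holds; moving the rm inside the if would avoid that.
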
diff --git a/config/debian/cron.buildd b/config/debian/cron.buildd
deleted file mode 100755 (executable)
index a5490e5..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#! /bin/sh
-#
-# Called from cron.unchecked to update wanna-build, each time it runs.
-#
-ssh -q -q -o BatchMode=yes -o ConnectTimeout=30 -o SetupTimeout=240 wbadm@buildd /org/wanna-build/trigger.often
-exit 0
diff --git a/config/debian/cron.dinstall b/config/debian/cron.dinstall
index 4d1195a914768042d5a0274a56a1b6df3f4bffb0..e3c6f4e672290c8e07ee96d6354b649ec4d49f0a 100755 (executable)
@@ -228,7 +228,9 @@ GO=(
     ARGS=""
     ERR=""
 )
-stage $GO
+# disabled until p-u is faster than it is now; it runs often enough anyway,
+# so it won't hurt to save the time here.
+#stage $GO
 
 GO=(
     FUNC="cruft"
@@ -249,6 +251,14 @@ GO=(
 )
 stage $GO
 
+GO=(
+    FUNC="filelist"
+    TIME="generate-filelist"
+    ARGS=""
+    ERR=""
+)
+stage $GO
+
 GO=(
     FUNC="fingerprints"
     TIME="import-keyring"
@@ -271,7 +281,7 @@ GO=(
     ARGS=""
     ERR="false"
 )
-stage $GO
+stage $GO &
 
 GO=(
     FUNC="packages"
@@ -279,6 +289,8 @@ GO=(
     ARGS=""
     ERR=""
 )
+# Careful: if we ever remove this monster-long stage, we have to wait for the
+# backgrounded functions before it, as we then lose this ~1.5 hour sync point.
 stage $GO
 
 GO=(
@@ -303,20 +315,51 @@ GO=(
     ARGS=""
     ERR=""
 )
-### TODO: clean-* fixup
-#stage $GO
+stage $GO
+
+GO=(
+    FUNC="buildd_dir"
+    TIME="buildd_dir"
+    ARGS=""
+    ERR=""
+)
+stage $GO
+
+GO=(
+    FUNC="mkmaintainers"
+    TIME="mkmaintainers"
+    ARGS=""
+    ERR=""
+)
+stage $GO
 
 GO=(
-    FUNC="buildd"
-    TIME="buildd"
+    FUNC="copyoverrides"
+    TIME="copyoverrides"
     ARGS=""
     ERR=""
 )
 stage $GO
 
 GO=(
-    FUNC="scripts"
-    TIME="scripts"
+    FUNC="mklslar"
+    TIME="mklslar"
+    ARGS=""
+    ERR=""
+)
+stage $GO
+
+GO=(
+    FUNC="mkfilesindices"
+    TIME="mkfilesindices"
+    ARGS=""
+    ERR=""
+)
+stage $GO
+
+GO=(
+    FUNC="mkchecksums"
+    TIME="mkchecksums"
     ARGS=""
     ERR=""
 )
@@ -404,7 +447,7 @@ GO=(
     ARGS=""
     ERR="false"
 )
-stage $GO
+stage $GO &
 
 GO=(
     FUNC="i18n2"
@@ -412,7 +455,7 @@ GO=(
     ARGS=""
     ERR="false"
 )
-stage $GO
+stage $GO &
 
 GO=(
     FUNC="stats"
@@ -428,9 +471,9 @@ GO=(
     ARGS=""
     ERR="false"
 )
-stage $GO
+stage $GO &
 
-rm -f ${LOCK_BRITNEY}
+rm -f "${LOCK_BRITNEY}"
 
 GO=(
     FUNC="pgdakdev"
diff --git a/config/debian/cron.hourly b/config/debian/cron.hourly
index 24955023e4add65bb5da426ca25b5ec85f0ef633..9dd1450473a24d674db70207cc722419097e435a 100755 (executable)
@@ -11,8 +11,9 @@ date -u > $ftpdir/project/trace/ftp-master.debian.org
 echo "Using dak v1" >> $ftpdir/project/trace/ftp-master.debian.org
 echo "Running on host: $(hostname -f)" >> $ftpdir/project/trace/ftp-master.debian.org
 dak import-users-from-passwd
-#dak queue-report -n > $webdir/new.html
-#dak queue-report -8 -d accepted,new,byhand,proposedupdates,oldproposedupdates
+dak queue-report -n > $webdir/new.html
+# We used to have accepted in here, but it doesn't exist in that form any more
+dak queue-report -8 -d new,byhand,proposedupdates,oldproposedupdates
 dak show-deferred > ${webdir}/deferred.html
 #cd $queuedir/new ; dak show-new *.changes > /dev/null
 $base/dak/tools/queue_rss.py -q $queuedir/new -o $webdir/rss/ -d $base/misc
diff --git a/config/debian/cron.unchecked b/config/debian/cron.unchecked
index 507fc5843ac8089803028c7f964daed859620e82..a9e59f2f373a7b7c006d08f49c17c011b1f55c40 100755 (executable)
@@ -33,6 +33,7 @@ LOCKDAILY=""
 LOCKFILE="$lockdir/unchecked.lock"
 LOCK_NEW="$lockdir/processnew.lock"
 NOTICE="$lockdir/daily.lock"
+LOCK_BUILDD="$lockdir/buildd.lock"
 
 # our name
 PROGRAM="unchecked"
@@ -59,7 +60,6 @@ cleanup() {
 function do_buildd () {
     if lockfile -r3 $NOTICE; then
         LOCKDAILY="YES"
-        psql projectb -A -t -q -c "SELECT build_queue.path || '/' || build_queue_files.filename FROM build_queue_files LEFT JOIN build_queue ON (build_queue.id = build_queue_files.build_queue_id) WHERE queue_name = 'buildd' AND filename ~ 'd(sc|eb)$';" > $dbdir/dists/unstable_accepted.list
         cd $overridedir
         dak make-overrides &>/dev/null
         rm -f override.sid.all3 override.sid.all3.src
@@ -70,8 +70,7 @@ function do_buildd () {
             fi
         done
         make_buildd_dir
-
-        . $configdir/cron.buildd
+        wbtrigger "often"
     fi
 }
 
@@ -79,6 +78,10 @@ function do_buildd () {
 # the actual unchecked functions follow                                #
 ########################################################################
 
+# And use one locale, no matter what the caller has set
+export LANG=C
+export LC_ALL=C
+
 #lockfile -r3 "$LOCK_NEW"
 # acceptnew
 #rm -f "$LOCK_NEW"
@@ -87,6 +90,7 @@ function do_buildd () {
 lockfile -r3 $LOCKFILE || exit 0
 trap cleanup 0
 
+do_newstage
 do_unchecked
 
 if [ ! -z "$changes" ]; then
diff --git a/config/debian/dak.conf b/config/debian/dak.conf
index b254a7ad5ad8a4243887065934c5325a1efd991b..ceafaeda9e9a783ba73133e722685a0b4498535e 100644 (file)
@@ -557,7 +557,6 @@ Dir
   UrgencyLog "/srv/release.debian.org/britney/input/urgencies/";
   Queue
   {
-    Accepted "/srv/ftp.debian.org/queue/accepted/";
     Byhand "/srv/ftp.debian.org/queue/byhand/";
     ProposedUpdates "/srv/ftp.debian.org/queue/p-u-new/";
     OldProposedUpdates "/srv/ftp.debian.org/queue/o-p-u-new/";
diff --git a/config/debian/dinstall.functions b/config/debian/dinstall.functions
index a78c2c8db6547439535a1d7d3ea70d0cbe4367a4..ddce96920c45b73add7b903c0612b1c03e2b9668 100644 (file)
@@ -134,33 +134,38 @@ function msfl() {
     dak make-suite-file-list
 }
 
+function filelist() {
+    log "Generating file lists for apt-ftparchive"
+    dak generate-filelist
+}
+
 function fingerprints() {
     log "Not updating fingerprints - scripts needs checking"
 
-#    log "Updating fingerprints"
-#    dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg
-
-#    OUTFILE=$(mktemp)
-#    dak import-keyring --generate-users "%s" /srv/keyring.debian.org/keyrings/debian-maintainers.gpg >"${OUTFILE}"
-
-#    if [ -s "${OUTFILE}" ]; then
-#        /usr/sbin/sendmail -odq -oi -t -f envelope@ftp-master.debian.org <<EOF
-#From: Debian FTP Masters <ftpmaster@ftp-master.debian.org>
-#To: <debian-project@lists.debian.org>
-#Subject: Debian Maintainers Keyring changes
-#Content-Type: text/plain; charset=utf-8
-#MIME-Version: 1.0
-#
-#The following changes to the debian-maintainers keyring have just been activated:
-#
-#$(cat $OUTFILE)
-#
-#Debian distribution maintenance software,
-#on behalf of the Keyring maintainers
-#
-#EOF
-#    fi
-#    rm -f "$OUTFILE"
+    log "Updating fingerprints"
+    dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg
+
+    OUTFILE=$(mktemp)
+    dak import-keyring --generate-users "%s" /srv/keyring.debian.org/keyrings/debian-maintainers.gpg >"${OUTFILE}"
+
+    if [ -s "${OUTFILE}" ]; then
+        /usr/sbin/sendmail -odq -oi -t -f envelope@ftp-master.debian.org <<EOF
+From: Debian FTP Masters <ftpmaster@ftp-master.debian.org>
+To: <debian-project@lists.debian.org>
+Subject: Debian Maintainers Keyring changes
+Content-Type: text/plain; charset=utf-8
+MIME-Version: 1.0
+
+The following changes to the debian-maintainers keyring have just been activated:
+
+$(cat $OUTFILE)
+
+Debian distribution maintenance software,
+on behalf of the Keyring maintainers
+
+EOF
+    fi
+    rm -f "$OUTFILE"
 }
 
 function overrides() {
@@ -201,15 +206,6 @@ function dakcleanup() {
     dak clean-queues
 }
 
-function buildd() {
-    # Needs to be rebuilt, as files have moved.  Due to unaccepts, we need to
-    # update this before wanna-build is updated.
-    log "Regenerating wanna-build/buildd information"
-    psql projectb -A -t -q -c "SELECT build_queue.path || '/' || build_queue_files.filename FROM build_queue_files LEFT JOIN build_queue ON (build_queue.id =build_queue_files.build_queue_id) WHERE queue_name = 'buildd' AND filename ~ 'd(sc|eb)$'"  > $dbdir/dists/unstable_accepted.list
-    symlinks -d /srv/incoming.debian.org/buildd > /dev/null
-    apt-ftparchive generate apt.conf.buildd
-}
-
 function buildd_dir() {
     # Rebuilt the buildd dir to avoid long times of 403
     log "Regenerating the buildd incoming dir"
@@ -251,54 +247,41 @@ function mklslar() {
 }
 
 function mkmaintainers() {
-    log -n 'Creating Maintainers index ... '
+    log 'Creating Maintainers index ... '
 
     cd $indices
     dak make-maintainers ${scriptdir}/masterfiles/pseudo-packages.maintainers | \
-        sed -e "s/~[^  ]*\([   ]\)/\1/"  | awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}' > .new-maintainers
-
-    set +e
-    cmp .new-maintainers Maintainers >/dev/null
-    rc=$?
-    set -e
-    if [ $rc = 1 ] || [ ! -f Maintainers ] ; then
-           log -n "installing Maintainers ... "
+        sed -e "s/~[^  ]*\([   ]\)/\1/"  | \
+        awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}' > .new-maintainers
+
+    if ! cmp -s .new-maintainers Maintainers || [ ! -f Maintainers ]; then
+           log "installing Maintainers ... "
            mv -f .new-maintainers Maintainers
            gzip --rsyncable -9v <Maintainers >.new-maintainers.gz
            mv -f .new-maintainers.gz Maintainers.gz
-    elif [ $rc = 0 ] ; then
-           log '(same as before)'
-           rm -f .new-maintainers
     else
-           log cmp returned $rc
-           false
+        rm -f .new-maintainers
     fi
 }
 
 function copyoverrides() {
     log 'Copying override files into public view ...'
 
-    for f in $copyoverrides ; do
+    for ofile in $copyoverrides ; do
            cd $overridedir
-           chmod g+w override.$f
+           chmod g+w override.$ofile
 
            cd $indices
-           rm -f .newover-$f.gz
-           pc="`gzip 2>&1 -9nv <$overridedir/override.$f >.newover-$f.gz`"
-           set +e
-           nf=override.$f.gz
-           cmp -s .newover-$f.gz $nf
-           rc=$?
-           set -e
-        if [ $rc = 0 ]; then
-                   rm -f .newover-$f.gz
-           elif [ $rc = 1 -o ! -f $nf ]; then
-                   log "   installing new $nf $pc"
-                   mv -f .newover-$f.gz $nf
-                   chmod g+w $nf
-           else
-                   log $? $pc
-                   exit 1
+
+           newofile=override.$ofile.gz
+           rm -f .newover-$ofile.gz
+           pc="`gzip 2>&1 -9nv <$overridedir/override.$ofile >.newover-$ofile.gz`"
+        if ! cmp -s .newover-$ofile.gz $newofile || [ ! -f $newofile ]; then
+                   log "   installing new $newofile $pc"
+                   mv -f .newover-$ofile.gz $newofile
+                   chmod g+w $newofile
+        else
+                   rm -f .newover-$ofile.gz
            fi
     done
 }
@@ -319,7 +302,7 @@ function mkfilesindices() {
         perl -e '@nonpool=(); while (<>) { if (m,^\./pool/,) { print; } else { push @nonpool, $_; } } print for (@nonpool);'
     }
 
-    log "Generating sources list
+    log "Generating sources list"
     (
         sed -n 's/|$//p' $ARCHLIST
         cd $base/ftp
@@ -327,7 +310,7 @@ function mkfilesindices() {
         find ./dists \! -type d | grep "/source/"
     ) | sort -u | gzip --rsyncable -9 > source.list.gz
 
-    log "Generating arch lists
+    log "Generating arch lists"
 
     ARCHES=$( (<$ARCHLIST sed -n 's/^.*|//p'; echo amd64) | grep . | grep -v all | sort -u)
     for a in $ARCHES; do
@@ -364,7 +347,6 @@ function mkfilesindices() {
     done
 
     log "Finding everything on the ftp site to generate sundries"
-
     (cd $base/ftp; find . \! -type d \! -name 'Archive_Maintenance_In_Progress' | sort) >$ARCHLIST
 
     rm -f sundries.list
@@ -404,15 +386,6 @@ function mkchecksums() {
     ${bindir}/dsync-flist -q link-dups $dsynclist || true
 }
 
-function scripts() {
-    log "Running various scripts from $scriptsdir"
-    mkmaintainers
-    copyoverrides
-    mklslar
-    mkfilesindices
-    mkchecksums
-}
-
 function mirror() {
     log "Regenerating \"public\" mirror/ hardlink fun"
     cd ${mirrordir}
@@ -421,7 +394,7 @@ function mirror() {
 
 function wb() {
     log "Trigger daily wanna-build run"
-    ssh -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 wbadm@buildd /org/wanna-build/trigger.daily || echo "W-B trigger.daily failed" | mail -s "W-B Daily trigger failed" ftpmaster@ftp-master.debian.org
+    wbtrigger "daily"
 }
 
 function expire() {
diff --git a/config/debian/dinstall.variables b/config/debian/dinstall.variables
index be5b382a3e228fbf191fc930cf9bede585aa2657..353a3aebcc3bc2db74688dd89209acd1244be139 100644 (file)
@@ -38,3 +38,6 @@ LOCK_BRITNEY="$lockdir/britney.lock"
 # If this file exists we exit immediately after the currently running
 # function is done
 LOCK_STOP="$lockdir/archive.stop"
+
+# Lock buildd updates
+LOCK_BUILDD="$lockdir/buildd.lock"
diff --git a/config/debian/lintian.tags b/config/debian/lintian.tags
index 6a11a9ecd8a36ef19846b2a38338fa27b6f9b27f..7a0fe891e4762aa32a382fc94e474fb07b470a64 100644 (file)
@@ -18,6 +18,8 @@ lintian:
     - copyright-contains-dh_make-todo-boilerplate
     - preinst-interpreter-without-predepends
     - control-interpreter-without-depends
+    - dir-or-file-in-var-www
+    - wrong-file-owner-uid-or-gid
   fatal:
     - debian-control-file-uses-obsolete-national-encoding
     - malformed-deb-archive
@@ -27,7 +29,6 @@ lintian:
     - forbidden-postrm-interpreter
     - control-interpreter-in-usr-local
     - package-uses-local-diversion
-    - wrong-file-owner-uid-or-gid
     - bad-relation
     - FSSTND-dir-in-usr
     - FSSTND-dir-in-var
@@ -37,7 +38,6 @@ lintian:
     - package-installs-python-pyc
     - library-in-debug-or-profile-should-not-be-stripped
     - binary-file-compressed-with-upx
-    - html-changelog-without-text-version
     - file-in-usr-marked-as-conffile
     - build-info-in-binary-control-file-section
     - debian-control-with-duplicate-fields
@@ -61,7 +61,6 @@ lintian:
     - package-not-lowercase
     - no-version-field
     - bad-version-number
-    - upstream-version-not-numeric
     - no-architecture-field
     - magic-arch-in-arch-list
     - too-many-architectures
@@ -76,18 +75,10 @@ lintian:
     - uploader-address-is-on-localhost
     - no-source-field
     - source-field-does-not-match-pkg-name
-    - build-depends-on-essential-package-without-using-version
-    - depends-on-build-essential-package-without-using-version
-    - build-depends-on-build-essential
-    - executable-in-usr-share-doc
     - symlink-has-too-many-up-segments
-    - debian-rules-is-symlink
     - debian-rules-not-a-makefile
     - debian-rules-missing-required-target
     - maintainer-script-removes-device-files
-    - no-standards-version-field
-    - invalid-standards-version
-    - dir-or-file-in-var-www
     - dir-or-file-in-tmp
     - dir-or-file-in-mnt
     - dir-or-file-in-opt
diff --git a/dak/check_overrides.py b/dak/check_overrides.py
index 1e9a6d6b1d0ecc219e4478ad6d256dd8f80430c8..2987f9be10978ff58a2abc22f287a92d8bfb47e7 100755 (executable)
@@ -77,11 +77,6 @@ Check for cruft in overrides.
 
 ################################################################################
 
-def gen_blacklist(dir):
-    for entry in os.listdir(dir):
-        entry = entry.split('_')[0]
-        blacklist[entry] = 1
-
 def process(osuite, affected_suites, originosuite, component, otype, session):
     global Logger, Options, sections, priorities
 
@@ -342,8 +337,6 @@ def main ():
     else:
         Logger = daklog.Logger(cnf, "check-overrides", 1)
 
-    gen_blacklist(cnf["Dir::Queue::Accepted"])
-
     for osuite in cnf.SubTree("Check-Overrides::OverrideSuites").List():
         if "1" != cnf["Check-Overrides::OverrideSuites::%s::Process" % osuite]:
             continue
diff --git a/dak/clean_suites.py b/dak/clean_suites.py
index 99f0c8b4629162018a54936baf381814a4edd3da..a30d3d8a62452a9503ef474bbcd3d6d459a60d42 100755 (executable)
@@ -62,28 +62,45 @@ def check_binaries(now_date, delete_date, max_delete, session):
     # deletion.
 
     q = session.execute("""
-SELECT b.file, f.filename FROM binaries b, files f
- WHERE f.last_used IS NULL AND b.file = f.id
-   AND NOT EXISTS (SELECT 1 FROM bin_associations ba WHERE ba.bin = b.id)""")
-
+SELECT b.file, f.filename
+         FROM binaries b
+    LEFT JOIN files f
+      ON (b.file = f.id)
+   WHERE f.last_used IS NULL
+     AND b.id NOT IN
+         (SELECT ba.bin FROM bin_associations ba)
+     AND f.id NOT IN
+         (SELECT bqf.fileid FROM build_queue_files bqf)""")
     for i in q.fetchall():
         Logger.log(["set lastused", i[1]])
-        session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid AND last_used IS NULL",
-                        {'lastused': now_date, 'fileid': i[0]})
-    session.commit()
+        if not Options["No-Action"]:
+            session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid AND last_used IS NULL",
+                            {'lastused': now_date, 'fileid': i[0]})
+
+    if not Options["No-Action"]:
+        session.commit()
 
     # Check for any binaries which are marked for eventual deletion
     # but are now used again.
 
     q = session.execute("""
-SELECT b.file, f.filename FROM binaries b, files f
-   WHERE f.last_used IS NOT NULL AND f.id = b.file
-    AND EXISTS (SELECT 1 FROM bin_associations ba WHERE ba.bin = b.id)""")
+SELECT b.file, f.filename
+         FROM binaries b
+    LEFT JOIN files f
+      ON (b.file = f.id)
+   WHERE f.last_used IS NOT NULL
+     AND (b.id IN
+          (SELECT ba.bin FROM bin_associations ba)
+          OR f.id IN
+          (SELECT bqf.fileid FROM build_queue_files bqf))""")
 
     for i in q.fetchall():
         Logger.log(["unset lastused", i[1]])
-        session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid", {'fileid': i[0]})
-    session.commit()
+        if not Options["No-Action"]:
+            session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid", {'fileid': i[0]})
+
+    if not Options["No-Action"]:
+        session.commit()
 
 ########################################
 
@@ -93,10 +110,17 @@ def check_sources(now_date, delete_date, max_delete, session):
     # Get the list of source packages not in a suite and not used by
     # any binaries.
     q = session.execute("""
-SELECT s.id, s.file, f.filename FROM source s, files f
-  WHERE f.last_used IS NULL AND s.file = f.id
-    AND NOT EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id)
-    AND NOT EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id)""")
+SELECT s.id, s.file, f.filename
+       FROM source s
+  LEFT JOIN files f
+    ON (s.file = f.id)
+  WHERE f.last_used IS NULL
+   AND s.id NOT IN
+        (SELECT sa.source FROM src_associations sa)
+   AND s.id NOT IN
+        (SELECT b.source FROM binaries b)
+   AND f.id NOT IN
+        (SELECT bqf.fileid FROM build_queue_files bqf)""")
 
     #### XXX: this should ignore cases where the files for the binary b
     ####      have been marked for deletion (so the delay between bins go
@@ -109,9 +133,10 @@ SELECT s.id, s.file, f.filename FROM source s, files f
 
         # Mark the .dsc file for deletion
         Logger.log(["set lastused", dsc_fname])
-        session.execute("""UPDATE files SET last_used = :last_used
-                                    WHERE id = :dscfileid AND last_used IS NULL""",
-                        {'last_used': now_date, 'dscfileid': dsc_file_id})
+        if not Options["No-Action"]:
+            session.execute("""UPDATE files SET last_used = :last_used
+                                WHERE id = :dscfileid AND last_used IS NULL""",
+                            {'last_used': now_date, 'dscfileid': dsc_file_id})
 
         # Mark all other files references by .dsc too if they're not used by anyone else
         x = session.execute("""SELECT f.id, f.filename FROM files f, dsc_files d
@@ -123,30 +148,34 @@ SELECT s.id, s.file, f.filename FROM source s, files f
             y = session.execute("SELECT id FROM dsc_files d WHERE d.file = :fileid", {'fileid': file_id})
             if len(y.fetchall()) == 1:
                 Logger.log(["set lastused", file_name])
-                session.execute("""UPDATE files SET last_used = :lastused
-                                  WHERE id = :fileid AND last_used IS NULL""",
-                                {'lastused': now_date, 'fileid': file_id})
+                if not Options["No-Action"]:
+                    session.execute("""UPDATE files SET last_used = :lastused
+                                       WHERE id = :fileid AND last_used IS NULL""",
+                                    {'lastused': now_date, 'fileid': file_id})
 
-    session.commit()
+    if not Options["No-Action"]:
+        session.commit()
 
     # Check for any sources which are marked for deletion but which
     # are now used again.
-
     q = session.execute("""
 SELECT f.id, f.filename FROM source s, files f, dsc_files df
   WHERE f.last_used IS NOT NULL AND s.id = df.source AND df.file = f.id
     AND ((EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id))
-      OR (EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id)))""")
+      OR (EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id))
+      OR (EXISTS (SELECT 1 FROM build_queue_files bqf WHERE bqf.fileid = s.file)))""")
 
     #### XXX: this should also handle deleted binaries specially (ie, not
     ####      reinstate sources because of them
 
     for i in q.fetchall():
         Logger.log(["unset lastused", i[1]])
-        session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid",
-                        {'fileid': i[0]})
+        if not Options["No-Action"]:
+            session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid",
+                            {'fileid': i[0]})
 
-    session.commit()
+    if not Options["No-Action"]:
+        session.commit()
 
 ########################################
 
@@ -164,7 +193,7 @@ SELECT id, filename FROM files f
   WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id)
     AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id)
     AND NOT EXISTS (SELECT 1 FROM changes_pool_files cpf WHERE cpf.fileid = f.id)
-    AND NOT EXISTS (SELECT 1 FROM queue_files qf WHERE qf.id = f.id)
+    AND NOT EXISTS (SELECT 1 FROM build_queue_files qf WHERE qf.fileid = f.id)
     AND last_used IS NULL
     ORDER BY filename""")
 
@@ -174,10 +203,12 @@ SELECT id, filename FROM files f
         for x in ql:
             utils.warn("orphaned file: %s" % x)
             Logger.log(["set lastused", x[1], "ORPHANED FILE"])
-            session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid",
-                            {'lastused': now_date, 'fileid': x[0]})
+            if not Options["No-Action"]:
+                 session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid",
+                                 {'lastused': now_date, 'fileid': x[0]})
 
-        session.commit()
+        if not Options["No-Action"]:
+            session.commit()
 
 def clean_binaries(now_date, delete_date, max_delete, session):
     # We do this here so that the binaries we remove will have their
@@ -206,7 +237,7 @@ def clean(now_date, delete_date, max_delete, session):
 
     cur_date = now_date.strftime("%Y-%m-%d")
     dest = os.path.join(cnf["Dir::Morgue"], cnf["Clean-Suites::MorgueSubDir"], cur_date)
-    if not os.path.exists(dest):
+    if not Options["No-Action"] and not os.path.exists(dest):
         os.mkdir(dest)
 
     # Delete from source
@@ -326,46 +357,13 @@ SELECT f.id, f.fingerprint FROM fingerprint f
 
 ################################################################################
 
-def clean_queue_build(now_date, delete_date, max_delete, session):
-
-    cnf = Config()
-
-    if not cnf.ValueList("Dinstall::QueueBuildSuites") or Options["No-Action"]:
-        return
-
-    print "Cleaning out queue build symlinks..."
-
-    our_delete_date = now_date - timedelta(seconds = int(cnf["Clean-Suites::QueueBuildStayOfExecution"]))
-    count = 0
-
-    for qf in session.query(BuildQueueFile).filter(BuildQueueFile.last_used <= our_delete_date):
-        if not os.path.exists(qf.filename):
-            utils.warn("%s (from queue_build) doesn't exist." % (qf.filename))
-            continue
-
-        if not cnf.FindB("Dinstall::SecurityQueueBuild") and not os.path.islink(qf.filename):
-            utils.fubar("%s (from queue_build) should be a symlink but isn't." % (qf.filename))
-
-        Logger.log(["delete queue build", qf.filename])
-        if not Options["No-Action"]:
-            os.unlink(qf.filename)
-            session.delete(qf)
-        count += 1
-
-    if not Options["No-Action"]:
-        session.commit()
-
-    if count:
-        Logger.log(["total", count])
-        print "Cleaned %d queue_build files." % (count)
-
-################################################################################
-
 def clean_empty_directories(session):
     """
     Removes empty directories from pool directories.
     """
 
+    print "Cleaning out empty directories..."
+
     count = 0
 
     cursor = session.execute(
@@ -432,7 +430,6 @@ def main():
     clean(now_date, delete_date, max_delete, session)
     clean_maintainers(now_date, delete_date, max_delete, session)
     clean_fingerprints(now_date, delete_date, max_delete, session)
-    clean_queue_build(now_date, delete_date, max_delete, session)
     clean_empty_directories(session)
 
     Logger.close()
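
The theme of the clean_suites.py changes is a consistent dry-run guard: every UPDATE and commit is now skipped under --no-action while the logging still happens, so a dry run reports exactly what a real run would change. Reduced to its core (a sketch; the names mirror the functions above):

    def mark_last_used(session, rows, now_date, no_action):
        for fileid, filename in rows:
            Logger.log(["set lastused", filename])   # always log the decision
            if not no_action:                        # only touch the DB for real
                session.execute("UPDATE files SET last_used = :lastused "
                                "WHERE id = :fileid AND last_used IS NULL",
                                {'lastused': now_date, 'fileid': fileid})
        if not no_action:
            session.commit()
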
diff --git a/dak/dak.py b/dak/dak.py
index cd42c3ed4bcaa70bd76bd17268c322c3bf43c8b0..1d9336dbfb1e5ee9c364726e5b6e3c1d836c87bb 100755 (executable)
@@ -84,6 +84,8 @@ def init():
          "Generate .diff/Index files"),
         ("clean-suites",
          "Clean unused/superseded packages from the archive"),
+        ("manage-build-queues",
+         "Clean and update metadata for build queues"),
         ("clean-queues",
          "Clean cruft from incoming"),
         ("clean-proposed-updates",
diff --git a/dak/dakdb/update24.py b/dak/dakdb/update24.py
new file mode 100755 (executable)
index 0000000..4e8c505
--- /dev/null
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+"""
+Add some meta info to queues
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009  Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+
+def do_update(self):
+    print "Add meta info columns to queues."
+
+    try:
+        c = self.db.cursor()
+
+        c.execute("ALTER TABLE policy_queue ADD COLUMN generate_metadata BOOL DEFAULT FALSE NOT NULL")
+        c.execute("ALTER TABLE policy_queue ADD COLUMN origin TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE policy_queue ADD COLUMN label TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE policy_queue ADD COLUMN releasedescription TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE policy_queue ADD COLUMN signingkey TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE policy_queue ADD COLUMN stay_of_execution INT4 NOT NULL DEFAULT 86400 CHECK (stay_of_execution >= 0)")
+        c.execute("""ALTER TABLE policy_queue
+                       ADD CONSTRAINT policy_queue_meta_sanity_check
+                           CHECK ( (generate_metadata IS FALSE)
+                                OR (origin IS NOT NULL AND label IS NOT NULL AND releasedescription IS NOT NULL) )""")
+
+        c.execute("ALTER TABLE build_queue ADD COLUMN generate_metadata BOOL DEFAULT FALSE NOT NULL")
+        c.execute("ALTER TABLE build_queue ADD COLUMN origin TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE build_queue ADD COLUMN label TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE build_queue ADD COLUMN releasedescription TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE build_queue ADD COLUMN signingkey TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE build_queue ADD COLUMN stay_of_execution INT4 NOT NULL DEFAULT 86400 CHECK (stay_of_execution >= 0)")
+        c.execute("""ALTER TABLE build_queue
+                       ADD CONSTRAINT build_queue_meta_sanity_check
+                           CHECK ( (generate_metadata IS FALSE)
+                                OR (origin IS NOT NULL AND label IS NOT NULL AND releasedescription IS NOT NULL) )""")
+
+        print "Committing"
+        c.execute("UPDATE config SET value = '24' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.InternalError, msg:
+        self.db.rollback()
+        raise DBUpdateError, "Database error, rollback issued. Error message : %s" % (str(msg))
+
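
dak/update_db.py (also touched by this commit, though its diff is not shown here) drives modules like this one: it reads db_revision from the config table and applies each dak/dakdb/updateNN.py in turn until the schema is current. Roughly, as a sketch of the dispatch rather than the actual update_db.py code:

    REQUIRED_REVISION = 24

    class Updater(object):
        def __init__(self, db):
            self.db = db   # psycopg2 connection, as do_update() expects via self.db

        def update_db(self, current_revision):
            for rev in range(current_revision + 1, REQUIRED_REVISION + 1):
                module = __import__("dak.dakdb.update%d" % rev,
                                    globals(), locals(), ['do_update'])
                module.do_update(self)   # each do_update bumps db_revision itself
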
diff --git a/dak/generate_filelist.py b/dak/generate_filelist.py
index 02f5f18518b7985e31ddcb2e0099837e6db54b6f..d0a64596eff5579ec51b42db2a357af0b2dbaf79 100755 (executable)
@@ -36,6 +36,7 @@ def getSources(suite, component, session):
         SELECT path, filename
             FROM srcfiles_suite_component
             WHERE suite = :suite AND component = :component
+            ORDER BY filename
     """
     args = { 'suite': suite.suite_id,
              'component': component.component_id }
@@ -47,6 +48,7 @@ def getBinaries(suite, component, architecture, type, session):
             FROM binfiles_suite_component_arch
             WHERE suite = :suite AND component = :component AND type = :type AND
                   (architecture = :architecture OR architecture = 2)
+            ORDER BY filename
     """
     args = { 'suite': suite.suite_id,
              'component': component.component_id,
diff --git a/dak/import_known_changes.py b/dak/import_known_changes.py
index c8d5bf96c921a575bc738b9fec24fc6ee08447f3..da696aedc079b2cf846a3490b8ae0676e733538f 100755 (executable)
@@ -201,7 +201,7 @@ class ChangesGenerator(threading.Thread):
     def run(self):
         cnf = Config()
         count = 1
-        for directory in [ "Accepted", "Byhand", "Done", "New", "ProposedUpdates", "OldProposedUpdates" ]:
+        for directory in [ "Byhand", "Done", "New", "ProposedUpdates", "OldProposedUpdates" ]:
             checkdir = cnf["Dir::Queue::%s" % (directory) ]
             if os.path.exists(checkdir):
                 print "Looking into %s" % (checkdir)
@@ -257,7 +257,7 @@ class ImportThread(threading.Thread):
                 changesfile = os.path.join(to_import.dirpath, to_import.changesfile)
                 changes.changes = parse_changes(changesfile, signing_rules=-1)
                 changes.changes["fingerprint"] = check_signature(changesfile)
-                changes.add_known_changes(to_import.dirpath, self.session)
+                changes.add_known_changes(to_import.dirpath, session=self.session)
                 self.session.commit()
 
             except InvalidDscError, line:
diff --git a/dak/import_new_files.py b/dak/import_new_files.py
new file mode 100755 (executable)
index 0000000..f33c30f
--- /dev/null
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Import known_changes files
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009  Mike O'Connor <stew@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+
+################################################################################
+
+import sys
+import os
+import logging
+import threading
+import glob
+import apt_pkg
+from daklib.dbconn import DBConn, get_dbchange, get_policy_queue, session_wrapper, ChangePendingFile, get_location, check_poolfile
+from daklib.config import Config
+from daklib.queue import Upload
+from daklib import utils
+from daklib.utils import poolify
+
+# where in dak.conf all of our configuration will be stowed
+options_prefix = "NewFiles"
+options_prefix = "%s::Options" % options_prefix
+
+log = logging.getLogger()
+
+################################################################################
+
+
+def usage (exit_code=0):
+    print """Usage: dak import-new-files [options]
+
+OPTIONS
+     -v, --verbose
+        show verbose information messages
+
+     -q, --quiet
+        suppress all output but errors
+
+"""
+    sys.exit(exit_code)
+
+class ImportNewFiles(object):
+    @session_wrapper
+    def __init__(self, session=None):
+        cnf = Config()
+        try:
+            newq = get_policy_queue('new', session)
+            for changes_fn in glob.glob(newq.path + "/*.changes"):
+                changes_bn = os.path.basename(changes_fn)
+                chg = get_dbchange(changes_bn, session)
+
+                u = Upload()
+                success = u.load_changes(changes_fn)
+                u.pkg.changes_file = changes_bn
+                u.check_hashes()
+
+                if not chg:
+                    chg = u.pkg.add_known_changes(newq.path, newq.policy_queue_id, session)
+                    session.add(chg)
+
+                if not success:
+                    log.critical("failed to load %s" % changes_fn)
+                    sys.exit(1)
+                else:
+                    log.critical("ACCLAIM: %s" % changes_fn)
+
+                files=[]
+                for chg_fn in u.pkg.files.keys():
+                    try:
+                        f = open(os.path.join(newq.path, chg_fn))
+                        cpf = ChangePendingFile()
+                        cpf.filename = chg_fn
+                        cpf.size = u.pkg.files[chg_fn]['size']
+                        cpf.md5sum = u.pkg.files[chg_fn]['md5sum']
+
+                        if u.pkg.files[chg_fn].has_key('sha1sum'):
+                            cpf.sha1sum = u.pkg.files[chg_fn]['sha1sum']
+                        else:
+                            log.warning("Having to generate sha1sum for %s" % chg_fn)
+                            f.seek(0)
+                            cpf.sha1sum = apt_pkg.sha1sum(f)
+
+                        if u.pkg.files[chg_fn].has_key('sha256sum'):
+                            cpf.sha256sum = u.pkg.files[chg_fn]['sha256sum']
+                        else:
+                            log.warning("Having to generate sha256sum for %s" % chg_fn)
+                            f.seek(0)
+                            cpf.sha256sum = apt_pkg.sha256sum(f)
+
+                        session.add(cpf)
+                        files.append(cpf)
+                        f.close()
+                    except IOError:
+                        # Can't find the file, try to look it up in the pool
+                        poolname = poolify(u.pkg.changes["source"], u.pkg.files[chg_fn]["component"])
+                        l = get_location(cnf["Dir::Pool"], u.pkg.files[chg_fn]["component"], session=session)
+                        if not l:
+                            log.critical("ERROR: Can't find location for %s (component %s)" % (chg_fn, u.pkg.files[chg_fn]["component"]))
+
+                        found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
+                                                         u.pkg.files[chg_fn]['size'],
+                                                         u.pkg.files[chg_fn]["md5sum"],
+                                                         l.location_id,
+                                                         session=session)
+
+                        if found is None:
+                            log.critical("ERROR: Found multiple files for %s in pool" % chg_fn)
+                            sys.exit(1)
+                        elif found is False and poolfile is not None:
+                            log.critical("ERROR: md5sum / size mismatch for %s in pool" % chg_fn)
+                            sys.exit(1)
+                        else:
+                            if poolfile is None:
+                                log.critical("ERROR: Could not find %s in pool" % chg_fn)
+                                sys.exit(1)
+                            else:
+                                chg.poolfiles.append(poolfile)
+
+
+                chg.files = files
+
+
+            session.commit()
+            
+        except KeyboardInterrupt:
+            print("Caught C-c; terminating.")
+            utils.warn("Caught C-c; terminating.")
+            self.plsDie()
+
+
+def main():
+    cnf = Config()
+
+    arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")),
+                 ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")),
+                 ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")),
+                ]
+
+    args = apt_pkg.ParseCommandLine(cnf.Cnf, arguments,sys.argv)
+
+    num_threads = 1
+
+    if len(args) > 0:
+        usage(1)
+
+    if cnf.has_key("%s::%s" % (options_prefix,"Help")):
+        usage(0)
+
+    level=logging.INFO
+    if cnf.has_key("%s::%s" % (options_prefix,"Quiet")):
+        level=logging.ERROR
+
+    elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")):
+        level=logging.DEBUG
+
+
+    logging.basicConfig( level=level,
+                         format='%(asctime)s %(levelname)s %(message)s',
+                         stream = sys.stderr )
+
+    ImportNewFiles()
+
+
+if __name__ == '__main__':
+    main()
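
ImportNewFiles.__init__ relies on daklib's @session_wrapper decorator to supply a database session when the caller passes none. The idea, sketched below (the real daklib.dbconn implementation may differ, for instance in its commit behaviour):

    def session_wrapper(fn):
        # If no session= was given, open a private one and close it afterwards.
        def wrapped(*args, **kwargs):
            private = kwargs.get('session') is None
            if private:
                kwargs['session'] = DBConn().session()   # DBConn from daklib.dbconn
            try:
                return fn(*args, **kwargs)
            finally:
                if private:
                    kwargs['session'].close()
        return wrapped
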
diff --git a/dak/make_suite_file_list.py b/dak/make_suite_file_list.py
index 349a4ae09115f4530e31ae3af0338be8f47696ef..8c13100998eb6b651d86358e720e654eb73dbc8d 100755 (executable)
@@ -361,7 +361,9 @@ SELECT s.id, s.source, 'source', s.version, l.path, f.filename, c.name, f.id,
                                    suite=suite, filetype = filetype)
     cleanup(packages, session)
     session.commit()
-    write_filelists(packages, dislocated_files, session)
+
+    # has been replaced by 'dak generate-filelist':
+    #write_filelists(packages, dislocated_files, session)
 
 ################################################################################
 
diff --git a/dak/manage_build_queues.py b/dak/manage_build_queues.py
new file mode 100755 (executable)
index 0000000..a40a1db
--- /dev/null
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+
+"""Manage build queues"""
+# Copyright (C) 2000, 2001, 2002, 2006  James Troup <james@nocrew.org>
+# Copyright (C) 2009  Mark Hymers <mhy@debian.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+import os, os.path, stat, sys
+from datetime import datetime
+import apt_pkg
+
+from daklib import daklog
+from daklib.dbconn import *
+from daklib.config import Config
+
+################################################################################
+
+Options = None
+Logger = None
+
+################################################################################
+
+def usage (exit_code=0):
+    print """Usage: dak manage-build-queues [OPTIONS] buildqueue1 buildqueue2
+Manage the contents of one or more build queues
+
+  -a, --all                  run on all known build queues
+  -n, --no-action            don't do anything
+  -h, --help                 show this help and exit"""
+
+    sys.exit(exit_code)
+
+################################################################################
+
+def main ():
+    global Options, Logger
+
+    cnf = Config()
+
+    for i in ["Help", "No-Action", "All"]:
+        if not cnf.has_key("Manage-Build-Queues::Options::%s" % (i)):
+            cnf["Manage-Build-Queues::Options::%s" % (i)] = ""
+
+    Arguments = [('h',"help","Manage-Build-Queues::Options::Help"),
+                 ('n',"no-action","Manage-Build-Queues::Options::No-Action"),
+                 ('a',"all","Manage-Build-Queues::Options::All")]
+
+    queue_names = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
+    Options = cnf.SubTree("Manage-Build-Queues::Options")
+
+    if Options["Help"]:
+        usage()
+
+    Logger = daklog.Logger(cnf, 'manage-build-queues', Options['No-Action'])
+
+    starttime = datetime.now()
+
+    session = DBConn().session()
+
+    if Options["All"]:
+        if len(queue_names) != 0:
+            print "E: Cannot use both -a and a queue_name"
+            sys.exit(1)
+        queues = session.query(BuildQueue).all()
+
+    else:
+        queues = []
+        for q in queue_names:
+            queue = get_build_queue(q.lower(), session)
+            if queue:
+                queues.append(queue)
+            else:
+                Logger.log(['cannot find queue %s' % q])
+
+    # For each given queue, look up object and call manage_queue
+    for q in queues:
+        Logger.log(['cleaning queue %s using datetime %s' % (q.queue_name, starttime)])
+        q.clean_and_update(starttime, Logger, dryrun=Options["No-Action"])
+
+    Logger.close()
+
+#######################################################################################
+
+if __name__ == '__main__':
+    main()
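
The clean_and_update() method called above lives on BuildQueue in daklib/dbconn.py (changed by this commit; its diff is not shown on this truncated page). It absorbs what clean_queue_build() in clean_suites.py used to do, driven by the stay_of_execution and generate_metadata columns added in update24. A plausible sketch assembled from the removed code (the relation and attribute names are assumptions):

    import os
    from datetime import timedelta

    def clean_and_update(self, starttime, Logger, dryrun=False):
        # Expire queue files past their stay of execution, mirroring the
        # clean_queue_build() logic removed from clean_suites.py above.
        cutoff = starttime - timedelta(seconds=self.stay_of_execution)
        for qf in self.queue_files:                  # assumed relation name
            if qf.last_used is not None and qf.last_used <= cutoff:
                Logger.log(["delete build queue file", qf.filename])
                if not dryrun:
                    fullpath = os.path.join(self.path, qf.filename)
                    if os.path.exists(fullpath):
                        os.unlink(fullpath)
        # ...followed by regenerating the queue's Packages/Release metadata
        # when self.generate_metadata (new in update24) is set.
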
diff --git a/dak/new_security_install.py b/dak/new_security_install.py
index f8706ed9b98e70ef5eacf5ddd0d7d14954c6eb94..23b765f6fb680498fd75c73110ae8ddc37eebdb7 100755 (executable)
@@ -474,6 +474,7 @@ def _do_Approve():
     # 3. run dak make-suite-file-list / apt-ftparchve / dak generate-releases
     print "Updating file lists for apt-ftparchive..."
     spawn("dak make-suite-file-list")
+    spawn("dak generate-filelist")
     print "Updating Packages and Sources files..."
     spawn("/org/security.debian.org/dak/config/debian-security/map.sh")
     spawn("apt-ftparchive generate %s" % (utils.which_apt_conf_file()))
diff --git a/dak/process_new.py b/dak/process_new.py
index e052af49a1909ee1d7f249251dffab4afa9d3be2..4b2cff3ea157c8cc8f8b44b60b23d221b6ba68aa 100755 (executable)
@@ -60,7 +60,7 @@ from daklib.dbconn import *
 from daklib.queue import *
 from daklib import daklog
 from daklib import utils
-from daklib.regexes import re_no_epoch, re_default_answer, re_isanum
+from daklib.regexes import re_no_epoch, re_default_answer, re_isanum, re_package
 from daklib.dak_exceptions import CantOpenError, AlreadyLockedError, CantGetLockError
 from daklib.summarystats import SummaryStats
 from daklib.config import Config
@@ -77,7 +77,7 @@ Sections = None
 ################################################################################
 
 def recheck(upload, session):
-    upload.recheck()
+# STU: I'm not sure, but I don't think this is necessary any longer:    upload.recheck(session)
     if len(upload.rejects) > 0:
         answer = "XXX"
         if Options["No-Action"] or Options["Automatic"] or Options["Trainee"]:
@@ -95,7 +95,6 @@ def recheck(upload, session):
 
         if answer == 'R':
             upload.do_reject(manual=0, reject_message='\n'.join(upload.rejects))
-            os.unlink(upload.pkg.changes_file[:-8]+".dak")
             return 0
         elif answer == 'S':
             return 0
@@ -159,7 +158,8 @@ def sort_changes(changes_files, session):
     for filename in changes_files:
         u = Upload()
         try:
-            u.pkg.load_dot_dak(filename)
+            u.pkg.changes_file = filename
+            u.load_changes(filename)
             u.update_subst()
             cache[filename] = copy.copy(u.pkg.changes)
             cache[filename]["filename"] = filename
@@ -599,6 +599,7 @@ def prod_maintainer (note, upload):
 def do_new(upload, session):
     print "NEW\n"
     files = upload.pkg.files
+    upload.check_files(not Options["No-Action"])
     changes = upload.pkg.changes
     cnf = Config()
 
@@ -665,6 +666,7 @@ def do_new(upload, session):
             try:
                 check_daily_lock()
                 done = add_overrides (new, upload, session)
+                do_accept(upload, session)
                 Logger.log(["NEW ACCEPT: %s" % (upload.pkg.changes_file)])
             except CantGetLockError:
                 print "Hello? Operator! Give me the number for 911!"
@@ -680,7 +682,6 @@ def do_new(upload, session):
                                        note=get_new_comments(changes.get("source", ""), session=session))
             if not aborted:
                 Logger.log(["NEW REJECT: %s" % (upload.pkg.changes_file)])
-                os.unlink(upload.pkg.changes_file[:-8]+".dak")
                 done = 1
         elif answer == 'N':
             edit_note(get_new_comments(changes.get("source", ""), session=session),
@@ -769,7 +770,6 @@ def do_byhand(upload, session):
         elif answer == 'M':
             Logger.log(["BYHAND REJECT: %s" % (upload.pkg.changes_file)])
             upload.do_reject(manual=1, reject_message=Options["Manual-Reject"])
-            os.unlink(upload.pkg.changes_file[:-8]+".dak")
             done = 1
         elif answer == 'S':
             done = 1
@@ -817,14 +817,48 @@ def lock_package(package):
     finally:
         os.unlink(path)
 
-def _accept(upload):
+class clean_holding(object):
+    def __init__(self,pkg):
+        self.pkg = pkg
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, type, value, traceback):
+        h = Holding()
+
+        for f in self.pkg.files.keys():
+            if os.path.exists(os.path.join(h.holding_dir, f)):
+                os.unlink(os.path.join(h.holding_dir, f))
+
+
+
+def changes_to_newstage(upload, session):
+    """move a changes file to newstage"""
+    new = get_policy_queue('new', session );
+    newstage = get_policy_queue('newstage', session );
+
+    chg = session.query(DBChange).filter_by(changesname=os.path.basename(upload.pkg.changes_file)).one()
+    chg.approved_for = newstage.policy_queue_id
+
+    for f in chg.files:
+        # update the changes_pending_files row
+        f.queue = newstage
+        utils.move(os.path.join(new.path, f.filename), newstage.path, perms=int(newstage.perms, 8))
+
+    utils.move(os.path.join(new.path, upload.pkg.changes_file), newstage.path, perms=int(newstage.perms, 8))
+    chg.in_queue = newstage
+    session.commit()
+
+def _accept(upload, session):
     if Options["No-Action"]:
         return
     (summary, short_summary) = upload.build_summaries()
-    upload.accept(summary, short_summary, targetqueue)
-    os.unlink(upload.pkg.changes_file[:-8]+".dak")
+    # upload.accept(summary, short_summary, targetqueue)
+
+    changes_to_newstage(upload, session)
 
-def do_accept(upload):
+def do_accept(upload, session):
     print "ACCEPT"
     cnf = Config()
     if not Options["No-Action"]:
@@ -838,12 +872,18 @@ def do_accept(upload):
             upload.Subst["__SUMMARY__"] = summary
         else:
             # Just a normal upload, accept it...
-            _accept(upload)
+            _accept(upload, session)
 
 def do_pkg(changes_file, session):
+    new_queue = get_policy_queue('new', session );
     u = Upload()
-    u.pkg.load_dot_dak(changes_file)
+    u.pkg.changes_file = changes_file
+    (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changes_file)
+    u.load_changes(changes_file)
+    u.pkg.directory = new_queue.path
     u.update_subst()
+    u.logger = Logger
+    origchanges = os.path.abspath(u.pkg.changes_file)
 
     cnf = Config()
     bcc = "X-DAK: dak process-new"
@@ -853,27 +893,48 @@ def do_pkg(changes_file, session):
         u.Subst["__BCC__"] = bcc
 
     files = u.pkg.files
+    for deb_filename, f in files.items():
+        if deb_filename.endswith(".udeb") or deb_filename.endswith(".deb"):
+            u.binary_file_checks(deb_filename, session)
+            u.check_binary_against_db(deb_filename, session)
+        else:
+            u.source_file_checks(deb_filename, session)
+            u.check_source_against_db(deb_filename, session)
+
+        u.pkg.changes["suite"] = copy.copy(u.pkg.changes["distribution"])
 
     try:
         with lock_package(u.pkg.changes["source"]):
-            if not recheck(u, session):
-                return
+            with clean_holding(u.pkg):
+                if not recheck(u, session):
+                    return
 
-            (new, byhand) = check_status(files)
-            if new or byhand:
+                # FIXME: This does need byhand checks added!
+                new = determine_new(u.pkg.changes, files)
                 if new:
                     do_new(u, session)
-                if byhand:
-                    do_byhand(u, session)
-                (new, byhand) = check_status(files)
-
-            if not new and not byhand:
-                try:
-                    check_daily_lock()
-                    do_accept(u)
-                except CantGetLockError:
-                    print "Hello? Operator! Give me the number for 911!"
-                    print "Dinstall in the locked area, cant process packages, come back later"
+                else:
+                    try:
+                        check_daily_lock()
+                        do_accept(u, session)
+                    except CantGetLockError:
+                        print "Hello? Operator! Give me the number for 911!"
+                        print "Dinstall in the locked area, cant process packages, come back later"
+#             (new, byhand) = check_status(files)
+#             if new or byhand:
+#                 if new:
+#                     do_new(u, session)
+#                 if byhand:
+#                     do_byhand(u, session)
+#                 (new, byhand) = check_status(files)
+
+#             if not new and not byhand:
+#                 try:
+#                     check_daily_lock()
+#                     do_accept(u)
+#                 except CantGetLockError:
+#                     print "Hello? Operator! Give me the number for 911!"
+#                     print "Dinstall in the locked area, cant process packages, come back later"
     except AlreadyLockedError, e:
         print "Seems to be locked by %s already, skipping..." % (e)
 
@@ -898,10 +959,6 @@ def end():
 def main():
     global Options, Logger, Sections, Priorities
 
-    print "NO NEW PROCESSING CURRENTLY AVAILABLE"
-    print "(Go and do something more interesting)"
-    sys.exit(0)
-
     cnf = Config()
     session = DBConn().session()
 
@@ -917,7 +974,8 @@ def main():
 
     changes_files = apt_pkg.ParseCommandLine(cnf.Cnf,Arguments,sys.argv)
     if len(changes_files) == 0:
-        changes_files = utils.get_changes_files(cnf["Dir::Queue::New"])
+        new_queue = get_policy_queue('new', session );
+        changes_files = utils.get_changes_files(new_queue.path)
 
     Options = cnf.SubTree("Process-New::Options")
 
diff --git a/dak/process_upload.py b/dak/process_upload.py
index 155ba1dd124823a719ddd44c8b50113d2125ecc7..f408e177e7a15caf69a0a02db625818a503712c9 100755 (executable)
@@ -196,6 +196,15 @@ def usage (exit_code=0):
 
 ###############################################################################
 
+def byebye():
+    if not Options["No-Action"]:
+        # Clean out the queue files
+        session = DBConn().session()
+        session.execute("DELETE FROM changes_pending_files WHERE id NOT IN (SELECT file_id FROM changes_pending_files_map )")
+        session.commit()
+
+
+
 def action(u, session):
     cnf = Config()
     holding = Holding()
@@ -283,24 +292,27 @@ def action(u, session):
         u.do_reject(0, pi)
     elif answer == 'A':
         if not chg:
-            chg = u.pkg.add_known_changes(holding.holding_dir, session)
+            chg = u.pkg.add_known_changes(holding.holding_dir, session=session)
+        session.commit()
         u.accept(summary, short_summary, session)
         u.check_override()
+        chg.clean_from_queue()
         session.commit()
         u.remove()
     elif answer == 'P':
         if not chg:
-            chg = u.pkg.add_known_changes(holding.holding_dir, session)
+            chg = u.pkg.add_known_changes(holding.holding_dir, session=session)
         package_to_queue(u, summary, short_summary, policyqueue, chg, session)
         session.commit()
         u.remove()
     elif answer == queuekey:
         if not chg:
-            chg = u.pkg.add_known_changes(holding.holding_dir, session)
+            chg = u.pkg.add_known_changes(holding.holding_dir, session=session)
         QueueInfo[qu]["process"](u, summary, short_summary, chg, session)
         session.commit()
         u.remove()
     elif answer == 'Q':
+        byebye()
         sys.exit(0)
 
     session.commit()
@@ -481,9 +493,12 @@ def main():
                                                 utils.size_type(int(summarystats.accept_bytes)))
         Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
 
+    byebye()
+
     if not Options["No-Action"]:
         if log_urgency:
             UrgencyLog().close()
+
     Logger.close()
 
 ###############################################################################
index c9013a5239c956eb54c5c60e9ed656ca6aa9fe2d..e270cc71e109a124b77e0ffbb76abaa042c33137 100755 (executable)
@@ -40,7 +40,7 @@ import apt_pkg
 
 from daklib import utils
 from daklib.queue import Upload
-from daklib.dbconn import DBConn, has_new_comment
+from daklib.dbconn import DBConn, has_new_comment, DBChange
 from daklib.textutils import fix_maintainer
 from daklib.dak_exceptions import *
 
@@ -277,12 +277,12 @@ def table_row(source, version, arch, last_mod, maint, distribution, closes, fing
     (name, mail) = changedby.split(":", 1)
     print "<span class=\"changed-by\">Changed-By: <a href=\"http://qa.debian.org/developer.php?login=%s\">%s</a></span><br/>" % (utils.html_escape(mail), utils.html_escape(name))
 
-    try:
-        (login, domain) = sponsor.split("@", 1)
-        print "<span class=\"sponsor\">Sponsor: <a href=\"http://qa.debian.org/developer.php?login=%s\">%s</a></span>@debian.org<br/>" % (utils.html_escape(login), utils.html_escape(login))
-    except Exception, e:
-        print "WARNING: Exception %s" % e
-        pass
+    if sponsor:
+        try:
+            (login, domain) = sponsor.split("@", 1)
+            print "<span class=\"sponsor\">Sponsor: <a href=\"http://qa.debian.org/developer.php?login=%s\">%s</a></span>@debian.org<br/>" % (utils.html_escape(login), utils.html_escape(login))
+        except Exception:
+            pass
 
     print "<span class=\"signature\">Fingerprint: %s</span>" % (fingerprint)
     print "</td>"
@@ -296,6 +296,7 @@ def table_row(source, version, arch, last_mod, maint, distribution, closes, fing
 ############################################################
 
 def process_changes_files(changes_files, type, log):
+    session = DBConn().session()
     msg = ""
     cache = {}
     # Read in all the .changes files
@@ -362,6 +363,13 @@ def process_changes_files(changes_files, type, log):
         arches = {}
         versions = {}
         for j in i[1]["list"]:
+            changesbase = os.path.basename(j["filename"])
+            try:
+                dbc = session.query(DBChange).filter_by(changesname=changesbase).one()
+            except Exception, e:
+                print "Can't find changes file in NEW for %s (%s)" % (changesbase, e)
+                dbc = None
+
             if Cnf.has_key("Queue-Report::Options::New") or Cnf.has_key("Queue-Report::Options::822"):
                 try:
                     (maintainer["maintainer822"], maintainer["maintainer2047"],
@@ -385,9 +393,15 @@ def process_changes_files(changes_files, type, log):
 
                 distribution=j["distribution"].keys()
                 closes=j["closes"].keys()
-                fingerprint=j["fingerprint"]
-                if j.has_key("sponsoremail"):
-                    sponsor=j["sponsoremail"]
+                if dbc:
+                    fingerprint = dbc.fingerprint
+
+                # TODO: This won't work now as it never gets set
+                #       Fix so that we compare the changed-by/maintainer and the signing key
+                #       Should probably be done somewhere more central
+                #if j.has_key("sponsoremail"):
+                #    sponsor=j["sponsoremail"]
+
             for arch in j["architecture"].keys():
                 arches[arch] = ""
             version = j["version"]
index 49a6b584d1fc59ea76f5b2cefde9380b4a6452ca..ecdd99a79bf2ef0d76d96be91ba446c27ce8ec30 100755 (executable)
@@ -45,7 +45,7 @@ from daklib.dak_exceptions import DBUpdateError
 ################################################################################
 
 Cnf = None
-required_database_schema = 23
+required_database_schema = 24
 
 ################################################################################
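
The bump to schema 24 pairs with the new dak/dakdb/update24.py. The DDL itself
is not part of this hunk; a hedged sketch of the shape such an update module
conventionally takes:

    from daklib.dak_exceptions import DBUpdateError

    def do_update(self):
        try:
            c = self.db.cursor()
            # ... DDL for the schema changes introduced by this commit ...
            c.execute("UPDATE config SET value = '24' WHERE name = 'db_revision'")
            self.db.commit()
        except Exception, msg:
            self.db.rollback()
            raise DBUpdateError, "Unable to apply update 24, rollback issued. Error: %s" % msg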
 
index c1f8f5ba89f5168f0d322c7d82fe04b1164be17c..ca9609ef5f0c3ff7e41db27d15f07a78b53088d1 100644 (file)
@@ -188,7 +188,7 @@ class Changes(object):
                 self.changes[key]='missing'
 
     @session_wrapper
-    def add_known_changes(self, dirpath, session=None):
+    def add_known_changes(self, dirpath, in_queue=None, session=None):
         """add "missing" in fields which we will require for the known_changes table"""
         cnf = Config()
 
@@ -202,31 +202,77 @@ class Changes(object):
             if isinstance(self.changes[key], dict):
                 multivalues[key] = " ".join(self.changes[key].keys())
             else:
-                multivalues[key] = self.changes[key].keys()
-
-        # TODO: Use ORM
-        session.execute(
-            """INSERT INTO changes
-              (changesname, seen, source, binaries, architecture, version,
-              distribution, urgency, maintainer, fingerprint, changedby, date)
-              VALUES (:changesfile,:filetime,:source,:binary, :architecture,
-              :version,:distribution,:urgency,:maintainer,:fingerprint,:changedby,:date)""",
-              { 'changesfile':  self.changes_file,
-                'filetime':     filetime,
-                'source':       self.changes["source"],
-                'binary':       multivalues["binary"],
-                'architecture': multivalues["architecture"],
-                'version':      self.changes["version"],
-                'distribution': multivalues["distribution"],
-                'urgency':      self.changes["urgency"],
-                'maintainer':   self.changes["maintainer"],
-                'fingerprint':  self.changes["fingerprint"],
-                'changedby':    self.changes["changed-by"],
-                'date':         self.changes["date"]} )
+                multivalues[key] = self.changes[key]
+
+        chg = DBChange()
+        chg.changesname = self.changes_file
+        chg.seen = filetime
+        chg.in_queue_id = in_queue
+        chg.source = self.changes["source"]
+        chg.binaries = multivalues["binary"]
+        chg.architecture = multivalues["architecture"]
+        chg.version = self.changes["version"]
+        chg.distribution = multivalues["distribution"]
+        chg.urgency = self.changes["urgency"]
+        chg.maintainer = self.changes["maintainer"]
+        chg.fingerprint = self.changes["fingerprint"]
+        chg.changedby = self.changes["changed-by"]
+        chg.date = self.changes["date"]
+
+        session.add(chg)
+
+        files = []
+        for chg_fn, entry in self.files.items():
+            try:
+                f = open(os.path.join(dirpath, chg_fn))
+                cpf = ChangePendingFile()
+                cpf.filename = chg_fn
+                cpf.size = entry['size']
+                cpf.md5sum = entry['md5sum']
+
+                if entry.has_key('sha1sum'):
+                    cpf.sha1sum = entry['sha1sum']
+                else:
+                    f.seek(0)
+                    cpf.sha1sum = apt_pkg.sha1sum(f)
+
+                if entry.has_key('sha256sum'):
+                    cpf.sha256sum = entry['sha256sum']
+                else:
+                    f.seek(0)
+                    cpf.sha256sum = apt_pkg.sha256sum(f)
+
+                session.add(cpf)
+                files.append(cpf)
+                f.close()
+
+            except IOError:
+                # Can't find the file, try to look it up in the pool
+                poolname = poolify(entry["source"], entry["component"])
+                l = get_location(cnf["Dir::Pool"], entry["component"], session=session)
+
+                found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
+                                                 entry['size'],
+                                                 entry["md5sum"],
+                                                 l.location_id,
+                                                 session=session)
+
+                if found is None:
+                    Logger.log(["E: Found multiple files for pool (%s) for %s" % (chg_fn, entry["component"])])
+                elif found is False and poolfile is not None:
+                    Logger.log(["E: md5sum/size mismatch for %s in pool" % (chg_fn)])
+                else:
+                    if poolfile is None:
+                        Logger.log(["E: Could not find %s in pool" % (chg_fn)])
+                    else:
+                        chg.poolfiles.append(poolfile)
+
+        chg.files = files
 
         session.commit()
-
-        return session.query(DBChange).filter_by(changesname = self.changes_file).one()
+        chg = session.query(DBChange).filter_by(changesname=self.changes_file).one()
+
+        return chg
 
     def unknown_files_fields(self, name):
         return sorted(list( set(self.files[name].keys()) -
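
A hedged usage sketch of the rewritten add_known_changes(): the in_queue
keyword comes from the new signature above, while the policy_queue_id
attribute on the queue row is an assumption.

    # u is an Upload with a parsed .changes; session is an open DB session.
    chg = u.pkg.add_known_changes(holding.holding_dir,
                                  in_queue=new_queue.policy_queue_id,  # assumed attribute
                                  session=session)
    session.commit()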
index 361dcf42cfc3b1eaad30d1f3dbc72785678e8c02..1418d081856192b9574fa6a63800138df9c36794 100644 (file)
@@ -37,7 +37,9 @@ import os
 import re
 import psycopg2
 import traceback
-from datetime import datetime
+from datetime import datetime, timedelta
+from errno import ENOENT
+from tempfile import mkstemp, mkdtemp
 
 from inspect import getargspec
 
@@ -429,6 +431,45 @@ __all__.append('BinaryACLMap')
 
 ################################################################################
 
+MINIMAL_APT_CONF="""
+Dir
+{
+   ArchiveDir "%(archivepath)s";
+   OverrideDir "/srv/ftp.debian.org/scripts/override/";
+   CacheDir "/srv/ftp.debian.org/database/";
+};
+
+Default
+{
+   Packages::Compress ". bzip2 gzip";
+   Sources::Compress ". bzip2 gzip";
+   DeLinkLimit 0;
+   FileMode 0664;
+}
+
+bindirectory "incoming"
+{
+   Packages "Packages";
+   Contents " ";
+
+   BinOverride "override.sid.all3";
+   BinCacheDB "packages-accepted.db";
+
+   FileList "%(filelist)s";
+
+   PathPrefix "";
+   Packages::Extensions ".deb .udeb";
+};
+
+bindirectory "incoming/"
+{
+   Sources "Sources";
+   BinOverride "override.sid.all3";
+   SrcOverride "override.sid.all3.src";
+   FileList "%(filelist)s";
+};
+"""
+
 class BuildQueue(object):
     def __init__(self, *args, **kwargs):
         pass
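
MINIMAL_APT_CONF is plain %-interpolation; write_metadata() below supplies
both keys, and leaving one out raises KeyError. A standalone sketch with
illustrative paths:

    apt_conf = MINIMAL_APT_CONF % {'archivepath': '/srv/ftp.debian.org/build-queues/buildd',
                                   'filelist':    '/tmp/buildd.filelist'}
    f = open('/tmp/apt.conf.buildd-example', 'w')
    f.write(apt_conf)
    f.close()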
@@ -436,6 +477,143 @@ class BuildQueue(object):
     def __repr__(self):
         return '<BuildQueue %s>' % self.queue_name
 
+    def write_metadata(self, starttime, force=False):
+        # Do we write out metafiles?
+        if not (force or self.generate_metadata):
+            return
+
+        session = DBConn().session().object_session(self)
+
+        fl_fd = fl_name = ac_fd = ac_name = None
+        tempdir = None
+        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
+        startdir = os.getcwd()
+
+        try:
+            # Grab files we want to include
+            newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+            # Write file list with newer files
+            (fl_fd, fl_name) = mkstemp()
+            for n in newer:
+                os.write(fl_fd, '%s\n' % n.fullpath)
+            os.close(fl_fd)
+
+            # Write minimal apt.conf
+            # TODO: Remove hardcoding from template
+            (ac_fd, ac_name) = mkstemp()
+            os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
+                                                'filelist': fl_name})
+            os.close(ac_fd)
+
+            # Run apt-ftparchive generate
+            os.chdir(os.path.dirname(ac_name))
+            os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
+
+            # Run apt-ftparchive release
+            # TODO: Eww - fix this
+            bname = os.path.basename(self.path)
+            os.chdir(self.path)
+            os.chdir('..')
+
+            # We have to remove the Release file otherwise it'll be included in the
+            # new one
+            try:
+                os.unlink(os.path.join(bname, 'Release'))
+            except OSError:
+                pass
+
+            os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
+
+            # Sign if necessary
+            if self.signingkey:
+                cnf = Config()
+                keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
+                if cnf.has_key("Dinstall::SigningPubKeyring"):
+                    keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
+
+                os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
+
+            # Move the files if we got this far
+            os.rename('Release', os.path.join(bname, 'Release'))
+            if self.signingkey:
+                os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
+
+        # Clean up any left behind files
+        finally:
+            os.chdir(startdir)
+            if fl_fd is not None:
+                try:
+                    os.close(fl_fd)
+                except OSError:
+                    pass
+
+            if fl_name:
+                try:
+                    os.unlink(fl_name)
+                except OSError:
+                    pass
+
+            if ac_fd is not None:
+                try:
+                    os.close(ac_fd)
+                except OSError:
+                    pass
+
+            if ac_name:
+                try:
+                    os.unlink(ac_name)
+                except OSError:
+                    pass
+
+    def clean_and_update(self, starttime, Logger, dryrun=False):
+        """WARNING: This routine commits for you"""
+        session = DBConn().session().object_session(self)
+
+        if self.generate_metadata and not dryrun:
+            self.write_metadata(starttime)
+
+        # Grab files older than our execution time
+        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+
+        for o in older:
+            killdb = False
+            try:
+                if dryrun:
+                    Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
+                else:
+                    Logger.log(["I: Removing %s from the queue" % o.fullpath])
+                    os.unlink(o.fullpath)
+                    killdb = True
+            except OSError, e:
+                # If it wasn't there, don't worry
+                if e.errno == ENOENT:
+                    killdb = True
+                else:
+                    # TODO: Replace with proper logging call
+                    Logger.log(["E: Could not remove %s" % o.fullpath])
+
+            if killdb:
+                session.delete(o)
+
+        session.commit()
+
+        for f in os.listdir(self.path):
+            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release'):
+                continue
+
+            try:
+                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+            except NoResultFound:
+                fp = os.path.join(self.path, f)
+                if dryrun:
+                    Logger.log(["I: Would remove unused link %s" % fp])
+                else:
+                    Logger.log(["I: Removing unused link %s" % fp])
+                    try:
+                        os.unlink(fp)
+                    except OSError:
+                        Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
+
     def add_file_from_pool(self, poolfile):
         """Copies a file into the pool.  Assumes that the PoolFile object is
         attached to the same SQLAlchemy session as the Queue object is.
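
Driving the two methods above for every queue might look like the following
sketch; the daklog.Logger constructor is assumed:

    from datetime import datetime
    from daklib import daklog
    from daklib.config import Config
    from daklib.dbconn import DBConn, BuildQueue

    starttime = datetime.now()
    Logger = daklog.Logger(Config(), 'manage-build-queues')  # assumed constructor
    session = DBConn().session()
    for q in session.query(BuildQueue).all():
        q.clean_and_update(starttime, Logger, dryrun=False)  # commits for you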
@@ -516,7 +694,12 @@ class BuildQueueFile(object):
         pass
 
     def __repr__(self):
-        return '<BuildQueueFile %s (%s)>' % (self.filename, self.queue_id)
+        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
+
+    @property
+    def fullpath(self):
+        return os.path.join(self.buildqueue.path, self.filename)
+
 
 __all__.append('BuildQueueFile')
 
@@ -969,7 +1152,7 @@ def get_poolfile_like_name(filename, session=None):
     """
 
     # TODO: There must be a way of properly using bind parameters with %FOO%
-    q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename))
+    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
 
     return q.all()
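
The pattern change turns a substring match into a path-suffix match; with an
illustrative filename:

    # '%/foo_1.0.dsc' matches 'pool/main/f/foo/foo_1.0.dsc' but no longer
    # matches 'otherfoo_1.0.dsc' or 'pool/main/f/foo/foo_1.0.dsc.asc'.
    matches = get_poolfile_like_name('foo_1.0.dsc', session=session)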
 
@@ -1254,6 +1437,19 @@ class DBChange(object):
     def __repr__(self):
         return '<DBChange %s>' % self.changesname
 
+    def clean_from_queue(self):
+        session = DBConn().session().object_session(self)
+
+        # Remove changes_pool_files entries
+        self.poolfiles = []
+
+        # Remove changes_pending_files references
+        self.files = []
+
+        # Clear out of queue
+        self.in_queue = None
+        self.approved_for_id = None
+
 __all__.append('DBChange')
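
clean_from_queue() works purely through the relations mapped below: emptying
the two lists becomes DELETEs on the changes_pool_files and
changes_pending_files_map secondaries at flush time. Hedged usage, with an
illustrative changes name:

    chg = session.query(DBChange).filter_by(changesname='foo_1.0_amd64.changes').one()
    chg.clean_from_queue()
    session.commit()   # flush drops the association rows and the queue link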
 
 @session_wrapper
@@ -2660,6 +2856,16 @@ class DBConn(object):
                                  poolfiles = relation(PoolFile,
                                                       secondary=self.tbl_changes_pool_files,
                                                       backref="changeslinks"),
+                                 seen = self.tbl_changes.c.seen,
+                                 source = self.tbl_changes.c.source,
+                                 binaries = self.tbl_changes.c.binaries,
+                                 architecture = self.tbl_changes.c.architecture,
+                                 distribution = self.tbl_changes.c.distribution,
+                                 urgency = self.tbl_changes.c.urgency,
+                                 maintainer = self.tbl_changes.c.maintainer,
+                                 changedby = self.tbl_changes.c.changedby,
+                                 date = self.tbl_changes.c.date,
+                                 version = self.tbl_changes.c.version,
                                  files = relation(ChangePendingFile,
                                                   secondary=self.tbl_changes_pending_files_map,
                                                   backref="changesfile"),
@@ -2672,7 +2878,12 @@ class DBConn(object):
                properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
 
         mapper(ChangePendingFile, self.tbl_changes_pending_files,
-               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
+                                 filename = self.tbl_changes_pending_files.c.filename,
+                                 size = self.tbl_changes_pending_files.c.size,
+                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
+                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
+                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
 
         mapper(ChangePendingSource, self.tbl_changes_pending_source,
                properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
old mode 100644 (file)
new mode 100755 (executable)
index a91bcdf..c7e66a4
@@ -115,8 +115,8 @@ def determine_new(changes, files, warn=1):
     # Build up a list of potentially new things
     for name, f in files.items():
         # Skip byhand elements
-        if f["type"] == "byhand":
-            continue
+#        if f["type"] == "byhand":
+#            continue
         pkg = f["package"]
         priority = f["priority"]
         section = f["section"]
@@ -218,7 +218,7 @@ def check_valid(new):
 def check_status(files):
     new = byhand = 0
     for f in files.keys():
-        if files[f]["type"] == "byhand":
+        if files[f].has_key("byhand"):
             byhand = 1
         elif files[f].has_key("new"):
             new = 1
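
With this change callers must set an explicit entry["byhand"] marker,
mirroring the entry["new"] = 1 convention used elsewhere in this file;
matching on f["type"] alone no longer flags byhand components. Illustrative:

    files = {
        'pkg_1.0_amd64.deb': {'new': 1},
        'pkg_1.0.tar.gz':    {'byhand': 1},
    }
    (new, byhand) = check_status(files)   # -> (1, 1)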
@@ -661,7 +661,7 @@ class Upload(object):
                     entry["new"] = 1
                 else:
                     dsc_file_exists = False
-                    for myq in ["Accepted", "Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
+                    for myq in ["Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
                         if cnf.has_key("Dir::Queue::%s" % (myq)):
                             if os.path.exists(os.path.join(cnf["Dir::Queue::" + myq], dsc_filename)):
                                 dsc_file_exists = True
@@ -820,7 +820,7 @@ class Upload(object):
             # if in the pool or in a queue other than unchecked, reject
             if (dbc.in_queue is None) \
                    or (dbc.in_queue is not None
-                       and dbc.in_queue.queue_name != 'unchecked'):
+                       and dbc.in_queue.queue_name not in ["unchecked", "newstage"]):
                 self.rejects.append("%s file already known to dak" % base_filename)
         except NoResultFound, e:
             # not known, good
@@ -1225,7 +1225,7 @@ class Upload(object):
                 continue
 
             # Look in some other queues for the file
-            queues = ('Accepted', 'New', 'Byhand', 'ProposedUpdates',
+            queues = ('New', 'Byhand', 'ProposedUpdates',
                 'OldProposedUpdates', 'Embargoed', 'Unembargoed')
 
             for queue in queues:
@@ -2453,7 +2453,7 @@ distribution."""
                 else:
                     # TODO: Record the queues and info in the DB so we don't hardcode all this crap
                     # Not there? Check the queue directories...
-                    for directory in [ "Accepted", "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+                    for directory in [ "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
                         if not Cnf.has_key("Dir::Queue::%s" % (directory)):
                             continue
                         in_otherdir = os.path.join(Cnf["Dir::Queue::%s" % (directory)], dsc_name)
@@ -2502,7 +2502,7 @@ distribution."""
                     source_epochless_version = re_no_epoch.sub('', source_version)
                     dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
                     found = False
-                    for q in ["Accepted", "Embargoed", "Unembargoed", "Newstage"]:
+                    for q in ["Embargoed", "Unembargoed", "Newstage"]:
                         if cnf.has_key("Dir::Queue::%s" % (q)):
                             if os.path.exists(cnf["Dir::Queue::%s" % (q)] + '/' + dsc_filename):
                                 found = True