' Release
rm -f ${base}/buildd/dists/${suite}-backports/Release.gpg
- gpg --no-options --batch --no-tty --secret-keyring ${basedir}/s3kr1t/dot-gnupg/secring.gpg --output "Release.gpg" --armor --detach-sign "Release"
+ gpg --no-options --batch --no-tty --secret-keyring ${base}/s3kr1t/dot-gnupg/secring.gpg --output "Release.gpg" --armor --detach-sign "Release"
done
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- arm;
- armel;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
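+  // Architectures are now read from the suite_architectures table
+  // in projectb (see the update4 database script below).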
Announce "backports-changes@lists.backports.org";
Origin "Backports.org archive";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- arm;
- hppa;
- hurd-i386;
- i386;
- ia64;
- m68k;
- mips;
- mipsel;
- powerpc;
- s390;
- sh;
- sparc;
- };
Announce "backports-changes@lists.backports.org";
Origin "Backports.org archive";
Description "Backports for the Etch Distribution";
updates/contrib;
updates/non-free;
};
- Architectures
- {
- source;
- all;
- amd64;
- alpha;
- arm;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "dak@security.debian.org";
Version "";
Origin "Debian";
updates/contrib;
updates/non-free;
};
- Architectures
- {
- source;
- all;
- amd64;
- alpha;
- arm;
- armel;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "dak@security.debian.org";
Version "";
Origin "Debian";
updates/contrib;
updates/non-free;
};
- Architectures
- {
- source;
- all;
- amd64;
- alpha;
- armel;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "dak@security.debian.org";
Version "";
Origin "Debian";
Location
{
- /org/security.debian.org/ftp/dists/
- {
- Archive "security";
- Type "legacy";
- };
-
/org/security.debian.org/ftp/pool/
{
Archive "security";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- arm;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-changes@lists.debian.org";
// Version "4.0r1";
Origin "Debian";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- arm;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-changes@lists.debian.org";
CopyChanges "dists/oldstable-proposed-updates/";
CopyDotDak "/srv/ftp.debian.org/queue/oldstable-proposed-updates/";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- arm;
- armel;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-changes@lists.debian.org";
// Version "5.0r0";
Origin "Debian";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- arm;
- armel;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-changes@lists.debian.org";
CopyChanges "dists/proposed-updates/";
CopyDotDak "/srv/ftp.debian.org/queue/proposed-updates/";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- armel;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-testing-changes@lists.debian.org";
Origin "Debian";
Description "Debian Testing distribution - Not Released";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- armel;
- hppa;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-testing-changes@lists.debian.org";
Origin "Debian";
Description "Debian Testing distribution updates - Not Released";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- m68k;
- };
Announce "debian-testing-changes@lists.debian.org";
Origin "Debian";
Description "Debian Etch for m68k - Not Released";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- armel;
- hppa;
- hurd-i386;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-devel-changes@lists.debian.org";
Origin "Debian";
Description "Debian Unstable - Not Released";
contrib;
non-free;
};
- Architectures
- {
- source;
- all;
- alpha;
- amd64;
- armel;
- hppa;
- hurd-i386;
- i386;
- ia64;
- mips;
- mipsel;
- powerpc;
- s390;
- sparc;
- };
Announce "debian-devel-changes@lists.debian.org";
Origin "Debian";
Description "Experimental packages - not released; use at your own risk.";
hppa "HP PA RISC";
amd64 "AMD64";
arm "ARM";
+ armel "ARM eabi";
i386 "Intel ia32";
ia64 "Intel ia64";
m68k "Motorola Mc680x0";
Description "Software that fails to meet the DFSG";
MeetsDFSG "false";
};
-
- mixed // **NB:** only used for overrides; not yet used in other code
- {
- Description "Legacy Mixed";
- MeetsDFSG "false";
- };
};
Section
--- /dev/null
+#!/bin/bash
+# No way I try to deal with a crippled sh just for POSIX foo.
+
+# Copyright (C) 2009 Joerg Jaspert <joerg@debian.org>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+# exit on errors
+set -e
+# make sure to only use defined variables
+set -u
+
+# import the general variable set.
+export SCRIPTVARS=/srv/ftp.debian.org/dak/config/debian/vars
+. $SCRIPTVARS
+
+########################################################################
+# Functions #
+########################################################################
+# Rotate logfiles
+function savelog() {
+ torotate="$1"
+ count=${2:-${LOGROTATE}}
+ while [ ${count} -gt 0 ]; do
+ prev=$(( count - 1 ))
+ if [ -e "${torotate}.${prev}" ]; then
+ mv "${torotate}.${prev}" "${torotate}.${count}"
+ fi
+ count=$prev
+ done
+ mv "${torotate}" "${torotate}.0"
+}
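+# (e.g. "savelog dinstall.log 2" moves dinstall.log.1 to dinstall.log.2,
+#  dinstall.log.0 to dinstall.log.1, and finally dinstall.log itself to
+#  dinstall.log.0; suffix N thus means "N rotations old")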
+
+# log something (basically echo it together with a timestamp)
+#
+# Set $PROGRAM to a string to have it added to the output.
+function log () {
+ if [ -z "${PROGRAM}" ]; then
+ echo "$(date +"%b %d %H:%M:%S") $(hostname -s) [$$] $@"
+ else
+ echo "$(date +"%b %d %H:%M:%S") $(hostname -s) ${PROGRAM}[$$]: $@"
+ fi
+}
+
+# log the message using log() but then also send a mail
+# to the address configured in MAILTO (if non-empty)
+function error () {
+ log "$@"
+    if [ -n "${MAILTO}" ]; then
+ echo "$@" | mail -e -s "[$PROGRAM@$(hostname -s)] ERROR [$$]" ${MAILTO}
+ fi
+}
+
+# debug log, only output when DEBUG=1
+function debug () {
+ if [ $DEBUG -eq 1 ]; then
+ log "$*"
+ fi
+}
+
+# Timestamp. Used for dinstall stat graphs
+function ts() {
+ TS=$(($TS+1));
+ echo "Archive maintenance timestamp $TS ($1): $(date +%H:%M:%S)"
+}
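+# (emits lines like "Archive maintenance timestamp 3 (accepted): 13:52:01";
+#  the counter and times here are only illustrative)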
+
+# Cleanup actions
+function cleanup() {
+ savelog "$LOGFILE"
+}
+
+# Setup the notice file to tell bad mirrors they used the wrong time
+function notice() {
+ rm -f "$NOTICE"
+ cat > "$NOTICE" <<EOF
+Packages are currently being installed and indices rebuilt.
+Maintenance is automatic, starting at 01|07|13|19:52 UTC,
+and ending about an hour later. This file is then removed.
+
+You should not mirror the archive during this period.
+EOF
+}
+
+# Pushing merkel's QA user, part one
+function merkel1() {
+    log "Telling merkel's QA user that we start dinstall"
+ ssh -2 -i ~dak/.ssh/push_merkel_qa -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 qa@merkel.debian.org sleep 1
+}
+
+# Create the postgres dump files
+function pgdump_pre() {
+ log "Creating pre-daily-cron-job backup of projectb database..."
+ pg_dump projectb > $base/backup/dump_$(date +%Y.%m.%d-%H:%M:%S)
+}
+
+function pgdump_post() {
+ log "Creating post-daily-cron-job backup of projectb database..."
+ cd $base/backup
+ POSTDUMP=$base/backup/dump_$(date +%Y.%m.%d-%H:%M:%S)
+ pg_dump projectb > $POSTDUMP
+ ln -sf $POSTDUMP current
+}
+
+# Updating various files
+function updates() {
+ log "Updating Bugs docu, Mirror list and mailing-lists.txt"
+ cd $configdir
+ $scriptsdir/update-bugdoctxt
+ $scriptsdir/update-mirrorlists
+ $scriptsdir/update-mailingliststxt
+ $scriptsdir/update-pseudopackages.sh
+}
+
+# Process (oldstable)-proposed-updates "NEW" queue
+function punew_do() {
+ cd "${queuedir}/${1}"
+ date -u -R >> REPORT
+ dak process-new -a -C COMMENTS >> REPORT || true
+ echo >> REPORT
+}
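+# (e.g. punew_do "p-u-new" switches to ${queuedir}/p-u-new and appends a
+#  dated "dak process-new" run to the REPORT file there)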
+function punew() {
+ log "Doing automated p-u-new processing"
+ punew_do "$1"
+}
+function opunew() {
+ log "Doing automated o-p-u-new processing"
+ punew_do "$1"
+}
+
+# The first i18n one, syncing new descriptions
+function i18n1() {
+ log "Synchronizing i18n package descriptions"
+ # First sync their newest data
+ cd ${scriptdir}/i18nsync
+ rsync -aq --delete --delete-after ddtp-sync:/does/not/matter . || true
+
+ # Now check if we still know about the packages for which they created the files
+ # is the timestamp signed by us?
+    if gpgv --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg timestamp.gpg timestamp; then
+ # now read it. As its signed by us we are sure the content is what we expect, no need
+ # to do more here. And we only test -d a directory on it anyway.
+ TSTAMP=$(cat timestamp)
+ # do we have the dir still?
+ if [ -d ${scriptdir}/i18n/${TSTAMP} ]; then
+ # Lets check!
+ if ${scriptsdir}/ddtp-i18n-check.sh . ${scriptdir}/i18n/${TSTAMP}; then
+ # Yay, worked, lets copy around
+ for dir in squeeze sid; do
+ if [ -d dists/${dir}/ ]; then
+ cd dists/${dir}/main/i18n
+ rsync -aq --delete --delete-after . ${ftpdir}/dists/${dir}/main/i18n/.
+ fi
+ cd ${scriptdir}/i18nsync
+ done
+ else
+ echo "ARRRR, bad guys, wrong files, ARRR"
+ echo "Arf, Arf, Arf, bad guys, wrong files, arf, arf, arf" | mail -s "Don't you kids take anything. I'm watching you. I've got eye implants in the back of my head." debian-l10n-devel@lists.alioth.debian.org
+ fi
+ else
+ echo "ARRRR, missing the timestamp ${TSTAMP} directory, not updating i18n, ARRR"
+ echo "Arf, Arf, Arf, missing the timestamp ${TSTAMP} directory, not updating i18n, arf, arf, arf" | mail -s "Lisa, if you don't like your job you don't strike. You just go in every day and do it really half-assed. That's the American way." debian-l10n-devel@lists.alioth.debian.org
+ fi
+ else
+ echo "ARRRRRRR, could not verify our timestamp signature, ARRR. Don't mess with our files, i18n guys, ARRRRR."
+ echo "Arf, Arf, Arf, could not verify our timestamp signature, arf. Don't mess with our files, i18n guys, arf, arf, arf" | mail -s "You can't keep blaming yourself. Just blame yourself once, and move on." debian-l10n-devel@lists.alioth.debian.org
+ fi
+}
+
+# Process the accepted queue
+function accepted() {
+ log "Processing queue/accepted"
+ cd "$accepted"
+ rm -f REPORT
+ dak process-accepted -pa *.changes | tee REPORT | \
+ mail -s "Install for $(date +"%D - %R")" ftpmaster@ftp-master.debian.org
+ chgrp debadmin REPORT
+ chmod 664 REPORT
+}
+
+function cruft() {
+ log "Checking for cruft in overrides"
+ dak check-overrides
+
+ log "Fixing symlinks in $ftpdir"
+ symlinks -d -r $ftpdir
+}
+
+function msfl() {
+ log "Generating suite file lists for apt-ftparchive"
+ dak make-suite-file-list
+}
+
+function fingerprints() {
+ log "Updating fingerprints"
+ dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg
+}
+
+function overrides() {
+ log "Writing overrides into text files"
+ cd $overridedir
+ dak make-overrides
+
+ # FIXME
+ rm -f override.sid.all3
+ for i in main contrib non-free main.debian-installer; do cat override.sid.$i >> override.sid.all3; done
+}
+
+function mpfm() {
+ log "Generating package / file mapping"
+ dak make-pkg-file-mapping | bzip2 -9 > $base/ftp/indices/package-file.map.bz2
+}
+
+function packages() {
+ log "Generating Packages and Sources files"
+ cd $configdir
+ apt-ftparchive generate apt.conf
+}
+
+function pdiff() {
+ log "Generating pdiff files"
+ dak generate-index-diffs
+}
+
+function release() {
+ log "Generating Release files"
+ dak generate-releases
+}
+
+# named "clean" so it does not clobber the cleanup() trap handler above
+function clean() {
+ log "Cleanup old packages/files"
+ dak clean-suites
+ dak clean-queues
+}
+
+function buildd() {
+ # Needs to be rebuilt, as files have moved. Due to unaccepts, we need to
+ # update this before wanna-build is updated.
+ log "Regenerating wanna-build/buildd information"
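+    # (assumption: in this projectb instance suite id 5 is unstable and
+    #  queue id 0 is the accepted queue)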
+ psql projectb -A -t -q -c "SELECT filename FROM queue_build WHERE suite = 5 AND queue = 0 AND in_queue = true AND filename ~ 'd(sc|eb)$'" > $dbdir/dists/unstable_accepted.list
+ symlinks -d /srv/incoming.debian.org/buildd > /dev/null
+ apt-ftparchive generate apt.conf.buildd
+}
+
+function scripts() {
+ log "Running various scripts from $scriptsdir"
+ cd $scriptsdir
+ ./mkmaintainers
+ ./copyoverrides
+ ./mklslar
+ ./mkfilesindices
+ ./mkchecksums
+}
+
+function mirror() {
+    log "Regenerating \"public\" mirror/ hardlink fun"
+ cd ${mirrordir}
+ rsync -aH --link-dest ${ftpdir} --delete --delete-after --ignore-errors ${ftpdir}/. .
+}
+
+function wb() {
+ log "Trigger daily wanna-build run"
+ ssh -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 wbadm@buildd /org/wanna-build/trigger.daily || echo "W-B trigger.daily failed" | mail -s "W-B Daily trigger failed" ftpmaster@ftp-master.debian.org
+}
+
+function expire() {
+ log "Expiring old database dumps..."
+ cd $base/backup
+ $scriptsdir/expire_dumps -d . -p -f "dump_*"
+}
+
+function reports() {
+ # Send a report on NEW/BYHAND packages
+ log "Nagging ftpteam about NEW/BYHAND packages"
+ dak queue-report | mail -e -s "NEW and BYHAND on $(date +%D)" ftpmaster@ftp-master.debian.org
+ # and one on crufty packages
+ log "Sending information about crufty packages"
+ dak cruft-report > $webdir/cruft-report-daily.txt
+ dak cruft-report -s experimental >> $webdir/cruft-report-daily.txt
+ cat $webdir/cruft-report-daily.txt | mail -e -s "Debian archive cruft report for $(date +%D)" ftpmaster@ftp-master.debian.org
+}
+
+function dm() {
+ log "Updating DM html page"
+ $scriptsdir/dm-monitor >$webdir/dm-uploaders.html
+}
+
+function bts() {
+ log "Categorizing uncategorized bugs filed against ftp.debian.org"
+ dak bts-categorize
+}
+
+function merkel2() {
+ # Push katie@merkel so it syncs the projectb there. Returns immediately, the sync runs detached
+ log "Trigger merkels projectb sync"
+ ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_projectb katie@merkel.debian.org sleep 1
+}
+
+function runparts() {
+ log "Using run-parts to run scripts in $base/scripts/distmnt"
+ run-parts --report $base/scripts/distmnt
+}
+
+function i18n2() {
+ log "Exporting package data foo for i18n project"
+ STAMP=$(date "+%Y%m%d%H%M")
+ mkdir -p ${scriptdir}/i18n/${STAMP}
+ cd ${scriptdir}/i18n/${STAMP}
+ dak control-suite -l stable > lenny
+ dak control-suite -l testing > squeeze
+ dak control-suite -l unstable > sid
+ echo "${STAMP}" > timestamp
+ gpg --secret-keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 6070D3A1 --detach-sign -o timestamp.gpg timestamp
+ rm -f md5sum
+ md5sum * > md5sum
+ cd ${webdir}/
+ ln -sfT ${scriptdir}/i18n/${STAMP} i18n
+
+ cd ${scriptdir}
+    find ./i18n -mindepth 1 -maxdepth 1 -mtime +2 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
+}
+
+function stats() {
+ log "Updating stats data"
+ cd $configdir
+ $scriptsdir/update-ftpstats $base/log/* > $base/misc/ftpstats.data
+ R --slave --vanilla < $base/misc/ftpstats.R
+}
+
+function aptftpcleanup() {
+ log "Clean up apt-ftparchive's databases"
+ cd $configdir
+ apt-ftparchive -q clean apt.conf
+}
+
+function compress() {
+ log "Compress old psql backups"
+ cd $base/backup/
+ find -maxdepth 1 -mindepth 1 -type f -name 'dump_*' \! -name '*.bz2' \! -name '*.gz' -mtime +1 |
+ while read dumpname; do
+ echo "Compressing $dumpname"
+ bzip2 -9v "$dumpname"
+ done
+}
+
+function logstats() {
+ $masterdir/tools/logs.py "$LOGFILE"
+}
+
+########################################################################
+########################################################################
+
+# Function to save which stage we are in, so we can restart an interrupted
+# dinstall. Or even run actions in parallel, if we dare to, by simply
+# backgrounding the call to this function. But that should only really be
+# done for things we don't care much about.
+#
+# This should be called with the first argument being an array, with the
+# members
+# - FUNC - the function name to call
+# - ARGS - Possible arguments to hand to the function. Can be the empty string
+#  - TIME - The timestamp name. Can be the empty string
+# - ERR - if this is the string false, then the call will be surrounded by
+# set +e ... set -e calls, so errors in the function do not exit
+# dinstall. Can be the empty string, meaning true.
+#
+# MAKE SURE TO KEEP THIS THE LAST FUNCTION, AFTER ALL THE VARIOUS ONES
+# ADDED FOR DINSTALL FEATURES!
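+#
+# A typical call, sketching the invocations further down:
+#
+#   GO=(
+#       FUNC="notice"
+#       TIME=""
+#       ARGS=""
+#       ERR="false"
+#   )
+#   stage $GO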
+function stage() {
+ ARGS='GO[@]'
+ local "${!ARGS}"
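+    # ${!ARGS} indirectly expands GO[@], so the FUNC=/TIME=/ARGS=/ERR=
+    # assignments from the caller's GO array become locals here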
+
+ if [ -f "${stagedir}/${FUNC}" ]; then
+ stamptime=$(/usr/bin/stat -c %Z "${stagedir}/${FUNC}")
+ unixtime=$(date +%s)
+ difference=$(( $unixtime - $stamptime ))
+ if [ ${difference} -ge 14400 ]; then
+ error "Did already run ${FUNC}, stagefile exists, but that was ${difference} seconds ago. Please check."
+ else
+ log "Did already run ${FUNC}, not calling again..."
+ fi
+ return
+ fi
+
+    debug "Now calling function ${FUNC}. Arguments: ${ARGS}. Timestamp: ${TIME}"
+
+ # Make sure we are always at the same place. If a function wants to be elsewhere,
+ # it has to cd first!
+ cd ${configdir}
+
+    if [ "${ERR}" = "false" ]; then
+ set +e
+ fi
+ ${FUNC} ${ARGS}
+
+ # No matter what happened in the function, we make sure we have set -e default state back
+ set -e
+
+ # Make sure we are always at the same place.
+ cd ${configdir}
+
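+    # an empty stagefile marks ${FUNC} as done; the whole stagedir is
+    # wiped again at the very end of a successful dinstall run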
+ touch "${stagedir}/${FUNC}"
+
+ if [ -n "${TIME}" ]; then
+ ts "${TIME}"
+ fi
+}
+
+########################################################################
+
+# We need logs.
+LOGFILE="$logdir/dinstall.log"
+
+exec > "$LOGFILE" 2>&1
+
+# usually we are not using debug logs. Set to 1 if you want them.
+DEBUG=0
+
+# our name
+PROGRAM="dinstall"
+
+# where do we want mails to go? For example log entries made with error()
+if [ "x$(hostname -s)x" != "xriesx" ]; then
+ # Not our ftpmaster host
+ MAILTO=${MAILTO:-"root"}
+else
+ # Yay, ftpmaster
+ MAILTO=${MAILTO:-"ftpmaster@debian.org"}
+fi
+
+# How many logfiles to keep
+LOGROTATE=${LOGROTATE:-400}
+
+# Timestamps start at -1, so the first one gets 0
+TS=-1
+ts "startup"
+
+# Tell everyone we are doing some work
+NOTICE="$ftpdir/Archive_Maintenance_In_Progress"
+
+# lock cron.unchecked (it immediately exits when this exists)
+LOCK_DAILY="$lockdir/daily.lock"
+
+# Keep process-new and cron.unchecked from doing work
+LOCK_ACCEPTED="$lockdir/unchecked.lock"
+
+# This file is simply used to indicate to britney whether or not
+# the Packages file updates completed successfully. It's not a lock
+# from our point of view
+LOCK_BRITNEY="$lockdir/britney.lock"
+
+lockfile -l 3600 "${LOCK_DAILY}"
+trap cleanup EXIT ERR TERM HUP INT QUIT
+
+touch "${LOCK_BRITNEY}"
+
+GO=(
+ FUNC="notice"
+ TIME=""
+ ARGS=""
+ ERR="false"
+)
+stage $GO
+
+GO=(
+ FUNC="merkel1"
+ TIME="init"
+ ARGS=""
+ ERR="false"
+)
+stage $GO
+
+GO=(
+ FUNC="pgdump_pre"
+ TIME="pg_dump1"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="updates"
+ TIME="External Updates"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="punew"
+ TIME="p-u-new"
+ ARGS="p-u-new"
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="opunew"
+ TIME="o-p-u-new"
+ ARGS="o-p-u-new"
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="i18n1"
+ TIME="i18n 1"
+ ARGS=""
+ ERR="false"
+)
+stage $GO
+
+lockfile "$LOCK_ACCEPTED"
+
+GO=(
+ FUNC="accepted"
+ TIME="accepted"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="cruft"
+ TIME="cruft"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+rm -f "$LOCK_ACCEPTED"
+
+GO=(
+ FUNC="msfl"
+ TIME="make-suite-file-list"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="fingerprints"
+ TIME="import-keyring"
+ ARGS=""
+ ERR="false"
+)
+stage $GO
+
+GO=(
+ FUNC="overrides"
+ TIME="overrides"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="mpfm"
+ TIME="pkg-file-mapping"
+ ARGS=""
+ ERR="false"
+)
+stage $GO
+
+GO=(
+ FUNC="packages"
+ TIME="apt-ftparchive"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="pdiff"
+ TIME="pdiff"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="release"
+ TIME="release files"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="cleanup"
+ TIME="cleanup"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="buildd"
+ TIME="buildd"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="scripts"
+ TIME="scripts"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="mirror"
+ TIME="mirror hardlinks"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="wb"
+ TIME="w-b"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+rm -f "${NOTICE}"
+rm -f "${LOCK_DAILY}"
+
+ts "locked part finished"
+
+GO=(
+ FUNC="pgdump_post"
+ TIME="pg_dump2"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="expire"
+ TIME="expire_dumps"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="reports"
+ TIME="reports"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="dm"
+ TIME=""
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="bts"
+ TIME=""
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="merkel2"
+ TIME="merkel projectb push"
+ ARGS=""
+ ERR="false"
+)
+stage $GO
+
+ulimit -m 90000 -d 90000 -s 10000 -v 200000
+
+GO=(
+ FUNC="runparts"
+ TIME="run-parts"
+ ARGS=""
+ ERR="false"
+)
+stage $GO
+
+GO=(
+ FUNC="i18n2"
+ TIME="i18n 2"
+ ARGS=""
+ ERR="false"
+)
+stage $GO
+
+GO=(
+ FUNC="stats"
+ TIME="stats"
+ ARGS=""
+ ERR="false"
+)
+stage $GO
+
+rm -f "${LOCK_BRITNEY}"
+
+GO=(
+ FUNC="aptftpcleanup"
+ TIME="apt-ftparchive cleanup"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="compress"
+ TIME="compress"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+log "Daily cron scripts successful, all done"
+
+exec > /dev/null 2>&1
+
+GO=(
+ FUNC="logstats"
+ TIME=""
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+cat "$LOGFILE" | mail -s "Log for dinstall run of ${NOW}" cron@ftp-master.debian.org
+
+# Now, at the very (successful) end of dinstall, make sure we remove
+# our stage files, so the next dinstall run will do it all again.
+rm -f "${stagedir}/*"
scriptsdir=$base/dak/scripts/debian/
dbdir=$base/database/
lockdir=$base/lock/
+stagedir=$lockdir/stages
overridedir=$scriptdir/override
extoverridedir=$scriptdir/external-overrides
logdir=$base/log/cron/
global Cnf, projectB
keyrings = None
- Cnf = daklib.utils.get_conf()
+ Cnf = utils.get_conf()
Arguments = [('h',"help","Add-User::Options::Help"),
('c',"create","Add-User::Options::Create"),
usage()
projectB = pg.connect(Cnf["DB::Name"], Cnf["DB::Host"], int(Cnf["DB::Port"]))
- daklib.database.init(Cnf, projectB)
+ database.init(Cnf, projectB)
if not keyrings:
keyrings = Cnf.ValueList("Dinstall::GPGKeyring")
# Ignore the PGP keyring for download of new keys. Ignore errors, if key is missing it will
# barf with the next commands.
cmd = "gpg --no-secmem-warning --no-default-keyring %s --recv-keys %s" \
- % (daklib.utils.gpg_keyring_args(keyrings), Cnf["Add-User::Options::Key"])
+ % (utils.gpg_keyring_args(keyrings), Cnf["Add-User::Options::Key"])
(result, output) = commands.getstatusoutput(cmd)
cmd = "gpg --with-colons --no-secmem-warning --no-auto-check-trustdb --no-default-keyring %s --with-fingerprint --list-key %s" \
- % (daklib.utils.gpg_keyring_args(keyrings),
+ % (utils.gpg_keyring_args(keyrings),
Cnf["Add-User::Options::Key"])
(result, output) = commands.getstatusoutput(cmd)
m = re_gpg_fingerprint.search(output)
if not m:
print output
- daklib.utils.fubar("0x%s: (1) No fingerprint found in gpg output but it returned 0?\n%s" \
- % (Cnf["Add-User::Options::Key"], daklib.utils.prefix_multi_line_string(output, \
+ utils.fubar("0x%s: (1) No fingerprint found in gpg output but it returned 0?\n%s" \
+ % (Cnf["Add-User::Options::Key"], utils.prefix_multi_line_string(output, \
" [GPG output:] ")))
primary_key = m.group(1)
primary_key = primary_key.replace(" ","")
u = re_user_address.search(output)
if not u:
print output
- daklib.utils.fubar("0x%s: (2) No userid found in gpg output but it returned 0?\n%s" \
- % (Cnf["Add-User::Options::Key"], daklib.utils.prefix_multi_line_string(output, " [GPG output:] ")))
+ utils.fubar("0x%s: (2) No userid found in gpg output but it returned 0?\n%s" \
+ % (Cnf["Add-User::Options::Key"], utils.prefix_multi_line_string(output, " [GPG output:] ")))
uid = u.group(1)
n = re_user_name.search(output)
name = n.group(1)
print "0x%s -> %s <%s> -> %s -> %s" % (Cnf["Add-User::Options::Key"], name, emails[0], uid, primary_key)
prompt = "Add user %s with above data (y/N) ? " % (uid)
- yn = daklib.utils.our_raw_input(prompt).lower()
+ yn = utils.our_raw_input(prompt).lower()
if yn == "y":
# Create an account for the user?
% (pwcrypt, name, uid)
(result, output) = commands.getstatusoutput(cmd)
if (result != 0):
- daklib.utils.fubar("Invocation of '%s' failed:\n%s\n" % (cmd, output), result)
+ utils.fubar("Invocation of '%s' failed:\n%s\n" % (cmd, output), result)
try:
summary+=createMail(uid, password, Cnf["Add-User::Options::Key"], Cnf["Dinstall::GPGKeyring"])
except:
summary=""
- daklib.utils.warn("Could not prepare password information for mail, not sending password.")
+ utils.warn("Could not prepare password information for mail, not sending password.")
# Now add user to the database.
projectB.query("BEGIN WORK")
- uid_id = daklib.database.get_or_set_uid_id(uid)
+ uid_id = database.get_or_set_uid_id(uid)
projectB.query('CREATE USER "%s"' % (uid))
projectB.query("COMMIT WORK")
# The following two are kicked out in rhona, so we don't set them. kelly adds
# them as soon as she installs a package with unknown ones, so no problems to expect here.
# Just leave the comment in, to not think about "Why the hell aren't they added" in
# a year, if we ever touch uma again.
-# maint_id = daklib.database.get_or_set_maintainer_id(name)
+# maint_id = database.get_or_set_maintainer_id(name)
# projectB.query("INSERT INTO fingerprint (fingerprint, uid) VALUES ('%s', '%s')" % (primary_key, uid_id))
# Let's add the user to the email-whitelist file if it's configured.
if Cnf.has_key("Dinstall::MailWhiteList") and Cnf["Dinstall::MailWhiteList"] != "":
- file = daklib.utils.open_file(Cnf["Dinstall::MailWhiteList"], "a")
+ file = utils.open_file(Cnf["Dinstall::MailWhiteList"], "a")
for mail in emails:
file.write(mail+'\n')
file.close()
# Should we send mail to the newly added user?
if Cnf.FindB("Add-User::SendEmail"):
mail = name + "<" + emails[0] +">"
- Upload = daklib.queue.Upload(Cnf)
+ Upload = queue.Upload(Cnf)
Subst = Upload.Subst
Subst["__NEW_MAINTAINER__"] = mail
Subst["__UID__"] = uid
Subst["__FROM_ADDRESS__"] = Cnf["Dinstall::MyEmailAddress"]
Subst["__HOSTNAME__"] = Cnf["Dinstall::MyHost"]
Subst["__SUMMARY__"] = summary
- new_add_message = daklib.utils.TemplateSubst(Subst,Cnf["Dir::Templates"]+"/add-user.added")
- daklib.utils.send_mail(new_add_message)
+ new_add_message = utils.TemplateSubst(Subst,Cnf["Dir::Templates"]+"/add-user.added")
+ utils.send_mail(new_add_message)
else:
uid = None
count = 0
suite = 'unstable'
for component in Cnf.SubTree("Component").List():
- if component == "mixed":
- continue
component = component.lower()
list_filename = '%s%s_%s_source.list' % (Cnf["Dir::Lists"], suite, component)
list_file = utils.open_file(list_filename)
"""
for suite in [ "stable", "testing", "unstable" ]:
for component in Cnf.ValueList("Suite::%s::Components" % (suite)):
- architectures = Cnf.ValueList("Suite::%s::Architectures" % (suite))
+ architectures = database.get_suite_architectures(suite)
for arch in [ i.lower() for i in architectures ]:
if arch == "source":
validate_sources(suite, component)
utils.fubar("Couldn't find id's of all suites: %s" % suites)
for component in Cnf.SubTree("Component").List():
- if component == "mixed":
- continue; # Ick
# It is crucial for the dsc override creation based on binary
# overrides that 'dsc' goes first
otypes = Cnf.ValueList("OverrideType")
suite = "stable"
stable = {}
components = Cnf.ValueList("Suite::%s::Components" % (suite))
- architectures = filter(utils.real_arch, Cnf.ValueList("Suite::%s::Architectures" % (suite)))
+ architectures = filter(utils.real_arch, database.get_suite_architectures(suite))
for component in components:
for architecture in architectures:
filename = "%s/dists/%s/%s/binary-%s/Packages" % (Cnf["Dir::Root"], suite, component, architecture)
arch_all_id = database.get_architecture_id("all")
dsc_type_id = database.get_override_type_id("dsc")
- for arch in Cnf.ValueList("Suite::%s::Architectures" % (src_suite)):
+ for arch in database.get_suite_architectures(src_suite_id):
if arch == "source":
continue
################################################################################
+# we unfortunately still have broken stuff in headers
latin1_q = """SET CLIENT_ENCODING TO 'LATIN1'"""
+# get all the arches delivered for a given suite
+# this should probably exist somewhere common
arches_q = """PREPARE arches_q as
SELECT s.architecture, a.arch_string
FROM suite_architectures s
JOIN architecture a ON (s.architecture=a.id)
WHERE suite = $1"""
+# find me the .deb for a given binary id
debs_q = """PREPARE debs_q as
SELECT b.id, f.filename FROM bin_assoc_by_arch baa
JOIN binaries b ON baa.bin=b.id
WHERE suite = $1
AND arch = $2"""
+# ask if we already have contents associated with this binary
olddeb_q = """PREPARE olddeb_q as
SELECT 1 FROM content_associations
WHERE binary_pkg = $1
LIMIT 1"""
+# find me all of the contents for a given .deb
contents_q = """PREPARE contents_q as
SELECT (p.path||'/'||n.file) AS fn,
comma_separated_list(s.section||'/'||b.package)
GROUP BY fn
ORDER BY fn"""
+# find me all of the contents for a given .udeb
udeb_contents_q = """PREPARE udeb_contents_q as
SELECT (p.path||'/'||n.file) as fn,
comma_separated_list(s.section||'/'||b.package)
GROUP BY fn
ORDER BY fn"""
+# clear out all of the temporarily stored content associations
+# this should be run only after p-a has run. after a p-a
+# run we should have either accepted or rejected every package
+# so there should no longer be anything in the queue
+remove_temp_contents_cruft_q = """DELETE FROM temp_content_associations"""
+
+# delete any filenames we are storing which have no binary associated with them
+remove_filename_cruft_q = """DELETE FROM content_file_names
+ WHERE id IN (SELECT cfn.id FROM content_file_names cfn
+ LEFT JOIN content_associations ca
+ ON ca.filename=cfn.id
+                                            WHERE ca.id IS NULL)"""
+
+# delete any paths we are storing which have no binary associated with them
+remove_filepath_cruft_q = """DELETE FROM content_file_paths
+ WHERE id IN (SELECT cfn.id FROM content_file_paths cfn
+ LEFT JOIN content_associations ca
+ ON ca.filepath=cfn.id
+ WHERE ca.id IS NULL)"""
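+
+# The PREPAREd statements above are presumably run later via plain SQL
+# EXECUTE calls, along the lines of (ids illustrative only):
+#   cursor.execute("EXECUTE arches_q(%d)" % suite_id)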
class Contents(object):
"""
Class capable of generating Contents-$arch.gz files
self.header = None
def _getHeader(self):
- # Internal method to return the header for Contents.gz files
+ """
+ Internal method to return the header for Contents.gz files
+
+ This is boilerplate which explains the contents of the file and how
+ it can be used.
+ """
if self.header == None:
if Config().has_key("Contents::Header"):
try:
print( "header: %s" % self.header )
h.close()
except:
- log.error( "error openeing header file: %d\n%s" % (Config()["Contents::Header"],
- traceback.format_exc() ))
+            log.error( "error opening header file: %s\n%s" % (Config()["Contents::Header"],
+                                                              traceback.format_exc() ))
self.header = False
else:
print( "no header" )
_goal_column = 54
def _write_content_file(self, cursor, filename):
- # Internal method for writing all the results to a given file
+ """
+ Internal method for writing all the results to a given file.
+ The cursor should have a result set generated from a query already.
+ """
f = gzip.open(Config()["Dir::Root"] + filename, "w")
try:
header = self._getHeader()
def cruft(self):
"""
- remove files/paths from the DB which are no longer referenced by binaries
+ remove files/paths from the DB which are no longer referenced
+ by binaries and clean the temporary table
"""
cursor = DBConn().cursor();
cursor.execute( "BEGIN WORK" )
- cursor.execute( """DELETE FROM content_file_names
- WHERE id IN (SELECT cfn.id FROM content_file_names cfn
- LEFT JOIN content_associations ca
- ON ca.filename=cfn.id
- WHERE ca.id IS NULL)""" );
- cursor.execute( """DELETE FROM content_file_paths
- WHERE id IN (SELECT cfn.id FROM content_file_paths cfn
- LEFT JOIN content_associations ca
- ON ca.filepath=cfn.id
- WHERE ca.id IS NULL)""" );
+ cursor.execute( remove_temp_contents_cruft_q )
+ cursor.execute( remove_filename_cruft_q )
+ cursor.execute( remove_filepath_cruft_q )
cursor.execute( "COMMIT" )
################################################################################
def _suites(self):
- # return a list of suites to operate on
+ """
+ return a list of suites to operate on
+ """
if Config().has_key( "%s::%s" %(options_prefix,"Suite")):
suites = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Suite")])
else:
return suites
def _arches(self, cursor, suite):
- # return a list of archs to operate on
+ """
+ return a list of archs to operate on
+ """
arch_list = [ ]
if Config().has_key( "%s::%s" %(options_prefix,"Arch")):
archs = utils.split_args(Config()[ "%s::%s" %(options_prefix,"Arch")])
level=logging.DEBUG
- logging.basicConfig( level=logging.DEBUG,
+ logging.basicConfig( level=level,
format='%(asctime)s %(levelname)s %(message)s',
stream = sys.stderr )
if suite != "experimental":
check_components.append('main/debian-installer');
for component in check_components:
- architectures = filter(utils.real_arch, Cnf.ValueList("Suite::%s::Architectures" % (suite)))
+ architectures = filter(utils.real_arch, database.get_suite_architectures(suite))
for architecture in architectures:
filename = "%s/dists/%s/%s/binary-%s/Packages.gz" % (Cnf["Dir::Root"], suite, component, architecture)
# apt_pkg.ParseTagFile needs a real file handle
################################################################################
-import sys, imp
-import daklib.utils, daklib.extensions
+import sys
+import imp
+import daklib.utils
+import daklib.extensions
################################################################################
("generate-releases",
"Generate Release files"),
("contents",
- "Generate contest files"),
+ "Generate content files"),
("generate-index-diffs",
"Generate .diff/Index files"),
("clean-suites",
################################################################################
def do_update(self):
-vvvvvvvvvvvvvvvvvvvv
print "Note: to be able to enable the the PL/Perl (plperl) procedural language, we do"
print "need postgresql-plperl-$postgres-version installed. Make sure that this is the"
print "case before you continue. Interrupt if it isn't, sleeping 5 seconds now."
print "(We need to be database superuser for this to work!)"
time.sleep (5)
-^^^^^^^^^^^^^^^^^^^^
try:
c = self.db.cursor()
#!/usr/bin/env python
-# coding=utf8
-
"""
-Debian Archive Kit Database Update Script
-Copyright © 2008 Michael Casadevall <mcasadevall@debian.org>
-Copyright © 2008 Roger Leigh <rleigh@debian.org>
+Database Update Script - Make the suite_architectures table use sane values
-Debian Archive Kit Database Update Script 2
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Joerg Jaspert <joerg@debian.org>
+@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
################################################################################
-# <tomv_w> really, if we want to screw ourselves, let's find a better way.
-# <Ganneff> rm -rf /srv/ftp.debian.org
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.utils import get_conf
################################################################################
-import psycopg2, time
-
-################################################################################
+suites = {} #: Cache of existing suites
+archs = {} #: Cache of existing architectures
def do_update(self):
- print "Adding content fields to database"
+ """ Execute the DB update """
+    print "Let's make the suite_architectures table use sane values"
+ Cnf = get_conf()
+
+ query = "INSERT into suite_architectures (suite, architecture) VALUES (%s, %s)" #: Update query
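+    # (run once per (suite, architecture) pair below; e.g.
+    #  c.execute(query, [suites["unstable"], archs["i386"]]) adds one row)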
try:
c = self.db.cursor()
- c.execute("""CREATE TABLE content_file_paths (
- id serial primary key not null,
- path text unique not null
- )""")
-
- c.execute("""CREATE TABLE content_file_names (
- id serial primary key not null,
- file text unique not null
- )""")
-
- c.execute("""CREATE TABLE content_associations (
- id serial not null,
- binary_pkg int4 not null references binaries(id) on delete cascade,
- filepath int4 not null references content_file_paths(id) on delete cascade,
- filename int4 not null references content_file_names(id) on delete cascade
- );""")
-
- c.execute("""CREATE TABLE temp_content_associations (
- id serial not null,
- package text not null,
- version debversion not null,
- filepath int4 not null references content_file_paths(id) on delete cascade,
- filename int4 not null references content_file_names(id) on delete cascade
- );""")
-
- c.execute("""CREATE FUNCTION comma_concat(text, text) RETURNS text
- AS $_$select case
- WHEN $2 is null or $2 = '' THEN $1
- WHEN $1 is null or $1 = '' THEN $2
- ELSE $1 || ',' || $2
- END$_$
- LANGUAGE sql""")
-
- c.execute("""CREATE AGGREGATE comma_separated_list (
- BASETYPE = text,
- SFUNC = comma_concat,
- STYPE = text,
- INITCOND = ''
- );""")
-
- c.execute( "CREATE INDEX content_assocaitions_binary ON content_associations(binary_pkg)" )
-
- c.execute("UPDATE config SET value = '2' WHERE name = 'db_revision'")
- self.db.commit()
+ c.execute("DELETE FROM suite_architectures;")
- print "REMINDER: Remember to fully regenerate the Contents files before running import-contents"
- print ""
- print "Pausing for five seconds ..."
- time.sleep (5)
+ c.execute("SELECT id, arch_string FROM architecture;")
+ a=c.fetchall()
+ for arch in a:
+ archs[arch[1]]=arch[0]
+
+ c.execute("SELECT id,suite_name FROM suite")
+ s=c.fetchall()
+ for suite in s:
+ suites[suite[1]]=suite[0]
+
+ for suite in Cnf.SubTree("Suite").List():
+ print "Processing suite %s" % (suite)
+ architectures = Cnf.SubTree("Suite::" + suite).ValueList("Architectures")
+ suite = suite.lower()
+ for arch in architectures:
+ c.execute(query, [suites[suite], archs[arch]])
+
+ c.execute("UPDATE config SET value = '4' WHERE name = 'db_revision'")
+
+ self.db.commit()
except psycopg2.ProgrammingError, msg:
self.db.rollback()
- print "FATAL: Unable to apply debversion table update 2!"
- print "Error Message: " + str(msg)
- print "Database changes have been rolled back."
+        raise DBUpdateError, "Unable to apply sanity to the suite_architectures table, rollback issued. Error message: %s" % (str(msg))
--- /dev/null
+#!/usr/bin/env python
+
+"""
+Database Update Script - Fix bin_assoc_by_arch view
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Joerg Jaspert <joerg@debian.org>
+@license: GNU General Public License version 2 or later
+
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+
+################################################################################
+
+def do_update(self):
+ """ Execute the DB update """
+
+ print "Fixing bin_assoc_by_arch view"
+ try:
+ c = self.db.cursor()
+ c.execute("DROP VIEW bin_assoc_by_arch")
+
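+
+        # Assumption: architecture ids 1 and 2 are the pseudo-arches
+        # "source" and "all"; the view fans arch-all binaries
+        # (b.architecture = 2) out to every real architecture (a.id > 2).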
+ c.execute("""CREATE OR REPLACE VIEW bin_assoc_by_arch AS
+ SELECT ba.suite, ba.bin, a.id AS arch
+ FROM bin_associations ba
+ JOIN binaries b ON ba.bin = b.id, architecture a
+ WHERE a.id > 2 AND (b.architecture = 2 OR b.architecture = a.id) """)
+ c.execute("UPDATE config SET value = '5' WHERE name = 'db_revision'")
+
+ self.db.commit()
+
+ except psycopg2.ProgrammingError, msg:
+ self.db.rollback()
+        raise DBUpdateError, "Unable to recreate bin_assoc_by_arch view, rollback issued. Error message: %s" % (str(msg))
SubSec = Cnf.SubTree("Location::%s" % (location))
archive_id = database.get_archive_id(SubSec["archive"])
type = SubSec.Find("type")
- if type == "legacy-mixed":
- projectB.query("INSERT INTO location (path, archive, type) VALUES ('%s', %d, '%s')" % (location, archive_id, SubSec["type"]))
- else:
- for component in Cnf.SubTree("Component").List():
- component_id = database.get_component_id(component)
- projectB.query("INSERT INTO location (path, component, archive, type) VALUES ('%s', %d, %d, '%s')" %
- (location, component_id, archive_id, SubSec["type"]))
+ for component in Cnf.SubTree("Component").List():
+ component_id = database.get_component_id(component)
+ projectB.query("INSERT INTO location (path, component, archive, type) VALUES ('%s', %d, %d, '%s')" %
+ (location, component_id, archive_id, SubSec["type"]))
def update_architectures ():
projectB.query("DELETE FROM architecture")
for i in ("Version", "Origin", "Description"):
if SubSec.has_key(i):
projectB.query("UPDATE suite SET %s = '%s' WHERE suite_name = '%s'" % (i.lower(), SubSec[i], suite.lower()))
- for architecture in Cnf.ValueList("Suite::%s::Architectures" % (suite)):
+ for architecture in database.get_suite_architectures(suite):
architecture_id = database.get_architecture_id (architecture)
projectB.query("INSERT INTO suite_architectures (suite, architecture) VALUES (currval('suite_id_seq'), %d)" % (architecture_id))
SubSec = Cnf.SubTree("Location::%s" % (location))
server = SubSec["Archive"]
type = Cnf.Find("Location::%s::Type" % (location))
- if type == "legacy-mixed":
- sources = location + 'Sources.gz'
- suite = Cnf.Find("Location::%s::Suite" % (location))
- do_sources(sources, suite, "", server)
- elif type == "legacy" or type == "pool":
+ if type == "pool":
for suite in Cnf.ValueList("Location::%s::Suites" % (location)):
for component in Cnf.SubTree("Component").List():
sources = Cnf["Dir::Root"] + "dists/" + Cnf["Suite::%s::CodeName" % (suite)] + '/' + component + '/source/' + 'Sources.gz'
SubSec = Cnf.SubTree("Location::%s" % (location))
server = SubSec["Archive"]
type = Cnf.Find("Location::%s::Type" % (location))
- if type == "legacy-mixed":
- packages = location + 'Packages'
- suite = Cnf.Find("Location::%s::Suite" % (location))
- print 'Processing '+location+'...'
- process_packages (packages, suite, "", server)
- elif type == "legacy" or type == "pool":
+ if type == "pool":
for suite in Cnf.ValueList("Location::%s::Suites" % (location)):
udeb_components = map(lambda x: x+"/debian-installer",
Cnf.ValueList("Suite::%s::UdebComponents" % suite))
for component in Cnf.SubTree("Component").List() + udeb_components:
- architectures = filter(utils.real_arch,
- Cnf.ValueList("Suite::%s::Architectures" % (suite)))
+ architectures = filter(utils.real_arch, database.get_suite_architectures(suite))
for architecture in architectures:
packages = Cnf["Dir::Root"] + "dists/" + Cnf["Suite::%s::CodeName" % (suite)] + '/' + component + '/binary-' + architecture + '/Packages'
print 'Processing '+packages+'...'
self.projectB = projectB
def do_archive(self):
- """Initalize the archive table."""
+        """Initialize the archive table."""
c = self.projectB.cursor()
c.execute("DELETE FROM archive")
c = self.projectB.cursor()
c.execute("DELETE FROM location")
- loc_add_mixed = "INSERT INTO location (path, archive, type) " + \
- "VALUES (%s, %s, %s)"
-
loc_add = "INSERT INTO location (path, component, archive, type) " + \
"VALUES (%s, %s, %s, %s)"
utils.fubar("Archive '%s' for location '%s' not found."
% (location_config["Archive"], location))
location_type = location_config.get("type")
- if location_type == "legacy-mixed":
- c.execute(loc_add_mixed, [location, archive_id, location_config["type"]])
- elif location_type == "legacy" or location_type == "pool":
+ if location_type == "pool":
for component in self.Cnf.SubTree("Component").List():
component_id = self.projectB.get_component_id(component)
c.execute(loc_add, [location, component_id, archive_id, location_type])
architecture_id = self.projectB.get_architecture_id (architecture)
if architecture_id < 0:
utils.fubar("architecture '%s' not found in architecture"
- " table for suite %s."
- % (architecture, suite))
+ " table for suite %s."
+ % (architecture, suite))
c.execute(sa_add, [architecture_id])
self.projectB.commit()
sys.stderr.write("Processing %s...\n" % (suite))
override_suite = Cnf["Suite::%s::OverrideCodeName" % (suite)]
for component in Cnf.SubTree("Component").List():
- if component == "mixed":
- continue # Ick
for otype in Cnf.ValueList("OverrideType"):
if otype == "deb":
suffix = ""
################################################################################
-def write_legacy_mixed_filelist(suite, list, packages, dislocated_files):
- # Work out the filename
- filename = os.path.join(Cnf["Dir::Lists"], "%s_-_all.list" % (suite))
- output = utils.open_file(filename, "w")
- # Generate the final list of files
- files = {}
- for fileid in list:
- path = packages[fileid]["path"]
- filename = packages[fileid]["filename"]
- file_id = packages[fileid]["file_id"]
- if suite == "stable" and dislocated_files.has_key(file_id):
- filename = dislocated_files[file_id]
- else:
- filename = path + filename
- if files.has_key(filename):
- utils.warn("%s (in %s) is duplicated." % (filename, suite))
- else:
- files[filename] = ""
- # Sort the files since apt-ftparchive doesn't
- keys = files.keys()
- keys.sort()
- # Write the list of files out
- for outfile in keys:
- output.write(outfile+'\n')
- output.close()
-
-############################################################
-
def write_filelist(suite, component, arch, type, list, packages, dislocated_files):
# Work out the filename
if arch != "source":
else:
binary_types = [ "deb" ]
if not Options["Architecture"]:
- architectures = Cnf.ValueList("Suite::%s::Architectures" % (suite))
+ architectures = database.get_suite_architectures(suite)
else:
architectures = utils.split_args(Options["Architecture"])
for arch in [ i.lower() for i in architectures ]:
filelist.extend(d[suite][component]["all"][packagetype])
write_filelist(suite, component, arch, packagetype, filelist,
packages, dislocated_files)
- else: # legacy-mixed suite
- filelist = []
- for component in d[suite].keys():
- for arch in d[suite][component].keys():
- for packagetype in d[suite][component][arch].keys():
- filelist.extend(d[suite][component][arch][packagetype])
- write_legacy_mixed_filelist(suite, filelist, packages, dislocated_files)
+ else: # something broken
+ utils.warn("Suite %s has no components." % (suite))
################################################################################
ver, suite)
adv += "%s\n%s\n\n" % (suite_header, "-"*len(suite_header))
- arches = Cnf.ValueList("Suite::%s::Architectures" % suite)
+ arches = database.get_suite_architectures(suite)
if "source" in arches:
arches.remove("source")
if "all" in arches:
+++ /dev/null
-#!/usr/bin/env python
-
-""" Poolify (move packages from "legacy" type locations to pool locations) """
-# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-################################################################################
-
-# "Welcome to where time stands still,
-# No one leaves and no one will."
-# - Sanitarium - Metallica / Master of the puppets
-
-################################################################################
-
-import os, pg, re, stat, sys
-import apt_pkg, apt_inst
-import daklib.database
-import daklib.utils
-from daklib.regexes import re_isadeb, re_extract_src_version, re_no_epoch, re_issource
-
-################################################################################
-
-Cnf = None
-projectB = None
-
-################################################################################
-
-def usage (exit_code=0):
- print """Usage: dak poolize [OPTIONS]
-Migrate packages from legacy locations into the pool.
-
- -l, --limit=AMOUNT only migrate AMOUNT Kb of packages
- -n, --no-action don't do anything
- -v, --verbose explain what is being done
- -h, --help show this help and exit"""
-
- sys.exit(exit_code)
-
-################################################################################
-
-# Q is a python-postgresql query result set and must have the
-# following four columns:
-# o files.id (as 'files_id')
-# o files.filename
-# o location.path
-# o component.name (as 'component')
-#
-# limit is a value in bytes or -1 for no limit (use with care!)
-# verbose and no_action are booleans
-
-def poolize (q, limit, verbose, no_action):
- poolized_size = 0L
- poolized_count = 0
-
- # Parse -l/--limit argument
- qd = q.dictresult()
- for qid in qd:
- legacy_filename = qid["path"]+qid["filename"]
- size = os.stat(legacy_filename)[stat.ST_SIZE]
- if (poolized_size + size) > limit and limit >= 0:
- daklib.utils.warn("Hit %s limit." % (daklib.utils.size_type(limit)))
- break
- poolized_size += size
- poolized_count += 1
- base_filename = os.path.basename(legacy_filename)
- destination_filename = base_filename
- # Work out the source package name
- if re_isadeb.match(base_filename):
- control = apt_pkg.ParseSection(apt_inst.debExtractControl(daklib.utils.open_file(legacy_filename)))
- package = control.Find("Package", "")
- source = control.Find("Source", package)
- if source.find("(") != -1:
- m = re_extract_src_version.match(source)
- source = m.group(1)
- # If it's a binary, we need to also rename the file to include the architecture
- version = control.Find("Version", "")
- architecture = control.Find("Architecture", "")
- if package == "" or version == "" or architecture == "":
- daklib.utils.fubar("%s: couldn't determine required information to rename .deb file." % (legacy_filename))
- version = re_no_epoch.sub('', version)
- destination_filename = "%s_%s_%s.deb" % (package, version, architecture)
- else:
- m = re_issource.match(base_filename)
- if m:
- source = m.group(1)
- else:
- daklib.utils.fubar("expansion of source filename '%s' failed." % (legacy_filename))
- # Work out the component name
- component = qid["component"]
- if component == "":
- q = projectB.query("SELECT DISTINCT(c.name) FROM override o, component c WHERE o.package = '%s' AND o.component = c.id;" % (source))
- ql = q.getresult()
- if not ql:
- daklib.utils.fubar("No override match for '%s' so I can't work out the component." % (source))
- if len(ql) > 1:
- daklib.utils.fubar("Multiple override matches for '%s' so I can't work out the component." % (source))
- component = ql[0][0]
- # Work out the new location
- q = projectB.query("SELECT l.id FROM location l, component c WHERE c.name = '%s' AND c.id = l.component AND l.type = 'pool';" % (component))
- ql = q.getresult()
- if len(ql) != 1:
- daklib.utils.fubar("couldn't determine location ID for '%s'. [query returned %d matches, not 1 as expected]" % (source, len(ql)))
- location_id = ql[0][0]
- # First move the files to the new location
- pool_location = daklib.utils.poolify (source, component)
- pool_filename = pool_location + destination_filename
- destination = Cnf["Dir::Pool"] + pool_location + destination_filename
- if os.path.exists(destination):
- daklib.utils.fubar("'%s' already exists in the pool; serious FUBARity." % (legacy_filename))
- if verbose:
- print "Moving: %s -> %s" % (legacy_filename, destination)
- if not no_action:
- daklib.utils.move(legacy_filename, destination)
- # Then Update the DB's files table
- if verbose:
- print "SQL: UPDATE files SET filename = '%s', location = '%s' WHERE id = '%s'" % (pool_filename, location_id, qid["files_id"])
- if not no_action:
- q = projectB.query("UPDATE files SET filename = '%s', location = '%s' WHERE id = '%s'" % (pool_filename, location_id, qid["files_id"]))
-
- sys.stderr.write("Poolized %s in %s files.\n" % (daklib.utils.size_type(poolized_size), poolized_count))
-
-################################################################################
-
-def main ():
- global Cnf, projectB
-
- Cnf = daklib.utils.get_conf()
-
- for i in ["help", "limit", "no-action", "verbose" ]:
- if not Cnf.has_key("Poolize::Options::%s" % (i)):
- Cnf["Poolize::Options::%s" % (i)] = ""
-
-
- Arguments = [('h',"help","Poolize::Options::Help"),
- ('l',"limit", "Poolize::Options::Limit", "HasArg"),
- ('n',"no-action","Poolize::Options::No-Action"),
- ('v',"verbose","Poolize::Options::Verbose")]
-
- apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv)
- Options = Cnf.SubTree("Poolize::Options")
-
- if Options["Help"]:
- usage()
-
- projectB = pg.connect(Cnf["DB::Name"], Cnf["DB::Host"], int(Cnf["DB::Port"]))
- daklib.database.init(Cnf, projectB)
-
- if not Options["Limit"]:
- limit = -1
- else:
- limit = int(Options["Limit"]) * 1024
-
- # -n/--no-action implies -v/--verbose
- if Options["No-Action"]:
- Options["Verbose"] = "true"
-
- # Sanity check the limit argument
- if limit > 0 and limit < 1024:
- daklib.utils.fubar("-l/--limit takes an argument with a value in kilobytes.")
-
- # Grab a list of all files not already in the pool
- q = projectB.query("""
-SELECT l.path, f.filename, f.id as files_id, c.name as component
- FROM files f, location l, component c WHERE
- NOT EXISTS (SELECT 1 FROM location l WHERE l.type = 'pool' AND f.location = l.id)
- AND NOT (f.filename ~ '^potato') AND f.location = l.id AND l.component = c.id
-UNION SELECT l.path, f.filename, f.id as files_id, null as component
- FROM files f, location l WHERE
- NOT EXISTS (SELECT 1 FROM location l WHERE l.type = 'pool' AND f.location = l.id)
- AND NOT (f.filename ~ '^potato') AND f.location = l.id AND NOT EXISTS
- (SELECT 1 FROM location l WHERE l.component IS NOT NULL AND f.location = l.id);""")
-
- poolize(q, limit, Options["Verbose"], Options["No-Action"])
-
-#######################################################################################
-
-if __name__ == '__main__':
- main()
###############################################################################
-import errno, fcntl, os, sys, time, re
+import errno
+import fcntl
+import os
+import sys
+import time
+import re
import apt_pkg, commands
from daklib import database
from daklib import logging
suite_id = database.get_suite_id(suite)
projectB.query("INSERT INTO bin_associations (suite, bin) VALUES (%d, currval('binaries_id_seq'))" % (suite_id))
-
if not database.copy_temporary_contents(package, version, files[newfile]):
reject("Missing contents for package")
- # If the .orig.tar.gz is in a legacy directory we need to poolify
- # it, so that apt-get source (and anything else that goes by the
- # "Directory:" field in the Sources.gz file) works.
- orig_tar_id = Upload.pkg.orig_tar_id
- orig_tar_location = Upload.pkg.orig_tar_location
- legacy_source_untouchable = Upload.pkg.legacy_source_untouchable
- if orig_tar_id and orig_tar_location == "legacy":
- q = projectB.query("SELECT DISTINCT ON (f.id) l.path, f.filename, f.id as files_id, df.source, df.id as dsc_files_id, f.size, f.md5sum FROM files f, dsc_files df, location l WHERE df.source IN (SELECT source FROM dsc_files WHERE file = %s) AND f.id = df.file AND l.id = f.location AND (l.type = 'legacy' OR l.type = 'legacy-mixed')" % (orig_tar_id))
- qd = q.dictresult()
- for qid in qd:
- # Is this an old upload superseded by a newer -sa upload? (See check_dsc() for details)
- if legacy_source_untouchable.has_key(qid["files_id"]):
- continue
- # First move the files to the new location
- legacy_filename = qid["path"] + qid["filename"]
- pool_location = utils.poolify (changes["source"], files[newfile]["component"])
- pool_filename = pool_location + os.path.basename(qid["filename"])
- destination = Cnf["Dir::Pool"] + pool_location
- utils.move(legacy_filename, destination)
- # Then Update the DB's files table
- q = projectB.query("UPDATE files SET filename = '%s', location = '%s' WHERE id = '%s'" % (pool_filename, dsc_location_id, qid["files_id"]))
-
- # If this is a sourceful diff only upload that is moving non-legacy
+ # If this is a sourceful diff only upload that is moving
# cross-component we need to copy the .orig.tar.gz into the new
# component too for the same reasons as above.
#
if changes["architecture"].has_key("source") and orig_tar_id and \
- orig_tar_location != "legacy" and orig_tar_location != dsc_location_id:
+ orig_tar_location != dsc_location_id:
q = projectB.query("SELECT l.path, f.filename, f.size, f.md5sum, f.sha1sum, f.sha256sum FROM files f, location l WHERE f.id = %s AND f.location = l.id" % (orig_tar_id))
ql = q.getresult()[0]
old_filename = ql[0] + ql[1]
################################################################################
-import commands, errno, fcntl, os, re, shutil, stat, sys, time, tempfile, traceback, tarfile
+import commands
+import errno
+import fcntl
+import os
+import re
+import shutil
+import stat
+import sys
+import time
+import tempfile
+import traceback
+import tarfile
import apt_inst, apt_pkg
from debian_bundle import deb822
from daklib.dbconn import DBConn
(source, dest) = args[1:3]
if changes["distribution"].has_key(source):
for arch in changes["architecture"].keys():
- if arch not in Cnf.ValueList("Suite::%s::Architectures" % (source)):
+ if arch not in database.get_suite_architectures(source):
reject("Mapping %s to %s for unreleased architecture %s." % (source, dest, arch),"")
del changes["distribution"][source]
changes["distribution"][dest] = 1
default_suite = Cnf.get("Dinstall::DefaultSuite", "Unstable")
architecture = control.Find("Architecture")
upload_suite = changes["distribution"].keys()[0]
- if architecture not in Cnf.ValueList("Suite::%s::Architectures" % (default_suite)) and architecture not in Cnf.ValueList("Suite::%s::Architectures" % (upload_suite)):
+ if architecture not in database.get_suite_architectures(default_suite) and architecture not in database.get_suite_architectures(upload_suite):
reject("Unknown architecture '%s'." % (architecture))
# Ensure the architecture of the .deb is one of the ones
################################################################################
-import commands, os, pg, re, sys
-import apt_pkg, apt_inst
+import commands
+import os
+import pg
+import re
+import sys
+import apt_pkg
+import apt_inst
from daklib import database
from daklib import utils
from daklib.dak_exceptions import *
if arches:
all_arches = set(arches)
else:
- all_arches = set(Cnf.ValueList("Suite::%s::Architectures" % suites[0]))
+ all_arches = set(database.get_suite_architectures(suites[0]))
all_arches -= set(["source", "all"])
for architecture in all_arches:
deps = {}
import pg, sys
import apt_pkg
from daklib import utils
+from daklib import database
################################################################################
for suite in suite_list:
suite_id = suite_ids[suite]
suite_arches[suite_id] = {}
- for arch in Cnf.ValueList("Suite::%s::Architectures" % (suite)):
+ for arch in database.get_suite_architectures(suite_id):
suite_arches[suite_id][arch] = ""
suite_id_list.append(suite_id)
output_list = [ output_format(i) for i in suite_list ]
mode = args[0].lower()
projectB = pg.connect(Cnf["DB::Name"], Cnf["DB::Host"], int(Cnf["DB::Port"]))
+ database.init(Cnf, projectB)
if mode == "arch-space":
per_arch_space_use()
#!/usr/bin/env python
-""" Database Update Main Script """
+""" Database Update Main Script
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
-# Copyright (C) 2008 Michael Casadevall <mcasadevall@debian.org>
+@copyright: 2008 Michael Casadevall <mcasadevall@debian.org>
+@license: GNU General Public License version 2 or later
+"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
################################################################################
-import psycopg2, sys, fcntl, os
+import psycopg2
+import sys
+import fcntl
+import os
import apt_pkg
import time
import errno
from daklib import database
from daklib import utils
+from daklib.dak_exceptions import DBUpdateError
################################################################################
Cnf = None
projectB = None
-required_database_schema = 4
+required_database_schema = 5
################################################################################
sys.exit(0)
for i in range (database_revision, required_database_schema):
- print "updating databse schema from " + str(database_revision) + " to " + str(i+1)
+    print "updating database schema from " + str(i) + " to " + str(i+1)
try:
dakdb = __import__("dakdb", globals(), locals(), ['update'+str(i+1)])
update_module = getattr(dakdb, "update"+str(i+1))
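Each schema revision ships as a dakdb/updateN.py module picked up by the __import__ loop above. A hedged skeleton of what such a module might look like; the do_update(self) entry point, the self.db cursor, and the config/db_revision bookkeeping are assumptions drawn from how the driver loads these modules, so consult an existing dakdb/updateN.py for the authoritative shape.

    # Hypothetical dakdb/update5.py skeleton (do_update() and self.db are
    # assumed conventions, not taken verbatim from this diff).
    import psycopg2
    from daklib.dak_exceptions import DBUpdateError

    def do_update(self):
        print "Applying schema revision 5"
        try:
            c = self.db.cursor()
            # ... revision-specific DDL/DML would go here ...
            c.execute("UPDATE config SET value = '5' WHERE name = 'db_revision'")
            self.db.commit()
        except psycopg2.ProgrammingError, msg:
            self.db.rollback()
            raise DBUpdateError("Unable to apply update 5, rollback issued. Error: %s" % (str(msg)))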
""" DB access functions
@group readonly: get_suite_id, get_section_id, get_priority_id, get_override_type_id,
get_architecture_id, get_archive_id, get_component_id, get_location_id,
- get_source_id, get_suite_version, get_files_id, get_maintainer, get_suites
+ get_source_id, get_suite_version, get_files_id, get_maintainer, get_suites,
+ get_suite_architectures
@group read/write: get_or_set*, set_files_id
@contact: Debian FTP Master <ftpmaster@debian.org>
maintainer_id_cache = {} #: cache for maintainers
keyring_id_cache = {} #: cache for keyrings
source_id_cache = {} #: cache for sources
+
files_id_cache = {} #: cache for files
maintainer_cache = {} #: cache for maintainer names
fingerprint_id_cache = {} #: cache for fingerprints
cache_preloaded = True
+def get_suite_architectures(suite):
+ """
+ Returns list of architectures for C{suite}.
+
+ @type suite: string, int
+ @param suite: the suite name or the suite_id
+
+ @rtype: list
+ @return: the list of architectures for I{suite}
+ """
+
+    suite_id = None
+    if isinstance(suite, str):
+        suite_id = get_suite_id(suite)
+    elif isinstance(suite, int):
+        suite_id = suite
+    else:
+        return None
+
+ sql = """ SELECT a.arch_string FROM suite_architectures sa
+ JOIN architecture a ON (a.id = sa.architecture)
+              WHERE sa.suite = '%s' """ % (suite_id)
+
+ q = projectB.query(sql)
+ return map(lambda x: x[0], q.getresult())
+
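A usage sketch for the new helper (suite names illustrative). database.init() must have been called first so the module-level projectB connection exists, which is exactly what the callers elsewhere in this diff now do.

    from daklib import database

    database.init(Cnf, projectB)                           # set up module globals first
    archs = database.get_suite_architectures("unstable")   # look up by suite name
    archs = database.get_suite_architectures(5)            # or by numeric suite id
    assert database.get_suite_architectures(None) is None  # anything else returns None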
################################################################################
def get_or_set_maintainer_id (maintainer):
subst = {
"__PACKAGE__": package,
"__VERSION__": version,
+        "__TO_ADDRESS__": Cnf["Dinstall::MyAdminAddress"],
"__DAK_ADDRESS__": Cnf["Dinstall::MyEmailAddress"]
}
- message = utils.TemplateSubst(Subst, Cnf["Dir::Templates"]+"/bts-categorize")
+    message = utils.TemplateSubst(subst, Cnf["Dir::Templates"]+"/missing-contents")
utils.send_mail( message )
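utils.TemplateSubst fills __KEY__ placeholders from a mapping like the one built above (compare the missing-contents template hunk at the end of this diff). A minimal sketch of the substitution idea, not the daklib implementation itself:

    def template_subst(subst_map, template_filename):
        # Read the template and replace each __KEY__ placeholder with its value.
        f = open(template_filename)
        template = f.read()
        f.close()
        for key, value in subst_map.items():
            template = template.replace(key, str(value))
        return template

    # message = template_subst(subst, Cnf["Dir::Templates"]+"/missing-contents")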
exists = DBConn().insert_content_path(package, version, deb)
self.accept_count = 0
self.accept_bytes = 0L
self.reject_message = ""
- self.pkg = Pkg(changes = {}, dsc = {}, dsc_files = {}, files = {},
- legacy_source_untouchable = {})
+ self.pkg = Pkg(changes = {}, dsc = {}, dsc_files = {}, files = {})
# Initialize the substitution template mapping global
Subst = self.Subst = {}
self.pkg.dsc.clear()
self.pkg.files.clear()
self.pkg.dsc_files.clear()
- self.pkg.legacy_source_untouchable.clear()
self.pkg.orig_tar_id = None
self.pkg.orig_tar_location = ""
self.pkg.orig_tar_gz = None
self.pkg.dsc.update(p.load())
self.pkg.files.update(p.load())
self.pkg.dsc_files.update(p.load())
- self.pkg.legacy_source_untouchable.update(p.load())
self.pkg.orig_tar_id = p.load()
self.pkg.orig_tar_location = p.load()
dsc = self.pkg.dsc
files = self.pkg.files
dsc_files = self.pkg.dsc_files
- legacy_source_untouchable = self.pkg.legacy_source_untouchable
orig_tar_id = self.pkg.orig_tar_id
orig_tar_location = self.pkg.orig_tar_location
d_dsc_files[file_entry][i] = dsc_files[file_entry][i]
for i in [ d_changes, d_dsc, d_files, d_dsc_files,
- legacy_source_untouchable, orig_tar_id, orig_tar_location ]:
+ orig_tar_id, orig_tar_location ]:
p.dump(i)
dump_file.close()
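Dropping legacy_source_untouchable has to happen on the dump side and the load side together: the pickle stream is positional, so removing a field from only one side would shift every later value. A toy illustration of that symmetry requirement:

    # Writer and reader must agree on the field order; pickle has no keys.
    import cPickle
    from cStringIO import StringIO

    buf = StringIO()
    p = cPickle.Pickler(buf, 1)
    for field in ({"c": 1}, {"d": 2}, 42, "loc"):   # changes, dsc, orig_tar_id, ...
        p.dump(field)

    buf.seek(0)
    u = cPickle.Unpickler(buf)
    changes, dsc, orig_tar_id, orig_tar_location = [u.load() for _ in range(4)]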
self.reject_message = ""
files = self.pkg.files
dsc_files = self.pkg.dsc_files
- legacy_source_untouchable = self.pkg.legacy_source_untouchable
self.pkg.orig_tar_gz = None
# Try and find all files mentioned in the .dsc. This has
actual_size = os.stat(old_file)[stat.ST_SIZE]
if actual_md5 == dsc_files[dsc_file]["md5sum"] and actual_size == int(dsc_files[dsc_file]["size"]):
x = i
- else:
- legacy_source_untouchable[i[3]] = ""
old_file = x[0] + x[1]
old_file_fh = utils.open_file(old_file)
# See install() in process-accepted...
self.pkg.orig_tar_id = x[3]
self.pkg.orig_tar_gz = old_file
- if suite_type == "legacy" or suite_type == "legacy-mixed":
- self.pkg.orig_tar_location = "legacy"
- else:
- self.pkg.orig_tar_location = x[4]
+ self.pkg.orig_tar_location = x[4]
else:
# Not there? Check the queue directories...
if len(args) >= 1:
timestamp = args[0]
if timestamp.count("T") == 0:
- expiredate = time.strftime("%Y-%m-%d", time.gmtime(timestamp))
+ try:
+ expiredate = time.strftime("%Y-%m-%d", time.gmtime(float(timestamp)))
+ except ValueError:
+ expiredate = "unknown (%s)" % (timestamp)
else:
expiredate = timestamp
reject("The key used to sign %s has expired on %s" % (sig_filename, expiredate))
+++ /dev/null
-<!-- -*- mode: sgml; mode: fold -*- -->
-<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [
-
-<!ENTITY % dakent SYSTEM "dak.ent">
-%dakent;
-
-]>
-
-<refentry>
- &dak-docinfo;
-
- <refmeta>
- <refentrytitle>dak_poolize</>
- <manvolnum>1</>
- </refmeta>
-
- <!-- Man page title -->
- <refnamediv>
- <refname>dak poolize</>
- <refpurpose>Utility to poolize files (move them from legacy to pool location)</>
- </refnamediv>
-
- <!-- Arguments -->
- <refsynopsisdiv>
- <cmdsynopsis>
- <command>dak poolize</>
- <arg><option><replaceable>options</replaceable></></arg>
- </cmdsynopsis>
- </refsynopsisdiv>
-
- <RefSect1><Title>Description</>
- <para>
- <command>dak poolize</command> is the command line tool to poolize files; i.e. move files from legacy locations to their corresponding pool locations.
- </PARA>
- </REFSECT1>
-
- <RefSect1><Title>Options</>
- <VariableList>
- <varlistentry>
- <term><option>-l/--limit</option>=<replaceable>size in kilobytes</replaceable></term>
- <listitem>
- <para>Set the maximum amount of data to poolize. <emphasis>Note:</emphasis> Without this option, all files will be poolized.</para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>-n/--no-action</option></term>
- <listitem>
- <para>Don't actually do anything, just show what would be done.</para>
- </listitem>
- </varlistentry>
- </VariableList>
- </RefSect1>
-
- <RefSect1><Title>Diagnostics</>
- <para>
- <command>dak poolize</command> returns zero on normal operation, non-zero on error.
- </PARA>
- </RefSect1>
-
- &manauthor;
-
-</refentry>
From: __DAK_ADDRESS__
+To: __TO_ADDRESS__
X-Debian: DAK
X-Debian-Package: __PACKAGE__
MIME-Version: 1.0
To: __MAINTAINER_TO__
__BCC__
X-Debian: DAK
-X-Debian-Package: __PACKAGE__
+X-Debian-Package: __SOURCE__
Precedence: bulk
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"