+# -*- mode:sh -*-
# Timestamp. Used for dinstall stat graphs
function ts() {
echo "Archive maintenance timestamp ($1): $(date +%H:%M:%S)"
# pushing merkels QA user, part one
function merkel1() {
# Best-effort ping so the QA host kicks off its own sync when dinstall
# starts.  This hunk only repoints the target: merkel.debian.org ->
# qa.debian.org; key, BatchMode and timeouts are unchanged.
# NOTE(review): "SetupTimeOut" is not a documented OpenSSH option
# (ConnectTimeout is) — confirm whether the local ssh accepts/ignores it.
log "Telling merkels QA user that we start dinstall"
- ssh -2 -i ~dak/.ssh/push_merkel_qa -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 qa@merkel.debian.org sleep 1
+ ssh -2 -i ~dak/.ssh/push_merkel_qa -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 qa@qa.debian.org sleep 1
}
# Create the postgres dump files
function pgdump_pre() {
# Pre-dinstall database backup.  The hunk drops the hard-coded "projectb"
# database name: pg_dump now relies on the PGDATABASE environment variable
# (presumably exported by the surrounding cron setup — verify), and the log
# message interpolates the same variable.
- log "Creating pre-daily-cron-job backup of projectb database..."
- pg_dump projectb > $base/backup/dump_pre_$(date +%Y.%m.%d-%H:%M:%S)
+ log "Creating pre-daily-cron-job backup of $PGDATABASE database..."
+ pg_dump > $base/backup/dump_pre_$(date +%Y.%m.%d-%H:%M:%S)
}
function pgdump_post() {
# Post-dinstall database backup; same PGDATABASE change as pgdump_pre.
- log "Creating post-daily-cron-job backup of projectb database..."
+ log "Creating post-daily-cron-job backup of $PGDATABASE database..."
cd $base/backup
POSTDUMP=$(date +%Y.%m.%d-%H:%M:%S)
- pg_dump projectb > $base/backup/dump_$POSTDUMP
- pg_dumpall --globals-only > $base/backup/dumpall_$POSTDUMP
+ pg_dump > $base/backup/dump_$POSTDUMP
+ # The globals-only dumpall (and its "currentall" symlink below) is being
+ # disabled, not deleted — kept commented out for easy re-enabling.
+ #pg_dumpall --globals-only > $base/backup/dumpall_$POSTDUMP
# "current" always points at the newest dump; consumed by pgdakdev below.
ln -sf $base/backup/dump_$POSTDUMP current
- ln -sf $base/backup/dumpall_$POSTDUMP currentall
+ #ln -sf $base/backup/dumpall_$POSTDUMP currentall
}
# Load the dak-dev projectb
function pgdakdev() {
+ # Make sure to unset any possible psql variables so we don't drop the wrong
+ # f****** database by accident
+ # (local + unset: the variables are shadowed for this function only, so the
+ # caller's PG* environment is untouched after we return.)
+ local PGDATABASE
+ unset PGDATABASE
+ local PGHOST
+ unset PGHOST
+ local PGPORT
+ unset PGPORT
+ local PGUSER
+ unset PGUSER
cd $base/backup
# Rebuild the dev projectb from the "current" dump created by pgdump_post.
# This hunk moves the dev cluster port 5433 -> 5434, drops the (now
# disabled) globals restore, and switches the template database.
# NOTE(review): -T template0 -> -T template1 means the new DB inherits any
# local objects added to template1 on that cluster — confirm intentional.
- echo "drop database projectb" | psql -p 5433 template1
- cat currentall | psql -p 5433 template1
- createdb -p 5433 -T template0 projectb
- fgrep -v '\connect' current | psql -p 5433 projectb
+ echo "drop database projectb" | psql -p 5434 template1
+ #cat currentall | psql -p 5433 template1
+ createdb -p 5434 -T template1 projectb
+ # Strip the \connect line from the dump since we connect explicitly.
+ fgrep -v '\connect' current | psql -p 5434 projectb
}
# Updating various files
# Process (oldstable)-proposed-updates "NEW" queue
function punew_do() {
# Worker for the (old)stable-proposed-updates NEW queues.  The hunk stops
# cd-ing here (callers punew/opunew now cd first) and replaces interactive
# "dak process-new" with "dak process-policy", mailing the output to the
# release team while also appending it to REPORT.
# NOTE(review): "mail -e" (skip sending on empty body) is a mailx/bsd-mailx
# extension, not POSIX mail — confirm the installed mail supports it.
- cd "${queuedir}/${1}"
date -u -R >> REPORT
- dak process-new -a -C COMMENTS >> REPORT || true
+ dak process-policy $1 | tee -a REPORT | mail -e -s "NEW changes in $1" debian-release@lists.debian.org
echo >> REPORT
}
function punew() {
# p-u-new wrapper: enter the queue directory (the cd moved here out of
# punew_do), then run the shared worker with the queue name in $1.
log "Doing automated p-u-new processing"
+ cd "${queuedir}/p-u-new"
punew_do "$1"
}
function opunew() {
# o-p-u-new wrapper: mirror image of punew() for the oldstable queue.
log "Doing automated o-p-u-new processing"
+ cd "${queuedir}/o-p-u-new"
punew_do "$1"
}
# Now check if we still know about the packages for which they created the files
# is the timestamp signed by us?
- if $(gpgv --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg timestamp.gpg timestamp); then
+ if $(gpgv --keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/pubring.gpg timestamp.gpg timestamp); then
# now read it. As its signed by us we are sure the content is what we expect, no need
# to do more here. And we only test -d a directory on it anyway.
TSTAMP=$(cat timestamp)
dak check-overrides
}
# This hunk replaces the whole msfl() function (apt-ftparchive suite file
# lists) with dominate(), which prunes obsolete source/binary associations
# via the "dak dominate" subcommand instead.
-function msfl() {
- log "Generating suite file lists for apt-ftparchive"
- dak make-suite-file-list
+function dominate() {
+ log "Removing obsolete source and binary associations"
+ dak dominate
}
function filelist() {
log "Generating Packages and Sources files"
cd $configdir
# GZIP env var makes every gzip spawned downstream produce rsync-friendly
# output.
GZIP='--rsyncable' ; export GZIP
# Index generation moves from apt-ftparchive to dak's own implementation;
# the old invocation is kept commented out as a fallback reference.
- apt-ftparchive generate apt.conf
+ #apt-ftparchive generate apt.conf
+ dak generate-packages-sources
}
function pdiff() {
fi
}
+# Build the public Uploaders index: dak emits "package uploader" pairs
+# (plus pseudo-packages from the masterfiles list via -u), sed strips any
+# "~version" token before a space, and awk pads the package name to a
+# 20-char column.  The index is only re-installed when it actually changed
+# (cmp -s), keeping mirror churn down; Uploaders.gz is rebuilt alongside
+# with --rsyncable for cheap mirroring.
+function mkuploaders() {
+ log 'Creating Uploaders index ... '
+
+ cd $indices
+ dak make-maintainers -u ${scriptdir}/masterfiles/pseudo-packages.maintainers | \
+ sed -e "s/~[^ ]*\([ ]\)/\1/" | \
+ awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}' > .new-uploaders
+
+ if ! cmp -s .new-uploaders Uploaders || [ ! -f Uploaders ]; then
+ log "installing Uploaders ... "
+ mv -f .new-uploaders Uploaders
+ gzip --rsyncable -9v <Uploaders >.new-uploaders.gz
+ mv -f .new-uploaders.gz Uploaders.gz
+ else
+ rm -f .new-uploaders
+ fi
+}
+
function copyoverrides() {
log 'Copying override files into public view ...'
ARCHLIST=$(tempfile)
- log "Querying projectb..."
- echo 'SELECT l.path, f.filename, a.arch_string FROM location l JOIN files f ON (f.location = l.id) LEFT OUTER JOIN (binaries b JOIN architecture a ON (b.architecture = a.id)) ON (f.id = b.file)' | psql projectb -At | sed 's/|//;s,^/srv/ftp.debian.org/ftp,.,' | sort >$ARCHLIST
+ log "Querying $PGDATABASE..."
+ echo 'SELECT l.path, f.filename, a.arch_string FROM location l JOIN files f ON (f.location = l.id) LEFT OUTER JOIN (binaries b JOIN architecture a ON (b.architecture = a.id)) ON (f.id = b.file)' | psql -At | sed 's/|//;s,^/srv/ftp-master.debian.org/ftp,.,' | sort >$ARCHLIST
includedirs () {
perl -ne 'print; while (m,/[^/]+$,) { $_=$`; print $_ . "\n" unless $d{$_}++; }'
log "Generating suite lists"
suite_list () {
- printf 'SELECT DISTINCT l.path, f.filename FROM (SELECT sa.source AS source FROM src_associations sa WHERE sa.suite = %d UNION SELECT b.source AS source FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) WHERE ba.suite = %d) s JOIN dsc_files df ON (s.source = df.source) JOIN files f ON (df.file = f.id) JOIN location l ON (f.location = l.id)\n' $1 $1 | psql -F' ' -A -t projectb
+ printf 'SELECT DISTINCT l.path, f.filename FROM (SELECT sa.source AS source FROM src_associations sa WHERE sa.suite = %d UNION SELECT b.source AS source FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) WHERE ba.suite = %d) s JOIN dsc_files df ON (s.source = df.source) JOIN files f ON (df.file = f.id) JOIN location l ON (f.location = l.id)\n' $1 $1 | psql -F' ' -A -t
- printf 'SELECT l.path, f.filename FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) JOIN files f ON (b.file = f.id) JOIN location l ON (f.location = l.id) WHERE ba.suite = %d\n' $1 | psql -F' ' -A -t projectb
+ printf 'SELECT l.path, f.filename FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) JOIN files f ON (b.file = f.id) JOIN location l ON (f.location = l.id) WHERE ba.suite = %d\n' $1 | psql -F' ' -A -t
}
- printf 'SELECT id, suite_name FROM suite\n' | psql -F' ' -At projectb |
+ printf 'SELECT id, suite_name FROM suite\n' | psql -F' ' -At |
while read id suite; do
[ -e $base/ftp/dists/$suite ] || continue
(
[ "$(readlink $distdir)" != "$distname" ] || echo $distdir
done
)
- suite_list $id | tr -d ' ' | sed 's,^/srv/ftp.debian.org/ftp,.,'
+ suite_list $id | tr -d ' ' | sed 's,^/srv/ftp-master.debian.org/ftp,.,'
) | sort -u | gzip --rsyncable -9 > suite-${suite}.list.gz
done
done
)
- (cat ../arch-i386.files ../arch-amd64.files; zcat suite-oldstable.list.gz suite-proposed-updates.list.gz ; zcat translation-sid.list.gz ; zcat translation-squeeze.list.gz) |
+ (cat ../arch-i386.files ../arch-amd64.files; zcat suite-proposed-updates.list.gz ; zcat translation-sid.list.gz ; zcat translation-squeeze.list.gz) |
sort -u | poolfirst > ../typical.files
rm -f $ARCHLIST
function mirror() {
log "Regenerating \"public\" mirror/ hardlink fun"
+ # Compute the new archive serial: the larger of a date-based serial
+ # (YYYYMMDD01) and the previous serial + 1, read back from the
+ # "Archive serial: N" line this function itself writes into TRACEFILE
+ # ($3 of that line is N).  The +1 allows more than one run per day.
+ # NOTE(review): on a fresh/missing TRACEFILE, FILESOAPLUS1 is empty and
+ # the numeric [ -gt ] test will error — confirm first-run handling.
+ DATE_SERIAL=$(date +"%Y%m%d01")
+ FILESOAPLUS1=$(awk '/serial/ { print $3+1 }' ${TRACEFILE} )
+ if [ "$DATE_SERIAL" -gt "$FILESOAPLUS1" ]; then
+ SERIAL="$DATE_SERIAL"
+ else
+ SERIAL="$FILESOAPLUS1"
+ fi
+ # Rewrite the mirror trace file: timestamp, software tag, host, serial.
+ date -u > ${TRACEFILE}
+ echo "Using dak v1" >> ${TRACEFILE}
+ echo "Running on host: $(hostname -f)" >> ${TRACEFILE}
+ echo "Archive serial: ${SERIAL}" >> ${TRACEFILE}
cd ${mirrordir}
# Hardlink snapshot of the live ftp tree (-H preserves hardlinks,
# --link-dest avoids duplicating unchanged files).
rsync -aH --link-dest ${ftpdir} --delete --delete-after --ignore-errors ${ftpdir}/. .
}
-function wb() {
- log "Trigger daily wanna-build run"
- wbtrigger "daily"
-}
-
function expire() {
log "Expiring old database dumps..."
cd $base/backup
function merkel2() {
# Push dak@merkel so it syncs the projectb there. Returns immediately, the sync runs detached
- log "Trigger merkel/flotows projectb sync"
+ log "Trigger merkel/flotows $PGDATABASE sync"
ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_projectb dak@merkel.debian.org sleep 1
# Also trigger flotow, the ftpmaster test box
ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_flotow_projectb dak@flotow.debconf.org sleep 1
dak control-suite -l testing > squeeze
dak control-suite -l unstable > sid
echo "${STAMP}" > timestamp
- gpg --secret-keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 55BE302B --detach-sign -o timestamp.gpg timestamp
+ gpg --secret-keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp-master.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 55BE302B --detach-sign -o timestamp.gpg timestamp
rm -f md5sum
md5sum * > md5sum
cd ${webdir}/
do_unchecked
sync_debbugs
}
+
+# do a run of newstage only before dinstall is on.
+# Sets UNCHECKED_WITHOUT_LOCK="-p" (a global read by the queue machinery)
+# and delegates to do_newstage, which is defined elsewhere in this file set.
+function newstage() {
+ log "Processing the newstage queue"
+ UNCHECKED_WITHOUT_LOCK="-p"
+ do_newstage
+}
+
+# Function to update a "statefile" telling people what we are doing
+# (more or less).
+#
+# This should be called with the argument(s)
+# - Status name we want to show.
+#
+# Overwrites ${DINSTALLSTATE} with the dinstall start time
+# (${DINSTALLBEGIN}, set elsewhere), the current action name ($1) and a
+# UTC timestamp including the epoch seconds.
+function state() {
+ RIGHTNOW="$(date -u +"%a %b %d %T %Z %Y (%s)")"
+ cat >"${DINSTALLSTATE}" <<EOF
+Dinstall start: ${DINSTALLBEGIN}
+Current action: ${1}
+Action start: ${RIGHTNOW}
+EOF
+}
+
+# extract changelogs and stuff
+# Runs "dak make-changelog -e" to export changelog data, then mirrors
+# ${exportdir}/changelogs into the public export tree (-W copies whole
+# files instead of using the rsync delta algorithm — both sides are local).
+function changelogs() {
+ log "Extracting changelogs"
+ dak make-changelog -e
+ mkdir -p ${exportpublic}/changelogs
+ cd ${exportpublic}/changelogs
+ rsync -aHW --delete --delete-after --ignore-errors ${exportdir}/changelogs/. .
+}