cd $masterdir
dak make-suite-file-list
+dak generate-filelist
# Generate override files
cd $overridedir
fi
}
+# Trigger a wanna-build ("W-B") run on the buildd host via ssh.
+# Arguments: $1 - trigger mode, "daily" or "often" (default: "often").
+# Globals:   LOCK_BUILDD - lock file serialising concurrent triggers.
+# A "daily" trigger mails ftpmaster on failure; an "often" trigger is quiet.
+function wbtrigger() {
+    MODE=${1:-"often"}
+    SSHOPT="-o BatchMode=yes -o ConnectTimeout=30 -o SetupTimeout=240"
+    # Retry the lock up to 3 times; a stale lock is force-broken after 3600s.
+    if lockfile -r 3 -l 3600 "${LOCK_BUILDD}"; then
+        if [ "x${MODE}x" = "xdailyx" ]; then
+            ssh ${SSHOPT} wbadm@buildd /org/wanna-build/trigger.daily || echo "W-B trigger.daily failed" | mail -s "W-B Daily trigger failed" ftpmaster@ftp-master.debian.org
+        elif [ "x${MODE}x" = "xoftenx" ]; then
+            ssh -q -q ${SSHOPT} wbadm@buildd /org/wanna-build/trigger.often
+        else
+            log_error "Unknown wb trigger mode called"
+        fi
+        # Remove the lock only when we actually acquired it. Previously the
+        # rm ran unconditionally, which could delete a lock held by another
+        # concurrently-running trigger.
+        rm -f "${LOCK_BUILDD}"
+    fi
+}
+
# used by cron.dinstall *and* cron.unchecked.
function make_buildd_dir () {
- cd $configdir
- apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate apt.conf.buildd
-
- cd ${incoming}
- rm -f buildd/Release*
- apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="Debian" -o APT::FTPArchive::Release::Label="Debian" -o APT::FTPArchive::Release::Description="buildd incoming" -o APT::FTPArchive::Release::Architectures="${archs}" release buildd > Release
- gpg --secret-keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 55BE302B --detach-sign -o Release.gpg Release
- mv Release* buildd/.
+ dak manage-build-queues -a
- cd ${incoming}
- mkdir -p tree/${STAMP}
- cp -al ${incoming}/buildd/. tree/${STAMP}/
- ln -sfT tree/${STAMP} ${incoming}/builddweb
- find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
+ cd ${incoming}
+ mkdir -p tree/${STAMP}
+ cp -al ${incoming}/buildd/. tree/${STAMP}/
+ ln -sfT tree/${STAMP} ${incoming}/builddweb
+ find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
}
# Do the unchecked processing, in case we have files.
dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$unchecked" >> $report
}
+# Do the newstage processing, in case we have files.
+# Feeds any .changes files waiting in $newstage to 'dak process-upload'
+# and appends a one-line status plus the tool output to $queuedir/REPORT.
+function do_newstage () {
+    cd $newstage
+
+    # $changes is intentionally NOT local: the caller inspects it after
+    # this function returns to decide whether follow-up work is needed.
+    changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs)
+    report=$queuedir/REPORT
+    timestamp=$(date "+%Y-%m-%d %H:%M")
+    # Ensure the variable is defined even when the caller did not set it.
+    UNCHECKED_WITHOUT_LOCK=${UNCHECKED_WITHOUT_LOCK:-""}
+
+    echo "$timestamp": ${changes:-"Nothing to do in newstage"} >> $report
+    dak process-upload -a ${UNCHECKED_WITHOUT_LOCK} -d "$newstage" >> $report
+}
+
function sync_debbugs () {
# sync with debbugs
echo "--" >> $report
+++ /dev/null
-#! /bin/sh
-#
-# Called from cron.unchecked to update wanna-build, each time it runs.
-#
-ssh -q -q -o BatchMode=yes -o ConnectTimeout=30 -o SetupTimeout=240 wbadm@buildd /org/wanna-build/trigger.often
-exit 0
ARGS=""
ERR=""
)
-stage $GO
+# Disabled until p-u is faster than it is now; it runs often enough, so it
+# won't hurt to save the time here.
+#stage $GO
GO=(
FUNC="cruft"
)
stage $GO
+GO=(
+ FUNC="filelist"
+ TIME="generate-filelist"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
GO=(
FUNC="fingerprints"
TIME="import-keyring"
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
GO=(
FUNC="packages"
ARGS=""
ERR=""
)
+# Careful: if we ever remove this monster-long thing, we have to check the
+# backgrounded functions before it. We would no longer have a 1.5-hour sync point then.
stage $GO
GO=(
ARGS=""
ERR=""
)
-### TODO: clean-* fixup
-#stage $GO
+stage $GO
+
+GO=(
+ FUNC="buildd_dir"
+ TIME="buildd_dir"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="mkmaintainers"
+ TIME="mkmaintainers"
+ ARGS=""
+ ERR=""
+)
+stage $GO
GO=(
- FUNC="buildd"
- TIME="buildd"
+ FUNC="copyoverrides"
+ TIME="copyoverrides"
ARGS=""
ERR=""
)
stage $GO
GO=(
- FUNC="scripts"
- TIME="scripts"
+ FUNC="mklslar"
+ TIME="mklslar"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="mkfilesindices"
+ TIME="mkfilesindices"
+ ARGS=""
+ ERR=""
+)
+stage $GO
+
+GO=(
+ FUNC="mkchecksums"
+ TIME="mkchecksums"
ARGS=""
ERR=""
)
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
GO=(
FUNC="i18n2"
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
GO=(
FUNC="stats"
ARGS=""
ERR="false"
)
-stage $GO
+stage $GO &
-rm -f ${LOCK_BRITNEY}
+rm -f "${LOCK_BRITNEY}"
GO=(
FUNC="pgdakdev"
echo "Using dak v1" >> $ftpdir/project/trace/ftp-master.debian.org
echo "Running on host: $(hostname -f)" >> $ftpdir/project/trace/ftp-master.debian.org
dak import-users-from-passwd
-#dak queue-report -n > $webdir/new.html
-#dak queue-report -8 -d accepted,new,byhand,proposedupdates,oldproposedupdates
+dak queue-report -n > $webdir/new.html
+# We used to have accepted in here, but it doesn't exist in that form any more
+dak queue-report -8 -d new,byhand,proposedupdates,oldproposedupdates
dak show-deferred > ${webdir}/deferred.html
#cd $queuedir/new ; dak show-new *.changes > /dev/null
$base/dak/tools/queue_rss.py -q $queuedir/new -o $webdir/rss/ -d $base/misc
LOCKFILE="$lockdir/unchecked.lock"
LOCK_NEW="$lockdir/processnew.lock"
NOTICE="$lockdir/daily.lock"
+LOCK_BUILDD="$lockdir/buildd.lock"
# our name
PROGRAM="unchecked"
function do_buildd () {
if lockfile -r3 $NOTICE; then
LOCKDAILY="YES"
- psql projectb -A -t -q -c "SELECT build_queue.path || '/' || build_queue_files.filename FROM build_queue_files LEFT JOIN build_queue ON (build_queue.id = build_queue_files.build_queue_id) WHERE queue_name = 'buildd' AND filename ~ 'd(sc|eb)$';" > $dbdir/dists/unstable_accepted.list
cd $overridedir
dak make-overrides &>/dev/null
rm -f override.sid.all3 override.sid.all3.src
fi
done
make_buildd_dir
-
- . $configdir/cron.buildd
+ wbtrigger "often"
fi
}
# the actual unchecked functions follow #
########################################################################
+# And use one locale, no matter what the caller has set
+export LANG=C
+export LC_ALL=C
+
#lockfile -r3 "$LOCK_NEW"
# acceptnew
#rm -f "$LOCK_NEW"
lockfile -r3 $LOCKFILE || exit 0
trap cleanup 0
+do_newstage
do_unchecked
if [ ! -z "$changes" ]; then
UrgencyLog "/srv/release.debian.org/britney/input/urgencies/";
Queue
{
- Accepted "/srv/ftp.debian.org/queue/accepted/";
Byhand "/srv/ftp.debian.org/queue/byhand/";
ProposedUpdates "/srv/ftp.debian.org/queue/p-u-new/";
OldProposedUpdates "/srv/ftp.debian.org/queue/o-p-u-new/";
dak make-suite-file-list
}
+# Regenerate the per-suite file lists consumed by apt-ftparchive
+# (replacement for the old 'dak make-suite-file-list' output).
+function filelist() {
+    log "Generating file lists for apt-ftparchive"
+    dak generate-filelist
+}
+
function fingerprints() {
log "Not updating fingerprints - scripts needs checking"
-# log "Updating fingerprints"
-# dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg
-
-# OUTFILE=$(mktemp)
-# dak import-keyring --generate-users "%s" /srv/keyring.debian.org/keyrings/debian-maintainers.gpg >"${OUTFILE}"
-
-# if [ -s "${OUTFILE}" ]; then
-# /usr/sbin/sendmail -odq -oi -t -f envelope@ftp-master.debian.org <<EOF
-#From: Debian FTP Masters <ftpmaster@ftp-master.debian.org>
-#To: <debian-project@lists.debian.org>
-#Subject: Debian Maintainers Keyring changes
-#Content-Type: text/plain; charset=utf-8
-#MIME-Version: 1.0
-#
-#The following changes to the debian-maintainers keyring have just been activated:
-#
-#$(cat $OUTFILE)
-#
-#Debian distribution maintenance software,
-#on behalf of the Keyring maintainers
-#
-#EOF
-# fi
-# rm -f "$OUTFILE"
+ log "Updating fingerprints"
+ dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg
+
+ OUTFILE=$(mktemp)
+ dak import-keyring --generate-users "%s" /srv/keyring.debian.org/keyrings/debian-maintainers.gpg >"${OUTFILE}"
+
+ if [ -s "${OUTFILE}" ]; then
+ /usr/sbin/sendmail -odq -oi -t -f envelope@ftp-master.debian.org <<EOF
+From: Debian FTP Masters <ftpmaster@ftp-master.debian.org>
+To: <debian-project@lists.debian.org>
+Subject: Debian Maintainers Keyring changes
+Content-Type: text/plain; charset=utf-8
+MIME-Version: 1.0
+
+The following changes to the debian-maintainers keyring have just been activated:
+
+$(cat $OUTFILE)
+
+Debian distribution maintenance software,
+on behalf of the Keyring maintainers
+
+EOF
+ fi
+ rm -f "$OUTFILE"
}
function overrides() {
dak clean-queues
}
-function buildd() {
- # Needs to be rebuilt, as files have moved. Due to unaccepts, we need to
- # update this before wanna-build is updated.
- log "Regenerating wanna-build/buildd information"
- psql projectb -A -t -q -c "SELECT build_queue.path || '/' || build_queue_files.filename FROM build_queue_files LEFT JOIN build_queue ON (build_queue.id =build_queue_files.build_queue_id) WHERE queue_name = 'buildd' AND filename ~ 'd(sc|eb)$'" > $dbdir/dists/unstable_accepted.list
- symlinks -d /srv/incoming.debian.org/buildd > /dev/null
- apt-ftparchive generate apt.conf.buildd
-}
-
function buildd_dir() {
# Rebuilt the buildd dir to avoid long times of 403
log "Regenerating the buildd incoming dir"
}
function mkmaintainers() {
- log -n 'Creating Maintainers index ... '
+ log 'Creating Maintainers index ... '
cd $indices
dak make-maintainers ${scriptdir}/masterfiles/pseudo-packages.maintainers | \
- sed -e "s/~[^ ]*\([ ]\)/\1/" | awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}' > .new-maintainers
-
- set +e
- cmp .new-maintainers Maintainers >/dev/null
- rc=$?
- set -e
- if [ $rc = 1 ] || [ ! -f Maintainers ] ; then
- log -n "installing Maintainers ... "
+ sed -e "s/~[^ ]*\([ ]\)/\1/" | \
+ awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}' > .new-maintainers
+
+ if ! cmp -s .new-maintainers Maintainers || [ ! -f Maintainers ]; then
+ log "installing Maintainers ... "
mv -f .new-maintainers Maintainers
gzip --rsyncable -9v <Maintainers >.new-maintainers.gz
mv -f .new-maintainers.gz Maintainers.gz
- elif [ $rc = 0 ] ; then
- log '(same as before)'
- rm -f .new-maintainers
else
- log cmp returned $rc
- false
+ rm -f .new-maintainers
fi
}
function copyoverrides() {
log 'Copying override files into public view ...'
- for f in $copyoverrides ; do
+ for ofile in $copyoverrides ; do
cd $overridedir
- chmod g+w override.$f
+ chmod g+w override.$ofile
cd $indices
- rm -f .newover-$f.gz
- pc="`gzip 2>&1 -9nv <$overridedir/override.$f >.newover-$f.gz`"
- set +e
- nf=override.$f.gz
- cmp -s .newover-$f.gz $nf
- rc=$?
- set -e
- if [ $rc = 0 ]; then
- rm -f .newover-$f.gz
- elif [ $rc = 1 -o ! -f $nf ]; then
- log " installing new $nf $pc"
- mv -f .newover-$f.gz $nf
- chmod g+w $nf
- else
- log $? $pc
- exit 1
+
+ newofile=override.$ofile.gz
+ rm -f .newover-$ofile.gz
+ pc="`gzip 2>&1 -9nv <$overridedir/override.$ofile >.newover-$ofile.gz`"
+ if ! cmp -s .newover-$ofile.gz $newofile || [ ! -f $newofile ]; then
+ log " installing new $newofile $pc"
+ mv -f .newover-$ofile.gz $newofile
+ chmod g+w $newofile
+ else
+ rm -f .newover-$ofile.gz
fi
done
}
perl -e '@nonpool=(); while (<>) { if (m,^\./pool/,) { print; } else { push @nonpool, $_; } } print for (@nonpool);'
}
- log "Generating sources list
+ log "Generating sources list"
(
sed -n 's/|$//p' $ARCHLIST
cd $base/ftp
find ./dists \! -type d | grep "/source/"
) | sort -u | gzip --rsyncable -9 > source.list.gz
- log "Generating arch lists
+ log "Generating arch lists"
ARCHES=$( (<$ARCHLIST sed -n 's/^.*|//p'; echo amd64) | grep . | grep -v all | sort -u)
for a in $ARCHES; do
done
log "Finding everything on the ftp site to generate sundries"
-
(cd $base/ftp; find . \! -type d \! -name 'Archive_Maintenance_In_Progress' | sort) >$ARCHLIST
rm -f sundries.list
${bindir}/dsync-flist -q link-dups $dsynclist || true
}
-function scripts() {
- log "Running various scripts from $scriptsdir"
- mkmaintainers
- copyoverrides
- mklslar
- mkfilesindices
- mkchecksums
-}
-
function mirror() {
log "Regenerating \"public\" mirror/ hardlink fun"
cd ${mirrordir}
function wb() {
log "Trigger daily wanna-build run"
- ssh -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 wbadm@buildd /org/wanna-build/trigger.daily || echo "W-B trigger.daily failed" | mail -s "W-B Daily trigger failed" ftpmaster@ftp-master.debian.org
+ wbtrigger "daily"
}
function expire() {
# If this file exists we exit immediately after the currently running
# function is done
LOCK_STOP="$lockdir/archive.stop"
+
+# Lock buildd updates
+LOCK_BUILDD="$lockdir/buildd.lock"
- copyright-contains-dh_make-todo-boilerplate
- preinst-interpreter-without-predepends
- control-interpreter-without-depends
+ - dir-or-file-in-var-www
+ - wrong-file-owner-uid-or-gid
fatal:
- debian-control-file-uses-obsolete-national-encoding
- malformed-deb-archive
- forbidden-postrm-interpreter
- control-interpreter-in-usr-local
- package-uses-local-diversion
- - wrong-file-owner-uid-or-gid
- bad-relation
- FSSTND-dir-in-usr
- FSSTND-dir-in-var
- package-installs-python-pyc
- library-in-debug-or-profile-should-not-be-stripped
- binary-file-compressed-with-upx
- - html-changelog-without-text-version
- file-in-usr-marked-as-conffile
- build-info-in-binary-control-file-section
- debian-control-with-duplicate-fields
- package-not-lowercase
- no-version-field
- bad-version-number
- - upstream-version-not-numeric
- no-architecture-field
- magic-arch-in-arch-list
- too-many-architectures
- uploader-address-is-on-localhost
- no-source-field
- source-field-does-not-match-pkg-name
- - build-depends-on-essential-package-without-using-version
- - depends-on-build-essential-package-without-using-version
- - build-depends-on-build-essential
- - executable-in-usr-share-doc
- symlink-has-too-many-up-segments
- - debian-rules-is-symlink
- debian-rules-not-a-makefile
- debian-rules-missing-required-target
- maintainer-script-removes-device-files
- - no-standards-version-field
- - invalid-standards-version
- - dir-or-file-in-var-www
- dir-or-file-in-tmp
- dir-or-file-in-mnt
- dir-or-file-in-opt
################################################################################
-def gen_blacklist(dir):
- for entry in os.listdir(dir):
- entry = entry.split('_')[0]
- blacklist[entry] = 1
-
def process(osuite, affected_suites, originosuite, component, otype, session):
global Logger, Options, sections, priorities
else:
Logger = daklog.Logger(cnf, "check-overrides", 1)
- gen_blacklist(cnf["Dir::Queue::Accepted"])
-
for osuite in cnf.SubTree("Check-Overrides::OverrideSuites").List():
if "1" != cnf["Check-Overrides::OverrideSuites::%s::Process" % osuite]:
continue
# deletion.
q = session.execute("""
-SELECT b.file, f.filename FROM binaries b, files f
- WHERE f.last_used IS NULL AND b.file = f.id
- AND NOT EXISTS (SELECT 1 FROM bin_associations ba WHERE ba.bin = b.id)""")
-
+SELECT b.file, f.filename
+ FROM binaries b
+ LEFT JOIN files f
+ ON (b.file = f.id)
+ WHERE f.last_used IS NULL
+ AND b.id NOT IN
+ (SELECT ba.bin FROM bin_associations ba)
+ AND f.id NOT IN
+ (SELECT bqf.fileid FROM build_queue_files bqf)""")
for i in q.fetchall():
Logger.log(["set lastused", i[1]])
- session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid AND last_used IS NULL",
- {'lastused': now_date, 'fileid': i[0]})
- session.commit()
+ if not Options["No-Action"]:
+ session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid AND last_used IS NULL",
+ {'lastused': now_date, 'fileid': i[0]})
+
+ if not Options["No-Action"]:
+ session.commit()
# Check for any binaries which are marked for eventual deletion
# but are now used again.
q = session.execute("""
-SELECT b.file, f.filename FROM binaries b, files f
- WHERE f.last_used IS NOT NULL AND f.id = b.file
- AND EXISTS (SELECT 1 FROM bin_associations ba WHERE ba.bin = b.id)""")
+SELECT b.file, f.filename
+ FROM binaries b
+ LEFT JOIN files f
+ ON (b.file = f.id)
+ WHERE f.last_used IS NOT NULL
+ AND (b.id IN
+ (SELECT ba.bin FROM bin_associations ba)
+ OR f.id IN
+ (SELECT bqf.fileid FROM build_queue_files bqf))""")
for i in q.fetchall():
Logger.log(["unset lastused", i[1]])
- session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid", {'fileid': i[0]})
- session.commit()
+ if not Options["No-Action"]:
+ session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid", {'fileid': i[0]})
+
+ if not Options["No-Action"]:
+ session.commit()
########################################
# Get the list of source packages not in a suite and not used by
# any binaries.
q = session.execute("""
-SELECT s.id, s.file, f.filename FROM source s, files f
- WHERE f.last_used IS NULL AND s.file = f.id
- AND NOT EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id)
- AND NOT EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id)""")
+SELECT s.id, s.file, f.filename
+ FROM source s
+ LEFT JOIN files f
+ ON (s.file = f.id)
+ WHERE f.last_used IS NULL
+ AND s.id NOT IN
+ (SELECT sa.source FROM src_associations sa)
+ AND s.id NOT IN
+ (SELECT b.source FROM binaries b)
+ AND f.id NOT IN
+ (SELECT bqf.fileid FROM build_queue_files bqf)""")
#### XXX: this should ignore cases where the files for the binary b
#### have been marked for deletion (so the delay between bins go
# Mark the .dsc file for deletion
Logger.log(["set lastused", dsc_fname])
- session.execute("""UPDATE files SET last_used = :last_used
- WHERE id = :dscfileid AND last_used IS NULL""",
- {'last_used': now_date, 'dscfileid': dsc_file_id})
+ if not Options["No-Action"]:
+ session.execute("""UPDATE files SET last_used = :last_used
+ WHERE id = :dscfileid AND last_used IS NULL""",
+ {'last_used': now_date, 'dscfileid': dsc_file_id})
# Mark all other files references by .dsc too if they're not used by anyone else
x = session.execute("""SELECT f.id, f.filename FROM files f, dsc_files d
y = session.execute("SELECT id FROM dsc_files d WHERE d.file = :fileid", {'fileid': file_id})
if len(y.fetchall()) == 1:
Logger.log(["set lastused", file_name])
- session.execute("""UPDATE files SET last_used = :lastused
- WHERE id = :fileid AND last_used IS NULL""",
- {'lastused': now_date, 'fileid': file_id})
+ if not Options["No-Action"]:
+ session.execute("""UPDATE files SET last_used = :lastused
+ WHERE id = :fileid AND last_used IS NULL""",
+ {'lastused': now_date, 'fileid': file_id})
- session.commit()
+ if not Options["No-Action"]:
+ session.commit()
# Check for any sources which are marked for deletion but which
# are now used again.
-
q = session.execute("""
SELECT f.id, f.filename FROM source s, files f, dsc_files df
WHERE f.last_used IS NOT NULL AND s.id = df.source AND df.file = f.id
AND ((EXISTS (SELECT 1 FROM src_associations sa WHERE sa.source = s.id))
- OR (EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id)))""")
+ OR (EXISTS (SELECT 1 FROM binaries b WHERE b.source = s.id))
+ OR (EXISTS (SELECT 1 FROM build_queue_files bqf WHERE bqf.fileid = s.file)))""")
#### XXX: this should also handle deleted binaries specially (ie, not
#### reinstate sources because of them
for i in q.fetchall():
Logger.log(["unset lastused", i[1]])
- session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid",
- {'fileid': i[0]})
+ if not Options["No-Action"]:
+ session.execute("UPDATE files SET last_used = NULL WHERE id = :fileid",
+ {'fileid': i[0]})
- session.commit()
+ if not Options["No-Action"]:
+ session.commit()
########################################
WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id)
AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id)
AND NOT EXISTS (SELECT 1 FROM changes_pool_files cpf WHERE cpf.fileid = f.id)
- AND NOT EXISTS (SELECT 1 FROM queue_files qf WHERE qf.id = f.id)
+ AND NOT EXISTS (SELECT 1 FROM build_queue_files qf WHERE qf.fileid = f.id)
AND last_used IS NULL
ORDER BY filename""")
for x in ql:
utils.warn("orphaned file: %s" % x)
Logger.log(["set lastused", x[1], "ORPHANED FILE"])
- session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid",
- {'lastused': now_date, 'fileid': x[0]})
+ if not Options["No-Action"]:
+ session.execute("UPDATE files SET last_used = :lastused WHERE id = :fileid",
+ {'lastused': now_date, 'fileid': x[0]})
- session.commit()
+ if not Options["No-Action"]:
+ session.commit()
def clean_binaries(now_date, delete_date, max_delete, session):
# We do this here so that the binaries we remove will have their
cur_date = now_date.strftime("%Y-%m-%d")
dest = os.path.join(cnf["Dir::Morgue"], cnf["Clean-Suites::MorgueSubDir"], cur_date)
- if not os.path.exists(dest):
+ if not Options["No-Action"] and not os.path.exists(dest):
os.mkdir(dest)
# Delete from source
################################################################################
-def clean_queue_build(now_date, delete_date, max_delete, session):
-
- cnf = Config()
-
- if not cnf.ValueList("Dinstall::QueueBuildSuites") or Options["No-Action"]:
- return
-
- print "Cleaning out queue build symlinks..."
-
- our_delete_date = now_date - timedelta(seconds = int(cnf["Clean-Suites::QueueBuildStayOfExecution"]))
- count = 0
-
- for qf in session.query(BuildQueueFile).filter(BuildQueueFile.last_used <= our_delete_date):
- if not os.path.exists(qf.filename):
- utils.warn("%s (from queue_build) doesn't exist." % (qf.filename))
- continue
-
- if not cnf.FindB("Dinstall::SecurityQueueBuild") and not os.path.islink(qf.filename):
- utils.fubar("%s (from queue_build) should be a symlink but isn't." % (qf.filename))
-
- Logger.log(["delete queue build", qf.filename])
- if not Options["No-Action"]:
- os.unlink(qf.filename)
- session.delete(qf)
- count += 1
-
- if not Options["No-Action"]:
- session.commit()
-
- if count:
- Logger.log(["total", count])
- print "Cleaned %d queue_build files." % (count)
-
-################################################################################
-
def clean_empty_directories(session):
"""
Removes empty directories from pool directories.
"""
+ print "Cleaning out empty directories..."
+
count = 0
cursor = session.execute(
clean(now_date, delete_date, max_delete, session)
clean_maintainers(now_date, delete_date, max_delete, session)
clean_fingerprints(now_date, delete_date, max_delete, session)
- clean_queue_build(now_date, delete_date, max_delete, session)
clean_empty_directories(session)
Logger.close()
"Generate .diff/Index files"),
("clean-suites",
"Clean unused/superseded packages from the archive"),
+ ("manage-build-queues",
+ "Clean and update metadata for build queues"),
("clean-queues",
"Clean cruft from incoming"),
("clean-proposed-updates",
--- /dev/null
+#!/usr/bin/env python
+
+"""
+Add some meta info to queues
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Mark Hymers <mhy@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import psycopg2
+
+def do_update(self):
+    """Schema migration: add metadata columns to both policy_queue and
+    build_queue (generate_metadata flag, Release-style origin/label/
+    description fields, signing key, stay_of_execution) plus a sanity
+    constraint, then bump db_revision to 24.  Rolls back on DB errors."""
+    print "Add meta info columns to queues."
+
+    try:
+        c = self.db.cursor()
+
+        c.execute("ALTER TABLE policy_queue ADD COLUMN generate_metadata BOOL DEFAULT FALSE NOT NULL")
+        c.execute("ALTER TABLE policy_queue ADD COLUMN origin TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE policy_queue ADD COLUMN label TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE policy_queue ADD COLUMN releasedescription TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE policy_queue ADD COLUMN signingkey TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE policy_queue ADD COLUMN stay_of_execution INT4 NOT NULL DEFAULT 86400 CHECK (stay_of_execution >= 0)")
+        # When metadata generation is enabled, all Release fields must be set.
+        c.execute("""ALTER TABLE policy_queue
+                        ADD CONSTRAINT policy_queue_meta_sanity_check
+                            CHECK ( (generate_metadata IS FALSE)
+                                 OR (origin IS NOT NULL AND label IS NOT NULL AND releasedescription IS NOT NULL) )""")
+
+        # Same set of columns and the same sanity constraint for build queues.
+        c.execute("ALTER TABLE build_queue ADD COLUMN generate_metadata BOOL DEFAULT FALSE NOT NULL")
+        c.execute("ALTER TABLE build_queue ADD COLUMN origin TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE build_queue ADD COLUMN label TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE build_queue ADD COLUMN releasedescription TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE build_queue ADD COLUMN signingkey TEXT DEFAULT NULL")
+        c.execute("ALTER TABLE build_queue ADD COLUMN stay_of_execution INT4 NOT NULL DEFAULT 86400 CHECK (stay_of_execution >= 0)")
+        c.execute("""ALTER TABLE build_queue
+                        ADD CONSTRAINT build_queue_meta_sanity_check
+                            CHECK ( (generate_metadata IS FALSE)
+                                 OR (origin IS NOT NULL AND label IS NOT NULL AND releasedescription IS NOT NULL) )""")
+
+        print "Committing"
+        c.execute("UPDATE config SET value = '24' WHERE name = 'db_revision'")
+        self.db.commit()
+
+    except psycopg2.InternalError, msg:
+        self.db.rollback()
+        # NOTE(review): DBUpdateError is not imported in this hunk — confirm
+        # the file imports it (daklib.dak_exceptions), otherwise this raise
+        # would itself fail with a NameError.
+        raise DBUpdateError, "Database error, rollback issued. Error message : %s" % (str(msg))
+
SELECT path, filename
FROM srcfiles_suite_component
WHERE suite = :suite AND component = :component
+ ORDER BY filename
"""
args = { 'suite': suite.suite_id,
'component': component.component_id }
FROM binfiles_suite_component_arch
WHERE suite = :suite AND component = :component AND type = :type AND
(architecture = :architecture OR architecture = 2)
+ ORDER BY filename
"""
args = { 'suite': suite.suite_id,
'component': component.component_id,
def run(self):
cnf = Config()
count = 1
- for directory in [ "Accepted", "Byhand", "Done", "New", "ProposedUpdates", "OldProposedUpdates" ]:
+ for directory in [ "Byhand", "Done", "New", "ProposedUpdates", "OldProposedUpdates" ]:
checkdir = cnf["Dir::Queue::%s" % (directory) ]
if os.path.exists(checkdir):
print "Looking into %s" % (checkdir)
changesfile = os.path.join(to_import.dirpath, to_import.changesfile)
changes.changes = parse_changes(changesfile, signing_rules=-1)
changes.changes["fingerprint"] = check_signature(changesfile)
- changes.add_known_changes(to_import.dirpath, self.session)
+ changes.add_known_changes(to_import.dirpath, session=self.session)
self.session.commit()
except InvalidDscError, line:
--- /dev/null
+#!/usr/bin/env python
+# coding=utf8
+
+"""
+Import known_changes files
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2009 Mike O'Connor <stew@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+
+################################################################################
+
+import sys
+import os
+import logging
+import threading
+import glob
+import apt_pkg
+from daklib.dbconn import DBConn, get_dbchange, get_policy_queue, session_wrapper, ChangePendingFile, get_location, check_poolfile
+from daklib.config import Config
+from daklib.queue import Upload
+from daklib.utils import poolify
+
+# where in dak.conf all of our configuration will be stowed
+options_prefix = "NewFiles"
+options_prefix = "%s::Options" % options_prefix
+
+log = logging.getLogger()
+
+################################################################################
+
+
+def usage (exit_code=0):
+    # Print command-line help for 'dak import-new-files' and exit with the
+    # given status code (0 for -h/--help, non-zero on bad invocation).
+    print """Usage: dak import-new-files [options]
+
+OPTIONS
+     -v, --verbose
+        show verbose information messages
+
+     -q, --quiet
+        supress all output but errors
+
+"""
+    sys.exit(exit_code)
+
+class ImportNewFiles(object):
+ @session_wrapper
+ def __init__(self, session=None):
+ cnf = Config()
+ try:
+ newq = get_policy_queue('new', session)
+ for changes_fn in glob.glob(newq.path + "/*.changes"):
+ changes_bn = os.path.basename(changes_fn)
+ chg = get_dbchange(changes_bn, session)
+
+ u = Upload()
+ success = u.load_changes(changes_fn)
+ u.pkg.changes_file = changes_bn
+ u.check_hashes()
+
+ if not chg:
+ chg = u.pkg.add_known_changes(newq.path, newq.policy_queue_id, session)
+ session.add(chg)
+
+ if not success:
+ log.critical("failed to load %s" % changes_fn)
+ sys.exit(1)
+ else:
+ log.critical("ACCLAIM: %s" % changes_fn)
+
+ files=[]
+ for chg_fn in u.pkg.files.keys():
+ try:
+ f = open(os.path.join(newq.path, chg_fn))
+ cpf = ChangePendingFile()
+ cpf.filename = chg_fn
+ cpf.size = u.pkg.files[chg_fn]['size']
+ cpf.md5sum = u.pkg.files[chg_fn]['md5sum']
+
+ if u.pkg.files[chg_fn].has_key('sha1sum'):
+ cpf.sha1sum = u.pkg.files[chg_fn]['sha1sum']
+ else:
+ log.warning("Having to generate sha1sum for %s" % chg_fn)
+ f.seek(0)
+ cpf.sha1sum = apt_pkg.sha1sum(f)
+
+ if u.pkg.files[chg_fn].has_key('sha256sum'):
+ cpf.sha256sum = u.pkg.files[chg_fn]['sha256sum']
+ else:
+ log.warning("Having to generate sha256sum for %s" % chg_fn)
+ f.seek(0)
+ cpf.sha256sum = apt_pkg.sha256sum(f)
+
+ session.add(cpf)
+ files.append(cpf)
+ f.close()
+ except IOError:
+ # Can't find the file, try to look it up in the pool
+ poolname = poolify(u.pkg.changes["source"], u.pkg.files[chg_fn]["component"])
+ l = get_location(cnf["Dir::Pool"], u.pkg.files[chg_fn]["component"], session=session)
+ if not l:
+ log.critical("ERROR: Can't find location for %s (component %s)" % (chg_fn, u.pkg.files[chg_fn]["component"]))
+
+ found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
+ u.pkg.files[chg_fn]['size'],
+ u.pkg.files[chg_fn]["md5sum"],
+ l.location_id,
+ session=session)
+
+ if found is None:
+ log.critical("ERROR: Found multiple files for %s in pool" % chg_fn)
+ sys.exit(1)
+ elif found is False and poolfile is not None:
+ log.critical("ERROR: md5sum / size mismatch for %s in pool" % chg_fn)
+ sys.exit(1)
+ else:
+ if poolfile is None:
+ log.critical("ERROR: Could not find %s in pool" % chg_fn)
+ sys.exit(1)
+ else:
+ chg.poolfiles.append(poolfile)
+
+
+ chg.files = files
+
+
+ session.commit()
+
+ except KeyboardInterrupt:
+ print("Caught C-c; terminating.")
+ utils.warn("Caught C-c; terminating.")
+ self.plsDie()
+
+
+def main():
+ cnf = Config()
+
+ arguments = [('h',"help", "%s::%s" % (options_prefix,"Help")),
+ ('q',"quiet", "%s::%s" % (options_prefix,"Quiet")),
+ ('v',"verbose", "%s::%s" % (options_prefix,"Verbose")),
+ ]
+
+ args = apt_pkg.ParseCommandLine(cnf.Cnf, arguments,sys.argv)
+
+ num_threads = 1
+
+ if len(args) > 0:
+ usage(1)
+
+ if cnf.has_key("%s::%s" % (options_prefix,"Help")):
+ usage(0)
+
+ level=logging.INFO
+ if cnf.has_key("%s::%s" % (options_prefix,"Quiet")):
+ level=logging.ERROR
+
+ elif cnf.has_key("%s::%s" % (options_prefix,"Verbose")):
+ level=logging.DEBUG
+
+
+ logging.basicConfig( level=level,
+ format='%(asctime)s %(levelname)s %(message)s',
+ stream = sys.stderr )
+
+ ImportNewFiles()
+
+
+if __name__ == '__main__':
+ main()
suite=suite, filetype = filetype)
cleanup(packages, session)
session.commit()
- write_filelists(packages, dislocated_files, session)
+
+ # has been replaced by 'dak generate-filelist':
+ #write_filelists(packages, dislocated_files, session)
################################################################################
--- /dev/null
+#!/usr/bin/env python
+
+"""Manage build queues"""
+# Copyright (C) 2000, 2001, 2002, 2006 James Troup <james@nocrew.org>
+# Copyright (C) 2009 Mark Hymers <mhy@debian.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import os, os.path, stat, sys
+from datetime import datetime
+import apt_pkg
+
+from daklib import daklog
+from daklib.dbconn import *
+from daklib.config import Config
+
+################################################################################
+
+Options = None
+Logger = None
+
+################################################################################
+
+def usage (exit_code=0):
+    # Print usage information for 'dak manage-build-queues' and exit with
+    # the given status code.
+    print """Usage: dak manage-build-queues [OPTIONS] buildqueue1 buildqueue2
+Manage the contents of one or more build queues
+
+  -a, --all                run on all known build queues
+  -n, --no-action          don't do anything
+  -h, --help               show this help and exit"""
+
+    sys.exit(exit_code)
+
+################################################################################
+
+def main ():
+    """Entry point for 'dak manage-build-queues': resolve the build queues
+    named on the command line (or all of them with -a) and run
+    clean_and_update() on each, honouring -n/--no-action as a dry run."""
+    global Options, Logger
+
+    cnf = Config()
+
+    # Make sure every option key exists so SubTree lookups cannot fail.
+    for i in ["Help", "No-Action", "All"]:
+        if not cnf.has_key("Manage-Build-Queues::Options::%s" % (i)):
+            cnf["Manage-Build-Queues::Options::%s" % (i)] = ""
+
+    Arguments = [('h',"help","Manage-Build-Queues::Options::Help"),
+                 ('n',"no-action","Manage-Build-Queues::Options::No-Action"),
+                 ('a',"all","Manage-Build-Queues::Options::All")]
+
+    queue_names = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
+    Options = cnf.SubTree("Manage-Build-Queues::Options")
+
+    if Options["Help"]:
+        usage()
+
+    Logger = daklog.Logger(cnf, 'manage-build-queues', Options['No-Action'])
+
+    # One timestamp for the whole run so every queue is cleaned against the
+    # same cut-off.
+    starttime = datetime.now()
+
+    session = DBConn().session()
+
+    if Options["All"]:
+        # -a is exclusive with explicit queue names.
+        if len(queue_names) != 0:
+            print "E: Cannot use both -a and a queue_name"
+            sys.exit(1)
+        queues = session.query(BuildQueue).all()
+
+    else:
+        # Look up each named queue; unknown names are logged and skipped.
+        queues = []
+        for q in queue_names:
+            queue = get_build_queue(q.lower(), session)
+            if queue:
+                queues.append(queue)
+            else:
+                Logger.log(['cannot find queue %s' % q])
+
+    # For each given queue, look up object and call manage_queue
+    for q in queues:
+        Logger.log(['cleaning queue %s using datetime %s' % (q.queue_name, starttime)])
+        q.clean_and_update(starttime, Logger, dryrun=Options["No-Action"])
+
+    Logger.close()
# 3. run dak make-suite-file-list / apt-ftparchve / dak generate-releases
print "Updating file lists for apt-ftparchive..."
spawn("dak make-suite-file-list")
+ spawn("dak generate-filelist")
print "Updating Packages and Sources files..."
spawn("/org/security.debian.org/dak/config/debian-security/map.sh")
spawn("apt-ftparchive generate %s" % (utils.which_apt_conf_file()))
from daklib.queue import *
from daklib import daklog
from daklib import utils
-from daklib.regexes import re_no_epoch, re_default_answer, re_isanum
+from daklib.regexes import re_no_epoch, re_default_answer, re_isanum, re_package
from daklib.dak_exceptions import CantOpenError, AlreadyLockedError, CantGetLockError
from daklib.summarystats import SummaryStats
from daklib.config import Config
################################################################################
def recheck(upload, session):
- upload.recheck()
+# STU: I'm not sure, but I don't think this is necessary any longer: upload.recheck(session)
if len(upload.rejects) > 0:
answer = "XXX"
if Options["No-Action"] or Options["Automatic"] or Options["Trainee"]:
if answer == 'R':
upload.do_reject(manual=0, reject_message='\n'.join(upload.rejects))
- os.unlink(upload.pkg.changes_file[:-8]+".dak")
return 0
elif answer == 'S':
return 0
for filename in changes_files:
u = Upload()
try:
- u.pkg.load_dot_dak(filename)
+ u.pkg.changes_file = filename
+ u.load_changes(filename)
u.update_subst()
cache[filename] = copy.copy(u.pkg.changes)
cache[filename]["filename"] = filename
def do_new(upload, session):
print "NEW\n"
files = upload.pkg.files
+ upload.check_files(not Options["No-Action"])
changes = upload.pkg.changes
cnf = Config()
try:
check_daily_lock()
done = add_overrides (new, upload, session)
+ do_accept(upload, session)
Logger.log(["NEW ACCEPT: %s" % (upload.pkg.changes_file)])
except CantGetLockError:
print "Hello? Operator! Give me the number for 911!"
note=get_new_comments(changes.get("source", ""), session=session))
if not aborted:
Logger.log(["NEW REJECT: %s" % (upload.pkg.changes_file)])
- os.unlink(upload.pkg.changes_file[:-8]+".dak")
done = 1
elif answer == 'N':
edit_note(get_new_comments(changes.get("source", ""), session=session),
elif answer == 'M':
Logger.log(["BYHAND REJECT: %s" % (upload.pkg.changes_file)])
upload.do_reject(manual=1, reject_message=Options["Manual-Reject"])
- os.unlink(upload.pkg.changes_file[:-8]+".dak")
done = 1
elif answer == 'S':
done = 1
finally:
os.unlink(path)
-def _accept(upload):
+class clean_holding(object):
+ # Context manager that removes an upload's files from the holding
+ # directory when the 'with' block exits, whether it completed normally
+ # or raised. __exit__ returns None, so exceptions still propagate.
+ def __init__(self,pkg):
+ self.pkg = pkg
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, type, value, traceback):
+ h = Holding()
+
+ # Only unlink files that actually made it into the holding dir.
+ for f in self.pkg.files.keys():
+ if os.path.exists(os.path.join(h.holding_dir, f)):
+ os.unlink(os.path.join(h.holding_dir, f))
+
+
+
+
+def changes_to_newstage(upload, session):
+ """Move an upload's .changes file and its pending files from the NEW
+ policy queue to the newstage queue, updating the DBChange row's
+ approved_for/in_queue pointers. Commits the session."""
+ new = get_policy_queue('new', session );
+ newstage = get_policy_queue('newstage', session );
+
+ chg = session.query(DBChange).filter_by(changesname=os.path.basename(upload.pkg.changes_file)).one()
+ chg.approved_for = newstage.policy_queue_id
+
+ for f in chg.files:
+ # update the changes_pending_files row
+ f.queue = newstage
+ utils.move(os.path.join(new.path, f.filename), newstage.path, perms=int(newstage.perms, 8))
+
+ # NOTE(review): changes_file is joined to new.path as-is here, while the
+ # DB lookup above basenames it -- confirm changes_file is a bare filename.
+ utils.move(os.path.join(new.path, upload.pkg.changes_file), newstage.path, perms=int(newstage.perms, 8))
+ chg.in_queue = newstage
+ session.commit()
+
+# Accept an upload out of NEW by moving it to the newstage policy queue
+# (replaces the old direct upload.accept() path, kept commented below).
+# No-op when running with --no-action.
+def _accept(upload, session):
 if Options["No-Action"]:
 return
 (summary, short_summary) = upload.build_summaries()
- upload.accept(summary, short_summary, targetqueue)
- os.unlink(upload.pkg.changes_file[:-8]+".dak")
+ # upload.accept(summary, short_summary, targetqueue)
+
+ changes_to_newstage(upload, session)
-def do_accept(upload):
+def do_accept(upload, session):
print "ACCEPT"
cnf = Config()
if not Options["No-Action"]:
upload.Subst["__SUMMARY__"] = summary
else:
# Just a normal upload, accept it...
- _accept(upload)
+ _accept(upload, session)
def do_pkg(changes_file, session):
+ new_queue = get_policy_queue('new', session );
u = Upload()
- u.pkg.load_dot_dak(changes_file)
+ u.pkg.changes_file = changes_file
+ (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changes_file)
+ u.load_changes(changes_file)
+ u.pkg.directory = new_queue.path
u.update_subst()
+ u.logger = Logger
+ origchanges = os.path.abspath(u.pkg.changes_file)
cnf = Config()
bcc = "X-DAK: dak process-new"
u.Subst["__BCC__"] = bcc
files = u.pkg.files
+ for deb_filename, f in files.items():
+ if deb_filename.endswith(".udeb") or deb_filename.endswith(".deb"):
+ u.binary_file_checks(deb_filename, session)
+ u.check_binary_against_db(deb_filename, session)
+ else:
+ u.source_file_checks(deb_filename, session)
+ u.check_source_against_db(deb_filename, session)
+
+ u.pkg.changes["suite"] = copy.copy(u.pkg.changes["distribution"])
try:
with lock_package(u.pkg.changes["source"]):
- if not recheck(u, session):
- return
+ with clean_holding(u.pkg):
+ if not recheck(u, session):
+ return
- (new, byhand) = check_status(files)
- if new or byhand:
+ # FIXME: This does need byhand checks added!
+ new = determine_new(u.pkg.changes, files)
if new:
do_new(u, session)
- if byhand:
- do_byhand(u, session)
- (new, byhand) = check_status(files)
-
- if not new and not byhand:
- try:
- check_daily_lock()
- do_accept(u)
- except CantGetLockError:
- print "Hello? Operator! Give me the number for 911!"
- print "Dinstall in the locked area, cant process packages, come back later"
+ else:
+ try:
+ check_daily_lock()
+ do_accept(u, session)
+ except CantGetLockError:
+ print "Hello? Operator! Give me the number for 911!"
+ print "Dinstall in the locked area, cant process packages, come back later"
+# (new, byhand) = check_status(files)
+# if new or byhand:
+# if new:
+# do_new(u, session)
+# if byhand:
+# do_byhand(u, session)
+# (new, byhand) = check_status(files)
+
+# if not new and not byhand:
+# try:
+# check_daily_lock()
+# do_accept(u)
+# except CantGetLockError:
+# print "Hello? Operator! Give me the number for 911!"
+# print "Dinstall in the locked area, cant process packages, come back later"
except AlreadyLockedError, e:
print "Seems to be locked by %s already, skipping..." % (e)
def main():
global Options, Logger, Sections, Priorities
- print "NO NEW PROCESSING CURRENTLY AVAILABLE"
- print "(Go and do something more interesting)"
- sys.exit(0)
-
cnf = Config()
session = DBConn().session()
changes_files = apt_pkg.ParseCommandLine(cnf.Cnf,Arguments,sys.argv)
if len(changes_files) == 0:
- changes_files = utils.get_changes_files(cnf["Dir::Queue::New"])
+ new_queue = get_policy_queue('new', session );
+ changes_files = utils.get_changes_files(new_queue.path)
Options = cnf.SubTree("Process-New::Options")
###############################################################################
+def byebye():
+ # Exit-time cleanup: delete changes_pending_files rows that are no longer
+ # referenced from changes_pending_files_map (orphaned by processed
+ # uploads). Skipped entirely in --no-action mode.
+ if not Options["No-Action"]:
+ # Clean out the queue files
+ session = DBConn().session()
+ session.execute("DELETE FROM changes_pending_files WHERE id NOT IN (SELECT file_id FROM changes_pending_files_map )")
+ session.commit()
+
+
+
def action(u, session):
cnf = Config()
holding = Holding()
u.do_reject(0, pi)
elif answer == 'A':
if not chg:
- chg = u.pkg.add_known_changes(holding.holding_dir, session)
+ chg = u.pkg.add_known_changes(holding.holding_dir, session=session)
+ session.commit()
u.accept(summary, short_summary, session)
u.check_override()
+ chg.clean_from_queue()
session.commit()
u.remove()
elif answer == 'P':
if not chg:
- chg = u.pkg.add_known_changes(holding.holding_dir, session)
+ chg = u.pkg.add_known_changes(holding.holding_dir, session=session)
package_to_queue(u, summary, short_summary, policyqueue, chg, session)
session.commit()
u.remove()
elif answer == queuekey:
if not chg:
- chg = u.pkg.add_known_changes(holding.holding_dir, session)
+ chg = u.pkg.add_known_changes(holding.holding_dir, session=session)
QueueInfo[qu]["process"](u, summary, short_summary, chg, session)
session.commit()
u.remove()
elif answer == 'Q':
+ byebye()
sys.exit(0)
session.commit()
utils.size_type(int(summarystats.accept_bytes)))
Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
+ byebye()
+
if not Options["No-Action"]:
if log_urgency:
UrgencyLog().close()
+
Logger.close()
###############################################################################
from daklib import utils
from daklib.queue import Upload
-from daklib.dbconn import DBConn, has_new_comment
+from daklib.dbconn import DBConn, has_new_comment, DBChange
from daklib.textutils import fix_maintainer
from daklib.dak_exceptions import *
(name, mail) = changedby.split(":", 1)
print "<span class=\"changed-by\">Changed-By: <a href=\"http://qa.debian.org/developer.php?login=%s\">%s</a></span><br/>" % (utils.html_escape(mail), utils.html_escape(name))
- try:
- (login, domain) = sponsor.split("@", 1)
- print "<span class=\"sponsor\">Sponsor: <a href=\"http://qa.debian.org/developer.php?login=%s\">%s</a></span>@debian.org<br/>" % (utils.html_escape(login), utils.html_escape(login))
- except Exception, e:
- print "WARNING: Exception %s" % e
- pass
+ if sponsor:
+ try:
+ (login, domain) = sponsor.split("@", 1)
+ print "<span class=\"sponsor\">Sponsor: <a href=\"http://qa.debian.org/developer.php?login=%s\">%s</a></span>@debian.org<br/>" % (utils.html_escape(login), utils.html_escape(login))
+ except Exception, e:
+ pass
print "<span class=\"signature\">Fingerprint: %s</span>" % (fingerprint)
print "</td>"
############################################################
def process_changes_files(changes_files, type, log):
+ session = DBConn().session()
msg = ""
cache = {}
# Read in all the .changes files
arches = {}
versions = {}
for j in i[1]["list"]:
+ changesbase = os.path.basename(j["filename"])
+ try:
+ dbc = session.query(DBChange).filter_by(changesname=changesbase).one()
+ except Exception, e:
+ print "Can't find changes file in NEW for %s (%s)" % (changesbase, e)
+ dbc = None
+
if Cnf.has_key("Queue-Report::Options::New") or Cnf.has_key("Queue-Report::Options::822"):
try:
(maintainer["maintainer822"], maintainer["maintainer2047"],
distribution=j["distribution"].keys()
closes=j["closes"].keys()
- fingerprint=j["fingerprint"]
- if j.has_key("sponsoremail"):
- sponsor=j["sponsoremail"]
+ if dbc:
+ fingerprint = dbc.fingerprint
+
+ # TODO: This won't work now as it never gets set
+ # Fix so that we compare the changed-by/maintainer and the signing key
+ # Should probably be done somewhere more central
+ #if j.has_key("sponsoremail"):
+ # sponsor=j["sponsoremail"]
+
for arch in j["architecture"].keys():
arches[arch] = ""
version = j["version"]
################################################################################
Cnf = None
-required_database_schema = 23
+# NOTE(review): schema bumped 23 -> 24 for this change; make sure the
+# matching database update script ships alongside it.
+required_database_schema = 24
################################################################################
self.changes[key]='missing'
 @session_wrapper
- def add_known_changes(self, dirpath, session=None):
+ def add_known_changes(self, dirpath, in_queue=None, session=None):
 """add "missing" in fields which we will require for the known_changes table"""
 cnf = Config()
 if isinstance(self.changes[key], dict):
 multivalues[key] = " ".join(self.changes[key].keys())
 else:
- multivalues[key] = self.changes[key].keys()
-
- # TODO: Use ORM
- session.execute(
- """INSERT INTO changes
- (changesname, seen, source, binaries, architecture, version,
- distribution, urgency, maintainer, fingerprint, changedby, date)
- VALUES (:changesfile,:filetime,:source,:binary, :architecture,
- :version,:distribution,:urgency,:maintainer,:fingerprint,:changedby,:date)""",
- { 'changesfile': self.changes_file,
- 'filetime': filetime,
- 'source': self.changes["source"],
- 'binary': multivalues["binary"],
- 'architecture': multivalues["architecture"],
- 'version': self.changes["version"],
- 'distribution': multivalues["distribution"],
- 'urgency': self.changes["urgency"],
- 'maintainer': self.changes["maintainer"],
- 'fingerprint': self.changes["fingerprint"],
- 'changedby': self.changes["changed-by"],
- 'date': self.changes["date"]} )
+ multivalues[key] = self.changes[key]
+
+ # Build the DBChange ORM object (replaces the old raw INSERT above).
+ chg = DBChange()
+ chg.changesname = self.changes_file
+ chg.seen = filetime
+ chg.in_queue_id = in_queue
+ chg.source = self.changes["source"]
+ chg.binaries = multivalues["binary"]
+ chg.architecture = multivalues["architecture"]
+ chg.version = self.changes["version"]
+ chg.distribution = multivalues["distribution"]
+ chg.urgency = self.changes["urgency"]
+ chg.maintainer = self.changes["maintainer"]
+ chg.fingerprint = self.changes["fingerprint"]
+ chg.changedby = self.changes["changed-by"]
+ chg.date = self.changes["date"]
+
+ session.add(chg)
+
+ # Attach each file listed in the .changes: hash it from dirpath when the
+ # file is present there, otherwise fall back to locating it in the pool.
+ files = []
+ for chg_fn, entry in self.files.items():
+ try:
+ f = open(os.path.join(dirpath, chg_fn))
+ # NOTE(review): f is only closed on the success path -- an error
+ # while hashing leaks the handle; consider try/finally.
+ cpf = ChangePendingFile()
+ cpf.filename = chg_fn
+ cpf.size = entry['size']
+ cpf.md5sum = entry['md5sum']
+
+ # Reuse checksums from the .changes when present; otherwise
+ # compute them from the file contents.
+ if entry.has_key('sha1sum'):
+ cpf.sha1sum = entry['sha1sum']
+ else:
+ f.seek(0)
+ cpf.sha1sum = apt_pkg.sha1sum(f)
+
+ if entry.has_key('sha256sum'):
+ cpf.sha256sum = entry['sha256sum']
+ else:
+ f.seek(0)
+ cpf.sha256sum = apt_pkg.sha256sum(f)
+
+ session.add(cpf)
+ files.append(cpf)
+ f.close()
+
+ except IOError:
+ # Can't find the file, try to look it up in the pool
+ poolname = poolify(entry["source"], entry["component"])
+ l = get_location(cnf["Dir::Pool"], entry["component"], session=session)
+
+ found, poolfile = check_poolfile(os.path.join(poolname, chg_fn),
+ entry['size'],
+ entry["md5sum"],
+ l.location_id,
+ session=session)
+
+ # check_poolfile: None = ambiguous, False = mismatch,
+ # True = good match (poolfile may still be None if absent).
+ if found is None:
+ Logger.log(["E: Found multiple files for pool (%s) for %s" % (chg_fn, entry["component"])])
+ elif found is False and poolfile is not None:
+ Logger.log(["E: md5sum/size mismatch for %s in pool" % (chg_fn)])
+ else:
+ if poolfile is None:
+ Logger.log(["E: Could not find %s in pool" % (chg_fn)])
+ else:
+ chg.poolfiles.append(poolfile)
+
+ chg.files = files
 session.commit()
-
- return session.query(DBChange).filter_by(changesname = self.changes_file).one()
+ # Re-query so the returned object is the freshly persisted row.
+ chg = session.query(DBChange).filter_by(changesname = self.changes_file).one();
+
+ return chg
def unknown_files_fields(self, name):
return sorted(list( set(self.files[name].keys()) -
import re
import psycopg2
import traceback
-from datetime import datetime
+from datetime import datetime, timedelta
+from errno import ENOENT
+from tempfile import mkstemp, mkdtemp
from inspect import getargspec
################################################################################
+# Minimal apt.conf fed to apt-ftparchive by BuildQueue.write_metadata.
+# %(archivepath)s and %(filelist)s are substituted at generation time.
+# The two bindirectory stanzas name the same directory ("incoming" vs
+# "incoming/") so that Packages and Sources get separate sections.
+# NOTE(review): OverrideDir/CacheDir and the override file names are
+# hardcoded ftp-master paths -- still to be made configurable.
+MINIMAL_APT_CONF="""
+Dir
+{
+ ArchiveDir "%(archivepath)s";
+ OverrideDir "/srv/ftp.debian.org/scripts/override/";
+ CacheDir "/srv/ftp.debian.org/database/";
+};
+
+Default
+{
+ Packages::Compress ". bzip2 gzip";
+ Sources::Compress ". bzip2 gzip";
+ DeLinkLimit 0;
+ FileMode 0664;
+}
+
+bindirectory "incoming"
+{
+ Packages "Packages";
+ Contents " ";
+
+ BinOverride "override.sid.all3";
+ BinCacheDB "packages-accepted.db";
+
+ FileList "%(filelist)s";
+
+ PathPrefix "";
+ Packages::Extensions ".deb .udeb";
+};
+
+bindirectory "incoming/"
+{
+ Sources "Sources";
+ BinOverride "override.sid.all3";
+ SrcOverride "override.sid.all3.src";
+ FileList "%(filelist)s";
+};
+"""
+
class BuildQueue(object):
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
return '<BuildQueue %s>' % self.queue_name
+ def write_metadata(self, starttime, force=False):
+ # Do we write out metafiles?
+ if not (force or self.generate_metadata):
+ return
+
+ session = DBConn().session().object_session(self)
+
+ fl_fd = fl_name = ac_fd = ac_name = None
+ tempdir = None
+ arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
+ startdir = os.getcwd()
+
+ try:
+ # Grab files we want to include
+ newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+ # Write file list with newer files
+ (fl_fd, fl_name) = mkstemp()
+ for n in newer:
+ os.write(fl_fd, '%s\n' % n.fullpath)
+ os.close(fl_fd)
+
+ # Write minimal apt.conf
+ # TODO: Remove hardcoding from template
+ (ac_fd, ac_name) = mkstemp()
+ os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
+ 'filelist': fl_name})
+ os.close(ac_fd)
+
+ # Run apt-ftparchive generate
+ os.chdir(os.path.dirname(ac_name))
+ os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
+
+ # Run apt-ftparchive release
+ # TODO: Eww - fix this
+ bname = os.path.basename(self.path)
+ os.chdir(self.path)
+ os.chdir('..')
+
+ # We have to remove the Release file otherwise it'll be included in the
+ # new one
+ try:
+ os.unlink(os.path.join(bname, 'Release'))
+ except OSError:
+ pass
+
+ os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
+
+ # Sign if necessary
+ if self.signingkey:
+ cnf = Config()
+ keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
+ if cnf.has_key("Dinstall::SigningPubKeyring"):
+ keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
+
+ os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
+
+ # Move the files if we got this far
+ os.rename('Release', os.path.join(bname, 'Release'))
+ if self.signingkey:
+ os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
+
+ # Clean up any left behind files
+ finally:
+ os.chdir(startdir)
+ if fl_fd:
+ try:
+ os.close(fl_fd)
+ except OSError:
+ pass
+
+ if fl_name:
+ try:
+ os.unlink(fl_name)
+ except OSError:
+ pass
+
+ if ac_fd:
+ try:
+ os.close(ac_fd)
+ except OSError:
+ pass
+
+ if ac_name:
+ try:
+ os.unlink(ac_name)
+ except OSError:
+ pass
+
+ def clean_and_update(self, starttime, Logger, dryrun=False):
+ """WARNING: This routine commits for you"""
+ session = DBConn().session().object_session(self)
+
+ if self.generate_metadata and not dryrun:
+ self.write_metadata(starttime)
+
+ # Grab files older than our execution time
+ older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+
+ for o in older:
+ killdb = False
+ try:
+ if dryrun:
+ Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
+ else:
+ Logger.log(["I: Removing %s from the queue" % o.fullpath])
+ os.unlink(o.fullpath)
+ killdb = True
+ except OSError, e:
+ # If it wasn't there, don't worry
+ if e.errno == ENOENT:
+ killdb = True
+ else:
+ # TODO: Replace with proper logging call
+ Logger.log(["E: Could not remove %s" % o.fullpath])
+
+ if killdb:
+ session.delete(o)
+
+ session.commit()
+
+ for f in os.listdir(self.path):
+ if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release'):
+ continue
+
+ try:
+ r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+ except NoResultFound:
+ fp = os.path.join(self.path, f)
+ if dryrun:
+ Logger.log(["I: Would remove unused link %s" % fp])
+ else:
+ Logger.log(["I: Removing unused link %s" % fp])
+ try:
+ os.unlink(fp)
+ except OSError:
+ Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
+
def add_file_from_pool(self, poolfile):
"""Copies a file into the pool. Assumes that the PoolFile object is
attached to the same SQLAlchemy session as the Queue object is.
pass
def __repr__(self):
- return '<BuildQueueFile %s (%s)>' % (self.filename, self.queue_id)
+ return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
+
+ @property
+ def fullpath(self):
+ # Absolute on-disk location: the owning queue's path plus our filename.
+ return os.path.join(self.buildqueue.path, self.filename)
+
__all__.append('BuildQueueFile')
"""
# TODO: There must be a way of properly using bind parameters with %FOO%
- q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename))
+ q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
return q.all()
def __repr__(self):
return '<DBChange %s>' % self.changesname
+ def clean_from_queue(self):
+ # Detach this changes entry from its queue: drop the pool-file and
+ # pending-file associations and clear both queue pointers. Does not
+ # commit -- the caller is responsible for committing the session.
+ session = DBConn().session().object_session(self)
+
+ # Remove changes_pool_files entries
+ self.poolfiles = []
+
+ # Remove changes_pending_files references
+ self.files = []
+
+ # Clear out of queue
+ self.in_queue = None
+ self.approved_for_id = None
+
__all__.append('DBChange')
@session_wrapper
poolfiles = relation(PoolFile,
secondary=self.tbl_changes_pool_files,
backref="changeslinks"),
+ seen = self.tbl_changes.c.seen,
+ source = self.tbl_changes.c.source,
+ binaries = self.tbl_changes.c.binaries,
+ architecture = self.tbl_changes.c.architecture,
+ distribution = self.tbl_changes.c.distribution,
+ urgency = self.tbl_changes.c.urgency,
+ maintainer = self.tbl_changes.c.maintainer,
+ changedby = self.tbl_changes.c.changedby,
+ date = self.tbl_changes.c.date,
+ version = self.tbl_changes.c.version,
files = relation(ChangePendingFile,
secondary=self.tbl_changes_pending_files_map,
backref="changesfile"),
properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
mapper(ChangePendingFile, self.tbl_changes_pending_files,
- properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+ properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
+ filename = self.tbl_changes_pending_files.c.filename,
+ size = self.tbl_changes_pending_files.c.size,
+ md5sum = self.tbl_changes_pending_files.c.md5sum,
+ sha1sum = self.tbl_changes_pending_files.c.sha1sum,
+ sha256sum = self.tbl_changes_pending_files.c.sha256sum))
mapper(ChangePendingSource, self.tbl_changes_pending_source,
properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
# Build up a list of potentially new things
for name, f in files.items():
# Skip byhand elements
- if f["type"] == "byhand":
- continue
+# if f["type"] == "byhand":
+# continue
pkg = f["package"]
priority = f["priority"]
section = f["section"]
def check_status(files):
new = byhand = 0
for f in files.keys():
- if files[f]["type"] == "byhand":
+ if files[f].has_key("byhand"):
byhand = 1
elif files[f].has_key("new"):
new = 1
entry["new"] = 1
else:
dsc_file_exists = False
- for myq in ["Accepted", "Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
+ for myq in ["Embargoed", "Unembargoed", "ProposedUpdates", "OldProposedUpdates"]:
if cnf.has_key("Dir::Queue::%s" % (myq)):
if os.path.exists(os.path.join(cnf["Dir::Queue::" + myq], dsc_filename)):
dsc_file_exists = True
# if in the pool or in a queue other than unchecked, reject
if (dbc.in_queue is None) \
or (dbc.in_queue is not None
- and dbc.in_queue.queue_name != 'unchecked'):
+ and dbc.in_queue.queue_name not in ["unchecked", "newstage"]):
self.rejects.append("%s file already known to dak" % base_filename)
except NoResultFound, e:
# not known, good
continue
# Look in some other queues for the file
- queues = ('Accepted', 'New', 'Byhand', 'ProposedUpdates',
+ queues = ('New', 'Byhand', 'ProposedUpdates',
'OldProposedUpdates', 'Embargoed', 'Unembargoed')
for queue in queues:
else:
# TODO: Record the queues and info in the DB so we don't hardcode all this crap
# Not there? Check the queue directories...
- for directory in [ "Accepted", "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
+ for directory in [ "New", "Byhand", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
if not Cnf.has_key("Dir::Queue::%s" % (directory)):
continue
in_otherdir = os.path.join(Cnf["Dir::Queue::%s" % (directory)], dsc_name)
source_epochless_version = re_no_epoch.sub('', source_version)
dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
found = False
- for q in ["Accepted", "Embargoed", "Unembargoed", "Newstage"]:
+ for q in ["Embargoed", "Unembargoed", "Newstage"]:
if cnf.has_key("Dir::Queue::%s" % (q)):
if os.path.exists(cnf["Dir::Queue::%s" % (q)] + '/' + dsc_filename):
found = True