From: Joerg Jaspert Date: Sat, 1 Nov 2008 18:56:31 +0000 (+0100) Subject: Merge branch 'master' into security X-Git-Url: https://git.decadent.org.uk/gitweb/?a=commitdiff_plain;h=f8996e240d9d0278bce098e23be63db0bcc6fbee;hp=7f20bb168b919452f8ee0c865ab91f0084a9f46a;p=dak.git Merge branch 'master' into security * master: (182 commits) Remove / Deactivate oldstable ignore EPERM as a result of the chmod of a .dak file EVIL SINGLE TAB CHARACTER IN LAST COMMIT EMERGENCY FIX changelog bug # produce changes without signature, fix delay calculation also for status add machine-readable status show_deferred config for show-deferred whitespace sanitizing move config to config message massage logfiles move show-deferred to dak move show-deferred to dak implement public access to deferred fix remaining days, some style spaces in uploader names validtime Valid-Until ... --- diff --git a/.bzrignore b/.bzrignore deleted file mode 100644 index ce45799c..00000000 --- a/.bzrignore +++ /dev/null @@ -1 +0,0 @@ -dak/daklib diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..f3d74a9a --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +*.pyc +*~ diff --git a/ChangeLog b/ChangeLog index 05a17bc5..8c7ce8a4 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,461 @@ +2008-10-27 Joerg Jaspert + + * scripts/debian/mkfilesindices: Remove oldstable + + * config/debian/vars: Remove sarge + + * config/debian/dak.conf: Untouchable o-p-u, until we have removed + all of sarge and its files. + + * config/debian/apt.conf.oldstable: Removed + + * config/debian/apt.conf: Remove oldstable + +2008-10-14 Thomas Viehmann + + * dak/show_deferred.py: produce .changes and improve status + +2008-10-07 Joerg Jaspert + + * config/debian/cron.dinstall: Only keep the last 60 days of + dinstall logfiles on disc. + +2008-10-05 Thomas Viehmann + + * daklib/database.py: added get_suites + * dak/dak.py, dak/show_deferred.py: add show-deferred to dak. + +2008-09-23 Joerg Jaspert + + * config/debian/dak.conf: Add the validtime fields, set to 7 + days. + + * dak/generate_releases.py (main): Add a "Valid-Until" line into + our release files, meaning "$receiver shouldn't trust these files + after that date". Should be used by apt and similar tools to + detect certain kinds of MITM attacks, see #499897 + +2008-09-21 Joerg Jaspert + + * config/debian/cron.hourly: Generate the DEFERRED queue + overview.
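[Editor's note] The Valid-Until mechanism in the 2008-09-23 entry is implemented later in this diff in generate_releases.py: the header is the current time plus the suite's per-suite ValidTime offset, rendered in UTC. A minimal sketch of that computation (the function name is ours; the strftime format and the 604800-second value are the ones this diff adds):

```python
import time

def valid_until(valid_time_seconds):
    # Current time plus the suite's ValidTime offset, rendered in UTC --
    # the same strftime format the generate_releases.py hunk below uses.
    return time.strftime("%a, %d %b %Y %H:%M:%S UTC",
                         time.gmtime(time.time() + valid_time_seconds))

# 604800 seconds = 7 days, the value dak.conf sets for etch-p-u, lenny and sid
print(valid_until(604800))
```

A client such as apt can then treat a Release file whose Valid-Until lies in the past as stale, which is what makes the replay/MITM detection of #499897 possible.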
+ +2008-09-13 Philipp Kern + + * dak/queue.py (dump_vars): make .dak u,g=rw,o=r; James' + assumption (as stated in 2002-05-18's ChangeLog entry) + was that people would use the information therein although + it is "just" a duplication of information present in + other control files; people should still not use it + as a source of information but access to those files makes + dak debugging easier and there is no leak of sensitive + information involved + +2008-09-12 Philipp Kern + + * dak/new_security_install.py (actually_upload): remove + oldstable-security/amd64 check; Etch, as the next oldstable, + already had proper amd64 support + +2008-09-12 Joerg Jaspert + + * scripts/debian/update-pseudopackages.sh: s/i/file/ + +2008-09-11 Joerg Jaspert + + * config/debian/pseudo-packages.description, ...maintainers: + Removed, now with the bts people + + * scripts/debian/update-pseudopackages.sh: Added, fetching + pseudo-packages from new bts location + + * scripts/debian/mkmaintainers: Use new location + +2008-09-08 Philipp Kern + + * dak/check_archive.py (check_checksums): rewind the files + before the sha1sum/sha256sum checks as they got seeked by + md5sum + + * daklib/utils.py (build_file_list): do not die on very + old dsc files without a format header + +2008-09-07 Philipp Kern + + * daklib/utils.py (check_hash): try..except..finally only + works on python >=2.5. + + * dak/process_accepted.py (install): better use dsc_file + instead of the (for the loop iteration) static file + variable + +2008-09-07 Philipp Kern + + * daklib/utils.py (check_hash): change the comment and warn + if a file is not found when checking the hashes (i.e. when + it is probably in the pool) + + * daklib/utils.py (check_size): do not bail out if the file + is not found, because it may be in the pool + + * dak/process_accepted.py (install): bail out and skip the + upload when ensure_hashes fails, print the rejection messages + as warnings + +2008-08-28 Philipp Kern + + * daklib/utils.py (check_hashes): adapt to different API, check + sizes separately + + * daklib/utils.py (parse_changes, parse_deb822): refactor + the string-based logic of parse_changes into a new function + parse_deb822; parse_changes itself remains file-based + + * daklib/utils.py (hash_key): gives the key of a hash in the + files dict + + * daklib/utils.py (create_hash, check_size): made more readable + + * daklib/utils.py (check_hash): just check the hashes and complain + about missing checksums + + * daklib/utils.py (check_hash_fields): function to reject unknown + checksums fields + + * daklib/utils.py (_ensure_changes_hash, _ensure_dsc_hash): helper + functions for ensure_hashes; check their corresponding manifests' + hashes + + * daklib/utils.py (ensure_hashes): retrieve the checksums fields + from the original filecontents blob so that they do not need to + be present in the .dak; refactored the actual checks by calling + the aforementioned helper functions + + * daklib/utils.py (parse_checksums): parse a given checksums field + in a manifest and insert the values found into the files dict, + checking the file sizes on the way + +2008-09-06 Philipp Kern + + * dak/process_new.py (is_source_in_queue_dir): Access the right + variable to check if the given entry in the queue is the sourceful + upload we are looking for.
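[Editor's note] The 2008-08-28 refactor above revolves around a per-file dict that stores every checksum under a well-known key. The real helpers live in daklib/utils.py; the bodies below are only our assumption of their shape, illustrating hash_key and a check_hash that collects complaints instead of raising:

```python
import hashlib

def hash_key(hashname):
    # Key of a hash in the per-file dict, e.g. hash_key("sha1") == "sha1sum"
    return hashname + "sum"

def check_hash(where, files, hashname, hashfunc):
    # Recompute one checksum type for every file and collect complaints;
    # a missing checksum field is complained about as well.
    rejects = []
    for fname, entry in files.items():
        expected = entry.get(hash_key(hashname))
        if expected is None:
            rejects.append("%s: no %s in %s" % (fname, hashname, where))
            continue
        try:
            f = open(fname, "rb")
        except IOError:
            # the file may already live in the pool; the real code only warns
            continue
        actual = hashfunc(f.read()).hexdigest()
        f.close()
        if actual != expected:
            rejects.append("%s: %s mismatch in %s" % (fname, hashname, where))
    return rejects

# usage sketch: check_hash(".changes", files, "sha1", hashlib.sha1)
```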
+ +2008-09-02 Joerg Jaspert + + * config/debian/pseudo-packages.description: Added debian-i18n and + buildd.emdebian.org + + * dak/process_new.py (_accept): Fix Philipp's new helper function + to not break by moving Upload.build_summaries there. + +2008-08-31 Philipp Kern + + * dak/process_new.py (_accept): new helper function to accept + an upload regularly, obeying no-action if set + * dak/process_new.py (do_accept): use _accept + * dak/process_new.py (do_accept_stableupdate): bail out in else + on binary uploads, in case we missed something; use the _accept + helper + +2008-08-30 Philipp Kern + + * dak/process_new.py (is_source_in_queue_dir): join the queue path + because os.listdir entries come with their path stripped + +2008-08-30 Philipp Kern + + * dak/process_new.py (do_accept_stableupdate): state what we intend + to do + +2008-08-26 Philipp Kern + + * dak/process_new.py (is_source_in_queue_dir): fix variable usage + * dak/process_new.py (move_to_holding): just state what we intend + to do in no-action mode + * dak/process_new.py (do_accept_stableupdate): fetch summaries, + fix invocation of is_source_in_queue_dir, actually accept sourceful + uploads in p-u holding + +2008-08-26 Philipp Kern + + * dak/process_new.py (do_accept): do not try to free the unchecked + lockfile in no-action mode + +2008-08-16 Joerg Jaspert + + * config/debian/cron.dinstall: We don't want i18n to ever fail + dinstall, add a || true + +2008-08-15 Mark Hymers + + * daklib/utils.py: Actually import a module before using it. + + * daklib/utils.py: Actually check we have basedict before trying to + use it. + + * dak/process_accepted.py, dak/process_unchecked.py, + daklib/database.py: Don't change get_files_id to use sha1sum and + sha256sum. + + * setup/init_pool.sql, dak/check_archive.py, dak/decode_dot_dak.py, + dak/process_accepted.py, dak/process_unchecked.py, daklib/database.py, + daklib/queue.py, daklib/utils.py: Attempt to add sha1sum and + sha256sums into the database. The complication is that we have to + keep backwards compatibility with the .dak files already in existence. + Note that import_archive hasn't been hacked to deal with this yet. + +2008-08-14 Joerg Jaspert + + * config/debian/cron.dinstall: Added the i18n retrieval of package + description translations + +2008-08-12 Joerg Jaspert + + * config/debian/cron.dinstall: Complicate the i18n export a little + by using date/hour based directories which we then link into the + web view. They contain a signed timestamp file now, which means + the i18n people can take a long time to generate files, yet we + still know exactly which dataset their data is based on, and + can then verify it with that. Ensures we only get descriptions for + packages we know of (or knew of in the past 2 days). + +2008-08-11 Joerg Jaspert + + * web/dinstall.html: Added + + * config/debian/dak.conf: Added back the pgp keyrings for now, as + it seems that we should keep it for a few more days, until we + somehow got all those oldtimers to get a newer key into the + keyring. Unfortunately our logic to look for uploads done from + that keyring wasn't the most perfect one, so well, it is actually + used. Damn. + +2008-08-09 Joerg Jaspert + + * config/debian/dak.conf: No longer use the pgp keyring - no + uploads recorded for any of the pgp keys for a long time. + + * config/debian/cron.dinstall: Export the i18n foo.
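[Editor's note] The i18n import described in the 2008-08-12 entry only trusts data whose timestamp file carries our own signature and names a dataset directory we still keep. The cron.dinstall hunk later in this diff does this in shell with gpgv; below is a Python sketch of the same check (function and argument names are ours, and it assumes gpgv is installed):

```python
import os
import subprocess

def verified_i18n_dataset(sync_dir, i18n_dir, keyring):
    # Accept the synced data only if timestamp.gpg is a valid detached
    # signature by our key (gpgv exits non-zero otherwise) and the signed
    # timestamp names a dataset directory we still have on disk.
    sig = os.path.join(sync_dir, "timestamp.gpg")
    ts = os.path.join(sync_dir, "timestamp")
    if subprocess.call(["gpgv", "--keyring", keyring, sig, ts]) != 0:
        return None
    tstamp = open(ts).read().strip()
    dataset = os.path.join(i18n_dir, tstamp)
    return dataset if os.path.isdir(dataset) else None
```

Because the signed file is produced by our own export (and old exports are pruned after two days), a verified timestamp pointing at an existing dataset is enough; no further content inspection is needed, exactly as the shell comment says.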
+ +2008-08-08 Joerg Jaspert + + * config/debian/cron.dinstall: Create a hardlinked tree of the + ftp/ in mirror/ so we have more atomic mirror updates for the + buildds + + * config/debian/cron.unchecked: Added signing of buildd incoming + +2008-08-07 Philipp Kern + + * dak/process_new.py (do_accept): handle uploads to (oldstable-) + proposed-updates differently and put them into p-u holding + for review instead of unconditionally accepting them into + p-u proper; additional care needed to be taken to look + out for the source if a binary-only upload is being handled + +2008-08-07 Joerg Jaspert + + * dak/cruft_report.py (parse_nfu): call utils.warn instead of warn + (main): Only do the nfu stuff if nfu is a check we want to run + later. + (main): And another place where we don't want to do nfu foo unless + we need nfu + + * dak/make_suite_file_list.py (main): Fix a bug that has been + there for ages, but "just" never triggered. + +2008-08-07 Stephen Gran + + * Drop use of exec to eval variable interpolation + +2008-08-07 Joerg Jaspert + + * dak/process_accepted.py (install): Error out with the new + exception if we don't know the source package for a file we want to + install. Shouldn't ever hit us, but better safe than sorry. + + * daklib/dak_exceptions.py (dakerrors): new exception - no source field. + +2008-08-05 Joerg Jaspert + + * config/debian/cron.unchecked: disable the ssh-move insanity (and + as soon as rietz is back online - replace it with a one-line scp + or rsync statement followed by a one-line rm) + And now replaced this 128-line perl "not-invented-here" with a + one-line rsync command, using a feature rsync only understands + since sarge - damn new-fangled technology. + +2008-08-05 Joachim Breitner + + * dak/cruft_report.py: In full mode, report out-of-date binaries on + architectures that have set Not-For-Us for that package. + + * scripts/nfu/get-w-b-db: Script to fetch the wanna-build database + dump from http://buildd.debian.org/ + + * config/debian/cron.weekly: Run the above script + +2008-08-03 Mark Hymers + + * dak/process_new.py: Apply jvw's patch so that process_new shows + packages which need binary uploads sorted earlier than other packages. + +2008-07-26 Joerg Jaspert + + * templates/reject-proposed-updates.rejected,dak/reject_proposed_updates.py: + applied a patch by luk modifying the stable rejection mails to fit + reality a bit more + + * config/debian/dak.conf: no m68k in testing, so no m68k in t-p-u + r4 now + +2008-06-19 Thomas Viehmann + + * dak/process_unchecked.py (check_dsc,check_hashes): Catch + UnknownFormatError and reject + +2008-06-15 Joerg Jaspert + + * config/debian/cron.weekly: Work around a git bug until git is + fixed upstream and the fix is on backports.org + + * config/debian/cron.dinstall: (various ssh calls): Make them use + batchmode/connect/setuptimeout to not take too long with + connections... Also || true them, no need to die in dinstall if + one host isn't reachable. + Also do not die when the ldap server is unreachable, just ignore + that error.
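[Editor's note] The pattern in the 2008-06-15 entry above -- give every ssh call BatchMode and timeouts, then "|| true" it so one dead host cannot abort dinstall -- generalizes easily. A hedged Python sketch (the helper name is ours; BatchMode/ConnectTimeout are the options this diff adds to the ssh calls, SetupTimeout is the Debian-specific one it also uses):

```python
import subprocess
import sys

def run_tolerant(cmd):
    # Equivalent of the '|| true' pattern adopted above: run a command that
    # may legitimately fail (unreachable host, down LDAP server), warn on
    # stderr, and keep going instead of aborting the whole dinstall run.
    ret = subprocess.call(cmd)
    if ret != 0:
        sys.stderr.write("%s exited %d, continuing anyway\n"
                         % (" ".join(cmd), ret))
    return ret

run_tolerant(["ssh", "-o", "BatchMode=yes", "-o", "ConnectTimeout=30",
              "buildd@buildd", "/org/wanna-build/trigger.often"])
```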
+ + * README: Updated mailing list location + +2008-06-14 Otavio Salvador + + * docs/manpages/clean-suites.1.sgml: Minor typo fix + + * dak/import_archive.py: Add support for udeb packages + + * dak/control_suite.py (main): Handle SystemError exception in + case of an incompatible command-line parameter + + * dak/check_overrides.py (main): Use case-insensitive comparison + for codename + +2008-06-14 Joerg Jaspert + + * scripts/debian/byhand-task: Merged patch from Frans Pop to + fail on byhand-task uploads if they do not go to unstable. + + * config/debian/cron.weekly: Do a little git cleanup work too. + + * config/debian/cron.buildd: Add batchmode and also + Connect/SetupTimeout parameters to ssh + + * config/debian/cron.dinstall (POSTDUMP): Compress all + uncompressed psql backups + +2008-06-08 Joerg Jaspert + + * dak/process_unchecked.py (check_urgency): Lowercase urgency + before we (eventually) warn on it. Patch taken from Russ Allbery. + +2008-06-01 Otavio Salvador + + * daklib/queue.py (check_valid): allow debian-installer specific + sources to have 'debian-installer' section. + +2008-05-28 Frans Pop + + * add autobyhand support for task overrides (from tasksel) + +2008-05-27 Joerg Jaspert + + * config/debian/pseudo-packages.maintainers: Change ftp.debian.org + pseudopackage maintainer name. + +2008-05-12 Joerg Jaspert + + * dak/transitions.py: use yaml.dump instead of syck.dump, as syck + seems to have a bug in its dump(), causing it to write illegal entries. + Do the same for load. + +2008-05-10 Stephen Gran + + * tools/debianqueued-0.9/debianqueued: First pass at a send_mail + implementation that sucks less + * Update debian/control to reflect new perl dependency + +2008-05-09 Joerg Jaspert + + * dak/override.py (main): substitute value in X-Debian-Package + header + + * templates/override.bug-close: Add X-Debian-Package header + * templates/reject-proposed-updates.rejected: ditto + * templates/queue.rejected: ditto + * templates/process-unchecked.new: ditto + * templates/process-unchecked.bug-nmu-fixed: ditto + * templates/process-unchecked.bug-experimental-fixed: ditto + * templates/process-unchecked.bug-close: ditto + * templates/process-unchecked.announce: ditto + * templates/process-unchecked.accepted: ditto + * templates/process-new.prod: ditto + * templates/process-accepted.unaccept: ditto + * templates/process-accepted.install: ditto + * templates/process-unchecked.override-disparity: ditto + +2008-05-08 Joerg Jaspert + + * templates/override.bug-close: Add X-Debian header + * templates/rm.bug-close: ditto + * templates/reject-proposed-updates.rejected: ditto + * templates/queue.rejected: ditto + * templates/process-unchecked.new: ditto + * templates/process-unchecked.bug-nmu-fixed: ditto + * templates/process-unchecked.bug-experimental-fixed: ditto + * templates/process-unchecked.bug-close: ditto + * templates/process-unchecked.announce: ditto + * templates/process-unchecked.accepted: ditto + * templates/process-new.prod: ditto + * templates/process-accepted.unaccept: ditto + * templates/process-accepted.install: ditto + * templates/process-unchecked.override-disparity: ditto, but also + mention that people should include the package lists with the + override disparities. + +2008-05-06 Joerg Jaspert + + * config/debian/cron.dinstall: Put the timestamp stuff into its own + function, call that from everywhere. Also change the timestamp + format to not be locale-dependent.
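[Editor's note] That last entry replaces date +%X, whose output varies with the machine's locale, with a fixed %H:%M:%S format plus a shared counter, via the ts() shell function visible later in this diff. The same idea in Python (class name ours, a sketch only):

```python
import time

class Stamper(object):
    # Counterpart of cron.dinstall's ts() helper: one shared counter and
    # one fixed, locale-independent time format for every log line.
    def __init__(self):
        self.counter = 0

    def ts(self):
        self.counter += 1
        return "Archive maintenance timestamp %d: %s" % (
            self.counter, time.strftime("%H:%M:%S", time.gmtime()))

stamper = Stamper()
print(stamper.ts())
```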
+ +2008-05-05 Joerg Jaspert + + * daklib/dak_exceptions.py (dakerrors): add TransitionsError + * dak/transitions.py: Use it instead of our own definition + +2008-05-05 Mark Hymers + + * daklib/dak_exceptions.py: Add a default message and tidy up our string + representation + +2008-05-05 Joerg Jaspert + + * daklib/dak_exceptions.py: New file, central place for all the + exceptions dak may raise. + + * daklib/utils.py: Use dak_exceptions and delete all the string + exception raising stuff, which is deprecated. + While at it, delete unknown_hostname_exc, as it wasn't used. + + * dak/import_archive.py: use the new Exception class + * dak/rm.py: ditto + * dak/generate_releases.py: ditto + * dak/queue_report.py: ditto + * daklib/queue.py: ditto + 2008-05-04 Joerg Jaspert + * daklib/queue.py: Various pychecker cleanups + * dak/import_keyring.py: Remove unused daklib.logging and Logger and add the actually used daklib/utils @@ -12,6 +468,10 @@ * dak/clean_suites.py: likewise * dak/compare_suites.py: likewise * dak/cruft_report.py: likewise + (get_suite_binaries): Separated into its own function, out of main. More + of main should be split. (Or, well, cruft_report redesigned a + little, so it's easier to run on multiple suites with differing tests) + * dak/examine_package.py: likewise * dak/find_null_maintainers.py: likewise * dak/generate_index_diffs.py: likewise diff --git a/README b/README index 0c027786..68c91be6 100644 --- a/README +++ b/README @@ -18,7 +18,7 @@ TODO file is an incomplete list of things needing to be done. There's a mailing list for discussion, development of and help with dak. See: - http://mailman.nocrew.org/cgi-bin/mailman/listinfo/dak-disc + http://lists.debian.org/debian-dak/ for archives and details on how to subscribe. diff --git a/config/debian/apt.conf b/config/debian/apt.conf index 408e7dce..25486b8b 100644 --- a/config/debian/apt.conf +++ b/config/debian/apt.conf @@ -20,18 +20,6 @@ TreeDefault Contents::Header "/srv/ftp.debian.org/dak/config/debian/Contents.top"; }; -tree "dists/oldstable-proposed-updates" -{ - FileList "/srv/ftp.debian.org/database/dists/oldstable-proposed-updates_$(SECTION)_binary-$(ARCH).list"; - SourceFileList "/srv/ftp.debian.org/database/dists/oldstable-proposed-updates_$(SECTION)_source.list"; - Sections "main contrib non-free"; - Architectures "alpha arm hppa i386 ia64 m68k mips mipsel powerpc s390 sparc source"; - BinOverride "override.sarge.$(SECTION)"; - ExtraOverride "override.sarge.extra.$(SECTION)"; - SrcOverride "override.sarge.$(SECTION).src"; - Contents " "; -}; - tree "dists/proposed-updates" { FileList "/srv/ftp.debian.org/database/dists/proposed-updates_$(SECTION)_binary-$(ARCH).list"; @@ -81,18 +69,6 @@ tree "dists/unstable" // debian-installer -tree "dists/oldstable-proposed-updates/main" -{ - FileList "/srv/ftp.debian.org/database/dists/oldstable-proposed-updates_main_$(SECTION)_binary-$(ARCH).list"; - Sections "debian-installer"; - Architectures "alpha arm hppa i386 ia64 m68k mips mipsel powerpc s390 sparc"; - BinOverride "override.sarge.main.$(SECTION)"; - SrcOverride "override.sarge.main.src"; - BinCacheDB "packages-debian-installer-$(ARCH).db"; - Packages::Extensions ".udeb"; - Contents " "; -}; - tree "dists/proposed-updates/main" { FileList "/srv/ftp.debian.org/database/dists/proposed-updates_main_$(SECTION)_binary-$(ARCH).list"; diff --git a/config/debian/apt.conf.buildd b/config/debian/apt.conf.buildd index 2ad4483c..65a8363e 100644 --- a/config/debian/apt.conf.buildd +++ b/config/debian/apt.conf.buildd @@ -7,8
@@ Dir Default { - Packages::Compress "bzip2 gzip"; - Sources::Compress "bzip2 gzip"; + Packages::Compress ". bzip2 gzip"; + Sources::Compress ". bzip2 gzip"; DeLinkLimit 0; FileMode 0664; } diff --git a/config/debian/apt.conf.oldstable b/config/debian/apt.conf.oldstable deleted file mode 100644 index d827a66c..00000000 --- a/config/debian/apt.conf.oldstable +++ /dev/null @@ -1,45 +0,0 @@ -Dir -{ - ArchiveDir "/srv/ftp.debian.org/ftp/"; - OverrideDir "/srv/ftp.debian.org/scripts/override/"; - CacheDir "/srv/ftp.debian.org/database/"; -}; - -Default -{ - Packages::Compress ". gzip"; - Sources::Compress "gzip"; - Contents::Compress "gzip"; - DeLinkLimit 0; - FileMode 0664; -} - -TreeDefault -{ - Contents::Header "/srv/ftp.debian.org/dak/config/debian/Contents.top"; -}; - -tree "dists/oldstable" -{ - FileList "/srv/ftp.debian.org/database/dists/oldstable_$(SECTION)_binary-$(ARCH).list"; - SourceFileList "/srv/ftp.debian.org/database/dists/oldstable_$(SECTION)_source.list"; - Sections "main contrib non-free"; - Architectures "alpha arm hppa i386 ia64 m68k mips mipsel powerpc s390 sparc source"; - BinOverride "override.sarge.$(SECTION)"; - ExtraOverride "override.sarge.extra.$(SECTION)"; - SrcOverride "override.sarge.$(SECTION).src"; -}; - -// debian-installer - -tree "dists/oldstable/main" -{ - FileList "/srv/ftp.debian.org/database/dists/oldstable_main_$(SECTION)_binary-$(ARCH).list"; - Sections "debian-installer"; - Architectures "alpha arm hppa i386 ia64 m68k mips mipsel powerpc s390 sparc"; - BinOverride "override.sarge.main.$(SECTION)"; - SrcOverride "override.sarge.main.src"; - BinCacheDB "packages-debian-installer-$(ARCH).db"; - Packages::Extensions ".udeb"; - Contents " "; -}; diff --git a/config/debian/cron.buildd b/config/debian/cron.buildd index 1bc4d680..98494479 100755 --- a/config/debian/cron.buildd +++ b/config/debian/cron.buildd @@ -2,5 +2,5 @@ # # Called from cron.unchecked to update wanna-build, each time it runs. # -ssh buildd@buildd /org/wanna-build/trigger.often +ssh -o BatchMode=yes -o ConnectTimeout=30 -o SetupTimeout=240 buildd@buildd /org/wanna-build/trigger.often exit 0 diff --git a/config/debian/cron.dinstall b/config/debian/cron.dinstall index 8813d519..cb326fc6 100755 --- a/config/debian/cron.dinstall +++ b/config/debian/cron.dinstall @@ -13,12 +13,18 @@ NOW=`date "+%Y.%m.%d-%H:%M:%S"` LOGFILE="$logdir/dinstall_${NOW}.log" exec > "$LOGFILE" 2>&1 -echo Archive maintenance started at $(date +%X) +ts() { + TS=$(($TS+1)); + echo Archive maintenance timestamp $TS: $(date +%H:%M:%S) +} + +echo Archive maintenance started at $(date +%H:%M:%S) TS=0 NOTICE="$ftpdir/Archive_Maintenance_In_Progress" LOCKCU="$lockdir/daily.lock" LOCKAC="$lockdir/unchecked.lock" +BRITNEYLOCK="$lockdir/britney.lock" lockac=0 cleanup() { @@ -32,18 +38,24 @@ cleanup() { lockfile -l 3600 $LOCKCU trap cleanup 0 +# This file is simply used to indicate to britney whether or not +# the Packages file updates completed successfully.
It's not a lock +from our point of view +touch ${BRITNEYLOCK} + rm -f "$NOTICE" cat > "$NOTICE" < $base/backup/dump_$(date +%Y.%m.%d-%H:%M:%S) ################################################################################ -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts echo "Updating Bugs docu, Mirror list and mailing-lists.txt" cd $configdir $scriptsdir/update-bugdoctxt $scriptsdir/update-mirrorlists $scriptsdir/update-mailingliststxt +$scriptsdir/update-pseudopackages.sh ################################################################################ -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts echo "Doing automated p-u-new processing" cd $queuedir/p-u-new date -u -R >> REPORT dak process-new -a -C COMMENTS >> REPORT || true echo >> REPORT -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts echo "Doing automated o-p-u-new processing" cd $queuedir/o-p-u-new date -u -R >> REPORT @@ -77,7 +90,47 @@ echo >> REPORT ################################################################################ -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts + +echo "Synchronizing i18n package descriptions" +# First sync their newest data +cd ${scriptdir}/i18nsync +rsync -aq --delete --delete-after ddtp-sync:/does/not/matter . || true + +# Now check if we still know about the packages for which they created the files +# is the timestamp signed by us? +if $(gpgv --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg timestamp.gpg timestamp); then + # now read it. As it's signed by us we are sure the content is what we expect, no need + # to do more here. And we only test -d a directory on it anyway. + TSTAMP=$(cat timestamp) + # do we have the dir still? + if [ -d ${scriptdir}/i18n/${TSTAMP} ]; then + # Let's check! + if ${scriptsdir}/ddtp-i18n-check.sh . ${scriptdir}/i18n/${TSTAMP}; then + # Yay, worked, let's copy around + for dir in lenny sid; do + if [ -d dists/${dir}/ ]; then + cd dists/${dir}/main/i18n + rsync -aq --delete --delete-after . ${ftpdir}/dists/${dir}/main/i18n/. + fi + cd ${scriptdir}/i18nsync + done + else + echo "ARRRR, bad guys, wrong files, ARRR" + echo "Arf, Arf, Arf, bad guys, wrong files, arf, arf, arf" | mail debian-l10n-devel@lists.alioth.debian.org + fi + else + echo "ARRRR, missing the timestamp ${TSTAMP} directory, not updating i18n, ARRR" + echo "Arf, Arf, Arf, missing the timestamp ${TSTAMP} directory, not updating i18n, arf, arf, arf" | mail debian-l10n-devel@lists.alioth.debian.org + fi +else + echo "ARRRRRRR, could not verify our timestamp signature, ARRR. Don't mess with our files, i18n guys, ARRRRR." + echo "Arf, Arf, Arf, could not verify our timestamp signature, arf.
Don't mess with our files, i18n guys, arf, arf, arf" | mail debian-l10n-devel@lists.alioth.debian.org +fi + +################################################################################ + +ts lockfile $LOCKAC lockac=1 echo "Processing queue/accepted" @@ -88,7 +141,7 @@ dak process-accepted -pa *.changes | tee REPORT | \ chgrp debadmin REPORT chmod 664 REPORT -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts echo "Checking for cruft in overrides" dak check-overrides rm -f $LOCKAC @@ -97,57 +150,57 @@ lockac=0 echo "Fixing symlinks in $ftpdir" symlinks -d -r $ftpdir -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts echo "Generating suite file lists for apt-ftparchive" dak make-suite-file-list -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts echo "Updating fingerprints" # Update fingerprints -dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg +dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg || true -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts # Generate override files echo "Writing overrides into text files" cd $overridedir dak make-overrides -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts # FIXME rm -f override.sid.all3 for i in main contrib non-free main.debian-installer; do cat override.sid.$i >> override.sid.all3; done -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts # Generate Packages and Sources files echo "Generating Packages and Sources files" cd $configdir apt-ftparchive generate apt.conf -# Generate *.diff/ incremental updates -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts +# Generate *.diff/ incremental updates echo "Generating pdiff files" dak generate-index-diffs -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts # Generate Release files echo "Generating Release files" dak generate-releases -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts # Clean out old packages echo "Cleanup old packages/files" dak clean-suites dak clean-queues -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts # Needs to be rebuilt, as files have moved. Due to unaccepts, we need to # update this before wanna-build is updated. @@ -156,7 +209,7 @@ psql projectb -A -t -q -c "SELECT filename FROM queue_build WHERE suite = 5 AND symlinks -d /srv/incoming.debian.org/buildd > /dev/null apt-ftparchive generate apt.conf.buildd -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts echo "Running various scripts from $scriptsdir" cd $scriptsdir @@ -165,16 +218,23 @@ cd $scriptsdir ./mklslar ./mkfilesindices ./mkchecksums -# -rm -f $NOTICE -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts + +# (Re)generate the hardlinked mirror directory for "public" buildd / mirror access +echo "Regenerating mirror/ hardlink fun" +cd ${mirrordir} +rsync -aH --link-dest ${ftpdir} --delete --delete-after --ignore-errors ${ftpdir}/. . 
+ + +ts echo "Trigger daily wanna-build run" -ssh buildd@buildd /org/wanna-build/trigger.daily +ssh -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 buildd@buildd /org/wanna-build/trigger.daily || echo "W-B trigger.daily failed" | mail -s "W-B Daily trigger failed" ftpmaster@ftp-master.debian.org +rm -f $NOTICE rm -f $LOCKCU -echo Archive maintenance finished at $(date +%X) +echo Archive maintenance finished at $(date +%H:%M:%S) ################################################################################ @@ -185,14 +245,14 @@ pg_dump projectb > $POSTDUMP ################################################################################ -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts echo "Expiring old database dumps..." (cd $base/backup; $scriptsdir/expire_dumps -d . -p -f "dump_*") ################################################################################ -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts # Send a report on NEW/BYHAND packages echo "Nagging ftpteam about NEW/BYHAND packages" @@ -203,7 +263,7 @@ dak cruft-report > $webdir/cruft-report-daily.txt dak cruft-report -s experimental >> $webdir/cruft-report-daily.txt cat $webdir/cruft-report-daily.txt | mail -e -s "Debian archive cruft report for $(date +%D)" ftpmaster@ftp-master.debian.org -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts echo "Updating DM html page" $scriptsdir/dm-monitor >$webdir/dm-uploaders.html @@ -212,20 +272,41 @@ $scriptsdir/dm-monitor >$webdir/dm-uploaders.html # Push katie@merkel so it syncs the projectb there. Returns immediately, the sync runs detached echo "Trigger merkels projectb sync" -ssh -2 -i ~/.ssh/push_merkel_projectb katie@merkel.debian.org sleep 1 +ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_projectb katie@merkel.debian.org sleep 1 || true ################################################################################ -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts ulimit -m 90000 -d 90000 -s 10000 -v 200000 echo "Using run-parts to run scripts in $base/scripts/distmnt" run-parts --report $base/scripts/distmnt +ts + +echo "Exporting package data foo for i18n project" +STAMP=$(date "+%Y%m%d%H%M") +mkdir -p ${scriptdir}/i18n/${STAMP} +cd ${scriptdir}/i18n/${STAMP} +dak control-suite -l stable > etch +dak control-suite -l testing > lenny +dak control-suite -l unstable > sid +echo "${STAMP}" > timestamp +gpg --secret-keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 6070D3A1 --detach-sign -o timestamp.gpg timestamp +rm -f md5sum +md5sum * > md5sum +cd ${webdir}/ +ln -sfT ${scriptdir}/i18n/${STAMP} i18n + +cd ${scriptdir} +find ./i18n -mtime +2 -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf + +ts + echo "Daily cron scripts successful." 
-TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts # Stats pr0n echo "Updating stats data" @@ -233,26 +314,44 @@ cd $configdir $scriptsdir/update-ftpstats $base/log/* > $base/misc/ftpstats.data R --slave --vanilla < $base/misc/ftpstats.R -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts + +# Remove the britney lock +rm -f ${BRITNEYLOCK} # Clean up apt-ftparchive's databases echo "Clean up apt-ftparchive's databases" cd $configdir apt-ftparchive -q clean apt.conf -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts -# Compress psql backups older than a week, but no more than 20 of them +# Compress psql backups echo "Compress old psql backups" (cd $base/backup/ - find -maxdepth 1 -mindepth 1 -type f -name 'dump_*' \! -name '*.bz2' \! -name '*.gz' -mtime +7 | - sort | head -n20 | while read dumpname; do - echo "Compressing $dumpname" - bzip2 -9 "$dumpname" - done + find -maxdepth 1 -mindepth 1 -type f -name 'dump_*' \! -name '*.bz2' \! -name '*.gz' -mtime +1 | + while read dumpname; do + echo "Compressing $dumpname" + bzip2 -9 "$dumpname" + done ) -TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) +ts + +echo "Removing old dinstall logfiles" +(cd $logdir + find -maxdepth 1 -mindepth 1 -type f -name 'dinstall_*' -mtime +60 | + while read dumpname; do + echo "Removing $dumpname" + rm -f "$dumpname" + done + + find -maxdepth 1 -mindepth 1 -type f -name 'weekly_*' -mtime +60 | + while read dumpname; do + echo "Removing $dumpname" + rm -f "$dumpname" + done +) echo "Finally, all is done, sending mail and compressing logfile" exec > /dev/null 2>&1 diff --git a/config/debian/cron.hourly b/config/debian/cron.hourly index 468695f7..ce1d43e6 100755 --- a/config/debian/cron.hourly +++ b/config/debian/cron.hourly @@ -1,6 +1,6 @@ #! /bin/sh # -# Executed hourly via cron, out of troup's crontab. +# Executed hourly via cron, out of dak's crontab. set -e set -u @@ -10,4 +10,5 @@ export SCRIPTVARS=/srv/ftp.debian.org/dak/config/debian/vars date -u > $ftpdir/project/trace/ftp-master.debian.org dak import-users-from-passwd dak queue-report -n > $webdir/new.html +dak show-deferred > ${webdir}/deferred.html cd $queuedir/new ; dak show-new *.changes > /dev/null diff --git a/config/debian/cron.unchecked b/config/debian/cron.unchecked old mode 100644 new mode 100755 index 0e595736..406636ef --- a/config/debian/cron.unchecked +++ b/config/debian/cron.unchecked @@ -11,47 +11,62 @@ NOTICE="$lockdir/daily.lock" if [ -e $NOTICE ]; then exit 0; fi +STAMP=$(date "+%Y%m%d%H%M") + cleanup() { - rm -f "$LOCKFILE" - if [ ! -z "$LOCKDAILY" ]; then - rm -f "$NOTICE" - fi + rm -f "$LOCKFILE" + if [ ! -z "$LOCKDAILY" ]; then + rm -f "$NOTICE" + fi } # only run one cron.unchecked if lockfile -r3 $LOCKFILE; then - trap cleanup 0 - cd $unchecked - - changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs) - report=$queuedir/REPORT - timestamp=$(date "+%Y-%m-%d %H:%M") - - if [ ! 
-z "$changes" ]; then - echo "$timestamp": "$changes" >> $report - dak process-unchecked -a $changes >> $report - echo "--" >> $report - - # sync with debbugs - $scriptsdir/ssh-move --server --ssh-identity /srv/ftp.debian.org/s3kr1t/id_debbugs-vt --ssh-move-path /home/debbugs/ssh-move --from-directory $queuedir/bts_version_track --to-directory /org/bugs.debian.org/versions/queue/ftp-master debbugs@bugs.debian.org \*.debinfo \*.versions - - if lockfile -r3 $NOTICE; then - LOCKDAILY="YES" - psql projectb -A -t -q -c "SELECT filename FROM queue_build WHERE queue = 0 AND suite = 5 AND in_queue = true AND filename ~ 'd(sc|eb)$'" > $dbdir/dists/unstable_accepted.list - cd $overridedir - dak make-overrides &>/dev/null - rm -f override.sid.all3 override.sid.all3.src - for i in main contrib non-free main.debian-installer; do - cat override.sid.$i >> override.sid.all3 - if [ "$i" != "main.debian-installer" ]; then - cat override.sid.$i.src >> override.sid.all3.src - fi - done - cd $configdir - apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate apt.conf.buildd - . $configdir/cron.buildd + trap cleanup 0 + cd $unchecked + + changes=$(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs) + report=$queuedir/REPORT + timestamp=$(date "+%Y-%m-%d %H:%M") + + if [ ! -z "$changes" ]; then + echo "$timestamp": "$changes" >> $report + dak process-unchecked -a $changes >> $report + echo "--" >> $report + + # sync with debbugs + rsync -aq --remove-source-files $queuedir/bts_version_track/ bugs-sync:/org/bugs.debian.org/versions/queue/ftp-master/ + + if lockfile -r3 $NOTICE; then + LOCKDAILY="YES" + psql projectb -A -t -q -c "SELECT filename FROM queue_build WHERE queue = 0 AND suite = 5 AND in_queue = true AND filename ~ 'd(sc|eb)$'" > $dbdir/dists/unstable_accepted.list + cd $overridedir + dak make-overrides &>/dev/null + rm -f override.sid.all3 override.sid.all3.src + for i in main contrib non-free main.debian-installer; do + cat override.sid.$i >> override.sid.all3 + if [ "$i" != "main.debian-installer" ]; then + cat override.sid.$i.src >> override.sid.all3.src fi - else - echo "$timestamp": Nothing to do >> $report + done + cd $configdir + apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate apt.conf.buildd + + cd ${incoming} + rm -f buildd/Release* + apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="Debian" -o APT::FTPArchive::Release::Label="Debian" -o APT::FTPArchive::Release::Description="buildd incoming" -o APT::FTPArchive::Release::Architectures="${archs}" release buildd > Release + gpg --secret-keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 6070D3A1 --detach-sign -o Release.gpg Release + mv Release* buildd/. + + cd ${incoming} + mkdir -p tree/${STAMP} + cp -al ${incoming}/buildd/. tree/${STAMP}/ + ln -sfT tree/${STAMP} ${incoming}/builddweb + find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf + + . 
$configdir/cron.buildd fi + else + echo "$timestamp": Nothing to do >> $report + fi fi diff --git a/config/debian/cron.weekly b/config/debian/cron.weekly index 99d16359..4baaf46c 100755 --- a/config/debian/cron.weekly +++ b/config/debian/cron.weekly @@ -34,12 +34,25 @@ dak split-done > /dev/null # Vacuum the database echo "VACUUM; VACUUM ANALYZE;" | psql --no-psqlrc projectb 2>&1 | grep -v "^NOTICE: Skipping.*only table owner can VACUUM it$" +# Do git cleanup stuff +echo "Doing git stuff" +cd /org/ftp.debian.org/git/dak.git +git gc --prune +git update-server-info +# now work around a git bug not honoring the setup in logs/* +# (fix in development, but until it reaches backports.org.......) +chmod -R g+w logs/ + # Clean up apt-ftparchive's databases cd $configdir echo "Cleanup apt-ftparchive's database" apt-ftparchive -q clean apt.conf apt-ftparchive -q clean apt.conf.buildd +# Update wanna-build dump +echo "Update wanna-build database dump" +/org/ftp.debian.org/scripts/nfu/get-w-b-db + echo "Finally, all is done, compressing logfile" exec > /dev/null 2>&1 diff --git a/config/debian/dak.conf b/config/debian/dak.conf index 4181fb01..fc251f8b 100644 --- a/config/debian/dak.conf +++ b/config/debian/dak.conf @@ -126,6 +126,12 @@ Show-New HTMLPath "/srv/ftp.debian.org/web/new/"; } +Show-Deferred +{ + LinkPath "/srv/ftp.debian.org/web/deferred/"; + DeferredQueue "/srv/queued/DEFERRED/"; +} + Import-Users-From-Passwd { ValidGID "800"; @@ -173,8 +179,9 @@ Import-Archive Reject-Proposed-Updates { - StableRejector "Andreas Barth and Martin Zobel-Helas"; - MoreInfoURL "http://release.debian.org/stable/4.0/4.0r3/"; + StableRejector "the Stable Release Team"; + StableMail "debian-release@lists.debian.org"; + MoreInfoURL "http://release.debian.org/stable/4.0/4.0r5/"; }; Import-LDAP-Fingerprints @@ -300,6 +307,7 @@ Suite CodeName "sarge-proposed-updates"; OverrideCodeName "sarge"; OverrideSuite "oldstable"; + Untouchable "1"; Priority "2"; VersionChecks { @@ -399,6 +407,7 @@ Suite CodeName "etch-proposed-updates"; OverrideCodeName "etch"; OverrideSuite "stable"; + ValidTime 604800; // 7 days Priority "4"; VersionChecks { @@ -453,6 +462,7 @@ Suite Description "Debian Testing distribution - Not Released"; CodeName "lenny"; OverrideCodeName "lenny"; + ValidTime 604800; // 7 days Priority "5"; UdebComponents { @@ -480,7 +490,6 @@ Suite hppa; i386; ia64; - m68k; mips; mipsel; powerpc; @@ -493,6 +502,7 @@ Suite CodeName "testing-proposed-updates"; OverrideCodeName "lenny"; OverrideSuite "testing"; + ValidTime 604800; // 7 days Priority "6"; VersionChecks { @@ -579,6 +589,7 @@ Suite Description "Debian Unstable - Not Released"; CodeName "sid"; OverrideCodeName "sid"; + ValidTime 604800; // 7 days Priority "7"; VersionChecks { @@ -631,6 +642,7 @@ Suite NotAutomatic "yes"; OverrideCodeName "sid"; OverrideSuite "unstable"; + ValidTime 604800; // 7 days Priority "0"; VersionChecks { @@ -654,11 +666,11 @@ Suite SuiteMappings { - "propup-version oldstable-security stable testing testing-proposed-updates unstable"; +// "propup-version oldstable-security stable testing testing-proposed-updates unstable"; "propup-version stable-security testing testing-proposed-updates unstable"; "propup-version testing-security unstable"; - "map oldstable oldstable-proposed-updates"; - "map oldstable-security oldstable-proposed-updates"; +// "map oldstable oldstable-proposed-updates"; +// "map oldstable-security oldstable-proposed-updates"; "map stable proposed-updates"; "map stable-security proposed-updates"; "map
stable-proposed-updates proposed-updates"; @@ -692,6 +704,13 @@ AutomaticByHandPackages { Extension "tar.gz"; Script "/srv/ftp.debian.org/dak/scripts/debian/byhand-tag"; }; + + "task-overrides" { + Source "tasksel"; + Section "byhand"; + Extension "tar.gz"; + Script "/srv/ftp.debian.org/dak/scripts/debian/byhand-task"; + }; }; Dir diff --git a/config/debian/pseudo-packages.description b/config/debian/pseudo-packages.description deleted file mode 100644 index ab08f8f4..00000000 --- a/config/debian/pseudo-packages.description +++ /dev/null @@ -1,24 +0,0 @@ -base Base system general bugs -cdrom Installation system -spam Spam (reassign spam to here so we can complain about it) -press Press release issues -kernel Problems with the Linux kernel, or that shipped with Debian -project Problems related to project administration -general General problems (e.g. "many manpages are mode 755") -nm.debian.org New Maintainer process and nm.debian.org webpages -qa.debian.org The Quality Assurance group -ftp.debian.org Problems with the FTP site -www.debian.org Problems with the WWW site -bugs.debian.org The bug tracking system, @bugs.debian.org -lists.debian.org The mailing lists, debian-*@lists.debian.org -wnpp Work-Needing and Prospective Packages list -cdimage.debian.org CD Image issues -tech-ctte The Debian Technical Committee (see the Constitution) -mirrors Problems with the official mirrors -security.debian.org The Debian Security Team -installation-reports Reports of installation problems with stable & testing -upgrade-reports Reports of upgrade problems for stable & testing -release-notes Problems with the Release Notes -wiki.debian.org Problems with the Debian wiki -security-tracker The Debian Security Bug Tracker -release.debian.org Requests regarding Debian releases and release team tools diff --git a/config/debian/pseudo-packages.maintainers b/config/debian/pseudo-packages.maintainers deleted file mode 100644 index 37401fec..00000000 --- a/config/debian/pseudo-packages.maintainers +++ /dev/null @@ -1,24 +0,0 @@ -base Base Maintainers -cdrom Debian CD-ROM Team -press press@debian.org -bugs.debian.org Debian Bug Tracking Team -ftp.debian.org James Troup and others -nm.debian.org New Maintainer Front-Desk -qa.debian.org debian-qa@lists.debian.org -www.debian.org Debian WWW Team -mirrors Debian Mirrors Team -project debian-project@lists.debian.org -general debian-devel@lists.debian.org -kernel Debian Kernel Team -lists.debian.org Debian Listmaster Team -spam spam@debian.org -wnpp wnpp@debian.org -cdimage.debian.org Debian CD-ROM Team -tech-ctte Technical Committee -security.debian.org Debian Security Team -installation-reports Debian Install Team -upgrade-reports Debian Testing Group -release-notes Debian Documentation Team -wiki.debian.org Debian WWW Team -security-tracker Debian Security Tracker Team -release.debian.org Debian Release Team diff --git a/config/debian/vars b/config/debian/vars index 3f993fad..69cef765 100644 --- a/config/debian/vars +++ b/config/debian/vars @@ -19,11 +19,12 @@ logdir=$base/log/cron/ queuedir=$base/queue/ unchecked=$queuedir/unchecked/ accepted=$queuedir/accepted/ +mirrordir=$base/mirror/ incoming=$base/incoming ftpgroup=debadmin -copyoverrides="etch.contrib etch.contrib.src etch.main etch.main.src etch.non-free etch.non-free.src etch.extra.main etch.extra.non-free etch.extra.contrib etch.main.debian-installer sarge.contrib sarge.contrib.src sarge.main sarge.main.src sarge.non-free sarge.non-free.src sid.contrib sid.contrib.src sid.main sid.main.debian-installer 
sid.main.src sid.non-free sid.non-free.src sid.extra.contrib sid.extra.main sid.extra.non-free sarge.extra.contrib sarge.extra.main sarge.extra.non-free lenny.contrib lenny.contrib.src lenny.main lenny.main.src lenny.non-free lenny.non-free.src lenny.extra.main lenny.extra.contrib lenny.extra.non-free" +copyoverrides="etch.contrib etch.contrib.src etch.main etch.main.src etch.non-free etch.non-free.src etch.extra.main etch.extra.non-free etch.extra.contrib etch.main.debian-installer sid.contrib sid.contrib.src sid.main sid.main.debian-installer sid.main.src sid.non-free sid.non-free.src sid.extra.contrib sid.extra.main sid.extra.non-free lenny.contrib lenny.contrib.src lenny.main lenny.main.src lenny.non-free lenny.non-free.src lenny.extra.main lenny.extra.contrib lenny.extra.non-free" PATH=$masterdir:$PATH umask 022 diff --git a/dak/check_archive.py b/dak/check_archive.py index c00aa08b..896ab1f5 100755 --- a/dak/check_archive.py +++ b/dak/check_archive.py @@ -28,8 +28,8 @@ import commands, os, pg, stat, sys, time import apt_pkg, apt_inst -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils ################################################################################ @@ -52,7 +52,7 @@ Run various sanity checks of the archive and/or database. The following MODEs are available: - md5sums - validate the md5sums stored in the database + checksums - validate the checksums stored in the database files - check files in the database against what's in the archive dsc-syntax - validate the syntax of .dsc files in the archive missing-overrides - check for missing overrides @@ -132,7 +132,7 @@ def check_dscs(): f = line[:-1] try: utils.parse_changes(f, signing_rules=1) - except utils.invalid_dsc_format_exc, line: + except InvalidDscError, line: utils.warn("syntax error in .dsc file '%s', line %s." % (f, line)) count += 1 @@ -194,16 +194,18 @@ SELECT l.path, f.filename FROM files f, dsc_files df, location l WHERE df.source ################################################################################ -def check_md5sums(): +def check_checksums(): print "Getting file information from database..." - q = projectB.query("SELECT l.path, f.filename, f.md5sum, f.size FROM files f, location l WHERE f.location = l.id") + q = projectB.query("SELECT l.path, f.filename, f.md5sum, f.sha1sum, f.sha256sum, f.size FROM files f, location l WHERE f.location = l.id") ql = q.getresult() - print "Checking file md5sums & sizes..." + print "Checking file checksums & sizes..." for i in ql: filename = os.path.abspath(i[0] + i[1]) db_md5sum = i[2] - db_size = int(i[3]) + db_sha1sum = i[3] + db_sha256sum = i[4] + db_size = int(i[5]) try: f = utils.open_file(filename) except: @@ -215,6 +217,20 @@ def check_md5sums(): utils.warn("**WARNING** md5sum mismatch for '%s' ('%s' [current] vs. '%s' [db])." % (filename, md5sum, db_md5sum)) if size != db_size: utils.warn("**WARNING** size mismatch for '%s' ('%s' [current] vs. '%s' [db])." % (filename, size, db_size)) + # Until the main database is filled, we need to not spit 500,000 warnings + # every time we scan the archive. Yet another hack (TM) which can go away + # once this is all working + if db_sha1sum is not None and db_sha1sum != '': + f.seek(0) + sha1sum = apt_pkg.sha1sum(f) + if sha1sum != db_sha1sum: + utils.warn("**WARNING** sha1sum mismatch for '%s' ('%s' [current] vs. '%s' [db])." 
% (filename, sha1sum, db_sha1sum)) + + if db_sha256sum is not None and db_sha256sum != '': + f.seek(0) + sha256sum = apt_pkg.sha256sum(f) + if sha256sum != db_sha256sum: + utils.warn("**WARNING** sha256sum mismatch for '%s' ('%s' [current] vs. '%s' [db])." % (filename, sha256sum, db_sha256sum)) print "Done." @@ -425,8 +441,8 @@ def main (): projectB = pg.connect(Cnf["DB::Name"], Cnf["DB::Host"], int(Cnf["DB::Port"])) database.init(Cnf, projectB) - if mode == "md5sums": - check_md5sums() + if mode == "checksums": + check_checksums() elif mode == "files": check_files() elif mode == "dsc-syntax": diff --git a/dak/check_overrides.py b/dak/check_overrides.py index cdab6449..f276dbae 100644 --- a/dak/check_overrides.py +++ b/dak/check_overrides.py @@ -50,9 +50,9 @@ import pg, sys, os import apt_pkg -import daklib.database as database -import daklib.logging as logging -import daklib.utils as utils +from daklib import database +from daklib import logging +from daklib import utils ################################################################################ @@ -314,10 +314,10 @@ def main (): print "Processing %s%s..." % (osuite, originremark) # Get a list of all suites that use the override file of 'osuite' - ocodename = Cnf["Suite::%s::codename" % osuite] + ocodename = Cnf["Suite::%s::codename" % osuite].lower() suites = [] for suite in Cnf.SubTree("Suite").List(): - if ocodename == Cnf["Suite::%s::OverrideCodeName" % suite]: + if ocodename == Cnf["Suite::%s::OverrideCodeName" % suite].lower(): suites.append(suite) q = projectB.query("SELECT id FROM suite WHERE suite_name in (%s)" \ diff --git a/dak/check_proposed_updates.py b/dak/check_proposed_updates.py index a4e7c0cb..afb0faae 100755 --- a/dak/check_proposed_updates.py +++ b/dak/check_proposed_updates.py @@ -30,8 +30,8 @@ import pg, sys, os import apt_pkg, apt_inst -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils ################################################################################ diff --git a/dak/clean_proposed_updates.py b/dak/clean_proposed_updates.py index fc063ddb..3dd6e6f3 100755 --- a/dak/clean_proposed_updates.py +++ b/dak/clean_proposed_updates.py @@ -21,8 +21,8 @@ import os, pg, re, sys import apt_pkg -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils ################################################################################ diff --git a/dak/clean_queues.py b/dak/clean_queues.py index 30e0baf1..9f771b72 100755 --- a/dak/clean_queues.py +++ b/dak/clean_queues.py @@ -35,7 +35,7 @@ import os, stat, sys, time import apt_pkg -import daklib.utils as utils +from daklib import utils ################################################################################ diff --git a/dak/clean_suites.py b/dak/clean_suites.py index f459cdda..fc4b8473 100755 --- a/dak/clean_suites.py +++ b/dak/clean_suites.py @@ -30,7 +30,7 @@ import os, pg, stat, sys, time import apt_pkg -import daklib.utils as utils +from daklib import utils ################################################################################ diff --git a/dak/compare_suites.py b/dak/compare_suites.py index a3df8298..8c367582 100755 --- a/dak/compare_suites.py +++ b/dak/compare_suites.py @@ -22,8 +22,8 @@ import pg, sys import apt_pkg -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils 
################################################################################ diff --git a/dak/control_overrides.py b/dak/control_overrides.py index 9ea1cd60..0af5c48b 100644 --- a/dak/control_overrides.py +++ b/dak/control_overrides.py @@ -51,9 +51,9 @@ import pg, sys, time import apt_pkg -import daklib.utils as utils -import daklib.database as database -import daklib.logging as logging +from daklib import utils +from daklib import database +from daklib import logging ################################################################################ diff --git a/dak/control_suite.py b/dak/control_suite.py index 63a0386b..4b704b97 100644 --- a/dak/control_suite.py +++ b/dak/control_suite.py @@ -43,9 +43,9 @@ import pg, sys import apt_pkg -import daklib.database as database -import daklib.logging as logging -import daklib.utils as utils +from daklib import database +from daklib import logging +from daklib import utils ####################################################################################### @@ -244,7 +244,11 @@ def main (): if not Cnf.has_key("Control-Suite::Options::%s" % (i)): Cnf["Control-Suite::Options::%s" % (i)] = "" - file_list = apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv) + try: + file_list = apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv); + except SystemError, e: + print "%s\n" % e + usage(1) Options = Cnf.SubTree("Control-Suite::Options") if Options["Help"]: diff --git a/dak/cruft_report.py b/dak/cruft_report.py index 238cdd49..fab47bf0 100755 --- a/dak/cruft_report.py +++ b/dak/cruft_report.py @@ -27,10 +27,10 @@ ################################################################################ -import commands, pg, os, sys, time +import commands, pg, os, sys, time, re import apt_pkg -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils ################################################################################ @@ -51,7 +51,8 @@ Check for obsolete or duplicated packages. -h, --help show this help and exit. -m, --mode=MODE chose the MODE to run in (full or daily). - -s, --suite=SUITE check suite SUITE.""" + -s, --suite=SUITE check suite SUITE. 
+ -w, --wanna-build-dump where to find the copies of http://buildd.debian.org/stats/*.txt""" sys.exit(exit_code) ################################################################################ @@ -114,6 +115,61 @@ def do_anais(architecture, binaries_list, source): anais_output += " o %s: %s\n" % (version, ", ".join(arches)) return anais_output + +################################################################################ + +# Check for out-of-date binaries on architectures that do not want to build that +# package any more, and have them listed as Not-For-Us +def do_nfu(nfu_packages): + output = "" + + a2p = {} + + for architecture in nfu_packages: + a2p[architecture] = [] + for (package,bver,sver) in nfu_packages[architecture]: + output += " * [%s] does not want %s (binary %s, source %s)\n" % (architecture, package, bver, sver) + a2p[architecture].append(package) + + + if output: + print "Obsolete by Not-For-Us" + print "----------------------" + print + print output + + print "Suggested commands:" + for architecture in a2p: + if a2p[architecture]: + print (" dak rm -m \"[auto-cruft] NFU\" -s %s -a %s -b %s" % + (suite, architecture, " ".join(a2p[architecture]))) + print + +def parse_nfu(architecture): + # utils/hpodder_1.1.5.0: Not-For-Us [optional:out-of-date] + r = re.compile("^\w+/([^_]+)_.*: Not-For-Us") + + ret = set() + + filename = "%s/%s-all.txt" % (Cnf["Cruft-Report::Options::Wanna-Build-Dump"], architecture) + + # Not all architectures may have a wanna-build dump, so we want to ignore missing + # files + if os.path.exists(filename): + f = utils.open_file(filename) + for line in f: + if line[0] == ' ': + continue + + m = r.match(line) + if m: + ret.add(m.group(1)) + + f.close() + else: + utils.warn("No wanna-build dump file for architecture %s" % (architecture)) + return ret + ################################################################################ def do_nviu(): @@ -242,6 +298,20 @@ def do_obsolete_source(duplicate_bins, bin2source): print " dak rm -S -p -m \"[auto-cruft] obsolete source package\" %s" % (" ".join(to_remove)) print +def get_suite_binaries(): + # Initialize a large hash table of all binary packages + binaries = {} + before = time.time() + + sys.stderr.write("[Getting a list of binary packages in %s..." % (suite)) + q = projectB.query("SELECT distinct b.package FROM binaries b, bin_associations ba WHERE ba.suite = %s AND ba.bin = b.id" % (suite_id)) + ql = q.getresult() + sys.stderr.write("done.
(%d seconds)]\n" % (int(time.time()-before))) + for i in ql: + binaries[i[0]] = "" + + return binaries + ################################################################################ def main (): @@ -251,7 +321,8 @@ def main (): Arguments = [('h',"help","Cruft-Report::Options::Help"), ('m',"mode","Cruft-Report::Options::Mode", "HasArg"), - ('s',"suite","Cruft-Report::Options::Suite","HasArg")] + ('s',"suite","Cruft-Report::Options::Suite","HasArg"), + ('w',"wanna-build-dump","Cruft-Report::Options::Wanna-Build-Dump","HasArg")] for i in [ "help" ]: if not Cnf.has_key("Cruft-Report::Options::%s" % (i)): Cnf["Cruft-Report::Options::%s" % (i)] = "" @@ -260,6 +331,9 @@ def main (): if not Cnf.has_key("Cruft-Report::Options::Mode"): Cnf["Cruft-Report::Options::Mode"] = "daily" + if not Cnf.has_key("Cruft-Report::Options::Wanna-Build-Dump"): + Cnf["Cruft-Report::Options::Wanna-Build-Dump"] = "/srv/ftp.debian.org/scripts/nfu" + apt_pkg.ParseCommandLine(Cnf, Arguments, sys.argv) Options = Cnf.SubTree("Cruft-Report::Options") @@ -270,7 +344,7 @@ def main (): if Options["Mode"] == "daily": checks = [ "nbs", "nviu", "obsolete source" ] elif Options["Mode"] == "full": - checks = [ "nbs", "nviu", "obsolete source", "dubious nbs", "bnb", "bms", "anais" ] + checks = [ "nbs", "nviu", "obsolete source", "nfu", "dubious nbs", "bnb", "bms", "anais" ] else: utils.warn("%s is not a recognised mode - only 'full' or 'daily' are understood." % (Options["Mode"])) usage(1) @@ -288,20 +362,15 @@ def main (): anais_output = "" duplicate_bins = {} + nfu_packages = {} + suite = Options["Suite"] suite_id = database.get_suite_id(suite) bin_not_built = {} if "bnb" in checks: - # Initalize a large hash table of all binary packages - before = time.time() - sys.stderr.write("[Getting a list of binary packages in %s..." % (suite)) - q = projectB.query("SELECT distinct b.package FROM binaries b, bin_associations ba WHERE ba.suite = %s AND ba.bin = b.id" % (suite_id)) - ql = q.getresult() - sys.stderr.write("done. 
 ################################################################################

 def main ():
@@ -251,7 +321,8 @@ def main ():

     Arguments = [('h',"help","Cruft-Report::Options::Help"),
                  ('m',"mode","Cruft-Report::Options::Mode", "HasArg"),
-                 ('s',"suite","Cruft-Report::Options::Suite","HasArg")]
+                 ('s',"suite","Cruft-Report::Options::Suite","HasArg"),
+                 ('w',"wanna-build-dump","Cruft-Report::Options::Wanna-Build-Dump","HasArg")]
     for i in [ "help" ]:
         if not Cnf.has_key("Cruft-Report::Options::%s" % (i)):
             Cnf["Cruft-Report::Options::%s" % (i)] = ""
@@ -260,6 +331,9 @@ def main ():
     if not Cnf.has_key("Cruft-Report::Options::Mode"):
         Cnf["Cruft-Report::Options::Mode"] = "daily"

+    if not Cnf.has_key("Cruft-Report::Options::Wanna-Build-Dump"):
+        Cnf["Cruft-Report::Options::Wanna-Build-Dump"] = "/srv/ftp.debian.org/scripts/nfu"
+
     apt_pkg.ParseCommandLine(Cnf, Arguments, sys.argv)

     Options = Cnf.SubTree("Cruft-Report::Options")
@@ -270,7 +344,7 @@ def main ():
     if Options["Mode"] == "daily":
         checks = [ "nbs", "nviu", "obsolete source" ]
     elif Options["Mode"] == "full":
-        checks = [ "nbs", "nviu", "obsolete source", "dubious nbs", "bnb", "bms", "anais" ]
+        checks = [ "nbs", "nviu", "obsolete source", "nfu", "dubious nbs", "bnb", "bms", "anais" ]
     else:
         utils.warn("%s is not a recognised mode - only 'full' or 'daily' are understood." % (Options["Mode"]))
         usage(1)
@@ -288,20 +362,15 @@ def main ():
     anais_output = ""
     duplicate_bins = {}

+    nfu_packages = {}
+
     suite = Options["Suite"]
     suite_id = database.get_suite_id(suite)

     bin_not_built = {}

     if "bnb" in checks:
-        # Initalize a large hash table of all binary packages
-        before = time.time()
-        sys.stderr.write("[Getting a list of binary packages in %s..." % (suite))
-        q = projectB.query("SELECT distinct b.package FROM binaries b, bin_associations ba WHERE ba.suite = %s AND ba.bin = b.id" % (suite_id))
-        ql = q.getresult()
-        sys.stderr.write("done. (%d seconds)]\n" % (int(time.time()-before)))
-        for i in ql:
-            bins_in_suite[i[0]] = ""
+        bins_in_suite = get_suite_binaries()

     # Checks based on the Sources files
     components = Cnf.ValueList("Suite::%s::Components" % (suite))
@@ -365,6 +434,11 @@ def main ():
             if (result != 0):
                 sys.stderr.write("Gunzip invocation failed!\n%s\n" % (output))
                 sys.exit(result)
+
+            if "nfu" in checks:
+                nfu_packages.setdefault(architecture,[])
+                nfu_entries = parse_nfu(architecture)
+
             packages = utils.open_file(temp_filename)
             Packages = apt_pkg.ParseTagFile(packages)
             while Packages.Step():
@@ -398,6 +472,11 @@ def main ():
                     duplicate_bins.setdefault(key, [])
                     if package not in duplicate_bins[key]:
                         duplicate_bins[key].append(package)
+                if "nfu" in checks:
+                    if package in nfu_entries and \
+                       version != source_versions[source]: # only suggest to remove out-of-date packages
+                        nfu_packages[architecture].append((package,version,source_versions[source]))
+
             packages.close()
             os.unlink(temp_filename)
@@ -430,6 +509,9 @@ def main ():
         print "="*75
         print

+    if "nfu" in checks:
+        do_nfu(nfu_packages)
+
     if "bnb" in checks:
         print "Unbuilt binary packages"
         print "-----------------------"
diff --git a/dak/dak.py b/dak/dak.py
index 5a986d55..c987c1e7 100755
--- a/dak/dak.py
+++ b/dak/dak.py
@@ -88,6 +88,8 @@ def init():
          "Produce a report on NEW and BYHAND packages"),
         ("show-new",
          "Output html for packages in NEW"),
+        ("show-deferred",
+         "Output html and symlinks for packages in DEFERRED"),
         ("rm",
          "Remove packages from suites"),

diff --git a/dak/daklib b/dak/daklib
new file mode 120000
index 00000000..820fad39
--- /dev/null
+++ b/dak/daklib
@@ -0,0 +1 @@
+../daklib
\ No newline at end of file
diff --git a/dak/decode_dot_dak.py b/dak/decode_dot_dak.py
index d4373641..7ea342bd 100644
--- a/dak/decode_dot_dak.py
+++ b/dak/decode_dot_dak.py
@@ -28,8 +28,8 @@ import sys

 import apt_pkg

-import daklib.queue as queue
-import daklib.utils as utils
+from daklib import queue
+from daklib import utils

 ################################################################################

@@ -101,9 +101,9 @@ def main():
         for f in files.keys():
             print " %s:" % (f)
             for i in [ "package", "version", "architecture", "type", "size",
-                       "md5sum", "component", "location id", "source package",
-                       "source version", "maintainer", "dbtype", "files id",
-                       "new", "section", "priority", "pool name" ]:
+                       "md5sum", "sha1sum", "sha256sum", "component", "location id",
+                       "source package", "source version", "maintainer", "dbtype",
+                       "files id", "new", "section", "priority", "pool name" ]:
                 if files[f].has_key(i):
                     print "  %s: %s" % (i.capitalize(), files[f][i])
                     del files[f][i]
diff --git a/dak/examine_package.py b/dak/examine_package.py
index 81191d25..ae3ec6c9 100755
--- a/dak/examine_package.py
+++ b/dak/examine_package.py
@@ -34,8 +34,8 @@ import errno, os, pg, re, sys, md5
 import apt_pkg, apt_inst

-import daklib.database as database
-import daklib.utils as utils
+from daklib import database
+from daklib import utils

 ################################################################################

diff --git a/dak/find_null_maintainers.py b/dak/find_null_maintainers.py
index c4e457f1..652edfd2 100755
--- a/dak/find_null_maintainers.py
+++ b/dak/find_null_maintainers.py
@@ -21,7 +21,7 @@ import ldap, pg, sys, time
 import apt_pkg

-import daklib.utils as utils
+from daklib import utils

 ################################################################################

diff --git a/dak/generate_index_diffs.py b/dak/generate_index_diffs.py
index 7cbedb9c..286c1eba 100755
---
a/dak/generate_index_diffs.py +++ b/dak/generate_index_diffs.py @@ -32,7 +32,7 @@ import sys, os, tempfile import apt_pkg -import daklib.utils as utils +from daklib import utils ################################################################################ diff --git a/dak/generate_releases.py b/dak/generate_releases.py index cb82a947..00831192 100755 --- a/dak/generate_releases.py +++ b/dak/generate_releases.py @@ -24,7 +24,8 @@ import sys, os, popen2, tempfile, stat, time, pg import apt_pkg -import daklib.utils as utils +from daklib import utils +from daklib.dak_exceptions import * ################################################################################ @@ -107,7 +108,7 @@ def print_md5sha_files (tree, files, hashop): else: size = os.stat(path + name)[stat.ST_SIZE] file_handle = utils.open_file(path + name) - except utils.cant_open_exc: + except CantOpenError: print "ALERT: Couldn't open " + path + name else: hash = hashop(file_handle) @@ -217,6 +218,11 @@ def main (): if codename != "": out.write("Codename: %s\n" % (codename)) out.write("Date: %s\n" % (time.strftime("%a, %d %b %Y %H:%M:%S UTC", time.gmtime(time.time())))) + + if SuiteBlock.has_key("ValidTime"): + validtime=float(SuiteBlock["ValidTime"]) + out.write("Valid-Until: %s\n" % (time.strftime("%a, %d %b %Y %H:%M:%S UTC", time.gmtime(time.time()+validtime)))) + if notautomatic != "": out.write("NotAutomatic: %s\n" % (notautomatic)) out.write("Architectures: %s\n" % (" ".join(filter(utils.real_arch, SuiteBlock.ValueList("Architectures"))))) diff --git a/dak/import_archive.py b/dak/import_archive.py index b8884ab2..4432ff1a 100755 --- a/dak/import_archive.py +++ b/dak/import_archive.py @@ -38,8 +38,9 @@ import commands, os, pg, re, sys, time import apt_pkg -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils +from daklib.dak_exceptions import * ############################################################################### @@ -323,7 +324,7 @@ def process_sources (filename, suite, component, archive): suite_id = database.get_suite_id(suite) try: file = utils.open_file (filename) - except utils.cant_open_exc: + except CantOpenError: utils.warn("can't open '%s'" % (filename)) return Scanner = apt_pkg.ParseTagFile(file) @@ -406,7 +407,7 @@ def process_packages (filename, suite, component, archive): suite_id = database.get_suite_id(suite) try: file = utils.open_file (filename) - except utils.cant_open_exc: + except CantOpenError: utils.warn("can't open '%s'" % (filename)) return Scanner = apt_pkg.ParseTagFile(file) @@ -432,8 +433,12 @@ def process_packages (filename, suite, component, archive): if not source_version: source_version = version filename = Scanner.Section["filename"] + if filename.endswith(".deb"): + type = "deb" + else: + type = "udeb" location = get_location_path(filename.split('/')[0]) - location_id = database.get_location_id (location, component, archive) + location_id = database.get_location_id (location, component.replace("/debian-installer", ""), archive) filename = poolify (filename, location) if architecture == "all": filename = re_arch_from_filename.sub("binary-all", filename) @@ -442,7 +447,6 @@ def process_packages (filename, suite, component, archive): size = Scanner.Section["size"] md5sum = Scanner.Section["md5sum"] files_id = get_or_set_files_id (filename, size, md5sum, location_id) - type = "deb"; # FIXME cache_key = "%s_%s_%s_%d_%d_%d_%d" % (package, version, repr(source_id), architecture_id, location_id, files_id, suite_id) if 
not arch_all_cache.has_key(cache_key): arch_all_cache[cache_key] = 1 @@ -565,7 +569,9 @@ Please read the documentation before running this script. process_packages (packages, suite, "", server) elif type == "legacy" or type == "pool": for suite in Cnf.ValueList("Location::%s::Suites" % (location)): - for component in Cnf.SubTree("Component").List(): + udeb_components = map(lambda x: x+"/debian-installer", + Cnf.ValueList("Suite::%s::UdebComponents" % suite)) + for component in Cnf.SubTree("Component").List() + udeb_components: architectures = filter(utils.real_arch, Cnf.ValueList("Suite::%s::Architectures" % (suite))) for architecture in architectures: diff --git a/dak/import_keyring.py b/dak/import_keyring.py index 6d91b460..a70a2e78 100755 --- a/dak/import_keyring.py +++ b/dak/import_keyring.py @@ -19,8 +19,8 @@ ################################################################################ -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils import sys, os, re import apt_pkg, pg, ldap, email.Utils diff --git a/dak/import_ldap_fingerprints.py b/dak/import_ldap_fingerprints.py index 85cd7a22..b5682853 100755 --- a/dak/import_ldap_fingerprints.py +++ b/dak/import_ldap_fingerprints.py @@ -46,8 +46,8 @@ import commands, ldap, pg, re, sys import apt_pkg -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils ################################################################################ diff --git a/dak/import_users_from_passwd.py b/dak/import_users_from_passwd.py index af9d1239..b182f604 100755 --- a/dak/import_users_from_passwd.py +++ b/dak/import_users_from_passwd.py @@ -31,7 +31,7 @@ import pg, pwd, sys import apt_pkg -import daklib.utils as utils +from daklib import utils ################################################################################ diff --git a/dak/init_db.py b/dak/init_db.py index 16a8d8ba..d40ad0c0 100755 --- a/dak/init_db.py +++ b/dak/init_db.py @@ -21,8 +21,8 @@ import pg, sys import apt_pkg -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils ################################################################################ diff --git a/dak/init_dirs.py b/dak/init_dirs.py index f92515e2..d095eeee 100755 --- a/dak/init_dirs.py +++ b/dak/init_dirs.py @@ -21,7 +21,7 @@ import os, sys import apt_pkg -import daklib.utils as utils +from daklib import utils ################################################################################ diff --git a/dak/ls.py b/dak/ls.py index 2b445d82..baf13733 100755 --- a/dak/ls.py +++ b/dak/ls.py @@ -28,8 +28,8 @@ import os, pg, sys import apt_pkg -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils ################################################################################ diff --git a/dak/make_maintainers.py b/dak/make_maintainers.py index c3905af9..090b8d43 100755 --- a/dak/make_maintainers.py +++ b/dak/make_maintainers.py @@ -27,8 +27,8 @@ import pg, sys import apt_pkg -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils ################################################################################ diff --git a/dak/make_overrides.py b/dak/make_overrides.py index 77e7bb8c..1087ce28 100755 --- a/dak/make_overrides.py +++ b/dak/make_overrides.py @@ -27,8 +27,8 @@ import pg, sys import apt_pkg 
-import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils ################################################################################ diff --git a/dak/make_suite_file_list.py b/dak/make_suite_file_list.py index ca4e0363..e3664385 100755 --- a/dak/make_suite_file_list.py +++ b/dak/make_suite_file_list.py @@ -36,9 +36,9 @@ import copy, os, pg, sys import apt_pkg import symlink_dists -import daklib.database as database -import daklib.logging as logging -import daklib.utils as utils +from daklib import database +from daklib import logging +from daklib import utils ################################################################################ @@ -416,7 +416,7 @@ def main(): ('n', "no-delete", "Make-Suite-File-List::Options::No-Delete"), ('f', "force", "Make-Suite-File-List::Options::Force"), ('s', "suite", "Make-Suite-File-List::Options::Suite", "HasArg")] - for i in ["architecture", "component", "help", "no-delete", "suite", "force-touch" ]: + for i in ["architecture", "component", "help", "no-delete", "suite", "force" ]: if not Cnf.has_key("Make-Suite-File-List::Options::%s" % (i)): Cnf["Make-Suite-File-List::Options::%s" % (i)] = "" apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv) diff --git a/dak/new_security_install.py b/dak/new_security_install.py index ca119c81..a6469e27 100755 --- a/dak/new_security_install.py +++ b/dak/new_security_install.py @@ -20,10 +20,10 @@ ################################################################################ -import daklib.queue as queue -import daklib.logging as logging -import daklib.utils as utils -import daklib.database as database +from daklib import queue +from daklib import logging +from daklib import utils +from daklib import database import apt_pkg, os, sys, pwd, time, re, commands re_taint_free = re.compile(r"^['/;\-\+\.~\s\w]+$"); @@ -235,10 +235,6 @@ def actually_upload(changes_files): files = Upload.pkg.files changes = Upload.pkg.changes dsc = Upload.pkg.dsc - # We have the changes, now return if its amd64, to not upload them to ftp-master - if changes["distribution"].has_key("oldstable-security") and changes["architecture"].has_key("amd64"): - print "Not uploading amd64 oldstable-security changes to ftp-master\n" - continue # Build the file list for this .changes file for file in files.keys(): poolname = os.path.join(Cnf["Dir::Root"], Cnf["Dir::PoolRoot"], diff --git a/dak/override.py b/dak/override.py index ffa6ae9a..ca3cb41d 100755 --- a/dak/override.py +++ b/dak/override.py @@ -27,9 +27,9 @@ import pg, sys import apt_pkg -import daklib.logging as logging -import daklib.database as database -import daklib.utils as utils +from daklib import logging +from daklib import database +from daklib import utils ################################################################################ @@ -247,10 +247,11 @@ def main (): Subst["__BCC__"] = "Bcc: " + ", ".join(bcc) else: Subst["__BCC__"] = "X-Filler: 42" - Subst["__CC__"] = "X-DAK: dak override\nX-Katie: alicia $Revision: 1.6$" + Subst["__CC__"] = "X-DAK: dak override\nX-Katie: alicia" Subst["__ADMIN_ADDRESS__"] = Cnf["Dinstall::MyAdminAddress"] Subst["__DISTRO__"] = Cnf["Dinstall::MyDistribution"] Subst["__WHOAMI__"] = utils.whoami() + Subst["__SOURCE__"] = package summary = "Concerning package %s...\n" % (package) summary += "Operating on the %s suite\n" % (suite) diff --git a/dak/process_accepted.py b/dak/process_accepted.py index 605b5c23..6013b18c 100755 --- a/dak/process_accepted.py +++ b/dak/process_accepted.py @@ 
-31,10 +31,11 @@ import errno, fcntl, os, sys, time, re import apt_pkg -import daklib.database as database -import daklib.logging as logging -import daklib.queue as queue -import daklib.utils as utils +from daklib import database +from daklib import logging +from daklib import queue +from daklib import utils +from daklib.dak_exceptions import * ############################################################################### @@ -273,6 +274,14 @@ def install (): # Begin a transaction; if we bomb out anywhere between here and the COMMIT WORK below, the DB will not be changed. projectB.query("BEGIN WORK") + # Ensure that we have all the hashes we need below. + rejmsg = utils.ensure_hashes(changes, dsc, files, dsc_files) + if len(rejmsg) > 0: + # There were errors. Print them and SKIP the changes. + for msg in rejmsg: + utils.warn(msg) + return + # Add the .dsc file to the DB for file in files.keys(): if files[file]["type"] == "dsc": @@ -290,7 +299,7 @@ def install (): dsc_component = files[file]["component"] dsc_location_id = files[file]["location id"] if not files[file].has_key("files id") or not files[file]["files id"]: - files[file]["files id"] = database.set_files_id (filename, files[file]["size"], files[file]["md5sum"], dsc_location_id) + files[file]["files id"] = database.set_files_id (filename, files[file]["size"], files[file]["md5sum"], files[file]["sha1sum"], files[file]["sha256sum"], dsc_location_id) projectB.query("INSERT INTO source (source, version, maintainer, changedby, file, install_date, sig_fpr) VALUES ('%s', '%s', %d, %d, %d, '%s', %s)" % (package, version, maintainer_id, changedby_id, files[file]["files id"], install_date, fingerprint_id)) @@ -309,7 +318,7 @@ def install (): files_id = database.get_files_id(filename, dsc_files[dsc_file]["size"], dsc_files[dsc_file]["md5sum"], dsc_location_id) # FIXME: needs to check for -1/-2 and or handle exception if files_id == None: - files_id = database.set_files_id (filename, dsc_files[dsc_file]["size"], dsc_files[dsc_file]["md5sum"], dsc_location_id) + files_id = database.set_files_id (filename, dsc_files[dsc_file]["size"], dsc_files[dsc_file]["md5sum"], files[dsc_file]["sha1sum"], files[dsc_file]["sha256sum"], dsc_location_id) projectB.query("INSERT INTO dsc_files (source, file) VALUES (currval('source_id_seq'), %d)" % (files_id)) # Add the src_uploaders to the DB @@ -348,14 +357,13 @@ def install (): if not files[file].has_key("location id") or not files[file]["location id"]: files[file]["location id"] = database.get_location_id(Cnf["Dir::Pool"],files[file]["component"],utils.where_am_i()) if not files[file].has_key("files id") or not files[file]["files id"]: - files[file]["files id"] = database.set_files_id (filename, files[file]["size"], files[file]["md5sum"], files[file]["location id"]) + files[file]["files id"] = database.set_files_id (filename, files[file]["size"], files[file]["md5sum"], files[file]["sha1sum"], files[file]["sha256sum"], files[file]["location id"]) source_id = database.get_source_id (source, source_version) if source_id: projectB.query("INSERT INTO binaries (package, version, maintainer, source, architecture, file, type, sig_fpr) VALUES ('%s', '%s', %d, %d, %d, %d, '%s', %d)" % (package, version, maintainer_id, source_id, architecture_id, files[file]["files id"], type, fingerprint_id)) else: - projectB.query("INSERT INTO binaries (package, version, maintainer, architecture, file, type, sig_fpr) VALUES ('%s', '%s', %d, %d, %d, '%s', %d)" - % (package, version, maintainer_id, architecture_id, files[file]["files 
id"], type, fingerprint_id)) + raise NoSourceFieldError, "Unable to find a source id for %s (%s), %s, file %s, type %s, signed by %s" % (package, version, architecture, file, type, sig_fpr) for suite in changes["distribution"].keys(): suite_id = database.get_suite_id(suite) projectB.query("INSERT INTO bin_associations (suite, bin) VALUES (%d, currval('binaries_id_seq'))" % (suite_id)) @@ -388,16 +396,18 @@ def install (): # if changes["architecture"].has_key("source") and orig_tar_id and \ orig_tar_location != "legacy" and orig_tar_location != dsc_location_id: - q = projectB.query("SELECT l.path, f.filename, f.size, f.md5sum FROM files f, location l WHERE f.id = %s AND f.location = l.id" % (orig_tar_id)) + q = projectB.query("SELECT l.path, f.filename, f.size, f.md5sum, f.sha1sum, f.sha256sum FROM files f, location l WHERE f.id = %s AND f.location = l.id" % (orig_tar_id)) ql = q.getresult()[0] old_filename = ql[0] + ql[1] file_size = ql[2] file_md5sum = ql[3] + file_sha1sum = ql[4] + file_sha256sum = ql[5] new_filename = utils.poolify(changes["source"], dsc_component) + os.path.basename(old_filename) new_files_id = database.get_files_id(new_filename, file_size, file_md5sum, dsc_location_id) if new_files_id == None: utils.copy(old_filename, Cnf["Dir::Pool"] + new_filename) - new_files_id = database.set_files_id(new_filename, file_size, file_md5sum, dsc_location_id) + new_files_id = database.set_files_id(new_filename, file_size, file_md5sum, file_sha1sum, file_sha256sum, dsc_location_id) projectB.query("UPDATE dsc_files SET file = %s WHERE source = %s AND file = %s" % (new_files_id, source_id, orig_tar_id)) # Install the files into the pool diff --git a/dak/process_new.py b/dak/process_new.py index c13ff781..3dd93ada 100755 --- a/dak/process_new.py +++ b/dak/process_new.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# vim:set et ts=4 sw=4: # Handles NEW and BYHAND packages # Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006 James Troup @@ -39,10 +40,10 @@ import copy, errno, os, readline, stat, sys, time import apt_pkg, apt_inst import examine_package -import daklib.database as database -import daklib.logging as logging -import daklib.queue as queue -import daklib.utils as utils +from daklib import database +from daklib import logging +from daklib import queue +from daklib import utils # Globals Cnf = None @@ -151,7 +152,7 @@ def indiv_sg_compare (a, b): def sg_compare (a, b): a = a[1] b = b[1] - """Sort by have note, time of oldest upload.""" + """Sort by have note, source already in database and time of oldest upload.""" # Sort by have note a_note_state = a["note_state"] b_note_state = b["note_state"] @@ -159,6 +160,10 @@ def sg_compare (a, b): return -1 elif a_note_state > b_note_state: return 1 + # Sort by source already in database (descending) + source_in_database = cmp(a["source_in_database"], b["source_in_database"]) + if source_in_database: + return -source_in_database # Sort by time of oldest upload return cmp(a["oldest"], b["oldest"]) @@ -193,6 +198,9 @@ def sort_changes(changes_files): per_source[source]["list"].append(cache[filename]) # Determine oldest time and have note status for each source group for source in per_source.keys(): + q = projectB.query("SELECT 1 FROM source WHERE source = '%s'" % source) + ql = q.getresult() + per_source[source]["source_in_database"] = len(ql)>0 source_list = per_source[source]["list"] first = source_list[0] oldest = os.stat(first["filename"])[stat.ST_MTIME] @@ -806,21 +814,107 @@ def move_to_dir (dest, perms=0660, changesperms=0664): for f in 
file_keys: utils.move (f, dest, perms=perms) +def is_source_in_queue_dir(qdir): + entries = [ x for x in os.listdir(qdir) if x.startswith(Upload.pkg.changes["source"]) + and x.endswith(".changes") ] + for entry in entries: + # read the .dak + u = queue.Upload(Cnf) + u.pkg.changes_file = os.path.join(qdir, entry) + u.update_vars() + if not u.pkg.changes["architecture"].has_key("source"): + # another binary upload, ignore + continue + if Upload.pkg.changes["version"] != u.pkg.changes["version"]: + # another version, ignore + continue + # found it! + return True + return False + +def move_to_holding(suite, queue_dir): + print "Moving to %s holding area." % (suite.upper(),) + if Options["No-Action"]: + return + Logger.log(["Moving to %s" % (suite,), Upload.pkg.changes_file]) + Upload.dump_vars(queue_dir) + move_to_dir(queue_dir) + os.unlink(Upload.pkg.changes_file[:-8]+".dak") + +def _accept(): + if Options["No-Action"]: + return + (summary, short_summary) = Upload.build_summaries() + Upload.accept(summary, short_summary) + os.unlink(Upload.pkg.changes_file[:-8]+".dak") + +def do_accept_stableupdate(suite, q): + queue_dir = Cnf["Dir::Queue::%s" % (q,)] + if not Upload.pkg.changes["architecture"].has_key("source"): + # It is not a sourceful upload. So its source may be either in p-u + # holding, in new, in accepted or already installed. + if is_source_in_queue_dir(queue_dir): + # It's in p-u holding, so move it there. + print "Binary-only upload, source in %s." % (q,) + move_to_holding(suite, queue_dir) + elif Upload.source_exists(Upload.pkg.changes["source"], + Upload.pkg.changes["version"]): + # dak tells us that there is source available. At time of + # writing this means that it is installed, so put it into + # accepted. + print "Binary-only upload, source installed." + _accept() + elif is_source_in_queue_dir(Cnf["Dir::Queue::Accepted"]): + # The source is in accepted, the binary cleared NEW: accept it. + print "Binary-only upload, source in accepted." + _accept() + elif is_source_in_queue_dir(Cnf["Dir::Queue::New"]): + # It's in NEW. We expect the source to land in p-u holding + # pretty soon. + print "Binary-only upload, source in new." + move_to_holding(suite, queue_dir) + else: + # No case applicable. Bail out. Return will cause the upload + # to be skipped. + print "ERROR" + print "Stable update failed. Source not found." + return + else: + # We are handling a sourceful upload. Move to accepted if currently + # in p-u holding and to p-u holding otherwise. + if is_source_in_queue_dir(queue_dir): + print "Sourceful upload in %s, accepting." 
% (q,) + _accept() + else: + move_to_holding(suite, queue_dir) + def do_accept(): print "ACCEPT" if not Options["No-Action"]: get_accept_lock() (summary, short_summary) = Upload.build_summaries() - if Cnf.FindB("Dinstall::SecurityQueueHandling"): - Upload.dump_vars(Cnf["Dir::Queue::Embargoed"]) - move_to_dir(Cnf["Dir::Queue::Embargoed"]) - Upload.queue_build("embargoed", Cnf["Dir::Queue::Embargoed"]) - # Check for override disparities - Upload.Subst["__SUMMARY__"] = summary - else: - Upload.accept(summary, short_summary) - os.unlink(Upload.pkg.changes_file[:-8]+".dak") - os.unlink(Cnf["Process-New::AcceptedLockFile"]) + try: + if Cnf.FindB("Dinstall::SecurityQueueHandling"): + Upload.dump_vars(Cnf["Dir::Queue::Embargoed"]) + move_to_dir(Cnf["Dir::Queue::Embargoed"]) + Upload.queue_build("embargoed", Cnf["Dir::Queue::Embargoed"]) + # Check for override disparities + Upload.Subst["__SUMMARY__"] = summary + else: + # Stable updates need to be copied to proposed-updates holding + # area instead of accepted. Sourceful uploads need to go + # to it directly, binaries only if the source has not yet been + # accepted into p-u. + for suite, q in [("proposed-updates", "ProposedUpdates"), + ("oldstable-proposed-updates", "OldProposedUpdates")]: + if not Upload.pkg.changes["distribution"].has_key(suite): + continue + return do_accept_stableupdate(suite, q) + # Just a normal upload, accept it... + _accept() + finally: + if not Options["No-Action"]: + os.unlink(Cnf["Process-New::AcceptedLockFile"]) def check_status(files): new = byhand = 0 diff --git a/dak/process_unchecked.py b/dak/process_unchecked.py index fbd7d74d..5884db18 100755 --- a/dak/process_unchecked.py +++ b/dak/process_unchecked.py @@ -30,10 +30,11 @@ import commands, errno, fcntl, os, re, shutil, stat, sys, time, tempfile, traceback import apt_inst, apt_pkg -import daklib.database as database -import daklib.logging as logging -import daklib.queue as queue -import daklib.utils as utils +from daklib import database +from daklib import logging +from daklib import queue +from daklib import utils +from daklib.dak_exceptions import * from types import * @@ -181,19 +182,19 @@ def check_changes(): # Parse the .changes field into a dictionary try: changes.update(utils.parse_changes(filename)) - except utils.cant_open_exc: + except CantOpenError: reject("%s: can't read file." % (filename)) return 0 - except utils.changes_parse_error_exc, line: + except ParseChangesError, line: reject("%s: parse error, can't grok: %s." % (filename, line)) return 0 # Parse the Files field from the .changes into another dictionary try: files.update(utils.build_file_list(changes)) - except utils.changes_parse_error_exc, line: + except ParseChangesError, line: reject("%s: parse error, can't grok: %s." % (filename, line)) - except utils.nk_format_exc, format: + except UnknownFormatError, format: reject("%s: unknown format '%s'." 
% (filename, format)) return 0 @@ -226,7 +227,7 @@ def check_changes(): (changes["maintainer822"], changes["maintainer2047"], changes["maintainername"], changes["maintaineremail"]) = \ utils.fix_maintainer (changes["maintainer"]) - except utils.ParseMaintError, msg: + except ParseMaintError, msg: reject("%s: Maintainer field ('%s') failed to parse: %s" \ % (filename, changes["maintainer"], msg)) @@ -235,7 +236,7 @@ def check_changes(): (changes["changedby822"], changes["changedby2047"], changes["changedbyname"], changes["changedbyemail"]) = \ utils.fix_maintainer (changes.get("changed-by", "")) - except utils.ParseMaintError, msg: + except ParseMaintError, msg: (changes["changedby822"], changes["changedby2047"], changes["changedbyname"], changes["changedbyemail"]) = \ ("", "", "", "") @@ -685,21 +686,24 @@ def check_dsc(): # Parse the .dsc file try: dsc.update(utils.parse_changes(dsc_filename, signing_rules=1)) - except utils.cant_open_exc: + except CantOpenError: # if not -n copy_to_holding() will have done this for us... if Options["No-Action"]: reject("%s: can't read file." % (dsc_filename)) - except utils.changes_parse_error_exc, line: + except ParseChangesError, line: reject("%s: parse error, can't grok: %s." % (dsc_filename, line)) - except utils.invalid_dsc_format_exc, line: + except InvalidDscError, line: reject("%s: syntax error on line %s." % (dsc_filename, line)) # Build up the file list of files mentioned by the .dsc try: dsc_files.update(utils.build_file_list(dsc, is_a_dsc=1)) - except utils.no_files_exc: + except NoFilesFieldError: reject("%s: no Files: field." % (dsc_filename)) return 0 - except utils.changes_parse_error_exc, line: + except UnknownFormatError, format: + reject("%s: unknown format '%s'." % (dsc_filename, format)) + return 0 + except ParseChangesError, line: reject("%s: parse error, can't grok: %s." % (dsc_filename, line)) return 0 @@ -723,7 +727,7 @@ def check_dsc(): # Validate the Maintainer field try: utils.fix_maintainer (dsc["maintainer"]) - except utils.ParseMaintError, msg: + except ParseMaintError, msg: reject("%s: Maintainer field ('%s') failed to parse: %s" \ % (dsc_filename, dsc["maintainer"], msg)) @@ -773,6 +777,8 @@ def check_dsc(): files[orig_tar_gz] = {} files[orig_tar_gz]["size"] = os.stat(orig_tar_gz)[stat.ST_SIZE] files[orig_tar_gz]["md5sum"] = dsc_files[orig_tar_gz]["md5sum"] + files[orig_tar_gz]["sha1sum"] = dsc_files[orig_tar_gz]["sha1sum"] + files[orig_tar_gz]["sha256sum"] = dsc_files[orig_tar_gz]["sha256sum"] files[orig_tar_gz]["section"] = files[dsc_filename]["section"] files[orig_tar_gz]["priority"] = files[dsc_filename]["priority"] files[orig_tar_gz]["component"] = files[dsc_filename]["component"] @@ -905,88 +911,23 @@ def check_urgency (): if changes["architecture"].has_key("source"): if not changes.has_key("urgency"): changes["urgency"] = Cnf["Urgency::Default"] + changes["urgency"] = changes["urgency"].lower() if changes["urgency"] not in Cnf.ValueList("Urgency::Valid"): reject("%s is not a valid urgency; it will be treated as %s by testing." 
% (changes["urgency"], Cnf["Urgency::Default"]), "Warning: ") changes["urgency"] = Cnf["Urgency::Default"] - changes["urgency"] = changes["urgency"].lower() ################################################################################ def check_hashes (): - # Make sure we recognise the format of the Files: field - format = changes.get("format", "0.0").split(".",1) - if len(format) == 2: - format = int(format[0]), int(format[1]) - else: - format = int(float(format[0])), 0 - - check_hash(".changes", files, "md5sum", apt_pkg.md5sum) - check_hash(".dsc", dsc_files, "md5sum", apt_pkg.md5sum) - - if format >= (1,8): - hashes = [("sha1", apt_pkg.sha1sum), - ("sha256", apt_pkg.sha256sum)] - else: - hashes = [] - - for x in changes: - if x.startswith("checksum-"): - h = x.split("-",1)[1] - if h not in dict(hashes): - reject("Unsupported checksum field in .changes" % (h)) - - for x in dsc: - if x.startswith("checksum-"): - h = x.split("-",1)[1] - if h not in dict(hashes): - reject("Unsupported checksum field in .dsc" % (h)) - - for h,f in hashes: - try: - fs = utils.build_file_list(changes, 0, "checksums-%s" % h, h) - check_hash(".changes %s" % (h), fs, h, f, files) - except utils.no_files_exc: - reject("No Checksums-%s: field in .changes" % (h)) - except utils.changes_parse_error_exc, line: - reject("parse error for Checksums-%s in .changes, can't grok: %s." % (h, line)) + utils.check_hash(".changes", files, "md5", apt_pkg.md5sum) + utils.check_size(".changes", files) + utils.check_hash(".dsc", dsc_files, "md5", apt_pkg.md5sum) + utils.check_size(".dsc", dsc_files) - if "source" not in changes["architecture"]: continue - - try: - fs = utils.build_file_list(dsc, 1, "checksums-%s" % h, h) - check_hash(".dsc %s" % (h), fs, h, f, dsc_files) - except utils.no_files_exc: - reject("No Checksums-%s: field in .dsc" % (h)) - except utils.changes_parse_error_exc, line: - reject("parse error for Checksums-%s in .dsc, can't grok: %s." % (h, line)) - -################################################################################ - -def check_hash (where, lfiles, key, testfn, basedict = None): - if basedict: - for f in basedict.keys(): - if f not in lfiles: - reject("%s: no %s checksum" % (f, key)) - - for f in lfiles.keys(): - if basedict and f not in basedict: - reject("%s: extraneous entry in %s checksums" % (f, key)) - - try: - file_handle = utils.open_file(f) - except utils.cant_open_exc: - continue - - # Check hash - if testfn(file_handle) != lfiles[f][key]: - reject("%s: %s check failed." 
% (f, key)) - file_handle.close() - # Check size - actual_size = os.stat(f)[stat.ST_SIZE] - size = int(lfiles[f]["size"]) - if size != actual_size: - reject("%s: actual file size (%s) does not match size (%s) in %s" - % (f, actual_size, size, where)) + # This is stupid API, but it'll have to do for now until + # we actually have proper abstraction + for m in utils.ensure_hashes(changes, dsc, files, dsc_files): + reject(m) ################################################################################ @@ -1109,8 +1050,6 @@ def check_signed_by_key(): for si in q.getresult(): if si[0] not in source_ids: source_ids.append(si[0]) - print "source_ids: %s" % (",".join([str(x) for x in source_ids])) - is_nmu = 1 for si in source_ids: is_nmu = 1 diff --git a/dak/queue_report.py b/dak/queue_report.py old mode 100644 new mode 100755 index f1e5287f..aa1c54da --- a/dak/queue_report.py +++ b/dak/queue_report.py @@ -36,8 +36,9 @@ import copy, glob, os, stat, sys, time import apt_pkg -import daklib.queue as queue -import daklib.utils as utils +from daklib import queue +from daklib import utils +from daklib.dak_exceptions import * Cnf = None Upload = None @@ -322,7 +323,7 @@ def process_changes_files(changes_files, type): (maintainer["maintainer822"], maintainer["maintainer2047"], maintainer["maintainername"], maintainer["maintaineremail"]) = \ utils.fix_maintainer (j["maintainer"]) - except utils.ParseMaintError, msg: + except ParseMaintError, msg: print "Problems while parsing maintainer address\n" maintainer["maintainername"] = "Unknown" maintainer["maintaineremail"] = "Unknown" diff --git a/dak/reject_proposed_updates.py b/dak/reject_proposed_updates.py old mode 100644 new mode 100755 index 060ad513..a61db174 --- a/dak/reject_proposed_updates.py +++ b/dak/reject_proposed_updates.py @@ -21,10 +21,10 @@ import os, pg, sys import apt_pkg -import daklib.database as database -import daklib.logging as logging -import daklib.queue as queue -import daklib.utils as utils +from daklib import database +from daklib import logging +from daklib import queue +from daklib import utils ################################################################################ @@ -168,6 +168,7 @@ def reject (reject_message = ""): Upload.Subst["__REJECTOR_ADDRESS__"] = user_email_address Upload.Subst["__MANUAL_REJECT_MESSAGE__"] = reject_message Upload.Subst["__STABLE_REJECTOR__"] = Cnf["Reject-Proposed-Updates::StableRejector"] + Upload.Subst["__STABLE_MAIL__"] = Cnf["Reject-Proposed-Updates::StableMail"] Upload.Subst["__MORE_INFO_URL__"] = Cnf["Reject-Proposed-Updates::MoreInfoURL"] Upload.Subst["__CC__"] = "Cc: " + Cnf["Dinstall::MyEmailAddress"] reject_mail_message = utils.TemplateSubst(Upload.Subst,Cnf["Dir::Templates"]+"/reject-proposed-updates.rejected") diff --git a/dak/rm.py b/dak/rm.py index d5663e34..7535562a 100755 --- a/dak/rm.py +++ b/dak/rm.py @@ -41,8 +41,9 @@ import commands, os, pg, re, sys import apt_pkg, apt_inst -import daklib.database as database -import daklib.utils as utils +from daklib import database +from daklib import utils +from daklib.dak_exceptions import * ################################################################################ @@ -364,7 +365,7 @@ def main (): filename = "/".join(source_packages[i]) try: dsc = utils.parse_changes(filename) - except utils.cant_open_exc: + except CantOpenError: utils.warn("couldn't open '%s'." 
% (filename))
            continue
        for package in dsc.get("binary").split(','):
@@ -510,7 +511,7 @@ def main ():
         Subst["__BCC__"] = "Bcc: " + ", ".join(bcc)
     else:
         Subst["__BCC__"] = "X-Filler: 42"
-    Subst["__CC__"] = "X-DAK: dak rm\nX-Katie: melanie $Revision: 1.44 $"
+    Subst["__CC__"] = "X-DAK: dak rm\nX-Katie: melanie"
     if carbon_copy:
         Subst["__CC__"] += "\nCc: " + ", ".join(carbon_copy)
     Subst["__SUITE_LIST__"] = suites_list
diff --git a/dak/security_install.py b/dak/security_install.py
index 34efeb4c..3aebedc1 100644
--- a/dak/security_install.py
+++ b/dak/security_install.py
@@ -32,7 +32,7 @@ import commands, os, pwd, re, sys, time
 import apt_pkg

-import daklib.queue as queue
+from daklib import queue
 import daklib.utils

 ################################################################################

diff --git a/dak/show_deferred.py b/dak/show_deferred.py
new file mode 100755
index 00000000..bb29fdda
--- /dev/null
+++ b/dak/show_deferred.py
@@ -0,0 +1,245 @@
+#!/usr/bin/env python
+
+# based on queue-report
+# Copyright (C) 2001, 2002, 2003, 2005, 2006 James Troup
+# Copyright (C) 2008 Thomas Viehmann
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import sys, os, re, time
+import apt_pkg
+import tempfile
+from debian_bundle import deb822
+from daklib import database
+from daklib import queue
+from daklib import utils
+
+################################################################################
+### work around bug #487902 in debian-python 0.1.10
+deb822.Changes._multivalued_fields = {
+    "files": [ "md5sum", "size", "section", "priority", "name" ],
+    "checksums-sha1": ["sha1", "size", "name"],
+    "checksums-sha256": ["sha256", "size", "name"],
+    }
+
+################################################################################
+
+row_number = 1
+
+html_escaping = {'"':'&quot;', '&':'&amp;', '<':'&lt;', '>':'&gt;'}
+re_html_escaping = re.compile('|'.join(map(re.escape, html_escaping.keys())))
+def html_escape(s):
+    return re_html_escaping.sub(lambda x: html_escaping.get(x.group(0)), s)
+
+################################################################################
+
+def header():
+    return """[HTML template lost in extraction: page head titled
+      "Deferred uploads to Debian" and a Debian Project logo banner]
+    """
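(Illustration with a hypothetical uploader name: html_escape() above entity-escapes the four markup-significant characters before uploader-controlled strings are embedded in the page.)

    print html_escape('Jane Doe <jane@example.org> & "friends"')
    # Jane Doe &lt;jane@example.org&gt; &amp; &quot;friends&quot;
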
+def footer():
+    res = "[HTML lost in extraction] Timestamp: %s (UTC)" % (time.strftime("%d.%m.%Y / %H:%M:%S", time.gmtime()))
+    res += """[HTML lost in extraction: "Valid HTML 4.01!" and "Valid CSS!" validator badges]"""
+    res += "[closing markup lost in extraction]"
+    return res
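(Illustration: the timestamp format footer() passes to time.strftime, applied to the epoch for a reproducible result.)

    import time
    print time.strftime("%d.%m.%Y / %H:%M:%S", time.gmtime(0))
    # 01.01.1970 / 00:00:00
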
+def table_header():
+    return """[HTML lost in extraction: "Deferred uploads" heading and the
+      opening of a table whose header row reads
+      Change | Time remaining | Uploader | Closes]
+    """
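(Illustration with made-up values: table_row() below relies on Python's string-repetition idiom to emit one cell per field; square brackets stand in for the lost cell markup.)

    print (3 * '[%s]') % ('foo_1.0-1_amd64.changes', '2 days 14:00', 'Jane Doe')
    # [foo_1.0-1_amd64.changes][2 days 14:00][Jane Doe]
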
+def table_footer():
+    return '[HTML lost in extraction: closing table markup and a note that non-NEW uploads are available, see the UploadQueue-README for more information]\n'
+
+def table_row(changesname, delay, changed_by, closes):
+    global row_number
+
+    # the HTML tags inside the string literals below were stripped in
+    # extraction; only the formatting logic survives: an 'odd'/'even' row
+    # class, one cell per field, and one "#<bug>" link per Closes entry
+    # (the lost link markup consumed the second format argument)
+    res = ''%((row_number%2) and 'odd' or 'even')
+    res += (3*'%s')%tuple(map(html_escape,(changesname,delay,changed_by)))
+    res += ('%s' %
+            ''.join(map(lambda close: '#%s' % (close, close),closes)))
+    res += '\n'
+    row_number+=1
+    return res
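(Illustration: the odd/even row-class alternation used by table_row() above is the Python 2 and/or conditional idiom.)

    for row_number in (1, 2, 3, 4):
        print (row_number % 2) and 'odd' or 'even'
    # odd, even, odd, even
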
+def get_upload_data(changesfn):
+    achanges = deb822.Changes(file(changesfn))
+    changesname = os.path.basename(changesfn)
+    delay = os.path.basename(os.path.dirname(changesfn))
+    m = re.match(r'([0-9]+)-day', delay)
+    if m:
+        delaydays = int(m.group(1))
+        remainingtime = (delaydays>0)*max(0,24*60*60+os.stat(changesfn).st_mtime-time.time())
+        delay = "%d days %02d:%02d" %(max(delaydays-1,0), int(remainingtime/3600),int(remainingtime/60)%60)
+    else:
+        # not in an N-day subqueue; also keeps the return below from
+        # referencing an unbound name
+        delaydays = 0
+        remainingtime = 0
+
+    uploader = achanges.get('changed-by')
+    uploader = re.sub(r'^\s*(\S.*)\s+<.*>',r'\1',uploader)
+    if Cnf.has_key("Show-Deferred::LinkPath"):
+        isnew = 0
+        suites = database.get_suites(achanges['source'],src=1)
+        if 'unstable' not in suites and 'experimental' not in suites:
+            isnew = 1
+        for b in achanges['binary'].split():
+            suites = database.get_suites(b)
+            if 'unstable' not in suites and 'experimental' not in suites:
+                isnew = 1
+        if not isnew:
+            # we don't link .changes because we don't want other people to
+            # upload it with the existing signature.
+            for afn in map(lambda x: x['name'],achanges['files']):
+                lfn = os.path.join(Cnf["Show-Deferred::LinkPath"],afn)
+                qfn = os.path.join(os.path.dirname(changesfn),afn)
+                if os.path.islink(lfn):
+                    os.unlink(lfn)
+                if os.path.exists(qfn):
+                    os.symlink(qfn,lfn)
+                    os.chmod(qfn, 0644)
+    return (max(delaydays-1,0)*24*60*60+remainingtime, changesname, delay, uploader, achanges.get('closes').split(),achanges)
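(Illustration with a made-up mtime: how get_upload_data() above turns an "N-day" queue directory name plus the .changes file's mtime into the remaining delay; here a 3-day upload that moved into its current subqueue ten hours ago.)

    import re, time

    delay = "3-day"
    mtime = time.time() - 10*3600        # hypothetical upload time
    delaydays = int(re.match(r'([0-9]+)-day', delay).group(1))
    remainingtime = (delaydays > 0) * max(0, 24*60*60 + mtime - time.time())
    print "%d days %02d:%02d" % (max(delaydays-1, 0),
                                 int(remainingtime/3600), int(remainingtime/60) % 60)
    # 2 days 14:00
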
+def list_uploads(filelist):
+    uploads = map(get_upload_data, filelist)
+    uploads.sort()
+    # print the summary page
+    print header()
+    if uploads:
+        print table_header()
+        print ''.join(map(lambda x: table_row(*x[1:5]), uploads))
+        print table_footer()
+    else:
+        print '[HTML lost in extraction] Currently no deferred uploads to Debian'
+    print footer()
+    # machine readable summary
+    if Cnf.has_key("Show-Deferred::LinkPath"):
+        fn = os.path.join(Cnf["Show-Deferred::LinkPath"],'.status.tmp')
+        f = open(fn,"w")
+        try:
+            for u in uploads:
+                print >> f, "Changes: %s"%u[1]
+                fields = """Location: DEFERRED
+Delayed-Until: %s
+Delay-Remaining: %s"""%(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(time.time()+u[0])),u[2])
+                print >> f, fields
+                print >> f, str(u[5]).rstrip()
+                open(os.path.join(Cnf["Show-Deferred::LinkPath"],u[1]),"w").write(str(u[5])+fields+'\n')
+                print >> f
+            f.close()
+            os.rename(os.path.join(Cnf["Show-Deferred::LinkPath"],'.status.tmp'),
+                      os.path.join(Cnf["Show-Deferred::LinkPath"],'status'))
+        except:
+            os.unlink(fn)
+            raise
+
+def usage (exit_code=0):
+    if exit_code:
+        f = sys.stderr
+    else:
+        f = sys.stdout
+    print >> f, """Usage: dak show-deferred
+  -h, --help                    show this help and exit.
+  -p, --link-path [path]        override output directory.
+  -d, --deferred-queue [path]   path to the deferred queue
+  """
+    sys.exit(exit_code)
+
+def init():
+    global Cnf, Options, Upload, projectB
+    Cnf = utils.get_conf()
+    Arguments = [('h',"help","Show-Deferred::Options::Help"),
+                 ("p","link-path","Show-Deferred::LinkPath","HasArg"),
+                 ("d","deferred-queue","Show-Deferred::DeferredQueue","HasArg")]
+    args = apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv)
+    for i in ["help"]:
+        if not Cnf.has_key("Show-Deferred::Options::%s" % (i)):
+            Cnf["Show-Deferred::Options::%s" % (i)] = ""
+    for i,j in [("DeferredQueue","--deferred-queue")]:
+        if not Cnf.has_key("Show-Deferred::%s" % (i)):
+            print >> sys.stderr, """Show-Deferred::%s is mandatory.
+  set via config file or command-line option %s"""%(i,j)
+
+    Options = Cnf.SubTree("Show-Deferred::Options")
+    if Options["help"]:
+        usage()
+    Upload = queue.Upload(Cnf)
+    projectB = Upload.projectB
+    return args
+
+def main():
+    args = init()
+    if len(args)!=0:
+        usage(1)
+
+    filelist = []
+    for r,d,f in os.walk(Cnf["Show-Deferred::DeferredQueue"]):
+        filelist += map (lambda x: os.path.join(r,x),
+                         filter(lambda x: x.endswith('.changes'), f))
+    list_uploads(filelist)
+
+    available_changes = set(map(os.path.basename,filelist))
+    if Cnf.has_key("Show-Deferred::LinkPath"):
+        # remove dead links
+        for r,d,f in os.walk(Cnf["Show-Deferred::LinkPath"]):
+            for af in f:
+                afp = os.path.join(r,af)
+                if (not os.path.exists(afp) or
+                    (af.endswith('.changes') and af not in available_changes)):
+                    os.unlink(afp)
diff --git a/dak/show_new.py b/dak/show_new.py
index 87bea796..4b485f26 100755
--- a/dak/show_new.py
+++ b/dak/show_new.py
@@ -28,9 +28,9 @@ import copy, os, sys, time
 import apt_pkg
 import examine_package
-import daklib.database as database
-import daklib.queue as queue
-import daklib.utils as utils
+from daklib import database
+from daklib import queue
+from daklib import utils

 # Globals
 Cnf = None
diff --git a/dak/split_done.py b/dak/split_done.py
index 5502dcc5..5f8fadda 100755
--- a/dak/split_done.py
+++ b/dak/split_done.py
@@ -19,7 +19,7 @@
 ################################################################################

 import glob, os, stat, time
-import daklib.utils as utils
+from daklib import utils

 ################################################################################
diff --git a/dak/stats.py b/dak/stats.py
index e1786c4a..20a02b55 100755
--- a/dak/stats.py
+++ b/dak/stats.py
@@ -32,7 +32,7 @@ import pg, sys

 import apt_pkg

-import daklib.utils as utils
+from daklib import utils

 ################################################################################
diff --git
a/dak/transitions.py b/dak/transitions.py index 7a0da2e1..b7e50651 100755 --- a/dak/transitions.py +++ b/dak/transitions.py @@ -25,9 +25,10 @@ import os, pg, sys, time, errno, fcntl, tempfile, pwd, re import apt_pkg -import daklib.database as database -import daklib.utils as utils -import syck +from daklib import database +from daklib import utils +from daklib.dak_exceptions import TransitionsError +import yaml # Globals Cnf = None @@ -104,10 +105,10 @@ def load_transitions(trans_file): sourcecontent = sourcefile.read() failure = False try: - trans = syck.load(sourcecontent) - except syck.error, msg: + trans = yaml.load(sourcecontent) + except yaml.YAMLError, exc: # Someone fucked it up - print "ERROR: %s" % (msg) + print "ERROR: %s" % (exc) return None # lets do further validation here @@ -218,7 +219,7 @@ def write_transitions(from_trans): temp_lock = lock_file(trans_temp) destfile = file(trans_temp, 'w') - syck.dump(from_trans, destfile) + yaml.dump(from_trans, destfile, default_flow_style=False) destfile.close() os.rename(trans_temp, trans_file) @@ -227,9 +228,6 @@ def write_transitions(from_trans): ################################################################################ -class ParseException(Exception): - pass - ########################################## #### This usually runs within sudo !! #### ########################################## @@ -248,7 +246,7 @@ def write_transitions_from_file(from_file): else: trans = load_transitions(from_file) if trans is None: - raise ParseException, "Unparsable transitions file %s" % (file) + raise TransitionsError, "Unparsable transitions file %s" % (file) write_transitions(trans) ################################################################################ @@ -261,7 +259,7 @@ def temp_transitions_file(transitions): (fd, path) = tempfile.mkstemp("", "transitions", Cnf["Transitions::TempPath"]) os.chmod(path, 0644) f = open(path, "w") - syck.dump(transitions, f) + yaml.dump(transitions, f, default_flow_style=False) return path ################################################################################ @@ -458,7 +456,7 @@ def main(): if Options["import"]: try: write_transitions_from_file(Options["import"]) - except ParseException, m: + except TransitionsError, m: print m sys.exit(2) sys.exit(0) diff --git a/daklib/dak_exceptions.py b/daklib/dak_exceptions.py new file mode 100644 index 00000000..4e795461 --- /dev/null +++ b/daklib/dak_exceptions.py @@ -0,0 +1,67 @@ +# Exception classes used in dak + +# Copyright (C) 2008 Mark Hymers + +################################################################################ + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + +class DakError(Exception): + """Base class for all simple errors in this module. 
+ + Attributes: + + message -- explanation of the error + """ + + def __init__(self, message=""): + self.args = str(message) + self.message = str(message) + + def __str__(self): + return self.message + +__all__ = ['DakError'] + +dakerrors = { + "ParseMaintError": """Exception raised for errors in parsing a maintainer field.""", + "ParseChangesError": """Exception raised for errors in parsing a changes file.""", + "InvalidDscError": """Exception raised for invalid dsc files.""", + "UnknownFormatError": """Exception raised for unknown Format: lines in changes files.""", + "NoFilesFieldError": """Exception raised for missing files field in dsc/changes.""", + "CantOpenError": """Exception raised when files can't be opened.""", + "CantOverwriteError": """Exception raised when files can't be overwritten.""", + "FileExistsError": """Exception raised when destination file exists.""", + "SendmailFailedError": """Exception raised when Sendmail invocation failed.""", + "NoFreeFilenameError": """Exception raised when no alternate filename was found.""", + "TransitionsError": """Exception raised when transitions file can't be parsed.""", + "NoSourceFieldError": """Exception raised - we cant find the source - wtf?""" +} + +def construct_dak_exception(name, description): + class Er(DakError): + __doc__ = description + setattr(Er, "__name__", name) + return Er + +for e in dakerrors.keys(): + globals()[e] = construct_dak_exception(e, dakerrors[e]) + __all__ += [e] + + + +################################################################################ diff --git a/daklib/database.py b/daklib/database.py index 5c362604..5c7bd838 100755 --- a/daklib/database.py +++ b/daklib/database.py @@ -360,10 +360,10 @@ def get_or_set_queue_id (queue): ################################################################################ -def set_files_id (filename, size, md5sum, location_id): +def set_files_id (filename, size, md5sum, sha1sum, sha256sum, location_id): global files_id_cache - projectB.query("INSERT INTO files (filename, size, md5sum, location) VALUES ('%s', %d, '%s', %d)" % (filename, long(size), md5sum, location_id)) + projectB.query("INSERT INTO files (filename, size, md5sum, sha1sum, sha256sum, location) VALUES ('%s', %d, '%s', '%s', '%s', %d)" % (filename, long(size), md5sum, sha1sum, sha256sum, location_id)) return get_files_id (filename, size, md5sum, location_id) @@ -389,3 +389,11 @@ def get_maintainer (maintainer_id): return maintainer_cache[maintainer_id] ################################################################################ + +def get_suites(pkgname, src=False): + if src: + sql = "select suite_name from source, src_associations,suite where source.id=src_associations.source and source.source='%s' and src_associations.suite = suite.id"%pkgname + else: + sql = "select suite_name from binaries, bin_associations,suite where binaries.id=bin_associations.bin and package='%s' and bin_associations.suite = suite.id"%pkgname + q = projectB.query(sql) + return map(lambda x: x[0], q.getresult()) diff --git a/daklib/queue.py b/daklib/queue.py old mode 100644 new mode 100755 index 9f8223a6..813782e2 --- a/daklib/queue.py +++ b/daklib/queue.py @@ -22,6 +22,7 @@ import cPickle, errno, os, pg, re, stat, sys, time import apt_inst, apt_pkg import utils, database +from dak_exceptions import * from types import * @@ -40,36 +41,36 @@ def determine_new(changes, files, projectB, warn=1): new = {} # Build up a list of potentially new things - for file in files.keys(): - f = files[file] + for file_entry in 
files.keys(): + f = files[file_entry] # Skip byhand elements if f["type"] == "byhand": continue pkg = f["package"] priority = f["priority"] section = f["section"] - type = get_type(f) + file_type = get_type(f) component = f["component"] - if type == "dsc": + if file_type == "dsc": priority = "source" if not new.has_key(pkg): new[pkg] = {} new[pkg]["priority"] = priority new[pkg]["section"] = section - new[pkg]["type"] = type + new[pkg]["type"] = file_type new[pkg]["component"] = component new[pkg]["files"] = [] else: old_type = new[pkg]["type"] - if old_type != type: + if old_type != file_type: # source gets trumped by deb or udeb if old_type == "dsc": new[pkg]["priority"] = priority new[pkg]["section"] = section - new[pkg]["type"] = type + new[pkg]["type"] = file_type new[pkg]["component"] = component - new[pkg]["files"].append(file) + new[pkg]["files"].append(file_entry) if f.has_key("othercomponents"): new[pkg]["othercomponents"] = f["othercomponents"] @@ -81,9 +82,9 @@ def determine_new(changes, files, projectB, warn=1): q = projectB.query("SELECT package FROM override WHERE package = '%s' AND suite = %s AND component = %s AND type = %s" % (pkg, suite_id, component_id, type_id)) ql = q.getresult() if ql: - for file in new[pkg]["files"]: - if files[file].has_key("new"): - del files[file]["new"] + for file_entry in new[pkg]["files"]: + if files[file_entry].has_key("new"): + del files[file_entry]["new"] del new[pkg] if warn: @@ -102,18 +103,18 @@ def determine_new(changes, files, projectB, warn=1): def get_type(f): # Determine the type if f.has_key("dbtype"): - type = f["dbtype"] + file_type = f["dbtype"] elif f["type"] in [ "orig.tar.gz", "orig.tar.bz2", "tar.gz", "tar.bz2", "diff.gz", "diff.bz2", "dsc" ]: - type = "dsc" + file_type = "dsc" else: - utils.fubar("invalid type (%s) for new. Dazed, confused and sure as heck not continuing." % (type)) + utils.fubar("invalid type (%s) for new. Dazed, confused and sure as heck not continuing." % (file_type)) # Validate the override type - type_id = database.get_override_type_id(type) + type_id = database.get_override_type_id(file_type) if type_id == -1: - utils.fubar("invalid type (%s) for new. Say wha?" % (type)) + utils.fubar("invalid type (%s) for new. Say wha?" 
% (file_type)) - return type + return file_type ################################################################################ @@ -123,15 +124,15 @@ def check_valid(new): for pkg in new.keys(): section = new[pkg]["section"] priority = new[pkg]["priority"] - type = new[pkg]["type"] + file_type = new[pkg]["type"] new[pkg]["section id"] = database.get_section_id(section) new[pkg]["priority id"] = database.get_priority_id(new[pkg]["priority"]) # Sanity checks di = section.find("debian-installer") != -1 - if (di and type != "udeb") or (not di and type == "udeb"): + if (di and file_type not in ("udeb", "dsc")) or (not di and file_type == "udeb"): new[pkg]["section id"] = -1 - if (priority == "source" and type != "dsc") or \ - (priority != "source" and type == "dsc"): + if (priority == "source" and file_type != "dsc") or \ + (priority != "source" and file_type == "dsc"): new[pkg]["priority id"] = -1 @@ -170,8 +171,11 @@ class Upload: ########################################################################### def init_vars (self): - for i in [ "changes", "dsc", "files", "dsc_files", "legacy_source_untouchable" ]: - exec "self.pkg.%s.clear();" % (i) + self.pkg.changes.clear() + self.pkg.dsc.clear() + self.pkg.files.clear() + self.pkg.dsc_files.clear() + self.pkg.legacy_source_untouchable.clear() self.pkg.orig_tar_id = None self.pkg.orig_tar_location = "" self.pkg.orig_tar_gz = None @@ -182,10 +186,16 @@ class Upload: dump_filename = self.pkg.changes_file[:-8]+".dak" dump_file = utils.open_file(dump_filename) p = cPickle.Unpickler(dump_file) - for i in [ "changes", "dsc", "files", "dsc_files", "legacy_source_untouchable" ]: - exec "self.pkg.%s.update(p.load());" % (i) - for i in [ "orig_tar_id", "orig_tar_location" ]: - exec "self.pkg.%s = p.load();" % (i) + + self.pkg.changes.update(p.load()) + self.pkg.dsc.update(p.load()) + self.pkg.files.update(p.load()) + self.pkg.dsc_files.update(p.load()) + self.pkg.legacy_source_untouchable.update(p.load()) + + self.pkg.orig_tar_id = p.load() + self.pkg.orig_tar_location = p.load() + dump_file.close() ########################################################################### @@ -195,34 +205,53 @@ class Upload: # process-new use from process-unchecked def dump_vars(self, dest_dir): - for i in [ "changes", "dsc", "files", "dsc_files", - "legacy_source_untouchable", "orig_tar_id", "orig_tar_location" ]: - exec "%s = self.pkg.%s;" % (i,i) + + changes = self.pkg.changes + dsc = self.pkg.dsc + files = self.pkg.files + dsc_files = self.pkg.dsc_files + legacy_source_untouchable = self.pkg.legacy_source_untouchable + orig_tar_id = self.pkg.orig_tar_id + orig_tar_location = self.pkg.orig_tar_location + dump_filename = os.path.join(dest_dir,self.pkg.changes_file[:-8] + ".dak") dump_file = utils.open_file(dump_filename, 'w') try: - os.chmod(dump_filename, 0660) + os.chmod(dump_filename, 0664) except OSError, e: + # chmod may fail when the dumpfile is not owned by the user + # invoking dak (like e.g. when NEW is processed by a member + # of ftpteam) if errno.errorcode[e.errno] == 'EPERM': perms = stat.S_IMODE(os.stat(dump_filename)[stat.ST_MODE]) - if perms & stat.S_IROTH: - utils.fubar("%s is world readable and chmod failed." % (dump_filename)) + # security precaution, should never happen unless a weird + # umask is set anywhere + if perms & stat.S_IWOTH: + utils.fubar("%s is world writable and chmod failed." 
% \ + (dump_filename,)) + # ignore the failed chmod otherwise as the file should + # already have the right privileges and is just, at worst, + # unreadable for world else: raise p = cPickle.Pickler(dump_file, 1) - for i in [ "d_changes", "d_dsc", "d_files", "d_dsc_files" ]: - exec "%s = {}" % i + d_changes = {} + d_dsc = {} + d_files = {} + d_dsc_files = {} + ## files - for file in files.keys(): - d_files[file] = {} + for file_entry in files.keys(): + d_files[file_entry] = {} for i in [ "package", "version", "architecture", "type", "size", - "md5sum", "component", "location id", "source package", - "source version", "maintainer", "dbtype", "files id", - "new", "section", "priority", "othercomponents", + "md5sum", "sha1sum", "sha256sum", "component", + "location id", "source package", "source version", + "maintainer", "dbtype", "files id", "new", + "section", "priority", "othercomponents", "pool name", "original component" ]: - if files[file].has_key(i): - d_files[file][i] = files[file][i] + if files[file_entry].has_key(i): + d_files[file_entry][i] = files[file_entry][i] ## changes # Mandatory changes fields for i in [ "distribution", "source", "architecture", "version", @@ -242,15 +271,15 @@ class Upload: if dsc.has_key(i): d_dsc[i] = dsc[i] ## dsc_files - for file in dsc_files.keys(): - d_dsc_files[file] = {} + for file_entry in dsc_files.keys(): + d_dsc_files[file_entry] = {} # Mandatory dsc_files fields for i in [ "size", "md5sum" ]: - d_dsc_files[file][i] = dsc_files[file][i] + d_dsc_files[file_entry][i] = dsc_files[file_entry][i] # Optional dsc_files fields for i in [ "files id" ]: - if dsc_files[file].has_key(i): - d_dsc_files[file][i] = dsc_files[file][i] + if dsc_files[file_entry].has_key(i): + d_dsc_files[file_entry][i] = dsc_files[file_entry][i] for i in [ d_changes, d_dsc, d_files, d_dsc_files, legacy_source_untouchable, orig_tar_id, orig_tar_location ]: @@ -317,31 +346,31 @@ class Upload: override_summary =""; file_keys = files.keys() file_keys.sort() - for file in file_keys: - if files[file].has_key("byhand"): + for file_entry in file_keys: + if files[file_entry].has_key("byhand"): byhand = 1 - summary += file + " byhand\n" - elif files[file].has_key("new"): + summary += file_entry + " byhand\n" + elif files[file_entry].has_key("new"): new = 1 - summary += "(new) %s %s %s\n" % (file, files[file]["priority"], files[file]["section"]) - if files[file].has_key("othercomponents"): - summary += "WARNING: Already present in %s distribution.\n" % (files[file]["othercomponents"]) - if files[file]["type"] == "deb": - deb_fh = utils.open_file(file) + summary += "(new) %s %s %s\n" % (file_entry, files[file_entry]["priority"], files[file_entry]["section"]) + if files[file_entry].has_key("othercomponents"): + summary += "WARNING: Already present in %s distribution.\n" % (files[file_entry]["othercomponents"]) + if files[file_entry]["type"] == "deb": + deb_fh = utils.open_file(file_entry) summary += apt_pkg.ParseSection(apt_inst.debExtractControl(deb_fh))["Description"] + '\n' deb_fh.close() else: - files[file]["pool name"] = utils.poolify (changes.get("source",""), files[file]["component"]) - destination = self.Cnf["Dir::PoolRoot"] + files[file]["pool name"] + file - summary += file + "\n to " + destination + "\n" - if not files[file].has_key("type"): - files[file]["type"] = "unknown" - if files[file]["type"] in ["deb", "udeb", "dsc"]: + files[file_entry]["pool name"] = utils.poolify (changes.get("source",""), files[file_entry]["component"]) + destination = self.Cnf["Dir::PoolRoot"] + 
files[file_entry]["pool name"] + file_entry + summary += file_entry + "\n to " + destination + "\n" + if not files[file_entry].has_key("type"): + files[file_entry]["type"] = "unknown" + if files[file_entry]["type"] in ["deb", "udeb", "dsc"]: # (queue/unchecked), there we have override entries already, use them # (process-new), there we dont have override entries, use the newly generated ones. - override_prio = files[file].get("override priority", files[file]["priority"]) - override_sect = files[file].get("override section", files[file]["section"]) - override_summary += "%s - %s %s\n" % (file, override_prio, override_sect) + override_prio = files[file_entry].get("override priority", files[file_entry]["priority"]) + override_sect = files[file_entry].get("override section", files[file_entry]["section"]) + override_summary += "%s - %s %s\n" % (file_entry, override_prio, override_sect) short_summary = summary @@ -408,14 +437,14 @@ distribution.""" Subst["__SHORT_SUMMARY__"] = short_summary for dist in changes["distribution"].keys(): - list = Cnf.Find("Suite::%s::Announce" % (dist)) - if list == "" or lists_done.has_key(list): + announce_list = Cnf.Find("Suite::%s::Announce" % (dist)) + if announce_list == "" or lists_done.has_key(announce_list): continue - lists_done[list] = 1 - summary += "Announcing to %s\n" % (list) + lists_done[announce_list] = 1 + summary += "Announcing to %s\n" % (announce_list) if action: - Subst["__ANNOUNCE_LIST_ADDRESS__"] = list + Subst["__ANNOUNCE_LIST_ADDRESS__"] = announce_list if Cnf.get("Dinstall::TrackingServer") and changes["architecture"].has_key("source"): Subst["__ANNOUNCE_LIST_ADDRESS__"] = Subst["__ANNOUNCE_LIST_ADDRESS__"] + "\nBcc: %s@%s" % (changes["source"], Cnf["Dinstall::TrackingServer"]) mail_message = utils.TemplateSubst(Subst,Cnf["Dir::Templates"]+"/process-unchecked.announce") @@ -444,9 +473,9 @@ distribution.""" # Move all the files into the accepted directory utils.move(changes_file, Cnf["Dir::Queue::Accepted"]) file_keys = files.keys() - for file in file_keys: - utils.move(file, Cnf["Dir::Queue::Accepted"]) - self.accept_bytes += float(files[file]["size"]) + for file_entry in file_keys: + utils.move(file_entry, Cnf["Dir::Queue::Accepted"]) + self.accept_bytes += float(files[file_entry]["size"]) self.accept_count += 1 # Send accept mail, announce to lists, close bugs and check for @@ -482,8 +511,8 @@ distribution.""" temp_filename = utils.temp_filename(Cnf["Dir::Queue::BTSVersionTrack"], dotprefix=1, perms=0644) debinfo = utils.open_file(temp_filename, 'w') - for file in file_keys: - f = files[file] + for file_entry in file_keys: + f = files[file_entry] if f["type"] == "deb": line = " ".join([f["package"], f["version"], f["architecture"], f["source package"], @@ -518,9 +547,9 @@ distribution.""" dest_dir = Cnf["Dir::QueueBuild"] if Cnf.FindB("Dinstall::SecurityQueueBuild"): dest_dir = os.path.join(dest_dir, suite) - for file in file_keys: - src = os.path.join(path, file) - dest = os.path.join(dest_dir, file) + for file_entry in file_keys: + src = os.path.join(path, file_entry) + dest = os.path.join(dest_dir, file_entry) if Cnf.FindB("Dinstall::SecurityQueueBuild"): # Copy it since the original won't be readable by www-data utils.copy(src, dest) @@ -574,16 +603,16 @@ distribution.""" summary = "" file_keys = files.keys() file_keys.sort() - for file in file_keys: - if not files[file].has_key("new") and files[file]["type"] == "deb": - section = files[file]["section"] - override_section = files[file]["override section"] + for file_entry in 
file_keys: + if not files[file_entry].has_key("new") and files[file_entry]["type"] == "deb": + section = files[file_entry]["section"] + override_section = files[file_entry]["override section"] if section.lower() != override_section.lower() and section != "-": - summary += "%s: package says section is %s, override says %s.\n" % (file, section, override_section) - priority = files[file]["priority"] - override_priority = files[file]["override priority"] + summary += "%s: package says section is %s, override says %s.\n" % (file_entry, section, override_section) + priority = files[file_entry]["priority"] + override_priority = files[file_entry]["override priority"] if priority != override_priority and priority != "-": - summary += "%s: package says priority is %s, override says %s.\n" % (file, priority, override_priority) + summary += "%s: package says priority is %s, override says %s.\n" % (file_entry, priority, override_priority) if summary == "": return @@ -602,36 +631,36 @@ distribution.""" Cnf = self.Cnf - for file in files: + for file_entry in files: # Skip any files which don't exist or which we don't have permission to copy. - if os.access(file,os.R_OK) == 0: + if os.access(file_entry,os.R_OK) == 0: continue - dest_file = os.path.join(Cnf["Dir::Queue::Reject"], file) + dest_file = os.path.join(Cnf["Dir::Queue::Reject"], file_entry) try: dest_fd = os.open(dest_file, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0644) except OSError, e: # File exists? Let's try and move it to the morgue if errno.errorcode[e.errno] == 'EEXIST': - morgue_file = os.path.join(Cnf["Dir::Morgue"],Cnf["Dir::MorgueReject"],file) + morgue_file = os.path.join(Cnf["Dir::Morgue"],Cnf["Dir::MorgueReject"],file_entry) try: morgue_file = utils.find_next_free(morgue_file) - except utils.tried_too_hard_exc: + except NoFreeFilenameError: # Something's either gone badly Pete Tong, or # someone is trying to exploit us. - utils.warn("**WARNING** failed to move %s from the reject directory to the morgue." % (file)) + utils.warn("**WARNING** failed to move %s from the reject directory to the morgue." % (file_entry)) return utils.move(dest_file, morgue_file, perms=0660) try: dest_fd = os.open(dest_file, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0644) except OSError, e: # Likewise - utils.warn("**WARNING** failed to claim %s in the reject directory." % (file)) + utils.warn("**WARNING** failed to claim %s in the reject directory." % (file_entry)) return else: raise # If we got here, we own the destination file, so we can # safely overwrite it. 
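# (A note on the locking idiom above, as a minimal sketch: os.open() with
# os.O_CREAT|os.O_EXCL either atomically creates the destination file or
# fails with EEXIST, so two dak processes can never both believe they own
# reject/<file>; the loser first moves the stale file to the morgue via
# utils.find_next_free() and then retries the O_EXCL open exactly once.)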
- utils.move(file, dest_file, 1, perms=0660) + utils.move(file_entry, dest_file, 1, perms=0660) os.close(dest_fd) ########################################################################### @@ -763,9 +792,9 @@ distribution.""" files = self.pkg.files if binary_type == "": # must be source - type = "dsc" + file_type = "dsc" else: - type = binary_type + file_type = binary_type # Override suite name; used for example with proposed-updates if self.Cnf.Find("Suite::%s::OverrideSuite" % (suite)) != "": @@ -776,13 +805,13 @@ distribution.""" if suite_id == -1: return None component_id = database.get_component_id(component) - type_id = database.get_override_type_id(type) + type_id = database.get_override_type_id(file_type) q = self.projectB.query("SELECT s.section, p.priority FROM override o, section s, priority p WHERE package = '%s' AND suite = %s AND component = %s AND type = %s AND o.section = s.id AND o.priority = p.id" % (package, suite_id, component_id, type_id)) result = q.getresult() # If checking for a source package fall back on the binary override type - if type == "dsc" and not result: + if file_type == "dsc" and not result: deb_type_id = database.get_override_type_id("deb") udeb_type_id = database.get_override_type_id("udeb") q = self.projectB.query("SELECT s.section, p.priority FROM override o, section s, priority p WHERE package = '%s' AND suite = %s AND component = %s AND (type = %s OR type = %s) AND o.section = s.id AND o.priority = p.id" @@ -1038,8 +1067,8 @@ SELECT s.version, su.suite_name FROM source s, src_associations sa, suite su if os.path.exists(in_unchecked) and False: return (self.reject_message, in_unchecked) else: - for dir in [ "Accepted", "New", "Byhand", "ProposedUpdates", "OldProposedUpdates" ]: - in_otherdir = os.path.join(self.Cnf["Dir::Queue::%s" % (dir)],dsc_file) + for directory in [ "Accepted", "New", "Byhand", "ProposedUpdates", "OldProposedUpdates" ]: + in_otherdir = os.path.join(self.Cnf["Dir::Queue::%s" % (directory)],dsc_file) if os.path.exists(in_otherdir): in_otherdir_fh = utils.open_file(in_otherdir) actual_md5 = apt_pkg.md5sum(in_otherdir_fh) diff --git a/daklib/utils.py b/daklib/utils.py old mode 100644 new mode 100755 index a094788f..fc1465d1 --- a/daklib/utils.py +++ b/daklib/utils.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# vim:set et ts=4 sw=4: # Utility functions # Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006 James Troup @@ -22,9 +23,10 @@ ################################################################################ import codecs, commands, email.Header, os, pwd, re, select, socket, shutil, \ - sys, tempfile, traceback + sys, tempfile, traceback, stat import apt_pkg import database +from dak_exceptions import * ################################################################################ @@ -48,39 +50,15 @@ re_verwithext = re.compile(r"^(\d+)(?:\.(\d+))(?:\s+\((\S+)\))?$") re_srchasver = re.compile(r"^(\S+)\s+\((\S+)\)$") -changes_parse_error_exc = "Can't parse line in .changes file" -invalid_dsc_format_exc = "Invalid .dsc file" -nk_format_exc = "Unknown Format: in .changes file" -no_files_exc = "No Files: field in .dsc or .changes file." -cant_open_exc = "Can't open file" -unknown_hostname_exc = "Unknown hostname" -cant_overwrite_exc = "Permission denied; can't overwrite existent file." -file_exists_exc = "Destination file exists" -sendmail_failed_exc = "Sendmail invocation failed" -tried_too_hard_exc = "Tried too hard to find a free filename." 
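The string exceptions removed above are replaced by class-based exceptions from the new daklib/dak_exceptions module (imported a few hunks above via "from dak_exceptions import *"). A minimal sketch of what such a module presumably provides -- the class names match the ones used in this diff, the base-class shape is an assumption:

class DakError(Exception):
    """Base class for daklib exceptions (sketch)."""
    def __init__(self, message=""):
        Exception.__init__(self)
        self.args = (message,)
        self.message = message

class CantOpenError(DakError): pass
class CantOverwriteError(DakError): pass
class FileExistsError(DakError): pass
class ParseChangesError(DakError): pass
class InvalidDscError(DakError): pass
class UnknownFormatError(DakError): pass
class NoFilesFieldError(DakError): pass
class SendmailFailedError(DakError): pass
class NoFreeFilenameError(DakError): pass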
- default_config = "/etc/dak/dak.conf" default_apt_config = "/etc/dak/apt.conf" alias_cache = None key_uid_email_cache = {} -################################################################################ - -class Error(Exception): - """Base class for exceptions in this module.""" - pass - -class ParseMaintError(Error): - """Exception raised for errors in parsing a maintainer field. - - Attributes: - message -- explanation of the error - """ - - def __init__(self, message): - self.args = message, - self.message = message +# (hashname, function, earliest_changes_version) +known_hashes = [("sha1", apt_pkg.sha1sum, (1, 8)), + ("sha256", apt_pkg.sha256sum, (1, 8))] ################################################################################ @@ -88,7 +66,7 @@ def open_file(filename, mode='r'): try: f = open(filename, mode) except IOError: - raise cant_open_exc, filename + raise CantOpenError, filename return f ################################################################################ @@ -123,35 +101,15 @@ def extract_component_from_section(section): ################################################################################ -def parse_changes(filename, signing_rules=0): - """Parses a changes file and returns a dictionary where each field is a -key. The mandatory first argument is the filename of the .changes -file. - -signing_rules is an optional argument: - - o If signing_rules == -1, no signature is required. - o If signing_rules == 0 (the default), a signature is required. - o If signing_rules == 1, it turns on the same strict format checking - as dpkg-source. - -The rules for (signing_rules == 1)-mode are: - - o The PGP header consists of "-----BEGIN PGP SIGNED MESSAGE-----" - followed by any PGP header data and must end with a blank line. - - o The data section must end with a blank line and must be followed by - "-----BEGIN PGP SIGNATURE-----". -""" - +def parse_deb822(contents, signing_rules=0): error = "" changes = {} - changes_in = open_file(filename) - lines = changes_in.readlines() + # Split the lines in the input, keeping the linebreaks. + lines = contents.splitlines(True) - if not lines: - raise changes_parse_error_exc, "[Empty changes file]" + if len(lines) == 0: + raise ParseChangesError, "[Empty changes file]" # Reindex by line number so we can easily verify the format of # .dsc files... 
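The point of this refactor is that parse_deb822() works on a string rather than a file, so the same parser can handle both a .changes on disk and the filecontents blob stored inside a .dak. A minimal usage sketch (the filename is hypothetical):

from daklib import utils

# Parse a signed .changes from disk; signing_rules=0 (the default)
# requires a PGP signature.
changes = utils.parse_changes("hello_1.0-1_i386.changes")

# Re-parse the stored text without touching the filesystem. -1 means
# "no signature required", which fits text that was already verified
# when it was first processed.
stored = utils.parse_deb822(changes["filecontents"], signing_rules=-1)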
@@ -173,10 +131,10 @@ The rules for (signing_rules == 1)-mode are: if signing_rules == 1: index += 1 if index > num_of_lines: - raise invalid_dsc_format_exc, index + raise InvalidDscError, index line = indexed_lines[index] if not line.startswith("-----BEGIN PGP SIGNATURE"): - raise invalid_dsc_format_exc, index + raise InvalidDscError, index inside_signature = 0 break else: @@ -205,7 +163,7 @@ The rules for (signing_rules == 1)-mode are: mlf = re_multi_line_field.match(line) if mlf: if first == -1: - raise changes_parse_error_exc, "'%s'\n [Multi-line field continuing on from nothing?]" % (line) + raise ParseChangesError, "'%s'\n [Multi-line field continuing on from nothing?]" % (line) if first == 1 and changes[field] != "": changes[field] += '\n' first = 0 @@ -214,9 +172,8 @@ The rules for (signing_rules == 1)-mode are: error += line if signing_rules == 1 and inside_signature: - raise invalid_dsc_format_exc, index + raise InvalidDscError, index - changes_in.close() changes["filecontents"] = "".join(lines) if changes.has_key("source"): @@ -228,12 +185,234 @@ The rules for (signing_rules == 1)-mode are: changes["source-version"] = srcver.group(2) if error: - raise changes_parse_error_exc, error + raise ParseChangesError, error return changes ################################################################################ +def parse_changes(filename, signing_rules=0): + """Parses a changes file and returns a dictionary where each field is a +key. The mandatory first argument is the filename of the .changes +file. + +signing_rules is an optional argument: + + o If signing_rules == -1, no signature is required. + o If signing_rules == 0 (the default), a signature is required. + o If signing_rules == 1, it turns on the same strict format checking + as dpkg-source. + +The rules for (signing_rules == 1)-mode are: + + o The PGP header consists of "-----BEGIN PGP SIGNED MESSAGE-----" + followed by any PGP header data and must end with a blank line. + + o The data section must end with a blank line and must be followed by + "-----BEGIN PGP SIGNATURE-----". +""" + + changes_in = open_file(filename) + content = changes_in.read() + changes_in.close() + return parse_deb822(content, signing_rules) + +################################################################################ + +def hash_key(hashname): + return '%ssum' % hashname + +################################################################################ + +def create_hash(where, files, hashname, hashfunc): + """create_hash extends the passed files dict with the given hash by + iterating over all files on disk and passing them to the hashing + function given.""" + + rejmsg = [] + for f in files.keys(): + try: + file_handle = open_file(f) + except CantOpenError: + rejmsg.append("Could not open file %s for checksumming" % (f)) + continue + + files[f][hash_key(hashname)] = hashfunc(file_handle) + + file_handle.close() + return rejmsg + +################################################################################ + +def check_hash(where, files, hashname, hashfunc): + """check_hash checks the given hash in the files dict against the actual + files on disk. The hash values need to be present consistently in + all file entries. It does not modify its input in any way.""" + + rejmsg = [] + for f in files.keys(): + file_handle = None + try: + try: + file_handle = open_file(f) + + # Check for the hash entry, to not trigger a KeyError.
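+ # (A missing entry is expected input here -- e.g. a files dict
+ # restored from a .dak written before sha1/sha256 support -- so
+ # it becomes a reject message instead of a KeyError.)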
+ if not files[f].has_key(hash_key(hashname)): + rejmsg.append("%s: misses %s checksum in %s" % (f, hashname, + where)) + continue + + # Actually check the hash for correctness. + if hashfunc(file_handle) != files[f][hash_key(hashname)]: + rejmsg.append("%s: %s check failed in %s" % (f, hashname, + where)) + except CantOpenError: + # TODO: This happens when the file is in the pool. + # warn("Cannot open file %s" % f) + continue + finally: + if file_handle: + file_handle.close() + return rejmsg + +################################################################################ + +def check_size(where, files): + """check_size checks the file sizes in the passed files dict against the + files on disk.""" + + rejmsg = [] + for f in files.keys(): + try: + entry = os.stat(f) + except OSError, exc: + if exc.errno == 2: + # TODO: This happens when the file is in the pool. + continue + raise + + actual_size = entry[stat.ST_SIZE] + size = int(files[f]["size"]) + if size != actual_size: + rejmsg.append("%s: actual file size (%s) does not match size (%s) in %s" + % (f, actual_size, size, where)) + return rejmsg + +################################################################################ + +def check_hash_fields(what, manifest): + """check_hash_fields ensures that there are no checksum fields in the + given dict that we do not know about.""" + + rejmsg = [] + hashes = map(lambda x: x[0], known_hashes) + for field in manifest: + if field.startswith("checksums-"): + hashname = field.split("-",1)[1] + if hashname not in hashes: + rejmsg.append("Unsupported checksum field for %s "\ + "in %s" % (hashname, what)) + return rejmsg + +################################################################################ + +def _ensure_changes_hash(changes, format, version, files, hashname, hashfunc): + if format >= version: + # The version should contain the specified hash. + func = check_hash + + # Import hashes from the changes + rejmsg = parse_checksums(".changes", files, changes, hashname) + if len(rejmsg) > 0: + return rejmsg + else: + # We need to calculate the hash because it can't possibly + # be in the file. + func = create_hash + return func(".changes", files, hashname, hashfunc) + +# We could add the orig which might be in the pool to the files dict to +# access the checksums easily. + +def _ensure_dsc_hash(dsc, dsc_files, hashname, hashfunc): + """ensure_dsc_hashes' task is to ensure that each and every *present* hash + in the dsc is correct, i.e. identical to the changes file and if necessary + the pool. The latter task is delegated to check_hash.""" + + rejmsg = [] + if not dsc.has_key('Checksums-%s' % (hashname,)): + return rejmsg + # Import hashes from the dsc + parse_checksums(".dsc", dsc_files, dsc, hashname) + # And check it... + rejmsg.extend(check_hash(".dsc", dsc_files, hashname, hashfunc)) + return rejmsg + +################################################################################ + +def ensure_hashes(changes, dsc, files, dsc_files): + rejmsg = [] + + # Make sure we recognise the format of the Files: field in the .changes + format = changes.get("format", "0.0").split(".", 1) + if len(format) == 2: + format = int(format[0]), int(format[1]) + else: + format = int(float(format[0])), 0 + + # We need to deal with the original changes blob, as the fields we need + # might not be in the changes dict serialised into the .dak anymore. + orig_changes = parse_deb822(changes['filecontents']) + + # Copy the checksums over to the current changes dict. 
This will keep + # the existing modifications to it intact. + for field in orig_changes: + if field.startswith('checksums-'): + changes[field] = orig_changes[field] + + # Check for unsupported hashes + rejmsg.extend(check_hash_fields(".changes", changes)) + rejmsg.extend(check_hash_fields(".dsc", dsc)) + + # We have to calculate the hash if our changes format version is older + # than the one the hash first appeared in, rather than requiring it to + # be present in the changes file + for hashname, hashfunc, version in known_hashes: + rejmsg.extend(_ensure_changes_hash(changes, format, version, files, + hashname, hashfunc)) + if "source" in changes["architecture"]: + rejmsg.extend(_ensure_dsc_hash(dsc, dsc_files, hashname, + hashfunc)) + + return rejmsg + +def parse_checksums(where, files, manifest, hashname): + rejmsg = [] + field = 'checksums-%s' % hashname + if not field in manifest: + return rejmsg + input = manifest[field] + for line in input.split('\n'): + if not line: + break + hash, size, file = line.strip().split(' ') + if not files.has_key(file): + # TODO: check for the file's entry in the original files dict, not + # the one modified by (auto)byhand and other weird stuff + # rejmsg.append("%s: not present in files but in checksums-%s in %s" % + # (file, hashname, where)) + continue + if not files[file]["size"] == size: + rejmsg.append("%s: size differs for files and checksums-%s entry "\ + "in %s" % (file, hashname, where)) + continue + files[file][hash_key(hashname)] = hash + for f in files.keys(): + if not files[f].has_key(hash_key(hashname)): + rejmsg.append("%s: no entry in checksums-%s in %s" % (f, + hashname, where)) + return rejmsg + +################################################################################ + # Dropped support for 1.4 and ``buggy dchanges 3.4'' (?!) compared to di.pl def build_file_list(changes, is_a_dsc=0, field="files", hashname="md5sum"): @@ -241,12 +420,12 @@ def build_file_list(changes, is_a_dsc=0, field="files", hashname="md5sum"): # Make sure we have a Files: field to parse... if not changes.has_key(field): - raise no_files_exc + raise NoFilesFieldError # Make sure we recognise the format of the Files: field format = re_verwithext.search(changes.get("format", "0.0")) if not format: - raise nk_format_exc, "%s" % (changes.get("format","0.0")) + raise UnknownFormatError, "%s" % (changes.get("format","0.0")) format = format.groups() if format[1] == None: @@ -257,13 +436,16 @@ def build_file_list(changes, is_a_dsc=0, field="files", hashname="md5sum"): format = format[:2] if is_a_dsc: - if format != (1,0): - raise nk_format_exc, "%s" % (changes.get("format","0.0")) + # format (1,0) is the only format we currently accept; format (0,0) + # means a missing format header, of which we still + # have some in the archive.
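+ # (Illustrative: "Format: 1.0" parses to (1,0) above and is accepted,
+ # a .dsc with no Format: header at all defaults to "0.0" and is
+ # accepted as (0,0), while anything newer is rejected below.)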
+ if format != (1,0) and format != (0,0): + raise UnknownFormatError, "%s" % (changes.get("format","0.0")) else: if (format < (1,5) or format > (1,8)): - raise nk_format_exc, "%s" % (changes.get("format","0.0")) + raise UnknownFormatError, "%s" % (changes.get("format","0.0")) if field != "files" and format < (1,8): - raise nk_format_exc, "%s" % (changes.get("format","0.0")) + raise UnknownFormatError, "%s" % (changes.get("format","0.0")) includes_section = (not is_a_dsc) and field == "files" @@ -279,7 +461,7 @@ def build_file_list(changes, is_a_dsc=0, field="files", hashname="md5sum"): else: (md5, size, name) = s except ValueError: - raise changes_parse_error_exc, i + raise ParseChangesError, i if section == "": section = "-" @@ -387,7 +569,7 @@ def send_mail (message, filename=""): # Invoke sendmail (result, output) = commands.getstatusoutput("%s < %s" % (Cnf["Dinstall::SendmailCommand"], filename)) if (result != 0): - raise sendmail_failed_exc, output + raise SendmailFailedError, output # Clean up any temporary files if message: @@ -443,10 +625,10 @@ def copy (src, dest, overwrite = 0, perms = 0664): # Don't overwrite unless forced to if os.path.exists(dest): if not overwrite: - raise file_exists_exc + raise FileExistsError else: if not os.access(dest, os.W_OK): - raise cant_overwrite_exc + raise CantOverwriteError shutil.copy2(src, dest) os.chmod(dest, perms) @@ -590,7 +772,7 @@ def find_next_free (dest, too_many=100): dest = orig_dest + '.' + repr(extra) extra += 1 if extra >= too_many: - raise tried_too_hard_exc + raise NoFreeFilenameError return dest ################################################################################ diff --git a/debian/control b/debian/control index 2d6678b9..a44e3632 100644 --- a/debian/control +++ b/debian/control @@ -7,7 +7,7 @@ Standards-Version: 3.5.6.0 Package: dak Architecture: any -Depends: ${python:Depends}, python-pygresql, python2.1-email | python (>= 2.2), python-apt, apt-utils, gnupg (>= 1.0.6-1), ${shlibs:Depends}, dpkg-dev, python-syck (>= 0.61.2-1) +Depends: ${python:Depends}, python-pygresql, python2.1-email | python (>= 2.2), python-apt, apt-utils, gnupg (>= 1.0.6-1), ${shlibs:Depends}, dpkg-dev, python-syck (>= 0.61.2-1), libemail-send-perl Suggests: lintian, linda, less, binutils-multiarch, symlinks, postgresql (>= 7.1.0), dsync Description: Debian's archive maintenance scripts This is a collection of archive maintenance scripts used by the diff --git a/docs/THANKS b/docs/THANKS index 47a31d0d..99424b8a 100644 --- a/docs/THANKS +++ b/docs/THANKS @@ -19,7 +19,7 @@ Guy Maor Jason Gunthorpe Jeroen van Wolffelaar Joey Hess -Joerg Jaspert +Joerg Jaspert Mark Brown Martin Michlmayr Matt Kraai diff --git a/docs/manpages/clean-suites.1.sgml b/docs/manpages/clean-suites.1.sgml index 0691f5ac..621bbc34 100644 --- a/docs/manpages/clean-suites.1.sgml +++ b/docs/manpages/clean-suites.1.sgml @@ -30,7 +30,7 @@ Description</> <para> - <command>dak clean-suites</command> is a utility clean out old packages. It will clean out any binary packages not referenced by a suite and any source packages not referenced by a suite and not referenced by any binary packages. Cleaning is not actual deletion, but rather, removal of packages from the pool to a 'morgue' directory. The 'morgue' directory is split into dated sub-directories to keep things sane in big archives. + <command>dak clean-suites</command> is a utility to clean out old packages. 
It will clean out any binary packages not referenced by a suite and any source packages not referenced by a suite and not referenced by any binary packages. Cleaning is not actual deletion, but rather, removal of packages from the pool to a 'morgue' directory. The 'morgue' directory is split into dated sub-directories to keep things sane in big archives. </PARA> </REFSECT1> diff --git a/scripts/debian/byhand-task b/scripts/debian/byhand-task new file mode 100755 index 00000000..8caf9422 --- /dev/null +++ b/scripts/debian/byhand-task @@ -0,0 +1,65 @@ +#!/bin/sh -ue + +if [ $# -lt 4 ]; then + echo "Usage: $0 filename version arch changes_file" + exit 1 +fi + +INPUT="$1" # Tarball to read, compressed with gzip +VERSION="$2" +ARCH="$3" +CHANGES="$4" # Changes file for the upload + +error() { + echo "$*" + exit 1 +} + +# Get the target suite from the Changes file +# NOTE: it may be better to pass this to the script as a parameter! +SUITE="$(grep "^Distribution:" "$CHANGES" | awk '{print $2}')" +case $SUITE in + "") + error "Error: unable to determine suite from Changes file" + ;; + unstable|sid) + : # OK for automated byhand processing + ;; + *) + error "Reject: task overrides can only be processed automatically for uploads to unstable" + ;; +esac + + +# Regular expression used to validate tag lines +CHECKRE='^[a-z0-9A-Z.+-]+[[:space:]]+Task[[:space:]]+[a-z0-9:. ,{}+-]+$' + +# This must end with / +TARGET=/srv/ftp.debian.org/scripts/external-overrides/ + +# Read the main directory from the tarball +DIR="`tar ztf \"$INPUT\" | tac | tail -n 1`" + +# Create temporary files in which to store the validated data +umask 002 +OUTMAIN="`mktemp \"$TARGET\"task.new.XXXXXX`" + +# If we fail somewhere, clean up the temporary files +cleanup() { + rm -f "$OUTMAIN" +} +trap cleanup EXIT + +# Extract the data into the temporary files +tar -O -zxf "$INPUT" "$DIR"task | grep -E "$CHECKRE" > "$OUTMAIN" + +# Move the data to the final location +mv "$OUTMAIN" "$TARGET"task + +chmod 644 "$TARGET"task + +(cd $TARGET && ./mk-extra-overrides.sh) + +trap - EXIT + +exit 0
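The grep -E "$CHECKRE" pipeline above is the entire validation step: only lines of the form "<package> Task <task list>" survive into the installed task override file. A quick way to check a candidate line by hand against the same filter, with made-up package and task names:

    CHECKRE='^[a-z0-9A-Z.+-]+[[:space:]]+Task[[:space:]]+[a-z0-9:. ,{}+-]+$'
    echo 'hello Task desktop, education' | grep -E "$CHECKRE" && echo accepted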
diff --git a/scripts/debian/ddtp-i18n-check.sh b/scripts/debian/ddtp-i18n-check.sh new file mode 100755 index 00000000..c42286c3 --- /dev/null +++ b/scripts/debian/ddtp-i18n-check.sh @@ -0,0 +1,371 @@ +#!/bin/bash +# +# $Id: ddtp_i18n_check.sh 1186 2008-08-12 18:31:25Z faw $ +# +# Copyright (C) 2008, Felipe Augusto van de Wiel <faw@funlabs.org> +# Copyright (C) 2008, Nicolas François <nicolas.francois@centraliens.net> +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# On Debian systems, you can find the full text of the license in +# /usr/share/common-licenses/GPL-2 + +set -eu +export LC_ALL=C + +# This must be defined to either 0 or 1 +# When DEBUG=0, fail after the first error. +# Otherwise, list all the errors. +DEBUG=0 + +#STABLE="etch" +TESTING="lenny" +UNSTABLE="sid" + +# Original SHA256SUMS, generated by i18n.debian.net +SHA256SUMS="SHA256SUMS" + +# DAK Timestamp +TIMESTAMP="timestamp" + +# These special files must exist on the top of dists_parent_dir +SPECIAL_FILES="$SHA256SUMS $TIMESTAMP $TIMESTAMP.gpg" + +usage () { + echo "Usage: $0 <dists_parent_dir> [<packages_lists_directory>]" >&2 + exit 1 +} + +if [ "$#" -lt 1 ] || [ "$#" -gt 2 ] || [ ! -d $1 ] +then + usage +fi + +# Temporary working directory. We need a full path to reduce the +# complexity of checking SHA256SUMS and cleaning/removing TMPDIR +TEMP_WORK_DIR=$(mktemp -d -t ddtp_dinstall_tmpdir.XXXXXX) +cd "$TEMP_WORK_DIR" +TMP_WORK_DIR=$(pwd) +cd "$OLDPWD" +unset TEMP_WORK_DIR + +# If it's trapped, something bad happened. +trap_exit () { + rm -rf "$TMP_WORK_DIR" + rm -f "$dists_parent_dir"/dists/*/main/i18n/Translation-*.{bz2,gz} + exit 1 +} +trap trap_exit EXIT HUP INT QUIT TERM + +# If no argument indicates the PACKAGES_LISTS_DIR then use '.' +PACKAGES_LISTS_DIR=${2:-.} + +if [ ! -d "$PACKAGES_LISTS_DIR" ] +then + usage +fi + +# Remove trailing / +dists_parent_dir=${1%/} + +is_filename_okay () { + ifo_file="$1" + + # Check that the file is in an "i18n" directory + # This ensures that the Translation-$lang files are not e.g. in + # dists/etch/ or dists/etch/main/ + ifo_d=$(basename $(dirname "$ifo_file")) + if [ "x$ifo_d" = "xi18n" ]; then + + # Check that the file is named Translation-$lang + ifo_f=$(basename "$ifo_file") + case "$ifo_f" in + Translation-[a-z][a-z][a-z]_[A-Z][A-Z]) return 0;; + Translation-[a-z][a-z]_[A-Z][A-Z]) return 0;; + Translation-[a-z][a-z][a-z]) return 0;; + Translation-[a-z][a-z]) return 0;; + esac + fi + + return 1 +} + +# Check a directory name against a directory whitelist +is_dirname_okay () { + ido_dir="$1" + + case "$ido_dir" in + "$dists_parent_dir") return 0;; + "$dists_parent_dir/dists") return 0;; +# TODO/FIXME: It is undecided how to update at stable/point-releases, so we +# don't allow files to $STABLE. +# "$dists_parent_dir/dists/$STABLE") return 0;; +# "$dists_parent_dir/dists/$STABLE/main") return 0;; +# "$dists_parent_dir/dists/$STABLE/main/i18n") return 0;; +# "$dists_parent_dir/dists/$STABLE/contrib") return 0;; +# "$dists_parent_dir/dists/$STABLE/contrib/i18n") return 0;; +# "$dists_parent_dir/dists/$STABLE/non-free") return 0;; +# "$dists_parent_dir/dists/$STABLE/non-free/i18n") return 0;; + "$dists_parent_dir/dists/$TESTING") return 0;; + "$dists_parent_dir/dists/$TESTING/main") return 0;; + "$dists_parent_dir/dists/$TESTING/main/i18n") return 0;; + "$dists_parent_dir/dists/$TESTING/contrib") return 0;; + "$dists_parent_dir/dists/$TESTING/contrib/i18n") return 0;; + "$dists_parent_dir/dists/$TESTING/non-free") return 0;; + "$dists_parent_dir/dists/$TESTING/non-free/i18n") return 0;; + "$dists_parent_dir/dists/$UNSTABLE") return 0;; + "$dists_parent_dir/dists/$UNSTABLE/main") return 0;; + "$dists_parent_dir/dists/$UNSTABLE/main/i18n") return 0;; + "$dists_parent_dir/dists/$UNSTABLE/contrib") return 0;; + "$dists_parent_dir/dists/$UNSTABLE/contrib/i18n") return 0;; + "$dists_parent_dir/dists/$UNSTABLE/non-free") return 0;; + "$dists_parent_dir/dists/$UNSTABLE/non-free/i18n") return 0;; + esac + + return 1 +} +
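+# For example (illustrative paths, with TESTING=lenny):
+# dists/lenny/main/i18n/Translation-pt_BR passes both checks, while
+# dists/lenny/main/Translation-de fails is_filename_okay (not under an
+# i18n directory) and dists/lenny/foo/i18n fails is_dirname_okay (not
+# in the whitelist above).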
+has_valid_fields () { + hvf_file="$1" + hvf_lang=${hvf_file/*-} + +awk " +function print_status () { + printf (\"p: %d, m: %d, s: %d, l: %d\n\", package, md5, s_description, l_description) +} +BEGIN { + package = 0 # Indicates if a Package field was found + md5 = 0 # Indicates if a Description-md5 field was found + s_description = 0 # Indicates if a short description was found + l_description = 0 # Indicates if a long description was found + + failures = 0 # Number of failures (debug only) + failed = 0 # Failure already reported for the block +} + +/^Package: / { + if (0 == failed) { + if ( (0 != package) \ + || (0 != md5) \ + || (0 != s_description) \ + || (0 != l_description)) { + printf (\"Package field unexpected in $hvf_file (line %d)\n\", NR) + print_status() + failed = 1 + if ($DEBUG) { failures++ } else { exit 1 } + } + package++ + } + # Next input line + next +} + +/^Description-md5: / { + if (0 == failed) { + if ( (1 != package) \ + || (0 != md5) \ + || (0 != s_description) \ + || (0 != l_description)) { + printf (\"Description-md5 field unexpected in $hvf_file (line %d)\n\", NR) + print_status() + failed = 1 + if ($DEBUG) { failures++ } else { exit 1 } + } + md5++ + } + # Next input line + next +} + +/^Description-$hvf_lang: / { + if (0 == failed) { + if ( (1 != package) \ + || (1 != md5) \ + || (0 != s_description) \ + || (0 != l_description)) { + printf (\"Description-$hvf_lang field unexpected in $hvf_file (line %d)\n\", NR) + print_status() + failed = 1 + if ($DEBUG) { failures++ } else { exit 1 } + } + s_description++ + } + # Next input line + next +} + +/^ / { + if (0 == failed) { + if ( (1 != package) \ + || (1 != md5) \ + || (1 != s_description)) { + printf (\"Long description unexpected in $hvf_file (line %d)\n\", NR) + print_status() + failed = 1 + if ($DEBUG) { failures++ } else { exit 1 } + } + l_description = 1 # There can be any number of long description + # lines. Do not count. + } + # Next line + next +} + +/^$/ { + if (0 == failed) { + if ( (1 != package) \ + || (1 != md5) \ + || (1 != s_description) \ + || (1 != l_description)) { + printf (\"End of block unexpected in $hvf_file (line %d)\n\", NR) + print_status() + failed = 1 + if ($DEBUG) { failures++ } else { exit 1 } + } + } + + # Next package + package = 0; md5 = 0; s_description = 0; l_description = 0 + failed = 0 + + # Next input line + next +} + +# Anything else: fail +{ + printf (\"Unexpected line '\$0' in $hvf_file (line %d)\n\", NR) + print_status() + failed = 1 + if ($DEBUG) { failures++ } else { exit 1 } +} + +END { + if (0 == failed) { + # They must be all set to 0 or all set to 1 + if ( ( (0 == package) \ + || (0 == md5) \ + || (0 == s_description) \ + || (0 == l_description)) \ + && ( (0 != package) \ + || (0 != md5) \ + || (0 != s_description) \ + || (0 != l_description))) { + printf (\"End of file unexpected in $hvf_file (line %d)\n\", NR) + print_status() + exit 1 + } + } + + if (failures > 0) { + exit 1 + } +} +" "$hvf_file" || return 1 + + return 0 +}
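+# A block that satisfies the awk checks above looks like this
+# (hypothetical package, the md5 is illustrative):
+#
+# Package: hello
+# Description-md5: 00112233445566778899aabbccddeeff
+# Description-de: Hallo-Welt-Programm
+#  Eine laengere Beschreibung folgt hier.
+#
+# i.e. per block exactly one Package, one Description-md5, one short
+# description and at least one long-description line, in that order.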
is_dirname_okay "$f"; then + echo "Wrong directory name: $f" >&2 + exit 1 + fi + elif [ -f "$f" ]; then + # If $f is in $SPECIAL_FILES, we skip to the next loop because + # we won't check it for format, fields and encoding. + for sf in $SPECIAL_FILES; do + if [ "$f" = "$dists_parent_dir/$sf" ]; then + continue 2 + fi + done + + if ! is_filename_okay "$f"; then + echo "Wrong file: $f" >&2 + exit 1 + fi + + # Check that all entries contains the right fields + if ! has_valid_fields "$f"; then + echo "File $f has an invalid format" >&2 + exit 1 + fi + + # Check that every packages in Translation-$lang exists + TPKGS=$(basename "$f").pkgs + grep "^Package: " "$f" | cut -d' ' -f 2 | sort -u > "$TMP_WORK_DIR/$TPKGS" + case "$f" in + */$TESTING/*) t="$TESTING";; + */$UNSTABLE/*) t="$UNSTABLE";; + esac + if diff "$TMP_WORK_DIR/$t.pkgs" "$TMP_WORK_DIR/$TPKGS" | grep -q "^>"; then + diff -au "$TMP_WORK_DIR/$t.pkgs" "$TMP_WORK_DIR/$TPKGS" |grep "^+" + echo "$f contains packages which are not in $t" >&2 + exit 1 + fi + + # Check encoding + iconv -f utf-8 -t utf-8 < "$f" > /dev/null 2>&1 || { + echo "$f is not an UTF-8 file" >&2 + exit 1 + } + + # We do not check if the md5 in Translation-$lang are + # correct. + + # Now generate files + # Compress the file + bzip2 -c "$f" > "$f.bz2" + gzip -c "$f" > "$f.gz" + else + echo "Neither a file or directory: $f" >&2 + exit 1 + fi +done || false +# The while will just fail if an internal check "exit 1", but the script +# is not exited. "|| false" makes the script fail (and exit) in that case. + +echo "$dists_parent_dir structure validated successfully ($(date +%c))" + +# If we reach this point, everything went fine. +trap - EXIT +rm -rf "$TMP_WORK_DIR" + diff --git a/scripts/debian/mkfilesindices b/scripts/debian/mkfilesindices index f7a14a00..8457e811 100755 --- a/scripts/debian/mkfilesindices +++ b/scripts/debian/mkfilesindices @@ -38,9 +38,6 @@ for a in $ARCHES; do cd $base/ftp find ./dists -maxdepth 1 \! -type d find ./dists \! -type d | grep -E "(proposed-updates.*_$a.changes$|/main/disks-$a/|/main/installer-$a/|/Contents-$a|/binary-$a/)" - if echo X sparc mips mipsel hppa X | grep -q " $a "; then - find ./dists/sarge/main/upgrade-kernel \! -type d - fi ) | sort -u | gzip -9 > arch-$a.list.gz done @@ -83,7 +80,7 @@ for a in $ARCHES; do sort -u | poolfirst > ../arch-$a.files done -(cat ../arch-i386.files ../arch-amd64.files; zcat suite-oldstable.list.gz suite-proposed-updates.list.gz) | +(cat ../arch-i386.files ../arch-amd64.files; zcat suite-proposed-updates.list.gz) | sort -u | poolfirst > ../typical.files rm -f $ARCHLIST diff --git a/scripts/debian/mkmaintainers b/scripts/debian/mkmaintainers index aa80065f..a0abaa1f 100755 --- a/scripts/debian/mkmaintainers +++ b/scripts/debian/mkmaintainers @@ -8,7 +8,7 @@ set -e cd $base/misc/ cd $indices -dak make-maintainers $configdir/pseudo-packages.maintainers | sed -e "s/~[^ ]*\([ ]\)/\1/" | awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}' > .new-maintainers +dak make-maintainers ${scriptdir}/masterfiles/pseudo-packages.maintainers | sed -e "s/~[^ ]*\([ ]\)/\1/" | awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}' > .new-maintainers set +e cmp .new-maintainers Maintainers >/dev/null diff --git a/scripts/debian/ssh-move b/scripts/debian/ssh-move deleted file mode 100755 index ab820a7d..00000000 --- a/scripts/debian/ssh-move +++ /dev/null @@ -1,128 +0,0 @@ -#! 
-#!/usr/bin/perl -w -use strict; - -use IPC::Open2; - -$ENV{LANG} = "C"; - -# The protocol consists of repeated exchanges of the following: -# -# S: <filename> -# S: <each line of file, with dot-stuffing as in SMTP> -# S: . -# C: [writes file] -# C: <filename> -# S: [unlinks file] - -my $server = 0; -my $verbose = 0; -my $nonint = 0; -my $sshidentity; -my $sshmovepath = 'ssh-move'; -my $fromdir; -my $todir; - -while (@ARGV) { - local $_ = shift @ARGV; - if (/^--server$/) { - $server = 1; - } elsif (/^--verbose$/) { - $verbose = 1; - } elsif (/^--ssh-identity$/) { - $sshidentity = shift @ARGV; - } elsif (/^--ssh-move-path$/) { - $sshmovepath = shift @ARGV; - } elsif (/^--from-directory$/) { - $fromdir = shift @ARGV; - } elsif (/^--to-directory$/) { - $todir = shift @ARGV; - } elsif (/^--non-interactive$/) { - $nonint = 1; - } else { - unshift @ARGV, $_; - last; - } -} - -local $| = 1; - - -my ($in, $out) = (*STDIN, *STDOUT); - -unless ($nonint) { - my $servername = shift @ARGV; - local (*READER, *WRITER); - - my @args = ('ssh'); - push @args, '-i', $sshidentity if defined $sshidentity; - - push @args, $servername, $sshmovepath; - push @args, '--server' unless ($server); - push @args, '--to-directory', $todir if (defined $todir && $server); - push @args, '--from-directory', $fromdir if (defined $fromdir && !$server); - push @args, '--non-interactive'; - push @args, map quotemeta, @ARGV unless ($server); - - my $pid = open2 (\*READER, \*WRITER, @args); - - ($in, $out) = (*READER, *WRITER); -} - -sub server () -{ - chdir $fromdir if defined $fromdir; - - my @files = map glob, @ARGV; - - for my $file (@files) { - print $out "$file\n" or die "can't print to client: $!"; - open FILE, "< $file" or die "can't open $file: $!\n"; - local $_; - while (<FILE>) { - chomp; - $_ = ".$_" if /^\./; - print $out "$_\n" or die "can't print to client: $!"; - } - print $out ".\n" or die "can't print to client: $!"; - - my $confirm = <$in>; - chomp $confirm if defined $confirm; - unlink $file if defined $confirm and $confirm eq $file; - } -} - -sub client () -{ - chdir $todir if defined $todir; - - my $file; - while (defined ($file = <$in>)) { - chomp $file; - print STDERR $file if $verbose; - (my $tmpfile = $file) =~ s[.*/][]; - $tmpfile .= ".$$.tmp"; - # TODO: unlink $tmpfile if things go wrong - open TMP, "> $tmpfile" or die "can't open $tmpfile: $!"; - local $_; - while (<$in>) { - chomp; - if ($_ eq '.') { - close TMP or die "can't close $tmpfile: $!"; - rename $tmpfile, $file - or die "can't rename $tmpfile to $file: $!"; - print $out "$file\n" or die "can't print to server: $!"; - last; - } else { - s/^\.//; - print TMP "$_\n" or die "can't print to $tmpfile: $!"; - } - } - print STDERR " ok\n" if $verbose; - } -} - -if ($server) { - server (); -} else { - client (); -} diff --git a/scripts/debian/update-pseudopackages.sh b/scripts/debian/update-pseudopackages.sh new file mode 100755 index 00000000..d9a4d235 --- /dev/null +++ b/scripts/debian/update-pseudopackages.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# +# Fetches latest copy of pseudo-packages +# Joerg Jaspert <joerg@debian.org> + +. vars + +cd ${scriptdir}/masterfiles + +echo Updating archive version of pseudo-packages +for file in maintainers description; do + wget -t2 -T20 -q -N http://bugs.debian.org/pseudopackages/pseudo-packages.${file} || echo "Some error occurred with $file..." +done
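wget -N only re-downloads a file when the remote copy is newer than the one on disk, so the script can run from cron as often as wanted. A hypothetical manual run (the masterfiles location comes from vars; the path shown is an assumption):

    scriptdir=/srv/ftp.debian.org/scripts
    cd ${scriptdir}/masterfiles
    wget -t2 -T20 -q -N http://bugs.debian.org/pseudopackages/pseudo-packages.maintainers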
diff --git a/scripts/nfu/get-w-b-db b/scripts/nfu/get-w-b-db new file mode 100755 index 00000000..c34e5e38 --- /dev/null +++ b/scripts/nfu/get-w-b-db @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +# list of architectures taken from +# http://buildd.debian.org/stats/ + +# For debugging, you can override the path using +# the WB_DB_DIR environment variable +if [ -z "$WB_DB_DIR" ] +then + WB_DB_DIR=/srv/ftp.debian.org/scripts/nfu +fi + +cd $WB_DB_DIR || { echo "Failed to cd to $WB_DB_DIR" ; exit 1 ;} + +for arch in alpha amd64 arm armel hppa i386 ia64 m68k mips mipsel powerpc s390 sparc +do + rm -f $arch-all.txt + echo "Getting $arch-all.txt" + wget -q http://buildd.debian.org/stats/$arch-all.txt +done diff --git a/setup/init_pool.sql b/setup/init_pool.sql index 7a6e2a49..1e363940 100644 --- a/setup/init_pool.sql +++ b/setup/init_pool.sql @@ -70,6 +70,8 @@ CREATE TABLE files ( md5sum TEXT NOT NULL, location INT4 NOT NULL, -- REFERENCES location last_used TIMESTAMP, + sha1sum TEXT NOT NULL, + sha256sum TEXT NOT NULL, unique (filename, location) ); diff --git a/templates/override.bug-close b/templates/override.bug-close index 95599f43..5865651e 100644 --- a/templates/override.bug-close +++ b/templates/override.bug-close @@ -2,6 +2,8 @@ From: __OVERRIDE_ADDRESS__ To: __BUG_NUMBER__-close@__BUG_SERVER__ __CC__ __BCC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit diff --git a/templates/process-accepted.install b/templates/process-accepted.install index bf0c20e5..454fe81a 100644 --- a/templates/process-accepted.install +++ b/templates/process-accepted.install @@ -1,6 +1,8 @@ From: __DAK_ADDRESS__ To: __MAINTAINER_TO__ __BCC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ Precedence: bulk MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" diff --git a/templates/process-accepted.unaccept b/templates/process-accepted.unaccept index 0d77be1f..abb98dbf 100644 --- a/templates/process-accepted.unaccept +++ b/templates/process-accepted.unaccept @@ -2,6 +2,8 @@ From: __REJECTOR_ADDRESS__ To: __MAINTAINER_TO__ __CC__ __BCC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit diff --git a/templates/process-new.bxa_notification b/templates/process-new.bxa_notification index 438e843e..8fafaa9c 100644 --- a/templates/process-new.bxa_notification +++ b/templates/process-new.bxa_notification @@ -2,6 +2,7 @@ From: Ben Collins <bxa@ftp-master.debian.org> X-Not-Really-To: crypt@bis.doc.gov, enc@nsa.gov, web_site@bis.doc.gov To: bxa@ftp-master.debian.org __BCC__ +X-Debian: DAK Precedence: junk MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" diff --git a/templates/process-new.prod b/templates/process-new.prod index 78276b7c..4d24dc05 100644 --- a/templates/process-new.prod +++ b/templates/process-new.prod @@ -1,6 +1,8 @@ From: __FROM_ADDRESS__ To: __MAINTAINER_TO__ __CC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit diff --git a/templates/process-unchecked.accepted b/templates/process-unchecked.accepted index c8b62d24..e7268254 100644 --- a/templates/process-unchecked.accepted +++ b/templates/process-unchecked.accepted @@ -1,6 +1,8 @@ From: __DAK_ADDRESS__ To: __MAINTAINER_TO__ __BCC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ Precedence: bulk MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" diff --git
a/templates/process-unchecked.announce b/templates/process-unchecked.announce index 421495b9..7297c91c 100644 --- a/templates/process-unchecked.announce +++ b/templates/process-unchecked.announce @@ -1,6 +1,8 @@ From: __MAINTAINER_FROM__ To: __ANNOUNCE_LIST_ADDRESS__ __BCC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit diff --git a/templates/process-unchecked.bug-close b/templates/process-unchecked.bug-close index 0bb9ed0e..8ab73172 100644 --- a/templates/process-unchecked.bug-close +++ b/templates/process-unchecked.bug-close @@ -1,6 +1,8 @@ From: __MAINTAINER_FROM__ To: __BUG_NUMBER__-close@__BUG_SERVER__ __BCC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit diff --git a/templates/process-unchecked.bug-experimental-fixed b/templates/process-unchecked.bug-experimental-fixed index af52958d..34c5ca59 100644 --- a/templates/process-unchecked.bug-experimental-fixed +++ b/templates/process-unchecked.bug-experimental-fixed @@ -2,6 +2,8 @@ From: __MAINTAINER_FROM__ To: control@__BUG_SERVER__ Cc: __MAINTAINER_TO__ __BCC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit diff --git a/templates/process-unchecked.bug-nmu-fixed b/templates/process-unchecked.bug-nmu-fixed index 8ff215e4..45f6c733 100644 --- a/templates/process-unchecked.bug-nmu-fixed +++ b/templates/process-unchecked.bug-nmu-fixed @@ -2,6 +2,8 @@ From: __MAINTAINER_FROM__ To: control@__BUG_SERVER__ Cc: __MAINTAINER_TO__ __BCC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit diff --git a/templates/process-unchecked.new b/templates/process-unchecked.new index d5cd6775..6c3162fa 100644 --- a/templates/process-unchecked.new +++ b/templates/process-unchecked.new @@ -1,6 +1,8 @@ From: __DAK_ADDRESS__ To: __MAINTAINER_TO__ __BCC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ Precedence: bulk MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" diff --git a/templates/process-unchecked.override-disparity b/templates/process-unchecked.override-disparity index 52590803..bafbd4fd 100644 --- a/templates/process-unchecked.override-disparity +++ b/templates/process-unchecked.override-disparity @@ -1,6 +1,8 @@ From: __DAK_ADDRESS__ To: __MAINTAINER_TO__ __BCC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ Precedence: junk MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" @@ -15,6 +17,8 @@ Either the package or the override file is incorrect. If you think the override is correct and the package wrong please fix the package so that this disparity is fixed in the next upload. If you feel the override is incorrect then please reply to this mail and explain why. +Please INCLUDE the list of packages as seen above, or we won't be able +to deal with your mail due to missing information. 
[NB: this is an automatically generated mail; if you replied to one like it before and have not received a response yet, please ignore diff --git a/templates/queue.rejected b/templates/queue.rejected index 35a6ede3..7b027868 100644 --- a/templates/queue.rejected +++ b/templates/queue.rejected @@ -2,6 +2,8 @@ From: __REJECTOR_ADDRESS__ To: __MAINTAINER_TO__ __BCC__ __CC__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ Precedence: bulk MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" diff --git a/templates/reject-proposed-updates.rejected b/templates/reject-proposed-updates.rejected index fb32f03c..8b86cb5b 100644 --- a/templates/reject-proposed-updates.rejected +++ b/templates/reject-proposed-updates.rejected @@ -2,6 +2,9 @@ From: __DAK_ADDRESS__ To: __MAINTAINER_TO__ __CC__ __BCC__ +Reply-To: __STABLE_MAIL__ +X-Debian: DAK +X-Debian-Package: __SOURCE__ Precedence: bulk MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" @@ -10,8 +13,8 @@ Subject: __CHANGES_FILENAME__ REJECTED from proposed-updates Your package was rejected by an ftp master on behalf of __STABLE_REJECTOR__, if you have any questions or -comments regarding this rejection, please address them to him as we -can't help you. +comments regarding this rejection, please address them to +__STABLE_REJECTOR__ by replying to this mail. The reason given for rejection was: diff --git a/templates/rm.bug-close b/templates/rm.bug-close index 6b29c0c2..353d3ccb 100644 --- a/templates/rm.bug-close +++ b/templates/rm.bug-close @@ -2,6 +2,7 @@ From: __RM_ADDRESS__ To: __BUG_NUMBER__-close@__BUG_SERVER__ __CC__ __BCC__ +X-Debian: DAK MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit diff --git a/templates/security-install.advisory b/templates/security-install.advisory index 13220f36..eea2e937 100644 --- a/templates/security-install.advisory +++ b/templates/security-install.advisory @@ -1,6 +1,7 @@ From: __DAK_ADDRESS__ To: __WHOAMI__ <dak@security.debian.org> __BCC__ +X-Debian-Package: __SOURCE__ Subject: Template Advisory __ADVISORY__ ------------------------------------------------------------------------ diff --git a/tools/debianqueued-0.9/ChangeLog b/tools/debianqueued-0.9/ChangeLog index 8bd3cf99..42e62ea9 100644 --- a/tools/debianqueued-0.9/ChangeLog +++ b/tools/debianqueued-0.9/ChangeLog @@ -1,3 +1,64 @@ +2008-10-05 Thomas Viehmann <tv@beamnet.de> + + * show-deferred: make non-new uploads in deferred accessible + +2008-09-22 Thomas Viehmann <tv@beamnet.de> + + * show-deferred: minor fixes + +2008-09-21 Joerg Jaspert <joerg@debian.org> + + * debianqueued: Use perltidy + (copy_to_target): Only check md5sums if we want it, using a new + config value for it. + + * Queue.README: Its ftp.upload.debian.org now, not + ftp-master.debian.org. + + * Queue.README.ravel: New file for ravel + + * config-upload: New file, used for ravel + +2008-09-20 Thomas Viehmann <tv@beamnet.de> + + * show-deferred: status page for deferred upload queue + +2008-09-20 Joerg Jaspert <joerg@debian.org> + + * Queue.README (Version): Update the text to match reality with + DEFERRED/DELAYED and the removed mv command + +2008-09-20 Thomas Viehmann <tv@beamnet.de> + + * debianqueued: Minor fixes on .commands processing. + +2008-09-15 Joerg Jaspert <joerg@debian.org> + + * config: Use 15 delayed dirs. Also change maintainer_mail to + ftpmaster. And remove lotsa ancient cvs history foo + +2008-09-11 Thomas Viehmann <tv@beamnet.de> + + * debianqueued: Add DELAYED-support. 
+ +2008-06-15 Joerg Jaspert <joerg@debian.org> + + * debianqueued: Fix a brown-paper-bag bug (we just dont know who + to assign the bag too). strftime %b is better than %B for + the month name. + +2008-06-14 Joerg Jaspert <joerg@debian.org> + + * debianqueued (process_commands): Add a little note that one + should use dcut for .commands files + +2008-05-10 Stephen Gran <sgran@debian.org> + * debianqueued: First pass at a send_mail implementation that + sucks less. This also gives us X-Debian-Package + +2008-05-08 Joerg Jaspert <joerg@debian.org> + + * debianqueued: added header X-Debian: DAK -- Version 0.9 released diff --git a/tools/debianqueued-0.9/Queue.README b/tools/debianqueued-0.9/Queue.README index a8681d15..669fda39 100644 --- a/tools/debianqueued-0.9/Queue.README +++ b/tools/debianqueued-0.9/Queue.README @@ -1,20 +1,14 @@ -This directory is the Debian upload queue of ftp.uni-erlangen.de. All +This directory is the Debian upload queue of ftp.upload.debian.org. All files uploaded here will be moved into the project incoming dir on -master.debian.org. +this machine. -Only known Debian developers can upload here. All uploads must be in -the same format as they would go to master, i.e. with a PGP-signed -.changes file that lists all files that belong to the upload. Files -not meeting this condition will be removed automatically after some -time. +Only known Debian developers can upload here. Uploads have to be signed +by PGP keys in the Debian keyring. Files not meeting this criterion or +files not mentioned in a .changes file will be removed after some time. The queue daemon will notify you by mail of success or any problems -with your upload. For this, the Maintainer: field in the .changes must -contain your (the uploader's) correct e-mail address, not the address -of the real maintainer (if different). The same convention applies to -master itself, which sends installation acknowledgements to the -address in Maintainer:. +with your upload. *.commands Files @@ -34,26 +28,68 @@ The Uploader: field should contain the mail address to which the reply should go, just like Maintainer: in a *.changes. Commands: is a multi-line field like e.g. Description:, so each continuation line should start with a space. Each line in Commands: can contain a -standard 'rm' or 'mv' command, but no options are allowed, and -filenames may not contain slashes (so that they're restricted to the -queue directory). 'rm' can process as much arguments as you give it -(not only one), and also knows about the shell wildcards *, ?, and []. +standard 'rm' command, but no options are allowed. Except for the +DELAYED queue (see below) filenames may not contain slashes (so that +they're restricted to the queue directory). 'rm' can process as many +arguments as you give it (not only one), and also knows about the shell +wildcards *, ?, and []. Example of a *.commands file: -----BEGIN PGP SIGNED MESSAGE----- -Uploader: Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> +Uploader: Some One <some@example.com> Commands: rm hello_1.0-1_i386.deb - mv hello_1.0-1.dsx hello_1.0-1.dsc -----BEGIN PGP SIGNATURE----- Version: 2.6.3ia -iQCVAwUBNFiQSXVhJ0HiWnvJAQG58AP+IDJVeSWmDvzMUphScg1EK0mvChgnuD7h -BRiVQubXkB2DphLJW5UUSRnjw1iuFcYwH/lFpNpl7XP95LkLX3iFza9qItw4k2/q -tvylZkmIA9jxCyv/YB6zZCbHmbvUnL473eLRoxlnYZd3JFaCZMJ86B0Ph4GFNPAf -Z4jxNrgh7Bc= -=pH94 +[...] -----END PGP SIGNATURE----- + + +DELAYED Queue +------------- +There is a DELAYED queue available.
Packages uploaded into the +X-day (X between 0 and 15) subdirectories of DELAYED/ will be moved into +the DEFERRED queue and won't be placed into the archive before the +waiting time is over. + +To avoid confusion, the terms used are: +DELAYED - the public ftp upload directories, reachable via DELAYED/X-day + +DEFERRED - this is the place where the uploads are placed by the queue + daemon after processing and where packages wait before they + are moved into the incoming queue. + + +You can modify the queues (besides uploading packages) with *.commands +files as described above, using the following syntax. + +Note that any processing in the DEFERRED queue works on whole uploads +(i.e. a .changes file and all the files that came with it), whereas +operations in the DELAYED queue (and the normal ftp root directory) +work on individual files. + + - To move a package from one DEFERRED directory into another, say + from 8-days to 2-days delay: + reschedule foo_1.2-1.1_all.changes 2-day + + The move target has to be given without the trailing /. + + - To delete an upload (and all associated files) in the DEFERRED queue: + cancel foo_1.2-1.1_all.changes + + - To delete a broken upload in the DELAYED queue: + rm DELAYED/X-day/foobar.deb + + or + + rm --searchdirs foobar.deb + + - The old mv command is no longer supported. + +Wildcards in .commands files are only valid for the DELAYED queue and +its rm command; the DEFERRED queue commands cancel and reschedule do +not allow them. diff --git a/tools/debianqueued-0.9/Queue.README.ravel b/tools/debianqueued-0.9/Queue.README.ravel new file mode 100644 index 00000000..525a3e02 --- /dev/null +++ b/tools/debianqueued-0.9/Queue.README.ravel @@ -0,0 +1,47 @@ + +This directory is the Debian upload queue of ssh.upload.debian.org. All +valid files uploaded here will be transferred to ftp.upload.debian.org. + +Only known Debian developers can upload here. Uploads have to be signed +by PGP keys in the Debian keyring. Files not meeting this criterion or +files not mentioned in a .changes file will be removed after some time. + +The queue daemon will notify you by mail of success or any problems +with your upload. + + +*.commands Files +---------------- + +Besides *.changes files, you can also upload *.commands files for the +daemon to process. With *.commands files, you can instruct the daemon +to remove files in the queue directory that, for example, +resulted from failed or interrupted uploads. A *.commands file looks +much like a *.changes, but contains only two fields: Uploader: and +Commands:. It must be PGP-signed by a known Debian developer, to prevent +E.V.L. Hacker from removing files in the queue. The basename +(the part before the .commands extension) doesn't matter, but it is best +to make it somehow unique. + +The Uploader: field should contain the mail address to which the reply +should go, just like Maintainer: in a *.changes. Commands: is a +multi-line field like e.g. Description:, so each continuation line +should start with a space. Each line in Commands: can contain a +standard 'rm' command, but no options are allowed. Filenames may not +contain slashes (so that they're restricted to the queue +directory). 'rm' can process as many arguments as you give it (not only +one), and also knows about the shell wildcards *, ?, and []. + +Example of a *.commands file: + +-----BEGIN PGP SIGNED MESSAGE----- + +Uploader: Some One <some@example.com> +Commands: + rm hello_1.0-1_i386.deb + +-----BEGIN PGP SIGNATURE----- +Version: 2.6.3ia + +[...]
+-----END PGP SIGNATURE----- diff --git a/tools/debianqueued-0.9/config b/tools/debianqueued-0.9/config index bc432459..6de4931a 100644 --- a/tools/debianqueued-0.9/config +++ b/tools/debianqueued-0.9/config @@ -1,66 +1,6 @@ # # example configuration file for debianqueued # -# $Id: config,v 1.15 1999/07/07 16:19:32 ftplinux Exp $ -# -# $Log: config,v $ -# Revision 1.15 1999/07/07 16:19:32 ftplinux -# New variables for upload methods: $upload_method, $ftptimeout, -# $ftpdebug, $ls, $cp, $chmod. -# New variables for GnuPG checking: $gpg, $gpg_keyring, -# $gpg_keyring_archive_name. -# Renamed "master" in vars to "target". -# Updated list of non-US packages. -# -# Revision 1.14 1998/07/06 14:25:46 ftplinux -# Make $keyring_archive_name use a wildcard, newer debian keyring tarball -# contain a dir with a date. -# -# Revision 1.13 1998/04/23 10:56:53 ftplinux -# Added new config var $chmod_on_master. -# -# Revision 1.12 1998/02/17 10:57:21 ftplinux -# Added @test_binaries -# -# Revision 1.11 1997/12/09 13:51:46 ftplinux -# Implemented rejecting of nonus packages (new config var @nonus_packages) -# -# Revision 1.10 1997/10/30 11:32:39 ftplinux -# Implemented warning mails for incomplete uploads that miss a .changes -# file. Maintainer address can be extracted from *.deb, *.diff.gz, -# *.dsc, or *.tar.gz files with help of new utility functions -# is_debian_file, get_maintainer, and debian_file_stem. -# -# Revision 1.9 1997/09/17 12:16:33 ftplinux -# Added writing summaries to a file -# -# Revision 1.8 1997/08/18 13:07:14 ftplinux -# Implemented summary mails -# -# Revision 1.7 1997/08/11 12:49:09 ftplinux -# Implemented logfile rotating -# -# Revision 1.6 1997/08/07 09:25:21 ftplinux -# Added timeout for remote operations -# -# Revision 1.5 1997/07/09 10:14:58 ftplinux -# Change RCS Header: to Id: -# -# Revision 1.4 1997/07/09 10:13:51 ftplinux -# Alternative implementation of status file as plain file (not FIFO), because -# standard wu-ftpd doesn't allow retrieval of non-regular files. New config -# option $statusdelay for this. -# -# Revision 1.3 1997/07/08 08:34:14 ftplinux -# If dqueued-watcher runs as cron job, $PATH might not contain gzip. Use extra -# --use-compress-program option to tar, and new config var $gzip. -# -# Revision 1.2 1997/07/03 13:06:48 ftplinux -# Little last changes before beta release -# -# Revision 1.1.1.1 1997/07/03 12:54:59 ftplinux -# Import initial sources -# # set to != 0 for debugging output (to log file) $debug = 0; @@ -73,7 +13,7 @@ $scp = "/usr/bin/scp"; $ssh_agent = "/usr/bin/ssh-agent"; $ssh_add = "/usr/bin/ssh-add"; $md5sum = "/usr/bin/md5sum"; -$mail = "/usr/bin/mail"; +$mail = "/usr/sbin/sendmail"; $mkfifo = "/usr/bin/mkfifo"; $tar = "/bin/tar"; # must be GNU tar! $gzip = "/bin/gzip"; @@ -96,6 +36,13 @@ $ssh_key_file = ""; # the incoming dir we live in $incoming = "/srv/queued/UploadQueue"; +# the delayed incoming directories +$incoming_delayed = "/srv/queued/UploadQueue/DELAYED/%d-day"; + +# maximum delay directory, -1 for no delayed directory, +# incoming_delayed and target_delayed need to exist. +$max_delayed = 15; + # files not to delete in $incoming (regexp) $keep_files = '(status|\.message|README)$'; @@ -105,6 +52,9 @@ $valid_files = '(\.changes|\.tar\.gz|\.dsc|\.u?deb|diff\.gz|\.sh)$'; # Change files to mode 644 locally (after md5 check) or only on master? $chmod_on_target = 0; +# Do an md5sum check after upload? 
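+# (When enabled, copy_to_target re-checks every transferred file against
+# the md5sum recorded in its .changes before treating the upload as done;
+# conceptually: md5sum($file) eq $md5_from_changes, else the upload is
+# retried. The helper names are illustrative; the switch is the "Only
+# check md5sums if we want it" change noted in the ChangeLog above.)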
+$check_md5sum = 1; + +# name of the status file or named pipe in the incoming dir +$statusfile = "$incoming/status"; @@ -135,6 +85,9 @@ $targetlogin = "queue"; # incoming on target host $targetdir = "/srv/ftp.debian.org/queue/unchecked/"; +# incoming/delayed on target host +$targetdir_delayed = "/srv/queued/DEFERRED/%d-day"; + # select FTP debugging #$ftpdebug = 0; @@ -171,7 +124,7 @@ $bad_changes_timeout = 2*24*60*60; # 2 days $remote_timeout = 3*60*60; # 3 hours # mail address of maintainer -$maintainer_mail = "james\@nocrew.org"; +$maintainer_mail = "ftpmaster\@debian.org"; # logfile rotating: diff --git a/tools/debianqueued-0.9/config-upload b/tools/debianqueued-0.9/config-upload new file mode 100644 index 00000000..1b72e88d --- /dev/null +++ b/tools/debianqueued-0.9/config-upload @@ -0,0 +1,141 @@ +# +# example configuration file for debianqueued +# + +# set to != 0 for debugging output (to log file) +$debug = 0; + +# various programs: +# ----------------- +$gpg = "/usr/bin/gpg"; +$ssh = "/usr/bin/ssh"; +$scp = "/usr/bin/scp"; +$ssh_agent = "/usr/bin/ssh-agent"; +$ssh_add = "/usr/bin/ssh-add"; +$md5sum = "/usr/bin/md5sum"; +$mail = "/usr/sbin/sendmail"; +$mkfifo = "/usr/bin/mkfifo"; +$tar = "/bin/tar"; # must be GNU tar! +$gzip = "/bin/gzip"; +$ar = "/usr/bin/ar"; # must support p option, optional +$ls = "/bin/ls"; +$cp = "/bin/cp"; +$chmod = "/bin/chmod"; + +# binaries whose existence should be tested before each queue run +#@test_binaries = (); + +# general options to ssh/scp +$ssh_options = "-o'BatchMode yes' -o'FallBackToRsh no' ". + "-o'ForwardAgent no' -o'ForwardX11 no' ". + "-o'PasswordAuthentication no' -o'StrictHostKeyChecking yes'"; + +# ssh key file to use for connects to master (empty: default ~/.ssh/identity) +$ssh_key_file = ""; + +# the incoming dir we live in +$incoming = "/srv/upload.debian.org/UploadQueue"; + +# the delayed incoming directories +$incoming_delayed = "/srv/queued/UploadQueue/DELAYED/%d-day"; + +# maximum delay directory, -1 for no delayed directory, +# incoming_delayed and target_delayed need to exist. +$max_delayed = -1; + +# files not to delete in $incoming (regexp) +$keep_files = '(status|\.message|README)$'; + +# file patterns that aren't deleted right away +$valid_files = '(\.changes|\.tar\.gz|\.dsc|\.u?deb|diff\.gz|\.sh)$'; + +# Change files to mode 644 locally (after md5 check) or only on master? +$chmod_on_target = 0; + +# Do an md5sum check? +$check_md5sum = 0; + +# name of the status file or named pipe in the incoming dir +$statusfile = "$incoming/status"; + +# if 0, status file implemented as FIFO; if > 0, status file is plain +# file and updated with a delay of this many seconds +$statusdelay = 30; + +# names of the keyring files +@keyrings = ( "/srv/keyring.debian.org/keyrings/debian-keyring.gpg", + "/srv/keyring.debian.org/keyrings/debian-keyring.pgp"); + +# our log file +$logfile = "$queued_dir/log"; + +# our pid file +$pidfile = "$queued_dir/pid"; + +# upload method (ssh, copy, ftp) +$upload_method = "ftp"; + +# name of target host (ignored on copy method) +$target = "ftp.upload.debian.org"; + +# login name on target host (for ssh, always 'ftp' for ftp, ignored for copy) +$targetlogin = "ftp"; + +# incoming on target host +$targetdir = "/pub/UploadQueue/"; + +# incoming/delayed on target host +$targetdir_delayed = "/srv/queued/DEFERRED/%d-day"; + +# select FTP debugging +$ftpdebug = 0; + +# FTP timeout +$ftptimeout = 900; + +# max.
number of tries to upload +$max_upload_retries = 8; + +# delay after first failed upload +$upload_delay_1 = 30*60; # 30 min. + +# delay between successive failed uploads +$upload_delay_2 = 4*60*60; # 4 hours + +# packages that must go to nonus.debian.org and thus are rejected here +#@nonus_packages = qw(gpg-rsaidea); + +# timings: +# -------- +# time between two queue checks +$queue_delay = 5*60; # 5 min. +# when are stray files deleted? +$stray_remove_timeout = 24*60*60; # 1 day +# delay before reporting problems with a .changes file (not +# immediately for to-be-continued uploads) +$problem_report_timeout = 30*60; # 30 min. +# delay before reporting that a .changes file is missing (not +# immediately for to-be-continued uploads) +$no_changes_timeout = 30*60; # 30 min. +# when are .changes with persistent problems removed? +$bad_changes_timeout = 2*24*60*60; # 2 days +# how long may a remote operation (ssh/scp) take? +$remote_timeout = 3*60*60; # 3 hours + +# mail address of maintainer +$maintainer_mail = "ftpmaster\@debian.org"; + + +# logfile rotating: +# ----------------- +# how often to rotate (in days) +$log_age = 7; +# how many old logs to keep +$log_keep = 4; +# send summary mail when rotating logs? +$mail_summary = 1; +# write summary to file when rotating logs? (no if name empty) +$summary_file = "$queued_dir/summary"; + +# don't remove this, Perl needs it! +1; diff --git a/tools/debianqueued-0.9/debianqueued b/tools/debianqueued-0.9/debianqueued index d6f2afdc..256561a7 100755 --- a/tools/debianqueued-0.9/debianqueued +++ b/tools/debianqueued-0.9/debianqueued @@ -4,6 +4,7 @@ # # Copyright (C) 1997 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> # Copyright (C) 2001-2007 Ryan Murray <rmurray@debian.org> +# Copyright (C) 2008 Thomas Viehmann <tv@beamnet.de> # # This program is free software. You can redistribute it and/or # modify it under the terms of the GNU General Public License as @@ -11,237 +12,11 @@ # (at your option) any later version. # This program comes with ABSOLUTELY NO WARRANTY! # -# $Id: debianqueued,v 1.51 1999/07/08 09:43:21 ftplinux Exp $ -# -# $Log: debianqueued,v $ -# Revision 1.51 1999/07/08 09:43:21 ftplinux -# Bumped release number to 0.9 -# -# Revision 1.50 1999/07/07 16:17:30 ftplinux -# Signatures can now also be created by GnuPG; in pgp_check, also try -# gpg for checking. -# In several messages, also mention GnuPG. -# -# Revision 1.49 1999/07/07 16:14:43 ftplinux -# Implemented new upload methods "copy" and "ftp" as alternatives to "ssh". -# Replaced "master" in many function and variable names by "target". -# New functions ssh_cmd, ftp_cmd, and local_cmd for more abstraction and -# better readable code. -# -# Revision 1.48 1998/12/08 13:09:39 ftplinux -# At the end of process_changes, do not remove the @other_files with the same -# stem if a .changes file is in that list; then there is probably another -# upload for a different version or another architecture. -# -# Revision 1.47 1998/05/14 14:21:44 ftplinux -# Bumped release number to 0.8 -# -# Revision 1.46 1998/05/14 14:17:00 ftplinux -# When --after a successfull upload-- deleting files for the same job, check -# for equal revision number on files that have one. It has happened that the -# daemon deleted files that belonged to another job with different revision. -# -# Revision 1.45 1998/04/23 11:05:47 ftplinux -# Implemented $conf::chmod_on_master. If 0, new part to change mode locally in -# process_changes.
-# -# Revision 1.44 1998/04/21 08:44:44 ftplinux -# Don't use return value of debian_file_stem as regexp, it's a shell pattern. -# -# Revision 1.43 1998/04/21 08:22:21 ftplinux -# Also recogize "read-only filesystem" as error message so it triggers assuming -# that incoming is unwritable. -# Don't increment failure count after an upload try that did clear -# $incoming_writable. -# Fill in forgotten pattern for mail addr in process_commands. -# -# Revision 1.42 1998/03/31 13:27:32 ftplinux -# In fatal_signal, kill status daemon only if it has been started (otherwise -# warning about uninitialized variable). -# Change mode of files uploaded to master explicitly to 644 there, scp copies the -# permissions in the queue. -# -# Revision 1.41 1998/03/31 09:06:00 ftplinux -# Implemented handling of improper mail addresses in Maintainer: field. -# -# Revision 1.40 1998/03/24 13:17:33 ftplinux -# Added new check if incoming dir on master is writable. This check is triggered -# if an upload returns "permission denied" errors. If the dir is unwritable, the -# queue is holded (no upload tries) until it's writable again. -# -# Revision 1.39 1998/03/23 14:05:14 ftplinux -# Bumped release number to 0.7 -# -# Revision 1.38 1998/03/23 14:03:55 ftplinux -# In an upload failure message, say explicitly that the job will be -# retried, to avoid confusion of users. -# $failure_file was put on @keep_list only for first retry. -# If the daemon removes a .changes, set SGID bit on all files associated -# with it, so that the test for Debian files without a .changes doesn't -# find them. -# Don't send reports for files without a .changes if the files look like -# a recompilation for another architecture. -# Also don't send such a report if the list of files with the same stem -# contains a .changes. -# Set @keep_list earlier, before PGP and non-US checks. -# Fix recognition of -k argument. -# -# Revision 1.37 1998/02/17 12:29:58 ftplinux -# Removed @conf::test_binaries used only once warning -# Try to kill old daemon for 20secs instead of 10 -# -# Revision 1.36 1998/02/17 10:53:47 ftplinux -# Added test for binaries on maybe-slow NFS filesystems (@conf::test_binaries) -# -# Revision 1.35 1997/12/16 13:19:28 ftplinux -# Bumped release number to 0.6 -# -# Revision 1.34 1997/12/09 13:51:24 ftplinux -# Implemented rejecting of nonus packages (new config var @nonus_packages) -# -# Revision 1.33 1997/11/25 10:40:53 ftplinux -# In check_alive, loop up the IP address everytime, since it can change -# while the daemon is running. -# process_changes: Check presence of .changes on master at a later -# point, to avoid bothering master as long as there are errors in a -# .changes. -# Don't view .orig.tar.gz files as is_debian_file, to avoid that they're -# picked for extracting the maintainer address in the -# job-without-changes processing. -# END statement: Fix swapped arguments to kill -# Program startup: Implemented -r and -k arguments. -# -# Revision 1.32 1997/11/20 15:18:47 ftplinux -# Bumped release number to 0.5 -# -# Revision 1.31 1997/11/11 13:37:52 ftplinux -# Replaced <./$pattern> contruct be cleaner glob() call -# Avoid potentially uninitialized $_ in process_commands file read loop -# Implemented rm command with more than 1 arg and wildcards in rm args -# -# Revision 1.30 1997/11/06 14:09:53 ftplinux -# In process_commands, also recognize commands given on the same line as -# the Commands: keyword, not only the continuation lines. 
-# -# Revision 1.29 1997/11/03 15:52:20 ftplinux -# After reopening the log file write one line to it for dqueued-watcher. -# -# Revision 1.28 1997/10/30 15:37:23 ftplinux -# Removed some leftover comments in process_commands. -# Changed pgp_check so that it returns the address of the signator. -# process_commands now also logs PGP signator, since Uploader: address -# can be choosen freely by uploader. -# -# Revision 1.27 1997/10/30 14:05:37 ftplinux -# Added "command" to log string for command file uploader, to make it -# unique for dqueued-watcher. -# -# Revision 1.26 1997/10/30 14:01:05 ftplinux -# Implemented .commands files -# -# Revision 1.25 1997/10/30 13:05:29 ftplinux -# Removed date from status version info (too long) -# -# Revision 1.24 1997/10/30 13:04:02 ftplinux -# Print revision, version, and date in status data -# -# Revision 1.23 1997/10/30 12:56:01 ftplinux -# Implemented deletion of files that (probably) belong to an upload, but -# weren't listed in the .changes. -# -# Revision 1.22 1997/10/30 12:22:32 ftplinux -# When setting sgid bit for stray files without a .changes, check for -# files deleted in the meantime. -# -# Revision 1.21 1997/10/30 11:32:19 ftplinux -# Added quotes where filenames are used on sh command lines, in case -# they contain metacharacters. -# print_time now always print three-field times, as omitting the hour if -# 0 could cause confusing (hour or seconds missing?). -# Implemented warning mails for incomplete uploads that miss a .changes -# file. Maintainer address can be extracted from *.deb, *.diff.gz, -# *.dsc, or *.tar.gz files with help of new utility functions -# is_debian_file, get_maintainer, and debian_file_stem. -# -# Revision 1.20 1997/10/13 09:12:21 ftplinux -# On some .changes errors (missing/bad PGP signature, no files) also log the -# uploader -# -# Revision 1.19 1997/09/25 11:20:42 ftplinux -# Bumped release number to 0.4 -# -# Revision 1.18 1997/09/25 08:15:02 ftplinux -# In process_changes, initialize some vars to avoid warnings -# If first consistency checks failed, don't forget to delete .changes file -# -# Revision 1.17 1997/09/16 10:53:35 ftplinux -# Made logging more verbose in queued and dqueued-watcher -# -# Revision 1.16 1997/08/12 09:54:39 ftplinux -# Bumped release number -# -# Revision 1.15 1997/08/11 12:49:09 ftplinux -# Implemented logfile rotating -# -# Revision 1.14 1997/08/11 11:35:05 ftplinux -# Revised startup scheme so it works with the socket-based ssh-agent, too. -# That watches whether its child still exists, so the go-to-background fork must be done before the ssh-agent. -# -# Revision 1.13 1997/08/11 08:48:31 ftplinux -# Aaarg... forgot the alarm(0)'s -# -# Revision 1.12 1997/08/07 09:25:22 ftplinux -# Added timeout for remote operations -# -# Revision 1.11 1997/07/28 13:20:38 ftplinux -# Added release numner to startup message -# -# Revision 1.10 1997/07/28 11:23:39 ftplinux -# $main::statusd_pid not necessarily defined in status daemon -- rewrite check -# whether to delete pid file in signal handler. -# -# Revision 1.9 1997/07/28 08:12:16 ftplinux -# Again revised SIGCHLD handling. -# Set $SHELL to /bin/sh explicitly before starting ssh-agent. -# Again raise ping timeout. 
-# -# Revision 1.8 1997/07/25 10:23:03 ftplinux -# Made SIGCHLD handling more portable between perl versions -# -# Revision 1.7 1997/07/09 10:15:16 ftplinux -# Change RCS Header: to Id: -# -# Revision 1.6 1997/07/09 10:13:53 ftplinux -# Alternative implementation of status file as plain file (not FIFO), because -# standard wu-ftpd doesn't allow retrieval of non-regular files. New config -# option $statusdelay for this. -# -# Revision 1.5 1997/07/09 09:21:22 ftplinux -# Little revisions to signal handling; status daemon should ignore SIGPIPE, -# in case someone closes the FIFO before completely reading it; in fatal_signal, -# only the main daemon should remove the pid file. -# -# Revision 1.4 1997/07/08 11:31:51 ftplinux -# Print messages of ssh call in is_on_master to debug log. -# In ssh call to remove bad files on master, the split() doesn't work -# anymore, now that I use -o'xxx y'. Use string interpolation and let -# the shell parse the stuff. -# -# Revision 1.3 1997/07/07 09:29:30 ftplinux -# Call check_alive also if master hasn't been pinged for 8 hours. -# -# Revision 1.2 1997/07/03 13:06:49 ftplinux -# Little last changes before beta release -# -# Revision 1.1.1.1 1997/07/03 12:54:59 ftplinux -# Import initial sources -# -# require 5.002; use strict; use POSIX; -use POSIX qw( sys_stat_h sys_wait_h signal_h ); +use POSIX qw( strftime sys_stat_h sys_wait_h signal_h ); use Net::Ping; use Net::FTP; use Socket qw( PF_INET AF_INET SOCK_STREAM ); @@ -252,10 +27,10 @@ use Config; # --------------------------------------------------------------------------- package conf; -($conf::queued_dir = (($0 !~ m,^/,) ? POSIX::getcwd()."/" : "") . $0) - =~ s,/[^/]+$,,; +( $conf::queued_dir = ( ( $0 !~ m,^/, ) ? POSIX::getcwd() . "/" : "" ) . $0 ) + =~ s,/[^/]+$,,; require "$conf::queued_dir/config"; -my $junk = $conf::debug; # avoid spurious warnings about unused vars +my $junk = $conf::debug; # avoid spurious warnings about unused vars $junk = $conf::ssh_key_file; $junk = $conf::stray_remove_timeout; $junk = $conf::problem_report_timeout; @@ -268,128 +43,157 @@ $junk = $conf::upload_delay_2; $junk = $conf::ar; $junk = $conf::gzip; $junk = $conf::cp; -$junk = $conf::ls; -$junk = $conf::chmod; -$junk = $conf::ftpdebug; -$junk = $conf::ftptimeout; -$junk = $conf::no_changes_timeout; -$junk = @conf::nonus_packages; -$junk = @conf::test_binaries; -$junk = @conf::maintainer_mail; +$junk = $conf::check_md5sum; + +#$junk = $conf::ls; +$junk = $conf::chmod; +$junk = $conf::ftpdebug; +$junk = $conf::ftptimeout; +$junk = $conf::no_changes_timeout; +$junk = @conf::nonus_packages; +$junk = @conf::test_binaries; +$junk = @conf::maintainer_mail; +$junk = @conf::targetdir_delayed; +$junk = $conf::mail ||= '/usr/sbin/sendmail'; $conf::target = "localhost" if $conf::upload_method eq "copy"; + package main; -($main::progname = $0) =~ s,.*/,,; +( $main::progname = $0 ) =~ s,.*/,,; + +my %packages = (); # extract -r and -k args $main::arg = ""; -if (@ARGV == 1 && $ARGV[0] =~ /^-[rk]$/) { - $main::arg = ($ARGV[0] eq '-k') ? "kill" : "restart"; - shift @ARGV; +if ( @ARGV == 1 && $ARGV[0] =~ /^-[rk]$/ ) { + $main::arg = ( $ARGV[0] eq '-k' ) ? 
"kill" : "restart"; + shift @ARGV; } # test for another instance of the queued already running -my $pid; -if (open( PIDFILE, "<$conf::pidfile" )) { - chomp( $pid = <PIDFILE> ); - close( PIDFILE ); - if (!$pid) { - # remove stale pid file - unlink( $conf::pidfile ); - } - elsif ($main::arg) { - local($|) = 1; - print "Killing running daemon (pid $pid) ..."; - kill( 15, $pid ); - my $cnt = 20; - while( kill( 0, $pid ) && $cnt-- > 0 ) { - sleep 1; - print "."; - } - if (kill( 0, $pid )) { - print " failed!\nProcess $pid still running.\n"; - exit 1; - } - print "ok\n"; - if (-e "$conf::incoming/core") { - unlink( "$conf::incoming/core" ); - print "(Removed core file)\n"; - } - exit 0 if $main::arg eq "kill"; - } - else { - die "Another $main::progname is already running (pid $pid)\n" - if $pid && kill( 0, $pid ); - } -} -elsif ($main::arg eq "kill") { - die "No daemon running\n"; -} -elsif ($main::arg eq "restart") { - print "(No daemon running; starting anyway)\n"; +my ( $pid, $delayed_dirs, $adelayedcore ); +if ( open( PIDFILE, "<$conf::pidfile" ) ) { + chomp( $pid = <PIDFILE> ); + close(PIDFILE); + if ( !$pid ) { + + # remove stale pid file + unlink($conf::pidfile); + } elsif ($main::arg) { + local ($|) = 1; + print "Killing running daemon (pid $pid) ..."; + kill( 15, $pid ); + my $cnt = 20; + while ( kill( 0, $pid ) && $cnt-- > 0 ) { + sleep 1; + print "."; + } + if ( kill( 0, $pid ) ) { + print " failed!\nProcess $pid still running.\n"; + exit 1; + } + print "ok\n"; + if ( -e "$conf::incoming/core" ) { + unlink("$conf::incoming/core"); + print "(Removed core file)\n"; + } + for ( $delayed_dirs = 0 ; + $delayed_dirs <= $conf::max_delayed ; + $delayed_dirs++ ) + { + $adelayedcore = + sprintf( "$conf::incoming_delayed/core", $delayed_dirs ); + if ( -e $adelayedcore ) { + unlink($adelayedcore); + print "(Removed core file)\n"; + } + } ## end for ( $delayed_dirs = 0... + exit 0 if $main::arg eq "kill"; + } else { + die "Another $main::progname is already running (pid $pid)\n" + if $pid && kill( 0, $pid ); + } +} elsif ( $main::arg eq "kill" ) { + die "No daemon running\n"; +} elsif ( $main::arg eq "restart" ) { + print "(No daemon running; starting anyway)\n"; } # if started without arguments (initial invocation), then fork -if (!@ARGV) { - # now go to background - die "$main::progname: fork failed: $!\n" unless defined( $pid = fork ); - if ($pid) { - # parent: wait for signal from child (SIGCHLD or SIGUSR1) and exit - my $sigset = POSIX::SigSet->new(); - $sigset->emptyset(); - $SIG{"CHLD"} = sub { }; - $SIG{"USR1"} = sub { }; - POSIX::sigsuspend( $sigset ); - waitpid( $pid, WNOHANG ); - if (kill( 0, $pid )) { - print "Daemon started in background (pid $pid)\n"; - exit 0; - } - else { - exit 1; - } - } - else { - # child - setsid; - if ($conf::upload_method eq "ssh") { - # exec an ssh-agent that starts us again - # force shell to be /bin/sh, ssh-agent may base its decision - # whether to use a fd or a Unix socket on the shell... 
- $ENV{"SHELL"} = "/bin/sh"; - exec $conf::ssh_agent, $0, "startup", getppid(); - die "$main::progname: Could not exec $conf::ssh_agent: $!\n"; - } - else { - # no need to exec, just set up @ARGV as expected below - @ARGV = ("startup", getppid()); - } - } -} +if ( !@ARGV ) { + + # now go to background + die "$main::progname: fork failed: $!\n" + unless defined( $pid = fork ); + if ($pid) { + + # parent: wait for signal from child (SIGCHLD or SIGUSR1) and exit + my $sigset = POSIX::SigSet->new(); + $sigset->emptyset(); + $SIG{"CHLD"} = sub { }; + $SIG{"USR1"} = sub { }; + POSIX::sigsuspend($sigset); + waitpid( $pid, WNOHANG ); + if ( kill( 0, $pid ) ) { + print "Daemon started in background (pid $pid)\n"; + exit 0; + } else { + exit 1; + } + } else { + + # child + setsid; + if ( $conf::upload_method eq "ssh" ) { + + # exec an ssh-agent that starts us again + # force shell to be /bin/sh, ssh-agent may base its decision + # whether to use a fd or a Unix socket on the shell... + $ENV{"SHELL"} = "/bin/sh"; + exec $conf::ssh_agent, $0, "startup", getppid(); + die "$main::progname: Could not exec $conf::ssh_agent: $!\n"; + } else { + + # no need to exec, just set up @ARGV as expected below + @ARGV = ( "startup", getppid() ); + } + } ## end else [ if ($pid) +} ## end if ( !@ARGV ) die "Please start without any arguments.\n" - if @ARGV != 2 || $ARGV[0] ne "startup"; + if @ARGV != 2 || $ARGV[0] ne "startup"; my $parent_pid = $ARGV[1]; do { - my $version; - ($version = 'Release: 0.9 $Revision: 1.51 $ $Date: 1999/07/08 09:43:21 $ $Author: ftplinux $') =~ s/\$ ?//g; - print "debianqueued $version\n"; + my $version; + ( $version = +'Release: 0.9 $Revision: 1.51 $ $Date: 1999/07/08 09:43:21 $ $Author: ftplinux $' + ) =~ s/\$ ?//g; + print "debianqueued $version\n"; }; # check if all programs exist my $prg; foreach $prg ( $conf::gpg, $conf::ssh, $conf::scp, $conf::ssh_agent, - $conf::ssh_add, $conf::md5sum, $conf::mail, $conf::mkfifo ) { - die "Required program $prg doesn't exist or isn't executable\n" - if ! -x $prg; -# check for correct upload method -die "Bad upload method '$conf::upload_method'.\n" - if $conf::upload_method ne "ssh" && - $conf::upload_method ne "ftp" && - $conf::upload_method ne "copy"; -die "No keyrings\n" if ! @conf::keyrings; - -} + $conf::ssh_add, $conf::md5sum, $conf::mail, $conf::mkfifo ) +{ + die "Required program $prg doesn't exist or isn't executable\n" + if !-x $prg; + + # check for correct upload method + die "Bad upload method '$conf::upload_method'.\n" + if $conf::upload_method ne "ssh" + && $conf::upload_method ne "ftp" + && $conf::upload_method ne "copy"; + die "No keyrings\n" if !@conf::keyrings; + +} ## end foreach $prg ( $conf::gpg, ... +die "statusfile path must be absolute." + if $conf::statusfile !~ m,^/,; +die "upload and target queue paths must be absolute." + if $conf::incoming !~ m,^/, + || $conf::incoming_delayed !~ m,^/, + || $conf::targetdir !~ m,^/, + || $conf::targetdir_delayed !~ m,^/,; # --------------------------------------------------------------------------- # initializations @@ -398,9 +202,12 @@ die "No keyrings\n" if ! 
@conf::keyrings; # prototypes sub calc_delta(); sub check_dir(); +sub get_filelist_from_known_good_changes($); +sub age_delayed_queues(); sub process_changes($\@); sub process_commands($); -sub is_on_target($); +sub age_delayed_queues(); +sub is_on_target($\@); sub copy_to_target(@); sub pgp_check($); sub check_alive(;$); @@ -443,7 +250,7 @@ sub restart_statusd(); sub fatal_signal($); $ENV{"PATH"} = "/bin:/usr/bin"; -$ENV{"IFS"} = "" if defined($ENV{"IFS"} && $ENV{"IFS"} ne ""); +$ENV{"IFS"} = "" if defined( $ENV{"IFS"} && $ENV{"IFS"} ne "" ); # constants for stat sub ST_DEV() { 0 } @@ -457,45 +264,47 @@ sub ST_SIZE() { 7 } sub ST_ATIME() { 8 } sub ST_MTIME() { 9 } sub ST_CTIME() { 10 } + # fixed lengths of data items passed over status pipe sub STATNUM_LEN() { 30 } sub STATSTR_LEN() { 128 } # init list of signals -defined $Config{sig_name} or die "$main::progname: No signal list defined!\n"; +defined $Config{sig_name} + or die "$main::progname: No signal list defined!\n"; my $i = 0; my $name; -foreach $name (split( ' ', $Config{sig_name} )) { - $main::signo{$name} = $i++; +foreach $name ( split( ' ', $Config{sig_name} ) ) { + $main::signo{$name} = $i++; } @main::fatal_signals = qw( INT QUIT ILL TRAP ABRT BUS FPE USR2 SEGV PIPE - TERM XCPU XFSZ PWR ); + TERM XCPU XFSZ PWR ); $main::block_sigset = POSIX::SigSet->new; $main::block_sigset->addset( $main::signo{"INT"} ); $main::block_sigset->addset( $main::signo{"TERM"} ); # some constant net stuff -$main::tcp_proto = (getprotobyname('tcp'))[2] - or die "Cannot get protocol number for 'tcp'\n"; -my $used_service = ($conf::upload_method eq "ssh") ? "ssh" : "ftp"; -$main::echo_port = (getservbyname($used_service, 'tcp'))[2] - or die "Cannot get port number for service '$used_service'\n"; +$main::tcp_proto = ( getprotobyname('tcp') )[2] + or die "Cannot get protocol number for 'tcp'\n"; +my $used_service = ( $conf::upload_method eq "ssh" ) ? "ssh" : "ftp"; +$main::echo_port = ( getservbyname( $used_service, 'tcp' ) )[2] + or die "Cannot get port number for service '$used_service'\n"; # clear queue of stored mails @main::stored_mails = (); # run ssh-add to bring the key into the agent (will use stdin/stdout) -if ($conf::upload_method eq "ssh") { - system "$conf::ssh_add $conf::ssh_key_file" - and die "$main::progname: Running $conf::ssh_add failed ". - "(exit status ", $? >> 8, ")\n"; +if ( $conf::upload_method eq "ssh" ) { + system "$conf::ssh_add $conf::ssh_key_file" + and die "$main::progname: Running $conf::ssh_add failed " + . "(exit status ", $? >> 8, ")\n"; } # change to queue dir -chdir( $conf::incoming ) - or die "$main::progname: cannot cd to $conf::incoming: $!\n"; +chdir($conf::incoming) + or die "$main::progname: cannot cd to $conf::incoming: $!\n"; # needed before /dev/null redirects, some system send a SIGHUP when loosing # the controlling tty @@ -503,53 +312,57 @@ $SIG{"HUP"} = "IGNORE"; # open logfile, make it unbuffered open( LOG, ">>$conf::logfile" ) - or die "Cannot open my logfile $conf::logfile: $!\n"; + or die "Cannot open my logfile $conf::logfile: $!\n"; chmod( 0644, $conf::logfile ) - or die "Cannot set modes of $conf::logfile: $!\n"; -select( (select(LOG), $| = 1)[0] ); + or die "Cannot set modes of $conf::logfile: $!\n"; +select( ( select(LOG), $| = 1 )[0] ); -sleep( 1 ); +sleep(1); $SIG{"HUP"} = \&close_log; # redirect stdin, ... 
to /dev/null open( STDIN, "</dev/null" ) - or die "$main::progname: Can't redirect stdin to /dev/null: $!\n"; + or die "$main::progname: Can't redirect stdin to /dev/null: $!\n"; open( STDOUT, ">&LOG" ) - or die "$main::progname: Can't redirect stdout to $conf::logfile: $!\n"; + or die "$main::progname: Can't redirect stdout to $conf::logfile: $!\n"; open( STDERR, ">&LOG" ) - or die "$main::progname: Can't redirect stderr to $conf::logfile: $!\n"; + or die "$main::progname: Can't redirect stderr to $conf::logfile: $!\n"; + # ok, from this point usually no "die" anymore, stderr is gone! msg( "log", "daemon (pid $$) started\n" ); # initialize variables used by send_status before launching the status daemon $main::dstat = "i"; -format_status_num( $main::next_run, time+10 ); +format_status_num( $main::next_run, time + 10 ); format_status_str( $main::current_changes, "" ); check_alive(); -$main::incoming_writable = 1; # assume this for now +$main::incoming_writable = 1; # assume this for now # start the daemon watching the 'status' FIFO -if ($conf::statusfile && $conf::statusdelay == 0) { - $main::statusd_pid = fork_statusd(); - $SIG{"CHLD"} = \&kid_died; # watch out for dead status daemon - # SIGUSR1 triggers status info - $SIG{"USR1"} = \&send_status; -} +if ( $conf::statusfile && $conf::statusdelay == 0 ) { + $main::statusd_pid = fork_statusd(); + $SIG{"CHLD"} = \&kid_died; # watch out for dead status daemon + # SIGUSR1 triggers status info + $SIG{"USR1"} = \&send_status; +} ## end if ( $conf::statusfile... $main::maind_pid = $$; -END { kill( $main::signo{"ABRT"}, $$ ) if defined $main::signo{"ABRT"}; } +END { + kill( $main::signo{"ABRT"}, $$ ) + if defined $main::signo{"ABRT"}; +} # write the pid file open( PIDFILE, ">$conf::pidfile" ) - or msg( "log", "Can't open $conf::pidfile: $!\n" ); + or msg( "log", "Can't open $conf::pidfile: $!\n" ); printf PIDFILE "%5d\n", $$; -close( PIDFILE ); +close(PIDFILE); chmod( 0644, $conf::pidfile ) - or die "Cannot set modes of $conf::pidfile: $!\n"; + or die "Cannot set modes of $conf::pidfile: $!\n"; # other signals will just log an error and exit -foreach ( @main::fatal_signals ) { - $SIG{$_} = \&fatal_signal; +foreach (@main::fatal_signals) { + $SIG{$_} = \&fatal_signal; } # send signal to user-started process that we're ready and it can exit @@ -559,958 +372,1304 @@ kill( $main::signo{"USR1"}, $parent_pid ); # the mainloop # --------------------------------------------------------------------------- +# default to classical incoming/target +$main::current_incoming = $conf::incoming; +$main::current_targetdir = $conf::targetdir; + $main::dstat = "i"; write_status_file() if $conf::statusdelay; -while( 1 ) { - - # ping target only if there is the possibility that we'll contact it (but - # also don't wait too long). - my @have_changes = <*.changes *.commands>; - check_alive() if @have_changes || (time - $main::last_ping_time) > 8*60*60; - - if (@have_changes && $main::target_up) { - check_incoming_writable if !$main::incoming_writable; - check_dir() if $main::incoming_writable; - } - $main::dstat = "i"; - write_status_file() if $conf::statusdelay; - - # sleep() returns if we received a signal (SIGUSR1 for status FIFO), so - # calculate the end time once and wait for it being reached. 
- format_status_num( $main::next_run, time + $conf::queue_delay ); - my $delta; - while( ($delta = calc_delta()) > 0 ) { - debug( "mainloop sleeping $delta secs" ); - sleep( $delta ); - # check if statusd died, if using status FIFO, or update status file - if ($conf::statusdelay) { - write_status_file(); - } - else { - restart_statusd(); - } - } -} +while (1) { + + # ping target only if there is the possibility that we'll contact it (but + # also don't wait too long). + my @have_changes = <*.changes *.commands>; + for ( my $delayed_dirs = 0 ; + $delayed_dirs <= $conf::max_delayed ; + $delayed_dirs++ ) + { + my $adelayeddir = sprintf( "$conf::incoming_delayed", $delayed_dirs ); + push( @have_changes, <$adelayeddir/*.changes> ); + } ## end for ( my $delayed_dirs ... + check_alive() + if @have_changes || ( time - $main::last_ping_time ) > 8 * 60 * 60; + + if ( @have_changes && $main::target_up ) { + check_incoming_writable if !$main::incoming_writable; + check_dir() if $main::incoming_writable; + } + $main::dstat = "i"; + write_status_file() if $conf::statusdelay; + + if ( $conf::upload_method eq "copy" ) { + age_delayed_queues(); + } + + # sleep() returns if we received a signal (SIGUSR1 for status FIFO), so + # calculate the end time once and wait for it being reached. + format_status_num( $main::next_run, time + $conf::queue_delay ); + my $delta; + while ( ( $delta = calc_delta() ) > 0 ) { + debug("mainloop sleeping $delta secs"); + sleep($delta); + + # check if statusd died, if using status FIFO, or update status file + if ($conf::statusdelay) { + write_status_file(); + } else { + restart_statusd(); + } + } ## end while ( ( $delta = calc_delta... +} ## end while (1) sub calc_delta() { - my $delta; - - $delta = $main::next_run - time; - $delta = $conf::statusdelay - if $conf::statusdelay && $conf::statusdelay < $delta; - return $delta; -} + my $delta; + $delta = $main::next_run - time; + $delta = $conf::statusdelay + if $conf::statusdelay && $conf::statusdelay < $delta; + return $delta; +} ## end sub calc_delta() # --------------------------------------------------------------------------- # main working functions # --------------------------------------------------------------------------- - # # main function for checking the incoming dir # sub check_dir() { - my( @files, @changes, @keep_files, @this_keep_files, @stats, $file ); - - debug( "starting checkdir" ); - $main::dstat = "c"; - write_status_file() if $conf::statusdelay; - - # test if needed binaries are available; this is if they're on maybe - # slow-mounted NFS filesystems - foreach (@conf::test_binaries) { - next if -f $_; - # maybe the mount succeeds now - sleep 5; - next if -f $_; - msg( "log", "binary test failed for $_; delaying queue run\n"); - goto end_run; - } - - # look for *.commands files - foreach $file ( <*.commands> ) { - init_mail( $file ); - block_signals(); - process_commands( $file ); - unblock_signals(); - $main::dstat = "c"; - write_status_file() if $conf::statusdelay; - finish_mail(); - } - - opendir( INC, "." 
) - or (msg( "log", "Cannot open incoming dir $conf::incoming: $!\n" ), - return); - @files = readdir( INC ); - closedir( INC ); - - # process all .changes files found - @changes = grep /\.changes$/, @files; - push( @keep_files, @changes ); # .changes files aren't stray - foreach $file ( @changes ) { - init_mail( $file ); - # wrap in an eval to allow jumpbacks to here with die in case - # of errors - block_signals(); - eval { process_changes( $file, @this_keep_files ); }; - unblock_signals(); - msg( "log,mail", $@ ) if $@; - $main::dstat = "c"; - write_status_file() if $conf::statusdelay; - - # files which are ok in conjunction with this .changes - debug( "$file tells to keep @this_keep_files" ); - push( @keep_files, @this_keep_files ); - finish_mail(); - - # break out of this loop if the incoming dir has become unwritable - goto end_run if !$main::incoming_writable; - } - ftp_close() if $conf::upload_method eq "ftp"; - - # find files which aren't related to any .changes - foreach $file ( @files ) { - # filter out files we never want to delete - next if ! -f $file || # may have disappeared in the meantime - $file eq "." || $file eq ".." || - (grep { $_ eq $file } @keep_files) || - $file =~ /$conf::keep_files/; - # Delete such files if they're older than - # $stray_remove_timeout; they could be part of an - # yet-incomplete upload, with the .changes still missing. - # Cannot send any notification, since owner unknown. - next if !(@stats = stat( $file )); - my $age = time - $stats[ST_MTIME]; - my( $maint, $pattern, @job_files ); - if ($file =~ /^junk-for-writable-test/ || - $file !~ m,$conf::valid_files, || - $age >= $conf::stray_remove_timeout) { - msg( "log", "Deleted stray file $file\n" ) if rm( $file ); - } - elsif ($age > $conf::no_changes_timeout && - is_debian_file( $file ) && - # not already reported - !($stats[ST_MODE] & S_ISGID) && - ($pattern = debian_file_stem( $file )) && - (@job_files = glob($pattern)) && - # If a .changes is in the list, it has the same stem as the - # found file (probably a .orig.tar.gz). Don't report in this - # case. - !(grep( /\.changes$/, @job_files ))) { - $maint = get_maintainer( $file ); - # Don't send a mail if this looks like the recompilation of a - # package for a non-i386 arch. For those, the maintainer field is - # useless :-( - if (!grep( /(\.dsc|_(i386|all)\.deb)$/, @job_files )) { - msg( "log", "Found an upload without .changes and with no ", - ".dsc file\n" ); - msg( "log", "Not sending a report, because probably ", - "recompilation job\n" ); - } - elsif ($maint) { - init_mail(); - $main::mail_addr = $maint; - $main::mail_addr = $1 if $main::mail_addr =~ /<([^>]*)>/; - $main::mail_subject = "Incomplete upload found in ". - "Debian upload queue"; - msg( "mail", "Probably you are the uploader of the following ". - "file(s) in\n" ); - msg( "mail", "the Debian upload queue directory:\n " ); - msg( "mail", join( "\n ", @job_files ), "\n" ); - msg( "mail", "This looks like an upload, but a .changes file ". - "is missing, so the job\n" ); - msg( "mail", "cannot be processed.\n\n" ); - msg( "mail", "If no .changes file arrives within ", - print_time( $conf::stray_remove_timeout - $age ), - ", the files will be deleted.\n\n" ); - msg( "mail", "If you didn't upload those files, please just ". - "ignore this message.\n" ); - finish_mail(); - msg( "log", "Sending problem report for an upload without a ". - ".changes\n" ); - msg( "log", "Maintainer: $maint\n" ); - } - else { - msg( "log", "Found an upload without .changes, but can't ". 
- "find a maintainer address\n" ); - } - msg( "log", "Files: @job_files\n" ); - # remember we already have sent a mail regarding this file - foreach ( @job_files ) { - my @st = stat($_); - next if !@st; # file may have disappeared in the meantime - chmod +($st[ST_MODE] |= S_ISGID), $_; - } - } - else { - debug( "found stray file $file, deleting in ", - print_time($conf::stray_remove_timeout - $age) ); - } - } - - end_run: - $main::dstat = "i"; - write_status_file() if $conf::statusdelay; -} + my ( @files, @changes, @keep_files, @this_keep_files, @stats, $file, + $adelay ); + + debug("starting checkdir"); + $main::dstat = "c"; + write_status_file() if $conf::statusdelay; + + # test if needed binaries are available; this is if they're on maybe + # slow-mounted NFS filesystems + foreach (@conf::test_binaries) { + next if -f $_; + + # maybe the mount succeeds now + sleep 5; + next if -f $_; + msg( "log", "binary test failed for $_; delaying queue run\n" ); + goto end_run; + } ## end foreach (@conf::test_binaries) + + for ( $adelay = -1 ; $adelay <= $conf::max_delayed ; $adelay++ ) { + if ( $adelay == -1 ) { + $main::current_incoming = $conf::incoming; + $main::current_incoming_short = ""; + $main::current_targetdir = $conf::targetdir; + } else { + $main::current_incoming = sprintf( $conf::incoming_delayed, $adelay ); + $main::current_incoming_short = sprintf( "DELAYED/%d-day", $adelay ); + $main::current_targetdir = sprintf( $conf::targetdir_delayed, $adelay ); + } + + # need to clear directory specific variables + undef(@keep_files); + undef(@this_keep_files); + + chdir($main::current_incoming) + or ( + msg( + "log", + "Cannot change to dir " + . "${main::current_incoming_short}: $!\n" + ), + return + ); + + # look for *.commands files but not in delayed queues + if ( $adelay == -1 ) { + foreach $file (<*.commands>) { + init_mail($file); + block_signals(); + process_commands($file); + unblock_signals(); + $main::dstat = "c"; + write_status_file() if $conf::statusdelay; + finish_mail(); + } ## end foreach $file (<*.commands>) + } ## end if ( $adelay == -1 ) + opendir( INC, "." ) + or ( + msg( + "log", "Cannot open dir ${main::current_incoming_short}: $!\n" + ), + return + ); + @files = readdir(INC); + closedir(INC); + + # process all .changes files found + @changes = grep /\.changes$/, @files; + push( @keep_files, @changes ); # .changes files aren't stray + foreach $file (@changes) { + init_mail($file); + + # wrap in an eval to allow jumpbacks to here with die in case + # of errors + block_signals(); + eval { process_changes( $file, @this_keep_files ); }; + unblock_signals(); + msg( "log,mail", $@ ) if $@; + $main::dstat = "c"; + write_status_file() if $conf::statusdelay; + + # files which are ok in conjunction with this .changes + debug("$file tells to keep @this_keep_files"); + push( @keep_files, @this_keep_files ); + finish_mail(); + + # break out of this loop if the incoming dir has become unwritable + goto end_run if !$main::incoming_writable; + } ## end foreach $file (@changes) + ftp_close() if $conf::upload_method eq "ftp"; + + # find files which aren't related to any .changes + foreach $file (@files) { + + # filter out files we never want to delete + next if !-f $file || # may have disappeared in the meantime + $file eq "." + || $file eq ".." + || ( grep { $_ eq $file } @keep_files ) + || $file =~ /$conf::keep_files/; + + # Delete such files if they're older than + # $stray_remove_timeout; they could be part of an + # yet-incomplete upload, with the .changes still missing. 
+ # Cannot send any notification, since owner unknown. + next if !( @stats = stat($file) ); + my $age = time - $stats[ST_MTIME]; + my ( $maint, $pattern, @job_files ); + if ( $file =~ /^junk-for-writable-test/ + || $file !~ m,$conf::valid_files, + || $age >= $conf::stray_remove_timeout ) + { + msg( "log", + "Deleted stray file ${main::current_incoming_short}/$file\n" ) + if rm($file); + } elsif ( + $age > $conf::no_changes_timeout + && is_debian_file($file) + && + + # not already reported + !( $stats[ST_MODE] & S_ISGID ) + && ( $pattern = debian_file_stem($file) ) + && ( @job_files = glob($pattern) ) + && + + # If a .changes is in the list, it has the same stem as the + # found file (probably a .orig.tar.gz). Don't report in this + # case. + !( grep( /\.changes$/, @job_files ) ) + ) + { + $maint = get_maintainer($file); + + # Don't send a mail if this looks like the recompilation of a + # package for a non-i386 arch. For those, the maintainer field is + # useless :-( + if ( !grep( /(\.dsc|_(i386|all)\.deb)$/, @job_files ) ) { + msg( "log", "Found an upload without .changes and with no ", + ".dsc file\n" ); + msg( "log", + "Not sending a report, because probably ", + "recompilation job\n" ); + } elsif ($maint) { + init_mail(); + $main::mail_addr = $maint; + $main::mail_addr = $1 if $main::mail_addr =~ /<([^>]*)>/; + $main::mail_subject = + "Incomplete upload found in " . "Debian upload queue"; + msg( + "mail", + "Probably you are the uploader of the following " + . "file(s) in\n" + ); + msg( "mail", "the Debian upload queue directory:\n " ); + msg( "mail", join( "\n ", @job_files ), "\n" ); + msg( + "mail", + "This looks like an upload, but a .changes file " + . "is missing, so the job\n" + ); + msg( "mail", "cannot be processed.\n\n" ); + msg( + "mail", + "If no .changes file arrives within ", + print_time( $conf::stray_remove_timeout - $age ), + ", the files will be deleted.\n\n" + ); + msg( + "mail", + "If you didn't upload those files, please just " + . "ignore this message.\n" + ); + finish_mail(); + msg( + "log", + "Sending problem report for an upload without a " + . ".changes\n" + ); + msg( "log", "Maintainer: $maint\n" ); + } else { + msg( + "log", + "Found an upload without .changes, but can't " + . "find a maintainer address\n" + ); + } ## end else [ if ( !grep( /(\.dsc|_(i386|all)\.deb)$/... + msg( "log", "Files: @job_files\n" ); + + # remember we already have sent a mail regarding this file + foreach (@job_files) { + my @st = stat($_); + next if !@st; # file may have disappeared in the meantime + chmod +( $st[ST_MODE] |= S_ISGID ), $_; + } + } else { + debug( +"found stray file ${main::current_incoming_short}/$file, deleting in ", + print_time( $conf::stray_remove_timeout - $age ) + ); + } ## end else [ if ( $file =~ /^junk-for-writable-test/... + } ## end foreach $file (@files) + } ## end for ( $adelay = -1 ; $adelay... + chdir($conf::incoming); + +end_run: + $main::dstat = "i"; + write_status_file() if $conf::statusdelay; +} ## end sub check_dir() + +sub get_filelist_from_known_good_changes($) { + my $changes = shift; + + local (*CHANGES); + my (@filenames); + + # parse the .changes file + open( CHANGES, "<$changes" ) + or die "$changes: $!\n"; +outer_loop: while (<CHANGES>) { + if (/^Files:/i) { + while (<CHANGES>) { + redo outer_loop if !/^\s/; + my @field = split(/\s+/); + next if @field != 6; + + # forbid shell meta chars in the name, we pass it to a + # subshell several times... 
+ $field[5] =~ /^([a-zA-Z0-9.+_:@=%-][~a-zA-Z0-9.+_:@=%-]*)/; + if ( $1 ne $field[5] ) { + msg( "log", "found suspicious filename $field[5]\n" ); + next; + } + push( @filenames, $field[5] ); + } ## end while (<CHANGES>) + } ## end if (/^Files:/i) + } ## end while (<CHANGES>) + close(CHANGES); + return @filenames; +} ## end sub get_filelist_from_known_good_changes($) # # process one .changes file # sub process_changes($\@) { - my $changes = shift; - my $keep_list = shift; - my( $pgplines, @files, @filenames, @changes_stats, $failure_file, - $retries, $last_retry, $upload_time, $file, $do_report, $ls_l, - $problems_reported, $errs, $pkgname, $signator ); - local( *CHANGES ); - local( *FAILS ); - - format_status_str( $main::current_changes, $changes ); - $main::dstat = "c"; - write_status_file() if $conf::statusdelay; - - @$keep_list = (); - msg( "log", "processing $changes\n" ); - - # parse the .changes file - open( CHANGES, "<$changes" ) - or die "Cannot open $changes: $!\n"; - $pgplines = 0; - $main::mail_addr = ""; - @files = (); - outer_loop: while( <CHANGES> ) { - if (/^---+(BEGIN|END) PGP .*---+$/) { - ++$pgplines; - } - elsif (/^Maintainer:\s*/i) { - chomp( $main::mail_addr = $' ); - $main::mail_addr = $1 if $main::mail_addr =~ /<([^>]*)>/; - } - elsif (/^Source:\s*/i) { - chomp( $pkgname = $' ); - $pkgname =~ s/\s+$//; - } - elsif (/^Files:/i) { - while( <CHANGES> ) { - redo outer_loop if !/^\s/; - my @field = split( /\s+/ ); - next if @field != 6; - # forbid shell meta chars in the name, we pass it to a - # subshell several times... - $field[5] =~ /^([a-zA-Z0-9.+_:@=%-][~a-zA-Z0-9.+_:@=%-]*)/; - if ($1 ne $field[5]) { - msg( "log", "found suspicious filename $field[5]\n" ); - msg( "mail", "File '$field[5]' mentioned in $changes\n", - "has bad characters in its name. Removed.\n" ); - rm( $field[5] ); - next; - } - push( @files, { md5 => $field[1], - size => $field[2], - name => $field[5] } ); - push( @filenames, $field[5] ); - debug( "includes file $field[5], size $field[2], ", - "md5 $field[1]" ); - } - } - } - close( CHANGES ); - - # tell check_dir that the files mentioned in this .changes aren't stray, - # we know about them somehow - @$keep_list = @filenames; - - # some consistency checks - if (!$main::mail_addr) { - msg( "log,mail", "$changes doesn't contain a Maintainer: field; ". - "cannot process\n" ); - goto remove_only_changes; - } - if ($main::mail_addr !~ /^(buildd_\S+-\S+|\S+\@\S+\.\S+)/) { - # doesn't look like a mail address, maybe only the name - my( $new_addr, @addr_list ); - if ($new_addr = try_to_get_mail_addr( $main::mail_addr, \@addr_list )){ - # substitute (unique) found addr, but give a warning - msg( "mail", "(The Maintainer: field didn't contain a proper ". - "mail address.\n" ); - msg( "mail", "Looking for `$main::mail_addr' in the Debian ". - "keyring gave your address\n" ); - msg( "mail", "as unique result, so I used this.)\n" ); - msg( "log", "Substituted $new_addr for malformed ". - "$main::mail_addr\n" ); - $main::mail_addr = $new_addr; - } - else { - # not found or not unique: hold the job and inform queue maintainer - my $old_addr = $main::mail_addr; - $main::mail_addr = $conf::maintainer_mail; - msg( "mail", "The job $changes doesn't have a correct email\n" ); - msg( "mail", "address in the Maintainer: field:\n" ); - msg( "mail", " $old_addr\n" ); - msg( "mail", "A check for this in the Debian keyring gave:\n" ); - msg( "mail", @addr_list ? - " " . join( ", ", @addr_list ) . 
"\n" : - " nothing\n" ); - msg( "mail", "Please fix this manually\n" ); - msg( "log", "Bad Maintainer: field in $changes: $old_addr\n" ); - goto remove_only_changes; - } - } - if ($pgplines < 3) { - msg( "log,mail", "$changes isn't signed with PGP/GnuPG\n" ); - msg( "log", "(uploader $main::mail_addr)\n" ); - goto remove_only_changes; - } - if (!@files) { - msg( "log,mail", "$changes doesn't mention any files\n" ); - msg( "log", "(uploader $main::mail_addr)\n" ); - goto remove_only_changes; - } - - # check for packages that shouldn't be processed - if (grep( $_ eq $pkgname, @conf::nonus_packages )) { - msg( "log,mail", "$pkgname is a package that must be uploaded ". - "to nonus.debian.org\n" ); - msg( "log,mail", "instead of target.\n" ); - msg( "log,mail", "Job rejected and removed all files belonging ". - "to it:\n" ); - msg( "log,mail", " ", join( ", ", @filenames ), "\n" ); - rm( $changes, @filenames ); - return; - } - - $failure_file = $changes . ".failures"; - $retries = $last_retry = 0; - if (-f $failure_file) { - open( FAILS, "<$failure_file" ) - or die "Cannot open $failure_file: $!\n"; - my $line = <FAILS>; - close( FAILS ); - ( $retries, $last_retry ) = ( $1, $2 ) if $line =~ /^(\d+)\s+(\d+)$/; - push( @$keep_list, $failure_file ); - } - - # run PGP on the file to check the signature - if (!($signator = pgp_check( $changes ))) { - msg( "log,mail", "$changes has bad PGP/GnuPG signature!\n" ); - msg( "log", "(uploader $main::mail_addr)\n" ); - remove_only_changes: - msg( "log,mail", "Removing $changes, but keeping its associated ", - "files for now.\n" ); - rm( $changes ); - # Set SGID bit on associated files, so that the test for Debian files - # without a .changes doesn't consider them. - foreach ( @filenames ) { - my @st = stat($_); - next if !@st; # file may have disappeared in the meantime - chmod +($st[ST_MODE] |= S_ISGID), $_; - } - return; - } - elsif ($signator eq "LOCAL ERROR") { - # An error has appened when starting pgp... Don't process the file, - # but also don't delete it - debug( "Can't PGP/GnuPG check $changes -- don't process it for now" ); - return; - } - - die "Cannot stat $changes (??): $!\n" - if !(@changes_stats = stat( $changes )); - # Make $upload_time the maximum of all modification times of files - # related to this .changes (and the .changes it self). This is the - # last time something changes to these files. 
- $upload_time = $changes_stats[ST_MTIME]; - for $file ( @files ) { - my @stats; - next if !(@stats = stat( $file->{"name"} )); - $file->{"stats"} = \@stats; - $upload_time = $stats[ST_MTIME] if $stats[ST_MTIME] > $upload_time; - } - - $do_report = (time - $upload_time) > $conf::problem_report_timeout; - $problems_reported = $changes_stats[ST_MODE] & S_ISGID; - # if any of the files is newer than the .changes' ctime (the time - # we sent a report and set the sticky bit), send new problem reports - if ($problems_reported && $changes_stats[ST_CTIME] < $upload_time) { - $problems_reported = 0; - chmod +($changes_stats[ST_MODE] &= ~S_ISGID), $changes; - debug( "upload_time>changes-ctime => resetting problems reported" ); - } - debug( "do_report=$do_report problems_reported=$problems_reported" ); - - # now check all files for correct size and md5 sum - for $file ( @files ) { - my $filename = $file->{"name"}; - if (!defined( $file->{"stats"} )) { - # could be an upload that isn't complete yet, be quiet, - # but don't process the file; - msg( "log,mail", "$filename doesn't exist\n" ) - if $do_report && !$problems_reported; - msg( "log", "$filename doesn't exist (ignored for now)\n" ) - if !$do_report; - msg( "log", "$filename doesn't exist (already reported)\n" ) - if $problems_reported; - ++$errs; - } - elsif ($file->{"stats"}->[ST_SIZE] < $file->{"size"} && !$do_report) { - # could be an upload that isn't complete yet, be quiet, - # but don't process the file - msg( "log", "$filename is too small (ignored for now)\n" ); - ++$errs; - } - elsif ($file->{"stats"}->[ST_SIZE] != $file->{"size"}) { - msg( "log,mail", "$filename has incorrect size; deleting it\n" ); - rm( $filename ); - ++$errs; - } - elsif (md5sum( $filename ) ne $file->{"md5"}) { - msg( "log,mail", "$filename has incorrect md5 checksum; ", - "deleting it\n" ); - rm( $filename ); - ++$errs; - } - } - - if ($errs) { - if ((time - $upload_time) > $conf::bad_changes_timeout) { - # if a .changes fails for a really long time (several days - # or so), remove it and all associated files - msg( "log,mail", - "$changes couldn't be processed for ", - int($conf::bad_changes_timeout/(60*60)), - " hours and is now deleted\n" ); - msg( "log,mail", - "All files it mentions are also removed:\n" ); - msg( "log,mail", " ", join( ", ", @filenames ), "\n" ); - rm( $changes, @filenames, $failure_file ); - } - elsif ($do_report && !$problems_reported) { - # otherwise, send a problem report, if not done already - msg( "mail", - "Due to the errors above, the .changes file couldn't ", - "be processed.\n", - "Please fix the problems for the upload to happen.\n" ); - # remember we already have sent a mail regarding this file - debug( "Sending problem report mail and setting SGID bit" ); - my $mode = $changes_stats[ST_MODE] |= S_ISGID; - msg( "log", "chmod failed: $!" ) if (chmod ($mode, $changes) != 1); - } - # else: be quiet - - return; - } - - # if this upload already failed earlier, wait until the delay requirement - # is fulfilled - if ($retries > 0 && (time - $last_retry) < - ($retries == 1 ? 
$conf::upload_delay_1 : $conf::upload_delay_2)) { - msg( "log", "delaying retry of upload\n" ); - return; - } - - if ($conf::upload_method eq "ftp") { - return if !ftp_open(); - } - - # check if the job is already present on target - # (moved to here, to avoid bothering target as long as there are errors in - # the job) - if ($ls_l = is_on_target( $changes )) { - msg( "log,mail", "$changes is already present on target host:\n" ); - msg( "log,mail", "$ls_l\n" ); - msg( "mail", "Either you already uploaded it, or someone else ", - "came first.\n" ); - msg( "log,mail", "Job $changes removed.\n" ); - rm( $changes, @filenames, $failure_file ); - return; - } - - # clear sgid bit before upload, scp would copy it to target. We don't need - # it anymore, we know there are no problems if we come here. Also change - # mode of files to 644 if this should be done locally. - $changes_stats[ST_MODE] &= ~S_ISGID; - if (!$conf::chmod_on_target) { - $changes_stats[ST_MODE] &= ~0777; - $changes_stats[ST_MODE] |= 0644; - } - chmod +($changes_stats[ST_MODE]), $changes; - - # try uploading to target - if (!copy_to_target( $changes, @filenames )) { - # if the upload failed, increment the retry counter and remember the - # current time; both things are written to the .failures file. Don't - # increment the fail counter if the error was due to incoming - # unwritable. - return if !$main::incoming_writable; - if (++$retries >= $conf::max_upload_retries) { - msg( "log,mail", - "$changes couldn't be uploaded for $retries times now.\n" ); - msg( "log,mail", - "Giving up and removing it and its associated files:\n" ); - msg( "log,mail", " ", join( ", ", @filenames ), "\n" ); - rm( $changes, @filenames, $failure_file ); - } - else { - $last_retry = time; - if (open( FAILS, ">$failure_file" )) { - print FAILS "$retries $last_retry\n"; - close( FAILS ); - chmod( 0600, $failure_file ) - or die "Cannot set modes of $failure_file: $!\n"; - } - push( @$keep_list, $failure_file ); - debug( "now $retries failed uploads" ); - msg( "mail", - "The upload will be retried in ", - print_time( $retries == 1 ? $conf::upload_delay_1 : - $conf::upload_delay_2 ), "\n" ); - } - return; - } - - # If the files were uploaded ok, remove them - rm( $changes, @filenames, $failure_file ); - - msg( "mail", "$changes uploaded successfully to $conf::target\n" ); - msg( "mail", "along with the files:\n ", - join( "\n ", @filenames ), "\n" ); - msg( "log", "$changes processed successfully (uploader $main::mail_addr)\n" ); - - # Check for files that have the same stem as the .changes (and weren't - # mentioned there) and delete them. It happens often enough that people - # upload a .orig.tar.gz where it isn't needed and also not in the - # .changes. Explicitly deleting it (and not waiting for the - # $stray_remove_timeout) reduces clutter in the queue dir and maybe also - # educates uploaders :-) - -# my $pattern = debian_file_stem( $changes ); -# my $spattern = substr( $pattern, 0, -1 ); # strip off '*' at end -# my @other_files = glob($pattern); - # filter out files that have a Debian revision at all and a different - # revision. Those belong to a different upload. -# if ($changes =~ /^\Q$spattern\E-([\d.+-]+)/) { -# my $this_rev = $1; -# @other_files = grep( !/^\Q$spattern\E-([\d.+-]+)/ || $1 eq $this_rev, -# @other_files); - #} - # Also do not remove those files if a .changes is among them. Then there - # is probably a second upload for another version or another architecture. 
-# if (@other_files && !grep( /\.changes$/, @other_files )) { -# rm( @other_files ); -# msg( "mail", "\nThe following file(s) seemed to belong to the same ". -# "upload, but weren't listed\n" ); -# msg( "mail", "in the .changes file:\n " ); -# msg( "mail", join( "\n ", @other_files ), "\n" ); -# msg( "mail", "They have been deleted.\n" ); -# msg( "log", "Deleted files in upload not in $changes: @other_files\n" ); - #} -} + my $changes = shift; + my $keep_list = shift; + my ( + $pgplines, @files, @filenames, @changes_stats, + $failure_file, $retries, $last_retry, $upload_time, + $file, $do_report, $ls_l, $problems_reported, + $errs, $pkgname, $signator + ); + local (*CHANGES); + local (*FAILS); + + format_status_str( $main::current_changes, + "$main::current_incoming_short/$changes" ); + $main::dstat = "c"; + write_status_file() if $conf::statusdelay; + + @$keep_list = (); + msg( "log", "processing ${main::current_incoming_short}/$changes\n" ); + + # parse the .changes file + open( CHANGES, "<$changes" ) + or die "Cannot open ${main::current_incoming_short}/$changes: $!\n"; + $pgplines = 0; + $main::mail_addr = ""; + @files = (); +outer_loop: while (<CHANGES>) { + if (/^---+(BEGIN|END) PGP .*---+$/) { + ++$pgplines; + } elsif (/^Maintainer:\s*/i) { + chomp( $main::mail_addr = $' ); + $main::mail_addr = $1 if $main::mail_addr =~ /<([^>]*)>/; + } elsif (/^Source:\s*/i) { + chomp( $pkgname = $' ); + $pkgname =~ s/\s+$//; + $main::packages{$pkgname}++; + } elsif (/^Files:/i) { + while (<CHANGES>) { + redo outer_loop if !/^\s/; + my @field = split(/\s+/); + next if @field != 6; + + # forbid shell meta chars in the name, we pass it to a + # subshell several times... + $field[5] =~ /^([a-zA-Z0-9.+_:@=%-][~a-zA-Z0-9.+_:@=%-]*)/; + if ( $1 ne $field[5] ) { + msg( "log", "found suspicious filename $field[5]\n" ); + msg( + "mail", +"File '$field[5]' mentioned in $main::current_incoming_short/$changes\n", + "has bad characters in its name. Removed.\n" + ); + rm( $field[5] ); + next; + } ## end if ( $1 ne $field[5] ) + push( + @files, + { + md5 => $field[1], + size => $field[2], + name => $field[5] + } + ); + push( @filenames, $field[5] ); + debug( "includes file $field[5], size $field[2], ", "md5 $field[1]" ); + } ## end while (<CHANGES>) + } ## end elsif (/^Files:/i) + } ## end while (<CHANGES>) + close(CHANGES); + + # tell check_dir that the files mentioned in this .changes aren't stray, + # we know about them somehow + @$keep_list = @filenames; + + # some consistency checks + if ( !$main::mail_addr ) { + msg( "log,mail", +"$main::current_incoming_short/$changes doesn't contain a Maintainer: field; " + . "cannot process\n" ); + goto remove_only_changes; + } ## end if ( !$main::mail_addr) + if ( $main::mail_addr !~ /^(buildd_\S+-\S+|\S+\@\S+\.\S+)/ ) { + + # doesn't look like a mail address, maybe only the name + my ( $new_addr, @addr_list ); + if ( $new_addr = try_to_get_mail_addr( $main::mail_addr, \@addr_list ) ) { + + # substitute (unique) found addr, but give a warning + msg( + "mail", + "(The Maintainer: field didn't contain a proper " + . "mail address.\n" + ); + msg( + "mail", + "Looking for `$main::mail_addr' in the Debian " + . "keyring gave your address\n" + ); + msg( "mail", "as unique result, so I used this.)\n" ); + msg( "log", + "Substituted $new_addr for malformed " . 
"$main::mail_addr\n" ); + $main::mail_addr = $new_addr; + } else { + + # not found or not unique: hold the job and inform queue maintainer + my $old_addr = $main::mail_addr; + $main::mail_addr = $conf::maintainer_mail; + msg( + "mail", +"The job ${main::current_incoming_short}/$changes doesn't have a correct email\n" + ); + msg( "mail", "address in the Maintainer: field:\n" ); + msg( "mail", " $old_addr\n" ); + msg( "mail", "A check for this in the Debian keyring gave:\n" ); + msg( "mail", + @addr_list + ? " " . join( ", ", @addr_list ) . "\n" + : " nothing\n" ); + msg( "mail", "Please fix this manually\n" ); + msg( + "log", +"Bad Maintainer: field in ${main::current_incoming_short}/$changes: $old_addr\n" + ); + goto remove_only_changes; + } ## end else [ if ( $new_addr = try_to_get_mail_addr... + } ## end if ( $main::mail_addr ... + if ( $pgplines < 3 ) { + msg( + "log,mail", + "$main::current_incoming_short/$changes isn't signed with PGP/GnuPG\n" + ); + msg( "log", "(uploader $main::mail_addr)\n" ); + goto remove_only_changes; + } ## end if ( $pgplines < 3 ) + if ( !@files ) { + msg( "log,mail", + "$main::current_incoming_short/$changes doesn't mention any files\n" ); + msg( "log", "(uploader $main::mail_addr)\n" ); + goto remove_only_changes; + } ## end if ( !@files ) + + # check for packages that shouldn't be processed + if ( grep( $_ eq $pkgname, @conf::nonus_packages ) ) { + msg( + "log,mail", + "$pkgname is a package that must be uploaded " + . "to nonus.debian.org\n" + ); + msg( "log,mail", "instead of target.\n" ); + msg( "log,mail", + "Job rejected and removed all files belonging " . "to it:\n" ); + msg( "log,mail", " ", join( ", ", @filenames ), "\n" ); + rm( $changes, @filenames ); + return; + } ## end if ( grep( $_ eq $pkgname... + + $failure_file = $changes . ".failures"; + $retries = $last_retry = 0; + if ( -f $failure_file ) { + open( FAILS, "<$failure_file" ) + or die "Cannot open $main::current_incoming_short/$failure_file: $!\n"; + my $line = <FAILS>; + close(FAILS); + ( $retries, $last_retry ) = ( $1, $2 ) + if $line =~ /^(\d+)\s+(\d+)$/; + push( @$keep_list, $failure_file ); + } ## end if ( -f $failure_file ) + + # run PGP on the file to check the signature + if ( !( $signator = pgp_check($changes) ) ) { + msg( + "log,mail", + "$main::current_incoming_short/$changes has bad PGP/GnuPG signature!\n" + ); + msg( "log", "(uploader $main::mail_addr)\n" ); + remove_only_changes: + msg( + "log,mail", +"Removing $main::current_incoming_short/$changes, but keeping its associated ", + "files for now.\n" + ); + rm($changes); + + # Set SGID bit on associated files, so that the test for Debian files + # without a .changes doesn't consider them. + foreach (@filenames) { + my @st = stat($_); + next if !@st; # file may have disappeared in the meantime + chmod +( $st[ST_MODE] |= S_ISGID ), $_; + } + return; + } elsif ( $signator eq "LOCAL ERROR" ) { + + # An error has appened when starting pgp... Don't process the file, + # but also don't delete it + debug( +"Can't PGP/GnuPG check $main::current_incoming_short/$changes -- don't process it for now" + ); + return; + } ## end elsif ( $signator eq "LOCAL ERROR") + + die "Cannot stat ${main::current_incoming_short}/$changes (??): $!\n" + if !( @changes_stats = stat($changes) ); + + # Make $upload_time the maximum of all modification times of files + # related to this .changes (and the .changes it self). This is the + # last time something changes to these files. 
+ $upload_time = $changes_stats[ST_MTIME]; + for $file (@files) { + my @stats; + next if !( @stats = stat( $file->{"name"} ) ); + $file->{"stats"} = \@stats; + $upload_time = $stats[ST_MTIME] if $stats[ST_MTIME] > $upload_time; + } ## end for $file (@files) + + $do_report = ( time - $upload_time ) > $conf::problem_report_timeout; + $problems_reported = $changes_stats[ST_MODE] & S_ISGID; + + # if any of the files is newer than the .changes' ctime (the time + # we sent a report and set the sticky bit), send new problem reports + if ( $problems_reported && $changes_stats[ST_CTIME] < $upload_time ) { + $problems_reported = 0; + chmod +( $changes_stats[ST_MODE] &= ~S_ISGID ), $changes; + debug("upload_time>changes-ctime => resetting problems reported"); + } + debug("do_report=$do_report problems_reported=$problems_reported"); + + # now check all files for correct size and md5 sum + for $file (@files) { + my $filename = $file->{"name"}; + if ( !defined( $file->{"stats"} ) ) { + + # could be an upload that isn't complete yet, be quiet, + # but don't process the file; + msg( "log,mail", "$filename doesn't exist\n" ) + if $do_report && !$problems_reported; + msg( "log", "$filename doesn't exist (ignored for now)\n" ) + if !$do_report; + msg( "log", "$filename doesn't exist (already reported)\n" ) + if $problems_reported; + ++$errs; + } elsif ( $file->{"stats"}->[ST_SIZE] < $file->{"size"} + && !$do_report ) + { + + # could be an upload that isn't complete yet, be quiet, + # but don't process the file + msg( "log", "$filename is too small (ignored for now)\n" ); + ++$errs; + } elsif ( $file->{"stats"}->[ST_SIZE] != $file->{"size"} ) { + msg( "log,mail", "$filename has incorrect size; deleting it\n" ); + rm($filename); + ++$errs; + } elsif ( md5sum($filename) ne $file->{"md5"} ) { + msg( "log,mail", + "$filename has incorrect md5 checksum; ", + "deleting it\n" ); + rm($filename); + ++$errs; + } ## end elsif ( md5sum($filename)... + } ## end for $file (@files) + + if ($errs) { + if ( ( time - $upload_time ) > $conf::bad_changes_timeout ) { + + # if a .changes fails for a really long time (several days + # or so), remove it and all associated files + msg( + "log,mail", + "$main::current_incoming_short/$changes couldn't be processed for ", + int( $conf::bad_changes_timeout / ( 60 * 60 ) ), + " hours and is now deleted\n" + ); + msg( "log,mail", "All files it mentions are also removed:\n" ); + msg( "log,mail", " ", join( ", ", @filenames ), "\n" ); + rm( $changes, @filenames, $failure_file ); + } elsif ( $do_report && !$problems_reported ) { + + # otherwise, send a problem report, if not done already + msg( + "mail", + "Due to the errors above, the .changes file couldn't ", + "be processed.\n", + "Please fix the problems for the upload to happen.\n" + ); + + # remember we already have sent a mail regarding this file + debug("Sending problem report mail and setting SGID bit"); + my $mode = $changes_stats[ST_MODE] |= S_ISGID; + msg( "log", "chmod failed: $!" ) + if ( chmod( $mode, $changes ) != 1 ); + } ## end elsif ( $do_report && !$problems_reported) + + # else: be quiet + + return; + } ## end if ($errs) + + # if this upload already failed earlier, wait until the delay requirement + # is fulfilled + if ( $retries > 0 + && ( time - $last_retry ) < + ( $retries == 1 ? $conf::upload_delay_1 : $conf::upload_delay_2 ) ) + { + msg( "log", "delaying retry of upload\n" ); + return; + } ## end if ( $retries > 0 && (... 
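The gate above boils down to a two-stage backoff: a short delay after the first failure, a longer one after every further failure. As a standalone predicate (the delay values here are illustrative, not the queue's actual configuration):

    use strict;
    use warnings;

    # Illustrative delays: 1h after the first failure, 4h afterwards.
    my ( $upload_delay_1, $upload_delay_2 ) = ( 1 * 60 * 60, 4 * 60 * 60 );

    # True if a job that failed $retries times, most recently at epoch
    # $last_retry, still has to wait before the next attempt.
    sub must_wait {
        my ( $retries, $last_retry ) = @_;
        return 0 if $retries == 0;    # never failed, no delay
        my $delay = $retries == 1 ? $upload_delay_1 : $upload_delay_2;
        return ( time - $last_retry ) < $delay;
    }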
+ + if ( $conf::upload_method eq "ftp" ) { + return if !ftp_open(); + } + + # check if the job is already present on target + # (moved to here, to avoid bothering target as long as there are errors in + # the job) + if ( $ls_l = is_on_target( $changes, @filenames ) ) { + msg( + "log,mail", +"$main::current_incoming_short/$changes is already present on target host:\n" + ); + msg( "log,mail", "$ls_l\n" ); + msg( "mail", + "Either you already uploaded it, or someone else ", + "came first.\n" ); + msg( "log,mail", "Job $changes removed.\n" ); + rm( $changes, @filenames, $failure_file ); + return; + } ## end if ( $ls_l = is_on_target... + + # clear sgid bit before upload, scp would copy it to target. We don't need + # it anymore, we know there are no problems if we come here. Also change + # mode of files to 644 if this should be done locally. + $changes_stats[ST_MODE] &= ~S_ISGID; + if ( !$conf::chmod_on_target ) { + $changes_stats[ST_MODE] &= ~0777; + $changes_stats[ST_MODE] |= 0644; + } + chmod +( $changes_stats[ST_MODE] ), $changes; + + # try uploading to target + if ( !copy_to_target( $changes, @filenames ) ) { + + # if the upload failed, increment the retry counter and remember the + # current time; both things are written to the .failures file. Don't + # increment the fail counter if the error was due to incoming + # unwritable. + return if !$main::incoming_writable; + if ( ++$retries >= $conf::max_upload_retries ) { + msg( "log,mail", + "$changes couldn't be uploaded for $retries times now.\n" ); + msg( "log,mail", + "Giving up and removing it and its associated files:\n" ); + msg( "log,mail", " ", join( ", ", @filenames ), "\n" ); + rm( $changes, @filenames, $failure_file ); + } else { + $last_retry = time; + if ( open( FAILS, ">$failure_file" ) ) { + print FAILS "$retries $last_retry\n"; + close(FAILS); + chmod( 0600, $failure_file ) + or die "Cannot set modes of $failure_file: $!\n"; + } ## end if ( open( FAILS, ">$failure_file"... + push( @$keep_list, $failure_file ); + debug("now $retries failed uploads"); + msg( + "mail", + "The upload will be retried in ", + print_time( + $retries == 1 + ? $conf::upload_delay_1 + : $conf::upload_delay_2 + ), + "\n" + ); + } ## end else [ if ( ++$retries >= $conf::max_upload_retries) + return; + } ## end if ( !copy_to_target( ... + + # If the files were uploaded ok, remove them + rm( $changes, @filenames, $failure_file ); + + msg( "mail", "$changes uploaded successfully to $conf::target\n" ); + msg( "mail", "along with the files:\n ", join( "\n ", @filenames ), + "\n" ); + msg( "log", + "$changes processed successfully (uploader $main::mail_addr)\n" ); + + # Check for files that have the same stem as the .changes (and weren't + # mentioned there) and delete them. It happens often enough that people + # upload a .orig.tar.gz where it isn't needed and also not in the + # .changes. Explicitly deleting it (and not waiting for the + # $stray_remove_timeout) reduces clutter in the queue dir and maybe also + # educates uploaders :-) + + # my $pattern = debian_file_stem( $changes ); + # my $spattern = substr( $pattern, 0, -1 ); # strip off '*' at end + # my @other_files = glob($pattern); + # filter out files that have a Debian revision at all and a different + # revision. Those belong to a different upload. + # if ($changes =~ /^\Q$spattern\E-([\d.+-]+)/) { + # my $this_rev = $1; + # @other_files = grep( !/^\Q$spattern\E-([\d.+-]+)/ || $1 eq $this_rev, + # @other_files); + #} + # Also do not remove those files if a .changes is among them. 
Then there + # is probably a second upload for another version or another architecture. + # if (@other_files && !grep( /\.changes$/, @other_files )) { + # rm( @other_files ); + # msg( "mail", "\nThe following file(s) seemed to belong to the same ". + # "upload, but weren't listed\n" ); + # msg( "mail", "in the .changes file:\n " ); + # msg( "mail", join( "\n ", @other_files ), "\n" ); + # msg( "mail", "They have been deleted.\n" ); + # msg( "log", "Deleted files in upload not in $changes: @other_files\n" ); + #} +} ## end sub process_changes($\@) # # process one .commands file # sub process_commands($) { - my $commands = shift; - my( @cmds, $cmd, $pgplines, $signator ); - local( *COMMANDS ); - - format_status_str( $main::current_changes, $commands ); - $main::dstat = "c"; - write_status_file() if $conf::statusdelay; - - msg( "log", "processing $commands\n" ); - - # parse the .commands file - if (!open( COMMANDS, "<$commands" )) { - msg( "log", "Cannot open $commands: $!\n" ); - return; - } - $pgplines = 0; - $main::mail_addr = ""; - @cmds = (); - outer_loop: while( <COMMANDS> ) { - if (/^---+(BEGIN|END) PGP .*---+$/) { - ++$pgplines; - } - elsif (/^Uploader:\s*/i) { - chomp( $main::mail_addr = $' ); - $main::mail_addr = $1 if $main::mail_addr =~ /<([^>]*)>/; - } - elsif (/^Commands:/i) { - $_ = $'; - for(;;) { - s/^\s*(.*)\s*$/$1/; # delete whitespace at both ends - if (!/^\s*$/) { - push( @cmds, $_ ); - debug( "includes cmd $_" ); - } - last outer_loop if !defined( $_ = scalar(<COMMANDS>) ); - chomp; - redo outer_loop if !/^\s/ || /^$/; - } - } - } - close( COMMANDS ); - - # some consistency checks - if (!$main::mail_addr || $main::mail_addr !~ /^\S+\@\S+\.\S+/) { - msg( "log,mail", "$commands contains no or bad Uploader: field: ". - "$main::mail_addr\n" ); - msg( "log,mail", "cannot process $commands\n" ); - $main::mail_addr = ""; - goto remove; - } - msg( "log", "(command uploader $main::mail_addr)\n" ); - - if ($pgplines < 3) { - msg( "log,mail", "$commands isn't signed with PGP/GnuPG\n" ); - goto remove; - } - - # run PGP on the file to check the signature - if (!($signator = pgp_check( $commands ))) { - msg( "log,mail", "$commands has bad PGP/GnuPG signature!\n" ); - remove: - msg( "log,mail", "Removing $commands\n" ); - rm( $commands ); - return; - } - elsif ($signator eq "LOCAL ERROR") { - # An error has appened when starting pgp... Don't process the file, - # but also don't delete it - debug( "Can't PGP/GnuPG check $commands -- don't process it for now" ); - return; - } - msg( "log", "(PGP/GnuPG signature by $signator)\n" ); - - # now process commands - msg( "mail", "Log of processing your commands file $commands:\n\n" ); - foreach $cmd ( @cmds ) { - my @word = split( /\s+/, $cmd ); - msg( "mail,log", "> @word\n" ); - next if @word < 1; - - if ($word[0] eq "rm") { - my( @files, $file, @removed ); - foreach ( @word[1..$#word] ) { - if (m,/,) { - msg( "mail,log", "$_: filename may not contain slashes\n" ); - } - elsif (/[*?[]/) { - # process wildcards - my $pat = quotemeta($_); - $pat =~ s/\\\*/.*/g; - $pat =~ s/\\\?/.?/g; - $pat =~ s/\\([][])/$1/g; - opendir( DIR, "." ); - push( @files, grep /^$pat$/, readdir(DIR) ); - closedir( DIR ); - } - else { - push( @files, $_ ); - } - } - if (!@files) { - msg( "mail,log", "No files to delete\n" ); - } - else { - @removed = (); - foreach $file ( @files ) { - if (!-f $file) { - msg( "mail,log", "$file: no such file\n" ); - } - elsif ($file =~ /$conf::keep_files/) { - msg( "mail,log", "$file is protected, cannot ". 
- "remove\n" ); - } - elsif (!unlink( $file )) { - msg( "mail,log", "$file: rm: $!\n" ); - } - else { - push( @removed, $file ); - } - } - msg( "mail,log", "Files removed: @removed\n" ) if @removed; - } - } - elsif ($word[0] eq "mv") { - if (@word != 3) { - msg( "mail,log", "Wrong number of arguments\n" ); - } - elsif ($word[1] =~ m,/,) { - msg( "mail,log", "$word[1]: filename may not contain slashes\n" ); - } - elsif ($word[2] =~ m,/,) { - msg( "mail,log", "$word[2]: filename may not contain slashes\n" ); - } - elsif (!-f $word[1]) { - msg( "mail,log", "$word[1]: no such file\n" ); - } - elsif (-e $word[2]) { - msg( "mail,log", "$word[2]: file exists\n" ); - } - elsif ($word[1] =~ /$conf::keep_files/) { - msg( "mail,log", "$word[1] is protected, cannot rename\n" ); - } - else { - if (!rename( $word[1], $word[2] )) { - msg( "mail,log", "rename: $!\n" ); - } - else { - msg( "mail,log", "OK\n" ); - } - } - } - else { - msg( "mail,log", "unknown command $word[0]\n" ); - } - } - rm( $commands ); - msg( "log", "-- End of $commands processing\n" ); -} + my $commands = shift; + my ( @cmds, $cmd, $pgplines, $signator ); + local (*COMMANDS); + my ( @files, $file, @removed, $target_delay ); + + format_status_str( $main::current_changes, $commands ); + $main::dstat = "c"; + write_status_file() if $conf::statusdelay; + + msg( "log", "processing $main::current_incoming_short/$commands\n" ); + + # parse the .commands file + if ( !open( COMMANDS, "<$commands" ) ) { + msg( "log", "Cannot open $main::current_incoming_short/$commands: $!\n" ); + return; + } + $pgplines = 0; + $main::mail_addr = ""; + @cmds = (); +outer_loop: while (<COMMANDS>) { + if (/^---+(BEGIN|END) PGP .*---+$/) { + ++$pgplines; + } elsif (/^Uploader:\s*/i) { + chomp( $main::mail_addr = $' ); + $main::mail_addr = $1 if $main::mail_addr =~ /<([^>]*)>/; + } elsif (/^Commands:/i) { + $_ = $'; + for ( ; ; ) { + s/^\s*(.*)\s*$/$1/; # delete whitespace at both ends + if ( !/^\s*$/ ) { + push( @cmds, $_ ); + debug("includes cmd $_"); + } + last outer_loop if !defined( $_ = scalar(<COMMANDS>) ); + chomp; + redo outer_loop if !/^\s/ || /^$/; + } ## end for ( ; ; ) + } ## end elsif (/^Commands:/i) + } ## end while (<COMMANDS>) + close(COMMANDS); + + # some consistency checks + if ( !$main::mail_addr || $main::mail_addr !~ /^\S+\@\S+\.\S+/ ) { + msg( "log,mail", +"$main::current_incoming_short/$commands contains no or bad Uploader: field: " + . "$main::mail_addr\n" ); + msg( "log,mail", + "cannot process $main::current_incoming_short/$commands\n" ); + $main::mail_addr = ""; + goto remove; + } ## end if ( !$main::mail_addr... + msg( "log", "(command uploader $main::mail_addr)\n" ); + + if ( $pgplines < 3 ) { + msg( + "log,mail", + "$main::current_incoming_short/$commands isn't signed with PGP/GnuPG\n" + ); + msg( + "mail", + "or the uploaded file is broken. Make sure to transfer in binary mode\n" + ); + msg( "mail", "or better yet - use dcut for commands files\n" ); + goto remove; + } ## end if ( $pgplines < 3 ) + + # run PGP on the file to check the signature + if ( !( $signator = pgp_check($commands) ) ) { + msg( + "log,mail", + "$main::current_incoming_short/$commands has bad PGP/GnuPG signature!\n" + ); + remove: + msg( "log,mail", "Removing $main::current_incoming_short/$commands\n" ); + rm($commands); + return; + } elsif ( $signator eq "LOCAL ERROR" ) { + + # An error has appened when starting pgp... 
Don't process the file, + # but also don't delete it + debug( +"Can't PGP/GnuPG check $main::current_incoming_short/$commands -- don't process it for now" + ); + return; + } ## end elsif ( $signator eq "LOCAL ERROR") + msg( "log", "(PGP/GnuPG signature by $signator)\n" ); + + # now process commands + msg( + "mail", +"Log of processing your commands file $main::current_incoming_short/$commands:\n\n" + ); + foreach $cmd (@cmds) { + my @word = split( /\s+/, $cmd ); + msg( "mail,log", "> @word\n" ); + my $selecteddelayed = -1; + next if @word < 1; + + if ( $word[0] eq "rm" ) { + foreach ( @word[ 1 .. $#word ] ) { + my $origword = $_; + if (m,^DELAYED/([0-9]+)-day/,) { + $selecteddelayed = $1; + s,^DELAYED/[0-9]+-day/,,; + } + if ( $origword eq "--searchdirs" ) { + $selecteddelayed = -2; + } elsif (m,/,) { + msg( + "mail,log", +"$_: filename may not contain slashes except for DELAYED/#-day/ prefixes\n" + ); + } else { + + # process wildcards but also plain names + my (@thesefiles); + my $pat = quotemeta($_); + $pat =~ s/\\\*/.*/g; + $pat =~ s/\\\?/.?/g; + $pat =~ s/\\([][])/$1/g; + + if ( $selecteddelayed < 0 ) { # scanning or explicitly incoming + opendir( DIR, "." ); + push( @thesefiles, grep /^$pat$/, readdir(DIR) ); + closedir(DIR); + } + if ( $selecteddelayed >= 0 ) { + my $dir = sprintf( $conf::incoming_delayed, $selecteddelayed ); + opendir( DIR, $dir ); + push( @thesefiles, + map ( "$dir/$_", grep /^$pat$/, readdir(DIR) ) ); + closedir(DIR); + } elsif ( $selecteddelayed == -2 ) { + for ( my ($adelay) = 0 ; + ( !@thesefiles ) && $adelay <= $conf::max_delayed ; + $adelay++ ) + { + my $dir = sprintf( $conf::incoming_delayed, $adelay ); + opendir( DIR, $dir ); + push( @thesefiles, + map ( "$dir/$_", grep /^$pat$/, readdir(DIR) ) ); + closedir(DIR); + } ## end for ( my ($adelay) = 0 ... + } ## end elsif ( $selecteddelayed ... + push( @files, @thesefiles ); + if ( !@thesefiles ) { + msg( "mail,log", "$origword did not match anything\n" ); + } + } ## end else [ if ( $origword eq "--searchdirs") + } ## end foreach ( @word[ 1 .. $#word... + if ( !@files ) { + msg( "mail,log", "No files to delete\n" ); + } else { + @removed = (); + foreach $file (@files) { + if ( !-f $file ) { + msg( "mail,log", "$file: no such file\n" ); + } elsif ( $file =~ /$conf::keep_files/ ) { + msg( "mail,log", "$file is protected, cannot " . "remove\n" ); + } elsif ( !unlink($file) ) { + msg( "mail,log", "$file: rm: $!\n" ); + } else { + $file =~ s,$conf::incoming/?,,; + push( @removed, $file ); + } + } ## end foreach $file (@files) + msg( "mail,log", "Files removed: @removed\n" ) if @removed; + } ## end else [ if ( !@files ) + } elsif ( $word[0] eq "reschedule" ) { + if ( @word != 3 ) { + msg( "mail,log", "Wrong number of arguments\n" ); + } elsif ( $conf::upload_method ne "copy" ) { + msg( "mail,log", "reschedule not available\n" ); + } elsif ( $word[1] =~ m,/, || $word[1] !~ m/\.changes/ ) { + msg( + "mail,log", + "$word[1]: filename may not contain slashes and must be .changes\n" + ); + } elsif ( !( ($target_delay) = $word[2] =~ m,^([0-9]+)-day$, ) + || $target_delay > $conf::max_delayed ) + { + msg( + "mail,log", +"$word[2]: rescheduling target must be #-day with # between 0 and $conf::max_delayed (in particular, no '/' allowed)\n" + ); + } elsif ( $word[1] =~ /$conf::keep_files/ ) { + msg( "mail,log", "$word[1] is protected, cannot do stuff with it\n" ); + } else { + my ($adelay); + for ( $adelay = 0 ; + $adelay <= $conf::max_delayed + && !-f ( + sprintf( "$conf::targetdir_delayed", $adelay ) . 
"/$word[1]" ) ; + $adelay++ ) + { + } ## end for ( $adelay = 0 ; $adelay... + if ( $adelay > $conf::max_delayed ) { + msg( "mail,log", "$word[1] not found\n" ); + } elsif ( $adelay == $target_delay ) { + msg( "mail,log", "$word[1] already is in $word[2]\n" ); + } else { + my (@thesefiles); + my ($dir) = sprintf( "$conf::targetdir_delayed", $adelay ); + my ($target_dir) = + sprintf( "$conf::targetdir_delayed", $target_delay ); + push( @thesefiles, $word[1] ); + push( @thesefiles, + get_filelist_from_known_good_changes("$dir/$word[1]") ); + for my $afile (@thesefiles) { + if ( $afile =~ m/\.changes$/ ) { + utime undef, undef, ("$dir/$afile"); + } + if ( !rename "$dir/$afile", "$target_dir/$afile" ) { + msg( "mail,log", "rename: $!\n" ); + } else { + msg( "mail,log", "$afile moved to $target_delay-day\n" ); + } + } ## end for my $afile (@thesefiles) + } ## end else [ if ( $adelay > $conf::max_delayed) + } ## end else [ if ( @word != 3 ) + } elsif ( $word[0] eq "cancel" ) { + if ( @word != 2 ) { + msg( "mail,log", "Wrong number of arguments\n" ); + } elsif ( $conf::upload_method ne "copy" ) { + msg( "mail,log", "cancel not available\n" ); + } elsif ( + $word[1] !~ m,^[a-zA-Z0-9.+_:@=%-][~a-zA-Z0-9.+_:@=%-]*\.changes$, ) + { + msg( "mail,log", + "argument to cancel must be one .changes filename without path\n" ); + } ## end elsif ( $word[1] !~ ... + my (@files) = (); + for ( my ($adelay) = 0 ; $adelay <= $conf::max_delayed ; $adelay++ ) { + my ($dir) = sprintf( "$conf::targetdir_delayed", $adelay ); + if ( -f "$dir/$word[1]" ) { + @removed = (); + push( @files, "$word[1]" ); + push( @files, + get_filelist_from_known_good_changes("$dir/$word[1]") ); + foreach $file (@files) { + if ( !-f "$dir/$file" ) { + msg( "mail,log", "$dir/$file: no such file\n" ); + } elsif ( "$dir/$file" =~ /$conf::keep_files/ ) { + msg( "mail,log", + "$dir/$file is protected, cannot " . "remove\n" ); + } elsif ( !unlink("$dir/$file") ) { + msg( "mail,log", "$dir/$file: rm: $!\n" ); + } else { + push( @removed, $file ); + } + } ## end foreach $file (@files) + msg( "mail,log", "Files removed from $adelay-day: @removed\n" ) + if @removed; + } ## end if ( -f "$dir/$word[1]") + } ## end for ( my ($adelay) = 0 ... + if ( !@files ) { + msg( "mail,log", "No upload found: $word[1]\n" ); + } + } else { + msg( "mail,log", "unknown command $word[0]\n" ); + } + } ## end foreach $cmd (@cmds) + rm($commands); + msg( "log", + "-- End of $main::current_incoming_short/$commands processing\n" ); +} ## end sub process_commands($) + +sub age_delayed_queues() { + for ( my ($adelay) = 0 ; $adelay <= $conf::max_delayed ; $adelay++ ) { + my ($dir) = sprintf( "$conf::targetdir_delayed", $adelay ); + my ($target_dir); + if ( $adelay == 0 ) { + $target_dir = $conf::targetdir; + } else { + $target_dir = sprintf( "$conf::targetdir_delayed", $adelay - 1 ); + } + for my $achanges (<$dir/*.changes>) { + my $mtime = ( stat($achanges) )[9]; + if ( $mtime + 24 * 60 * 60 <= time || $adelay == 0 ) { + utime undef, undef, ($achanges); + my @thesefiles = ( $achanges =~ m,.*/([^/]*), ); + push( @thesefiles, get_filelist_from_known_good_changes($achanges) ); + for my $afile (@thesefiles) { + if ( !rename "$dir/$afile", "$target_dir/$afile" ) { + msg( "log", "rename: $!\n" ); + } else { + msg( "log", "$afile moved to $target_dir\n" ); + } + } ## end for my $afile (@thesefiles) + } ## end if ( $mtime + 24 * 60 ... + } ## end for my $achanges (<$dir/*.changes>) + } ## end for ( my ($adelay) = 0 ... 
+} ## end sub age_delayed_queues() # # check if a file is already on target # -sub is_on_target($) { - my $file = shift; - my $msg; - my $stat; - - if ($conf::upload_method eq "ssh") { - ($msg, $stat) = ssh_cmd( "ls -l $file" ); - } - elsif ($conf::upload_method eq "ftp") { - my $err; - ($msg, $err) = ftp_cmd( "dir", $file ); - if ($err) { - $stat = 1; - $msg = $err; - } - elsif (!$msg) { - $stat = 1; - $msg = "ls: no such file\n"; - } - else { - $stat = 0; - $msg = join( "\n", @$msg ); - } - } - else { - ($msg, $stat) = local_cmd( "$conf::ls -l $file" ); - } - chomp( $msg ); - debug( "exit status: $stat, output was: $msg" ); - - return "" if $stat && $msg =~ /no such file/i; # file not present - msg( "log", "strange ls -l output on target:\n", $msg ), return "" - if $stat || $@; # some other error, but still try to upload - - # ls -l returned 0 -> file already there - $msg =~ s/\s\s+/ /g; # make multiple spaces into one, to save space - return $msg; -} +sub is_on_target($\@) { + my $file = shift; + my $filelist = shift; + my $msg; + my $stat; + + if ( $conf::upload_method eq "ssh" ) { + ( $msg, $stat ) = ssh_cmd("ls -l $file"); + } elsif ( $conf::upload_method eq "ftp" ) { + my $err; + ( $msg, $err ) = ftp_cmd( "dir", $file ); + if ($err) { + $stat = 1; + $msg = $err; + } elsif ( !$msg ) { + $stat = 1; + $msg = "ls: no such file\n"; + } else { + $stat = 0; + $msg = join( "\n", @$msg ); + } + } else { + my @allfiles = ($file); + push( @allfiles, @$filelist ); + $stat = 1; + $msg = "no such file"; + for my $afile (@allfiles) { + if ( -f "$conf::targetdir/$afile" ) { + $stat = 0; + $msg = "$afile"; + } + } ## end for my $afile (@allfiles) + for ( my ($adelay) = 0 ; + $adelay <= $conf::max_delayed && $stat ; + $adelay++ ) + { + for my $afile (@allfiles) { + if ( + -f ( sprintf( "$conf::targetdir_delayed", $adelay ) . "/$afile" ) ) + { + $stat = 0; + $msg = sprintf( "%d-day", $adelay ) . "/$afile"; + } ## end if ( -f ( sprintf( "$conf::targetdir_delayed"... + } ## end for my $afile (@allfiles) + } ## end for ( my ($adelay) = 0 ... + } ## end else [ if ( $conf::upload_method... 
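With the "copy" method there is no remote ls to ask, so the branch above looks for any of the job's files in the target directory and in every DELAYED queue. The same search as a small function (paths and queue limit are illustrative):

    use strict;
    use warnings;

    my $targetdir         = "/srv/queue/target";
    my $targetdir_delayed = "/srv/queue/DELAYED/%d-day";
    my $max_delayed       = 15;

    # Return the first file already present (prefixed with its queue for
    # the delayed dirs), or "" if nothing was found.
    sub find_on_local_target {
        my @files = @_;
        for my $file (@files) {
            return $file if -f "$targetdir/$file";
        }
        for my $adelay ( 0 .. $max_delayed ) {
            my $dir = sprintf( $targetdir_delayed, $adelay );
            for my $file (@files) {
                return "$adelay-day/$file" if -f "$dir/$file";
            }
        }
        return "";
    }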
+ chomp($msg); + debug("exit status: $stat, output was: $msg"); + + return "" if $stat && $msg =~ /no such file/i; # file not present + msg( "log", "strange ls -l output on target:\n", $msg ), return "" + if $stat || $@; # some other error, but still try to upload + + # ls -l returned 0 -> file already there + $msg =~ s/\s\s+/ /g; # make multiple spaces into one, to save space + return $msg; +} ## end sub is_on_target($\@) # # copy a list of files to target # sub copy_to_target(@) { - my @files = @_; - my( @md5sum, @expected_files, $sum, $name, $msgs, $stat ); - - $main::dstat = "u"; - write_status_file() if $conf::statusdelay; - - # copy the files - if ($conf::upload_method eq "ssh") { - ($msgs, $stat) = scp_cmd( @files ); - goto err if $stat; - } - elsif ($conf::upload_method eq "ftp") { - my($rv, $file); - foreach $file (@files) { - ($rv, $msgs) = ftp_cmd( "put", $file ); - goto err if !$rv; - } - } - else { - ($msgs, $stat) = local_cmd( "$conf::cp @files $conf::targetdir", 'NOCD' ); - goto err if $stat; - } - - # check md5sums or sizes on target against our own - my $have_md5sums = 1; - if ($conf::upload_method eq "ssh") { - ($msgs, $stat) = ssh_cmd( "md5sum @files" ); - goto err if $stat; - @md5sum = split( "\n", $msgs ); - } - elsif ($conf::upload_method eq "ftp") { - my ($rv, $err, $file); - foreach $file (@files) { - ($rv, $err) = ftp_cmd( "quot", "site", "md5sum", $file ); - if ($err) { - next if ftp_code() == 550; # file not found - if (ftp_code() == 500) { # unimplemented - $have_md5sums = 0; - goto get_sizes_instead; - } - $msgs = $err; - goto err; - } - chomp( my $t = ftp_response() ); - push( @md5sum, $t ); - } - if (!$have_md5sums) { - get_sizes_instead: - foreach $file (@files) { - ($rv, $err) = ftp_cmd( "size", $file ); - if ($err) { - next if ftp_code() == 550; # file not found - $msgs = $err; - goto err; - } - push( @md5sum, "$rv $file" ); - } - } - } - else { - ($msgs, $stat) = local_cmd( "$conf::md5sum @files" ); - goto err if $stat; - @md5sum = split( "\n", $msgs ); - } - - @expected_files = @files; - foreach (@md5sum) { - chomp; - ($sum,$name) = split; - next if !grep { $_ eq $name } @files; # a file we didn't upload?? - next if $sum eq "md5sum:"; # looks like an error message - if (($have_md5sums && $sum ne md5sum( $name )) || - (!$have_md5sums && $sum != (-s $name))) { - msg( "log,mail", "Upload of $name to $conf::target failed ", - "(".($have_md5sums ? "md5sum" : "size")." mismatch)\n" ); - goto err; - } - # seen that file, remove it from expect list - @expected_files = map { $_ eq $name ? () : $_ } @expected_files; - } - if (@expected_files) { - msg( "log,mail", "Failed to upload the files\n" ); - msg( "log,mail", " ", join( ", ", @expected_files ), "\n" ); - msg( "log,mail", "(Not present on target after upload)\n" ); - goto err; - } - - if ($conf::chmod_on_target) { - # change file's mode explicitly to 644 on target - if ($conf::upload_method eq "ssh") { - ($msgs, $stat) = ssh_cmd( "chmod 644 @files" ); - goto err if $stat; - } - elsif ($conf::upload_method eq "ftp") { - my ($rv, $file); - foreach $file (@files) { - ($rv, $msgs) = ftp_cmd( "quot", "site", "chmod", "644", $file ); - msg( "log", "Can't chmod $file on target:\n$msgs" ) - if $msgs; - goto err if !$rv; - } - } - else { - ($msgs, $stat) = local_cmd( "$conf::chmod 644 @files" ); - goto err if $stat; - } - } - - $main::dstat = "c"; - write_status_file() if $conf::statusdelay; - return 1; - - err: - msg( "log,mail", "Upload to $conf::target failed", - $? ? 
", last exit status ".sprintf( "%s", $?>>8 ) : "", "\n" ); - msg( "log,mail", "Error messages:\n", $msgs ) - if $msgs; - - # If "permission denied" was among the errors, test if the incoming is - # writable at all. - if ($msgs =~ /(permission denied|read-?only file)/i) { - if (!check_incoming_writable()) { - msg( "log,mail", "(The incoming directory seems to be ", - "unwritable.)\n" ); - } - } - - # remove bad files or an incomplete upload on target - if ($conf::upload_method eq "ssh") { - ssh_cmd( "rm -f @files" ); - } - elsif ($conf::upload_method eq "ftp") { - my $file; - foreach $file (@files) { - my ($rv, $err); - ($rv, $err) = ftp_cmd( "delete", $file ); - msg( "log", "Can't delete $file on target:\n$err" ) - if $err; - } - } - else { - my @tfiles = map { "$conf::targetdir/$_" } @files; - debug( "executing unlink(@tfiles)" ); - rm( @tfiles ); - } - $main::dstat = "c"; - write_status_file() if $conf::statusdelay; - return 0; -} + my @files = @_; + my ( @md5sum, @expected_files, $sum, $name, $msgs, $stat ); + + $main::dstat = "u"; + write_status_file() if $conf::statusdelay; + + # copy the files + if ( $conf::upload_method eq "ssh" ) { + ( $msgs, $stat ) = scp_cmd(@files); + goto err if $stat; + } elsif ( $conf::upload_method eq "ftp" ) { + my ( $rv, $file ); + if ( !$main::FTP_chan->cwd($main::current_targetdir) ) { + msg( "log,mail", + "Can't cd to $main::current_targetdir on $conf::target\n" ); + goto err; + } + foreach $file (@files) { + ( $rv, $msgs ) = ftp_cmd( "put", $file ); + goto err if !$rv; + } + } else { + ( $msgs, $stat ) = + local_cmd( "$conf::cp @files $main::current_targetdir", 'NOCD' ); + goto err if $stat; + } + + # check md5sums or sizes on target against our own + my $have_md5sums = 1; + if ($conf::check_md5sum) { + if ( $conf::upload_method eq "ssh" ) { + ( $msgs, $stat ) = ssh_cmd("md5sum @files"); + goto err if $stat; + @md5sum = split( "\n", $msgs ); + } elsif ( $conf::upload_method eq "ftp" ) { + my ( $rv, $err, $file ); + foreach $file (@files) { + ( $rv, $err ) = ftp_cmd( "quot", "site", "md5sum", $file ); + if ($err) { + next if ftp_code() == 550; # file not found + if ( ftp_code() == 500 ) { # unimplemented + $have_md5sums = 0; + goto get_sizes_instead; + } + $msgs = $err; + goto err; + } ## end if ($err) + chomp( my $t = ftp_response() ); + push( @md5sum, $t ); + } ## end foreach $file (@files) + if ( !$have_md5sums ) { + get_sizes_instead: + foreach $file (@files) { + ( $rv, $err ) = ftp_cmd( "size", $file ); + if ($err) { + next if ftp_code() == 550; # file not found + $msgs = $err; + goto err; + } + push( @md5sum, "$rv $file" ); + } ## end foreach $file (@files) + } ## end if ( !$have_md5sums ) + } else { + ( $msgs, $stat ) = local_cmd("$conf::md5sum @files"); + goto err if $stat; + @md5sum = split( "\n", $msgs ); + } + + @expected_files = @files; + foreach (@md5sum) { + chomp; + ( $sum, $name ) = split; + next if !grep { $_ eq $name } @files; # a file we didn't upload?? + next if $sum eq "md5sum:"; # looks like an error message + if ( ( $have_md5sums && $sum ne md5sum($name) ) + || ( !$have_md5sums && $sum != ( -s $name ) ) ) + { + msg( + "log,mail", + "Upload of $name to $conf::target failed ", + "(" . ( $have_md5sums ? "md5sum" : "size" ) . " mismatch)\n" + ); + goto err; + } ## end if ( ( $have_md5sums &&... + + # seen that file, remove it from expect list + @expected_files = map { $_ eq $name ? 
() : $_ } @expected_files; + } ## end foreach (@md5sum) + if (@expected_files) { + msg( "log,mail", "Failed to upload the files\n" ); + msg( "log,mail", " ", join( ", ", @expected_files ), "\n" ); + msg( "log,mail", "(Not present on target after upload)\n" ); + goto err; + } ## end if (@expected_files) + } ## end if ($conf::check_md5sum) + + if ($conf::chmod_on_target) { + + # change file's mode explicitly to 644 on target + if ( $conf::upload_method eq "ssh" ) { + ( $msgs, $stat ) = ssh_cmd("chmod 644 @files"); + goto err if $stat; + } elsif ( $conf::upload_method eq "ftp" ) { + my ( $rv, $file ); + foreach $file (@files) { + ( $rv, $msgs ) = ftp_cmd( "quot", "site", "chmod", "644", $file ); + msg( "log", "Can't chmod $file on target:\n$msgs" ) + if $msgs; + goto err if !$rv; + } ## end foreach $file (@files) + } else { + ( $msgs, $stat ) = local_cmd("$conf::chmod 644 @files"); + goto err if $stat; + } + } ## end if ($conf::chmod_on_target) + + $main::dstat = "c"; + write_status_file() if $conf::statusdelay; + return 1; + +err: + msg( "log,mail", + "Upload to $conf::target failed", + $? ? ", last exit status " . sprintf( "%s", $? >> 8 ) : "", "\n" ); + msg( "log,mail", "Error messages:\n", $msgs ) + if $msgs; + + # If "permission denied" was among the errors, test if the incoming is + # writable at all. + if ( $msgs =~ /(permission denied|read-?only file)/i ) { + if ( !check_incoming_writable() ) { + msg( "log,mail", "(The incoming directory seems to be ", + "unwritable.)\n" ); + } + } ## end if ( $msgs =~ /(permission denied|read-?only file)/i) + + # remove bad files or an incomplete upload on target + if ( $conf::upload_method eq "ssh" ) { + ssh_cmd("rm -f @files"); + } elsif ( $conf::upload_method eq "ftp" ) { + my $file; + foreach $file (@files) { + my ( $rv, $err ); + ( $rv, $err ) = ftp_cmd( "delete", $file ); + msg( "log", "Can't delete $file on target:\n$err" ) + if $err; + } ## end foreach $file (@files) + } else { + my @tfiles = map { "$main::current_targetdir/$_" } @files; + debug("executing unlink(@tfiles)"); + rm(@tfiles); + } + $main::dstat = "c"; + write_status_file() if $conf::statusdelay; + return 0; +} ## end sub copy_to_target(@) # # check if a file is correctly signed with PGP # sub pgp_check($) { - my $file = shift; - my $output = ""; - my $signator; - my $found = 0; - my $stat; - local( *PIPE ); - - $stat = 1; - if (-x $conf::gpg) { - debug( "executing $conf::gpg --no-options --batch ". - "--no-default-keyring --always-trust ". - "--keyring ". join (" --keyring ",@conf::keyrings). - " --verify '$file'" ); - if (!open( PIPE, "$conf::gpg --no-options --batch ". - "--no-default-keyring --always-trust ". - "--keyring " . join (" --keyring ",@conf::keyrings). - " --verify '$file'". - " 2>&1 |" )) { - msg( "log", "Can't open pipe to $conf::gpg: $!\n" ); - return "LOCAL ERROR"; - } - $output .= $_ while( <PIPE> ); - close( PIPE ); - $stat = $?; - } - - if ($stat) { - msg( "log,mail", "GnuPG signature check failed on $file\n" ); - msg( "mail", $output ); - msg( "log,mail", "(Exit status ", $stat >> 8, ")\n" ); - return ""; - } - - $output =~ /^(gpg: )?good signature from (user )?"(.*)"\.?$/im; - ($signator = $3) ||= "unknown signator"; - if ($conf::debug) { - debug( "GnuPG signature ok (by $signator)" ); - } - return $signator; -} - + my $file = shift; + my $output = ""; + my $signator; + my $found = 0; + my $stat; + local (*PIPE); + + $stat = 1; + if ( -x $conf::gpg ) { + debug( "executing $conf::gpg --no-options --batch " + . "--no-default-keyring --always-trust " + . 
"--keyring " + . join( " --keyring ", @conf::keyrings ) + . " --verify '$file'" ); + if ( + !open( PIPE, + "$conf::gpg --no-options --batch " + . "--no-default-keyring --always-trust " + . "--keyring " + . join( " --keyring ", @conf::keyrings ) + . " --verify '$file'" + . " 2>&1 |" + ) + ) + { + msg( "log", "Can't open pipe to $conf::gpg: $!\n" ); + return "LOCAL ERROR"; + } ## end if ( !open( PIPE, "$conf::gpg --no-options --batch "... + $output .= $_ while (<PIPE>); + close(PIPE); + $stat = $?; + } ## end if ( -x $conf::gpg ) + + if ($stat) { + msg( "log,mail", "GnuPG signature check failed on $file\n" ); + msg( "mail", $output ); + msg( "log,mail", "(Exit status ", $stat >> 8, ")\n" ); + return ""; + } ## end if ($stat) + + $output =~ /^(gpg: )?good signature from (user )?"(.*)"\.?$/im; + ( $signator = $3 ) ||= "unknown signator"; + if ($conf::debug) { + debug("GnuPG signature ok (by $signator)"); + } + return $signator; +} ## end sub pgp_check($) # --------------------------------------------------------------------------- # the status daemon @@ -1518,167 +1677,175 @@ sub pgp_check($) { # # fork a subprocess that watches the 'status' FIFO -# +# # that process blocks until someone opens the FIFO, then sends a -# signal (SIGUSR1) to the main process, expects +# signal (SIGUSR1) to the main process, expects # sub fork_statusd() { - my $statusd_pid; - my $main_pid = $$; - my $errs; - local( *STATFIFO ); - - $statusd_pid = open( STATUSD, "|-" ); - die "cannot fork: $!\n" if !defined( $statusd_pid ); - # parent just returns - if ($statusd_pid) { - msg( "log", "forked status daemon (pid $statusd_pid)\n" ); - return $statusd_pid; - } - # child: the status FIFO daemon - - # ignore SIGPIPE here, in case some closes the FIFO without completely - # reading it - $SIG{"PIPE"} = "IGNORE"; - # also ignore SIGCLD, we don't want to inherit the restart-statusd handler - # from our parent - $SIG{"CHLD"} = "DEFAULT"; - - rm( $conf::statusfile ); - $errs = `$conf::mkfifo $conf::statusfile`; - die "$main::progname: cannot create named pipe $conf::statusfile: $errs" - if $?; - chmod( 0644, $conf::statusfile ) - or die "Cannot set modes of $conf::statusfile: $!\n"; - - # close log file, so that log rotating works - close( LOG ); - close( STDOUT ); - close( STDERR ); - - while( 1 ) { - my( $status, $mup, $incw, $ds, $next_run, $last_ping, $currch, $l ); - - # open the FIFO for writing; this blocks until someone (probably ftpd) - # opens it for reading - open( STATFIFO, ">$conf::statusfile" ) - or die "Cannot open $conf::statusfile\n"; - select( STATFIFO ); - # tell main daemon to send us status infos - kill( $main::signo{"USR1"}, $main_pid ); - - # get the infos from stdin; must loop until enough bytes received! - my $expect_len = 3 + 2*STATNUM_LEN + STATSTR_LEN; - for( $status = ""; ($l = length($status)) < $expect_len; ) { - sysread( STDIN, $status, $expect_len-$l, $l ); - } - - # disassemble the status byte stream - my $pos = 0; - foreach ( [ mup => 1 ], [ incw => 1 ], [ ds => 1 ], - [ next_run => STATNUM_LEN ], [ last_ping => STATNUM_LEN ], - [ currch => STATSTR_LEN ] ) { - eval "\$$_->[0] = substr( \$status, $pos, $_->[1] );"; - $pos += $_->[1]; - } - $currch =~ s/\n+//g; - - print_status( $mup, $incw, $ds, $next_run, $last_ping, $currch ); - close( STATFIFO ); - - # This sleep is necessary so that we can't reopen the FIFO - # immediately, in case the reader hasn't closed it yet if we get to - # the open again. Is there a better solution for this?? 
- sleep 1; - } -} + my $statusd_pid; + my $main_pid = $$; + my $errs; + local (*STATFIFO); + + $statusd_pid = open( STATUSD, "|-" ); + die "cannot fork: $!\n" if !defined($statusd_pid); + + # parent just returns + if ($statusd_pid) { + msg( "log", "forked status daemon (pid $statusd_pid)\n" ); + return $statusd_pid; + } + + # child: the status FIFO daemon + + # ignore SIGPIPE here, in case some closes the FIFO without completely + # reading it + $SIG{"PIPE"} = "IGNORE"; + + # also ignore SIGCLD, we don't want to inherit the restart-statusd handler + # from our parent + $SIG{"CHLD"} = "DEFAULT"; + + rm($conf::statusfile); + $errs = `$conf::mkfifo $conf::statusfile`; + die "$main::progname: cannot create named pipe $conf::statusfile: $errs" + if $?; + chmod( 0644, $conf::statusfile ) + or die "Cannot set modes of $conf::statusfile: $!\n"; + + # close log file, so that log rotating works + close(LOG); + close(STDOUT); + close(STDERR); + + while (1) { + my ( $status, $mup, $incw, $ds, $next_run, $last_ping, $currch, $l ); + + # open the FIFO for writing; this blocks until someone (probably ftpd) + # opens it for reading + open( STATFIFO, ">$conf::statusfile" ) + or die "Cannot open $conf::statusfile\n"; + select(STATFIFO); + + # tell main daemon to send us status infos + kill( $main::signo{"USR1"}, $main_pid ); + + # get the infos from stdin; must loop until enough bytes received! + my $expect_len = 3 + 2 * STATNUM_LEN + STATSTR_LEN; + for ( $status = "" ; ( $l = length($status) ) < $expect_len ; ) { + sysread( STDIN, $status, $expect_len - $l, $l ); + } + + # disassemble the status byte stream + my $pos = 0; + foreach ( + [ mup => 1 ], + [ incw => 1 ], + [ ds => 1 ], + [ next_run => STATNUM_LEN ], + [ last_ping => STATNUM_LEN ], + [ currch => STATSTR_LEN ] + ) + { + eval "\$$_->[0] = substr( \$status, $pos, $_->[1] );"; + $pos += $_->[1]; + } ## end foreach ( [ mup => 1 ], [ incw... + $currch =~ s/\n+//g; + + print_status( $mup, $incw, $ds, $next_run, $last_ping, $currch ); + close(STATFIFO); + + # This sleep is necessary so that we can't reopen the FIFO + # immediately, in case the reader hasn't closed it yet if we get to + # the open again. Is there a better solution for this?? + sleep 1; + } ## end while (1) +} ## end sub fork_statusd() # # update the status file, in case we use a plain file and not a FIFO # sub write_status_file() { - return if !$conf::statusfile; - - open( STATFILE, ">$conf::statusfile" ) or - (msg( "log", "Could not open $conf::statusfile: $!\n" ), return); - my $oldsel = select( STATFILE ); + return if !$conf::statusfile; - print_status( $main::target_up, $main::incoming_writable, $main::dstat, - $main::next_run, $main::last_ping_time, - $main::current_changes ); + open( STATFILE, ">$conf::statusfile" ) + or ( msg( "log", "Could not open $conf::statusfile: $!\n" ), return ); + my $oldsel = select(STATFILE); - select( $oldsel ); - close( STATFILE ); -} + print_status( + $main::target_up, $main::incoming_writable, + $main::dstat, $main::next_run, + $main::last_ping_time, $main::current_changes + ); + + select($oldsel); + close(STATFILE); +} ## end sub write_status_file() sub print_status($$$$$$) { - my $mup = shift; - my $incw = shift; - my $ds = shift; - my $next_run = shift; - my $last_ping = shift; - my $currch = shift; - my $approx; - my $version; - - ($version = 'Release: 0.9 $Revision: 1.51 $') =~ s/\$ ?//g; - print "debianqueued $version\n"; - - $approx = $conf::statusdelay ? "approx. 
" : ""; - - if ($mup eq "0") { - print "$conf::target is down, queue pausing\n"; - return; - } - elsif ($conf::upload_method ne "copy") { - print "$conf::target seems to be up, last ping $approx", - print_time(time-$last_ping), " ago\n"; - } - - if ($incw eq "0") { - print "The incoming directory is not writable, queue pausing\n"; - return; - } - - if ($ds eq "i") { - print "Next queue check in $approx",print_time($next_run-time),"\n"; - return; - } - elsif ($ds eq "c") { - print "Checking queue directory\n"; - } - elsif ($ds eq "u") { - print "Uploading to $conf::target\n"; - } - else { - print "Bad status data from daemon: \"$mup$incw$ds\"\n"; - return; - } - - print "Current job is $currch\n" if $currch; -} + my $mup = shift; + my $incw = shift; + my $ds = shift; + my $next_run = shift; + my $last_ping = shift; + my $currch = shift; + my $approx; + my $version; + + ( $version = 'Release: 0.9 $Revision: 1.51 $' ) =~ s/\$ ?//g; + print "debianqueued $version\n"; + + $approx = $conf::statusdelay ? "approx. " : ""; + + if ( $mup eq "0" ) { + print "$conf::target is down, queue pausing\n"; + return; + } elsif ( $conf::upload_method ne "copy" ) { + print "$conf::target seems to be up, last ping $approx", + print_time( time - $last_ping ), " ago\n"; + } + + if ( $incw eq "0" ) { + print "The incoming directory is not writable, queue pausing\n"; + return; + } + + if ( $ds eq "i" ) { + print "Next queue check in $approx", print_time( $next_run - time ), "\n"; + return; + } elsif ( $ds eq "c" ) { + print "Checking queue directory\n"; + } elsif ( $ds eq "u" ) { + print "Uploading to $conf::target\n"; + } else { + print "Bad status data from daemon: \"$mup$incw$ds\"\n"; + return; + } + + print "Current job is $currch\n" if $currch; +} ## end sub print_status($$$$$$) # # format a number for sending to statusd (fixed length STATNUM_LEN) # sub format_status_num(\$$) { - my $varref = shift; - my $num = shift; - - $$varref = sprintf "%".STATNUM_LEN."d", $num; -} + my $varref = shift; + my $num = shift; + + $$varref = sprintf "%" . STATNUM_LEN . "d", $num; +} ## end sub format_status_num(\$$) # # format a string for sending to statusd (fixed length STATSTR_LEN) # sub format_status_str(\$$) { - my $varref = shift; - my $str = shift; + my $varref = shift; + my $str = shift; - $$varref = substr( $str, 0, STATSTR_LEN ); - $$varref .= "\n" x (STATSTR_LEN - length($$varref)); -} + $$varref = substr( $str, 0, STATSTR_LEN ); + $$varref .= "\n" x ( STATSTR_LEN - length($$varref) ); +} ## end sub format_status_str(\$$) # # send a status string to the status daemon @@ -1688,19 +1855,18 @@ sub format_status_str(\$$) { # signal handler. So use only already-defined variables. # sub send_status() { - local $! = 0; # preserve errno - - # re-setup handler, in case we have broken SysV signals - $SIG{"USR1"} = \&send_status; - - syswrite( STATUSD, $main::target_up, 1 ); - syswrite( STATUSD, $main::incoming_writable, 1 ); - syswrite( STATUSD, $main::dstat, 1 ); - syswrite( STATUSD, $main::next_run, STATNUM_LEN ); - syswrite( STATUSD, $main::last_ping_time, STATNUM_LEN ); - syswrite( STATUSD, $main::current_changes, STATSTR_LEN ); -} + local $! 
= 0; # preserve errno + + # re-setup handler, in case we have broken SysV signals + $SIG{"USR1"} = \&send_status; + syswrite( STATUSD, $main::target_up, 1 ); + syswrite( STATUSD, $main::incoming_writable, 1 ); + syswrite( STATUSD, $main::dstat, 1 ); + syswrite( STATUSD, $main::next_run, STATNUM_LEN ); + syswrite( STATUSD, $main::last_ping_time, STATNUM_LEN ); + syswrite( STATUSD, $main::current_changes, STATSTR_LEN ); +} ## end sub send_status() # --------------------------------------------------------------------------- # FTP functions @@ -1711,80 +1877,92 @@ sub send_status() { # sub ftp_open() { - if ($main::FTP_chan) { - # is already open, but might have timed out; test with a cwd - return $main::FTP_chan if $main::FTP_chan->cwd( $conf::targetdir ); - # cwd didn't work, channel is closed, try to reopen it - $main::FTP_chan = undef; - } - - if (!($main::FTP_chan = Net::FTP->new( $conf::target, - Debug => $conf::ftpdebug, - Timeout => $conf::ftptimeout ))) { - msg( "log,mail", "Cannot open FTP server $conf::target\n" ); - goto err; - } - if (!$main::FTP_chan->login()) { - msg( "log,mail", "Anonymous login on FTP server $conf::target failed\n" ); - goto err; - } - if (!$main::FTP_chan->binary()) { - msg( "log,mail", "Can't set binary FTP mode on $conf::target\n" ); - goto err; - } - if (!$main::FTP_chan->cwd( $conf::targetdir )) { - msg( "log,mail", "Can't cd to $conf::targetdir on $conf::target\n" ); - goto err; - } - debug( "opened FTP channel to $conf::target" ); - return 1; - - err: - $main::FTP_chan = undef; - return 0; -} + if ($main::FTP_chan) { + + # is already open, but might have timed out; test with a cwd + return $main::FTP_chan + if $main::FTP_chan->cwd($main::current_targetdir); + + # cwd didn't work, channel is closed, try to reopen it + $main::FTP_chan = undef; + } ## end if ($main::FTP_chan) + + if ( + !( + $main::FTP_chan = + Net::FTP->new( + $conf::target, + Debug => $conf::ftpdebug, + Timeout => $conf::ftptimeout, + Passive => 1, + ) + ) + ) + { + msg( "log,mail", "Cannot open FTP server $conf::target\n" ); + goto err; + } ## end if ( !( $main::FTP_chan... + if ( !$main::FTP_chan->login() ) { + msg( "log,mail", "Anonymous login on FTP server $conf::target failed\n" ); + goto err; + } + if ( !$main::FTP_chan->binary() ) { + msg( "log,mail", "Can't set binary FTP mode on $conf::target\n" ); + goto err; + } + if ( !$main::FTP_chan->cwd($main::current_targetdir) ) { + msg( "log,mail", + "Can't cd to $main::current_targetdir on $conf::target\n" ); + goto err; + } + debug("opened FTP channel to $conf::target"); + return 1; + +err: + $main::FTP_chan = undef; + return 0; +} ## end sub ftp_open() sub ftp_cmd($@) { - my $cmd = shift; - my ($rv, $err); - my $direct_resp_cmd = ($cmd eq "quot"); - - debug( "executing FTP::$cmd(".join(", ",@_).")" ); - $SIG{"ALRM"} = sub { die "timeout in FTP::$cmd\n" } ; - alarm( $conf::remote_timeout ); - eval { $rv = $main::FTP_chan->$cmd( @_ ); }; - alarm( 0 ); - $err = ""; - $rv = (ftp_code() =~ /^2/) ? 1 : 0 if $direct_resp_cmd; - if ($@) { - $err = $@; - undef $rv; - } - elsif (!$rv) { - $err = ftp_response(); - } - return ($rv, $err); -} + my $cmd = shift; + my ( $rv, $err ); + my $direct_resp_cmd = ( $cmd eq "quot" ); + + debug( "executing FTP::$cmd(" . join( ", ", @_ ) . ")" ); + $SIG{"ALRM"} = sub { die "timeout in FTP::$cmd\n" }; + alarm($conf::remote_timeout); + eval { $rv = $main::FTP_chan->$cmd(@_); }; + alarm(0); + $err = ""; + $rv = ( ftp_code() =~ /^2/ ) ? 
1 : 0 if $direct_resp_cmd; + if ($@) { + $err = $@; + undef $rv; + } elsif ( !$rv ) { + $err = ftp_response(); + } + return ( $rv, $err ); +} ## end sub ftp_cmd($@) sub ftp_close() { - if ($main::FTP_chan) { - $main::FTP_chan->quit(); - $main::FTP_chan = undef; - } - return 1; -} + if ($main::FTP_chan) { + $main::FTP_chan->quit(); + $main::FTP_chan = undef; + } + return 1; +} ## end sub ftp_close() sub ftp_response() { - return join( '', @{${*$main::FTP_chan}{'net_cmd_resp'}} ); + return join( '', @{ ${*$main::FTP_chan}{'net_cmd_resp'} } ); } sub ftp_code() { - return ${*$main::FTP_chan}{'net_cmd_code'}; + return ${*$main::FTP_chan}{'net_cmd_code'}; } sub ftp_error() { - my $code = ftp_code(); - return ($code =~ /^[45]/) ? 1 : 0; + my $code = ftp_code(); + return ( $code =~ /^[45]/ ) ? 1 : 0; } # --------------------------------------------------------------------------- @@ -1792,176 +1970,178 @@ sub ftp_error() { # --------------------------------------------------------------------------- sub ssh_cmd($) { - my $cmd = shift; - my ($msg, $stat); - - my $ecmd = "$conf::ssh $conf::ssh_options $conf::target ". - "-l $conf::targetlogin \'cd $conf::targetdir; $cmd\'"; - debug( "executing $ecmd" ); - $SIG{"ALRM"} = sub { die "timeout in ssh command\n" } ; - alarm( $conf::remote_timeout ); - eval { $msg = `$ecmd 2>&1`; }; - alarm( 0 ); - if ($@) { - $msg = $@; - $stat = 1; - } - else { - $stat = $?; - } - return ($msg, $stat); -} + my $cmd = shift; + my ( $msg, $stat ); + + my $ecmd = "$conf::ssh $conf::ssh_options $conf::target " + . "-l $conf::targetlogin \'cd $main::current_targetdir; $cmd\'"; + debug("executing $ecmd"); + $SIG{"ALRM"} = sub { die "timeout in ssh command\n" }; + alarm($conf::remote_timeout); + eval { $msg = `$ecmd 2>&1`; }; + alarm(0); + if ($@) { + $msg = $@; + $stat = 1; + } else { + $stat = $?; + } + return ( $msg, $stat ); +} ## end sub ssh_cmd($) sub scp_cmd(@) { - my ($msg, $stat); - - my $ecmd = "$conf::scp $conf::ssh_options @_ ". - "$conf::targetlogin\@$conf::target:$conf::targetdir"; - debug( "executing $ecmd" ); - $SIG{"ALRM"} = sub { die "timeout in scp\n" } ; - alarm( $conf::remote_timeout ); - eval { $msg = `$ecmd 2>&1`; }; - alarm( 0 ); - if ($@) { - $msg = $@; - $stat = 1; - } - else { - $stat = $?; - } - return ($msg, $stat); -} + my ( $msg, $stat ); + + my $ecmd = "$conf::scp $conf::ssh_options @_ " + . "$conf::targetlogin\@$conf::target:$main::current_targetdir"; + debug("executing $ecmd"); + $SIG{"ALRM"} = sub { die "timeout in scp\n" }; + alarm($conf::remote_timeout); + eval { $msg = `$ecmd 2>&1`; }; + alarm(0); + if ($@) { + $msg = $@; + $stat = 1; + } else { + $stat = $?; + } + return ( $msg, $stat ); +} ## end sub scp_cmd(@) sub local_cmd($;$) { - my $cmd = shift; - my $nocd = shift; - my ($msg, $stat); - - my $ecmd = ($nocd ? "" : "cd $conf::targetdir; ") . $cmd; - debug( "executing $ecmd" ); - $msg = `($ecmd) 2>&1`; - $stat = $?; - return ($msg, $stat); - -} + my $cmd = shift; + my $nocd = shift; + my ( $msg, $stat ); + + my $ecmd = ( $nocd ? "" : "cd $main::current_targetdir; " ) . 
$cmd; + debug("executing $ecmd"); + $msg = `($ecmd) 2>&1`; + $stat = $?; + return ( $msg, $stat ); + +} ## end sub local_cmd($;$) # # check if target is alive (code stolen from Net::Ping.pm) # sub check_alive(;$) { - my $timeout = shift; - my( $saddr, $ret, $target_ip ); - local( *PINGSOCK ); - - if ($conf::upload_method eq "copy") { - format_status_num( $main::last_ping_time, time ); - $main::target_up = 1; - return; - } - - $timeout ||= 30; - - if (!($target_ip = (gethostbyname($conf::target))[4])) { - msg( "log", "Cannot get IP address of $conf::target\n" ); - $ret = 0; - goto out; - } - $saddr = pack( 'S n a4 x8', AF_INET, $main::echo_port, $target_ip ); - $SIG{'ALRM'} = sub { die } ; - alarm( $timeout ); - - $ret = $main::tcp_proto; # avoid warnings about unused variable + my $timeout = shift; + my ( $saddr, $ret, $target_ip ); + local (*PINGSOCK); + + if ( $conf::upload_method eq "copy" ) { + format_status_num( $main::last_ping_time, time ); + $main::target_up = 1; + return; + } + + $timeout ||= 30; + + if ( !( $target_ip = ( gethostbyname($conf::target) )[4] ) ) { + msg( "log", "Cannot get IP address of $conf::target\n" ); $ret = 0; - eval <<'EOM' ; + goto out; + } + $saddr = pack( 'S n a4 x8', AF_INET, $main::echo_port, $target_ip ); + $SIG{'ALRM'} = sub { die }; + alarm($timeout); + + $ret = $main::tcp_proto; # avoid warnings about unused variable + $ret = 0; + eval <<'EOM' ; return unless socket( PINGSOCK, PF_INET, SOCK_STREAM, $main::tcp_proto ); return unless connect( PINGSOCK, $saddr ); $ret = 1; EOM - alarm( 0 ); - close( PINGSOCK ); - msg( "log", "pinging $conf::target: " . ($ret ? "ok" : "down") . "\n" ); - out: - $main::target_up = $ret ? "1" : "0"; - format_status_num( $main::last_ping_time, time ); - write_status_file() if $conf::statusdelay; -} + alarm(0); + close(PINGSOCK); + msg( "log", "pinging $conf::target: " . ( $ret ? "ok" : "down" ) . "\n" ); +out: + $main::target_up = $ret ? "1" : "0"; + format_status_num( $main::last_ping_time, time ); + write_status_file() if $conf::statusdelay; +} ## end sub check_alive(;$) # # check if incoming dir on target is writable # sub check_incoming_writable() { - my $testfile = ".debianqueued-testfile"; - my ($msg, $stat); - - if ($conf::upload_method eq "ssh") { - ($msg, $stat) = ssh_cmd( "rm -f $testfile; touch $testfile; ". - "rm -f $testfile" ); - } - elsif ($conf::upload_method eq "ftp") { - my $file = "junk-for-writable-test-".format_time(); - $file =~ s/[ :.]/-/g; - local( *F ); - open( F, ">$file" ); close( F ); - my $rv; - ($rv, $msg) = ftp_cmd( "put", $file ); - $stat = 0; - $msg = "" if !defined $msg; - unlink $file; - ftp_cmd( "delete", $file ); - } - elsif ($conf::upload_method eq "copy") { - ($msg, $stat) = local_cmd( "rm -f $testfile; touch $testfile; ". - "rm -f $testfile" ); - } - chomp( $msg ); - debug( "exit status: $stat, output was: $msg" ); - - if (!$stat) { - # change incoming_writable only if ssh didn't return an error - $main::incoming_writable = - ($msg =~ /(permission denied|read-?only file|cannot create)/i) ? "0":"1"; - } - else { - debug( "local error, keeping old status" ); - } - debug( "incoming_writable = $main::incoming_writable" ); - write_status_file() if $conf::statusdelay; - return $main::incoming_writable; -} + my $testfile = ".debianqueued-testfile"; + my ( $msg, $stat ); + + if ( $conf::upload_method eq "ssh" ) { + ( $msg, $stat ) = + ssh_cmd( "rm -f $testfile; touch $testfile; " . "rm -f $testfile" ); + } elsif ( $conf::upload_method eq "ftp" ) { + my $file = "junk-for-writable-test-" . 
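# Editor's sketch, not part of this commit: check_incoming_writable() probes
# the target with a throwaway file instead of trusting cached state; for the
# "copy" method this reduces to a local shell test. A self-contained
# equivalent, assuming $dir is the incoming directory:
sub dir_writable_probe {
    my $dir      = shift;
    my $testfile = "$dir/.debianqueued-testfile";
    my $out = `( rm -f $testfile; touch $testfile; rm -f $testfile ) 2>&1`;
    # same heuristic the daemon uses: an error string means not writable
    return ( $out =~ /(permission denied|read-?only file|cannot create)/i )
      ? 0
      : 1;
}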
format_time(); + $file =~ s/[ :.]/-/g; + local (*F); + open( F, ">$file" ); + close(F); + my $rv; + ( $rv, $msg ) = ftp_cmd( "put", $file ); + $stat = 0; + $msg = "" if !defined $msg; + unlink $file; + ftp_cmd( "delete", $file ); + } elsif ( $conf::upload_method eq "copy" ) { + ( $msg, $stat ) = + local_cmd( "rm -f $testfile; touch $testfile; " . "rm -f $testfile" ); + } + chomp($msg); + debug("exit status: $stat, output was: $msg"); + + if ( !$stat ) { + + # change incoming_writable only if ssh didn't return an error + $main::incoming_writable = + ( $msg =~ /(permission denied|read-?only file|cannot create)/i ) + ? "0" + : "1"; + } else { + debug("local error, keeping old status"); + } + debug("incoming_writable = $main::incoming_writable"); + write_status_file() if $conf::statusdelay; + return $main::incoming_writable; +} ## end sub check_incoming_writable() # # remove a list of files, log failing ones # sub rm(@) { - my $done = 0; + my $done = 0; - foreach ( @_ ) { - (unlink $_ and ++$done) - or $! == ENOENT or msg( "log", "Could not delete $_: $!\n" ); - } - return $done; -} + foreach (@_) { + ( unlink $_ and ++$done ) + or $! == ENOENT + or msg( "log", "Could not delete $_: $!\n" ); + } + return $done; +} ## end sub rm(@) # # get md5 checksum of a file # sub md5sum($) { - my $file = shift; - my $line; + my $file = shift; + my $line; - chomp( $line = `$conf::md5sum $file` ); - debug( "md5sum($file): ", $? ? "exit status $?" : - $line =~ /^(\S+)/ ? $1 : "match failed" ); - return $? ? "" : $line =~ /^(\S+)/ ? $1 : ""; -} + chomp( $line = `$conf::md5sum $file` ); + debug( "md5sum($file): ", + $? ? "exit status $?" + : $line =~ /^(\S+)/ ? $1 + : "match failed" ); + return $? ? "" : $line =~ /^(\S+)/ ? $1 : ""; +} ## end sub md5sum($) # # check if a file probably belongs to a Debian upload # sub is_debian_file($) { - my $file = shift; - return $file =~ /\.(deb|dsc|(diff|tar)\.gz)$/ && - $file !~ /\.orig\.tar\.gz/; + my $file = shift; + return $file =~ /\.(deb|dsc|(diff|tar)\.gz)$/ + && $file !~ /\.orig\.tar\.gz/; } # @@ -1969,104 +2149,109 @@ sub is_debian_file($) { # return "" if not possible # sub get_maintainer($) { - my $file = shift; - my $maintainer = ""; - local( *F ); - - if ($file =~ /\.diff\.gz$/) { - # parse a diff - open( F, "$conf::gzip -dc '$file' 2>/dev/null |" ) or return ""; - while( <F> ) { - # look for header line of a file */debian/control - last if m,^\+\+\+\s+[^/]+/debian/control(\s+|$),; - } - while( <F> ) { - last if /^---/; # end of control file patch, no Maintainer: found - # inside control file patch look for Maintainer: field - $maintainer = $1, last if /^\+Maintainer:\s*(.*)$/i; - } - while( <F> ) { } # read to end of file to avoid broken pipe - close( F ) or return ""; - } - elsif ($file =~ /\.(deb|dsc|tar\.gz)$/) { - if ($file =~ /\.deb$/ && $conf::ar) { - # extract control.tar.gz from .deb with ar, then let tar extract - # the control file itself - open( F, "($conf::ar p '$file' control.tar.gz | ". - "$conf::tar -xOf - ". - "--use-compress-program $conf::gzip ". - "control) 2>/dev/null |" ) - or return ""; - } - elsif ($file =~ /\.dsc$/) { - # just do a plain grep - debug( "get_maint: .dsc, no cmd" ); - open( F, "<$file" ) or return ""; - } - elsif ($file =~ /\.tar\.gz$/) { - # let tar extract a file */debian/control - open(F, "$conf::tar -xOf '$file' ". - "--use-compress-program $conf::gzip ". 
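# Editor's sketch, not part of this commit: get_maintainer() streams control
# data out of .diff.gz/.deb/.dsc/.tar.gz files via gzip/ar/tar pipes and
# scans it for the Maintainer: field. The scanning step in isolation,
# assuming $fh already streams a debian/control file:
sub maintainer_from_control {
    my $fh = shift;
    while (<$fh>) {
        return $1 if /^Maintainer:\s*(.*)$/i;    # first match wins
    }
    return "";                                   # no Maintainer: found
}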
- "\\*/debian/control 2>&1 |") - or return ""; - } - else { - return ""; - } - while( <F> ) { - $maintainer = $1, last if /^Maintainer:\s*(.*)$/i; - } - close( F ) or return ""; - } - - return $maintainer; -} + my $file = shift; + my $maintainer = ""; + local (*F); + + if ( $file =~ /\.diff\.gz$/ ) { + + # parse a diff + open( F, "$conf::gzip -dc '$file' 2>/dev/null |" ) or return ""; + while (<F>) { + + # look for header line of a file */debian/control + last if m,^\+\+\+\s+[^/]+/debian/control(\s+|$),; + } + while (<F>) { + last if /^---/; # end of control file patch, no Maintainer: found + # inside control file patch look for Maintainer: field + $maintainer = $1, last if /^\+Maintainer:\s*(.*)$/i; + } + while (<F>) { } # read to end of file to avoid broken pipe + close(F) or return ""; + } elsif ( $file =~ /\.(deb|dsc|tar\.gz)$/ ) { + if ( $file =~ /\.deb$/ && $conf::ar ) { + + # extract control.tar.gz from .deb with ar, then let tar extract + # the control file itself + open( F, + "($conf::ar p '$file' control.tar.gz | " + . "$conf::tar -xOf - " + . "--use-compress-program $conf::gzip " + . "control) 2>/dev/null |" + ) or return ""; + } elsif ( $file =~ /\.dsc$/ ) { + + # just do a plain grep + debug("get_maint: .dsc, no cmd"); + open( F, "<$file" ) or return ""; + } elsif ( $file =~ /\.tar\.gz$/ ) { + + # let tar extract a file */debian/control + open( F, + "$conf::tar -xOf '$file' " + . "--use-compress-program $conf::gzip " + . "\\*/debian/control 2>&1 |" + ) or return ""; + } else { + return ""; + } + while (<F>) { + $maintainer = $1, last if /^Maintainer:\s*(.*)$/i; + } + close(F) or return ""; + } ## end elsif ( $file =~ /\.(deb|dsc|tar\.gz)$/) + + return $maintainer; +} ## end sub get_maintainer($) # # return a pattern that matches all files that probably belong to one job # sub debian_file_stem($) { - my $file = shift; - my( $pkg, $version ); - - # strip file suffix - $file =~ s,\.(deb|dsc|changes|(orig\.)?tar\.gz|diff\.gz)$,,; - # if not is *_* (name_version), can't derive a stem and return just - # the file's name - return $file if !($file =~ /^([^_]+)_([^_]+)/); - ($pkg, $version) = ($1, $2); - # strip Debian revision from version - $version =~ s/^(.*)-[\d.+-]+$/$1/; - - return "${pkg}_${version}*"; -} - + my $file = shift; + my ( $pkg, $version ); + + # strip file suffix + $file =~ s,\.(deb|dsc|changes|(orig\.)?tar\.gz|diff\.gz)$,,; + + # if not is *_* (name_version), can't derive a stem and return just + # the file's name + return $file if !( $file =~ /^([^_]+)_([^_]+)/ ); + ( $pkg, $version ) = ( $1, $2 ); + + # strip Debian revision from version + $version =~ s/^(.*)-[\d.+-]+$/$1/; + + return "${pkg}_${version}*"; +} ## end sub debian_file_stem($) + # # output a messages to several destinations # # first arg is a comma-separated list of destinations; valid are "log" # and "mail"; rest is stuff to be printed, just as with print -# +# sub msg($@) { - my @dest = split( ',', shift ); + my @dest = split( ',', shift ); - if (grep /log/, @dest ) { - my $now = format_time(); - print LOG "$now ", @_; - } + if ( grep /log/, @dest ) { + my $now = format_time(); + print LOG "$now ", @_; + } - if (grep /mail/, @dest ) { - $main::mail_text .= join( '', @_ ); - } -} + if ( grep /mail/, @dest ) { + $main::mail_text .= join( '', @_ ); + } +} ## end sub msg($@) # # print a debug messages, if $debug is true # sub debug(@) { - return if !$conf::debug; - my $now = format_time(); - print LOG "$now DEBUG ", @_, "\n"; + return if !$conf::debug; + my $now = format_time(); + print LOG "$now DEBUG 
", @_, "\n"; } # @@ -2074,196 +2259,232 @@ sub debug(@) { # address, subject, ...) # sub init_mail(;$) { - my $file = shift; + my $file = shift; - $main::mail_addr = ""; - $main::mail_text = ""; - $main::mail_subject = $file ? "Processing of $file" : ""; -} + $main::mail_addr = ""; + $main::mail_text = ""; + %main::packages = (); + $main::mail_subject = $file ? "Processing of $file" : ""; +} ## end sub init_mail(;$) # # finalize mail to be sent from msg(): check if something present, and # then send out # sub finish_mail() { - local( *MAIL ); - - debug( "No mail for $main::mail_addr" ) - if $main::mail_addr && !$main::mail_text; - return unless $main::mail_addr && $main::mail_text; - - if (!send_mail($main::mail_addr, $main::mail_subject, $main::mail_text)) { - # store this mail in memory so it isn't lost if executing sendmail - # failed. - push( @main::stored_mails, { addr => $main::mail_addr, - subject => $main::mail_subject, - text => $main::mail_text } ); - } - init_mail(); - - # try to send out stored mails - my $mailref; - while( $mailref = shift(@main::stored_mails) ) { - if (!send_mail( $mailref->{'addr'}, $mailref->{'subject'}, - $mailref->{'text'} )) { - unshift( @main::stored_mails, $mailref ); - last; - } - } -} + + debug("No mail for $main::mail_addr") + if $main::mail_addr && !$main::mail_text; + return unless $main::mail_addr && $main::mail_text; + + if ( !send_mail( $main::mail_addr, $main::mail_subject, $main::mail_text ) ) + { + + # store this mail in memory so it isn't lost if executing sendmail + # failed. + push( + @main::stored_mails, + { + addr => $main::mail_addr, + subject => $main::mail_subject, + text => $main::mail_text + } + ); + } ## end if ( !send_mail( $main::mail_addr... + init_mail(); + + # try to send out stored mails + my $mailref; + while ( $mailref = shift(@main::stored_mails) ) { + if ( + !send_mail( $mailref->{'addr'}, $mailref->{'subject'}, + $mailref->{'text'} ) + ) + { + unshift( @main::stored_mails, $mailref ); + last; + } ## end if ( !send_mail( $mailref... + } ## end while ( $mailref = shift(... +} ## end sub finish_mail() # # send one mail # sub send_mail($$$) { - my $addr = shift; - my $subject = shift; - my $text = shift; - - debug( "Sending mail to $addr" ); - debug( "executing $conf::mail -s '$subject' '$addr'" ); - if (!open( MAIL, "|$conf::mail -s '$subject' '$addr'" )) { - msg( "log", "Could not open pipe to $conf::mail: $!\n" ); - return 0; - } - print MAIL $text; - print MAIL "\nGreetings,\n\n\tYour Debian queue daemon\n"; - if (!close( MAIL )) { - msg( "log", "$conf::mail failed (exit status ", $? >> 8, ")\n" ); - return 0; - } - return 1; -} + my $addr = shift; + my $subject = shift; + my $text = shift; + + my $package = + keys %main::packages ? 
join( ' ', keys %main::packages ) : ""; + + use Email::Send; + + unless ( defined($Email::Send::Sendmail::SENDMAIL) ) { + $Email::Send::Sendmail::SENDMAIL = $conf::mail; + } + + my $date = sprintf "%s", + strftime( "%a, %d %b %Y %T %z", ( localtime(time) ) ); + my $message = <<__MESSAGE__; +To: $addr +From: Archive Administrator <dak\@ftp-master.debian.org> +Subject: $subject +Date: $date +X-Debian: DAK +__MESSAGE__ + + if ( length $package ) { + $message .= "X-Debian-Package: $package\n"; + } + + $message .= "\n$text"; + $message .= "\nGreetings,\n\n\tYour Debian queue daemon\n"; + + my $mail = Email::Send->new; + for (qw[Sendmail SMTP]) { + $mail->mailer($_) and last if $mail->mailer_available($_); + } + + my $ret = $mail->send($message); + if ( $ret && $ret !~ /Message sent|success/ ) { + return 0; + } + + return 1; +} ## end sub send_mail($$$) # # try to find a mail address for a name in the keyrings # sub try_to_get_mail_addr($$) { - my $name = shift; - my $listref = shift; - - @$listref = (); - open( F, "$conf::gpg --no-options --batch --no-default-keyring ". - "--always-trust --keyring ". - join (" --keyring ",@conf::keyrings). - " --list-keys |" ) - or return ""; - while( <F> ) { - if (/^pub / && / $name /) { - /<([^>]*)>/; - push( @$listref, $1 ); - } - } - close( F ); - - return (@$listref >= 1) ? $listref->[0] : ""; -} + my $name = shift; + my $listref = shift; + + @$listref = (); + open( F, + "$conf::gpg --no-options --batch --no-default-keyring " + . "--always-trust --keyring " + . join( " --keyring ", @conf::keyrings ) + . " --list-keys |" + ) or return ""; + while (<F>) { + if ( /^pub / && / $name / ) { + /<([^>]*)>/; + push( @$listref, $1 ); + } + } ## end while (<F>) + close(F); + + return ( @$listref >= 1 ) ? $listref->[0] : ""; +} ## end sub try_to_get_mail_addr($$) # # return current time as string # sub format_time() { - my $t; + my $t; - # omit weekday and year for brevity - ($t = localtime) =~ /^\w+\s(.*)\s\d+$/; - return $1; -} + # omit weekday and year for brevity + ( $t = localtime ) =~ /^\w+\s(.*)\s\d+$/; + return $1; +} ## end sub format_time() sub print_time($) { - my $secs = shift; - my $hours = int($secs/(60*60)); + my $secs = shift; + my $hours = int( $secs / ( 60 * 60 ) ); - $secs -= $hours*60*60; - return sprintf "%d:%02d:%02d", $hours, int($secs/60), $secs % 60; -} + $secs -= $hours * 60 * 60; + return sprintf "%d:%02d:%02d", $hours, int( $secs / 60 ), $secs % 60; +} ## end sub print_time($) # # block some signals during queue processing -# +# # This is just to avoid data inconsistency or uploads being aborted in the # middle. Only "soft" signals are blocked, i.e. SIGINT and SIGTERM, try harder # ones if you really want to kill the daemon at once. # sub block_signals() { - POSIX::sigprocmask( SIG_BLOCK, $main::block_sigset ); + POSIX::sigprocmask( SIG_BLOCK, $main::block_sigset ); } sub unblock_signals() { - POSIX::sigprocmask( SIG_UNBLOCK, $main::block_sigset ); + POSIX::sigprocmask( SIG_UNBLOCK, $main::block_sigset ); } # # process SIGHUP: close log file and reopen it (for logfile cycling) # sub close_log($) { - close( LOG ); - close( STDOUT ); - close( STDERR ); - - open( LOG, ">>$conf::logfile" ) - or die "Cannot open my logfile $conf::logfile: $!\n"; - chmod( 0644, $conf::logfile ) - or msg( "log", "Cannot set modes of $conf::logfile: $!\n" ); - select( (select(LOG), $| = 1)[0] ); - - open( STDOUT, ">&LOG" ) - or msg( "log", "$main::progname: Can't redirect stdout to ". 
- "$conf::logfile: $!\n" ); - open( STDERR, ">&LOG" ) - or msg( "log", "$main::progname: Can't redirect stderr to ". - "$conf::logfile: $!\n" ); - msg( "log", "Restart after SIGHUP\n" ); -} + close(LOG); + close(STDOUT); + close(STDERR); + + open( LOG, ">>$conf::logfile" ) + or die "Cannot open my logfile $conf::logfile: $!\n"; + chmod( 0644, $conf::logfile ) + or msg( "log", "Cannot set modes of $conf::logfile: $!\n" ); + select( ( select(LOG), $| = 1 )[0] ); + + open( STDOUT, ">&LOG" ) + or msg( "log", + "$main::progname: Can't redirect stdout to " . "$conf::logfile: $!\n" ); + open( STDERR, ">&LOG" ) + or msg( "log", + "$main::progname: Can't redirect stderr to " . "$conf::logfile: $!\n" ); + msg( "log", "Restart after SIGHUP\n" ); +} ## end sub close_log($) # # process SIGCHLD: check if it was our statusd process # sub kid_died($) { - my $pid; - - # reap statusd, so that it's no zombie when we try to kill(0) it - waitpid( $main::statusd_pid, WNOHANG ); - -# Uncomment the following line if your Perl uses unreliable System V signal -# (i.e. if handlers reset to default if the signal is delivered). -# (Unfortunately, the re-setup can't be done in any case, since on some -# systems this will cause the SIGCHLD to be delivered again if there are -# still unreaped children :-(( ) - -# $SIG{"CHLD"} = \&kid_died; # resetup handler for SysV -} + my $pid; + + # reap statusd, so that it's no zombie when we try to kill(0) it + waitpid( $main::statusd_pid, WNOHANG ); + + # Uncomment the following line if your Perl uses unreliable System V signal + # (i.e. if handlers reset to default if the signal is delivered). + # (Unfortunately, the re-setup can't be done in any case, since on some + # systems this will cause the SIGCHLD to be delivered again if there are + # still unreaped children :-(( ) + + # $SIG{"CHLD"} = \&kid_died; # resetup handler for SysV +} ## end sub kid_died($) sub restart_statusd() { - # restart statusd if it died - if (!kill( 0, $main::statusd_pid)) { - close( STATUSD ); # close out pipe end - $main::statusd_pid = fork_statusd(); - } -} + + # restart statusd if it died + if ( !kill( 0, $main::statusd_pid ) ) { + close(STATUSD); # close out pipe end + $main::statusd_pid = fork_statusd(); + } +} ## end sub restart_statusd() # # process a fatal signal: cleanup and exit # sub fatal_signal($) { - my $signame = shift; - my $sig; - - # avoid recursions of fatal_signal in case of BSD signals - foreach $sig ( qw( ILL ABRT BUS FPE SEGV PIPE ) ) { - $SIG{$sig} = "DEFAULT"; - } - - if ($$ == $main::maind_pid) { - # only the main daemon should do this - kill( $main::signo{"TERM"}, $main::statusd_pid ) - if defined $main::statusd_pid; - unlink( $conf::statusfile, $conf::pidfile ); - } - msg( "log", "Caught SIG$signame -- exiting (pid $$)\n" ); - exit 1; -} - + my $signame = shift; + my $sig; + + # avoid recursions of fatal_signal in case of BSD signals + foreach $sig (qw( ILL ABRT BUS FPE SEGV PIPE )) { + $SIG{$sig} = "DEFAULT"; + } + + if ( $$ == $main::maind_pid ) { + + # only the main daemon should do this + kill( $main::signo{"TERM"}, $main::statusd_pid ) + if defined $main::statusd_pid; + unlink( $conf::statusfile, $conf::pidfile ); + } ## end if ( $$ == $main::maind_pid) + msg( "log", "Caught SIG$signame -- exiting (pid $$)\n" ); + exit 1; +} ## end sub fatal_signal($) # Local Variables: # tab-width: 4 diff --git a/web/dinstall.html b/web/dinstall.html new file mode 100644 index 00000000..2d0f967f --- /dev/null +++ b/web/dinstall.html @@ -0,0 +1,59 @@ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD 
HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<HTML>
+<HEAD>
+<TITLE>Dinstall might be running in...</TITLE>
+<!-- countdown script and remaining HEAD markup not recoverable from this extract -->
+</HEAD>
+<BODY>
+Dinstall might be running (more or less) in...
+<!-- countdown display (form fields) not recoverable from this extract -->
+
+dinstall should run at 07h 52min and 19h 52min (UTC), i.e. 07h 52min AM and PM
+
+Made by Eduard Bloch &lt;blade@debian.org&gt;
+Small update to use 12h dinstall by Felipe Augusto van de Wiel (faw)
+Please check this announcement about dinstall running twice daily.
+</BODY>
+</HTML>
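
Editor's sketch, not part of the commit above: the dinstall.html page counts
down to the next dinstall run using the fixed 07h52/19h52 UTC schedule quoted
in the page. A minimal Perl equivalent of that calculation, assuming only
those two daily start times:

    use Time::Local qw(timegm);

    # seconds until the next dinstall run at 07:52 or 19:52 UTC
    sub secs_to_next_dinstall {
        my $now = time;
        my ( undef, undef, undef, $mday, $mon, $year ) = gmtime($now);
        for my $h ( 7, 19 ) {
            my $run = timegm( 0, 52, $h, $mday, $mon, $year );
            return $run - $now if $run > $now;
        }
        # both runs today are past: next one is 07:52 UTC tomorrow
        return timegm( 0, 52, 7, $mday, $mon, $year ) + 24 * 60 * 60 - $now;
    }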