From: Joerg Jaspert Date: Fri, 25 Apr 2008 22:49:05 +0000 (+0200) Subject: Merge mainline X-Git-Url: https://git.decadent.org.uk/gitweb/?a=commitdiff_plain;h=cae814a1ffbbb2944931693f35f73062a5ea99e7;hp=de08e1819d9164cbda04bc9b86fd0716a6e7bc2f;p=dak.git Merge mainline --- diff --git a/ChangeLog b/ChangeLog index 3cb970b2..c6be882a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,8 +1,195 @@ -2008-01-09 Joerg Jaspert +2008-04-25 Joerg Jaspert * dak/cruft_report.py (main): Make it possible to look at experimental too, especially NBS + * dak/split_done.py (main): Only move files into their subdirs if + they are older than 30 days. That enables us to run this script as + part of a cronjob. + + * config/debian/cron.weekly: Run dak split-done + +2008-04-23 Thomas Viehmann + + * dak/process_unchecked.py: add changes["sponsoremail"] + for sponsored uploads if desired + * daklib/queue.py: add changes["sponsoremail"] to + Subst["__MAINTAINER_TO__"] if present + * daklib/utils.py: add functions + is_email_alias to check which accounts allow email forwarding, + which_alias_file to find the alias file, and + gpg_get_key_addresses to find uid addresses for a given + fingerprint + +2008-04-22 Joerg Jaspert + + * setup/init_pool.sql: added a function/aggregate for the release + team to base some script on it. + + * config/debian/cron.daily: push katie@merkel to immediately start + the sync of projectb there. + +2008-04-21 Joerg Jaspert + + * scripts/debian/expire_dumps: New script, expires old database + dumps, using a scheme to keep more of the recent dumps. + + * config/debian/cron.daily: Use the new script. Also - compress + all files older than 7 days, instead of 30. + + * dak/process_accepted.py (install): Do not break if a + source/maintainer combination is already in src_uploaders, "just" + warn us. + +2008-04-20 Thomas Viehmann + + * daklib/utils.py (build_file_list): Deal with "Format 3 style" + Format lines (ie. those having extra text appended). 
+ +2008-04-19 Joerg Jaspert + + * dak/process_unchecked.py (check_files): Sanity check the + provides field, which closes #472783 + +2008-04-18 Joerg Jaspert + + * config/debian/dak.conf: Add mapping stable-proposed-updates + -> proposed-updates. + + * dak/transitions.py (load_transitions): Additionally check for + invalid package list indentation + +2008-04-17 Joerg Jaspert + + * config/debian/dak.conf: Add TempPath statement for the Release + Transitions script + + * dak/transitions.py (temp_transitions_file): Use the TempPath + (write_transitions_from_file): Check if the file we should get our + transitions from is in our TempPath, error out if it isnt + (main): Check for TempPath existance + +2008-04-12 James Troup + + * dak/clean_proposed_updates.py: add support for -s/--suite and + -n/--no-action. + +2008-04-11 Anthony Towns + + * dak/utils.py: build_file_list() extra parameters so it can + build a file list for checksums-foo fields. Don't use float() to + compare formats, because Format: 1.10 should compare greater than + Format: 1.9 (use "1.9".split(".",1) and tuple comparison instead) + + * dak/process_unchecked.py: check_md5sum becomes check_hashes + and check_hash. If changes format is 1.8 or later, also check + checksums-sha1 and checksums-sha256 for both .changes and .dsc, + and reject on presence/absence of un/expected checksums-* fields. + +2008-04-07 Joerg Jaspert + + * daklib/utils.py (build_file_list): Check for dpkg .changes + adjusted to reject newer (and right now broken) 1.8 version, until + dpkg (or debsign) is fixed and doesn't produce invalid .changes anymore + +2008-03-22 Joerg Jaspert + + * dak/transitions.py (load_transitions): Check if all our keys are + defined, if there are only keys defined we want and also the types + of the various keys. + +2008-03-22 Anthony Towns + + * dak/edit_transitions.py: Add --import option. + Add --use-sudo option. Use fcntl locking for writing. + Move writing into a function (write_transitions). 
+ Reinvoke self using sudo and --import if necessary. + Move temporary file creation into a function, use mkstemp. + Rename to "dak transitions". + +2008-03-21 Joerg Jaspert + + * dak/edit_transitions.py (edit_transitions): Use sudo to copy the + edited file back in place + (check_transitions): Use proper locking and also use sudo to copy + the new file in place + +2008-03-21 Anthony Towns + + * config/debian/extensions.py: Add infrastructure for replacing + functions in dak modules; add upload blocking for dpkg. + +2008-03-12 Joerg Jaspert + + * dak/edit_transitions.py: Done a number of cleanups to make code + working. Also changed the way prompting/answering goes, to not + have to import daklib/queue. + (edit_transitions): When done with a successful edit - also print + a final overview about defined transitions + +2008-03-11 Joerg Jaspert + + * dak/process_unchecked.py: Import syck module directly, not "from + syck import *" + (check_transition): Do the check for sourceful upload in here + Also adjust the syck loading commands, rename new_vers to + expected, curvers to current, to make it more clear what they mean. + + * daklib/database.py (get_suite_version): Renamed from + get_testing_version. Also changed the cache variables name + + * The above changes are based on modifications from Anthony. + + * dak/dak.py (init): Renamed check -> edit transitions + + * dak/edit_transitions.py: Renamed from check_transitions.py + (main): Also rename new_vers/curvers to expected/current + Basically a nice rewrite, so it now does checks and edit, + depending on how you call it. Check also removes old transitions, + if user wants it. + +2008-03-02 Joerg Jaspert + + * debian/control (Suggests): Add python-syck to Depends: + + * dak/dak.py (init): Tell it about check_transitions + + * dak/check_transitions.py (usage): Added, checks the transitions + file (if any) + + * daklib/database.py (get_testing_version): Added. 
Returns the + version for the source in testing, if any + + * dak/process_unchecked.py (check_transition): Added. Checks if a + release team member defined a transition, and rejects based on + that data. + (process_it): Use it. + (check_transition): Warn on broken transitions file and return, + not doing anything. + (check_transition): Moved out of here, into daklib/queue + (process_it): Call check_transitions only if + changes[architecture] has source included. + (check_transition): Now call the database.get_testing_version + +2008-02-09 Christoph Berg + + * daklib/queue.py (get_type): fubar does not exist in global + namespace. + + * setup/add_constraints.sql setup/init_pool.sql: Add changedby column + to source table, and move src_uploaders after source so the REFERNCES + clause works. + * dak/process_accepted.py (install): Fill the changedby column from + the information found in the .changes. This will allow to identify + NMUs and sponsored uploads more precisely in tools querying projectb. + * scripts/debian/insert_missing_changedby.py: Script to import yet + missing fields from filippo's uploads-history DB. + +2008-02-06 Joerg Jaspert + + * daklib/utils.py (check_signature): Make variable key available, + so we can access it. + 2008-01-07 Joerg Jaspert * dak/examine_package.py (check_deb): Remove linda call. It diff --git a/config/debian-security/apt.conf b/config/debian-security/apt.conf index a231affe..41b10ef1 100644 --- a/config/debian-security/apt.conf +++ b/config/debian-security/apt.conf @@ -1,3 +1,5 @@ +APT::FTPArchive::Contents off; + Dir { ArchiveDir "/org/security.debian.org/ftp/"; @@ -7,8 +9,8 @@ Dir Default { - Packages::Compress ". gzip"; - Sources::Compress "gzip"; + Packages::Compress ". 
gzip bzip2"; + Sources::Compress "gzip bzip2"; DeLinkLimit 0; FileMode 0664; } @@ -18,10 +20,10 @@ tree "dists/oldstable/updates" FileList "/org/security.debian.org/dak-database/dists/oldstable_updates/$(SECTION)_binary-$(ARCH).list"; SourceFileList "/org/security.debian.org/dak-database/dists/oldstable_updates/$(SECTION)_source.list"; Sections "main contrib non-free"; - Architectures "alpha arm hppa i386 ia64 mips mipsel m68k powerpc s390 sparc source"; - BinOverride "override.woody.$(SECTION)"; - ExtraOverride "override.woody.extra.$(SECTION)"; - SrcOverride "override.woody.$(SECTION).src"; + Architectures "alpha amd64 arm hppa i386 ia64 mips mipsel m68k powerpc s390 sparc source"; + BinOverride "override.sarge.$(SECTION)"; + ExtraOverride "override.sarge.extra.$(SECTION)"; + SrcOverride "override.sarge.$(SECTION).src"; Contents " "; }; @@ -30,11 +32,13 @@ tree "dists/stable/updates" FileList "/org/security.debian.org/dak-database/dists/stable_updates/$(SECTION)_binary-$(ARCH).list"; SourceFileList "/org/security.debian.org/dak-database/dists/stable_updates/$(SECTION)_source.list"; Sections "main contrib non-free"; - Architectures "alpha amd64 arm hppa i386 ia64 mips mipsel m68k powerpc s390 sparc source"; - BinOverride "override.sarge.$(SECTION)"; - ExtraOverride "override.sarge.extra.$(SECTION)"; - SrcOverride "override.sarge.$(SECTION).src"; + Architectures "alpha amd64 arm hppa i386 ia64 mips mipsel powerpc s390 sparc source"; + BinOverride "override.etch.$(SECTION)"; + ExtraOverride "override.etch.extra.$(SECTION)"; + SrcOverride "override.etch.$(SECTION).src"; Contents " "; + Packages::Compress "gzip bzip2"; + Sources::Compress "gzip bzip2"; }; tree "dists/testing/updates" @@ -42,10 +46,10 @@ tree "dists/testing/updates" FileList "/org/security.debian.org/dak-database/dists/testing_updates/$(SECTION)_binary-$(ARCH).list"; SourceFileList "/org/security.debian.org/dak-database/dists/testing_updates/$(SECTION)_source.list"; Sections "main contrib non-free"; - 
Architectures "alpha amd64 arm hppa i386 ia64 mips mipsel m68k powerpc s390 sparc source"; - BinOverride "override.etch.$(SECTION)"; - ExtraOverride "override.etch.extra.$(SECTION)"; - SrcOverride "override.etch.$(SECTION).src"; + Architectures "alpha amd64 arm hppa i386 ia64 mips mipsel powerpc s390 sparc source"; + BinOverride "override.lenny.$(SECTION)"; + ExtraOverride "override.lenny.extra.$(SECTION)"; + SrcOverride "override.lenny.$(SECTION).src"; Contents " "; Packages::Compress "gzip bzip2"; Sources::Compress "gzip bzip2"; diff --git a/config/debian-security/apt.conf.buildd b/config/debian-security/apt.conf.buildd index e021791f..0285236e 100644 --- a/config/debian-security/apt.conf.buildd +++ b/config/debian-security/apt.conf.buildd @@ -1,8 +1,10 @@ +APT::FTPArchive::Contents off; + Dir { - ArchiveDir "/org/security.debian.org/buildd/"; - OverrideDir "/org/security.debian.org/override/"; - CacheDir "/org/security.debian.org/dak-database/"; + ArchiveDir "/srv/security.debian.org/buildd/"; + OverrideDir "/srv/security.debian.org/override/"; + CacheDir "/srv/security.debian.org/dak-database/"; }; Default @@ -20,19 +22,21 @@ bindirectory "etch" Contents " "; BinOverride "override.etch.all3"; + SrcOverride "override.etch.all3.src"; BinCacheDB "packages-accepted-etch.db"; PathPrefix ""; Packages::Extensions ".deb .udeb"; }; -bindirectory "woody" +bindirectory "lenny" { - Packages "woody/Packages"; - Sources "woody/Sources"; + Packages "lenny/Packages"; + Sources "lenny/Sources"; Contents " "; - BinOverride "override.woody.all3"; - BinCacheDB "packages-accepted-woody.db"; + BinOverride "override.lenny.all3"; + SrcOverride "override.lenny.all3.src"; + BinCacheDB "packages-accepted-lenny.db"; PathPrefix ""; Packages::Extensions ".deb .udeb"; }; @@ -44,6 +48,7 @@ bindirectory "sarge" Contents " "; BinOverride "override.sarge.all3"; + SrcOverride "override.sarge.all3.src"; BinCacheDB "packages-accepted-sarge.db"; PathPrefix ""; Packages::Extensions ".deb .udeb"; diff 
--git a/config/debian-security/cron.buildd b/config/debian-security/cron.buildd index 7aa42e8a..96607e48 100755 --- a/config/debian-security/cron.buildd +++ b/config/debian-security/cron.buildd @@ -2,8 +2,8 @@ # # Executed after cron.unchecked -ARCHS_oldstable="alpha arm hppa i386 ia64 m68k mips mipsel powerpc sparc s390" -ARCHS_stable="$ARCHS_oldstable" +ARCHS_oldstable="alpha arm hppa i386 ia64 m68k mips mipsel powerpc sparc s390 amd64" +ARCHS_stable="alpha amd64 arm hppa i386 ia64 mips mipsel powerpc sparc s390" ARCHS_testing="$ARCHS_stable" DISTS="oldstable stable testing" SSH_SOCKET=~/.ssh/buildd.debian.org.socket @@ -12,56 +12,65 @@ set -e export SCRIPTVARS=/org/security.debian.org/dak/config/debian-security/vars . $SCRIPTVARS -if [ ! -e $ftpdir/Archive_Maintenance_In_Progress ]; then - cd $masterdir - for d in $DISTS; do - eval SOURCES_$d=`stat -c "%Y" $base/buildd/$d/Sources.gz` - eval PACKAGES_$d=`stat -c "%Y" $base/buildd/$d/Packages.gz` - done - apt-ftparchive -qq generate apt.conf.buildd-security - dists= - for d in $DISTS; do - eval NEW_SOURCES_$d=`stat -c "%Y" $base/buildd/$d/Sources.gz` - eval NEW_PACKAGES_$d=`stat -c "%Y" $base/buildd/$d/Packages.gz` - old=SOURCES_$d - new=NEW_$old - if [ ${!new} -gt ${!old} ]; then - if [ -z "$dists" ]; then - dists="$d" - else - dists="$dists $d" - fi - continue +if [ -e $ftpdir/Archive_Maintenance_In_Progress ]; then + exit 0 +fi + +cd $masterdir +for d in $DISTS; do + eval SOURCES_$d=`stat -c "%Y" $base/buildd/$d/Sources.gz` + eval PACKAGES_$d=`stat -c "%Y" $base/buildd/$d/Packages.gz` +done + +apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate apt.conf.buildd +dists= +for d in $DISTS; do + eval NEW_SOURCES_$d=`stat -c "%Y" $base/buildd/$d/Sources.gz` + eval NEW_PACKAGES_$d=`stat -c "%Y" $base/buildd/$d/Packages.gz` + old=SOURCES_$d + new=NEW_$old + if [ ${!new} -gt ${!old} ]; then + if [ -z "$dists" ]; then + dists="$d" + else + dists="$dists $d" fi - old=PACKAGES_$d - new=NEW_$old - if [ ${!new} -gt 
${!old} ]; then - if [ -z "$dists" ]; then - dists="$d" - else - dists="$dists $d" - fi - continue + continue + fi + old=PACKAGES_$d + new=NEW_$old + if [ ${!new} -gt ${!old} ]; then + if [ -z "$dists" ]; then + dists="$d" + else + dists="$dists $d" fi + continue + fi +done + +if [ ! -z "$dists" ]; then + # setup ssh master process + ssh buildd@buildd -S $SSH_SOCKET -MN 2> /dev/null & + SSH_PID=$! + while [ ! -S $SSH_SOCKET ]; do + sleep 1 done - if [ ! -z "$dists" ]; then - # setup ssh master process - ssh buildd@buildd -S $SSH_SOCKET -MN 2> /dev/null & - SSH_PID=$! - while [ ! -S $SSH_SOCKET ]; do - sleep 1 - done - trap 'kill -TERM $SSH_PID' 0 - for d in $dists; do - archs=ARCHS_$d - ARCHS=${!archs} - cd /org/security.debian.org/buildd/$d + trap 'kill -TERM $SSH_PID' 0 + for d in $dists; do + archs=ARCHS_$d + ARCHS=${!archs} + cd /org/security.debian.org/buildd/$d + if [ "$d" != "oldstable" ]; then + # disabled for oldstable-security by ajt 2008-01-01 for a in $ARCHS; do quinn-diff -a /org/security.debian.org/buildd/Packages-arch-specific -A $a 2>/dev/null | ssh buildd@buildd -S $SSH_SOCKET wanna-build -d $d-security -b $a/build-db --merge-partial-quinn + ssh buildd@buildd -S $SSH_SOCKET wanna-build -d $d-security -A $a -b $a/build-db --merge-packages < Packages done - done - fi + else + ssh buildd@bester.farm.ftbfs.de -i ~/.ssh/id_bester sleep 1 + fi + done fi -ssh buildd@bester.farm.ftbfs.de -i ~/.ssh/id_bester sleep 1 diff --git a/config/debian-security/cron.daily b/config/debian-security/cron.daily index 6a1dbcae..dbc34b6a 100644 --- a/config/debian-security/cron.daily +++ b/config/debian-security/cron.daily @@ -10,31 +10,18 @@ export SCRIPTVARS=/org/security.debian.org/dak/config/debian-security/vars # Fix overrides -rsync -ql ftp-master::indices/override\* $overridedir +# disabled by ajt 2008-01-01: requires auth +#rsync -ql ftp-master::indices/override\* $overridedir cd $overridedir find . 
-name override\*.gz -type f -maxdepth 1 -mindepth 1 | xargs gunzip -f -find . -type l -maxdepth 1 -mindepth 1 | xargs rm - -rm -fr non-US -mkdir non-US -cd non-US -rsync -ql non-us::indices/override\* . -find . -name override\*.gz -type f -maxdepth 1 -mindepth 1 | xargs gunzip -find . -type l -maxdepth 1 -mindepth 1 | xargs rm -for i in *; do - if [ -f ../$i ]; then - cat $i >> ../$i; - fi; -done -cd .. -rm -fr non-US +find . -type l -maxdepth 1 -mindepth 1 | xargs --no-run-if-empty rm for suite in $suites; do case $suite in - oldstable) override_suite=woody;; - stable) override_suite=sarge;; - testing) override_suite=etch;; + oldstable) override_suite=sarge;; + stable) override_suite=etch;; + testing) override_suite=lenny;; *) echo "Unknown suite type ($suite)"; exit 1;; esac for component in $components; do @@ -47,19 +34,18 @@ for suite in $suites; do # XXX RUN AFUCKINGAWAY if [ "$override_type" = "udeb" ]; then if [ ! "$component" = "main" ]; then - continue; + continue fi if [ "$suite" = "unstable" ]; then dak control-overrides -q -S -t $override_type -s $suite -c updates/$component < override.$override_suite.$component$type fi else - dak control-overrides -q -S -t $override_type -s $suite -c updates/$component < override.$override_suite.$component$type + # XXX removed 2007-08-16 ajt + #dak control-overrides -q -S -t $override_type -s $suite -c updates/$component < override.$override_suite.$component$type + true fi case $suite in oldstable) - if [ ! 
"$override_type" = "udeb" ]; then - dak control-overrides -q -a -t $override_type -s $suite -c updates/$component < override.sarge.$component$type - fi dak control-overrides -q -a -t $override_type -s $suite -c updates/$component < override.sid.$component$type ;; stable) @@ -75,15 +61,18 @@ for suite in $suites; do done # Generate .all3 overides for the buildd support -for dist in woody sarge etch; do +for dist in sarge etch lenny; do rm -f override.$dist.all3 components="main contrib non-free"; if [ -f override.$dist.main.debian-installer ]; then - components="$components main.debian-installer"; + components="$components main.debian-installer" fi for component in $components; do - cat override.$dist.$component >> override.$dist.all3; - done; + cat override.$dist.$component >> override.$dist.all3 + if [ -e "override.$dist.$component.src" ]; then + cat override.$dist.$component.src >> override.$dist.all3.src + fi + done done ################################################################################ diff --git a/config/debian-security/cron.unchecked b/config/debian-security/cron.unchecked index 9f91e688..641f8bfb 100755 --- a/config/debian-security/cron.unchecked +++ b/config/debian-security/cron.unchecked @@ -33,4 +33,4 @@ if ! 
$doanything; then exit 0 fi -sh $masterdir/cron.buildd-security +sh $masterdir/cron.buildd diff --git a/config/debian-security/dak.conf b/config/debian-security/dak.conf index 0af66820..fb219e5e 100644 --- a/config/debian-security/dak.conf +++ b/config/debian-security/dak.conf @@ -1,12 +1,12 @@ Dinstall { GPGKeyring { - "/org/keyring.debian.org/keyrings/debian-keyring.gpg"; - "/org/keyring.debian.org/keyrings/debian-keyring.pgp"; + "/org/keyring.debian.org/keyrings/debian-keyring.gpg"; + "/org/keyring.debian.org/keyrings/debian-keyring.pgp"; }; SigningKeyring "/org/non-us.debian.org/s3kr1t/dot-gnupg/secring.gpg"; SigningPubKeyring "/org/non-us.debian.org/s3kr1t/dot-gnupg/pubring.gpg"; - SigningKeyIds "2D230C5F"; + SigningKeyIds "6070D3A1"; SendmailCommand "/usr/sbin/sendmail -odq -oi -t"; MyEmailAddress "Debian Installer "; MyAdminAddress "ftpmaster@debian.org"; @@ -16,6 +16,7 @@ Dinstall PackagesServer "packages.debian.org"; LockFile "/org/security.debian.org/dak/lock"; Bcc "archive@ftp-master.debian.org"; + // GroupOverrideFilename "override.group-maint"; FutureTimeTravelGrace 28800; // 8 hours PastCutoffYear "1984"; SkipTime 300; @@ -30,14 +31,14 @@ Dinstall }; SecurityQueueHandling "true"; SecurityQueueBuild "true"; - DefaultSuite "Testing"; + DefaultSuite "oldstable"; SuiteSuffix "updates"; OverrideMaintainer "dak@security.debian.org"; StableDislocationSupport "false"; LegacyStableHasNoSections "false"; }; -Process-Unchecked +Process-New { AcceptedLockFile "/org/security.debian.org/lock/unchecked.lock"; }; @@ -53,9 +54,9 @@ Queue-Report { Directories { - byhand; - new; - accepted; + // byhand; + // new; + unembargoed; }; }; @@ -123,7 +124,8 @@ Suite { source; all; - alpha; + alpha; + amd64; arm; hppa; i386; @@ -136,12 +138,12 @@ Suite sparc; }; Announce "dak@security.debian.org"; - Version "3.0"; + Version "3.1"; Origin "Debian"; Label "Debian-Security"; - Description "Debian 3.0 Security Updates"; - CodeName "woody"; - OverrideCodeName "woody"; + Description 
"Debian 3.1 Security Updates"; + CodeName "sarge"; + OverrideCodeName "sarge"; CopyDotDak "/org/security.debian.org/queue/done/"; }; @@ -157,13 +159,12 @@ Suite { source; all; - alpha; amd64; + alpha; arm; hppa; i386; ia64; - m68k; mips; mipsel; powerpc; @@ -171,12 +172,12 @@ Suite sparc; }; Announce "dak@security.debian.org"; - Version "3.1"; + Version ""; Origin "Debian"; Label "Debian-Security"; - Description "Debian 3.1 Security Updates"; - CodeName "sarge"; - OverrideCodeName "sarge"; + Description "Debian 4.0 Security Updates"; + CodeName "etch"; + OverrideCodeName "etch"; CopyDotDak "/org/security.debian.org/queue/done/"; }; @@ -198,7 +199,6 @@ Suite hppa; i386; ia64; - m68k; mips; mipsel; powerpc; @@ -210,11 +210,10 @@ Suite Origin "Debian"; Label "Debian-Security"; Description "Debian testing Security Updates"; - CodeName "etch"; - OverrideCodeName "etch"; + CodeName "lenny"; + OverrideCodeName "lenny"; CopyDotDak "/org/security.debian.org/queue/done/"; }; - }; SuiteMappings @@ -223,7 +222,7 @@ SuiteMappings "silent-map stable-security stable"; // JT - FIXME, hackorama // "silent-map testing-security stable"; - "silent-map etch-secure testing"; + "silent-map etch-secure stable"; "silent-map testing-security testing"; }; @@ -251,6 +250,7 @@ Dir New "/org/security.debian.org/queue/new/"; Reject "/org/security.debian.org/queue/reject/"; Unchecked "/org/security.debian.org/queue/unchecked/"; + ProposedUpdates "/does/not/exist/"; // XXX fixme Embargoed "/org/security.debian.org/queue/embargoed/"; Unembargoed "/org/security.debian.org/queue/unembargoed/"; diff --git a/config/debian/apt.conf b/config/debian/apt.conf index aee3ab85..408e7dce 100644 --- a/config/debian/apt.conf +++ b/config/debian/apt.conf @@ -50,7 +50,7 @@ tree "dists/testing" FileList "/srv/ftp.debian.org/database/dists/testing_$(SECTION)_binary-$(ARCH).list"; SourceFileList "/srv/ftp.debian.org/database/dists/testing_$(SECTION)_source.list"; Sections "main contrib non-free"; - Architectures 
"alpha amd64 arm hppa i386 ia64 mips mipsel powerpc s390 sparc source"; + Architectures "alpha amd64 arm armel hppa i386 ia64 mips mipsel powerpc s390 sparc source"; BinOverride "override.lenny.$(SECTION)"; ExtraOverride "override.lenny.extra.$(SECTION)"; SrcOverride "override.lenny.$(SECTION).src"; @@ -61,7 +61,7 @@ tree "dists/testing-proposed-updates" FileList "/srv/ftp.debian.org/database/dists/testing-proposed-updates_$(SECTION)_binary-$(ARCH).list"; SourceFileList "/srv/ftp.debian.org/database/dists/testing-proposed-updates_$(SECTION)_source.list"; Sections "main contrib non-free"; - Architectures "alpha amd64 arm hppa i386 ia64 mips mipsel powerpc s390 sparc source"; + Architectures "alpha amd64 arm armel hppa i386 ia64 mips mipsel powerpc s390 sparc source"; BinOverride "override.lenny.$(SECTION)"; ExtraOverride "override.lenny.extra.$(SECTION)"; SrcOverride "override.lenny.$(SECTION).src"; @@ -73,7 +73,7 @@ tree "dists/unstable" FileList "/srv/ftp.debian.org/database/dists/unstable_$(SECTION)_binary-$(ARCH).list"; SourceFileList "/srv/ftp.debian.org/database/dists/unstable_$(SECTION)_source.list"; Sections "main contrib non-free"; - Architectures "alpha amd64 arm hppa hurd-i386 i386 ia64 mips mipsel m68k powerpc s390 sparc source"; + Architectures "alpha amd64 arm armel hppa hurd-i386 i386 ia64 mips mipsel m68k powerpc s390 sparc source"; BinOverride "override.sid.$(SECTION)"; ExtraOverride "override.sid.extra.$(SECTION)"; SrcOverride "override.sid.$(SECTION).src"; @@ -109,7 +109,7 @@ tree "dists/testing/main" { FileList "/srv/ftp.debian.org/database/dists/testing_main_$(SECTION)_binary-$(ARCH).list"; Sections "debian-installer"; - Architectures "alpha amd64 arm hppa i386 ia64 mips mipsel powerpc s390 sparc"; + Architectures "alpha amd64 arm armel hppa i386 ia64 mips mipsel powerpc s390 sparc"; BinOverride "override.lenny.main.$(SECTION)"; SrcOverride "override.lenny.main.src"; BinCacheDB "packages-debian-installer-$(ARCH).db"; @@ -121,7 +121,7 @@ tree 
"dists/testing/non-free" { FileList "/srv/ftp.debian.org/database/dists/testing_non-free_$(SECTION)_binary-$(ARCH).list"; Sections "debian-installer"; - Architectures "alpha amd64 arm hppa i386 ia64 mips mipsel powerpc s390 sparc"; + Architectures "alpha amd64 arm armel hppa i386 ia64 mips mipsel powerpc s390 sparc"; BinOverride "override.lenny.main.$(SECTION)"; SrcOverride "override.lenny.main.src"; BinCacheDB "packages-debian-installer-$(ARCH).db"; @@ -133,7 +133,7 @@ tree "dists/testing-proposed-updates/main" { FileList "/srv/ftp.debian.org/database/dists/testing-proposed-updates_main_$(SECTION)_binary-$(ARCH).list"; Sections "debian-installer"; - Architectures "alpha amd64 arm hppa i386 ia64 mips mipsel powerpc s390 sparc"; + Architectures "alpha amd64 arm armel hppa i386 ia64 mips mipsel powerpc s390 sparc"; BinOverride "override.lenny.main.$(SECTION)"; SrcOverride "override.lenny.main.src"; BinCacheDB "packages-debian-installer-$(ARCH).db"; @@ -145,7 +145,7 @@ tree "dists/unstable/main" { FileList "/srv/ftp.debian.org/database/dists/unstable_main_$(SECTION)_binary-$(ARCH).list"; Sections "debian-installer"; - Architectures "alpha amd64 arm hppa hurd-i386 i386 ia64 mips mipsel m68k powerpc s390 sparc"; + Architectures "alpha amd64 arm armel hppa hurd-i386 i386 ia64 mips mipsel m68k powerpc s390 sparc"; BinOverride "override.sid.main.$(SECTION)"; SrcOverride "override.sid.main.src"; BinCacheDB "packages-debian-installer-$(ARCH).db"; @@ -157,7 +157,7 @@ tree "dists/unstable/non-free" { FileList "/srv/ftp.debian.org/database/dists/unstable_non-free_$(SECTION)_binary-$(ARCH).list"; Sections "debian-installer"; - Architectures "alpha amd64 arm hppa hurd-i386 i386 ia64 mips mipsel m68k powerpc s390 sparc"; + Architectures "alpha amd64 arm armel hppa hurd-i386 i386 ia64 mips mipsel m68k powerpc s390 sparc"; BinOverride "override.sid.main.$(SECTION)"; SrcOverride "override.sid.main.src"; BinCacheDB "packages-debian-installer-$(ARCH).db"; @@ -169,7 +169,7 @@ tree 
"dists/experimental/main" { FileList "/srv/ftp.debian.org/database/dists/experimental_main_$(SECTION)_binary-$(ARCH).list"; Sections "debian-installer"; - Architectures "alpha amd64 arm hppa i386 ia64 mips mipsel m68k powerpc s390 sparc"; + Architectures "alpha amd64 arm armel hppa i386 ia64 mips mipsel m68k powerpc s390 sparc"; BinOverride "override.sid.main.$(SECTION)"; SrcOverride "override.sid.main.src"; BinCacheDB "packages-debian-installer-$(ARCH).db"; @@ -181,7 +181,7 @@ tree "dists/experimental/non-free" { FileList "/srv/ftp.debian.org/database/dists/experimental_non-free_$(SECTION)_binary-$(ARCH).list"; Sections "debian-installer"; - Architectures "alpha amd64 arm hppa hurd-i386 i386 ia64 mips mipsel m68k powerpc s390 sparc"; + Architectures "alpha amd64 arm armel hppa hurd-i386 i386 ia64 mips mipsel m68k powerpc s390 sparc"; BinOverride "override.sid.main.$(SECTION)"; SrcOverride "override.sid.main.src"; BinCacheDB "packages-debian-installer-$(ARCH).db"; @@ -196,7 +196,7 @@ tree "dists/experimental" FileList "/srv/ftp.debian.org/database/dists/experimental_$(SECTION)_binary-$(ARCH).list"; SourceFileList "/srv/ftp.debian.org/database/dists/experimental_$(SECTION)_source.list"; Sections "main contrib non-free"; - Architectures "alpha amd64 arm hppa hurd-i386 i386 ia64 mips mipsel m68k powerpc s390 sparc source"; + Architectures "alpha amd64 arm armel hppa hurd-i386 i386 ia64 mips mipsel m68k powerpc s390 sparc source"; BinOverride "override.sid.$(SECTION)"; SrcOverride "override.sid.$(SECTION).src"; Contents " "; diff --git a/config/debian/cron.daily b/config/debian/cron.daily index b1af5312..48f74687 100755 --- a/config/debian/cron.daily +++ b/config/debian/cron.daily @@ -89,8 +89,7 @@ dak make-suite-file-list TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) # Update fingerprints -# [JT - disabled, dak import-ldap-fingerprints currently can ask questions] -#dak import-ldap-fingerprints +dak import-keyring -L 
/srv/keyring.debian.org/keyrings/debian-keyring.gpg # Generate override files cd $overridedir @@ -158,7 +157,14 @@ pg_dump projectb > $POSTDUMP TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) # Vacuum the database -echo "VACUUM; VACUUM ANALYZE;" | psql projectb 2>&1 | grep -v "^NOTICE: Skipping.*only table owner can VACUUM it$" +# (JJ, 20-04-2008) disabled, as we have autovacuum set to on in postgres. +# refer to http://www.postgresql.org/docs/current/static/routine-vacuuming.html#AUTOVACUUM +# which says "Beginning in PostgreSQL 8.1, there is an optional feature called autovacuum, +# whose purpose is to automate the execution of VACUUM and ANALYZE commands." +# echo "VACUUM; VACUUM ANALYZE;" | psql projectb 2>&1 | grep -v "^NOTICE: Skipping.*only table owner can VACUUM it$" + +echo "Expiring old database dumps..." +(cd $base/backup; $scriptsdir/expire_dumps -d . -p -f "dump_*") ################################################################################ @@ -173,6 +179,9 @@ $scriptsdir/dm-monitor >$webdir/dm-uploaders.html ################################################################################ +# Push katie@merkel so it syncs the projectb there. Returns immediately, the sync runs detached +ssh -2 -i ~/.ssh/push_merkel_projectb katie@merkel.debian.org sleep 1 + # Run mirror-split #time dak mirror-split @@ -203,10 +212,10 @@ apt-ftparchive -q clean apt.conf TS=$(($TS+1)); echo Archive maintenance timestamp $TS: $(date +%X) -# Compress psql backups older than a month, but no more than 20 of them +# Compress psql backups older than a week, but no more than 20 of them (cd $base/backup/ - find -maxdepth 1 -mindepth 1 -type f -name 'dump_*' \! -name '*.bz2' \! -name '*.gz' -mtime +30 | + find -maxdepth 1 -mindepth 1 -type f -name 'dump_*' \! -name '*.bz2' \! 
-name '*.gz' -mtime +7 | sort | head -n20 | while read dumpname; do echo "Compressing $dumpname" bzip2 -9 "$dumpname" diff --git a/config/debian/cron.weekly b/config/debian/cron.weekly index 53c866f2..f7fa9c04 100755 --- a/config/debian/cron.weekly +++ b/config/debian/cron.weekly @@ -17,8 +17,12 @@ fi # Clean up apt-ftparchive's databases +# Split queue/done +dak split-done > /dev/null + cd $configdir apt-ftparchive -q clean apt.conf apt-ftparchive -q clean apt.conf.buildd + ################################################################################ diff --git a/config/debian/dak.conf b/config/debian/dak.conf index 4b8412c2..d5a7df21 100644 --- a/config/debian/dak.conf +++ b/config/debian/dak.conf @@ -26,6 +26,7 @@ Dinstall OverrideDisparityCheck "true"; StableDislocationSupport "false"; DefaultSuite "unstable"; + UserExtensions "/srv/ftp.debian.org/dak/config/debian/extensions.py"; QueueBuildSuites { unstable; @@ -33,9 +34,15 @@ Dinstall Reject { NoSourceOnly "true"; + ReleaseTransitions "/srv/ftp.debian.org/testing/hints/transitions.yaml"; }; }; +Transitions +{ + TempPath "/srv/ftp.debian.org/tmp/"; +}; + Binary-Upload-Restrictions { Components @@ -220,9 +227,9 @@ Suite sparc; }; Announce "debian-changes@lists.debian.org"; - Version "3.1r6"; + Version "3.1r8"; Origin "Debian"; - Description "Debian 3.1r6 Released 7 April 2007"; + Description "Debian 3.1r8 Released 12 April 2008"; CodeName "sarge"; OverrideCodeName "sarge"; Priority "2"; @@ -406,6 +413,7 @@ Suite alpha; amd64; arm; + armel; hppa; i386; ia64; @@ -443,6 +451,7 @@ Suite alpha; amd64; arm; + armel; hppa; i386; ia64; @@ -528,6 +537,7 @@ Suite alpha; amd64; arm; + armel; hppa; hurd-i386; i386; @@ -577,6 +587,7 @@ Suite alpha; amd64; arm; + armel; hppa; hurd-i386; i386; @@ -625,6 +636,7 @@ SuiteMappings "map oldstable-security oldstable-proposed-updates"; "map stable proposed-updates"; "map stable-security proposed-updates"; + "map stable-proposed-updates proposed-updates"; "map-unreleased oldstable 
unstable"; "map-unreleased stable unstable"; "map-unreleased proposed-updates unstable"; @@ -636,7 +648,7 @@ SuiteMappings AutomaticByHandPackages { "debian-installer-images" { - Source "xxx-debian-installer"; + Source "debian-installer"; Section "raw-installer"; Extension "tar.gz"; Script "/srv/ftp.debian.org/dak/scripts/debian/byhand-di"; diff --git a/config/debian/extensions.py b/config/debian/extensions.py new file mode 100644 index 00000000..e17e9af8 --- /dev/null +++ b/config/debian/extensions.py @@ -0,0 +1,100 @@ +import sys, os, textwrap + +import apt_pkg +import daklib.utils, daklib.database +import syck + +import daklib.extensions +from daklib.extensions import replace_dak_function + +def check_transition(): + changes = dak_module.changes + reject = dak_module.reject + Cnf = dak_module.Cnf + + sourcepkg = changes["source"] + + # No sourceful upload -> no need to do anything else, direct return + # We also work with unstable uploads, not experimental or those going to some + # proposed-updates queue + if "source" not in changes["architecture"] or "unstable" not in changes["distribution"]: + return + + # Also only check if there is a file defined (and existant) with + # checks. + transpath = Cnf.get("Dinstall::Reject::ReleaseTransitions", "") + if transpath == "" or not os.path.exists(transpath): + return + + # Parse the yaml file + sourcefile = file(transpath, 'r') + sourcecontent = sourcefile.read() + try: + transitions = syck.load(sourcecontent) + except syck.error, msg: + # This shouldn't happen, there is a wrapper to edit the file which + # checks it, but we prefer to be safe than ending up rejecting + # everything. + daklib.utils.warn("Not checking transitions, the transitions file is broken: %s." % (msg)) + return + + # Now look through all defined transitions + for trans in transitions: + t = transitions[trans] + source = t["source"] + expected = t["new"] + + # Will be None if nothing is in testing. 
+ current = daklib.database.get_suite_version(source, "testing") + if current is not None: + compare = apt_pkg.VersionCompare(current, expected) + + if current is None or compare < 0: + # This is still valid, the current version in testing is older than + # the new version we wait for, or there is none in testing yet + + # Check if the source we look at is affected by this. + if sourcepkg in t['packages']: + # The source is affected, lets reject it. + + rejectmsg = "%s: part of the %s transition.\n\n" % ( + sourcepkg, trans) + + if current is not None: + currentlymsg = "at version %s" % (current) + else: + currentlymsg = "not present in testing" + + rejectmsg += "Transition description: %s\n\n" % (t["reason"]) + + rejectmsg += "\n".join(textwrap.wrap("""Your package +is part of a testing transition designed to get %s migrated (it is +currently %s, we need version %s). This transition is managed by the +Release Team, and %s is the Release-Team member responsible for it. +Please mail debian-release@lists.debian.org or contact %s directly if you +need further assistance. 
You might want to upload to experimental until this +transition is done.""" + % (source, currentlymsg, expected,t["rm"], t["rm"]))) + + reject(rejectmsg + "\n") + return + +@replace_dak_function("process-unchecked", "check_signed_by_key") +def check_signed_by_key(oldfn): + changes = dak_module.changes + reject = dak_module.reject + + if changes["source"] == "dpkg": + fpr = changes["fingerprint"] + (uid, uid_name) = dak_module.lookup_uid_from_fingerprint(fpr) + if fpr == "5906F687BD03ACAD0D8E602EFCF37657" or uid == "iwj": + reject("Upload blocked due to hijack attempt 2008/03/19") + + # NB: 1.15.0, 1.15.2 signed by this key targetted at unstable + # have been made available in the wild, and should remain + # blocked until Debian's dpkg has revved past those version + # numbers + + oldfn() + + check_transition() diff --git a/config/debian/vars b/config/debian/vars index f92ac93c..b88a83d2 100644 --- a/config/debian/vars +++ b/config/debian/vars @@ -4,7 +4,7 @@ base=/srv/ftp.debian.org ftpdir=$base/ftp webdir=$base/web indices=$ftpdir/indices -archs="alpha amd64 arm hppa hurd-i386 i386 ia64 m68k mips mipsel powerpc s390 sparc" +archs="alpha amd64 arm armel hppa hurd-i386 i386 ia64 m68k mips mipsel powerpc s390 sparc" scriptdir=$base/scripts masterdir=$base/dak/ diff --git a/dak/clean_proposed_updates.py b/dak/clean_proposed_updates.py index a911f899..278dfdf6 100755 --- a/dak/clean_proposed_updates.py +++ b/dak/clean_proposed_updates.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # Remove obsolete .changes files from proposed-updates -# Copyright (C) 2001, 2002, 2003, 2004, 2006 James Troup +# Copyright (C) 2001, 2002, 2003, 2004, 2006, 2008 James Troup # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -80,11 +80,11 @@ def check_changes (filename): daklib.utils.fubar("unknown type, fix me") if not pu.has_key(pkg): # FIXME - daklib.utils.warn("%s doesn't seem to exist in p-u?? 
(from %s [%s])" % (pkg, file, filename)) + daklib.utils.warn("%s doesn't seem to exist in %s?? (from %s [%s])" % (pkg, Options["suite"], file, filename)) continue if not pu[pkg].has_key(arch): # FIXME - daklib.utils.warn("%s doesn't seem to exist for %s in p-u?? (from %s [%s])" % (pkg, arch, file, filename)) + daklib.utils.warn("%s doesn't seem to exist for %s in %s?? (from %s [%s])" % (pkg, arch, Options["suite"], file, filename)) continue pu_version = daklib.utils.re_no_epoch.sub('', pu[pkg][arch]) if pu_version == version: @@ -99,7 +99,8 @@ def check_changes (filename): if new_num_files == 0: print "%s: no files left, superseded by %s" % (filename, pu_version) dest = Cnf["Dir::Morgue"] + "/misc/" - daklib.utils.move(filename, dest) + if not Options["no-action"]: + daklib.utils.move(filename, dest) elif new_num_files < num_files: print "%s: lost files, MWAAP." % (filename) else: @@ -112,7 +113,7 @@ def check_joey (filename): file = daklib.utils.open_file(filename) cwd = os.getcwd() - os.chdir("%s/dists/proposed-updates" % (Cnf["Dir::Root"])) + os.chdir("%s/dists/%s" % (Cnf["Dir::Root"]), Options["suite"]) for line in file.readlines(): line = line.rstrip() @@ -139,13 +140,13 @@ def init_pu (): SELECT b.package, b.version, a.arch_string FROM bin_associations ba, binaries b, suite su, architecture a WHERE b.id = ba.bin AND ba.suite = su.id - AND su.suite_name = 'proposed-updates' AND a.id = b.architecture + AND su.suite_name = '%s' AND a.id = b.architecture UNION SELECT s.source, s.version, 'source' FROM src_associations sa, source s, suite su WHERE s.id = sa.source AND sa.suite = su.id - AND su.suite_name = 'proposed-updates' + AND su.suite_name = '%s' ORDER BY package, version, arch_string -""") +""" % (Options["suite"], Options["suite"])) ql = q.getresult() for i in ql: pkg = i[0] @@ -161,12 +162,18 @@ def main (): Cnf = daklib.utils.get_conf() Arguments = [('d', "debug", "Clean-Proposed-Updates::Options::Debug"), - 
('v',"verbose","Clean-Proposed-Updates::Options::Verbose"), - ('h',"help","Clean-Proposed-Updates::Options::Help")] - for i in [ "debug", "verbose", "help" ]: + ('v', "verbose", "Clean-Proposed-Updates::Options::Verbose"), + ('h', "help", "Clean-Proposed-Updates::Options::Help"), + ('s', "suite", "Clean-Proposed-Updates::Options::Suite", "HasArg"), + ('n', "no-action", "Clean-Proposed-Updates::Options::No-Action"),] + for i in [ "debug", "verbose", "help", "no-action" ]: if not Cnf.has_key("Clean-Proposed-Updates::Options::%s" % (i)): Cnf["Clean-Proposed-Updates::Options::%s" % (i)] = "" + # suite defaults to proposed-updates + if not Cnf.has_key("Clean-Proposed-Updates::Options::Suite"): + Cnf["Clean-Proposed-Updates::Options::Suite"] = "proposed-updates" + arguments = apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv) Options = Cnf.SubTree("Clean-Proposed-Updates::Options") diff --git a/dak/clean_suites.py b/dak/clean_suites.py index a59d6e3a..e680f5ef 100755 --- a/dak/clean_suites.py +++ b/dak/clean_suites.py @@ -250,7 +250,7 @@ def clean_maintainers(): q = projectB.query(""" SELECT m.id FROM maintainer m WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.maintainer = m.id) - AND NOT EXISTS (SELECT 1 FROM source s WHERE s.maintainer = m.id) + AND NOT EXISTS (SELECT 1 FROM source s WHERE s.maintainer = m.id OR s.changedby = m.id) AND NOT EXISTS (SELECT 1 FROM src_uploaders u WHERE u.maintainer = m.id)""") ql = q.getresult() diff --git a/dak/dak.py b/dak/dak.py index f82d74cc..0eeb9d7b 100755 --- a/dak/dak.py +++ b/dak/dak.py @@ -28,8 +28,28 @@ ################################################################################ -import sys -import daklib.utils +import sys, imp +import daklib.utils, daklib.extensions + +################################################################################ + +class UserExtension: + def __init__(self, user_extension = None): + if user_extension: + m = imp.load_source("dak_userext", user_extension) + d = m.__dict__ + else: + m, d 
= None, {} + self.__dict__["_module"] = m + self.__dict__["_d"] = d + + def __getattr__(self, a): + if a in self.__dict__: return self.__dict__[a] + if a[0] == "_": raise AttributeError, a + return self._d.get(a, None) + + def __setattr__(self, a, v): + self._d[a] = v ################################################################################ @@ -71,7 +91,9 @@ def init(): "Clean cruft from incoming"), ("clean-proposed-updates", "Remove obsolete .changes from proposed-updates"), - + + ("transitions", + "Manage the release transition file"), ("check-overrides", "Override cruft checks"), ("check-proposed-updates", @@ -143,6 +165,13 @@ Availble commands:""" def main(): """Launch dak functionality.""" + Cnf = daklib.utils.get_conf() + + if Cnf.has_key("Dinstall::UserExtensions"): + userext = UserExtension(Cnf["Dinstall::UserExtensions"]) + else: + userext = UserExtension() + functionality = init() modules = [ command for (command, _) in functionality ] @@ -179,6 +208,13 @@ def main(): # Invoke the module module = __import__(cmdname.replace("-","_")) + + module.dak_userext = userext + userext.dak_module = module + + daklib.extensions.init(cmdname, module, userext) + if userext.init is not None: userext.init(cmdname) + module.main() ################################################################################ diff --git a/dak/import_keyring.py b/dak/import_keyring.py index 7f35b146..be35a5c2 100755 --- a/dak/import_keyring.py +++ b/dak/import_keyring.py @@ -237,7 +237,7 @@ def main(): name = desuid_byid[id][1] oname = db_uid_byid[id][1] if name and oname != name: - changes.append((uid[1], "Full name: %s\n" % (name))) + changes.append((uid[1], "Full name: %s" % (name))) projectB.query("UPDATE uid SET name = '%s' WHERE id = %s" % (pg.escape_string(name), id)) @@ -262,7 +262,7 @@ def main(): for f,(u,fid,kr) in db_fin_info.iteritems(): if kr != keyring_id: continue if f in fpr: continue - changes.append((db_uid_byid.get(u, [None])[0], "Removed key: %s\n" % (f))) + 
changes.append((db_uid_byid.get(u, [None])[0], "Removed key: %s" % (f))) projectB.query("UPDATE fingerprint SET keyring = NULL WHERE id = %d" % (fid)) # For the keys in this keyring, add/update any fingerprints that've @@ -275,7 +275,7 @@ def main(): if olduid == None: olduid = -1 if oldkid == None: oldkid = -1 if oldfid == -1: - changes.append((newuiduid, "Added key: %s\n" % (f))) + changes.append((newuiduid, "Added key: %s" % (f))) if newuid: projectB.query("INSERT INTO fingerprint (fingerprint, uid, keyring) VALUES ('%s', %d, %d)" % (f, newuid, keyring_id)) else: @@ -283,9 +283,11 @@ def main(): else: if newuid and olduid != newuid: if olduid != -1: - changes.append((newuiduid, "Linked key: %s (formerly belonging to %s)" % (f, db_uid_byid[olduid][0]))) + changes.append((newuiduid, "Linked key: %s" % f)) + changes.append((newuiduid, " (formerly belonging to %s)" % (db_uid_byid[olduid][0]))) else: - changes.append((newuiduid, "Linked key: %s (formerly unowned)\n" % (f))) + changes.append((newuiduid, "Linked key: %s" % f)) + changes.append((newuiduid, " (formerly unowned)")) projectB.query("UPDATE fingerprint SET uid = %d WHERE id = %d" % (newuid, oldfid)) if oldkid != keyring_id: @@ -298,12 +300,12 @@ def main(): changesd = {} for (k, v) in changes: if k not in changesd: changesd[k] = "" - changesd[k] += " " + v + changesd[k] += " %s\n" % (v) keys = changesd.keys() keys.sort() for k in keys: - print "%s\n%s" % (k, changesd[k]) + print "%s\n%s\n" % (k, changesd[k]) ################################################################################ diff --git a/dak/new_security_install.py b/dak/new_security_install.py index a3508462..99378e7a 100755 --- a/dak/new_security_install.py +++ b/dak/new_security_install.py @@ -23,7 +23,7 @@ import daklib.queue, daklib.logging, daklib.utils, daklib.database import apt_pkg, os, sys, pwd, time, re, commands -re_taint_free = re.compile(r"^['/;\-\+\.\s\w]+$"); +re_taint_free = re.compile(r"^['/;\-\+\.~\s\w]+$"); Cnf = None Options 
= None @@ -44,6 +44,7 @@ def init(): ('n', "no-action", "Security-Install::Options::No-Action"), ('s', "sudo", "Security-Install::Options::Sudo"), (' ', "no-upload", "Security-Install::Options::No-Upload"), + ('u', "fg-upload", "Security-Install::Options::Foreground-Upload"), (' ', "drop-advisory", "Security-Install::Options::Drop-Advisory"), ('A', "approve", "Security-Install::Options::Approve"), ('R', "reject", "Security-Install::Options::Reject"), @@ -71,6 +72,8 @@ def init(): daklib.utils.fubar("Process what?") Upload = daklib.queue.Upload(Cnf) + if Options["No-Action"]: + Options["Sudo"] = "" if not Options["Sudo"] and not Options["No-Action"]: Logger = Upload.Logger = daklib.logging.Logger(Cnf, "new-security-install") @@ -197,9 +200,96 @@ def yes_no(prompt): def do_upload(): if Options["No-Upload"]: print "Not uploading as requested" - return + elif Options["Foreground-Upload"]: + actually_upload(changes) + else: + child = os.fork() + if child == 0: + actually_upload(changes) + os._exit(0) + print "Uploading in the background" + +def actually_upload(changes_files): + file_list = "" + suites = {} + component_mapping = {} + for component in Cnf.SubTree("Security-Install::ComponentMappings").List(): + component_mapping[component] = Cnf["Security-Install::ComponentMappings::%s" % (component)] + uploads = {}; # uploads[uri] = file_list + changesfiles = {}; # changesfiles[uri] = file_list + package_list = {} # package_list[source_name][version] + changes_files.sort(daklib.utils.changes_compare) + for changes_file in changes_files: + changes_file = daklib.utils.validate_changes_file_arg(changes_file) + # Reset variables + components = {} + upload_uris = {} + file_list = [] + Upload.init_vars() + # Parse the .dak file for the .changes file + Upload.pkg.changes_file = changes_file + Upload.update_vars() + files = Upload.pkg.files + changes = Upload.pkg.changes + dsc = Upload.pkg.dsc + # We have the changes, now return if its amd64, to not upload them to ftp-master + 
if changes["distribution"].has_key("oldstable-security") and changes["architecture"].has_key("amd64"): + print "Not uploading amd64 oldstable-security changes to ftp-master\n" + continue + # Build the file list for this .changes file + for file in files.keys(): + poolname = os.path.join(Cnf["Dir::Root"], Cnf["Dir::PoolRoot"], + daklib.utils.poolify(changes["source"], files[file]["component"]), + file) + file_list.append(poolname) + orig_component = files[file].get("original component", files[file]["component"]) + components[orig_component] = "" + # Determine the upload uri for this .changes file + for component in components.keys(): + upload_uri = component_mapping.get(component) + if upload_uri: + upload_uris[upload_uri] = "" + num_upload_uris = len(upload_uris.keys()) + if num_upload_uris == 0: + daklib.utils.fubar("%s: No valid upload URI found from components (%s)." + % (changes_file, ", ".join(components.keys()))) + elif num_upload_uris > 1: + daklib.utils.fubar("%s: more than one upload URI (%s) from components (%s)." 
+ % (changes_file, ", ".join(upload_uris.keys()), + ", ".join(components.keys()))) + upload_uri = upload_uris.keys()[0] + # Update the file list for the upload uri + if not uploads.has_key(upload_uri): + uploads[upload_uri] = [] + uploads[upload_uri].extend(file_list) + # Update the changes list for the upload uri + if not changesfiles.has_key(upload_uri): + changesfiles[upload_uri] = [] + changesfiles[upload_uri].append(changes_file) + # Remember the suites and source name/version + for suite in changes["distribution"].keys(): + suites[suite] = "" + # Remember the source name and version + if changes["architecture"].has_key("source") and \ + changes["distribution"].has_key("testing"): + if not package_list.has_key(dsc["source"]): + package_list[dsc["source"]] = {} + package_list[dsc["source"]][dsc["version"]] = "" + + for uri in uploads.keys(): + uploads[uri].extend(changesfiles[uri]) + (host, path) = uri.split(":") + file_list = " ".join(uploads[uri]) + print "Uploading files to %s..." % (host) + spawn("lftp -c 'open %s; cd %s; put %s'" % (host, path, file_list)) - print "Would upload to ftp-master" # XXX + if not Options["No-Action"]: + filename = "%s/testing-processed" % (Cnf["Dir::Log"]) + file = daklib.utils.open_file(filename, 'a') + for source in package_list.keys(): + for version in package_list[source].keys(): + file.write(" ".join([source, version])+'\n') + file.close() def generate_advisory(template): global changes, advisory @@ -317,7 +407,6 @@ def generate_advisory(template): adv = daklib.utils.TemplateSubst(Subst, template) return adv - def spawn(command): if not re_taint_free.match(command): daklib.utils.fubar("Invalid character in \"%s\"." 
% (command)) @@ -342,7 +431,7 @@ def sudo(arg, fn, exit): if advisory == None: daklib.utils.fubar("Must set advisory name") os.spawnl(os.P_WAIT, "/usr/bin/sudo", "/usr/bin/sudo", "-u", "dak", "-H", - "/usr/local/bin/dak new-security-install", "-"+arg, "--", advisory) + "/usr/local/bin/dak", "new-security-install", "-"+arg, "--", advisory) else: fn() if exit: @@ -424,7 +513,7 @@ def _do_Disembargo(): for c in changes: daklib.utils.copy(c, os.path.join(dest, c)) os.unlink(c) - k = c[:8] + ".dak" + k = c[:-8] + ".dak" daklib.utils.copy(k, os.path.join(dest, k)) os.unlink(k) @@ -450,14 +539,14 @@ def _do_Reject(): aborted = Upload.do_reject() if not aborted: - os.unlink(c[:-8]+".katie") + os.unlink(c[:-8]+".dak") for f in files: Upload.projectB.query( "DELETE FROM queue_build WHERE filename = '%s'" % (f)) os.unlink(f) print "Updating buildd information..." - spawn("/org/security.debian.org/katie/cron.buildd-security") + spawn("/org/security.debian.org/dak/config/debian-security/cron.buildd") adv_file = "./advisory.%s" % (advisory) if os.path.exists(adv_file): diff --git a/dak/process_accepted.py b/dak/process_accepted.py index 20aab495..86396832 100755 --- a/dak/process_accepted.py +++ b/dak/process_accepted.py @@ -281,6 +281,9 @@ def install (): maintainer = dsc["maintainer"] maintainer = maintainer.replace("'", "\\'") maintainer_id = daklib.database.get_or_set_maintainer_id(maintainer) + changedby = changes["changed-by"] + changedby = changedby.replace("'", "\\'") + changedby_id = daklib.database.get_or_set_maintainer_id(changedby) fingerprint_id = daklib.database.get_or_set_fingerprint_id(dsc["fingerprint"]) install_date = time.strftime("%Y-%m-%d") filename = files[file]["pool name"] + file @@ -288,8 +291,8 @@ def install (): dsc_location_id = files[file]["location id"] if not files[file].has_key("files id") or not files[file]["files id"]: files[file]["files id"] = daklib.database.set_files_id (filename, files[file]["size"], files[file]["md5sum"], dsc_location_id) - 
projectB.query("INSERT INTO source (source, version, maintainer, file, install_date, sig_fpr) VALUES ('%s', '%s', %d, %d, '%s', %s)" - % (package, version, maintainer_id, files[file]["files id"], install_date, fingerprint_id)) + projectB.query("INSERT INTO source (source, version, maintainer, changedby, file, install_date, sig_fpr) VALUES ('%s', '%s', %d, %d, %d, '%s', %s)" + % (package, version, maintainer_id, changedby_id, files[file]["files id"], install_date, fingerprint_id)) for suite in changes["distribution"].keys(): suite_id = daklib.database.get_suite_id(suite) @@ -313,12 +316,17 @@ def install (): if dsc.get("dm-upload-allowed", "no") == "yes": uploader_ids = [maintainer_id] if dsc.has_key("uploaders"): - for u in dsc["uploaders"].split(","): - u = u.replace("'", "\\'") - u = u.strip() + for u in dsc["uploaders"].split(","): + u = u.replace("'", "\\'") + u = u.strip() uploader_ids.append( - daklib.database.get_or_set_maintainer_id(u)) + daklib.database.get_or_set_maintainer_id(u)) + added_ids = {} for u in uploader_ids: + if added_ids.has_key(u): + daklib.utils.warn("Already saw uploader %s for source %s" % (u, package)) + continue + added_ids[u]=1 projectB.query("INSERT INTO src_uploaders (source, maintainer) VALUES (currval('source_id_seq'), %d)" % (u)) diff --git a/dak/process_unchecked.py b/dak/process_unchecked.py index a18b1b19..04038a25 100755 --- a/dak/process_unchecked.py +++ b/dak/process_unchecked.py @@ -44,6 +44,7 @@ re_valid_pkg_name = re.compile(r"^[\dA-Za-z][\dA-Za-z\+\-\.]+$") re_changelog_versions = re.compile(r"^\w[-+0-9a-z.]+ \([^\(\) \t]+\)") re_strip_revision = re.compile(r"-([^-]+)$") re_strip_srcver = re.compile(r"\s+\(\S+\)$") +re_spacestrip = re.compile('(\s)') ################################################################################ @@ -387,7 +388,8 @@ def check_files(): for file in file_keys: # Ensure the file does not already exist in one of the accepted directories - for dir in [ "Accepted", "Byhand", "New", 
"ProposedUpdates", "OldProposedUpdates" ]: + for dir in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]: + if not Cnf.has_key("Dir::Queue::%s" % (dir)): continue if os.path.exists(Cnf["Dir::Queue::%s" % (dir) ]+'/'+file): reject("%s file already exists in the %s directory." % (file, dir)) if not daklib.utils.re_taint_free.match(file): @@ -404,7 +406,7 @@ def check_files(): files[file]["type"] = "unreadable" continue # If it's byhand skip remaining checks - if files[file]["section"] == "byhand" or files[file]["section"][4:] == "raw-": + if files[file]["section"] == "byhand" or files[file]["section"][:4] == "raw-": files[file]["byhand"] = 1 files[file]["type"] = "byhand" # Checks for a binary package... @@ -460,6 +462,18 @@ def check_files(): if depends == '': reject("%s: Depends field is empty." % (file)) + # Sanity-check the Provides field + provides = control.Find("Provides") + if provides: + provide = re_spacestrip.sub('', provides) + if provide == '': + reject("%s: Provides field is empty." % (file)) + prov_list = provide.split(",") + for prov in prov_list: + if not re_valid_pkg_name.match(prov): + reject("%s: Invalid Provides field content %s." % (file, prov)) + + # Check the section & priority match those given in the .changes (non-fatal) if control.Find("Section") and files[file]["section"] != "" and files[file]["section"] != control.Find("Section"): reject("%s control file lists section as `%s', but changes file has `%s'." 
% (file, control.Find("Section", ""), files[file]["section"]), "Warning: ") @@ -898,40 +912,77 @@ def check_urgency (): ################################################################################ -def check_md5sums (): - for file in files.keys(): +def check_hashes (): + # Make sure we recognise the format of the Files: field + format = changes.get("format", "0.0").split(".",1) + if len(format) == 2: + format = int(format[0]), int(format[1]) + else: + format = int(float(format[0])), 0 + + check_hash(".changes", files, "md5sum", apt_pkg.md5sum) + check_hash(".dsc", dsc_files, "md5sum", apt_pkg.md5sum) + + if format >= (1,8): + hashes = [("sha1", apt_pkg.sha1sum), + ("sha256", apt_pkg.sha256sum)] + else: + hashes = [] + + for x in changes: + if x.startswith("checksum-"): + h = x.split("-",1)[1] + if h not in dict(hashes): + reject("Unsupported checksum field in .changes" % (h)) + + for x in dsc: + if x.startswith("checksum-"): + h = x.split("-",1)[1] + if h not in dict(hashes): + reject("Unsupported checksum field in .dsc" % (h)) + + for h,f in hashes: try: - file_handle = daklib.utils.open_file(file) - except daklib.utils.cant_open_exc: - continue + fs = daklib.utils.build_file_list(changes, 0, "checksums-%s" % h, h) + check_hash(".changes %s" % (h), fs, h, f, files) + except daklib.utils.no_files_exc: + reject("No Checksums-%s: field in .changes file" % (h)) - # Check md5sum - if apt_pkg.md5sum(file_handle) != files[file]["md5sum"]: - reject("%s: md5sum check failed." 
% (file)) - file_handle.close() - # Check size - actual_size = os.stat(file)[stat.ST_SIZE] - size = int(files[file]["size"]) - if size != actual_size: - reject("%s: actual file size (%s) does not match size (%s) in .changes" - % (file, actual_size, size)) + if "source" not in changes["architecture"]: continue + + try: + fs = daklib.utils.build_file_list(dsc, 1, "checksums-%s" % h, h) + check_hash(".dsc %s" % (h), fs, h, f, dsc_files) + except daklib.utils.no_files_exc: + reject("No Checksums-%s: field in .changes file" % (h)) + +################################################################################ + +def check_hash (where, files, key, testfn, basedict = None): + if basedict: + for file in basedict.keys(): + if file not in files: + reject("%s: no %s checksum" % (file, key)) + + for file in files.keys(): + if basedict and file not in basedict: + reject("%s: extraneous entry in %s checksums" % (file, key)) - for file in dsc_files.keys(): try: file_handle = daklib.utils.open_file(file) except daklib.utils.cant_open_exc: continue - # Check md5sum - if apt_pkg.md5sum(file_handle) != dsc_files[file]["md5sum"]: - reject("%s: md5sum check failed." % (file)) + # Check hash + if testfn(file_handle) != files[file][key]: + reject("%s: %s check failed." 
% (file, key)) file_handle.close() # Check size actual_size = os.stat(file)[stat.ST_SIZE] - size = int(dsc_files[file]["size"]) + size = int(files[file]["size"]) if size != actual_size: - reject("%s: actual file size (%s) does not match size (%s) in .dsc" - % (file, actual_size, size)) + reject("%s: actual file size (%s) does not match size (%s) in %s" + % (file, actual_size, size, where)) ################################################################################ @@ -1034,6 +1085,12 @@ def check_signed_by_key(): if uid_name == "": sponsored = 1 else: sponsored = 1 + if ("source" in changes["architecture"] and + daklib.utils.is_email_alias(uid_email)): + sponsor_addresses = daklib.utils.gpg_get_key_addresses(changes["fingerprint"]) + if (changes["maintaineremail"] not in sponsor_addresses and + changes["changedbyemail"] not in sponsor_addresses): + changes["sponsoremail"] = uid_email if sponsored and not may_sponsor: reject("%s is not authorised to sponsor uploads" % (uid)) @@ -1284,7 +1341,8 @@ def queue_unembargo (summary, short_summary): ################################################################################ def is_embargo (): - return 0 + # if embargoed queues are enabled always embargo + return 1 def queue_embargo (summary, short_summary): print "Moving to EMBARGOED holding area." 
@@ -1516,7 +1574,7 @@ def process_it (changes_file): valid_dsc_p = check_dsc() if valid_dsc_p: check_source() - check_md5sums() + check_hashes() check_urgency() check_timestamps() check_signed_by_key() diff --git a/dak/security_install.py b/dak/security_install.py index 46c861b5..f686404f 100755 --- a/dak/security_install.py +++ b/dak/security_install.py @@ -84,9 +84,6 @@ def do_upload(changes_files): if changes["architecture"].has_key("amd64"): print "Not uploading amd64 part to ftp-master\n" continue - if changes["distribution"].has_key("oldstable"): - print "Not uploading oldstable-security changes to ftp-master\n" - continue # Build the file list for this .changes file for file in files.keys(): poolname = os.path.join(Cnf["Dir::Root"], Cnf["Dir::PoolRoot"], @@ -328,6 +325,9 @@ def spawn(command): def main(): + print "Disabled. See your team@security email, and/or contact aj on OFTC." + sys.exit(1) + (advisory_number, changes_files) = init() if not Options["No-Action"]: @@ -340,7 +340,7 @@ def main(): os.chdir(Cnf["Dir::Queue::Accepted"]) print "Installing packages into the archive..." - spawn("dak process-accepted -pa %s" % (Cnf["Dir::Dak"], " ".join(changes_files))) + spawn("dak process-accepted -pa %s" % (" ".join(changes_files))) os.chdir(Cnf["Dir::Dak"]) print "Updating file lists for apt-ftparchive..." 
spawn("dak make-suite-file-list") diff --git a/dak/split_done.py b/dak/split_done.py index 3c50bd1b..2fd6f222 100755 --- a/dak/split_done.py +++ b/dak/split_done.py @@ -26,11 +26,15 @@ import daklib.utils def main(): Cnf = daklib.utils.get_conf() count = 0 + move_date = int(time.time())-(30*84600) os.chdir(Cnf["Dir::Queue::Done"]) files = glob.glob("%s/*" % (Cnf["Dir::Queue::Done"])) for filename in files: if os.path.isfile(filename): - mtime = time.gmtime(os.stat(filename)[stat.ST_MTIME]) + filemtime = os.stat(filename)[stat.ST_MTIME] + if filemtime < move_date: + continue + mtime = time.gmtime(filemtime) dirname = time.strftime("%Y/%m/%d", mtime) if not os.path.exists(dirname): print "Creating: %s" % (dirname) diff --git a/dak/transitions.py b/dak/transitions.py new file mode 100755 index 00000000..e7cb99e8 --- /dev/null +++ b/dak/transitions.py @@ -0,0 +1,492 @@ +#!/usr/bin/env python + +# Display, edit and check the release manager's transition file. +# Copyright (C) 2008 Joerg Jaspert + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + +# if klecker.d.o died, I swear to god, I'm going to migrate to gentoo. 
+ +################################################################################ + +import os, pg, sys, time, errno, fcntl, tempfile, pwd, re +import apt_pkg +import daklib.database +import daklib.utils +import syck + +# Globals +Cnf = None +Options = None +projectB = None + +re_broken_package = re.compile(r"[a-zA-Z]\w+\s+\-.*") + +################################################################################ + +##################################### +#### This may run within sudo !! #### +##################################### +def init(): + global Cnf, Options, projectB + + apt_pkg.init() + + Cnf = daklib.utils.get_conf() + + Arguments = [('h',"help","Edit-Transitions::Options::Help"), + ('e',"edit","Edit-Transitions::Options::Edit"), + ('i',"import","Edit-Transitions::Options::Import", "HasArg"), + ('c',"check","Edit-Transitions::Options::Check"), + ('s',"sudo","Edit-Transitions::Options::Sudo"), + ('n',"no-action","Edit-Transitions::Options::No-Action")] + + for i in ["help", "no-action", "edit", "import", "check", "sudo"]: + if not Cnf.has_key("Edit-Transitions::Options::%s" % (i)): + Cnf["Edit-Transitions::Options::%s" % (i)] = "" + + apt_pkg.ParseCommandLine(Cnf, Arguments, sys.argv) + + Options = Cnf.SubTree("Edit-Transitions::Options") + + if Options["help"]: + usage() + + whoami = os.getuid() + whoamifull = pwd.getpwuid(whoami) + username = whoamifull[0] + if username != "dak": + print "Non-dak user: %s" % username + Options["sudo"] = "y" + + projectB = pg.connect(Cnf["DB::Name"], Cnf["DB::Host"], int(Cnf["DB::Port"])) + daklib.database.init(Cnf, projectB) + +################################################################################ + +def usage (exit_code=0): + print """Usage: transitions [OPTION]... +Update and check the release managers transition file. + +Options: + + -h, --help show this help and exit. 
+ -e, --edit edit the transitions file + -i, --import check and import transitions from file + -c, --check check the transitions file, remove outdated entries + -S, --sudo use sudo to update transitions file + -n, --no-action don't do anything (only affects check)""" + + sys.exit(exit_code) + +################################################################################ + +##################################### +#### This may run within sudo !! #### +##################################### +def load_transitions(trans_file): + # Parse the yaml file + sourcefile = file(trans_file, 'r') + sourcecontent = sourcefile.read() + failure = False + try: + trans = syck.load(sourcecontent) + except syck.error, msg: + # Someone fucked it up + print "ERROR: %s" % (msg) + return None + + # lets do further validation here + checkkeys = ["source", "reason", "packages", "new", "rm"] + + # If we get an empty definition - we just have nothing to check, no transitions defined + if type(trans) != dict: + # This can be anything. We could have no transitions defined. Or someone totally fucked up the + # file, adding stuff in a way we dont know or want. Then we set it empty - and simply have no + # transitions anymore. User will see it in the information display after he quit the editor and + # could fix it + trans = "" + return trans + + try: + for test in trans: + t = trans[test] + + # First check if we know all the keys for the transition and if they have + # the right type (and for the packages also if the list has the right types + # included, ie. not a list in list, but only str in the list) + for key in t: + if key not in checkkeys: + print "ERROR: Unknown key %s in transition %s" % (key, test) + failure = True + + if key == "packages": + if type(t[key]) != list: + print "ERROR: Unknown type %s for packages in transition %s." 
% (type(t[key]), test) + failure = True + try: + for package in t["packages"]: + if type(package) != str: + print "ERROR: Packages list contains invalid type %s (as %s) in transition %s" % (type(package), package, test) + failure = True + if re_broken_package.match(package): + # Someone had a space too much (or not enough), we have something looking like + # "package1 - package2" now. + print "ERROR: Invalid indentation of package list in transition %s, around package(s): %s" % (test, package) + failure = True + except TypeError: + # In case someone has an empty packages list + print "ERROR: No packages defined in transition %s" % (test) + failure = True + continue + + elif type(t[key]) != str: + if key == "new" and type(t[key]) == int: + # Ok, debian native version + continue + else: + print "ERROR: Unknown type %s for key %s in transition %s" % (type(t[key]), key, test) + failure = True + + # And now the other way round - are all our keys defined? + for key in checkkeys: + if key not in t: + print "ERROR: Missing key %s in transition %s" % (key, test) + failure = True + except TypeError: + # In case someone defined very broken things + print "ERROR: Unable to parse the file" + failure = True + + + if failure: + return None + + return trans + +################################################################################ + +##################################### +#### This may run within sudo !! #### +##################################### +def lock_file(file): + for retry in range(10): + lock_fd = os.open(file, os.O_RDWR | os.O_CREAT) + try: + fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + return lock_fd + except OSError, e: + if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EEXIST': + print "Unable to get lock for %s (try %d of 10)" % \ + (file, retry+1) + time.sleep(60) + else: + raise + + daklib.utils.fubar("Couldn't obtain lock for %s." 
% (lockfile)) + +################################################################################ + +##################################### +#### This may run within sudo !! #### +##################################### +def write_transitions(from_trans): + """Update the active transitions file safely. + This function takes a parsed input file (which avoids invalid + files or files that may be be modified while the function is + active), and ensure the transitions file is updated atomically + to avoid locks.""" + + trans_file = Cnf["Dinstall::Reject::ReleaseTransitions"] + trans_temp = trans_file + ".tmp" + + trans_lock = lock_file(trans_file) + temp_lock = lock_file(trans_temp) + + destfile = file(trans_temp, 'w') + syck.dump(from_trans, destfile) + destfile.close() + + os.rename(trans_temp, trans_file) + os.close(temp_lock) + os.close(trans_lock) + +################################################################################ + +class ParseException(Exception): + pass + +########################################## +#### This usually runs within sudo !! 
#### +########################################## +def write_transitions_from_file(from_file): + """We have a file we think is valid; if we're using sudo, we invoke it + here, otherwise we just parse the file and call write_transitions""" + + # Lets check if from_file is in the directory we expect it to be in + if not os.path.abspath(from_file).startswith(Cnf["Transitions::TempPath"]): + print "Will not accept transitions file outside of %s" % (Cnf["Transitions::TempPath"]) + sys.exit(3) + + if Options["sudo"]: + os.spawnl(os.P_WAIT, "/usr/bin/sudo", "/usr/bin/sudo", "-u", "dak", "-H", + "/usr/local/bin/dak", "transitions", "--import", from_file) + else: + trans = load_transitions(from_file) + if trans is None: + raise ParseException, "Unparsable transitions file %s" % (file) + write_transitions(trans) + +################################################################################ + +def temp_transitions_file(transitions): + # NB: file is unlinked by caller, but fd is never actually closed. + # We need the chmod, as the file is (most possibly) copied from a + # sudo-ed script and would be unreadable if it has default mkstemp mode + + (fd, path) = tempfile.mkstemp("", "transitions", Cnf["Transitions::TempPath"]) + os.chmod(path, 0644) + f = open(path, "w") + syck.dump(transitions, f) + return path + +################################################################################ + +def edit_transitions(): + trans_file = Cnf["Dinstall::Reject::ReleaseTransitions"] + edit_file = temp_transitions_file(load_transitions(trans_file)) + + editor = os.environ.get("EDITOR", "vi") + + while True: + result = os.system("%s %s" % (editor, edit_file)) + if result != 0: + os.unlink(edit_file) + daklib.utils.fubar("%s invocation failed for %s, not removing tempfile." % (editor, edit_file)) + + # Now try to load the new file + test = load_transitions(edit_file) + + if test == None: + # Edit is broken + print "Edit was unparsable." + prompt = "[E]dit again, Drop changes?" 
+ default = "E" + else: + print "Edit looks okay.\n" + print "The following transitions are defined:" + print "------------------------------------------------------------------------" + transition_info(test) + + prompt = "[S]ave, Edit again, Drop changes?" + default = "S" + + answer = "XXX" + while prompt.find(answer) == -1: + answer = daklib.utils.our_raw_input(prompt) + if answer == "": + answer = default + answer = answer[:1].upper() + + if answer == 'E': + continue + elif answer == 'D': + os.unlink(edit_file) + print "OK, discarding changes" + sys.exit(0) + elif answer == 'S': + # Ready to save + break + else: + print "You pressed something you shouldn't have :(" + sys.exit(1) + + # We seem to be done and also have a working file. Copy over. + write_transitions_from_file(edit_file) + os.unlink(edit_file) + + print "Transitions file updated." + +################################################################################ + +def check_transitions(transitions): + to_dump = 0 + to_remove = [] + # Now look through all defined transitions + for trans in transitions: + t = transitions[trans] + source = t["source"] + expected = t["new"] + + # Will be None if nothing is in testing. + current = daklib.database.get_suite_version(source, "testing") + + print_info(trans, source, expected, t["rm"], t["reason"], t["packages"]) + + if current == None: + # No package in testing + print "Transition source %s not in testing, transition still ongoing." % (source) + else: + compare = apt_pkg.VersionCompare(current, expected) + if compare < 0: + # This is still valid, the current version in database is older than + # the new version we wait for + print "This transition is still ongoing, we currently have version %s" % (current) + else: + print "REMOVE: This transition is over, the target package reached testing. 
REMOVE" + print "%s wanted version: %s, has %s" % (source, expected, current) + to_remove.append(trans) + to_dump = 1 + print "-------------------------------------------------------------------------" + + if to_dump: + prompt = "Removing: " + for remove in to_remove: + prompt += remove + prompt += "," + + prompt += " Commit Changes? (y/N)" + answer = "" + + if Options["no-action"]: + answer="n" + else: + answer = daklib.utils.our_raw_input(prompt).lower() + + if answer == "": + answer = "n" + + if answer == 'n': + print "Not committing changes" + sys.exit(0) + elif answer == 'y': + print "Committing" + for remove in to_remove: + del transitions[remove] + + edit_file = temp_transitions_file(transitions) + write_transitions_from_file(edit_file) + + print "Done" + else: + print "WTF are you typing?" + sys.exit(0) + +################################################################################ + +def print_info(trans, source, expected, rm, reason, packages): + print """Looking at transition: %s + Source: %s + New Version: %s + Responsible: %s + Description: %s + Blocked Packages (total: %d): %s +""" % (trans, source, expected, rm, reason, len(packages), ", ".join(packages)) + return + +################################################################################ + +def transition_info(transitions): + for trans in transitions: + t = transitions[trans] + source = t["source"] + expected = t["new"] + + # Will be None if nothing is in testing. + current = daklib.database.get_suite_version(source, "testing") + + print_info(trans, source, expected, t["rm"], t["reason"], t["packages"]) + + if current == None: + # No package in testing + print "Transition source %s not in testing, transition still ongoing." 
% (source) + else: + compare = apt_pkg.VersionCompare(current, expected) + print "Apt compare says: %s" % (compare) + if compare < 0: + # This is still valid, the current version in database is older than + # the new version we wait for + print "This transition is still ongoing, we currently have version %s" % (current) + else: + print "This transition is over, the target package reached testing, should be removed" + print "%s wanted version: %s, has %s" % (source, expected, current) + print "-------------------------------------------------------------------------" + +################################################################################ + +def main(): + global Cnf + + ##################################### + #### This can run within sudo !! #### + ##################################### + init() + + # Check if there is a file defined (and existant) + transpath = Cnf.get("Dinstall::Reject::ReleaseTransitions", "") + if transpath == "": + daklib.utils.warn("Dinstall::Reject::ReleaseTransitions not defined") + sys.exit(1) + if not os.path.exists(transpath): + daklib.utils.warn("ReleaseTransitions file, %s, not found." % + (Cnf["Dinstall::Reject::ReleaseTransitions"])) + sys.exit(1) + # Also check if our temp directory is defined and existant + temppath = Cnf.get("Transitions::TempPath", "") + if temppath == "": + daklib.utils.warn("Transitions::TempPath not defined") + sys.exit(1) + if not os.path.exists(temppath): + daklib.utils.warn("Temporary path %s not found." % + (Cnf["Transitions::TempPath"])) + sys.exit(1) + + if Options["import"]: + try: + write_transitions_from_file(Options["import"]) + except ParseException, m: + print m + sys.exit(2) + sys.exit(0) + ############################################## + #### Up to here it can run within sudo !! 
#### + ############################################## + + # Parse the yaml file + transitions = load_transitions(transpath) + if transitions == None: + # Something very broken with the transitions, exit + daklib.utils.warn("Could not parse existing transitions file. Aborting.") + sys.exit(2) + + if Options["edit"]: + # Let's edit the transitions file + edit_transitions() + elif Options["check"]: + # Check and remove outdated transitions + check_transitions(transitions) + else: + # Output information about the currently defined transitions. + print "Currently defined transitions:" + transition_info(transitions) + + sys.exit(0) + +################################################################################ + +if __name__ == '__main__': + main() diff --git a/daklib/database.py b/daklib/database.py old mode 100644 new mode 100755 index a40696e2..5c362604 --- a/daklib/database.py +++ b/daklib/database.py @@ -41,6 +41,7 @@ maintainer_cache = {} fingerprint_id_cache = {} queue_id_cache = {} uid_id_cache = {} +suite_version_cache = {} ################################################################################ @@ -223,6 +224,29 @@ def get_source_id (source, version): return source_id +def get_suite_version(source, suite): + global suite_version_cache + cache_key = "%s_%s" % (source, suite) + + if suite_version_cache.has_key(cache_key): + return suite_version_cache[cache_key] + + q = projectB.query(""" + SELECT s.version FROM source s, suite su, src_associations sa + WHERE sa.source=s.id + AND sa.suite=su.id + AND su.suite_name='%s' + AND s.source='%s'""" + % (suite, source)) + + if not q.getresult(): + return None + + version = q.getresult()[0][0] + suite_version_cache[cache_key] = version + + return version + ################################################################################ def get_or_set_maintainer_id (maintainer): diff --git a/daklib/extensions.py b/daklib/extensions.py new file mode 100644 index 00000000..d5da89d8 --- /dev/null +++ 
b/daklib/extensions.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python + +# Utility functions for extensions +# Copyright (C) 2008 Anthony Towns + +################################################################################ + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + +dak_functions_to_replace = {} +dak_replaced_functions = {} + +def replace_dak_function(module,name): + """Decorator to make a function replace a standard dak function + in a given module. 
The replaced function will be provided as + the first argument.""" + + def x(f): + def myfunc(*a,**kw): + global replaced_funcs + f(dak_replaced_functions[name], *a, **kw) + myfunc.__name__ = f.__name__ + myfunc.__doc__ = f.__doc__ + myfunc.__dict__.update(f.__dict__) + + fnname = "%s:%s" % (module, name) + if fnname in dak_functions_to_replace: + raise Exception, \ + "%s in %s already marked to be replaced" % (name, module) + dak_functions_to_replace["%s:%s" % (module,name)] = myfunc + return f + return x + +################################################################################ + +def init(name, module, userext): + global dak_replaced_functions + + # This bit should be done automatically too + dak_replaced_functions = {} + for f,newfunc in dak_functions_to_replace.iteritems(): + m,f = f.split(":",1) + if len(f) > 0 and m == name: + dak_replaced_functions[f] = module.__dict__[f] + module.__dict__[f] = newfunc + + diff --git a/daklib/queue.py b/daklib/queue.py index 05cd0be0..e2c7396a 100755 --- a/daklib/queue.py +++ b/daklib/queue.py @@ -106,12 +106,12 @@ def get_type(f): elif f["type"] in [ "orig.tar.gz", "orig.tar.bz2", "tar.gz", "tar.bz2", "diff.gz", "diff.bz2", "dsc" ]: type = "dsc" else: - fubar("invalid type (%s) for new. Dazed, confused and sure as heck not continuing." % (type)) + utils.fubar("invalid type (%s) for new. Dazed, confused and sure as heck not continuing." % (type)) # Validate the override type type_id = database.get_override_type_id(type) if type_id == -1: - fubar("invalid type (%s) for new. Say wha?" % (type)) + utils.fubar("invalid type (%s) for new. Say wha?" 
% (type)) return type @@ -232,7 +232,8 @@ class Upload: "closes", "changes" ]: d_changes[i] = changes[i] # Optional changes fields - for i in [ "changed-by", "filecontents", "format", "process-new note", "adv id", "distribution-version" ]: + for i in [ "changed-by", "filecontents", "format", "process-new note", "adv id", "distribution-version", + "sponsoremail" ]: if changes.has_key(i): d_changes[i] = changes[i] ## dsc @@ -284,6 +285,10 @@ class Upload: Subst["__MAINTAINER_FROM__"] = changes["maintainer2047"] Subst["__MAINTAINER_TO__"] = changes["maintainer2047"] Subst["__MAINTAINER__"] = changes.get("maintainer", "Unknown") + + if "sponsoremail" in changes: + Subst["__MAINTAINER_TO__"] += ", %s"%changes["sponsoremail"] + if self.Cnf.has_key("Dinstall::TrackingServer") and changes.has_key("source"): Subst["__MAINTAINER_TO__"] += "\nBcc: %s@%s" % (changes["source"], self.Cnf["Dinstall::TrackingServer"]) diff --git a/daklib/utils.py b/daklib/utils.py index c6eeac2f..0d22bd1d 100755 --- a/daklib/utils.py +++ b/daklib/utils.py @@ -41,8 +41,10 @@ re_multi_line_field = re.compile(r"^\s(.*)") re_taint_free = re.compile(r"^[-+~/\.\w]+$") re_parse_maintainer = re.compile(r"^\s*(\S.*\S)\s*\<([^\>]+)\>") +re_gpg_uid = re.compile('^uid.*<([^>]*)>') re_srchasver = re.compile(r"^(\S+)\s+\((\S+)\)$") +re_verwithext = re.compile(r"^(\d+)(?:\.(\d+))(?:\s+\((\S+)\))?$") changes_parse_error_exc = "Can't parse line in .changes file" invalid_dsc_format_exc = "Invalid .dsc file" @@ -58,6 +60,9 @@ tried_too_hard_exc = "Tried too hard to find a free filename." default_config = "/etc/dak/dak.conf" default_apt_config = "/etc/dak/apt.conf" +alias_cache = None +key_uid_email_cache = {} + ################################################################################ class Error(Exception): @@ -229,31 +234,48 @@ The rules for (signing_rules == 1)-mode are: # Dropped support for 1.4 and ``buggy dchanges 3.4'' (?!) 
compared to di.pl -def build_file_list(changes, is_a_dsc=0): +def build_file_list(changes, is_a_dsc=0, field="files", hashname="md5sum"): files = {} # Make sure we have a Files: field to parse... - if not changes.has_key("files"): - raise no_files_exc + if not changes.has_key(field): + raise no_files_exc # Make sure we recognise the format of the Files: field - format = changes.get("format", "") - if format != "": - format = float(format) - if not is_a_dsc and (format < 1.5 or format > 2.0): - raise nk_format_exc, format + format = re_verwithext.search(changes.get("format", "0.0")) + if not format: + raise nk_format_exc, "%s" % (changes.get("format","0.0")) + + format = format.groups() + if format[1] == None: + format = int(float(format[0])), 0, format[2] + else: + format = int(format[0]), int(format[1]), format[2] + if format[2] == None: + format = format[:2] + + if is_a_dsc: + if format != (1,0): + raise nk_format_exc, "%s" % (changes.get("format","0.0")) + else: + if (format < (1,5) or format > (1,8)): + raise nk_format_exc, "%s" % (changes.get("format","0.0")) + if field != "files" and format < (1,8): + raise nk_format_exc, "%s" % (changes.get("format","0.0")) + + includes_section = (not is_a_dsc) and field == "files" # Parse each entry/line: - for i in changes["files"].split('\n'): + for i in changes[field].split('\n'): if not i: break s = i.split() section = priority = "" try: - if is_a_dsc: - (md5, size, name) = s - else: + if includes_section: (md5, size, section, priority, name) = s + else: + (md5, size, name) = s except ValueError: raise changes_parse_error_exc, i @@ -264,8 +286,9 @@ def build_file_list(changes, is_a_dsc=0): (section, component) = extract_component_from_section(section) - files[name] = Dict(md5sum=md5, size=size, section=section, + files[name] = Dict(size=size, section=section, priority=priority, component=component) + files[name][hashname] = md5 return files @@ -449,6 +472,14 @@ def which_apt_conf_file (): else: return default_apt_config 
+def which_alias_file(): + hostname = socket.gethostbyaddr(socket.gethostname())[0] + aliasfn = '/var/lib/misc/'+hostname+'/forward-alias' + if os.path.exists(aliasfn): + return aliasfn + else: + return None + ################################################################################ # Escape characters which have meaning to SQL's regex comparison operator ('~') @@ -1009,6 +1040,9 @@ used.""" reject("no signature found in %s." % (sig_filename)) bad = 1 if keywords.has_key("KEYEXPIRED") and not keywords.has_key("GOODSIG"): + args = keywords["KEYEXPIRED"] + if len(args) >= 1: + key = args[0] reject("The key (0x%s) used to sign %s has expired." % (key, sig_filename)) bad = 1 @@ -1059,6 +1093,25 @@ used.""" ################################################################################ +def gpg_get_key_addresses(fingerprint): + """retreive email addresses from gpg key uids for a given fingerprint""" + addresses = key_uid_email_cache.get(fingerprint) + if addresses != None: + return addresses + addresses = set() + cmd = "gpg --no-default-keyring %s --fingerprint %s" \ + % (gpg_keyring_args(), fingerprint) + (result, output) = commands.getstatusoutput(cmd) + if result == 0: + for l in output.split('\n'): + m = re_gpg_uid.match(l) + if m: + addresses.add(m.group(1)) + key_uid_email_cache[fingerprint] = addresses + return addresses + +################################################################################ + # Inspired(tm) by http://www.zopelabs.com/cookbook/1022242603 def wrap(paragraph, max_length, prefix=""): @@ -1126,6 +1179,21 @@ If 'dotprefix' is non-null, the filename will be prefixed with a '.'.""" ################################################################################ +# checks if the user part of the email is listed in the alias file + +def is_email_alias(email): + global alias_cache + if alias_cache == None: + aliasfn = which_alias_file() + alias_cache = set() + if aliasfn: + for l in open(aliasfn): + alias_cache.add(l.split(':')[0]) + 
uid = email.split('@')[0] + return uid in alias_cache + +################################################################################ + apt_pkg.init() Cnf = apt_pkg.newConfiguration() diff --git a/debian/control b/debian/control index 97c1fbe1..2d6678b9 100644 --- a/debian/control +++ b/debian/control @@ -7,7 +7,7 @@ Standards-Version: 3.5.6.0 Package: dak Architecture: any -Depends: ${python:Depends}, python-pygresql, python2.1-email | python (>= 2.2), python-apt, apt-utils, gnupg (>= 1.0.6-1), ${shlibs:Depends}, dpkg-dev +Depends: ${python:Depends}, python-pygresql, python2.1-email | python (>= 2.2), python-apt, apt-utils, gnupg (>= 1.0.6-1), ${shlibs:Depends}, dpkg-dev, python-syck (>= 0.61.2-1) Suggests: lintian, linda, less, binutils-multiarch, symlinks, postgresql (>= 7.1.0), dsync Description: Debian's archive maintenance scripts This is a collection of archive maintenance scripts used by the diff --git a/docs/README.stable-point-release b/docs/README.stable-point-release index 34059ef9..176b33eb 100644 --- a/docs/README.stable-point-release +++ b/docs/README.stable-point-release @@ -13,25 +13,40 @@ o Install, reject and remove packages as directed by the SRM using NB: removing packages are not logged to the stable ChangeLog; you need to do that byhand. -o Do anything in proposed-updates/TODO +o If you installed a debian-installer upload; migrate the relevant + installer-*/$release directory from proposed-updates to stable. 
+ (Including potentially removing older versions) + +o Decruft stable in coordination with SRMs + +o Do anything in proposed-updates/TODO o Close any applicable stable bugs (hint: http://bugs.debian.org/cgi-bin/pkgreport.cgi?pkg=ftp.debian.org&include=etch) -o Update version number in README, README.html and dists/README (ftp-master only) +o Update version number in README, README.html and dists/README o Update the 'Debian.r' symlink in dists/ o Clean up dists/stable/ChangeLog (add header, basically) o Update version fields in dak.conf o Update fields in suite table in postgresql (see below) -o Comment out "Untouchable" in dak.conf -o Run 'dak make-suite-file-list -s stable' +o Run 'dak make-suite-file-list --force -s stable' o Run apt-ftparchive generate apt.conf.stable -o Run 'dak generate-releases stable' ** FIXME: requires apt.conf.stable stanza for stable in apt.conf - ** FIXME: must be run as dak -o Uncomment "Untouchable" in dak.conf +o Run 'dak generate-releases --force-touch --apt-conf apt.conf.stable stable' -Yes, this sucks and more of it should be automated. +[Yes, this sucks and more of it should be automated. c.f. ~ajt/pointupdate] ####################################################### -update suite set version = '4.0r1' where suite_name = 'stable'; -update suite set description = 'Debian 4.0r1 Released 15th August 2007' where suite_name = 'stable'; +update suite set version = '4.0r3' where suite_name = 'stable'; +update suite set description = 'Debian 4.0r3 Released 16th February 2008' where suite_name = 'stable'; + +Rough Guide to doing Old-Stable Point Releases in Debian +-------------------------------------------------------- + +Pretty much as above, except that process-accepted doesn't know about +oldstable, so you have to do some surgery on it first to make it +support that. Probably want to disable cron.daily whilst doing so. 
+Also watch out for the installing_to_stable detection which doesn't
+work well with the current layout of oldstable-proposed-updates (as a
+symlink to $distro-proposed-updates). clean-proposed-updates,
+cruft-report and most everything else support a -s/--suite so they
+should be fine to use.
diff --git a/docs/transitions.txt b/docs/transitions.txt
new file mode 100644
index 00000000..5b52bb31
--- /dev/null
+++ b/docs/transitions.txt
@@ -0,0 +1,275 @@
+Contents:
+
+1. Little "Howto Use it"
+2. Explanation of how it works
+
+
+1. Little "Howto Use it"
+------------------------
+
+The input file is in YAML format. Do not bother with comments, they
+will be removed.
+
+The format: Don't use tabs for indentation, use spaces.
+
+Strings should be within "", but normally work without.
+Exception: Version-numbers with an epoch really do want to be in
+"". YES, THEY WANT TO (or they get interpreted in a way you don't expect
+it).
+
+Keys (The order of them does not matter, only the indentation):
+
+short_tag: A short tag for the transition, like apt_update
+  reason: One-line reason what is intended with it
+  source: Source package that needs to transition
+  new: New version of the target package
+  rm: Name of the Release Team member responsible for this transition
+  packages: Array of package names that are affected by this transition
+
+
+The following example wants to
+a.) update apt to version 0.7.12, the responsible Release Team member
+is Andreas Barth, and it affects some apt related packages and
+b.) wants to do something similar for lintian. 
+ +apt_update: + packages: + - apt + - synaptic + - cron-apt + - debtags + - feta + - apticron + - aptitude + reason: "Apt needs to transition to testing to get foo and bar done" + source: apt + new: 0.7.12 + rm: Andreas Barth +lintian_breakage: + reason: "Testing a new feature" + source: lintian + new: 1.23.45~bpo40+1 + rm: Ganneff + packages: + - lintian + - python-syck + + +######################################################################## +######################################################################## + + +2. Explanation of how it works +------------------------------ + +Assume the following transition is defined: + +lintian_funtest: + reason: "Testing a new feature" + source: lintian + new: 1.23.45~bpo40+1 + rm: Ganneff + packages: + - lintian + - python-syck + +Also assume the lintian situation on this archive looks like this: + lintian | 1.23.28~bpo.1 | sarge-backports | source, all + lintian | 1.23.45~bpo40+1 | etch-backports | source, all + +------------------------------------------------------------------------ + +Now, I try to upload a (NEW, but that makes no difference) version of +python-syck: + +$ dak process-unchecked -n python-syck_0.61.2-1~bpo40+1_i386.changes + +python-syck_0.61.2-1~bpo40+1_i386.changes +REJECT +Rejected: python-syck: part of the lintian_funtest transition. + +Your package is part of a testing transition designed to get lintian migrated +(it currently is at version 1.23.28~bpo.1, we need version 1.23.45~bpo40+1) + +Transition description: Testing a new feature + +This transition is managed by the Release Team, and Ganneff +is the Release-Team member responsible for it. +Please contact Ganneff or debian-release@lists.debian.org if you +need further assistance. 
+ +------------------------------------------------------------------------ + +Let's change the definition of the transition, assume it is now: + +lintian_funtest: + reason: "Testing a new feature" + source: lintian + new: 1.22.28~bpo.1 + rm: Ganneff + packages: + - lintian + - python-syck + +Which checks for a version older than the version actually available. Result: + +dak process-unchecked -n python-syck_0.61.2-1~bpo40+1_i386.changes + +python-syck_0.61.2-1~bpo40+1_i386.changes +NEW for etch-backports +(new) python-syck_0.61.2-1~bpo40+1.diff.gz extra python +(new) python-syck_0.61.2-1~bpo40+1.dsc extra python +(new) python-syck_0.61.2-1~bpo40+1_i386.deb extra python +PySyck python bindings to the Syck YAML parser kit + Syck is a simple YAML parser kit. + . +[...] the whole stuff about a new package. + +------------------------------------------------------------------------ + +For completeness, change the transition to (exact existing version): +lintian_funtest: + reason: "Testing a new feature" + source: lintian + new: 1.23.28~bpo.1 + rm: Ganneff + packages: + - lintian + +and the result is: + +dak process-unchecked -n python-syck_0.61.2-1~bpo40+1_i386.changes + +python-syck_0.61.2-1~bpo40+1_i386.changes +NEW for etch-backports +[... we know this ...] + +------------------------------------------------------------------------ + +The second part is the check_transitions script. 
+For that we take the following transitions as example: + +apt_update: + reason: "Apt needs to transition to testing to get foo and bar done" + source: apt + new: 0.2.12-1+b1.3 + rm: Andreas Barth + packages: + - apt + - synaptic + - cron-apt + - debtags + - feta + - apticron + - aptitude +lintian_funtest: + reason: "Testing a new feature" + source: lintian + new: 1.23.45~bpo40+1 + rm: Ganneff + packages: + - lintian + - python-syck +bar_breaks_it: + reason: We dont want bar to break it + source: bar + new: "9:99" + rm: Ganneff + packages: + - kdelibs + - qt4-x11 + - libqt-perl + +Running check-transitions ends up with the following output: + +Looking at transition: lintian_funtest + Source: lintian + New Version: 1.23.45~bpo40+1 + Responsible: Ganneff + Description: Testing a new feature + Blocked Packages (total: 2): lintian, python-syck + +Apt compare says: -2 +This transition is still ongoing, we currently have version 1.23.28~bpo.1 +------------------------------------------------------------------------- + +Looking at transition: apt_update + Source: apt + New Version: 0.2.12-1+b1.3 + Responsible: Andreas Barth + Description: Apt needs to transition to testing to get foo and bar done + Blocked Packages (total: 7): apt, synaptic, cron-apt, debtags, feta, apticron, aptitude + +Apt compare says: 4 +This transition is over, the target package reached testing, removing +apt wanted version: 0.2.12-1+b1.3, has 0.6.46.4-0.1~bpo.1 +------------------------------------------------------------------------- + +Looking at transition: bar_breaks_it + Source: bar + New Version: 9:99 + Responsible: Ganneff + Description: We dont want bar to break it + Blocked Packages (total: 3): kdelibs, qt4-x11, libqt-perl + +Transition source bar not in testing, transition still ongoing. 
+------------------------------------------------------------------------- +I: I would remove the apt_update transition + + +Changing our transition definitions for lintian (keeping the rest as +above) to + +lintian_funtest: + reason: "Testing a new feature" + source: lintian + new: 1.22.28~bpo.1 + rm: Ganneff + packages: + - lintian + - python-syck + +now we get + +Looking at transition: lintian_funtest + Source: lintian + New Version: 1.22.28~bpo.1 + Responsible: Ganneff + Description: Testing a new feature + Blocked Packages (total: 2): lintian, python-syck + +Apt compare says: 1 +This transition is over, the target package reached testing, removing +lintian wanted version: 1.22.28~bpo.1, has 1.23.28~bpo.1 +------------------------------------------------------------------------- + +Looking at transition: apt_update + Source: apt + New Version: 0.2.12-1+b1.3 + Responsible: Andreas Barth + Description: Apt needs to transition to testing to get foo and bar done + Blocked Packages (total: 7): apt, synaptic, cron-apt, debtags, feta, apticron, aptitude + +Apt compare says: 4 +This transition is over, the target package reached testing, removing +apt wanted version: 0.2.12-1+b1.3, has 0.6.46.4-0.1~bpo.1 +------------------------------------------------------------------------- + +Looking at transition: bar_breaks_it + Source: bar + New Version: 9:99 + Responsible: Ganneff + Description: We dont want bar to break it + Blocked Packages (total: 3): kdelibs, qt4-x11, libqt-perl + +Transition source bar not in testing, transition still ongoing. +------------------------------------------------------------------------- +I: I would remove the lintian_funtest transition +I: I would remove the apt_update transition + + +Not using the -n switch would turn the I: into actual removals :) +The check-transition command is meant for the release team to always run +it when they change a transition definition. 
It checks if the YAML is +valid and can be loaded (but if not the archive simply does not reject) +and also shows a nice overview. diff --git a/scripts/debian/byhand-di b/scripts/debian/byhand-di new file mode 100755 index 00000000..0a004f38 --- /dev/null +++ b/scripts/debian/byhand-di @@ -0,0 +1,101 @@ +#!/bin/sh -ue + +if [ $# -lt 4 ]; then + echo "Usage: $0 filename version arch changes_file" + exit 1 +fi + +TARBALL="$1" # Tarball to read, compressed with gzip +VERSION="$2" +ARCH="$3" +CHANGES="$4" # Changes file for the upload + +error() { + echo "$*" + exit 1 +} + +# Check validity of version number +# Expected are: YYYYMMDD, YYYYMMDD.x, YYYYMMDDx +if ! echo "$VERSION" | grep -Eq "^[0-9]{8}(|(\.|[a-z]+)[0-9]+)$"; then + error "Invalid version: '$VERSION'" +fi + +# Get the target suite from the Changes file +# NOTE: it may be better to pass this to the script as a parameter! +SUITE="$(grep "^Distribution:" "$CHANGES" | awk '{print $2}')" +case $SUITE in + "") + error "Error: unable to determine suite from Changes file" + ;; + unstable|sid) + : # nothing to do + ;; + *) + SUITE="${SUITE}-proposed-updates" + ;; +esac + +# This must end with / +TARGET="/srv/ftp.debian.org/ftp/dists/$SUITE/main/installer-$ARCH/" + +# Check validity of the target directory +# This could fail, for example for new architectures; doing +# a regular BYHAND is safer in that case +if [ ! 
-d "$TARGET" ]; then + mkdir -p "$TARGET" +fi +# Check that there isn't already a directory for this version +if [ -d "$TARGET/$VERSION" ]; then + error "Directory already exists: $TARGET/$VERSION" +fi + +# We know all data to be in ./installer-/; see if there's +# anything else in the tarball except that and the 'current' symlink +if tar tzf "$TARBALL" | \ + grep -Eqv "^\./(installer-$ARCH/($VERSION/.*|current|)|)$"; then + error "Tarball contains unexpected contents" +fi + +# Create a temporary directory where to store the images +umask 002 +TMPDIR="$(mktemp -td byhand-di.XXXXXX)" + +# If we fail somewhere, cleanup the temporary directory +cleanup() { + rm -rf "$TMPDIR" +} +trap cleanup EXIT + +# Extract the data into the temporary directory +tar xzf "$TARBALL" --directory="$TMPDIR" "./installer-$ARCH/" + +# Check the 'current' symlink +if [ ! -L $TMPDIR/installer-$ARCH/current ]; then + error "Missing 'current' symlink" +elif [ X"$(readlink "$TMPDIR/installer-$ARCH/current")" != X"$VERSION" ]; then + error "Incorrect 'current' symlink" +fi + +# We should have an MD5SUMS file; use that for a final check +if [ -r "$TMPDIR/installer-$ARCH/$VERSION/images/MD5SUMS" ]; then + ( + cd "$TMPDIR/installer-$ARCH/$VERSION/images" + md5sum -c --status MD5SUMS || error "Error while checking MD5SUMS" + ) +else + error "Missing MD5SUMS file" +fi + +# Move the data to the final location +mv "$TMPDIR/installer-$ARCH/$VERSION" "$TARGET" +mv "$TMPDIR/installer-$ARCH/current" "$TARGET" + +# Fixup permissions +find "$TARGET/$VERSION" -type d -exec chmod 755 {} + +find "$TARGET/$VERSION" -type f -exec chmod 644 {} + + +trap - EXIT +cleanup + +exit 0 diff --git a/scripts/debian/dm-monitor b/scripts/debian/dm-monitor index d4616433..87846b70 100755 --- a/scripts/debian/dm-monitor +++ b/scripts/debian/dm-monitor @@ -9,6 +9,25 @@ psql --html projectb <' OR + m.name LIKE '% <' || substring(u.uid FROM 4) || '>') + WHERE u.uid LIKE 'dm:%' AND sa.suite = 5 + ) +ORDER BY uid.uid; +EOF + echo 
"Packages debian maintainers may update:" psql --html projectb <') + JOIN uid u ON (m.name LIKE u.name || ' <%>' OR + m.name LIKE '% <' || substring(u.uid FROM 4) || '>') WHERE u.uid LIKE 'dm:%' AND sa.suite = 5 ORDER BY u.uid, s.source, s.version; EOF diff --git a/scripts/debian/expire_dumps b/scripts/debian/expire_dumps new file mode 100755 index 00000000..9fa6adeb --- /dev/null +++ b/scripts/debian/expire_dumps @@ -0,0 +1,143 @@ +#!/usr/bin/python + +# Copyright (C) 2007 Florian Reitmeir +# Copyright (C) 2008 Joerg Jaspert + +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +# requires: python-dateutil + +import glob, os, sys +import time, datetime +import re +from datetime import datetime +from datetime import timedelta +from optparse import OptionParser + +RULES = [ + {'days':14, 'interval':0}, + {'days':31, 'interval':7}, + {'days':365, 'interval':31}, + {'days':3650, 'interval':365}, + + # keep 14 days, all each day + # keep 31 days, 1 each 7th day + # keep 365 days, 1 each 31th day +] + +TODAY = datetime.today() +VERBOSE = False +NOACTION = False +PRINT = False +PREFIX = '' +PATH = '' + +def all_files(pattern, search_path, pathsep=os.pathsep): + """ Given a search path, yield all files matching the pattern. """ + for path in search_path.split(pathsep): + for match in glob.glob(os.path.join(path, pattern)): + yield match + +def parse_file_dates(list): + out = [] + # dump_2006.05.02-11:52:01.bz2 + p = re.compile('^\./dump_([0-9]{4})\.([0-9]{2})\.([0-9]{2})-([0-9]{2}):([0-9]{2}):([0-9]{2})(.bz2)?$') + for file in list: + m = p.search(file) + if m: + d = datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)), int(m.group(5)), int(m.group(6))) + out.append({'name': file, 'date': d}) + return out + +def prepare_rules(rules): + out = [] + for rule in rules: + out.append( + { + 'days':timedelta(days=rule['days']), + 'interval':timedelta(days=rule['interval'])} + ) + return out + +def expire(rules, list): + t_rules=prepare_rules(rules) + rule = t_rules.pop(0) + last = list.pop(0) + + for file in list: + if VERBOSE: + print "current file to expire: " + file['name'] + print file['date'] + + # check if rule applies + if (file['date'] < (TODAY-rule['days'])): + if VERBOSE: + print "move to next rule" + if t_rules: + rule = t_rules.pop(0) + + if (last['date'] - file['date']) < rule['interval']: + if VERBOSE: + print "unlink file:" + file['name'] + if PRINT: + print file['name'] + if not NOACTION: + os.unlink(file['name']) + else: + last = file + if VERBOSE: + print "kept file:" + file['name'] + + +parser = OptionParser() 
+parser.add_option("-d", "--directory", dest="directory", + help="directory name", metavar="Name") +parser.add_option("-f", "--pattern", dest="pattern", + help="Pattern maybe some glob", metavar="*.backup") +parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, + help="verbose") +parser.add_option("-n", "--no-action", action="store_true", dest="noaction", default=False, + help="just prints what would be done, this implies verbose") +parser.add_option("-p", "--print", action="store_true", dest="printfiles", default=False, + help="just print the filenames that should be deleted, this forbids verbose") + +(options, args) = parser.parse_args() + +if (not options.directory): + parser.error("no directory to check given") + +if options.noaction: + VERBOSE=True + NOACTION=True + +if options.verbose: + VERBOSE=True + +if options.printfiles: + VERBOSE=False + PRINT=True + +files = sorted( list(all_files(options.pattern,options.directory)), reverse=True ); + +if not files: + sys.exit(0) + +files_dates = parse_file_dates(files); +expire(RULES, files_dates) diff --git a/scripts/debian/insert_missing_changedby.py b/scripts/debian/insert_missing_changedby.py new file mode 100755 index 00000000..7b817b5e --- /dev/null +++ b/scripts/debian/insert_missing_changedby.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python + +# Adds yet unknown changedby fields when this column is added to an existing +# database. If everything goes well, it needs to be run only once. Data is +# extracted from Filippo Giunchedi's upload-history project, get the file at +# merkel:/home/filippo/upload-history/*.db. + +# Copyright (C) 2008 Christoph Berg + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +############################################################################### + +# /Everybody stand back/ +# +# I know regular expressions + +############################################################################### + +import errno, fcntl, os, sys, time, re +import apt_pkg +import daklib.database +import daklib.queue +import daklib.utils +from pysqlite2 import dbapi2 as sqlite + +projectB = None +DBNAME = "uploads-ddc.db" +sqliteConn = None + +############################################################################### + +def insert (): + print "Adding missing changedby fields." 
+ + projectB.query("BEGIN WORK") + + q = projectB.query("SELECT id, source, version FROM source WHERE changedby IS NULL") + + for i in q.getresult(): + print i[1] + "/" + i[2] + ":", + + cur = sqliteConn.cursor() + cur.execute("SELECT changedby FROM uploads WHERE package = '%s' AND version = '%s' LIMIT 1" % (i[1], i[2])) + res = cur.fetchall() + if len(res) != 1: + print "nothing found" + continue + + changedby = res[0][0].replace("'", "\\'") + changedby_id = daklib.database.get_or_set_maintainer_id(changedby) + + projectB.query("UPDATE source SET changedby = %d WHERE id = %d" % (changedby_id, i[0])) + print changedby, "(%d)" % changedby_id + + projectB.query("COMMIT WORK") + +############################################################################### + +def main(): + global projectB, sqliteConn + + Cnf = daklib.utils.get_conf() + Upload = daklib.queue.Upload(Cnf) + projectB = Upload.projectB + + sqliteConn = sqlite.connect(DBNAME) + + insert() + +############################################################################### + +if __name__ == '__main__': + main() diff --git a/scripts/debian/update-ftpstats b/scripts/debian/update-ftpstats index 17a47cb1..d0d519d5 100755 --- a/scripts/debian/update-ftpstats +++ b/scripts/debian/update-ftpstats @@ -2,7 +2,7 @@ my %data; my %data2; -my @archs = ("source", "all", "amd64", "i386", "alpha", "arm", "hppa", +my @archs = ("source", "all", "amd64", "i386", "alpha", "arm", "armel", "hppa", "hurd-i386", "ia64", "m68k", "mips", "mipsel", "powerpc", "s390", "sparc"); diff --git a/setup/add_constraints.sql b/setup/add_constraints.sql index f25165c1..1d2bad66 100644 --- a/setup/add_constraints.sql +++ b/setup/add_constraints.sql @@ -5,6 +5,7 @@ ALTER TABLE files ADD CONSTRAINT files_location FOREIGN KEY (location) REFERENCES location(id) MATCH FULL; ALTER TABLE source ADD CONSTRAINT source_maintainer FOREIGN KEY (maintainer) REFERENCES maintainer(id) MATCH FULL; +ALTER TABLE source ADD CONSTRAINT source_changedby FOREIGN KEY 
(changedby) REFERENCES maintainer(id) MATCH FULL; ALTER TABLE source ADD CONSTRAINT source_file FOREIGN KEY (file) REFERENCES files(id) MATCH FULL; ALTER TABLE source ADD CONSTRAINT source_sig_fpr FOREIGN KEY (sig_fpr) REFERENCES fingerprint(id) MATCH FULL; @@ -104,10 +105,12 @@ GRANT ALL ON architecture, architecture_id_seq, archive, archive_id_seq, bin_associations, bin_associations_id_seq, binaries, binaries_id_seq, component, component_id_seq, dsc_files, dsc_files_id_seq, files, files_id_seq, fingerprint, - fingerprint_id_seq, location, location_id_seq, maintainer, + fingerprint_id_seq, keyrings, keyrings_id_seq, + location, location_id_seq, maintainer, maintainer_id_seq, override, override_type, override_type_id_seq, priority, priority_id_seq, section, section_id_seq, source, - source_id_seq, src_associations, src_associations_id_seq, suite, + source_id_seq, src_uploaders, src_uploaders_id_seq, + src_associations, src_associations_id_seq, suite, suite_architectures, suite_id_seq, queue_build, uid, uid_id_seq TO GROUP ftpmaster; @@ -116,9 +119,11 @@ GRANT SELECT ON architecture, architecture_id_seq, archive, archive_id_seq, bin_associations, bin_associations_id_seq, binaries, binaries_id_seq, component, component_id_seq, dsc_files, dsc_files_id_seq, files, files_id_seq, fingerprint, - fingerprint_id_seq, location, location_id_seq, maintainer, + fingerprint_id_seq, keyrings, keyrings_id_seq, + location, location_id_seq, maintainer, maintainer_id_seq, override, override_type, override_type_id_seq, priority, priority_id_seq, section, section_id_seq, source, - source_id_seq, src_associations, src_associations_id_seq, suite, + source_id_seq, src_uploaders, src_uploaders_id_seq, + src_associations, src_associations_id_seq, suite, suite_architectures, suite_id_seq, queue_build, uid, uid_id_seq TO PUBLIC; diff --git a/setup/init_pool.sql b/setup/init_pool.sql index 168d981a..9925148c 100644 --- a/setup/init_pool.sql +++ b/setup/init_pool.sql @@ -34,10 +34,17 @@ CREATE 
TABLE uid ( name TEXT ); +CREATE TABLE keyrings ( + id SERIAL PRIMARY KEY, + name TEXT +); + + CREATE TABLE fingerprint ( id SERIAL PRIMARY KEY, fingerprint TEXT UNIQUE NOT NULL, - uid INT4 REFERENCES uid + uid INT4 REFERENCES uid, + keyring INT4 REFERENCES keyrings ); CREATE TABLE location ( @@ -65,12 +72,19 @@ CREATE TABLE source ( source TEXT NOT NULL, version TEXT NOT NULL, maintainer INT4 NOT NULL, -- REFERENCES maintainer + changedby INT4 NOT NULL, -- REFERENCES maintainer file INT4 UNIQUE NOT NULL, -- REFERENCES files install_date TIMESTAMP NOT NULL, sig_fpr INT4 NOT NULL, -- REFERENCES fingerprint unique (source, version) ); +CREATE TABLE src_uploaders ( + id SERIAL PRIMARY KEY, + source INT4 NOT NULL REFERENCES source, + maintainer INT4 NOT NULL REFERENCES maintainer +); + CREATE TABLE dsc_files ( id SERIAL PRIMARY KEY, source INT4 NOT NULL, -- REFERENCES source, @@ -171,3 +185,19 @@ CREATE INDEX binaries_maintainer ON binaries (maintainer); CREATE INDEX binaries_fingerprint on binaries (sig_fpr); CREATE INDEX source_fingerprint on source (sig_fpr); CREATE INDEX dsc_files_file ON dsc_files (file); + +-- Own function +CREATE FUNCTION space_concat(text, text) RETURNS text + AS $_$select case +WHEN $2 is null or $2 = '' THEN $1 +WHEN $1 is null or $1 = '' THEN $2 +ELSE $1 || ' ' || $2 +END$_$ + LANGUAGE sql; + +CREATE AGGREGATE space_separated_list ( + BASETYPE = text, + SFUNC = space_concat, + STYPE = text, + INITCOND = '' +); diff --git a/templates/security-install.advisory b/templates/security-install.advisory index 7f523292..baebe916 100644 --- a/templates/security-install.advisory +++ b/templates/security-install.advisory @@ -1,15 +1,12 @@ From: __DAK_ADDRESS__ To: __WHOAMI__ __BCC__ -MIME-Version: 1.0 -Content-Type: text/plain; charset="utf-8" -Content-Transfer-Encoding: 8bit Subject: Template Advisory __ADVISORY__ ------------------------------------------------------------------------ -Debian Security Advisory __ADVISORY__ security@debian.org +Debian 
Security Advisory __ADVISORY__ security@debian.org http://www.debian.org/security/ __WHOAMI__ -__DATE__ +__DATE__ http://www.debian.org/security/faq ------------------------------------------------------------------------ Package : __PACKAGE__ @@ -21,20 +18,27 @@ CERT advisory : XXX BugTraq ID : XXX Debian Bug : XXX -... +Several local/remote vulnerabilities have been discovered in... +The Common +Vulnerabilities and Exposures project identifies the following problems: [single issue] -For the stable distribution (woody), this problem has been fixed in version XXX +Foo discovered that -For the old stable distribution (potato), this problem has been fixed in -version XXX + +[single issue] +For the stable distribution (etch), this problem has been fixed in version XXX +__PACKAGE__ + +For the old stable distribution (sarge), this problem has been fixed in +version __PACKAGE__ [multiple issues] -For the stable distribution (woody), these problems have been fixed in version -XXX +For the stable distribution (etch), these problems have been fixed in version +__PACKAGE__ -For the old stable distribution (potato), these problems have been fixed in -version XXX +For the old stable distribution (sarge), these problems have been fixed in +version __PACKAGE__ We recommend that you upgrade your __PACKAGE__ package. @@ -58,6 +62,14 @@ apt-get upgrade You may use an automated update by adding the resources from the footer to the proper configuration. + +Debian GNU/Linux 3.1 alias sarge +-------------------------------- + +Debian GNU/Linux 4.0 alias etch +------------------------------- + + __ADVISORY_TEXT__ diff --git a/tools/debianqueued-0.9/ChangeLog b/tools/debianqueued-0.9/ChangeLog new file mode 100644 index 00000000..8bd3cf99 --- /dev/null +++ b/tools/debianqueued-0.9/ChangeLog @@ -0,0 +1,282 @@ + +-- Version 0.9 released + +1999-07-07 Linux FTP-Administrator + + * debianqueued: Implemented new upload methods "copy" and "ftp" as + alternatives to "ssh". 
"copy" simply copies files to another + directory on the queue host, "ftp" uses FTP to upload files. Both + of course need no ssh-agent. + New config vars: + $upload_method, $ftptimeout, $ftpdebug, $ls, $cp, $chmod, + Renamed config vars: + $master -> $target + $masterlogin -> $targetlogin + $masterdir -> $targetdir + $chmod_on_master -> $chmod_on_target + + Note that the FTP method has some limitations: If no SITE MD5SUM + command is supported by the server, uploaded files can be verified + by their size only. And if removing of files in the target dir + isn't allowed, upload errors can't be handled gracefully. + + * debianqueued: .changes files can now also be signed by GnuPG. + + * dqueued-watcher: Also updates debian-keyring.gpg. + +Tue Dec 8 14:09:44 1998 Linux FTP-Administrator + + * debianqueued (process_changes): After an upload, do not remove + files with the same name stem if a .changes file is among them. + Then there is probably a second upload for a different + version/architecture. + +-- Version 0.8 released + +Thu May 14 16:17:48 1998 Linux FTP-Administrator + + * debianqueued (process_changes): When --after a successfull + upload-- deleting files that seem to belong to the same job, check + for equal revision number on files that have one. It has happened + that the daemon deleted files that belonged to another job with + different revision, which shouldn't happen. The current algorithm + is more conservative, i.e. it tends not to delete such files. They + will be removed as stray files anyway after some time. + +Tue Apr 21 10:29:01 1998 Linux FTP-Administrator + + * debianqueued (check_incoming_writable): Also recognize + "read-only filesystem" as an error message that makes the daemon + think the incoming is unwritable. + + * debianqueued (check_dir): Break from the .changes loop if + $incoming_writable has become cleared. + + * debianqueued (process_changes): Don't increment failure count if + upload failed due to incoming dir being unwritable. 
+ + * debianqueued (check_dir): Don't use return value of + debian_file_stem as regexp, it's a shell pattern. + +Tue Mar 31 11:06:11 1998 Linux FTP-Administrator + + * debianqueued (process_changes, process_commands): Check for + improper mail addresses from Maintainer: fields and try to handle + them by looking up the string in the Debian keyring. New funtion + try_to_get_mail_addr for the latter. + + * debianqueued (fatal_signal): Kill status daemon only if it has + been started. + + * debianqueued (copy_to_master): Change mode of files uploaded to + master explicitly to 644. scp uses the permission from the + original files, and those could be restricted due to local upload + policies. + +Mon Mar 30 13:24:51 1998 Linux FTP-Administrator + + * dqueued-watcher (main): If called with arguments, only make + summaries for the log files given. With this, you can view the + summaries also between normal watcher runs. + + * dqueued-watcher (make_summary): New arg $to_stdout, to print + report directly to stdout instead of sending via mail. + +Tue Mar 24 14:18:18 1998 Linux FTP-Administrator + + * debianqueued (check_incoming_writable): New function that checks + if the incoming dir on master is writable (it isn't during a + freeze is done). The check is triggered if an upload fails due to + "permission denied" errors. Until the incoming is writable again, + the queue is holded and no uploads are tried (so that the max. + number of tries isn't exceeded.) + +-- Version 0.7 released + +Mon Mar 23 13:23:20 1998 Linux FTP-Administrator + + * debianqueued (process_changes): In an upload failure message, + say explicitly that the job will be retried, to avoid confusion of + users. + + * debianqueued (process_changes): $failure_file was put on + @keep_list only for first retry. + + * debianqueued (process_changes): If the daemon removes a + .changes, set SGID bit on all files associated with it, so that + the test for Debian files without a .changes doesn't find them. 
+ + * debianqueued (check_dir): Don't send reports for files without a + .changes if the files look like a recompilation for another + architecture. Then the maintainer extracted from the files isn't + the uploader. A job is treated like that if it doesn't include a + .dsc file and no *_{i386,all}.deb files. + + * debianqueued (check_dir): Also don't send such a report if the + list of files with the same stem contains a .changes. This can be + the case if an upload failed and the .changes is still around, and + there's some file with the same name stem but which isn't in the + .changes (e.g. .orig.tar.gz). + + * debianqueued (process_changes): Set @keep_list earlier, before + PGP and non-US checks. + + * debianqueued (main): Fix recognition of -k argument. + +Tue Feb 17 11:54:33 1998 Linux FTP-Administrator + + * debianqueued (check_dir): Added test for binaries that could + reside on slow NFS filesystems. It is specially annoying if pgp + isn't found, because then the .changes is deleted. If one of the + files listed in @conf::test_binaries isn't present immediately + before a queue run, that one is delayed. + +-- Version 0.6 released + +Tue Dec 9 14:53:23 1997 Linux FTP-Administrator + + * debianqueued (process_changes): Reject jobs whose package name + is in @nonus_packages (new config var). These must be uploaded to + nonus.debian.org instead of master itself. + +Tue Nov 25 11:02:38 1997 Linux FTP-Administrator + + * debianqueued (main): Implemented -k and -r arguments (kill or + restart daemon, resp.) + + * debianqueued (is_debian_file): Exclude orig.tar.gz files from + that class, so that the maintainer address isn't searched in them + if they happen to come first in the dir. + + * debianqueued (END): Fix kill call (pid and signo were swapped) + + * debianqueued (process_changes): Moved check if job is already on + master to a later stage, to avoid connecting to master as long as + there are still errors with the job (missing files or the like). 
+ + * debianqueued (check_alive): Lookup master's IP address before + every ping, it could change while the daemon is running... + +-- Version 0.5 released + +Mon Nov 11 14:37:52 1997 Linux FTP-Administrator + + * debianqueued (process_commands): rm command now can process more + than one argument and knows about wildcards + +Mon Nov 6 15:09:53 1997 Linux FTP-Administrator + + * debianqueued (process_commands): Recognize commands on the same + line as the Commands: keyword, not only on continuation lines. + +Mon Nov 3 16:49:57 1997 Linux FTP-Administrator + + * debianqueued (close_log): After reopening the log file, write + one message it. This avoids that dqueued-watcher's rotating + algorithm delays from several minutes to a few hours on every + rotate, since it looks at the time of the first entry. + +Thu Oct 30 13:56:35 1997 Linux FTP-Administrator + + * dqueued-watcher (make_summary): Added some new summary counters + for command files. + + * debianqueued (process_changes): Added check for files that seem + to belong to an upload (match debian_file_stem($changes)), but + aren't listed in the .changes. Most probably these are unneeded + .orig.tar.gz files. They are deleted. + + * debianqueued (print_status): Print revision and version number + of debianqueued in status file. + + * debianqueued (process_commands): New function, for processing + the new feature of .command files. These enable uploaders to + correct mistakes in the queue dir (corrupted/misnamed files) + +Wed Oct 29 15:35:03 1997 Linux FTP-Administrator + + *debianqueued (check_dir): Extra check for files that look like an + upload, but miss a .changes file. A problem report is sent to the + probable uploader after $no_changes_timeout seconds (new config + var). The maintainer email can be extracted from .dsc, .deb, + .diff.gz and .tar.gz files (though the maintainer needs not + necessarily be the uploader...) New utility functions + is_debian_file, get_maintainer, debian_file_stem. 
+ + * debianqueued (pgp_check, get_maintainer): Quote filenames used + on sh command lines, so metacharacters in the names can't do bad + things. (Though wu-ftpd generally shouldn't allow uploading files + with such names.) + + * debianqueued (print_time): Print times always as + hour:minute:second, i.e. don't omit the hour if it's 0. This could + confuse users, because they don't know if the hour or the seconds + are missing. + +-- Version 0.4 released + +Thu Sep 25 13:18:57 1997 Linux FTP-Administrator + + * debianqueued (process_changes): Forgot to remove a bad .changes + file in some cases (no mail address, not PGP signed at all, no + files mentioned). Also initialize some variables to avoid Perl + warnings. + +Wed Sep 17 14:15:21 1997 Linux FTP-Administrator + + * dqueued-watcher (make_summary): Add feature of writing summaries + also to a file. Config var do_summary renamed to mail_summary, + additional var summary_file. + +Mon Sep 15 11:56:59 1997 Linux FTP-Administrator + + * dqueued-watcher: Log several activities of the watcher to the log + file; new function logger() for this. + + * debianqueued (process_changes, check_alive): Make some things more + verbose in non-debug mode. + +Mon Aug 18 13:25:04 1997 Linux FTP-Administrator + + * dqueued-watcher (rotate_log): Using the log file's ctime for + calculating its age was a rather bad idea -- starting the daemon + updates that time stamp. Now the first date found in the log file + is used as basis for age calculation. + + * dqeued-watcher (make_summary): New function to build a summary + of daemon actions when rotating logs. Controlled by config + variable $do_summary. + +Tue Aug 12 13:26:52 1997 Linux FTP-Administrator + + * Makefile: new files with targets for automating various + administrative tasks + +-- Version 0.3 released + +Mon Aug 11 10:48:31 1997 Linux FTP-Administrator + + * debianqueued (is_on_master, copy_to_master): Oops, forget + alarm(0)'s to turn off timeouts again. 
+ + * debianqueued: Revised the startup scheme so that it also works + with the socket-based ssh-agent. That agent periodically checks + whether the process it started is still alive and otherwise exits. + For that, the go-into-background fork must be done before + ssh-agent is started. + + * debianqueued: Implemented close_log and SIGHUP handling for + logfile rotating. + + * dqueued-watcher: Implemented log file rotating. + +Thu Aug 07 11:25:22 1997 Linux FTP-Administrator + + * debianqueued (is_on_master, copy_to_master): added timeouts to + all ssh/scp operations, because I've seen one once hanging... + +-- Started ChangeLog +-- Version 0.2 released + +$Id: ChangeLog,v 1.36 1999/07/08 09:43:24 ftplinux Exp $ + diff --git a/tools/debianqueued-0.9/Makefile b/tools/debianqueued-0.9/Makefile new file mode 100644 index 00000000..2e8727e6 --- /dev/null +++ b/tools/debianqueued-0.9/Makefile @@ -0,0 +1,117 @@ +# +# Makefile for debianqueued -- only targets for package maintainance +# +# $Id: Makefile,v 1.10 1998/03/25 09:21:01 ftplinux Exp $ +# +# $Log: Makefile,v $ +# Revision 1.10 1998/03/25 09:21:01 ftplinux +# Implemented snapshot target +# +# Revision 1.9 1998/03/23 14:10:28 ftplinux +# $$num in make upload needs braces because _ follows +# +# Revision 1.8 1997/12/16 13:20:57 ftplinux +# add _all to changes name in upload target +# +# Revision 1.7 1997/11/20 15:34:11 ftplinux +# upload target should copy only current release to queue dir +# +# Revision 1.6 1997/09/29 14:28:38 ftplinux +# Also fill in Version: for .changes file +# +# Revision 1.5 1997/09/25 11:33:48 ftplinux +# Added automatic adding of release number to ChangeLog +# +# Revision 1.4 1997/08/18 11:29:11 ftplinux +# Include new release number in message of cvs commits +# +# Revision 1.3 1997/08/12 10:39:08 ftplinux +# Added generation of .changes file in 'dist' target; added 'upload' +# target (using the queue :-) +# +# Revision 1.2 1997/08/12 10:01:32 ftplinux +# Fixed dist target to work (last 
checkin was needed to test it at all) +# +# + +CVS = cvs +RELNUMFILE = release-num +# files that contain the release number +FILES_WITH_NUM = debianqueued dqueued-watcher +# name of cvs module +MODULE = debianqueued + +.PHONY: default release dist + +default: + @echo "Nothing to make -- the Makefile is only for maintainance purposes" + @exit 1 + +# Usage: +# make release (use number from file release-num) +# or +# make release RELNUM=x.y (writes new number to release-num) + +release: + if cvs status $(RELNUMFILE) | grep -q Up-to-date; then true; else \ + echo "$(RELNUMFILE) needs commit first"; exit 1; \ + fi +ifdef RELNUM + echo $(RELNUM) >$(RELNUMFILE) + cvs commit -m "Bumped release number to `cat $(RELNUMFILE)`" $(RELNUMFILE) +endif + perl -pi -e "s/Release: \S+/Release: `cat $(RELNUMFILE)`/;" \ + $(FILES_WITH_NUM) + cvs commit -m "Bumped release number to `cat $(RELNUMFILE)`" $(FILES_WITH_NUM) + if grep -q "Version `cat release-num` released" ChangeLog; then true; else \ + mv ChangeLog ChangeLog.orig; \ + echo "" >ChangeLog; \ + echo "-- Version `cat $(RELNUMFILE)` released" >>ChangeLog; \ + echo "" >>ChangeLog; \ + cat ChangeLog.orig >>ChangeLog; \ + rm ChangeLog.orig; \ + cvs commit -m "Bumped release number to `cat $(RELNUMFILE)`" ChangeLog; \ + fi + cvs tag release-`cat $(RELNUMFILE) | sed 's/\./-/'` + +dist: + set -e; \ + num=`cat $(RELNUMFILE)`; name=debianqueued-$$num; \ + mkdir tmp; \ + (cd tmp; cvs export -r release-`echo $$num | sed 's/\./-/'` $(MODULE); \ + mv $(MODULE) $$name; \ + tar cvf ../../$$name.tar $$name); \ + gzip -9f ../$$name.tar; \ + rm -rf tmp; \ + file=../$$name.tar.gz; \ + md5=`md5sum $$file | awk -e '{print $$1}'`; \ + size=`ls -l $$file | awk -e '{print $$4}'`; \ + chfile=../debianqueued_`cat $(RELNUMFILE)`_all.changes; \ + sed -e "s/^Date: .*/Date: `822-date`/" -e "s/Version: .*/Version: `cat $(RELNUMFILE)`/" $$chfile; \ + echo " $$md5 $$size byhand - $$name.tar.gz" >>$$chfile; \ + pgp -u 'Roman Hodek' +clearsig=on -fast <$$chfile 
>$$chfile.asc; \ + mv $$chfile.asc $$chfile + +# can only be used on ftp.uni-erlangen.de :-) +upload: + set -e; \ + num=`cat $(RELNUMFILE)`; \ + cp ../debianqueued-$$num.tar.gz ../debianqueued_$${num}_all.changes $$HOME/Linux/debian/UploadQueue + +# make snapshot from current sources +snapshot: + set -e; \ + modified=`cvs status 2>/dev/null | awk '/Status:/ { if ($$4 != "Up-to-date") print $$2 }'`; \ + if [ "x$$modified" != "x" ]; then \ + echo "There are modified files: $$modified"; \ + echo "Commit first"; \ + exit 1; \ + fi; \ + name=debianqueued-snapshot-`date +%y%m%d`; \ + rm -rf tmp; \ + mkdir tmp; \ + (cd tmp; cvs export -D now $(MODULE); \ + mv $(MODULE) $$name; \ + tar cvf ../../$$name.tar $$name); \ + gzip -9f ../$$name.tar; \ + rm -rf tmp diff --git a/tools/debianqueued-0.9/PROBLEMS b/tools/debianqueued-0.9/PROBLEMS new file mode 100644 index 00000000..139fb59d --- /dev/null +++ b/tools/debianqueued-0.9/PROBLEMS @@ -0,0 +1,29 @@ + +This is a list of problems that I have seen: + + - One an upload failed with the following error: + + Jul 8 12:13:53 Upload to master.debian.org failed, last exit status 1 + Jul 8 12:13:53 Error messages from scp: + bind: Permission denied + lost connection + + Never seen such an error from ssh/scp before... But since it didn't + happen again, I suspect something with master and/or the net. + + - There are some protocol problems between certain ssh version (on + client/server side). The effect is that scp either hangs itself + (times out after $remote_timeout), or leaves ssh processes hanging + around. I've noticed that with ssh 1.2.19 on the server. I have a + prototype for a workaround, but haven't included it in + debianqueued, because master has been updated to 1.2.20 now and the + problem disappeared. + + - The "ftp" method has some limitiations: + 1) Files in the target dir can't be deleted. + 2) Uploaded files can't be verified as good as with the other methods. + 3) $chmod_on_target often doesn't work. 
+ 4) The check for a writable incoming directory leaves temporary files + behind. + +$Id: PROBLEMS,v 1.4 1999/07/08 09:34:52 ftplinux Exp $ diff --git a/tools/debianqueued-0.9/Queue.README b/tools/debianqueued-0.9/Queue.README new file mode 100644 index 00000000..a8681d15 --- /dev/null +++ b/tools/debianqueued-0.9/Queue.README @@ -0,0 +1,59 @@ + +This directory is the Debian upload queue of ftp.uni-erlangen.de. All +files uploaded here will be moved into the project incoming dir on +master.debian.org. + +Only known Debian developers can upload here. All uploads must be in +the same format as they would go to master, i.e. with a PGP-signed +.changes file that lists all files that belong to the upload. Files +not meeting this condition will be removed automatically after some +time. + +The queue daemon will notify you by mail of success or any problems +with your upload. For this, the Maintainer: field in the .changes must +contain your (the uploader's) correct e-mail address, not the address +of the real maintainer (if different). The same convention applies to +master itself, which sends installation acknowledgements to the +address in Maintainer:. + + +*.commands Files +---------------- + +Besides *.changes files, you can also upload *.commands files for the +daemon to process. With *.commands files, you can instruct the daemon +to remove or rename files in the queue directory that, for example, +resulted from failed or interrupted uploads. A *.commands file looks +much like a *.changes, but contains only two fields: Uploader: and +Commands:. It must be PGP-signed by a known Debian developer, to avoid +that E.V.L. Hacker can remove/rename files in the queue. The basename +(the part before the .commands extension) doesn't matter, but best +make it somehow unique. + +The Uploader: field should contain the mail address to which the reply +should go, just like Maintainer: in a *.changes. Commands: is a +multi-line field like e.g. 
Description:, so each continuation line +should start with a space. Each line in Commands: can contain a +standard 'rm' or 'mv' command, but no options are allowed, and +filenames may not contain slashes (so that they're restricted to the +queue directory). 'rm' can process as much arguments as you give it +(not only one), and also knows about the shell wildcards *, ?, and []. + +Example of a *.commands file: + +-----BEGIN PGP SIGNED MESSAGE----- + +Uploader: Roman Hodek +Commands: + rm hello_1.0-1_i386.deb + mv hello_1.0-1.dsx hello_1.0-1.dsc + +-----BEGIN PGP SIGNATURE----- +Version: 2.6.3ia + +iQCVAwUBNFiQSXVhJ0HiWnvJAQG58AP+IDJVeSWmDvzMUphScg1EK0mvChgnuD7h +BRiVQubXkB2DphLJW5UUSRnjw1iuFcYwH/lFpNpl7XP95LkLX3iFza9qItw4k2/q +tvylZkmIA9jxCyv/YB6zZCbHmbvUnL473eLRoxlnYZd3JFaCZMJ86B0Ph4GFNPAf +Z4jxNrgh7Bc= +=pH94 +-----END PGP SIGNATURE----- diff --git a/tools/debianqueued-0.9/Queue.message b/tools/debianqueued-0.9/Queue.message new file mode 100644 index 00000000..1653f57c --- /dev/null +++ b/tools/debianqueued-0.9/Queue.message @@ -0,0 +1,3 @@ + +This directory is the Debian upload queue of ftp.uni-erlangen.de. Only +known Debian developers can upload here. diff --git a/tools/debianqueued-0.9/README b/tools/debianqueued-0.9/README new file mode 100644 index 00000000..3f5435af --- /dev/null +++ b/tools/debianqueued-0.9/README @@ -0,0 +1,724 @@ + debianqueued -- daemon for managing Debian upload queues + ======================================================== + +Copyright (C) 1997 Roman Hodek +$Id: README,v 1.20 1999/07/08 09:35:37 ftplinux Exp $ + + +Copyright and Disclaimer +------------------------ + +This program is free software. You can redistribute it and/or +modify it under the terms of the GNU General Public License as +published by the Free Software Foundation: either version 2 or +(at your option) any later version. + +This program comes with ABSOLUTELY NO WARRANTY! 
+ +You're free to modify this program at your will, according to the GPL, +and I don't object if you modify the program. But it would be nice if +you could send me back such changes if they could be of public +interest. I will try to integrate them into the mainstream version +then. + + +Installation +------------ + +debianqueued has been written for running a new Debian upload queue at +ftp.uni-erlangen.de, but I tried to keep it as general as possible and +it should be useable for other sites, too. If there should be +non-portabilities, tell me about them and we'll try to get them fixed! + +Before installing debianqueued, you should have the following +utilities installed: + + - pgp (needed for checking signatures) + + - ssh & Co. (but not necessarily sshd, only client programs used) + + - md5sum (for checking file integrity) + + - mkfifo (for creating the status FIFO) + + - GNU tar + + - gzip + + - ar (for analyzing .deb files) + +The daemon needs a directory of its own where the scripts reside and +where it can put certain files. This directory is called $queued_dir +in the Perl scripts and below. There are no special requirements where +in the filesystem hierarchy this directory should be. + +All configurations are done in file 'config' in $queued_dir. For +security reasons, the $queued_dir should not be in a public FTP area, +and should be writeable (as the files in it) only for the user +maintaining the local debianqueued. + +The file Queue.README and Queue.message in the distribution archive +are examples for README and .message files to put into the queue +directory. Modify them as you like, or don't install them if you +don't like them... + + +Running debianqueued +-------------------- + +debianqueued is intended to run all time, not as a cron job. +Unfortunately, you can't start it at system boot time automatically, +because a human has to type in the pass phrase for the ssh key. So you +have to start the daemon manually. 
+
+The daemon can be stopped by simply killing it (with SIGTERM
+preferably). SIGTERM and SIGINT are blocked during some operations,
+where it could leave files in an inconsistent state. So it may take
+some time until the daemon really dies. If you have the urgent need
+that it goes away immediately, use SIGQUIT. Please don't use SIGKILL
+except when unavoidable, because the daemon can't clean up after this
+signal.
+
+For your convenience, the daemon can kill and restart itself. If you
+start debianqueued with a "-k" argument, it tries to kill a running
+daemon (and it complains if none is running.) If "-r" is on the
+command line, it tries to kill a running daemon first if there is one.
+(If not, it starts anyway, but prints a little warning.) If a daemon
+is running and a new one is started without "-r", you get an error
+message about this. This is to protect you from restarting the daemon
+without intention.
+
+The other script, dqueued-watcher, is intended as a cron job, and it
+watches that the daemon is running, in case that it should crash
+sometimes. It also takes care of updating the Debian keyring files if
+necessary. You should enter it e.g. like
+
+ 0,30 * * * * .../dqueued-watcher
+
+into your crontab. (Assuming you want to run it every 30 minutes,
+which seems a good compromise.)
+
+Both scripts (debianqueued and dqueued-watcher) need no special
+privileges and thus can be run as an ordinary user (not root). You
+can create an own user for debianqueued (e.g. "dqueue"), but you need
+not. The only difference could be which ssh key is used for connects
+to the target host. But you can configure the file to take the ssh key
+from in the config file.
+
+
+The Config File
+---------------
+
+The config file, $queued_dir/config, is plain Perl code and is
+included by debianqueued and dqueued-watcher. You can set the
+following variables there:
+
+ - $debug:
+ Non-zero values enable debugging output (to log file). 
+ +The following are all programs that debianqueued calls. You should +always use absolute pathnames! + + - $pgp, $ssh, $scp, $ssh_agent, $ssh_add, $md5sum, $mail, $mkfifo, + $tar, $gzip, $ar + + Notes: + + o $mail should support the -s option for supplying a subject. + Therefore choose mailx if your mail doesn't know -s. + + o $tar should be GNU tar, several GNU features are used (e.g. + --use-compress-program). + + o $ar must be able to unpack *.deb files and must understand the + 'p' command. Better check this first... If you don't define $ar + (or define it to be empty), debianqueued won't be able to + extract a maintainer address from .deb files. (Which isn't that + disturbing...) + + - @test_binaries: + + All binaries listed in this variable are tested to be present + before each queue run. If any is not available, the queue run is + delayed. This test can be useful if those binaries reside on NFS + filesystems which may be (auto-)mounted only slowly. It is + specially annoying for users if pgp can't be found and a .changes + is deleted. + + - $ssh_options: + Options passed to ssh and scp on every call. General ssh + configuration should be done here and not in ~/.ssh/config, to + avoid dependency on the user's settings. A good idea for + $ssh_options seems to be + + -o'BatchMode yes' -o'FallBackToRsh no' -o'ForwardAgent no' + -o'ForwardX11 no' -o'PasswordAuthentication no' + -o'StrictHostKeyChecking yes' + + - $ssh_key_file: + The file containing the ssh key you want the daemon to use for + connects to the target host. If you leave this empty, the default + ~/.ssh/identity is used, which may or may not be what you want. + + - $incoming: + This names the queue directory itself. Probably it will be inside + the public FTP area. Don't forget to allow uploads to it in + ftpaccess if you're using wu-ftpd. 
+
+ Maybe you should also allow anonymous users to rename files in that
+ directory, to fix upload problems (they can't delete files, so they
+ have to move the erroneous file out of the way). But this
+ introduces a denial-of-service security hole, that an attacker
+ renames files of other people and then a job won't be done. But at
+ least the data aren't lost, and the rename command probably was
+ logged by ftpd. Nevertheless, there's no urgent need to allow
+ renamings, because the queue daemon deletes all bad files
+ automatically, so they can be reuploaded under the same name.
+ Decide on your own...
+
+ - $keep_files:
+ This is a regular expression for files that never should be deleted
+ in the queue directory. The status file must be included here,
+ other probable candidates are .message and/or README files.
+
+ - $chmod_on_target:
+ If this variable is true (i.e., not 0 or ""), all files belonging
+ to a job are changed to mode 644 only on the target host. The
+ alternative (if the variable is false, i.e. 0) is to change the
+ mode already locally, after the sizes and md5 sums have been
+ verified. The latter is the default.
+
+ The background for this is the following: The files must be
+ world-readable on master for dinstall to work, so they must be at
+ least mode 444, but 644 seems more useful. If the upload policy of
+ your site says that uploaded files shouldn't be readable for world,
+ the queue daemon has to change the permission at some point of
+ time. (scp copies a file's permissions just as the contents, so
+ after scp, the files on the target have the same mode as in the
+ queue directory.) If the files in the queue are mode 644 anyway,
+ you don't need to care about this option. The default --to give
+ world read permission in the queue already after some checks-- is
+ obviously less restrictive, but might be against the policy of your
+ site. 
The alternative keeps the files unreadable in the queue in + any case, and they'll be readable only on the target host. + + - $statusfile: + This is the name of the status file or FIFO, through which users + can ask the daemon what it's currently doing. It should normally be + in the queue directory. If you change the name, please don't forget + to check $keep_files. See also the own section on the status file. + + If you leave $statusfile empty, the daemon doesn't create and + manage a status file at all, if you don't want it. Unfortunately, + dqueued-watcher's algorithm to determine whether it already has + reported a missing daemon depends on the status file, so this + doesn't work anymore in this case. You'll get dead daemon mails on + every run of dqueued-watcher. + + - $statusdelay: + If this number is greater than 0, the status file is implemented as + a regular file, and updated at least every $statusdelay seconds. If + $statusdelay is 0, the FIFO implementation is used (see status file + section). + + - $keyring: + The name of the PGP keyring the daemon uses to check PGP signatures + of .changes files. This is usually $queued_dir/debian-keyring.pgp. + It should contain exactly the keys of all Debian developers (i.e. + those and no other keys). + + - $gpg_keyring: + The name of the GnuPG keyring. The daemon now alternatively accepts + GnuPG signatures on .changes and .commands files. The value here is + usually $queued_dir/debian-keyring.gpg. It should contain only keys + of Debian developers (but not all developers have a GPG key + yet...). + + - $keyring_archive: + Path of the debian-keyring.tar.gz file inside a Debian mirror. The + file is "/debian/doc/debian-keyring.tar.gz" on ftp.debian.org, + don't know where you mirror it to... Leave it empty if you don't + have that file on your local machine. But then you'll have to + update the keyring manually from time to time. 
+ + - $keyring_archive_name: + Name of the PGP keyring file in the archive $keyring_archive. Currently + "debian-keyring*/debian-keyring.pgp". + + - $gpg_keyring_archive_name: + Name of the GnuPG keyring file in the archive $keyring_archive. Currently + "debian-keyring*/debian-keyring.gpg". + + - $logfile: + The file debianqueued writes its logging data to. Usually "log" in + $queued_dir. + + - $pidfile: + The file debianqueued writes its pid to. Usually "pid" in + $queued_dir. + + - $target: + Name of the target host, i.e. the host where the queue uploads to. + Usually "master.debian.org". (Ignored with "copy" upload method.) + + - $targetlogin: + The login on the target to use for uploads. (Ignored with "copy" + and "ftp" upload methods; "ftp" always does anonymous logins.) + + - $targetdir: + The directory on the target to where files should be uploaded. On + master.debian.org this currently is + "/home/Debian/ftp/private/project/Incoming". + + - $max_upload_retries: + This is the number how often the daemon tries to upload a job (a + .changes + the files belonging to it). After that number is + exhausted, all these files are deleted. + + - $log_age: + This is how many days are waited before logfiles are rotated. (The + age of the current log files is derived from the first date found + in it.) + + - $log_keep: + How many old log files to keep. The current logfile is what you + configured as $logfile above, older versions have ".0", ".1.gz", + ".2.gz", ... appended. I.e., all old versions except the first are + additionally gzipped. $log_keep is one higher than the max. + appended number that should exist. + + - $mail_summary: + If this is set to a true value (not 0 and not ""), dqueued-watcher + will send a mail with a summary of the daemon's acivities whenever + logfiles are rotated. + + - $summary_file: + If that value is a file name (and not an empty string), + dqueued-watcher will write the same summary of daemon activities as + above to the named file. 
This can be in addition to sending a mail. + + - @nonus_packages: + This is a (Perl) list of names of packages that must be uploaded to + nonus.debian.org and not to master. Since the queue daemon only can + deal with one target, it can't do that upload and thus must reject + the job. Generally you can treat this variable as a list of any + packages that should be rejected. + +All the following timing variables are in seconds: + + - $upload_delay_1: + The time between the first (failed) upload try and the next one. + Usually shorter then $upload_delay_2 for quick retry after + transient errors. + + - $upload_delay_2: + The time between the following (except the first) upload retries. + + - $queue_delay: + The time between two queue runs. (May not be obeyed too exactly... + a few seconds deviation are normal). + + - $stray_remove_timeout: + If a file not associated with any .changes file is found in the + queue directory, it is removed after this many seconds. + + - $problem_report_timeout: + If there are problems with a job that could also be result of a + not-yet-complete upload (missing or too small files), the daemon + waits this long before reporting the problem to the uploader. This + avoids warning mails for slow but ongoing uploads. + + - $no_changes_timeout: + + If files are found in the queue directory that look like a Debian + upload (*.tar.gz, *.diff.gz, *.deb, or *.dsc files), but aren't + accompanied by a .changes file, then debianqueued tries to notify + the uploader after $no_changes_timeout seconds about this. This + value is somewhat similar to $problem_report_timeout, and the + values can be equal. + + Since there's no .changes, the daemon can't never be sure who + really uploaded the files, but it tries to extract the maintainer + address from all of the files mentioned above. If they're real + Debian files (except a .orig.tar.gz), this works in most cases. 
+ + - $bad_changes_timeout: + After this time, a job with persisting problems (missing files, + wrong size or md5 checksum) is removed. + + - $remote_timeout: + This is the maximum time a remote command (ssh/scp) may take. It's + to protect against network unreliabilities and the like. Choose the + number sufficiently high, so that the timeout doesn't inadventedly + kill a longish upload. A few hours seems ok. + +Contents of $queued_dir +----------------------- + +$queued_dir contains usually the following files: + + - config: + The configuration file, described above. + + - log: + Log file of debianqueued. All interesting actions and errors are + logged there, in a format similar to syslog. + + - pid: + This file contains the pid of debianqueued, to detect double + daemons and for killing a running daemon. + + - debian-keyring.pgp, debian-keyring.gpg: + These are the PGP and GnuPG key rings used by debianqueued to + verify the signatures of .changes files. It should contain the keys + of all Debian developers and no other keys. The current Debian key + ring can be obtained from + ftp.debian.org:/debian/doc/debian-keyring.tar.gz. dqueued-watcher + supports the facility to update this file automatically if you also + run a Debian mirror. + + - debianqueued, dqueued-watcher: + The Perl scripts. + +All filenames except "config" can be changed in the config file. The +files are not really required to reside in $queued_dir, but it seems +practical to have them all together... + + +Details of Queue Processing +--------------------------- + +The details of how the files in the queue are processed may be a bit +complicated. You can skip this section if you're not interested in +those details and everything is running fine... :-) + +The first thing the daemon does on every queue run is determining all +the *.changes files present. All of them are subsequently read and +analyzed. 
The .changes MUST contain a Maintainer: field, and the +contents of that field should be the mail address of the uploader. The +address is used for sending back acknowledges and error messages. +(dinstall on master uses the same convention.) + +Next, the PGP or GnuPG signature of the .changes is checked. The +signature must be valid and must belong to one of the keys in the +Debian keyring (see config variables $keyring and $gpg_keyring). This +ensures that only registered Debian developers can use the upload +queue to transfer files to master. + +Then all files mentioned in the Files: field of the .changes are +checked. All of them must be present, and must have correct size and +md5 checksum. If any of this conditions is violated, the upload +doesn't happen and an error message is sent to the uploader. If the +error is a incorrect size/md5sum, the file is also deleted, because it +has to be reuploaded anyway, and it could be the case that the +uploader cannot easily overwrite a file in the queue dir (due to +upload permission restrictions). If the error is a missing file or a +too small file, the error message is hold back for some time +($problems_report_timeout), because they can also be result of an +not-yet-complete upload. + +The time baseline for when to send such a problem report is the +maximum modification time of the .changes itself and all files +mentioned in it. When such a report is sent, the setgid bit (show as +'S' in ls -l listing, in group x position) on the .changes is set to +note that fact, and to avoid the report being sent on every following +queue run. If any modification time becomes greater than the time the +setgid bit was set, a new problem report is sent, because obviously +something has changed to the files. + +If a job is hanging around for too long with errors +($bad_changes_timeout), the .changes and all its files are deleted. +The base for that timeout is again the maximum modification time as +explained above. 
+
+If now the .changes itself and all its files are ok, an upload is
+tried. The upload itself is done with scp. In that stage, various
+errors from the net and/or ssh can occur. All these simply count as
+upload failures, since it's not easy to distinguish transient and
+permanent failures :-( If the scp goes ok, the md5sums of the files on
+the target are compared with the local ones. This is to ensure that
+the transfer didn't corrupt anything. On any error in the upload or in
+the md5 check, the files written to the target host are deleted again
+(they may be broken), and an error message is sent to the uploader.
+
+The upload is retried $upload_delay_1 seconds later. If it fails again,
+the next retries have a (longer) delay $upload_delay_2 between them.
+At most $max_upload_retries retries are done. After all these failed,
+all the files are deleted, since it seems we can't move them... For
+remembering how many tries were already done (and when), debianqueued
+uses a separate file. Its name is the .changes' filename with
+".failures" appended. It contains simply two integers, the retry count
+and the last upload time (in Unix time format).
+
+After a successful upload, the daemon also checks for files that look
+like they belonged to the same job, but weren't listed in the
+.changes. Due to experience, this happens rather often with
+.orig.tar.gz files, which people upload though they aren't needed
+nor mentioned in the .changes. The daemon uses the filename pattern
+<package>_<version>* to find such unneeded files, where the Debian
+revision is stripped from <version>. The latter is needed to include
+.orig.tar.gz files, which don't have the Debian revision part. But
+this also introduces the possibility that files of another upload for
+the same package but with another revision are deleted though they
+shouldn't. However, this case seems rather unlikely, so I didn't care
+about it. If such files are deleted, that fact is mentioned in the
+reply mail to the uploader. 
+
+If any files are found in the queue dir that don't belong to any
+.changes, they are considered "stray". Such files are removed after
+$stray_remove_timeout. This should be around 1 day or so, to avoid
+files being removed that belong to a job, but whose .changes is still
+to come. The daemon also tries to find out whether such stray files
+could be part of an incomplete upload, where the .changes file is
+still missing or has been forgotten. Files that match the patterns
+*.deb, *.dsc, *.diff.gz, or *.tar.gz are analyzed whether a maintainer
+address can be extracted from them. If yes, the maintainer is notified
+about the incomplete upload after $no_changes_timeout seconds.
+However, the maintainer need not really be the uploader... It could
+be a binary-only upload for another architecture, or a non-maintainer
+upload. In these cases, the mail goes to the wrong person :-(
+But better than not writing at all, IMHO...
+
+
+The status file
+---------------
+
+debianqueued provides a status file for the user in the queue
+directory. By reading this file, the user can get an idea what the
+daemon is currently doing.
+
+There are two possible implementations of the status file: as a plain
+file, or as a named pipe (FIFO). Both have their advantages and
+disadvantages.
+
+If using the FIFO, the data printed (last ping time, next queue run)
+are always up to date, because they're interrogated (by a signal) just
+at the time the FIFO is opened for reading. Also, the daemon doesn't
+have to care about the status file if nobody accesses it. The bad
+things about the FIFO: It is a potential portability problem, because
+not all systems have FIFOs, or they behave differently than I
+expect... But the more severe problem: wu-ftpd refuses to send the
+contents of a FIFO on an FTP GET request :-(( It does an explicit
+check whether a file to be retrieved is a regular file. 
This can be easily patched [1], but not +everybody wants to do that or can do that (but I did it for +ftp.uni-erlangen.de). (BTW, there could still be problems (races) if +more than one process try to read the status file at the same time...) + +The alternative is using a plain file, which is updated regularily by +the daemon. This works on every system, but causes more overhead (the +daemon has to wake up each $statusdelay seconds and write a file), and +the time figures in the file can't be exact. $statusdelay should be a +compromise between CPU wastage and desired accuracy of the times found +in the status file. I think 15 or 30 seconds should be ok, but your +milage may vary. + +If the status file is a FIFO, the queue daemon forks a second process +for watching the FIFO (so don't wonder if debianqueued shows up twice +in ps output :-), to avoid blocking a reading process too long until +the main daemon has time to watch the pipe. The status daemon requests +data from the main daemon by sending a signal (SIGUSR1). Nevertheless +it can happen that a process that opens the status file (for reading) +is blocked, because the daemon has crashed (or never has been started, +after reboot). To minimize chances for that situation, dqueued-watcher +replaces the FIFO by a plain file (telling that the daemon is down) if +it sees that no queue daemon is running. 
+ + + [1]: This is such a patch, for wu-ftpd-2.4.2-BETA-13: + +--- wu-ftpd/src/ftpd.c~ Wed Jul 9 13:18:44 1997 ++++ wu-ftpd/src/ftpd.c Wed Jul 9 13:19:15 1997 +@@ -1857,7 +1857,9 @@ + return; + } + if (cmd == NULL && +- (fstat(fileno(fin), &st) < 0 || (st.st_mode & S_IFMT) != S_IFREG)) { ++ (fstat(fileno(fin), &st) < 0 || ++ ((st.st_mode & S_IFMT) != S_IFREG && ++ (st.st_mode & S_IFMT) != S_IFIFO))) { + reply(550, "%s: not a plain file.", name); + goto done; + } + + +Command Files +------------- + +The practical experiences with debianqueued showed that users +sometimes make errors with their uploads, resulting in misnamed or +corrupted files... Formerly they didn't have any chance to fix such +errors, because the ftpd usually doesn't allow deleting or renaming +files in the queue directory. (If you would allow this, *anybody* can +remove/rename files, which isn't desirable.) So users had to wait +until the daemon deleted the bad files (usually ~ 24 hours), before +they could start the next try. + +To overcome this, I invented the *.command files. The daemon looks for +such files just as it tests for *.changes files on every queue run, +and processes them before the usual jobs. *.commands files must be PGP +or GnuPG signed by a known Debian developer (same test as for +*.changes), so only these people can give the daemon commands. Since +Debian developers can also delete files in master's incoming, the +*.commands feature doesn't give away any security. + +The syntax of a *.commands file is much like a *.changes, but it +contains only two (mandatory) fields: Uploader: and Commands. +Uploader: contains the e-mail address of the uploader for reply mails, +and should have same contents as Maintainer: in a .changes. Commands: +is a multi-line field like e.g. Description: or Changes:. Every +continuation line must start with a space. 
Each line in Commands: +contains a command for the daemon that looks like a shell command (but +it isn't one, the daemon parses and executes it itself and doesn't use +sh or the respective binaries). + +Example: +-----BEGIN PGP SIGNED MESSAGE----- + +Uploader: Roman Hodek +Commands: + rm hello_1.0-1_i386.deb + mv hello_1.0-1.dsx hello_1.0-1.dsc + +-----BEGIN PGP SIGNATURE----- +Version: 2.6.3ia + +iQCVAwUBNFiQSXVhJ0HiWnvJAQG58AP+IDJVeSWmDvzMUphScg1EK0mvChgnuD7h +BRiVQubXkB2DphLJW5UUSRnjw1iuFcYwH/lFpNpl7XP95LkLX3iFza9qItw4k2/q +tvylZkmIA9jxCyv/YB6zZCbHmbvUnL473eLRoxlnYZd3JFaCZMJ86B0Ph4GFNPAf +Z4jxNrgh7Bc= +=pH94 +-----END PGP SIGNATURE----- + +The only commands implemented at this time are 'rm' and 'mv'. No +options are implemented, and filenames may not contain slashes and are +interpreted relative to the queue directory. This ensures that only +files there can be modified. 'mv' always takes two arguments. 'rm' can +take any number of args. It also knows about the following shell +wildcard chars: *, ?, and [...]. {..,..} constructs are *not* +supported. The daemon expands these patterns itself and doesn't use sh +for that (for security reasons). + +*.commands files are processed before the usual *.changes jobs, so if +a commands file fixes a job so that it can be processed, that +processing happens in the same queue run and no unnecessary delay is +introduced. + +The uploader of a *.commands will receive a reply mail with a comment +(OK or error message) to each of the commands given. The daemon not +only logs the contents of the Uploader: field, but also the owner of +the PGP/GnuPG key that was used to sign the file. In case you want to +find out who issued some commands, the Uploader: field is insecure, +since its contents can't be checked. + + +Security Considerations +----------------------- + +You already know that debianqueued uses ssh & Co. to get access to +master, or in general any target host. 
You also probably know that you +need to unlock your ssh secret key with a passphrase before it can be +used. For the daemon this creates a problem: It needs the passphrase +to be able to use ssh/scp, but obviously you can't type in the phrase +every time the daemon needs it... It would also be very ugly and +insecure to write the passphase into some config file of the daemon! + +The solution is using ssh-agent, which comes with the ssh package. +This agent's purpose is to store passphrases and give it to +ssh/scp/... if they need it. ssh-agent has to ways how it can be +accessed: through a Unix domain socket, or with an inherited file +descriptor (ssh-agent is the father of your login shell then). The +second method is much more secure than the first, because the socket +can be easily exploited by root. On the other hand, an inherited file +descriptor can be access *only* from a child process, so even root has +bad chances to get its hands on it. Unfortunately, the fd method has +been removed in ssh-1.2.17, so I STRONGLY recommend to use ssh-1.2.16. +(You can still have a newer version for normal use, but separate +binaries for debianqueued.) Also, using debianqueued with Unix domain +sockets is basically untested, though I've heard that it doesn't +work... + +debianqueued starts the ssh-agent automatically and runs ssh-add. This +will ask you for your passphrase. The phrase is stored in the agent +and available only to child processes of the agent. The agent will +also start up a second instance of the queue daemon that notices that +the agent is already running. + +Currently, there's no method to store the passphrase in a file, due to +all the security disadvantages of this. If you don't mind this and +would like to have some opportunity to do it nevertheless, please ask +me. If there's enough demand, I'll do it. + + +New Upload Methods +------------------ + +Since release 0.9, debianqueued has two new upload methods as +alternatives to ssh: copy and ftp. 
+ +The copy method simply moves the files to another directory on the +same host. This seems a bit silly, but is for a special purpose: The +admins of master intend to run an upload queue there, too, in the +future to avoid non-anonymous FTP connections, which transmit the +password in cleartext. And, additionally to simply moving the files, +the queue daemon also checks the signature and integrity of uploads +and can reject non-US packages. + +The ftp method uploads to a standard anon-FTP incoming directory. The +intention here is that you could create second-level queue daemons. +I.e., those daemons would upload into the queue of another daemon +(and, for example, this could be the queue of the daemon on master). + +However, the ftp method still has some limitations: + + 1) Files in the target dir can't be deleted. + 2) Uploaded files can't be verified as good as with the other methods. + 3) $chmod_on_target often doesn't work. + 4) The check for a writable incoming directory leaves temporary files + behind. + +Ad 1): In anon-FTP incoming directories removing of files usually +isn't allowed (this would widely open doors to denial-of-service +attacks). But debianqueued has to remove files on the target as part +of handling upload errors. So if an transmission error happens during +a job, the bad file can't be deleted. On the next try, the file is +already present on the target and can't be overwritten, so all the +following tries will fail, too, except the upstream queue daemon has +deleted them already. And if the .changes was among the files already +(at least partially) uploaded, the daemon even will think that the +whole job is already present on the target and will delete the job in +its queue. + +Ad 2): Uploaded files are usually verified with md5sum if they're +really the same as the originals. But getting the md5sum for a file on +a FTP server usually isn't possible. 
It's currently handled as +follows: If the server supports a SITE MD5SUM command (non-standard!), +then this is used and you have the same checking quality. Otherwise, +debianqueued falls back to only comparing the file sizes. This is +better than nothing, but doesn't detected changed contents that don't +result in size changes. + +Ad 3): Often SITE CHMOD (standard) isn't allowed in incoming +directories. If this is the case, $chmod_on_target must be off, +otherwise all uploads will fail. The mode of uploaded files if forced +anyway by the FTP server in most cases. + +Ad 4): As you know, the queue daemon has a special check if the target +directory is writable at all (it isn't during a freeze) to protect +against repeated upload errors. (Jobs would be even deleted otherwise +if the target dir is unaccessible for too long.) This check is +performed by creating a test file and deleting it immediately again. +But since in FTP incoming dirs deletion isn't permitted, the temporary +file ("junk-for-writable-test-DATE") will remain there. As a partial +fix, the daemon deletes such files immediately, it doesn't even wait +for $stray_remove_timeout. So if the upload goes to the queue dir of +an upstream debianqueued, those temporary files won't be there for +long. + +These problems of the FTP method might be remove in future, if I have +better ideas how to bypass the limitations of anon-FTP incoming +directories. Hints welcome :-) + + +# Local Variables: +# mode: indented-text +# End: diff --git a/tools/debianqueued-0.9/TODO b/tools/debianqueued-0.9/TODO new file mode 100644 index 00000000..4a98842c --- /dev/null +++ b/tools/debianqueued-0.9/TODO @@ -0,0 +1,13 @@ +$Header: /allftp/CVS/debianqueued/TODO,v 1.8 1998/04/01 15:27:39 ftplinux Exp $ + + - There are numerous potential portability problems... They'll show + up as this script is used on more and different machines. 
+ + - There was a suggestion how bad files on uploads could be handled + easier than with command files: Give them some known extension + (e.g. .), and the daemon could look for those files if the + main file has bad size or md5. + + - Make provisions for the (rare) case that the daemon looks at a + yet-incomplete .changes file. + diff --git a/tools/debianqueued-0.9/changes-template b/tools/debianqueued-0.9/changes-template new file mode 100644 index 00000000..ea4ecbff --- /dev/null +++ b/tools/debianqueued-0.9/changes-template @@ -0,0 +1,12 @@ +Format: 1.5 +Date: +Source: debianqueued +Binary: debianqueued +Architecture: source all +Version: +Distribution: unstable +Urgency: low +Maintainer: Roman Hodek +Description: + Debian Upload Queue Daemon +Files: diff --git a/tools/debianqueued-0.9/config b/tools/debianqueued-0.9/config new file mode 100644 index 00000000..bc432459 --- /dev/null +++ b/tools/debianqueued-0.9/config @@ -0,0 +1,189 @@ +# +# example configuration file for debianqueued +# +# $Id: config,v 1.15 1999/07/07 16:19:32 ftplinux Exp $ +# +# $Log: config,v $ +# Revision 1.15 1999/07/07 16:19:32 ftplinux +# New variables for upload methods: $upload_method, $ftptimeout, +# $ftpdebug, $ls, $cp, $chmod. +# New variables for GnuPG checking: $gpg, $gpg_keyring, +# $gpg_keyring_archive_name. +# Renamed "master" in vars to "target". +# Updated list of non-US packages. +# +# Revision 1.14 1998/07/06 14:25:46 ftplinux +# Make $keyring_archive_name use a wildcard, newer debian keyring tarball +# contain a dir with a date. +# +# Revision 1.13 1998/04/23 10:56:53 ftplinux +# Added new config var $chmod_on_master. +# +# Revision 1.12 1998/02/17 10:57:21 ftplinux +# Added @test_binaries +# +# Revision 1.11 1997/12/09 13:51:46 ftplinux +# Implemented rejecting of nonus packages (new config var @nonus_packages) +# +# Revision 1.10 1997/10/30 11:32:39 ftplinux +# Implemented warning mails for incomplete uploads that miss a .changes +# file. 
Maintainer address can be extracted from *.deb, *.diff.gz, +# *.dsc, or *.tar.gz files with help of new utility functions +# is_debian_file, get_maintainer, and debian_file_stem. +# +# Revision 1.9 1997/09/17 12:16:33 ftplinux +# Added writing summaries to a file +# +# Revision 1.8 1997/08/18 13:07:14 ftplinux +# Implemented summary mails +# +# Revision 1.7 1997/08/11 12:49:09 ftplinux +# Implemented logfile rotating +# +# Revision 1.6 1997/08/07 09:25:21 ftplinux +# Added timeout for remote operations +# +# Revision 1.5 1997/07/09 10:14:58 ftplinux +# Change RCS Header: to Id: +# +# Revision 1.4 1997/07/09 10:13:51 ftplinux +# Alternative implementation of status file as plain file (not FIFO), because +# standard wu-ftpd doesn't allow retrieval of non-regular files. New config +# option $statusdelay for this. +# +# Revision 1.3 1997/07/08 08:34:14 ftplinux +# If dqueued-watcher runs as cron job, $PATH might not contain gzip. Use extra +# --use-compress-program option to tar, and new config var $gzip. +# +# Revision 1.2 1997/07/03 13:06:48 ftplinux +# Little last changes before beta release +# +# Revision 1.1.1.1 1997/07/03 12:54:59 ftplinux +# Import initial sources +# + +# set to != 0 for debugging output (to log file) +$debug = 0; + +# various programs: +# ----------------- +$gpg = "/usr/bin/gpg"; +$ssh = "/usr/bin/ssh"; +$scp = "/usr/bin/scp"; +$ssh_agent = "/usr/bin/ssh-agent"; +$ssh_add = "/usr/bin/ssh-add"; +$md5sum = "/usr/bin/md5sum"; +$mail = "/usr/bin/mail"; +$mkfifo = "/usr/bin/mkfifo"; +$tar = "/bin/tar"; # must be GNU tar! +$gzip = "/bin/gzip"; +$ar = "/usr/bin/ar"; # must support p option, optional +$ls = "/bin/ls"; +$cp = "/bin/cp"; +$chmod = "/bin/chmod"; + +# binaries which existance should be tested before each queue run +#@test_binaries = (); + +# general options to ssh/scp +$ssh_options = "-o'BatchMode yes' -o'FallBackToRsh no' ". + "-o'ForwardAgent no' -o'ForwardX11 no' ". 
+ "-o'PasswordAuthentication no' -o'StrictHostKeyChecking yes'"; + +# ssh key file to use for connects to master (empty: default ~/.ssh/identity) +$ssh_key_file = ""; + +# the incoming dir we live in +$incoming = "/srv/queued/UploadQueue"; + +# files not to delete in $incoming (regexp) +$keep_files = '(status|\.message|README)$'; + +# file patterns that aren't deleted right away +$valid_files = '(\.changes|\.tar\.gz|\.dsc|\.u?deb|diff\.gz|\.sh)$'; + +# Change files to mode 644 locally (after md5 check) or only on master? +$chmod_on_target = 0; + +# name of the status file or named pipe in the incoming dir +$statusfile = "$incoming/status"; + +# if 0, status file implemented as FIFO; if > 0, status file is plain +# file and updated with a delay of this many seconds +$statusdelay = 30; + +# names of the keyring files +@keyrings = ( "/srv/keyring.debian.org/keyrings/debian-keyring.gpg", + "/srv/keyring.debian.org/keyrings/debian-keyring.pgp", + "/srv/ftp.debian.org/keyrings/debian-maintainers.gpg" ); + +# our log file +$logfile = "$queued_dir/log"; + +# our pid file +$pidfile = "$queued_dir/pid"; + +# upload method (ssh, copy, ftp) +$upload_method = "copy"; + +# name of target host (ignored on copy method) +$target = "localhost"; + +# login name on target host (for ssh, always 'ftp' for ftp, ignored for copy) +$targetlogin = "queue"; + +# incoming on target host +$targetdir = "/srv/ftp.debian.org/queue/unchecked/"; + +# select FTP debugging +#$ftpdebug = 0; + +# FTP timeout +$ftptimeout = 900; + +# max. number of tries to upload +$max_upload_retries = 8; + +# delay after first failed upload +$upload_delay_1 = 30*60; # 30 min. + +# delay between successive failed uploads +$upload_delay_2 = 4*60*60; # 4 hours + +# packages that must go to nonus.debian.org and thus are rejected here +#@nonus_packages = qw(gpg-rsaidea); + +# timings: +# -------- +# time between two queue checks +$queue_delay = 5*60; # 5 min. +# when are stray files deleted? 
+$stray_remove_timeout = 24*60*60; # 1 day +# delay before reporting problems with a .changes file (not +# immediately for to-be-continued uploads) +$problem_report_timeout = 30*60; # 30 min. +# delay before reporting that a .changes file is missing (not +# immediately for to-be-continued uploads) +$no_changes_timeout = 30*60; # 30 min. +# when are .changes with persistent problems removed? +$bad_changes_timeout = 2*24*60*60; # 2 days +# how long may a remote operation (ssh/scp) take? +$remote_timeout = 3*60*60; # 3 hours + +# mail address of maintainer +$maintainer_mail = "james\@nocrew.org"; + + +# logfile rotating: +# ----------------- +# how often to rotate (in days) +$log_age = 7; +# how much old logs to keep +$log_keep = 4; +# send summary mail when rotating logs? +$mail_summary = 1; +# write summary to file when rotating logs? (no if name empty) +$summary_file = "$queued_dir/summary"; + +# don't remove this, Perl needs it! +1; diff --git a/tools/debianqueued-0.9/debianqueued b/tools/debianqueued-0.9/debianqueued new file mode 100755 index 00000000..d6f2afdc --- /dev/null +++ b/tools/debianqueued-0.9/debianqueued @@ -0,0 +1,2271 @@ +#!/usr/bin/perl -w +# +# debianqueued -- daemon for managing Debian upload queues +# +# Copyright (C) 1997 Roman Hodek +# Copyright (C) 2001-2007 Ryan Murray +# +# This program is free software. You can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation: either version 2 or +# (at your option) any later version. +# This program comes with ABSOLUTELY NO WARRANTY! +# +# $Id: debianqueued,v 1.51 1999/07/08 09:43:21 ftplinux Exp $ +# +# $Log: debianqueued,v $ +# Revision 1.51 1999/07/08 09:43:21 ftplinux +# Bumped release number to 0.9 +# +# Revision 1.50 1999/07/07 16:17:30 ftplinux +# Signatures can now also be created by GnuPG; in pgp_check, also try +# gpg for checking. +# In several messages, also mention GnuPG. 
+# +# Revision 1.49 1999/07/07 16:14:43 ftplinux +# Implemented new upload methods "copy" and "ftp" as alternatives to "ssh". +# Replaced "master" in many function and variable names by "target". +# New functions ssh_cmd, ftp_cmd, and local_cmd for more abstraction and +# better readable code. +# +# Revision 1.48 1998/12/08 13:09:39 ftplinux +# At the end of process_changes, do not remove the @other_files with the same +# stem if a .changes file is in that list; then there is probably another +# upload for a different version or another architecture. +# +# Revision 1.47 1998/05/14 14:21:44 ftplinux +# Bumped release number to 0.8 +# +# Revision 1.46 1998/05/14 14:17:00 ftplinux +# When --after a successfull upload-- deleting files for the same job, check +# for equal revision number on files that have one. It has happened that the +# daemon deleted files that belonged to another job with different revision. +# +# Revision 1.45 1998/04/23 11:05:47 ftplinux +# Implemented $conf::chmod_on_master. If 0, new part to change mode locally in +# process_changes. +# +# Revision 1.44 1998/04/21 08:44:44 ftplinux +# Don't use return value of debian_file_stem as regexp, it's a shell pattern. +# +# Revision 1.43 1998/04/21 08:22:21 ftplinux +# Also recogize "read-only filesystem" as error message so it triggers assuming +# that incoming is unwritable. +# Don't increment failure count after an upload try that did clear +# $incoming_writable. +# Fill in forgotten pattern for mail addr in process_commands. +# +# Revision 1.42 1998/03/31 13:27:32 ftplinux +# In fatal_signal, kill status daemon only if it has been started (otherwise +# warning about uninitialized variable). +# Change mode of files uploaded to master explicitly to 644 there, scp copies the +# permissions in the queue. +# +# Revision 1.41 1998/03/31 09:06:00 ftplinux +# Implemented handling of improper mail addresses in Maintainer: field. 
+# +# Revision 1.40 1998/03/24 13:17:33 ftplinux +# Added new check if incoming dir on master is writable. This check is triggered +# if an upload returns "permission denied" errors. If the dir is unwritable, the +# queue is holded (no upload tries) until it's writable again. +# +# Revision 1.39 1998/03/23 14:05:14 ftplinux +# Bumped release number to 0.7 +# +# Revision 1.38 1998/03/23 14:03:55 ftplinux +# In an upload failure message, say explicitly that the job will be +# retried, to avoid confusion of users. +# $failure_file was put on @keep_list only for first retry. +# If the daemon removes a .changes, set SGID bit on all files associated +# with it, so that the test for Debian files without a .changes doesn't +# find them. +# Don't send reports for files without a .changes if the files look like +# a recompilation for another architecture. +# Also don't send such a report if the list of files with the same stem +# contains a .changes. +# Set @keep_list earlier, before PGP and non-US checks. +# Fix recognition of -k argument. +# +# Revision 1.37 1998/02/17 12:29:58 ftplinux +# Removed @conf::test_binaries used only once warning +# Try to kill old daemon for 20secs instead of 10 +# +# Revision 1.36 1998/02/17 10:53:47 ftplinux +# Added test for binaries on maybe-slow NFS filesystems (@conf::test_binaries) +# +# Revision 1.35 1997/12/16 13:19:28 ftplinux +# Bumped release number to 0.6 +# +# Revision 1.34 1997/12/09 13:51:24 ftplinux +# Implemented rejecting of nonus packages (new config var @nonus_packages) +# +# Revision 1.33 1997/11/25 10:40:53 ftplinux +# In check_alive, loop up the IP address everytime, since it can change +# while the daemon is running. +# process_changes: Check presence of .changes on master at a later +# point, to avoid bothering master as long as there are errors in a +# .changes. 
+# Don't view .orig.tar.gz files as is_debian_file, to avoid that they're +# picked for extracting the maintainer address in the +# job-without-changes processing. +# END statement: Fix swapped arguments to kill +# Program startup: Implemented -r and -k arguments. +# +# Revision 1.32 1997/11/20 15:18:47 ftplinux +# Bumped release number to 0.5 +# +# Revision 1.31 1997/11/11 13:37:52 ftplinux +# Replaced <./$pattern> contruct be cleaner glob() call +# Avoid potentially uninitialized $_ in process_commands file read loop +# Implemented rm command with more than 1 arg and wildcards in rm args +# +# Revision 1.30 1997/11/06 14:09:53 ftplinux +# In process_commands, also recognize commands given on the same line as +# the Commands: keyword, not only the continuation lines. +# +# Revision 1.29 1997/11/03 15:52:20 ftplinux +# After reopening the log file write one line to it for dqueued-watcher. +# +# Revision 1.28 1997/10/30 15:37:23 ftplinux +# Removed some leftover comments in process_commands. +# Changed pgp_check so that it returns the address of the signator. +# process_commands now also logs PGP signator, since Uploader: address +# can be choosen freely by uploader. +# +# Revision 1.27 1997/10/30 14:05:37 ftplinux +# Added "command" to log string for command file uploader, to make it +# unique for dqueued-watcher. +# +# Revision 1.26 1997/10/30 14:01:05 ftplinux +# Implemented .commands files +# +# Revision 1.25 1997/10/30 13:05:29 ftplinux +# Removed date from status version info (too long) +# +# Revision 1.24 1997/10/30 13:04:02 ftplinux +# Print revision, version, and date in status data +# +# Revision 1.23 1997/10/30 12:56:01 ftplinux +# Implemented deletion of files that (probably) belong to an upload, but +# weren't listed in the .changes. +# +# Revision 1.22 1997/10/30 12:22:32 ftplinux +# When setting sgid bit for stray files without a .changes, check for +# files deleted in the meantime. 
+# +# Revision 1.21 1997/10/30 11:32:19 ftplinux +# Added quotes where filenames are used on sh command lines, in case +# they contain metacharacters. +# print_time now always print three-field times, as omitting the hour if +# 0 could cause confusing (hour or seconds missing?). +# Implemented warning mails for incomplete uploads that miss a .changes +# file. Maintainer address can be extracted from *.deb, *.diff.gz, +# *.dsc, or *.tar.gz files with help of new utility functions +# is_debian_file, get_maintainer, and debian_file_stem. +# +# Revision 1.20 1997/10/13 09:12:21 ftplinux +# On some .changes errors (missing/bad PGP signature, no files) also log the +# uploader +# +# Revision 1.19 1997/09/25 11:20:42 ftplinux +# Bumped release number to 0.4 +# +# Revision 1.18 1997/09/25 08:15:02 ftplinux +# In process_changes, initialize some vars to avoid warnings +# If first consistency checks failed, don't forget to delete .changes file +# +# Revision 1.17 1997/09/16 10:53:35 ftplinux +# Made logging more verbose in queued and dqueued-watcher +# +# Revision 1.16 1997/08/12 09:54:39 ftplinux +# Bumped release number +# +# Revision 1.15 1997/08/11 12:49:09 ftplinux +# Implemented logfile rotating +# +# Revision 1.14 1997/08/11 11:35:05 ftplinux +# Revised startup scheme so it works with the socket-based ssh-agent, too. +# That watches whether its child still exists, so the go-to-background fork must be done before the ssh-agent. +# +# Revision 1.13 1997/08/11 08:48:31 ftplinux +# Aaarg... forgot the alarm(0)'s +# +# Revision 1.12 1997/08/07 09:25:22 ftplinux +# Added timeout for remote operations +# +# Revision 1.11 1997/07/28 13:20:38 ftplinux +# Added release numner to startup message +# +# Revision 1.10 1997/07/28 11:23:39 ftplinux +# $main::statusd_pid not necessarily defined in status daemon -- rewrite check +# whether to delete pid file in signal handler. +# +# Revision 1.9 1997/07/28 08:12:16 ftplinux +# Again revised SIGCHLD handling. 
+# Set $SHELL to /bin/sh explicitly before starting ssh-agent. +# Again raise ping timeout. +# +# Revision 1.8 1997/07/25 10:23:03 ftplinux +# Made SIGCHLD handling more portable between perl versions +# +# Revision 1.7 1997/07/09 10:15:16 ftplinux +# Change RCS Header: to Id: +# +# Revision 1.6 1997/07/09 10:13:53 ftplinux +# Alternative implementation of status file as plain file (not FIFO), because +# standard wu-ftpd doesn't allow retrieval of non-regular files. New config +# option $statusdelay for this. +# +# Revision 1.5 1997/07/09 09:21:22 ftplinux +# Little revisions to signal handling; status daemon should ignore SIGPIPE, +# in case someone closes the FIFO before completely reading it; in fatal_signal, +# only the main daemon should remove the pid file. +# +# Revision 1.4 1997/07/08 11:31:51 ftplinux +# Print messages of ssh call in is_on_master to debug log. +# In ssh call to remove bad files on master, the split() doesn't work +# anymore, now that I use -o'xxx y'. Use string interpolation and let +# the shell parse the stuff. +# +# Revision 1.3 1997/07/07 09:29:30 ftplinux +# Call check_alive also if master hasn't been pinged for 8 hours. +# +# Revision 1.2 1997/07/03 13:06:49 ftplinux +# Little last changes before beta release +# +# Revision 1.1.1.1 1997/07/03 12:54:59 ftplinux +# Import initial sources +# +# + +require 5.002; +use strict; +use POSIX; +use POSIX qw( sys_stat_h sys_wait_h signal_h ); +use Net::Ping; +use Net::FTP; +use Socket qw( PF_INET AF_INET SOCK_STREAM ); +use Config; + +# --------------------------------------------------------------------------- +# configuration +# --------------------------------------------------------------------------- + +package conf; +($conf::queued_dir = (($0 !~ m,^/,) ? POSIX::getcwd()."/" : "") . 
$0) + =~ s,/[^/]+$,,; +require "$conf::queued_dir/config"; +my $junk = $conf::debug; # avoid spurious warnings about unused vars +$junk = $conf::ssh_key_file; +$junk = $conf::stray_remove_timeout; +$junk = $conf::problem_report_timeout; +$junk = $conf::queue_delay; +$junk = $conf::keep_files; +$junk = $conf::valid_files; +$junk = $conf::max_upload_retries; +$junk = $conf::upload_delay_1; +$junk = $conf::upload_delay_2; +$junk = $conf::ar; +$junk = $conf::gzip; +$junk = $conf::cp; +$junk = $conf::ls; +$junk = $conf::chmod; +$junk = $conf::ftpdebug; +$junk = $conf::ftptimeout; +$junk = $conf::no_changes_timeout; +$junk = @conf::nonus_packages; +$junk = @conf::test_binaries; +$junk = @conf::maintainer_mail; +$conf::target = "localhost" if $conf::upload_method eq "copy"; +package main; + +($main::progname = $0) =~ s,.*/,,; + +# extract -r and -k args +$main::arg = ""; +if (@ARGV == 1 && $ARGV[0] =~ /^-[rk]$/) { + $main::arg = ($ARGV[0] eq '-k') ? "kill" : "restart"; + shift @ARGV; +} + +# test for another instance of the queued already running +my $pid; +if (open( PIDFILE, "<$conf::pidfile" )) { + chomp( $pid = ); + close( PIDFILE ); + if (!$pid) { + # remove stale pid file + unlink( $conf::pidfile ); + } + elsif ($main::arg) { + local($|) = 1; + print "Killing running daemon (pid $pid) ..."; + kill( 15, $pid ); + my $cnt = 20; + while( kill( 0, $pid ) && $cnt-- > 0 ) { + sleep 1; + print "."; + } + if (kill( 0, $pid )) { + print " failed!\nProcess $pid still running.\n"; + exit 1; + } + print "ok\n"; + if (-e "$conf::incoming/core") { + unlink( "$conf::incoming/core" ); + print "(Removed core file)\n"; + } + exit 0 if $main::arg eq "kill"; + } + else { + die "Another $main::progname is already running (pid $pid)\n" + if $pid && kill( 0, $pid ); + } +} +elsif ($main::arg eq "kill") { + die "No daemon running\n"; +} +elsif ($main::arg eq "restart") { + print "(No daemon running; starting anyway)\n"; +} + +# if started without arguments (initial invocation), then fork 
+if (!@ARGV) { + # now go to background + die "$main::progname: fork failed: $!\n" unless defined( $pid = fork ); + if ($pid) { + # parent: wait for signal from child (SIGCHLD or SIGUSR1) and exit + my $sigset = POSIX::SigSet->new(); + $sigset->emptyset(); + $SIG{"CHLD"} = sub { }; + $SIG{"USR1"} = sub { }; + POSIX::sigsuspend( $sigset ); + waitpid( $pid, WNOHANG ); + if (kill( 0, $pid )) { + print "Daemon started in background (pid $pid)\n"; + exit 0; + } + else { + exit 1; + } + } + else { + # child + setsid; + if ($conf::upload_method eq "ssh") { + # exec an ssh-agent that starts us again + # force shell to be /bin/sh, ssh-agent may base its decision + # whether to use a fd or a Unix socket on the shell... + $ENV{"SHELL"} = "/bin/sh"; + exec $conf::ssh_agent, $0, "startup", getppid(); + die "$main::progname: Could not exec $conf::ssh_agent: $!\n"; + } + else { + # no need to exec, just set up @ARGV as expected below + @ARGV = ("startup", getppid()); + } + } +} +die "Please start without any arguments.\n" + if @ARGV != 2 || $ARGV[0] ne "startup"; +my $parent_pid = $ARGV[1]; + +do { + my $version; + ($version = 'Release: 0.9 $Revision: 1.51 $ $Date: 1999/07/08 09:43:21 $ $Author: ftplinux $') =~ s/\$ ?//g; + print "debianqueued $version\n"; +}; + +# check if all programs exist +my $prg; +foreach $prg ( $conf::gpg, $conf::ssh, $conf::scp, $conf::ssh_agent, + $conf::ssh_add, $conf::md5sum, $conf::mail, $conf::mkfifo ) { + die "Required program $prg doesn't exist or isn't executable\n" + if ! -x $prg; +# check for correct upload method +die "Bad upload method '$conf::upload_method'.\n" + if $conf::upload_method ne "ssh" && + $conf::upload_method ne "ftp" && + $conf::upload_method ne "copy"; +die "No keyrings\n" if ! 
@conf::keyrings; + +} + +# --------------------------------------------------------------------------- +# initializations +# --------------------------------------------------------------------------- + +# prototypes +sub calc_delta(); +sub check_dir(); +sub process_changes($\@); +sub process_commands($); +sub is_on_target($); +sub copy_to_target(@); +sub pgp_check($); +sub check_alive(;$); +sub check_incoming_writable(); +sub fork_statusd(); +sub write_status_file(); +sub print_status($$$$$$); +sub format_status_num(\$$); +sub format_status_str(\$$); +sub send_status(); +sub ftp_open(); +sub ftp_cmd($@); +sub ftp_close(); +sub ftp_response(); +sub ftp_code(); +sub ftp_error(); +sub ssh_cmd($); +sub scp_cmd(@); +sub local_cmd($;$); +sub check_alive(;$); +sub check_incoming_writable(); +sub rm(@); +sub md5sum($); +sub is_debian_file($); +sub get_maintainer($); +sub debian_file_stem($); +sub msg($@); +sub debug(@); +sub init_mail(;$); +sub finish_mail(); +sub send_mail($$$); +sub try_to_get_mail_addr($$); +sub format_time(); +sub print_time($); +sub block_signals(); +sub unblock_signals(); +sub close_log($); +sub kid_died($); +sub restart_statusd(); +sub fatal_signal($); + +$ENV{"PATH"} = "/bin:/usr/bin"; +$ENV{"IFS"} = "" if defined($ENV{"IFS"} && $ENV{"IFS"} ne ""); + +# constants for stat +sub ST_DEV() { 0 } +sub ST_INO() { 1 } +sub ST_MODE() { 2 } +sub ST_NLINK() { 3 } +sub ST_UID() { 4 } +sub ST_GID() { 5 } +sub ST_RDEV() { 6 } +sub ST_SIZE() { 7 } +sub ST_ATIME() { 8 } +sub ST_MTIME() { 9 } +sub ST_CTIME() { 10 } +# fixed lengths of data items passed over status pipe +sub STATNUM_LEN() { 30 } +sub STATSTR_LEN() { 128 } + +# init list of signals +defined $Config{sig_name} or die "$main::progname: No signal list defined!\n"; +my $i = 0; +my $name; +foreach $name (split( ' ', $Config{sig_name} )) { + $main::signo{$name} = $i++; +} + +@main::fatal_signals = qw( INT QUIT ILL TRAP ABRT BUS FPE USR2 SEGV PIPE + TERM XCPU XFSZ PWR ); + +$main::block_sigset = 
POSIX::SigSet->new; +$main::block_sigset->addset( $main::signo{"INT"} ); +$main::block_sigset->addset( $main::signo{"TERM"} ); + +# some constant net stuff +$main::tcp_proto = (getprotobyname('tcp'))[2] + or die "Cannot get protocol number for 'tcp'\n"; +my $used_service = ($conf::upload_method eq "ssh") ? "ssh" : "ftp"; +$main::echo_port = (getservbyname($used_service, 'tcp'))[2] + or die "Cannot get port number for service '$used_service'\n"; + +# clear queue of stored mails +@main::stored_mails = (); + +# run ssh-add to bring the key into the agent (will use stdin/stdout) +if ($conf::upload_method eq "ssh") { + system "$conf::ssh_add $conf::ssh_key_file" + and die "$main::progname: Running $conf::ssh_add failed ". + "(exit status ", $? >> 8, ")\n"; +} + +# change to queue dir +chdir( $conf::incoming ) + or die "$main::progname: cannot cd to $conf::incoming: $!\n"; + +# needed before /dev/null redirects, some system send a SIGHUP when loosing +# the controlling tty +$SIG{"HUP"} = "IGNORE"; + +# open logfile, make it unbuffered +open( LOG, ">>$conf::logfile" ) + or die "Cannot open my logfile $conf::logfile: $!\n"; +chmod( 0644, $conf::logfile ) + or die "Cannot set modes of $conf::logfile: $!\n"; +select( (select(LOG), $| = 1)[0] ); + +sleep( 1 ); +$SIG{"HUP"} = \&close_log; + +# redirect stdin, ... to /dev/null +open( STDIN, "&LOG" ) + or die "$main::progname: Can't redirect stdout to $conf::logfile: $!\n"; +open( STDERR, ">&LOG" ) + or die "$main::progname: Can't redirect stderr to $conf::logfile: $!\n"; +# ok, from this point usually no "die" anymore, stderr is gone! 
+msg( "log", "daemon (pid $$) started\n" ); + +# initialize variables used by send_status before launching the status daemon +$main::dstat = "i"; +format_status_num( $main::next_run, time+10 ); +format_status_str( $main::current_changes, "" ); +check_alive(); +$main::incoming_writable = 1; # assume this for now + +# start the daemon watching the 'status' FIFO +if ($conf::statusfile && $conf::statusdelay == 0) { + $main::statusd_pid = fork_statusd(); + $SIG{"CHLD"} = \&kid_died; # watch out for dead status daemon + # SIGUSR1 triggers status info + $SIG{"USR1"} = \&send_status; +} +$main::maind_pid = $$; + +END { kill( $main::signo{"ABRT"}, $$ ) if defined $main::signo{"ABRT"}; } + +# write the pid file +open( PIDFILE, ">$conf::pidfile" ) + or msg( "log", "Can't open $conf::pidfile: $!\n" ); +printf PIDFILE "%5d\n", $$; +close( PIDFILE ); +chmod( 0644, $conf::pidfile ) + or die "Cannot set modes of $conf::pidfile: $!\n"; + +# other signals will just log an error and exit +foreach ( @main::fatal_signals ) { + $SIG{$_} = \&fatal_signal; +} + +# send signal to user-started process that we're ready and it can exit +kill( $main::signo{"USR1"}, $parent_pid ); + +# --------------------------------------------------------------------------- +# the mainloop +# --------------------------------------------------------------------------- + +$main::dstat = "i"; +write_status_file() if $conf::statusdelay; +while( 1 ) { + + # ping target only if there is the possibility that we'll contact it (but + # also don't wait too long). 
+ my @have_changes = <*.changes *.commands>; + check_alive() if @have_changes || (time - $main::last_ping_time) > 8*60*60; + + if (@have_changes && $main::target_up) { + check_incoming_writable if !$main::incoming_writable; + check_dir() if $main::incoming_writable; + } + $main::dstat = "i"; + write_status_file() if $conf::statusdelay; + + # sleep() returns if we received a signal (SIGUSR1 for status FIFO), so + # calculate the end time once and wait for it being reached. + format_status_num( $main::next_run, time + $conf::queue_delay ); + my $delta; + while( ($delta = calc_delta()) > 0 ) { + debug( "mainloop sleeping $delta secs" ); + sleep( $delta ); + # check if statusd died, if using status FIFO, or update status file + if ($conf::statusdelay) { + write_status_file(); + } + else { + restart_statusd(); + } + } +} + +sub calc_delta() { + my $delta; + + $delta = $main::next_run - time; + $delta = $conf::statusdelay + if $conf::statusdelay && $conf::statusdelay < $delta; + return $delta; +} + + +# --------------------------------------------------------------------------- +# main working functions +# --------------------------------------------------------------------------- + + +# +# main function for checking the incoming dir +# +sub check_dir() { + my( @files, @changes, @keep_files, @this_keep_files, @stats, $file ); + + debug( "starting checkdir" ); + $main::dstat = "c"; + write_status_file() if $conf::statusdelay; + + # test if needed binaries are available; this is if they're on maybe + # slow-mounted NFS filesystems + foreach (@conf::test_binaries) { + next if -f $_; + # maybe the mount succeeds now + sleep 5; + next if -f $_; + msg( "log", "binary test failed for $_; delaying queue run\n"); + goto end_run; + } + + # look for *.commands files + foreach $file ( <*.commands> ) { + init_mail( $file ); + block_signals(); + process_commands( $file ); + unblock_signals(); + $main::dstat = "c"; + write_status_file() if $conf::statusdelay; + finish_mail(); + } + + 
opendir( INC, "." ) + or (msg( "log", "Cannot open incoming dir $conf::incoming: $!\n" ), + return); + @files = readdir( INC ); + closedir( INC ); + + # process all .changes files found + @changes = grep /\.changes$/, @files; + push( @keep_files, @changes ); # .changes files aren't stray + foreach $file ( @changes ) { + init_mail( $file ); + # wrap in an eval to allow jumpbacks to here with die in case + # of errors + block_signals(); + eval { process_changes( $file, @this_keep_files ); }; + unblock_signals(); + msg( "log,mail", $@ ) if $@; + $main::dstat = "c"; + write_status_file() if $conf::statusdelay; + + # files which are ok in conjunction with this .changes + debug( "$file tells to keep @this_keep_files" ); + push( @keep_files, @this_keep_files ); + finish_mail(); + + # break out of this loop if the incoming dir has become unwritable + goto end_run if !$main::incoming_writable; + } + ftp_close() if $conf::upload_method eq "ftp"; + + # find files which aren't related to any .changes + foreach $file ( @files ) { + # filter out files we never want to delete + next if ! -f $file || # may have disappeared in the meantime + $file eq "." || $file eq ".." || + (grep { $_ eq $file } @keep_files) || + $file =~ /$conf::keep_files/; + # Delete such files if they're older than + # $stray_remove_timeout; they could be part of an + # yet-incomplete upload, with the .changes still missing. + # Cannot send any notification, since owner unknown. 
+ next if !(@stats = stat( $file )); + my $age = time - $stats[ST_MTIME]; + my( $maint, $pattern, @job_files ); + if ($file =~ /^junk-for-writable-test/ || + $file !~ m,$conf::valid_files, || + $age >= $conf::stray_remove_timeout) { + msg( "log", "Deleted stray file $file\n" ) if rm( $file ); + } + elsif ($age > $conf::no_changes_timeout && + is_debian_file( $file ) && + # not already reported + !($stats[ST_MODE] & S_ISGID) && + ($pattern = debian_file_stem( $file )) && + (@job_files = glob($pattern)) && + # If a .changes is in the list, it has the same stem as the + # found file (probably a .orig.tar.gz). Don't report in this + # case. + !(grep( /\.changes$/, @job_files ))) { + $maint = get_maintainer( $file ); + # Don't send a mail if this looks like the recompilation of a + # package for a non-i386 arch. For those, the maintainer field is + # useless :-( + if (!grep( /(\.dsc|_(i386|all)\.deb)$/, @job_files )) { + msg( "log", "Found an upload without .changes and with no ", + ".dsc file\n" ); + msg( "log", "Not sending a report, because probably ", + "recompilation job\n" ); + } + elsif ($maint) { + init_mail(); + $main::mail_addr = $maint; + $main::mail_addr = $1 if $main::mail_addr =~ /<([^>]*)>/; + $main::mail_subject = "Incomplete upload found in ". + "Debian upload queue"; + msg( "mail", "Probably you are the uploader of the following ". + "file(s) in\n" ); + msg( "mail", "the Debian upload queue directory:\n " ); + msg( "mail", join( "\n ", @job_files ), "\n" ); + msg( "mail", "This looks like an upload, but a .changes file ". + "is missing, so the job\n" ); + msg( "mail", "cannot be processed.\n\n" ); + msg( "mail", "If no .changes file arrives within ", + print_time( $conf::stray_remove_timeout - $age ), + ", the files will be deleted.\n\n" ); + msg( "mail", "If you didn't upload those files, please just ". + "ignore this message.\n" ); + finish_mail(); + msg( "log", "Sending problem report for an upload without a ". 
# NOTE(review): reconstructed from garbled gitweb-diff text — diff '+' markers
# removed, line structure restored.  This span is the continuation of the
# msg() call / foreach loop opened in the preceding (untouched) lines of
# check_dir(), followed by the head of process_changes().
         ".changes\n" );
                msg( "log", "Maintainer: $maint\n" );
            }
            else {
                msg( "log", "Found an upload without .changes, but can't ".
                            "find a maintainer address\n" );
            }
            msg( "log", "Files: @job_files\n" );
            # remember we already have sent a mail regarding this file
            # (the SGID bit on a queue file marks "problem already reported")
            foreach ( @job_files ) {
                my @st = stat($_);
                next if !@st;    # file may have disappeared in the meantime
                chmod +($st[ST_MODE] |= S_ISGID), $_;
            }
        }
        else {
            debug( "found stray file $file, deleting in ",
                   print_time($conf::stray_remove_timeout - $age) );
        }
    }

  end_run:
    $main::dstat = "i";
    write_status_file() if $conf::statusdelay;
}

#
# process one .changes file: parse it, verify its signature and the files it
# lists, then upload the job to the target host
#
sub process_changes($\@) {
    my $changes   = shift;
    my $keep_list = shift;    # out: files that belong to this job (not stray)
    my( $pgplines, @files, @filenames, @changes_stats, $failure_file,
        $retries, $last_retry, $upload_time, $file, $do_report, $ls_l,
        $problems_reported, $errs, $pkgname, $signator );
    local( *CHANGES );
    local( *FAILS );

    format_status_str( $main::current_changes, $changes );
    $main::dstat = "c";
    write_status_file() if $conf::statusdelay;

    @$keep_list = ();
    msg( "log", "processing $changes\n" );

    # parse the .changes file
    open( CHANGES, "<$changes" )
        or die "Cannot open $changes: $!\n";
    $pgplines = 0;
    $main::mail_addr = "";
    @files = ();
    # The <CHANGES> readline operators below were eaten by the HTML
    # extraction (they looked like tags); restored to read from the
    # filehandle opened just above.
  outer_loop: while( <CHANGES> ) {
        if (/^---+(BEGIN|END) PGP .*---+$/) {
            ++$pgplines;
        }
        elsif (/^Maintainer:\s*/i) {
            chomp( $main::mail_addr = $' );
            $main::mail_addr = $1 if $main::mail_addr =~ /<([^>]*)>/;
        }
        elsif (/^Source:\s*/i) {
            chomp( $pkgname = $' );
            $pkgname =~ s/\s+$//;
        }
        elsif (/^Files:/i) {
            while( <CHANGES> ) {
                redo outer_loop if !/^\s/;
                my @field = split( /\s+/ );
                next if @field != 6;
                # forbid shell meta chars in the name, we pass it to a
                # subshell several times...
# NOTE(review): reconstructed from garbled gitweb-diff text (inner Files:
# parsing loop of process_changes(), continued from the previous lines).
                # keep only names made of safe characters; anything else is
                # removed because the name is later interpolated into shells
                $field[5] =~ /^([a-zA-Z0-9.+_:@=%-][~a-zA-Z0-9.+_:@=%-]*)/;
                if ($1 ne $field[5]) {
                    msg( "log", "found suspicious filename $field[5]\n" );
                    msg( "mail", "File '$field[5]' mentioned in $changes\n",
                         "has bad characters in its name. Removed.\n" );
                    rm( $field[5] );
                    next;
                }
                push( @files, { md5  => $field[1],
                                size => $field[2],
                                name => $field[5] } );
                push( @filenames, $field[5] );
                debug( "includes file $field[5], size $field[2], ",
                       "md5 $field[1]" );
            }
        }
    }
    close( CHANGES );

    # tell check_dir that the files mentioned in this .changes aren't stray,
    # we know about them somehow
    @$keep_list = @filenames;

    # some consistency checks
    if (!$main::mail_addr) {
        msg( "log,mail", "$changes doesn't contain a Maintainer: field; ".
                         "cannot process\n" );
        goto remove_only_changes;
    }
    if ($main::mail_addr !~ /^(buildd_\S+-\S+|\S+\@\S+\.\S+)/) {
        # doesn't look like a mail address, maybe only the name
        my( $new_addr, @addr_list );
        if ($new_addr = try_to_get_mail_addr( $main::mail_addr, \@addr_list )){
            # substitute (unique) found addr, but give a warning
            msg( "mail", "(The Maintainer: field didn't contain a proper ".
                         "mail address.\n" );
            msg( "mail", "Looking for `$main::mail_addr' in the Debian ".
                         "keyring gave your address\n" );
            msg( "mail", "as unique result, so I used this.)\n" );
            msg( "log", "Substituted $new_addr for malformed ".
                        "$main::mail_addr\n" );
            $main::mail_addr = $new_addr;
        }
        else {
            # not found or not unique: hold the job and inform queue maintainer
            my $old_addr = $main::mail_addr;
            $main::mail_addr = $conf::maintainer_mail;
            msg( "mail", "The job $changes doesn't have a correct email\n" );
            msg( "mail", "address in the Maintainer: field:\n" );
            msg( "mail", "  $old_addr\n" );
            msg( "mail", "A check for this in the Debian keyring gave:\n" );
            msg( "mail", @addr_list ?
                         "  " . join( ", ", @addr_list ) . "\n" :
                         "  nothing\n" );
            msg( "mail", "Please fix this manually\n" );
            msg( "log", "Bad Maintainer: field in $changes: $old_addr\n" );
            goto remove_only_changes;
        }
    }
    if ($pgplines < 3) {
        msg( "log,mail", "$changes isn't signed with PGP/GnuPG\n" );
        msg( "log", "(uploader $main::mail_addr)\n" );
        goto remove_only_changes;
    }
    if (!@files) {
        msg( "log,mail", "$changes doesn't mention any files\n" );
        msg( "log", "(uploader $main::mail_addr)\n" );
        goto remove_only_changes;
    }

    # check for packages that shouldn't be processed
    if (grep( $_ eq $pkgname, @conf::nonus_packages )) {
        msg( "log,mail", "$pkgname is a package that must be uploaded ".
                         "to nonus.debian.org\n" );
        msg( "log,mail", "instead of target.\n" );
        msg( "log,mail", "Job rejected and removed all files belonging ".
                         "to it:\n" );
        msg( "log,mail", "  ", join( ", ", @filenames ), "\n" );
        rm( $changes, @filenames );
        return;
    }

    # read retry state of a previous failed upload, if any
    $failure_file = $changes . ".failures";
    $retries = $last_retry = 0;
    if (-f $failure_file) {
        open( FAILS, "<$failure_file" )
            or die "Cannot open $failure_file: $!\n";
        # <FAILS> readline operator restored (stripped by HTML extraction)
        my $line = <FAILS>;
        close( FAILS );
        ( $retries, $last_retry ) = ( $1, $2 ) if $line =~ /^(\d+)\s+(\d+)$/;
        push( @$keep_list, $failure_file );
    }

    # run PGP on the file to check the signature
    if (!($signator = pgp_check( $changes ))) {
        msg( "log,mail", "$changes has bad PGP/GnuPG signature!\n" );
        msg( "log", "(uploader $main::mail_addr)\n" );
      remove_only_changes:
        msg( "log,mail", "Removing $changes, but keeping its associated ",
             "files for now.\n" );
        rm( $changes );
        # Set SGID bit on associated files, so that the test for Debian files
        # without a .changes doesn't consider them.
        foreach ( @filenames ) {
            my @st = stat($_);
            next if !@st;    # file may have disappeared in the meantime
            chmod +($st[ST_MODE] |= S_ISGID), $_;
        }
        return;
    }
    elsif ($signator eq "LOCAL ERROR") {
        # An error happened when starting pgp...
Don't process the file, + # but also don't delete it + debug( "Can't PGP/GnuPG check $changes -- don't process it for now" ); + return; + } + + die "Cannot stat $changes (??): $!\n" + if !(@changes_stats = stat( $changes )); + # Make $upload_time the maximum of all modification times of files + # related to this .changes (and the .changes it self). This is the + # last time something changes to these files. + $upload_time = $changes_stats[ST_MTIME]; + for $file ( @files ) { + my @stats; + next if !(@stats = stat( $file->{"name"} )); + $file->{"stats"} = \@stats; + $upload_time = $stats[ST_MTIME] if $stats[ST_MTIME] > $upload_time; + } + + $do_report = (time - $upload_time) > $conf::problem_report_timeout; + $problems_reported = $changes_stats[ST_MODE] & S_ISGID; + # if any of the files is newer than the .changes' ctime (the time + # we sent a report and set the sticky bit), send new problem reports + if ($problems_reported && $changes_stats[ST_CTIME] < $upload_time) { + $problems_reported = 0; + chmod +($changes_stats[ST_MODE] &= ~S_ISGID), $changes; + debug( "upload_time>changes-ctime => resetting problems reported" ); + } + debug( "do_report=$do_report problems_reported=$problems_reported" ); + + # now check all files for correct size and md5 sum + for $file ( @files ) { + my $filename = $file->{"name"}; + if (!defined( $file->{"stats"} )) { + # could be an upload that isn't complete yet, be quiet, + # but don't process the file; + msg( "log,mail", "$filename doesn't exist\n" ) + if $do_report && !$problems_reported; + msg( "log", "$filename doesn't exist (ignored for now)\n" ) + if !$do_report; + msg( "log", "$filename doesn't exist (already reported)\n" ) + if $problems_reported; + ++$errs; + } + elsif ($file->{"stats"}->[ST_SIZE] < $file->{"size"} && !$do_report) { + # could be an upload that isn't complete yet, be quiet, + # but don't process the file + msg( "log", "$filename is too small (ignored for now)\n" ); + ++$errs; + } + elsif 
($file->{"stats"}->[ST_SIZE] != $file->{"size"}) { + msg( "log,mail", "$filename has incorrect size; deleting it\n" ); + rm( $filename ); + ++$errs; + } + elsif (md5sum( $filename ) ne $file->{"md5"}) { + msg( "log,mail", "$filename has incorrect md5 checksum; ", + "deleting it\n" ); + rm( $filename ); + ++$errs; + } + } + + if ($errs) { + if ((time - $upload_time) > $conf::bad_changes_timeout) { + # if a .changes fails for a really long time (several days + # or so), remove it and all associated files + msg( "log,mail", + "$changes couldn't be processed for ", + int($conf::bad_changes_timeout/(60*60)), + " hours and is now deleted\n" ); + msg( "log,mail", + "All files it mentions are also removed:\n" ); + msg( "log,mail", " ", join( ", ", @filenames ), "\n" ); + rm( $changes, @filenames, $failure_file ); + } + elsif ($do_report && !$problems_reported) { + # otherwise, send a problem report, if not done already + msg( "mail", + "Due to the errors above, the .changes file couldn't ", + "be processed.\n", + "Please fix the problems for the upload to happen.\n" ); + # remember we already have sent a mail regarding this file + debug( "Sending problem report mail and setting SGID bit" ); + my $mode = $changes_stats[ST_MODE] |= S_ISGID; + msg( "log", "chmod failed: $!" ) if (chmod ($mode, $changes) != 1); + } + # else: be quiet + + return; + } + + # if this upload already failed earlier, wait until the delay requirement + # is fulfilled + if ($retries > 0 && (time - $last_retry) < + ($retries == 1 ? 
$conf::upload_delay_1 : $conf::upload_delay_2)) { + msg( "log", "delaying retry of upload\n" ); + return; + } + + if ($conf::upload_method eq "ftp") { + return if !ftp_open(); + } + + # check if the job is already present on target + # (moved to here, to avoid bothering target as long as there are errors in + # the job) + if ($ls_l = is_on_target( $changes )) { + msg( "log,mail", "$changes is already present on target host:\n" ); + msg( "log,mail", "$ls_l\n" ); + msg( "mail", "Either you already uploaded it, or someone else ", + "came first.\n" ); + msg( "log,mail", "Job $changes removed.\n" ); + rm( $changes, @filenames, $failure_file ); + return; + } + + # clear sgid bit before upload, scp would copy it to target. We don't need + # it anymore, we know there are no problems if we come here. Also change + # mode of files to 644 if this should be done locally. + $changes_stats[ST_MODE] &= ~S_ISGID; + if (!$conf::chmod_on_target) { + $changes_stats[ST_MODE] &= ~0777; + $changes_stats[ST_MODE] |= 0644; + } + chmod +($changes_stats[ST_MODE]), $changes; + + # try uploading to target + if (!copy_to_target( $changes, @filenames )) { + # if the upload failed, increment the retry counter and remember the + # current time; both things are written to the .failures file. Don't + # increment the fail counter if the error was due to incoming + # unwritable. 
+ return if !$main::incoming_writable; + if (++$retries >= $conf::max_upload_retries) { + msg( "log,mail", + "$changes couldn't be uploaded for $retries times now.\n" ); + msg( "log,mail", + "Giving up and removing it and its associated files:\n" ); + msg( "log,mail", " ", join( ", ", @filenames ), "\n" ); + rm( $changes, @filenames, $failure_file ); + } + else { + $last_retry = time; + if (open( FAILS, ">$failure_file" )) { + print FAILS "$retries $last_retry\n"; + close( FAILS ); + chmod( 0600, $failure_file ) + or die "Cannot set modes of $failure_file: $!\n"; + } + push( @$keep_list, $failure_file ); + debug( "now $retries failed uploads" ); + msg( "mail", + "The upload will be retried in ", + print_time( $retries == 1 ? $conf::upload_delay_1 : + $conf::upload_delay_2 ), "\n" ); + } + return; + } + + # If the files were uploaded ok, remove them + rm( $changes, @filenames, $failure_file ); + + msg( "mail", "$changes uploaded successfully to $conf::target\n" ); + msg( "mail", "along with the files:\n ", + join( "\n ", @filenames ), "\n" ); + msg( "log", "$changes processed successfully (uploader $main::mail_addr)\n" ); + + # Check for files that have the same stem as the .changes (and weren't + # mentioned there) and delete them. It happens often enough that people + # upload a .orig.tar.gz where it isn't needed and also not in the + # .changes. Explicitly deleting it (and not waiting for the + # $stray_remove_timeout) reduces clutter in the queue dir and maybe also + # educates uploaders :-) + +# my $pattern = debian_file_stem( $changes ); +# my $spattern = substr( $pattern, 0, -1 ); # strip off '*' at end +# my @other_files = glob($pattern); + # filter out files that have a Debian revision at all and a different + # revision. Those belong to a different upload. 
# NOTE(review): reconstructed from garbled gitweb-diff text.  This span holds
# the (commented-out) stray-sibling cleanup and closing brace of
# process_changes(), then process_commands() and is_on_target() in full.
#    if ($changes =~ /^\Q$spattern\E-([\d.+-]+)/) {
#        my $this_rev = $1;
#        @other_files = grep( !/^\Q$spattern\E-([\d.+-]+)/ || $1 eq $this_rev,
#                             @other_files);
#    }
    # Also do not remove those files if a .changes is among them. Then there
    # is probably a second upload for another version or another architecture.
#    if (@other_files && !grep( /\.changes$/, @other_files )) {
#        rm( @other_files );
#        msg( "mail", "\nThe following file(s) seemed to belong to the same ".
#                     "upload, but weren't listed\n" );
#        msg( "mail", "in the .changes file:\n  " );
#        msg( "mail", join( "\n  ", @other_files ), "\n" );
#        msg( "mail", "They have been deleted.\n" );
#        msg( "log", "Deleted files in upload not in $changes: @other_files\n" );
#    }
}

#
# process one .commands file: verify its signature, then execute the
# rm/mv commands it contains (restricted to the queue directory)
#
sub process_commands($) {
    my $commands = shift;
    my( @cmds, $cmd, $pgplines, $signator );
    local( *COMMANDS );

    format_status_str( $main::current_changes, $commands );
    $main::dstat = "c";
    write_status_file() if $conf::statusdelay;

    msg( "log", "processing $commands\n" );

    # parse the .commands file
    if (!open( COMMANDS, "<$commands" )) {
        msg( "log", "Cannot open $commands: $!\n" );
        return;
    }
    $pgplines = 0;
    $main::mail_addr = "";
    @cmds = ();
    # <COMMANDS> readline operators restored (stripped by HTML extraction)
  outer_loop: while( <COMMANDS> ) {
        if (/^---+(BEGIN|END) PGP .*---+$/) {
            ++$pgplines;
        }
        elsif (/^Uploader:\s*/i) {
            chomp( $main::mail_addr = $' );
            $main::mail_addr = $1 if $main::mail_addr =~ /<([^>]*)>/;
        }
        elsif (/^Commands:/i) {
            $_ = $';
            for(;;) {
                s/^\s*(.*)\s*$/$1/;    # delete whitespace at both ends
                if (!/^\s*$/) {
                    push( @cmds, $_ );
                    debug( "includes cmd $_" );
                }
                last outer_loop if !defined( $_ = scalar(<COMMANDS>) );
                chomp;
                redo outer_loop if !/^\s/ || /^$/;
            }
        }
    }
    close( COMMANDS );

    # some consistency checks
    if (!$main::mail_addr || $main::mail_addr !~ /^\S+\@\S+\.\S+/) {
        msg( "log,mail", "$commands contains no or bad Uploader: field: ".
                         "$main::mail_addr\n" );
        msg( "log,mail", "cannot process $commands\n" );
        $main::mail_addr = "";
        goto remove;
    }
    msg( "log", "(command uploader $main::mail_addr)\n" );

    if ($pgplines < 3) {
        msg( "log,mail", "$commands isn't signed with PGP/GnuPG\n" );
        goto remove;
    }

    # run PGP on the file to check the signature
    if (!($signator = pgp_check( $commands ))) {
        msg( "log,mail", "$commands has bad PGP/GnuPG signature!\n" );
      remove:
        msg( "log,mail", "Removing $commands\n" );
        rm( $commands );
        return;
    }
    elsif ($signator eq "LOCAL ERROR") {
        # An error happened when starting pgp... Don't process the file,
        # but also don't delete it
        debug( "Can't PGP/GnuPG check $commands -- don't process it for now" );
        return;
    }
    msg( "log", "(PGP/GnuPG signature by $signator)\n" );

    # now process commands
    msg( "mail", "Log of processing your commands file $commands:\n\n" );
    foreach $cmd ( @cmds ) {
        my @word = split( /\s+/, $cmd );
        msg( "mail,log", "> @word\n" );
        next if @word < 1;

        if ($word[0] eq "rm") {
            my( @files, $file, @removed );
            foreach ( @word[1..$#word] ) {
                if (m,/,) {
                    # no slashes: commands may only touch the queue dir itself
                    msg( "mail,log", "$_: filename may not contain slashes\n" );
                }
                elsif (/[*?[]/) {
                    # process wildcards
                    my $pat = quotemeta($_);
                    $pat =~ s/\\\*/.*/g;
                    $pat =~ s/\\\?/.?/g;
                    $pat =~ s/\\([][])/$1/g;
                    opendir( DIR, "." );
                    push( @files, grep /^$pat$/, readdir(DIR) );
                    closedir( DIR );
                }
                else {
                    push( @files, $_ );
                }
            }
            if (!@files) {
                msg( "mail,log", "No files to delete\n" );
            }
            else {
                @removed = ();
                foreach $file ( @files ) {
                    if (!-f $file) {
                        msg( "mail,log", "$file: no such file\n" );
                    }
                    elsif ($file =~ /$conf::keep_files/) {
                        msg( "mail,log", "$file is protected, cannot ".
                                         "remove\n" );
                    }
                    elsif (!unlink( $file )) {
                        msg( "mail,log", "$file: rm: $!\n" );
                    }
                    else {
                        push( @removed, $file );
                    }
                }
                msg( "mail,log", "Files removed: @removed\n" ) if @removed;
            }
        }
        elsif ($word[0] eq "mv") {
            if (@word != 3) {
                msg( "mail,log", "Wrong number of arguments\n" );
            }
            elsif ($word[1] =~ m,/,) {
                msg( "mail,log", "$word[1]: filename may not contain slashes\n" );
            }
            elsif ($word[2] =~ m,/,) {
                msg( "mail,log", "$word[2]: filename may not contain slashes\n" );
            }
            elsif (!-f $word[1]) {
                msg( "mail,log", "$word[1]: no such file\n" );
            }
            elsif (-e $word[2]) {
                msg( "mail,log", "$word[2]: file exists\n" );
            }
            elsif ($word[1] =~ /$conf::keep_files/) {
                msg( "mail,log", "$word[1] is protected, cannot rename\n" );
            }
            else {
                if (!rename( $word[1], $word[2] )) {
                    msg( "mail,log", "rename: $!\n" );
                }
                else {
                    msg( "mail,log", "OK\n" );
                }
            }
        }
        else {
            msg( "mail,log", "unknown command $word[0]\n" );
        }
    }
    rm( $commands );
    msg( "log", "-- End of $commands processing\n" );
}

#
# check if a file is already on target; returns the target's "ls -l" line
# for the file, or "" if it is not present
#
sub is_on_target($) {
    my $file = shift;
    my $msg;
    my $stat;

    if ($conf::upload_method eq "ssh") {
        ($msg, $stat) = ssh_cmd( "ls -l $file" );
    }
    elsif ($conf::upload_method eq "ftp") {
        my $err;
        ($msg, $err) = ftp_cmd( "dir", $file );
        if ($err) {
            $stat = 1;
            $msg = $err;
        }
        elsif (!$msg) {
            $stat = 1;
            $msg = "ls: no such file\n";
        }
        else {
            $stat = 0;
            $msg = join( "\n", @$msg );
        }
    }
    else {
        ($msg, $stat) = local_cmd( "$conf::ls -l $file" );
    }
    chomp( $msg );
    debug( "exit status: $stat, output was: $msg" );

    return "" if $stat && $msg =~ /no such file/i;    # file not present
    msg( "log", "strange ls -l output on target:\n", $msg ), return ""
        if $stat || $@;    # some other error, but still try to upload

    # ls -l returned 0 -> file already there
    $msg =~ s/\s\s+/ /g;    # make multiple spaces into one, to save space
    return $msg;
}
+ +# +# copy a list of files to target +# +sub copy_to_target(@) { + my @files = @_; + my( @md5sum, @expected_files, $sum, $name, $msgs, $stat ); + + $main::dstat = "u"; + write_status_file() if $conf::statusdelay; + + # copy the files + if ($conf::upload_method eq "ssh") { + ($msgs, $stat) = scp_cmd( @files ); + goto err if $stat; + } + elsif ($conf::upload_method eq "ftp") { + my($rv, $file); + foreach $file (@files) { + ($rv, $msgs) = ftp_cmd( "put", $file ); + goto err if !$rv; + } + } + else { + ($msgs, $stat) = local_cmd( "$conf::cp @files $conf::targetdir", 'NOCD' ); + goto err if $stat; + } + + # check md5sums or sizes on target against our own + my $have_md5sums = 1; + if ($conf::upload_method eq "ssh") { + ($msgs, $stat) = ssh_cmd( "md5sum @files" ); + goto err if $stat; + @md5sum = split( "\n", $msgs ); + } + elsif ($conf::upload_method eq "ftp") { + my ($rv, $err, $file); + foreach $file (@files) { + ($rv, $err) = ftp_cmd( "quot", "site", "md5sum", $file ); + if ($err) { + next if ftp_code() == 550; # file not found + if (ftp_code() == 500) { # unimplemented + $have_md5sums = 0; + goto get_sizes_instead; + } + $msgs = $err; + goto err; + } + chomp( my $t = ftp_response() ); + push( @md5sum, $t ); + } + if (!$have_md5sums) { + get_sizes_instead: + foreach $file (@files) { + ($rv, $err) = ftp_cmd( "size", $file ); + if ($err) { + next if ftp_code() == 550; # file not found + $msgs = $err; + goto err; + } + push( @md5sum, "$rv $file" ); + } + } + } + else { + ($msgs, $stat) = local_cmd( "$conf::md5sum @files" ); + goto err if $stat; + @md5sum = split( "\n", $msgs ); + } + + @expected_files = @files; + foreach (@md5sum) { + chomp; + ($sum,$name) = split; + next if !grep { $_ eq $name } @files; # a file we didn't upload?? + next if $sum eq "md5sum:"; # looks like an error message + if (($have_md5sums && $sum ne md5sum( $name )) || + (!$have_md5sums && $sum != (-s $name))) { + msg( "log,mail", "Upload of $name to $conf::target failed ", + "(".($have_md5sums ? 
"md5sum" : "size")." mismatch)\n" ); + goto err; + } + # seen that file, remove it from expect list + @expected_files = map { $_ eq $name ? () : $_ } @expected_files; + } + if (@expected_files) { + msg( "log,mail", "Failed to upload the files\n" ); + msg( "log,mail", " ", join( ", ", @expected_files ), "\n" ); + msg( "log,mail", "(Not present on target after upload)\n" ); + goto err; + } + + if ($conf::chmod_on_target) { + # change file's mode explicitly to 644 on target + if ($conf::upload_method eq "ssh") { + ($msgs, $stat) = ssh_cmd( "chmod 644 @files" ); + goto err if $stat; + } + elsif ($conf::upload_method eq "ftp") { + my ($rv, $file); + foreach $file (@files) { + ($rv, $msgs) = ftp_cmd( "quot", "site", "chmod", "644", $file ); + msg( "log", "Can't chmod $file on target:\n$msgs" ) + if $msgs; + goto err if !$rv; + } + } + else { + ($msgs, $stat) = local_cmd( "$conf::chmod 644 @files" ); + goto err if $stat; + } + } + + $main::dstat = "c"; + write_status_file() if $conf::statusdelay; + return 1; + + err: + msg( "log,mail", "Upload to $conf::target failed", + $? ? ", last exit status ".sprintf( "%s", $?>>8 ) : "", "\n" ); + msg( "log,mail", "Error messages:\n", $msgs ) + if $msgs; + + # If "permission denied" was among the errors, test if the incoming is + # writable at all. 
# NOTE(review): reconstructed from garbled gitweb-diff text.  This span holds
# the error-cleanup tail of copy_to_target(), pgp_check() in full, and the
# head of fork_statusd() (cut off at the end of the visible chunk and left
# open exactly as in the original).
    if ($msgs =~ /(permission denied|read-?only file)/i) {
        if (!check_incoming_writable()) {
            msg( "log,mail", "(The incoming directory seems to be ",
                 "unwritable.)\n" );
        }
    }

    # remove bad files or an incomplete upload on target
    if ($conf::upload_method eq "ssh") {
        ssh_cmd( "rm -f @files" );
    }
    elsif ($conf::upload_method eq "ftp") {
        my $file;
        foreach $file (@files) {
            my ($rv, $err);
            ($rv, $err) = ftp_cmd( "delete", $file );
            msg( "log", "Can't delete $file on target:\n$err" )
                if $err;
        }
    }
    else {
        my @tfiles = map { "$conf::targetdir/$_" } @files;
        debug( "executing unlink(@tfiles)" );
        rm( @tfiles );
    }
    $main::dstat = "c";
    write_status_file() if $conf::statusdelay;
    return 0;
}

#
# check if a file is correctly signed with PGP; returns the signator name,
# "" on a bad signature, or "LOCAL ERROR" if gpg could not be started
#
sub pgp_check($) {
    my $file = shift;
    my $output = "";
    my $signator;
    my $found = 0;
    my $stat;
    local( *PIPE );

    $stat = 1;
    if (-x $conf::gpg) {
        debug( "executing $conf::gpg --no-options --batch ".
               "--no-default-keyring --always-trust ".
               "--keyring ". join (" --keyring ",@conf::keyrings).
               " --verify '$file'" );
        if (!open( PIPE, "$conf::gpg --no-options --batch ".
                         "--no-default-keyring --always-trust ".
                         "--keyring " . join (" --keyring ",@conf::keyrings).
                         " --verify '$file'".
                         " 2>&1 |" )) {
            msg( "log", "Can't open pipe to $conf::gpg: $!\n" );
            return "LOCAL ERROR";
        }
        # <PIPE> readline operator restored (stripped by HTML extraction)
        $output .= $_ while( <PIPE> );
        close( PIPE );
        $stat = $?;
    }

    if ($stat) {
        msg( "log,mail", "GnuPG signature check failed on $file\n" );
        msg( "mail", $output );
        msg( "log,mail", "(Exit status ", $stat >> 8, ")\n" );
        return "";
    }

    $output =~ /^(gpg: )?good signature from (user )?"(.*)"\.?$/im;
    ($signator = $3) ||= "unknown signator";
    if ($conf::debug) {
        debug( "GnuPG signature ok (by $signator)" );
    }
    return $signator;
}


# ---------------------------------------------------------------------------
# the status daemon
# ---------------------------------------------------------------------------

#
# fork a subprocess that watches the 'status' FIFO
#
# that process blocks until someone opens the FIFO, then sends a
# signal (SIGUSR1) to the main process, expects
#
sub fork_statusd() {
    my $statusd_pid;
    my $main_pid = $$;
    my $errs;
    local( *STATFIFO );

    $statusd_pid = open( STATUSD, "|-" );
    die "cannot fork: $!\n" if !defined( $statusd_pid );
    # parent just returns
    if ($statusd_pid) {
        msg( "log", "forked status daemon (pid $statusd_pid)\n" );
        return $statusd_pid;
    }
    # child: the status FIFO daemon

    # ignore SIGPIPE here, in case some closes the FIFO without completely
    # reading it
    $SIG{"PIPE"} = "IGNORE";
    # also ignore SIGCLD, we don't want to inherit the restart-statusd handler
    # from our parent
    $SIG{"CHLD"} = "DEFAULT";

    rm( $conf::statusfile );
    $errs = `$conf::mkfifo $conf::statusfile`;
    die "$main::progname: cannot create named pipe $conf::statusfile: $errs"
        if $?;
    chmod( 0644, $conf::statusfile )
        or die "Cannot set modes of $conf::statusfile: $!\n";

    # close log file, so that log rotating works
    close( LOG );
    close( STDOUT );
    close( STDERR );

    while( 1 ) {
        my( $status, $mup, $incw, $ds, $next_run, $last_ping, $currch, $l );

        # open the FIFO for writing; this blocks
until someone (probably ftpd) + # opens it for reading + open( STATFIFO, ">$conf::statusfile" ) + or die "Cannot open $conf::statusfile\n"; + select( STATFIFO ); + # tell main daemon to send us status infos + kill( $main::signo{"USR1"}, $main_pid ); + + # get the infos from stdin; must loop until enough bytes received! + my $expect_len = 3 + 2*STATNUM_LEN + STATSTR_LEN; + for( $status = ""; ($l = length($status)) < $expect_len; ) { + sysread( STDIN, $status, $expect_len-$l, $l ); + } + + # disassemble the status byte stream + my $pos = 0; + foreach ( [ mup => 1 ], [ incw => 1 ], [ ds => 1 ], + [ next_run => STATNUM_LEN ], [ last_ping => STATNUM_LEN ], + [ currch => STATSTR_LEN ] ) { + eval "\$$_->[0] = substr( \$status, $pos, $_->[1] );"; + $pos += $_->[1]; + } + $currch =~ s/\n+//g; + + print_status( $mup, $incw, $ds, $next_run, $last_ping, $currch ); + close( STATFIFO ); + + # This sleep is necessary so that we can't reopen the FIFO + # immediately, in case the reader hasn't closed it yet if we get to + # the open again. Is there a better solution for this?? + sleep 1; + } +} + +# +# update the status file, in case we use a plain file and not a FIFO +# +sub write_status_file() { + + return if !$conf::statusfile; + + open( STATFILE, ">$conf::statusfile" ) or + (msg( "log", "Could not open $conf::statusfile: $!\n" ), return); + my $oldsel = select( STATFILE ); + + print_status( $main::target_up, $main::incoming_writable, $main::dstat, + $main::next_run, $main::last_ping_time, + $main::current_changes ); + + select( $oldsel ); + close( STATFILE ); +} + +sub print_status($$$$$$) { + my $mup = shift; + my $incw = shift; + my $ds = shift; + my $next_run = shift; + my $last_ping = shift; + my $currch = shift; + my $approx; + my $version; + + ($version = 'Release: 0.9 $Revision: 1.51 $') =~ s/\$ ?//g; + print "debianqueued $version\n"; + + $approx = $conf::statusdelay ? "approx. 
" : ""; + + if ($mup eq "0") { + print "$conf::target is down, queue pausing\n"; + return; + } + elsif ($conf::upload_method ne "copy") { + print "$conf::target seems to be up, last ping $approx", + print_time(time-$last_ping), " ago\n"; + } + + if ($incw eq "0") { + print "The incoming directory is not writable, queue pausing\n"; + return; + } + + if ($ds eq "i") { + print "Next queue check in $approx",print_time($next_run-time),"\n"; + return; + } + elsif ($ds eq "c") { + print "Checking queue directory\n"; + } + elsif ($ds eq "u") { + print "Uploading to $conf::target\n"; + } + else { + print "Bad status data from daemon: \"$mup$incw$ds\"\n"; + return; + } + + print "Current job is $currch\n" if $currch; +} + +# +# format a number for sending to statusd (fixed length STATNUM_LEN) +# +sub format_status_num(\$$) { + my $varref = shift; + my $num = shift; + + $$varref = sprintf "%".STATNUM_LEN."d", $num; +} + +# +# format a string for sending to statusd (fixed length STATSTR_LEN) +# +sub format_status_str(\$$) { + my $varref = shift; + my $str = shift; + + $$varref = substr( $str, 0, STATSTR_LEN ); + $$varref .= "\n" x (STATSTR_LEN - length($$varref)); +} + +# +# send a status string to the status daemon +# +# Avoid all operations that could call malloc() here! Most libc +# implementations aren't reentrant, so we may not call it from a +# signal handler. So use only already-defined variables. +# +sub send_status() { + local $! 
= 0; # preserve errno + + # re-setup handler, in case we have broken SysV signals + $SIG{"USR1"} = \&send_status; + + syswrite( STATUSD, $main::target_up, 1 ); + syswrite( STATUSD, $main::incoming_writable, 1 ); + syswrite( STATUSD, $main::dstat, 1 ); + syswrite( STATUSD, $main::next_run, STATNUM_LEN ); + syswrite( STATUSD, $main::last_ping_time, STATNUM_LEN ); + syswrite( STATUSD, $main::current_changes, STATSTR_LEN ); +} + + +# --------------------------------------------------------------------------- +# FTP functions +# --------------------------------------------------------------------------- + +# +# open FTP connection to target host if not already open +# +sub ftp_open() { + + if ($main::FTP_chan) { + # is already open, but might have timed out; test with a cwd + return $main::FTP_chan if $main::FTP_chan->cwd( $conf::targetdir ); + # cwd didn't work, channel is closed, try to reopen it + $main::FTP_chan = undef; + } + + if (!($main::FTP_chan = Net::FTP->new( $conf::target, + Debug => $conf::ftpdebug, + Timeout => $conf::ftptimeout ))) { + msg( "log,mail", "Cannot open FTP server $conf::target\n" ); + goto err; + } + if (!$main::FTP_chan->login()) { + msg( "log,mail", "Anonymous login on FTP server $conf::target failed\n" ); + goto err; + } + if (!$main::FTP_chan->binary()) { + msg( "log,mail", "Can't set binary FTP mode on $conf::target\n" ); + goto err; + } + if (!$main::FTP_chan->cwd( $conf::targetdir )) { + msg( "log,mail", "Can't cd to $conf::targetdir on $conf::target\n" ); + goto err; + } + debug( "opened FTP channel to $conf::target" ); + return 1; + + err: + $main::FTP_chan = undef; + return 0; +} + +sub ftp_cmd($@) { + my $cmd = shift; + my ($rv, $err); + my $direct_resp_cmd = ($cmd eq "quot"); + + debug( "executing FTP::$cmd(".join(", ",@_).")" ); + $SIG{"ALRM"} = sub { die "timeout in FTP::$cmd\n" } ; + alarm( $conf::remote_timeout ); + eval { $rv = $main::FTP_chan->$cmd( @_ ); }; + alarm( 0 ); + $err = ""; + $rv = (ftp_code() =~ /^2/) ? 
1 : 0 if $direct_resp_cmd; + if ($@) { + $err = $@; + undef $rv; + } + elsif (!$rv) { + $err = ftp_response(); + } + return ($rv, $err); +} + +sub ftp_close() { + if ($main::FTP_chan) { + $main::FTP_chan->quit(); + $main::FTP_chan = undef; + } + return 1; +} + +sub ftp_response() { + return join( '', @{${*$main::FTP_chan}{'net_cmd_resp'}} ); +} + +sub ftp_code() { + return ${*$main::FTP_chan}{'net_cmd_code'}; +} + +sub ftp_error() { + my $code = ftp_code(); + return ($code =~ /^[45]/) ? 1 : 0; +} + +# --------------------------------------------------------------------------- +# utility functions +# --------------------------------------------------------------------------- + +sub ssh_cmd($) { + my $cmd = shift; + my ($msg, $stat); + + my $ecmd = "$conf::ssh $conf::ssh_options $conf::target ". + "-l $conf::targetlogin \'cd $conf::targetdir; $cmd\'"; + debug( "executing $ecmd" ); + $SIG{"ALRM"} = sub { die "timeout in ssh command\n" } ; + alarm( $conf::remote_timeout ); + eval { $msg = `$ecmd 2>&1`; }; + alarm( 0 ); + if ($@) { + $msg = $@; + $stat = 1; + } + else { + $stat = $?; + } + return ($msg, $stat); +} + +sub scp_cmd(@) { + my ($msg, $stat); + + my $ecmd = "$conf::scp $conf::ssh_options @_ ". + "$conf::targetlogin\@$conf::target:$conf::targetdir"; + debug( "executing $ecmd" ); + $SIG{"ALRM"} = sub { die "timeout in scp\n" } ; + alarm( $conf::remote_timeout ); + eval { $msg = `$ecmd 2>&1`; }; + alarm( 0 ); + if ($@) { + $msg = $@; + $stat = 1; + } + else { + $stat = $?; + } + return ($msg, $stat); +} + +sub local_cmd($;$) { + my $cmd = shift; + my $nocd = shift; + my ($msg, $stat); + + my $ecmd = ($nocd ? "" : "cd $conf::targetdir; ") . 
$cmd; + debug( "executing $ecmd" ); + $msg = `($ecmd) 2>&1`; + $stat = $?; + return ($msg, $stat); + +} + +# +# check if target is alive (code stolen from Net::Ping.pm) +# +sub check_alive(;$) { + my $timeout = shift; + my( $saddr, $ret, $target_ip ); + local( *PINGSOCK ); + + if ($conf::upload_method eq "copy") { + format_status_num( $main::last_ping_time, time ); + $main::target_up = 1; + return; + } + + $timeout ||= 30; + + if (!($target_ip = (gethostbyname($conf::target))[4])) { + msg( "log", "Cannot get IP address of $conf::target\n" ); + $ret = 0; + goto out; + } + $saddr = pack( 'S n a4 x8', AF_INET, $main::echo_port, $target_ip ); + $SIG{'ALRM'} = sub { die } ; + alarm( $timeout ); + + $ret = $main::tcp_proto; # avoid warnings about unused variable + $ret = 0; + eval <<'EOM' ; + return unless socket( PINGSOCK, PF_INET, SOCK_STREAM, $main::tcp_proto ); + return unless connect( PINGSOCK, $saddr ); + $ret = 1; +EOM + alarm( 0 ); + close( PINGSOCK ); + msg( "log", "pinging $conf::target: " . ($ret ? "ok" : "down") . "\n" ); + out: + $main::target_up = $ret ? "1" : "0"; + format_status_num( $main::last_ping_time, time ); + write_status_file() if $conf::statusdelay; +} + +# +# check if incoming dir on target is writable +# +sub check_incoming_writable() { + my $testfile = ".debianqueued-testfile"; + my ($msg, $stat); + + if ($conf::upload_method eq "ssh") { + ($msg, $stat) = ssh_cmd( "rm -f $testfile; touch $testfile; ". + "rm -f $testfile" ); + } + elsif ($conf::upload_method eq "ftp") { + my $file = "junk-for-writable-test-".format_time(); + $file =~ s/[ :.]/-/g; + local( *F ); + open( F, ">$file" ); close( F ); + my $rv; + ($rv, $msg) = ftp_cmd( "put", $file ); + $stat = 0; + $msg = "" if !defined $msg; + unlink $file; + ftp_cmd( "delete", $file ); + } + elsif ($conf::upload_method eq "copy") { + ($msg, $stat) = local_cmd( "rm -f $testfile; touch $testfile; ". 
+ "rm -f $testfile" ); + } + chomp( $msg ); + debug( "exit status: $stat, output was: $msg" ); + + if (!$stat) { + # change incoming_writable only if ssh didn't return an error + $main::incoming_writable = + ($msg =~ /(permission denied|read-?only file|cannot create)/i) ? "0":"1"; + } + else { + debug( "local error, keeping old status" ); + } + debug( "incoming_writable = $main::incoming_writable" ); + write_status_file() if $conf::statusdelay; + return $main::incoming_writable; +} + +# +# remove a list of files, log failing ones +# +sub rm(@) { + my $done = 0; + + foreach ( @_ ) { + (unlink $_ and ++$done) + or $! == ENOENT or msg( "log", "Could not delete $_: $!\n" ); + } + return $done; +} + +# +# get md5 checksum of a file +# +sub md5sum($) { + my $file = shift; + my $line; + + chomp( $line = `$conf::md5sum $file` ); + debug( "md5sum($file): ", $? ? "exit status $?" : + $line =~ /^(\S+)/ ? $1 : "match failed" ); + return $? ? "" : $line =~ /^(\S+)/ ? $1 : ""; +} + +# +# check if a file probably belongs to a Debian upload +# +sub is_debian_file($) { + my $file = shift; + return $file =~ /\.(deb|dsc|(diff|tar)\.gz)$/ && + $file !~ /\.orig\.tar\.gz/; +} + +# +# try to extract maintainer email address from some a non-.changes file +# return "" if not possible +# +sub get_maintainer($) { + my $file = shift; + my $maintainer = ""; + local( *F ); + + if ($file =~ /\.diff\.gz$/) { + # parse a diff + open( F, "$conf::gzip -dc '$file' 2>/dev/null |" ) or return ""; + while( ) { + # look for header line of a file */debian/control + last if m,^\+\+\+\s+[^/]+/debian/control(\s+|$),; + } + while( ) { + last if /^---/; # end of control file patch, no Maintainer: found + # inside control file patch look for Maintainer: field + $maintainer = $1, last if /^\+Maintainer:\s*(.*)$/i; + } + while( ) { } # read to end of file to avoid broken pipe + close( F ) or return ""; + } + elsif ($file =~ /\.(deb|dsc|tar\.gz)$/) { + if ($file =~ /\.deb$/ && $conf::ar) { + # extract 
control.tar.gz from .deb with ar, then let tar extract
+ # the control file itself
+ open( F, "($conf::ar p '$file' control.tar.gz | ".
+ "$conf::tar -xOf - ".
+ "--use-compress-program $conf::gzip ".
+ "control) 2>/dev/null |" )
+ or return "";
+ }
+ elsif ($file =~ /\.dsc$/) {
+ # just do a plain grep
+ debug( "get_maint: .dsc, no cmd" );
+ open( F, "<$file" ) or return "";
+ }
+ elsif ($file =~ /\.tar\.gz$/) {
+ # let tar extract a file */debian/control
+ open(F, "$conf::tar -xOf '$file' ".
+ "--use-compress-program $conf::gzip ".
+ "\\*/debian/control 2>&1 |")
+ or return "";
+ }
+ else {
+ return "";
+ }
+ while( <F> ) {
+ $maintainer = $1, last if /^Maintainer:\s*(.*)$/i;
+ }
+ close( F ) or return "";
+ }
+
+ return $maintainer;
+}
+
+#
+# return a pattern that matches all files that probably belong to one job
+#
+sub debian_file_stem($) {
+ my $file = shift;
+ my( $pkg, $version );
+
+ # strip file suffix
+ $file =~ s,\.(deb|dsc|changes|(orig\.)?tar\.gz|diff\.gz)$,,;
+ # if not is *_* (name_version), can't derive a stem and return just
+ # the file's name
+ return $file if !($file =~ /^([^_]+)_([^_]+)/);
+ ($pkg, $version) = ($1, $2);
+ # strip Debian revision from version
+ $version =~ s/^(.*)-[\d.+-]+$/$1/;
+
+ return "${pkg}_${version}*";
+}
+
+#
+# output a message to several destinations
+#
+# first arg is a comma-separated list of destinations; valid are "log"
+# and "mail"; rest is stuff to be printed, just as with print
+#
+sub msg($@) {
+ my @dest = split( ',', shift );
+
+ if (grep /log/, @dest ) {
+ my $now = format_time();
+ print LOG "$now ", @_;
+ }
+
+ if (grep /mail/, @dest ) {
+ $main::mail_text .= join( '', @_ );
+ }
+}
+
+#
+# print a debug message, if $debug is true
+#
+sub debug(@) {
+ return if !$conf::debug;
+ my $now = format_time();
+ print LOG "$now DEBUG ", @_, "\n";
+}
+
+#
+# initialize the "mail" destination of msg() (this clears text,
+# address, subject, ...)
+# +sub init_mail(;$) { + my $file = shift; + + $main::mail_addr = ""; + $main::mail_text = ""; + $main::mail_subject = $file ? "Processing of $file" : ""; +} + +# +# finalize mail to be sent from msg(): check if something present, and +# then send out +# +sub finish_mail() { + local( *MAIL ); + + debug( "No mail for $main::mail_addr" ) + if $main::mail_addr && !$main::mail_text; + return unless $main::mail_addr && $main::mail_text; + + if (!send_mail($main::mail_addr, $main::mail_subject, $main::mail_text)) { + # store this mail in memory so it isn't lost if executing sendmail + # failed. + push( @main::stored_mails, { addr => $main::mail_addr, + subject => $main::mail_subject, + text => $main::mail_text } ); + } + init_mail(); + + # try to send out stored mails + my $mailref; + while( $mailref = shift(@main::stored_mails) ) { + if (!send_mail( $mailref->{'addr'}, $mailref->{'subject'}, + $mailref->{'text'} )) { + unshift( @main::stored_mails, $mailref ); + last; + } + } +} + +# +# send one mail +# +sub send_mail($$$) { + my $addr = shift; + my $subject = shift; + my $text = shift; + + debug( "Sending mail to $addr" ); + debug( "executing $conf::mail -s '$subject' '$addr'" ); + if (!open( MAIL, "|$conf::mail -s '$subject' '$addr'" )) { + msg( "log", "Could not open pipe to $conf::mail: $!\n" ); + return 0; + } + print MAIL $text; + print MAIL "\nGreetings,\n\n\tYour Debian queue daemon\n"; + if (!close( MAIL )) { + msg( "log", "$conf::mail failed (exit status ", $? >> 8, ")\n" ); + return 0; + } + return 1; +} + +# +# try to find a mail address for a name in the keyrings +# +sub try_to_get_mail_addr($$) { + my $name = shift; + my $listref = shift; + + @$listref = (); + open( F, "$conf::gpg --no-options --batch --no-default-keyring ". + "--always-trust --keyring ". + join (" --keyring ",@conf::keyrings). 
+ " --list-keys |" ) + or return ""; + while( ) { + if (/^pub / && / $name /) { + /<([^>]*)>/; + push( @$listref, $1 ); + } + } + close( F ); + + return (@$listref >= 1) ? $listref->[0] : ""; +} + +# +# return current time as string +# +sub format_time() { + my $t; + + # omit weekday and year for brevity + ($t = localtime) =~ /^\w+\s(.*)\s\d+$/; + return $1; +} + +sub print_time($) { + my $secs = shift; + my $hours = int($secs/(60*60)); + + $secs -= $hours*60*60; + return sprintf "%d:%02d:%02d", $hours, int($secs/60), $secs % 60; +} + +# +# block some signals during queue processing +# +# This is just to avoid data inconsistency or uploads being aborted in the +# middle. Only "soft" signals are blocked, i.e. SIGINT and SIGTERM, try harder +# ones if you really want to kill the daemon at once. +# +sub block_signals() { + POSIX::sigprocmask( SIG_BLOCK, $main::block_sigset ); +} + +sub unblock_signals() { + POSIX::sigprocmask( SIG_UNBLOCK, $main::block_sigset ); +} + +# +# process SIGHUP: close log file and reopen it (for logfile cycling) +# +sub close_log($) { + close( LOG ); + close( STDOUT ); + close( STDERR ); + + open( LOG, ">>$conf::logfile" ) + or die "Cannot open my logfile $conf::logfile: $!\n"; + chmod( 0644, $conf::logfile ) + or msg( "log", "Cannot set modes of $conf::logfile: $!\n" ); + select( (select(LOG), $| = 1)[0] ); + + open( STDOUT, ">&LOG" ) + or msg( "log", "$main::progname: Can't redirect stdout to ". + "$conf::logfile: $!\n" ); + open( STDERR, ">&LOG" ) + or msg( "log", "$main::progname: Can't redirect stderr to ". + "$conf::logfile: $!\n" ); + msg( "log", "Restart after SIGHUP\n" ); +} + +# +# process SIGCHLD: check if it was our statusd process +# +sub kid_died($) { + my $pid; + + # reap statusd, so that it's no zombie when we try to kill(0) it + waitpid( $main::statusd_pid, WNOHANG ); + +# Uncomment the following line if your Perl uses unreliable System V signal +# (i.e. if handlers reset to default if the signal is delivered). 
+# (Unfortunately, the re-setup can't be done in any case, since on some +# systems this will cause the SIGCHLD to be delivered again if there are +# still unreaped children :-(( ) + +# $SIG{"CHLD"} = \&kid_died; # resetup handler for SysV +} + +sub restart_statusd() { + # restart statusd if it died + if (!kill( 0, $main::statusd_pid)) { + close( STATUSD ); # close out pipe end + $main::statusd_pid = fork_statusd(); + } +} + +# +# process a fatal signal: cleanup and exit +# +sub fatal_signal($) { + my $signame = shift; + my $sig; + + # avoid recursions of fatal_signal in case of BSD signals + foreach $sig ( qw( ILL ABRT BUS FPE SEGV PIPE ) ) { + $SIG{$sig} = "DEFAULT"; + } + + if ($$ == $main::maind_pid) { + # only the main daemon should do this + kill( $main::signo{"TERM"}, $main::statusd_pid ) + if defined $main::statusd_pid; + unlink( $conf::statusfile, $conf::pidfile ); + } + msg( "log", "Caught SIG$signame -- exiting (pid $$)\n" ); + exit 1; +} + + +# Local Variables: +# tab-width: 4 +# fill-column: 78 +# End: diff --git a/tools/debianqueued-0.9/dqueued-watcher b/tools/debianqueued-0.9/dqueued-watcher new file mode 100755 index 00000000..b44470a3 --- /dev/null +++ b/tools/debianqueued-0.9/dqueued-watcher @@ -0,0 +1,504 @@ +#!/usr/bin/perl -w +# +# dqueued-watcher -- for regularily watching the queue daemon +# +# This script is intended to check periodically (e.g. started by cron) that +# everything is ok with debianqueued. If the daemon isn't running, it notifies +# the maintainer. It also checks if a new Debian keyring is available (in a +# Debian mirror aera, f.i.) and then updates the keyring used by debianqueued. +# +# Copyright (C) 1997 Roman Hodek +# +# This program is free software. You can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation: either version 2 or +# (at your option) any later version. +# This program comes with ABSOLUTELY NO WARRANTY! 
+# +# $Id: dqueued-watcher,v 1.28 1999/07/08 09:43:22 ftplinux Exp $ +# +# $Log: dqueued-watcher,v $ +# Revision 1.28 1999/07/08 09:43:22 ftplinux +# Bumped release number to 0.9 +# +# Revision 1.27 1999/07/07 11:58:22 ftplinux +# Also update gpg keyring if $conf::gpg_keyring is set. +# +# Revision 1.26 1998/07/06 14:24:36 ftplinux +# Some changes to handle debian-keyring.tar.gz files which expand to a +# directory including a date. +# +# Revision 1.25 1998/05/14 14:21:45 ftplinux +# Bumped release number to 0.8 +# +# Revision 1.24 1998/03/30 12:31:05 ftplinux +# Don't count "already reported" or "ignored for now" errors as .changes errors. +# Also list files for several error types. +# Also print out names of processed jobs. +# +# Revision 1.23 1998/03/30 11:27:37 ftplinux +# If called with args, make summaries for the log files given. +# make_summary: New arg $to_stdout, for printing report directly. +# +# Revision 1.22 1998/03/23 14:05:15 ftplinux +# Bumped release number to 0.7 +# +# Revision 1.21 1997/12/16 13:19:29 ftplinux +# Bumped release number to 0.6 +# +# Revision 1.20 1997/11/20 15:18:48 ftplinux +# Bumped release number to 0.5 +# +# Revision 1.19 1997/10/31 12:26:31 ftplinux +# Again added new counters in make_summary: suspicious_files, +# transient_changes_errs. +# Extended tests for bad_changes. +# Quotes in pattern seem not to work, replaced by '.'. +# +# Revision 1.18 1997/10/30 14:17:32 ftplinux +# In make_summary, implemented some new counters for command files. +# +# Revision 1.17 1997/10/17 09:39:09 ftplinux +# Fixed wrong args to plural_s +# +# Revision 1.16 1997/09/25 11:20:42 ftplinux +# Bumped release number to 0.4 +# +# Revision 1.15 1997/09/17 12:16:33 ftplinux +# Added writing summaries to a file +# +# Revision 1.14 1997/09/16 11:39:29 ftplinux +# In make_summary, initialize all counters to avoid warnings about uninited +# values. 
+# +# Revision 1.13 1997/09/16 10:53:36 ftplinux +# Made logging more verbose in queued and dqueued-watcher +# +# Revision 1.12 1997/08/18 13:07:15 ftplinux +# Implemented summary mails +# +# Revision 1.11 1997/08/18 12:11:44 ftplinux +# Replaced timegm by timelocal in parse_date; times in log file are +# local times... +# +# Revision 1.10 1997/08/18 11:27:20 ftplinux +# Revised age calculation of log file for rotating +# +# Revision 1.9 1997/08/12 09:54:40 ftplinux +# Bumped release number +# +# Revision 1.8 1997/08/11 12:49:10 ftplinux +# Implemented logfile rotating +# +# Revision 1.7 1997/07/28 13:20:38 ftplinux +# Added release numner to startup message +# +# Revision 1.6 1997/07/25 10:23:04 ftplinux +# Made SIGCHLD handling more portable between perl versions +# +# Revision 1.5 1997/07/09 10:13:55 ftplinux +# Alternative implementation of status file as plain file (not FIFO), because +# standard wu-ftpd doesn't allow retrieval of non-regular files. New config +# option $statusdelay for this. +# +# Revision 1.4 1997/07/08 08:39:56 ftplinux +# Need to remove -z from tar options if --use-compress-program +# +# Revision 1.3 1997/07/08 08:34:15 ftplinux +# If dqueued-watcher runs as cron job, $PATH might not contain gzip. Use extra +# --use-compress-program option to tar, and new config var $gzip. +# +# Revision 1.2 1997/07/03 13:05:57 ftplinux +# Added some verbosity if stdin is a terminal +# +# Revision 1.1.1.1 1997/07/03 12:54:59 ftplinux +# Import initial sources +# +# + +require 5.002; +use strict; +use POSIX; +require "timelocal.pl"; + +sub LINEWIDTH { 79 } +my $batchmode = !(-t STDIN); +$main::curr_year = (localtime)[5]; + +do { + my $version; + ($version = 'Release: 0.9 $Revision: 1.28 $ $Date: 1999/07/08 09:43:22 $ $Author: ftplinux $') =~ s/\$ ?//g; + print "dqueued-watcher $version\n" if !$batchmode; +}; + +package conf; +($conf::queued_dir = (($0 !~ m,^/,) ? POSIX::getcwd()."/" : "") . 
$0) + =~ s,/[^/]+$,,; +require "$conf::queued_dir/config"; +my # avoid spurious warnings about unused vars +$junk = $conf::gzip; +$junk = $conf::maintainer_mail; +$junk = $conf::log_age; +package main; + +# prototypes +sub check_daemon(); +sub daemon_running(); +sub rotate_log(); +sub logf($); +sub parse_date($); +sub make_summary($$$); +sub stimes($); +sub plural_s($); +sub format_list($@); +sub mail($@); +sub logger(@); +sub format_time(); + +# the main program: +if (@ARGV) { + # with arguments, make summaries (to stdout) for the logfiles given + foreach (@ARGV) { + make_summary( 1, undef, $_ ); + } +} +else { + # without args, just do normal maintainance actions + check_daemon(); + rotate_log(); +} +exit 0; + + +# +# check if the daemon is running, notify maintainer if not +# +sub check_daemon() { + my $daemon_down_text = "Daemon is not running\n"; + my( $line, $reported ); + + if (daemon_running()) { + print "Daemon is running\n" if !$batchmode; + return; + } + print "Daemon is NOT running!\n" if !$batchmode; + + $reported = 0; + if ($conf::statusfile && -f $conf::statusfile && ! 
-p _ &&
+ open( STATUSFILE, "<$conf::statusfile" )) {
+ $line = <STATUSFILE>;
+ close( STATUSFILE );
+ $reported = $line eq $daemon_down_text;
+ }
+ if (!$reported) {
+ mail( "debianqueued down",
+ "The Debian queue daemon isn't running!\n",
+ "Please start it up again.\n" );
+ logger( "Found that daemon is not running\n" );
+ }
+
+ # remove unnecessary pid file
+ # also remove status FIFO, so opening it for reading won't block
+ # forever
+ unlink( $conf::pidfile, $conf::statusfile );
+
+ # replace status FIFO by a file that tells the user the daemon is down
+ if ($conf::statusfile) {
+ open( STATUSFILE, ">$conf::statusfile" )
+ or die "Can't open $conf::statusfile: $!\n";
+ print STATUSFILE $daemon_down_text;
+ close( STATUSFILE );
+ }
+}
+
+#
+# check if daemon is running
+#
+sub daemon_running() {
+ my $pid;
+ local( *PIDFILE );
+
+ if (open( PIDFILE, "<$conf::pidfile" )) {
+ chomp( $pid = <PIDFILE> );
+ close( PIDFILE );
+ $main::daemon_pid = $pid, return 1 if $pid && kill( 0, $pid );
+ }
+ return 0;
+}
+
+#
+# check if new keyring is available, if yes extract it
+#
+
+sub rotate_log() {
+ my( $first_date, $f1, $f2, $i );
+ local( *F );
+
+ return if !defined $main::daemon_pid || !-f $conf::logfile;
+
+ open( F, "<$conf::logfile" ) or die "Can't open $conf::logfile: $!\n";
+ while( <F> ) {
+ last if $first_date = parse_date( $_ );
+ }
+ close( F );
+ # Simply don't rotate if nothing could be parsed as a date -- probably
+ # the file is empty.
+ return if !$first_date; + # assume year-wrap if $first_date is in the future + $first_date -= 365*24*60*60 if $first_date > time; + # don't rotate if first date too young + return if time - $first_date < $conf::log_age*24*60*60; + logger( "Logfile older than $conf::log_age days, rotating\n" ); + + # remove oldest log + $f1 = logf($conf::log_keep-1); + if (-f $f1) { + unlink( $f1 ) or warn "Can't remove $f1: $!\n"; + } + + # rename other logs + for( $i = $conf::log_keep-2; $i > 0; --$i ) { + $f1 = logf($i); + $f2 = logf($i+1); + if ($i == 0) { + } + if (-f $f1) { + rename( $f1, $f2 ) or warn "Can't rename $f1 to $f2: $!\n"; + } + } + + # compress newest log + $f1 = "$conf::logfile.0"; + $f2 = "$conf::logfile.1.gz"; + if (-f $f1) { + system $conf::gzip, "-9f", $f1 + and die "gzip failed on $f1 (status $?)\n"; + rename( "$f1.gz", $f2 ) or warn "Can't rename $f1.gz to $f2: $!\n"; + } + + # rename current log and signal the daemon to open a new logfile + rename( $conf::logfile, $f1 ); + kill( 1, $main::daemon_pid ); + + print "Rotated log files\n" if !$batchmode; + make_summary( 0, $first_date, $f1 ) + if $conf::mail_summary || $conf::summary_file; +} + +sub logf($) { + my $num = shift; + return sprintf( "$conf::logfile.%d.gz", $num ); +} + +sub parse_date($) { + my $date = shift; + my( $mon, $day, $hours, $mins, $month, $year, $secs ); + my %month_num = ( "jan", 0, "feb", 1, "mar", 2, "apr", 3, "may", 4, + "jun", 5, "jul", 6, "aug", 7, "sep", 8, "oct", 9, + "nov", 10, "dec", 11 ); + + warn "Invalid date: $date\n", return 0 + unless $date =~ /^(\w\w\w)\s+(\d+)\s+(\d+):(\d+):(\d+)\s/; + ($mon, $day, $hours, $mins, $secs) = ($1, $2, $3, $4, $5); + + $mon =~ tr/A-Z/a-z/; + return 0 if !exists $month_num{$mon}; + $month = $month_num{$mon}; + return timelocal( $secs, $mins, $hours, $day, $month, $main::curr_year ); +} + +sub make_summary($$$) { + my $to_stdout = shift; + my $startdate = shift; + my $file = shift; + my( $starts, $statusd_starts, $suspicious_files, 
$transient_errs,
+ $upl_failed, $success, $commands, $rm_cmds, $mv_cmds, $msg,
+ $uploader );
+ my( @pgp_fail, %transient_errs, @changes_errs, @removed_changes,
+ @already_present, @del_stray, %uploaders, %cmd_uploaders );
+ local( *F );
+
+ if (!open( F, "<$file" )) {
+ mail( "debianqueued summary failed",
+ "Couldn't open $file to make summary of events." );
+ return;
+ }
+
+ $starts = $statusd_starts = $suspicious_files = $transient_errs =
+ $upl_failed = $success = $commands = $rm_cmds = $mv_cmds = 0;
+ while( <F> ) {
+ $startdate = parse_date( $_ ) if !$startdate;
+ ++$starts if /daemon \(pid \d+\) started$/;
+ ++$statusd_starts if /forked status daemon/;
+ push( @pgp_fail, $1 )
+ if /PGP signature check failed on (\S+)/;
+ ++$suspicious_files if /found suspicious filename/;
+ ++$transient_errs, ++$transient_errs{$1}
+ if /(\S+) (doesn.t exist|is too small) \(ignored for now\)/;
+ push( @changes_errs, $1 )
+ if (!/\((already reported|ignored for now)\)/ &&
+ (/(\S+) doesn.t exist/ || /(\S+) has incorrect (size|md5)/)) ||
+ /(\S+) doesn.t contain a Maintainer: field/ ||
+ /(\S+) isn.t signed with PGP/ ||
+ /(\S+) doesn.t mention any files/;
+ push( @removed_changes, $1 )
+ if /(\S+) couldn.t be processed for \d+ hours and is now del/ ||
+ /(\S+) couldn.t be uploaded for \d+ times/;
+ push( @already_present, $1 )
+ if /(\S+) is already present on master/;
+ ++$upl_failed if /Upload to \S+ failed/;
+ ++$success, push( @{$uploaders{$2}}, $1 )
+ if /(\S+) processed successfully \(uploader (\S*)\)$/;
+ push( @del_stray, $1 ) if /Deleted stray file (\S+)/;
+ ++$commands if /processing .*\.commands$/;
+ ++$rm_cmds if / > rm /;
+ ++$mv_cmds if / > mv /;
+ ++$cmd_uploaders{$1}
+ if /\(command uploader (\S*)\)$/;
+ }
+ close( F );
+
+ $msg .= "Queue Daemon Summary from " . localtime($startdate) . " to " .
+ localtime(time) .
":\n\n"; + + $msg .= "Daemon started ".stimes($starts)."\n" + if $starts; + $msg .= "Status daemon restarted ".stimes($statusd_starts-$starts)."\n" + if $statusd_starts > $starts; + $msg .= @pgp_fail." job".plural_s(@pgp_fail)." failed PGP check:\n" . + format_list(2,@pgp_fail) + if @pgp_fail; + $msg .= "$suspicious_files file".plural_s($suspicious_files)." with ". + "suspicious names found\n" + if $suspicious_files; + $msg .= "Detected ".$transient_errs." transient error". + plural_s($transient_errs)." in .changes files:\n". + format_list(2,keys %transient_errs) + if $transient_errs; + $msg .= "Detected ".@changes_errs." error".plural_s(@changes_errs). + " in .changes files:\n".format_list(2,@changes_errs) + if @changes_errs; + $msg .= @removed_changes." job".plural_s(@removed_changes). + " removed due to persistent errors:\n". + format_list(2,@removed_changes) + if @removed_changes; + $msg .= @already_present." job".plural_s(@already_present). + " were already present on master:\n".format_list(2,@already_present) + if @already_present; + $msg .= @del_stray." stray file".plural_s(@del_stray)." deleted:\n". + format_list(2,@del_stray) + if @del_stray; + $msg .= "$commands command file".plural_s($commands)." processed\n" + if $commands; + $msg .= " ($rm_cmds rm, $mv_cmds mv commands)\n" + if $rm_cmds || $mv_cmds; + $msg .= "$success job".plural_s($success)." processed successfully\n"; + + if ($success) { + $msg .= "\nPeople who used the queue:\n"; + foreach $uploader ( keys %uploaders ) { + $msg .= " $uploader (".@{$uploaders{$uploader}}."):\n". 
+ format_list(4,@{$uploaders{$uploader}}); + } + } + + if (%cmd_uploaders) { + $msg .= "\nPeople who used command files:\n"; + foreach $uploader ( keys %cmd_uploaders ) { + $msg .= " $uploader ($cmd_uploaders{$uploader})\n"; + } + } + + if ($to_stdout) { + print $msg; + } + else { + if ($conf::mail_summary) { + mail( "debianqueued summary", $msg ); + } + + if ($conf::summary_file) { + local( *F ); + open( F, ">>$conf::summary_file" ) or + die "Cannot open $conf::summary_file for appending: $!\n"; + print F "\n", "-"x78, "\n", $msg; + close( F ); + } + } +} + +sub stimes($) { + my $num = shift; + return $num == 1 ? "once" : "$num times"; +} + +sub plural_s($) { + my $num = shift; + return $num == 1 ? "" : "s"; +} + +sub format_list($@) { + my $indent = shift; + my( $i, $pos, $ret, $item, $len ); + + $ret = " " x $indent; $pos += $indent; + while( $item = shift ) { + $len = length($item); + $item .= ", ", $len += 2 if @_; + if ($pos+$len > LINEWIDTH) { + $ret .= "\n" . " "x$indent; + $pos = $indent; + } + $ret .= $item; + $pos += $len; + } + $ret .= "\n"; + return $ret; +} + +# +# send mail to maintainer +# +sub mail($@) { + my $subject = shift; + local( *MAIL ); + + open( MAIL, "|$conf::mail -s '$subject' '$conf::maintainer_mail'" ) + or (warn( "Could not open pipe to $conf::mail: $!\n" ), return); + print MAIL @_; + print MAIL "\nGreetings,\n\n\tYour Debian queue daemon watcher\n"; + close( MAIL ) + or warn( "$conf::mail failed (exit status $?)\n" ); +} + +# +# log something to logfile +# +sub logger(@) { + my $now = format_time(); + local( *LOG ); + + if (!open( LOG, ">>$conf::logfile" )) { + warn( "Can't open $conf::logfile\n" ); + return; + } + print LOG "$now dqueued-watcher: ", @_; + close( LOG ); +} + +# +# return current time as string +# +sub format_time() { + my $t; + + # omit weekday and year for brevity + ($t = localtime) =~ /^\w+\s(.*)\s\d+$/; + return $1; +} + + +# Local Variables: +# tab-width: 4 +# fill-column: 78 +# End: diff --git 
a/tools/debianqueued-0.9/release-num b/tools/debianqueued-0.9/release-num new file mode 100644 index 00000000..b63ba696 --- /dev/null +++ b/tools/debianqueued-0.9/release-num @@ -0,0 +1 @@ +0.9 diff --git a/tools/dsync-0.0/COMPILING b/tools/dsync-0.0/COMPILING new file mode 100644 index 00000000..d2f39b8d --- /dev/null +++ b/tools/dsync-0.0/COMPILING @@ -0,0 +1,48 @@ +To compile this you need a couple things + - A working POSIX system with working POSIX sh, awk and sed + - GNU Make 3.74 or so, -- normal UNIX make will NOT work + - A working ANSI C++ compiler, this is not g++ 2.7.* + g++ 2.8 works OK and newer egcs work well also. Nobody has tried it + on other compilers :< + You will need a properly working STL as well. + - A C library with the usual POSIX functions and a BSD socket layer + +The MD5 routine needs to know about the architecture, many of the common +ones are in buildlib/archtable and buildlib/sizetable if your processor/host +is not listed then just add them.. + +This is a list of platforms and information that dsync has been compiled +and tested on: + +Debian GNU Linux 2.1 'slink' + Linux Wakko 2.0.35 #1 Sun Nov 15 20:54:42 MST 1998 i586 unknown + Linux faure 2.0.35 #1 Tue Oct 30 14:31:28 CST 2018 alpha unknown + g++ egcs-2.91.60 + dsync 0.0 18/01/1999 + - All versions work here + - Watch out! You get shared libraries! Use 'make ONLYSHAREDLIBS=' to + disable + - You will want to have debiandoc-sgml and yodl installed to get + best results. + +Sun Solaris + SunOS ohaton 5.6 Generic_105181-11 sun4u + g++ 2.8.1 + dsync 0.0 18/01/1999 + - The Sun I used did not have 'ar' in the path for some reason, it is + in /usr/ccs/bin/ar, export this before running configure or edit + environment.mak to fix it. + - libpthread seems to have some defectiveness issue with pthread_once, + it doesn't actually work. 
The code has a hack to advoid the + defectiveness + +HP-UX + HP-UX nyquist B.10.20 C 9000/780 2016574337 32-user license + g++ 2.8.1 + dsync 0.0 18/01/1999 + - I had alot of problems here initially, the utilities are very strict. + Things work well now. + - The HP-UX I used had gnu-make installed as 'gmake' this causes configure + to die when it does 'make dirs' I ran 'gmake dirs' by hand. + - There is a snprintf in the libraries someplace but it does not declare + it in any header, this causes all sorts of fun compile warnings diff --git a/tools/dsync-0.0/COPYING b/tools/dsync-0.0/COPYING new file mode 100644 index 00000000..2a0ef64d --- /dev/null +++ b/tools/dsync-0.0/COPYING @@ -0,0 +1,4 @@ +DSync is free software; you can redistribute them and/or modify them under +the terms of the GNU General Public License as published by the Free Software +Foundation; either version 2 of the License, or (at your option) any later +version. diff --git a/tools/dsync-0.0/Makefile b/tools/dsync-0.0/Makefile new file mode 100644 index 00000000..84d3bb2f --- /dev/null +++ b/tools/dsync-0.0/Makefile @@ -0,0 +1,25 @@ +# -*- make -*- + +# This is the top level make file for APT, it recurses to each lower +# level make file and runs it with the proper target +ifndef NOISY +.SILENT: +endif + +.PHONY: headers library clean veryclean all binary program doc +all headers library clean veryclean binary program doc: + $(MAKE) -C libdsync $@ + $(MAKE) -C cmdline $@ + $(MAKE) -C doc $@ + +# Some very common aliases +.PHONY: maintainer-clean dist-clean distclean pristine sanity +maintainer-clean dist-clean distclean pristine sanity: veryclean + +# The startup target builds the necessary configure scripts. It should +# be used after a CVS checkout. 
+CONVERTED=environment.mak include/config.h makefile +include buildlib/configure.mak +$(BUILDDIR)/include/config.h: buildlib/config.h.in +$(BUILDDIR)/environment.mak: buildlib/environment.mak.in +$(BUILDDIR)/makefile: buildlib/makefile.in diff --git a/tools/dsync-0.0/buildlib/archtable b/tools/dsync-0.0/buildlib/archtable new file mode 100644 index 00000000..87c0afd2 --- /dev/null +++ b/tools/dsync-0.0/buildlib/archtable @@ -0,0 +1,33 @@ +# This file contains a table of known architecture strings, with +# things to map them to. `configure' will take the output of gcc +# --print-libgcc-file-name, strip off leading directories up to and +# including gcc-lib, strip off trailing /libgcc.a and trailing version +# number directory, and then strip off everything after the first +# hyphen. The idea is that you're left with this bit: +# $ gcc --print-libgcc-file-name +# /usr/lib/gcc-lib/i486-linux/2.7.2/libgcc.a +# ^^^^ +# This is then looked up in the table below, to find out what to map +# it to. If it isn't found then configure will print a warning and +# continue. You can override configure's ideas using --with-arch. +# The third field is the GNU configure architecture to use with +# this build architecture. +# +# This file is mirrored from dpkg. +# + +i386 i386 i486 +i486 i386 i486 +i586 i386 i486 +i686 i386 i486 +pentium i386 i486 +sparc sparc sparc +alpha alpha alpha +m68k m68k m68k +arm arm arm +armv4l arm arm +powerpc powerpc powerpc +ppc powerpc powerpc +mipsel mipsel mipsel +x86_64 amd64 x86_64 + diff --git a/tools/dsync-0.0/buildlib/config.guess b/tools/dsync-0.0/buildlib/config.guess new file mode 100755 index 00000000..45bee139 --- /dev/null +++ b/tools/dsync-0.0/buildlib/config.guess @@ -0,0 +1,1465 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. 
+ +timestamp='2005-04-22' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Originally written by Per Bothner . +# Please send patches to . Submit a context +# diff and a properly formatted ChangeLog entry. +# +# This script attempts to guess a canonical system name similar to +# config.sub. If it succeeds, it prints the system name on stdout, and +# exits with 0. Otherwise, it exits with 1. +# +# The plan is that this can be called by configure scripts if you +# don't specify an explicit build system type. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. 
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit 0 ;; + --version | -v ) + echo "$version" ; exit 0 ;; + --help | --h* | -h ) + echo "$usage"; exit 0 ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. 
+ +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d -q "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. 
This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently, or will in the future. + case "${UNAME_MACHINE_ARCH}" in + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep __ELF__ >/dev/null + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
+ echo "${machine}-${os}${release}" + exit 0 ;; + amd64:OpenBSD:*:*) + echo x86_64-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + amiga:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + cats:OpenBSD:*:*) + echo arm-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + hp300:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + luna88k:OpenBSD:*:*) + echo m88k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mac68k:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + macppc:OpenBSD:*:*) + echo powerpc-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mvme68k:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mvme88k:OpenBSD:*:*) + echo m88k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + mvmeppc:OpenBSD:*:*) + echo powerpc-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + sgi:OpenBSD:*:*) + echo mips64-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + sun3:OpenBSD:*:*) + echo m68k-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + *:OpenBSD:*:*) + echo ${UNAME_MACHINE}-unknown-openbsd${UNAME_RELEASE} + exit 0 ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit 0 ;; + macppc:MirBSD:*:*) + echo powerppc-unknown-mirbsd${UNAME_RELEASE} + exit 0 ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit 0 ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. 
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE="alpha" ;; + "EV4.5 (21064)") + UNAME_MACHINE="alpha" ;; + "LCA4 (21066/21068)") + UNAME_MACHINE="alpha" ;; + "EV5 (21164)") + UNAME_MACHINE="alphaev5" ;; + "EV5.6 (21164A)") + UNAME_MACHINE="alphaev56" ;; + "EV5.6 (21164PC)") + UNAME_MACHINE="alphapca56" ;; + "EV5.7 (21164PC)") + UNAME_MACHINE="alphapca57" ;; + "EV6 (21264)") + UNAME_MACHINE="alphaev6" ;; + "EV6.7 (21264A)") + UNAME_MACHINE="alphaev67" ;; + "EV6.8CB (21264C)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8AL (21264B)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8CX (21264D)") + UNAME_MACHINE="alphaev68" ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE="alphaev69" ;; + "EV7 (21364)") + UNAME_MACHINE="alphaev7" ;; + "EV7.9 (21364A)") + UNAME_MACHINE="alphaev79" ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + exit 0 ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? 
+ echo alpha-pc-interix + exit 0 ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit 0 ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit 0;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit 0 ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit 0 ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit 0 ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit 0 ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit 0 ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit 0;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit 0;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit 0 ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit 0 ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit 0 ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7 && exit 0 ;; + esac ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + i86pc:SunOS:5.*:*) + echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. 
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit 0 ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit 0 ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit 0 ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit 0 ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit 0 ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit 0 ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit 0 ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit 0 ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit 0 ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit 0 ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit 0 ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit 0 ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit 0 ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit 0 ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit 0 ;; + 
2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit 0 ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c \ + && $dummy `echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` \ + && exit 0 + echo mips-mips-riscos${UNAME_RELEASE} + exit 0 ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit 0 ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit 0 ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit 0 ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit 0 ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit 0 ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit 0 ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit 0 ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit 0 ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit 0 ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit 0 ;; + XD88*:*:*:*) 
# Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit 0 ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit 0 ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit 0 ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit 0 ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit 0 ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit 0 ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && $dummy && exit 0 + echo rs6000-ibm-aix3.2.5 + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit 0 ;; + *:AIX:*:[45]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit 0 ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit 0 ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit 0 ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit 0 ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit 0 ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit 0 ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit 0 
;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit 0 ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? ) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 + 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH="hppa2.0n" ;; + 64) HP_ARCH="hppa2.0w" ;; + '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = "hppa2.0w" ] + then + # avoid double evaluation of $set_cc_for_build + test -n "$CC_FOR_BUILD" || eval $set_cc_for_build + if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E -) | grep __LP64__ >/dev/null + then + HP_ARCH="hppa2.0w" + else + HP_ARCH="hppa64" + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit 0 ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 
's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit 0 ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. */ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && $dummy && exit 0 + echo unknown-hitachi-hiuxwe2 + exit 0 ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit 0 ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit 0 ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit 0 ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit 0 ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit 0 ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit 0 ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit 0 ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit 0 ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit 0 ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit 0 ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit 0 ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit 0 ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + 
CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit 0 ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit 0 ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit 0 ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit 0 ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit 0 ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit 0 ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit 0 ;; + *:FreeBSD:*:*) + echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit 0 ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit 0 ;; + i*:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit 0 ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit 0 ;; + x86:Interix*:[34]*) + echo i586-pc-interix${UNAME_RELEASE}|sed -e 's/\..*//' + exit 0 ;; + [345]86:Windows_95:* | 
[345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit 0 ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit 0 ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit 0 ;; + amd64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit 0 ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit 0 ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit 0 ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit 0 ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + exit 0 ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit 0 ;; + arm*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + cris:Linux:*:*) + echo cris-axis-linux-gnu + exit 0 ;; + crisv32:Linux:*:*) + echo crisv32-axis-linux-gnu + exit 0 ;; + frv:Linux:*:*) + echo frv-unknown-linux-gnu + exit 0 ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + mips:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef mips + #undef mipsel + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=mipsel + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=mips + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=` + test x"${CPU}" != x 
&& echo "${CPU}-unknown-linux-gnu" && exit 0 + ;; + mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef mips64 + #undef mips64el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=mips64el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=mips64 + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=` + test x"${CPU}" != x && echo "${CPU}-unknown-linux-gnu" && exit 0 + ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-gnu + exit 0 ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-gnu + exit 0 ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null + if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi + echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} + exit 0 ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-gnu ;; + PA8*) echo hppa2.0-unknown-linux-gnu ;; + *) echo hppa-unknown-linux-gnu ;; + esac + exit 0 ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-gnu + exit 0 ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux + exit 0 ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit 0 ;; + x86_64:Linux:*:*) + echo x86_64-unknown-linux-gnu + exit 0 ;; + i*86:Linux:*:*) + # The BFD linker knows what the default object file format is, so + # first see if it will tell us. cd to the root directory to prevent + # problems with other programs or directories called `ld' in the path. + # Set LC_ALL=C to ensure ld outputs messages in English. + ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \ + | sed -ne '/supported targets:/!d + s/[ ][ ]*/ /g + s/.*supported targets: *// + s/ .*// + p'` + case "$ld_supported_targets" in + elf32-i386) + TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu" + ;; + a.out-i386-linux) + echo "${UNAME_MACHINE}-pc-linux-gnuaout" + exit 0 ;; + coff-i386) + echo "${UNAME_MACHINE}-pc-linux-gnucoff" + exit 0 ;; + "") + # Either a pre-BFD a.out linker (linux-gnuoldld) or + # one that does not give us useful --help. 
+ echo "${UNAME_MACHINE}-pc-linux-gnuoldld" + exit 0 ;; + esac + # Determine whether the default compiler is a.out or elf + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + #ifdef __ELF__ + # ifdef __GLIBC__ + # if __GLIBC__ >= 2 + LIBC=gnu + # else + LIBC=gnulibc1 + # endif + # else + LIBC=gnulibc1 + # endif + #else + #ifdef __INTEL_COMPILER + LIBC=gnu + #else + LIBC=gnuaout + #endif + #endif + #ifdef __dietlibc__ + LIBC=dietlibc + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=` + test x"${LIBC}" != x && echo "${UNAME_MACHINE}-pc-linux-${LIBC}" && exit 0 + test x"${TENTATIVE}" != x && echo "${TENTATIVE}" && exit 0 + ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit 0 ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit 0 ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. 
+ echo ${UNAME_MACHINE}-pc-os2-emx + exit 0 ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit 0 ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit 0 ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit 0 ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit 0 ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit 0 ;; + i*86:*:5:[78]*) + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit 0 ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit 0 ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i386. 
+ echo i386-pc-msdosdjgpp + exit 0 ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit 0 ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit 0 ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. + echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit 0 ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit 0 ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit 0 ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit 0 ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && echo i486-ncr-sysv4.3${OS_REL} && exit 0 + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && echo i586-ncr-sysv4.3${OS_REL} && exit 0 ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && echo i486-ncr-sysv4 && exit 0 ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit 0 ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit 0 ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit 0 ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit 0 ;; + RM*:SINIX-*:*:*) + echo 
mips-sni-sysv4 + exit 0 ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit 0 ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit 0 ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit 0 ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit 0 ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit 0 ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit 0 ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit 0 ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit 0 ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit 0 ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit 0 ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit 0 ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. 
+ echo i586-pc-beos + exit 0 ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit 0 ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit 0 ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit 0 ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit 0 ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit 0 ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + case $UNAME_PROCESSOR in + *86) UNAME_PROCESSOR=i686 ;; + unknown) UNAME_PROCESSOR=powerpc ;; + esac + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit 0 ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = "x86"; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit 0 ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit 0 ;; + NSE-?:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit 0 ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit 0 ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit 0 ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit 0 ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit 0 ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ if test "$cputype" = "386"; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit 0 ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit 0 ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit 0 ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit 0 ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit 0 ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit 0 ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit 0 ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit 0 ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit 0 ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms && exit 0 ;; + I*) echo ia64-dec-vms && exit 0 ;; + V*) echo vax-dec-vms && exit 0 ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit 0 ;; +esac + +#echo '(No uname command or uname output not recognized.)' 1>&2 +#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 + +eval $set_cc_for_build +cat >$dummy.c < +# include +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, + I don't know.... 
*/ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (__arm) && defined (__acorn) && defined (__unix) + printf ("arm-acorn-riscix"); exit (0); +#endif + +#if defined (hp300) && !defined (hpux) + printf ("m68k-hp-bsd\n"); exit (0); +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? 
*/ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); + +#endif + +#if defined (vax) +# if !defined (ultrix) +# include +# if defined (BSD) +# if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +# else +# if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# endif +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# else + printf ("vax-dec-ultrix\n"); exit (0); +# endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && $dummy && exit 0 + +# Apollos put the system type in the environment. + +test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit 0; } + +# Convex versions that predate uname can use getsysinfo(1) + +if [ -x /usr/convex/getsysinfo ] +then + case `getsysinfo -f cpu_type` in + c1*) + echo c1-convex-bsd + exit 0 ;; + c2*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit 0 ;; + c34*) + echo c34-convex-bsd + exit 0 ;; + c38*) + echo c38-convex-bsd + exit 0 ;; + c4*) + echo c4-convex-bsd + exit 0 ;; + esac +fi + +cat >&2 < in order to provide the needed +information to handle your system. 
+ +config.guess timestamp = $timestamp + +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/tools/dsync-0.0/buildlib/config.h.in b/tools/dsync-0.0/buildlib/config.h.in new file mode 100644 index 00000000..f882ca25 --- /dev/null +++ b/tools/dsync-0.0/buildlib/config.h.in @@ -0,0 +1,37 @@ +/* Define if your processor stores words with the most significant + byte first (like Motorola and SPARC, unlike Intel and VAX). */ +#undef WORDS_BIGENDIAN + +/* The number of bytes in a usigned char. */ +#undef SIZEOF_CHAR + +/* The number of bytes in a unsigned int. */ +#undef SIZEOF_INT + +/* The number of bytes in a unsigned long. */ +#undef SIZEOF_LONG + +/* The number of bytes in a unsigned short. */ +#undef SIZEOF_SHORT + +/* Define if we have libgpm. */ +#undef HAVE_LIBGPM + +/* Define if we have the SLang library from Davis. */ +#undef HAVE_LIBSLANG + +/* Define if we have the X11 windowing system. 
*/ +#undef HAVE_X11 + +/* Define if we have enabled pthread support */ +#undef HAVE_PTHREAD + +/* Define the architecture name string */ +#undef ARCHITECTURE + +/* The version number string */ +#undef VERSION + +/* The package name string */ +#undef PACKAGE + diff --git a/tools/dsync-0.0/buildlib/config.sub b/tools/dsync-0.0/buildlib/config.sub new file mode 100755 index 00000000..87a1ee49 --- /dev/null +++ b/tools/dsync-0.0/buildlib/config.sub @@ -0,0 +1,1569 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. + +timestamp='2005-04-22' + +# This file is (in principle) common to ALL GNU software. +# The presence of a machine in this file suggests that SOME GNU software +# can handle that machine. It does not imply ALL GNU software can. +# +# This file is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, +# Boston, MA 02111-1307, USA. + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Please send patches to . Submit a context +# diff and a properly formatted ChangeLog entry. 
+# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS + $0 [OPTION] ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." 
+ +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit 0 ;; + --version | -v ) + echo "$version" ; exit 0 ;; + --help | --h* | -h ) + echo "$usage"; exit 0 ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit 0;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. +maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-dietlibc | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | \ + kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. 
+ ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray) + os= + basic_machine=$1 + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \ + | bfin \ + | c4x | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | fr30 | frv \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | m32r | m32rle | m68000 | m68k | m88k | maxq | mcore \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64vr | mips64vrel \ + | mips64orion | mips64orionel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | msp430 \ + | ns16k | ns32k \ + | openrisc | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \ + | pyramid \ + | sh | sh[1234] | sh[23]e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b \ + | strongarm \ + | tahoe | thumb | tic4x | tic80 | tron \ + | v850 | v850e \ + | we32k \ + | x86 | xscale | xscalee[bl] | xstormy16 | xtensa \ + | z8k) + basic_machine=$basic_machine-unknown + ;; + m6811 | m68hc11 | m6812 | m68hc12) + # Motorola 68HC11/12. + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. 
+ i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. + 580-* \ + | a29k-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \ + | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | elxsi-* \ + | f30[01]-* | f700-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | msp430-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \ + | pyramid-* \ + | romp-* | rs6000-* \ + | sh-* | sh[1234]-* | sh[23]e-* | sh[34]eb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | 
sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | strongarm-* | sv1-* | sx?-* \ + | tahoe-* | thumb-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tron-* \ + | v850-* | v850e-* | vax-* \ + | we32k-* \ + | x86-* | x86_64-* | xps100-* | xscale-* | xscalee[bl]-* \ + | xstormy16-* | xtensa-* \ + | ymp-* \ + | z8k-*) + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. + 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + 
basic_machine=craynv-cray + os=-unicosmp + ;; + cr16c) + basic_machine=cr16c-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + ;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + 
hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; +# I'm not sure what "Sysv32" means. Should this be sysv3.2? + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + mingw32) + basic_machine=i386-pc + os=-mingw32 + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) 
+ basic_machine=powerpc-unknown + os=-morphos + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + or32 | or32-*) + basic_machine=or32-unknown + os=-coff + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo 
$basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc) basic_machine=powerpc-unknown + ;; + ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle | ppc-le | powerpc-little) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little | ppc64-le | powerpc64-little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + 
basic_machine=i860-stratus + os=-sysv4 + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tic54x | c54x*) + basic_machine=tic54x-unknown + os=-coff + ;; + tic55x | c55x*) + basic_machine=tic55x-unknown + os=-coff + ;; + tic6x | c6x*) + basic_machine=tic6x-unknown + os=-coff + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + ymp) + basic_machine=ymp-cray + 
os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. + w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh3 | sh4 | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. 
+ -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. + -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* | -openbsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* \ + | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -mingw32* | -linux-gnu* | -linux-uclibc* | -uxpv* | -beos* | -mpeix* | -udk* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly*) + # Remember, each alternative MUST END IN *, to match a version number. 
+ ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -kaos*) + os=-kaos + ;; + -zvmoe) + os=-zvmoe + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. + os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. 
+# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + # This also exists in the configure program, but was not the + # default. + # os=-sunos4 + ;; + m68*-cisco) + os=-aout + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. 
+ os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit 0 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/tools/dsync-0.0/buildlib/configure.mak b/tools/dsync-0.0/buildlib/configure.mak new file mode 100644 index 00000000..2124ef88 --- /dev/null +++ b/tools/dsync-0.0/buildlib/configure.mak @@ -0,0 +1,27 @@ +# -*- make -*- + +# This make fragment is included by the toplevel make to handle configure +# and setup. It defines a target called startup that when run will init +# the build directory, generate configure from configure.in, create aclocal +# and has rules to run config.status should one of the .in files change. + +# Input +# BUILDDIR - The build directory +# CONVERTED - List of files output by configure $(BUILD) is prepended +# The caller must provide depends for these files +# It would be a fairly good idea to run this after a cvs checkout. 
+BUILDDIR=build + +.PHONY: startup +startup: configure $(BUILDDIR)/config.status $(addprefix $(BUILDDIR)/,$(CONVERTED)) + +configure: aclocal.m4 configure.in + autoconf + +aclocal.m4: + aclocal -I buildlib +$(BUILDDIR)/config.status: configure + test -e $(BUILDDIR) || mkdir $(BUILDDIR) + (HERE=`pwd`; cd $(BUILDDIR) && $$HERE/configure) +$(addprefix $(BUILDDIR)/,$(CONVERTED)): + (cd $(BUILDDIR) && ./config.status) diff --git a/tools/dsync-0.0/buildlib/copy.mak b/tools/dsync-0.0/buildlib/copy.mak new file mode 100644 index 00000000..973c4853 --- /dev/null +++ b/tools/dsync-0.0/buildlib/copy.mak @@ -0,0 +1,27 @@ +# -*- make -*- + +# This installs arbitary files into a directory + +# Input +# $(SOURCE) - The documents to use +# $(TO) - The directory to put them in +# All output is writtin to files in the build/$(TO) directory + +# See defaults.mak for information about LOCAL + +# Some local definitions +LOCAL := copy-$(firstword $(SOURCE)) +$(LOCAL)-LIST := $(addprefix $(TO)/,$(SOURCE)) + +# Install generation hooks +doc: $($(LOCAL)-LIST) +veryclean: veryclean/$(LOCAL) + +$($(LOCAL)-LIST) : $(TO)/% : % + echo Installing $< to $(@D) + cp $< $(@D) + +# Clean rule +.PHONY: veryclean/$(LOCAL) +veryclean/$(LOCAL): + -rm -rf $($(@F)-LIST) diff --git a/tools/dsync-0.0/buildlib/debiandoc.mak b/tools/dsync-0.0/buildlib/debiandoc.mak new file mode 100644 index 00000000..5e08bda6 --- /dev/null +++ b/tools/dsync-0.0/buildlib/debiandoc.mak @@ -0,0 +1,58 @@ +# -*- make -*- + +# This processes debian-doc sgml to produce html and plain text output + +# Input +# $(SOURCE) - The documents to use + +# All output is writtin to files in the build doc directory + +# See defaults.mak for information about LOCAL + +# Some local definitions +LOCAL := debiandoc-$(firstword $(SOURCE)) +$(LOCAL)-HTML := $(addsuffix .html,$(addprefix $(DOC)/,$(basename $(SOURCE)))) +$(LOCAL)-TEXT := $(addsuffix .text,$(addprefix $(DOC)/,$(basename $(SOURCE)))) + +#--------- + +# Rules to build HTML documentations 
+ifdef DEBIANDOC_HTML + +# Install generation hooks +doc: $($(LOCAL)-HTML) +veryclean: veryclean/html/$(LOCAL) + +vpath %.sgml $(SUBDIRS) +$(DOC)/%.html: %.sgml + echo Creating html for $< to $@ + -rm -rf $@ + (HERE=`pwd`; cd $(@D) && debiandoc2html $$HERE/$<) + +# Clean rule +.PHONY: veryclean/html/$(LOCAL) +veryclean/html/$(LOCAL): + -rm -rf $($(@F)-HTML) + +endif + +#--------- + +# Rules to build Text documentations +ifdef DEBIANDOC_TEXT + +# Install generation hooks +doc: $($(LOCAL)-TEXT) +veryclean: veryclean/text/$(LOCAL) + +vpath %.sgml $(SUBDIRS) +$(DOC)/%.text: %.sgml + echo Creating text for $< to $@ + debiandoc2text -O $< > $@ + +# Clean rule +.PHONY: veryclean/text/$(LOCAL) +veryclean/text/$(LOCAL): + -rm -rf $($(@F)-TEXT) + +endif diff --git a/tools/dsync-0.0/buildlib/defaults.mak b/tools/dsync-0.0/buildlib/defaults.mak new file mode 100644 index 00000000..d04b67ff --- /dev/null +++ b/tools/dsync-0.0/buildlib/defaults.mak @@ -0,0 +1,136 @@ +# -*- make -*- + +# This file configures the default environment for the make system +# The way it works is fairly simple, each module is defined in it's +# own *.mak file. It expects a set of variables to be set to values +# for it to operate as expected. When included the module generates +# the requested rules based on the contents of its control variables. + +# This works out very well and allows a good degree of flexability. +# To accomidate some of the features we introduce the concept of +# local variables. To do this we use the 'Computed Names' feature of +# gmake. Each module declares a LOCAL scope and access it with, +# $($(LOCAL)-VAR) +# This works very well but it is important to rembember that within +# a rule the LOCAL var is unavailble, it will have to be constructed +# from the information in the rule invokation. For stock rules like +# clean this is simple, we use a local clean rule called clean/$(LOCAL) +# and then within the rule $(@F) gets back $(LOCAL)! 
Other rules will +# have to use some other mechanism (filter perhaps?) The reason such +# lengths are used is so that each directory can contain several 'instances' +# of any given module. I notice that the very latest gmake has the concept +# of local variables for rules. It is possible this feature in conjunction +# with the generated names will provide a very powerfull solution indeed! + +# A build directory is used by default, all generated items get put into +# there. However unlike automake this is not done with a VPATH build +# (vpath builds break the distinction between #include "" and #include <>) +# but by explicly setting the BUILD variable. Make is invoked from +# within the source itself which is much more compatible with compilation +# environments. +ifndef NOISY +.SILENT: +endif + +# Search for the build directory +ifdef BUILD +BUILD_POSSIBLE := $(BUILD) $(BASE)/$(BUILD) +else +BUILD_POSSIBLE := $(BASE) $(BASE)/build-$(shell uname -m) $(BASE)/build +endif + +BUILDX:= $(foreach i,$(BUILD_POSSIBLE),$(wildcard $(i)/environment.mak*)) +BUILDX:= $(patsubst %/,%,$(firstword $(dir $(BUILDX)))) + +ifeq ($(words $(BUILDX)),0) +error-all: + echo Can't find the build directory in $(BUILD_POSSIBLE) -- use BUILD= +endif + +override BUILD := $(BUILDX) + +# Base definitions +INCLUDE := $(BUILD)/include +BIN := $(BUILD)/bin +LIB := $(BIN) +OBJ := $(BUILD)/obj/$(SUBDIR) +DEP := $(OBJ) +DOC := $(BUILD)/docs + +# Module types +LIBRARY_H = $(BASE)/buildlib/library.mak +DEBIANDOC_H = $(BASE)/buildlib/debiandoc.mak +MANPAGE_H = $(BASE)/buildlib/manpage.mak +PROGRAM_H = $(BASE)/buildlib/program.mak +COPY_H = $(BASE)/buildlib/copy.mak +YODL_MANPAGE_H = $(BASE)/buildlib/yodl_manpage.mak + +ifdef STATICLIBS +LIBRARY_H += $(BASE)/buildlib/staticlibrary.mak +endif + +ifdef ONLYSTATICLIBS +LIBRARY_H = $(BASE)/buildlib/staticlibrary.mak +endif + +# Source location control +# SUBDIRS specifies sub components of the module that +# may be located in subdrictories of the source dir. 
+# This should be declared before including this file +SUBDIRS+= + +# Header file control. +# TARGETDIRS indicitates all of the locations that public headers +# will be published to. +# This should be declared before including this file +HEADER_TARGETDIRS+= + +# Options +include $(BUILD)/environment.mak +CPPFLAGS+= -I$(INCLUDE) +LDFLAGS+= -L$(LIB) + +# Phony rules. Other things hook these by appending to the dependency +# list +.PHONY: headers library clean veryclean all binary program doc +.PHONY: maintainer-clean dist-clean distclean pristine sanity +all: binary doc +binary: library program +maintainer-clean dist-clean distclean pristine sanity: veryclean +headers library clean veryclean program: + +veryclean: + echo Very Clean done for $(SUBDIR) +clean: + echo Clean done for $(SUBDIR) + +# Header file control. We want all published interface headers to go +# into the build directory from thier source dirs. We setup some +# search paths here +vpath %.h $(SUBDIRS) +$(INCLUDE)/%.h $(addprefix $(INCLUDE)/,$(addsuffix /%.h,$(HEADER_TARGETDIRS))) : %.h + cp $< $@ + +# Dependency generation. We want to generate a .d file using gnu cpp. +# For GNU systems the compiler can spit out a .d file while it is compiling, +# this is specified with the INLINEDEPFLAG. Other systems might have a +# makedep program that can be called after compiling, that's illistrated +# by the DEPFLAG case. 
+# Compile rules are expected to call this macro after calling the compiler +ifdef INLINEDEPFLAG + define DoDep + sed -e "1s/.*:/$(subst /,\\/,$@):/" $(basename $(@F)).d > $(DEP)/$(@F).d + -rm -f $(basename $(@F)).d + endef +else + ifdef DEPFLAG + define DoDep + $(CXX) $(DEPFLAG) $(CPPFLAGS) -o $@ $< + sed -e "1s/.*:/$(subst /,\\/,$@):/" $(basename $(@F)).d > $(DEP)/$(@F).d + -rm -f $(basename $(@F)).d + endef + else + define DoDep + endef + endif +endif diff --git a/tools/dsync-0.0/buildlib/environment.mak.in b/tools/dsync-0.0/buildlib/environment.mak.in new file mode 100644 index 00000000..ab9938d6 --- /dev/null +++ b/tools/dsync-0.0/buildlib/environment.mak.in @@ -0,0 +1,35 @@ +# This file contains everything that autoconf guessed for your system. +# if you want you can edit it, just don't re-run configure. + +# C++ compiler options +AR = @AR@ +CC = @CC@ +CPPFLAGS+= @CPPFLAGS@ @DEFS@ -D_REENTRANT +CXX = @CXX@ +CXXFLAGS+= @CXXFLAGS@ + +# Linker stuff +PICFLAGS+= -fPIC -DPIC +LFLAGS+= @LDFLAGS@ + +# Dep generation - this only works for gnu stuff +INLINEDEPFLAG = + +# Debian doc stuff +DEBIANDOC_HTML = @DEBIANDOC_HTML@ +DEBIANDOC_TEXT = @DEBIANDOC_TEXT@ + +# YODL for the man pages +YODL_MAN = @YODL_MAN@ + +# Various library checks +PTHREADLIB = @PTHREADLIB@ +HAVE_C9X = @HAVE_C9X@ + +# Shared library things +HOST_OS = @host_os@ +ifeq ($(HOST_OS),linux-gnu) + ONLYSHAREDLIBS = yes + SONAME_MAGIC=-Wl,-h -Wl, + LFLAGS_SO= +endif diff --git a/tools/dsync-0.0/buildlib/install-sh b/tools/dsync-0.0/buildlib/install-sh new file mode 100644 index 00000000..ebc66913 --- /dev/null +++ b/tools/dsync-0.0/buildlib/install-sh @@ -0,0 +1,250 @@ +#! /bin/sh +# +# install - install a program, script, or datafile +# This comes from X11R5 (mit/util/scripts/install.sh). 
+# +# Copyright 1991 by the Massachusetts Institute of Technology +# +# Permission to use, copy, modify, distribute, and sell this software and its +# documentation for any purpose is hereby granted without fee, provided that +# the above copyright notice appear in all copies and that both that +# copyright notice and this permission notice appear in supporting +# documentation, and that the name of M.I.T. not be used in advertising or +# publicity pertaining to distribution of the software without specific, +# written prior permission. M.I.T. makes no representations about the +# suitability of this software for any purpose. It is provided "as is" +# without express or implied warranty. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. It can only install one file at a time, a restriction +# shared with many OS's install programs. + + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit="${DOITPROG-}" + + +# put in absolute paths if you don't have them in your path; or use env. vars. 
+ +mvprog="${MVPROG-mv}" +cpprog="${CPPROG-cp}" +chmodprog="${CHMODPROG-chmod}" +chownprog="${CHOWNPROG-chown}" +chgrpprog="${CHGRPPROG-chgrp}" +stripprog="${STRIPPROG-strip}" +rmprog="${RMPROG-rm}" +mkdirprog="${MKDIRPROG-mkdir}" + +transformbasename="" +transform_arg="" +instcmd="$mvprog" +chmodcmd="$chmodprog 0755" +chowncmd="" +chgrpcmd="" +stripcmd="" +rmcmd="$rmprog -f" +mvcmd="$mvprog" +src="" +dst="" +dir_arg="" + +while [ x"$1" != x ]; do + case $1 in + -c) instcmd="$cpprog" + shift + continue;; + + -d) dir_arg=true + shift + continue;; + + -m) chmodcmd="$chmodprog $2" + shift + shift + continue;; + + -o) chowncmd="$chownprog $2" + shift + shift + continue;; + + -g) chgrpcmd="$chgrpprog $2" + shift + shift + continue;; + + -s) stripcmd="$stripprog" + shift + continue;; + + -t=*) transformarg=`echo $1 | sed 's/-t=//'` + shift + continue;; + + -b=*) transformbasename=`echo $1 | sed 's/-b=//'` + shift + continue;; + + *) if [ x"$src" = x ] + then + src=$1 + else + # this colon is to work around a 386BSD /bin/sh bug + : + dst=$1 + fi + shift + continue;; + esac +done + +if [ x"$src" = x ] +then + echo "install: no input file specified" + exit 1 +else + true +fi + +if [ x"$dir_arg" != x ]; then + dst=$src + src="" + + if [ -d $dst ]; then + instcmd=: + else + instcmd=mkdir + fi +else + +# Waiting for this to be detected by the "$instcmd $src $dsttmp" command +# might cause directories to be created, which would be especially bad +# if $src (and thus $dsttmp) contains '*'. 
+ + if [ -f $src -o -d $src ] + then + true + else + echo "install: $src does not exist" + exit 1 + fi + + if [ x"$dst" = x ] + then + echo "install: no destination specified" + exit 1 + else + true + fi + +# If destination is a directory, append the input filename; if your system +# does not like double slashes in filenames, you may need to add some logic + + if [ -d $dst ] + then + dst="$dst"/`basename $src` + else + true + fi +fi + +## this sed command emulates the dirname command +dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` + +# Make sure that the destination directory exists. +# this part is taken from Noah Friedman's mkinstalldirs script + +# Skip lots of stat calls in the usual case. +if [ ! -d "$dstdir" ]; then +defaultIFS=' +' +IFS="${IFS-${defaultIFS}}" + +oIFS="${IFS}" +# Some sh's can't handle IFS=/ for some reason. +IFS='%' +set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` +IFS="${oIFS}" + +pathcomp='' + +while [ $# -ne 0 ] ; do + pathcomp="${pathcomp}${1}" + shift + + if [ ! -d "${pathcomp}" ] ; + then + $mkdirprog "${pathcomp}" + else + true + fi + + pathcomp="${pathcomp}/" +done +fi + +if [ x"$dir_arg" != x ] +then + $doit $instcmd $dst && + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi +else + +# If we're going to rename the final executable, determine the name now. + + if [ x"$transformarg" = x ] + then + dstfile=`basename $dst` + else + dstfile=`basename $dst $transformbasename | + sed $transformarg`$transformbasename + fi + +# don't allow the sed command to completely eliminate the filename + + if [ x"$dstfile" = x ] + then + dstfile=`basename $dst` + else + true + fi + +# Make a temp file name in the proper directory. 
+ + dsttmp=$dstdir/#inst.$$# + +# Move or copy the file name to the temp name + + $doit $instcmd $src $dsttmp && + + trap "rm -f ${dsttmp}" 0 && + +# and set any options; do chmod last to preserve setuid bits + +# If any of these fail, we abort the whole thing. If we want to +# ignore errors from any of these, just make sure not to ignore +# errors from the above "$doit $instcmd $src $dsttmp" command. + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && + +# Now rename the file to the real destination. + + $doit $rmcmd -f $dstdir/$dstfile && + $doit $mvcmd $dsttmp $dstdir/$dstfile + +fi && + + +exit 0 diff --git a/tools/dsync-0.0/buildlib/inttypes.h.in b/tools/dsync-0.0/buildlib/inttypes.h.in new file mode 100644 index 00000000..3be72079 --- /dev/null +++ b/tools/dsync-0.0/buildlib/inttypes.h.in @@ -0,0 +1,43 @@ +/* This is an ISO C 9X header file. 
We omit this copy to the include + directory if the local platform does not have inttypes.h, it contains + [u]int[8,16,32]_t fixed width types */ + +#include + +/* Generate the fixed bit size types */ +#if SIZEOF_INT == 4 + typedef int int32_t; + typedef unsigned int uint32_t; +#else +# if SIZEOF_LONG == 4 + typedef long int32_t; + typedef unsigned long uint32_t; +# else +# if SIZEOF_SHORT == 4 + typedef short int32_t; + typedef unsigned short uint32_t; +# else +# error Must have a form of 32-bit integer +# endif +# endif +#endif + +#if SIZEOF_INT == 2 + typedef int int16_t; + typedef unsigned int uint16_t; +#else +# if SIZEOF_LONG == 2 + typedef long int16_t; + typedef unsigned long uint16_t; +# else +# if SIZEOF_SHORT == 2 + typedef short int16_t; + typedef unsigned short uint16_t; +# else +# error Must have a form of 16-bit integer +# endif +# endif +#endif + +typedef signed char int8_t; +typedef unsigned char uint8_t; diff --git a/tools/dsync-0.0/buildlib/library.mak b/tools/dsync-0.0/buildlib/library.mak new file mode 100644 index 00000000..565baa39 --- /dev/null +++ b/tools/dsync-0.0/buildlib/library.mak @@ -0,0 +1,65 @@ +# -*- make -*- + +# This creates a shared library. + +# Input +# $(SOURCE) - The source code to use +# $(HEADERS) - Exported header files and private header files +# $(LIBRARY) - The name of the library without lib or .so +# $(MAJOR) - The major version number of this library +# $(MINOR) - The minor version number of this library + +# All output is writtin to .opic files in the build directory to +# signify the PIC output. 
+ +# See defaults.mak for information about LOCAL + +# Some local definitions +LOCAL := lib$(LIBRARY).so.$(MAJOR).$(MINOR) +$(LOCAL)-OBJS := $(addprefix $(OBJ)/,$(addsuffix .opic,$(notdir $(basename $(SOURCE))))) +$(LOCAL)-DEP := $(addprefix $(DEP)/,$(addsuffix .opic.d,$(notdir $(basename $(SOURCE))))) +$(LOCAL)-HEADERS := $(addprefix $(INCLUDE)/,$(HEADERS)) +$(LOCAL)-SONAME := lib$(LIBRARY).so.$(MAJOR) +$(LOCAL)-SLIBS := $(SLIBS) +$(LOCAL)-LIBRARY := $(LIBRARY) + +# Install the command hooks +headers: $($(LOCAL)-HEADERS) +library: $(LIB)/lib$(LIBRARY).so $(LIB)/lib$(LIBRARY).so.$(MAJOR) +clean: clean/$(LOCAL) +veryclean: veryclean/$(LOCAL) + +# The clean rules +.PHONY: clean/$(LOCAL) veryclean/$(LOCAL) +clean/$(LOCAL): + -rm -f $($(@F)-OBJS) $($(@F)-DEP) +veryclean/$(LOCAL): clean/$(LOCAL) + -rm -f $($(@F)-HEADERS) $(LIB)/lib$($(@F)-LIBRARY).so* + +# Build rules for the two symlinks +.PHONY: $(LIB)/lib$(LIBRARY).so.$(MAJOR) $(LIB)/lib$(LIBRARY).so +$(LIB)/lib$(LIBRARY).so.$(MAJOR): $(LIB)/lib$(LIBRARY).so.$(MAJOR).$(MINOR) + ln -sf $( /dev/null + echo Building shared library $@ + $(CXX) $(CXXFLAGS) $(LDFLAGS) $(PICFLAGS) $(LFLAGS) -o $@ \ + $(LFLAGS_SO) $(SONAME_MAGIC)$($(@F)-SONAME) -shared \ + $(filter %.opic,$^) $($(@F)-SLIBS) + +# Compilation rules +vpath %.cc $(SUBDIRS) +$(OBJ)/%.opic: %.cc + echo Compiling $< to $@ + $(CXX) -c $(INLINEDEPFLAG) $(CPPFLAGS) $(CXXFLAGS) $(PICFLAGS) -o $@ $< + $(DoDep) + +# Include the dependencies that are available +The_DFiles = $(wildcard $($(LOCAL)-DEP)) +ifneq ($(words $(The_DFiles)),0) +include $(The_DFiles) +endif diff --git a/tools/dsync-0.0/buildlib/makefile.in b/tools/dsync-0.0/buildlib/makefile.in new file mode 100644 index 00000000..de2f70de --- /dev/null +++ b/tools/dsync-0.0/buildlib/makefile.in @@ -0,0 +1,41 @@ +# -*- make -*- + +# This is the build directory make file, it sets the build directory +# and runs the src makefile. 
+ifndef NOISY +.SILENT: +endif +include environment.mak + +SRCDIR=@top_srcdir@ +DIRS:=./docs ./bin ./obj ./include +SUBDIRS:= $(DIRS) ./include/dsync ./obj/libdsync ./obj/test ./obj/cmdline +BUILD:=$(shell pwd) +export BUILD + +# Chain to the parent make to do the actual building +.PHONY: headers library clean veryclean all binary program doc \ + veryclean/local +all headers library clean veryclean binary program doc: + $(MAKE) -C $(SRCDIR) -f Makefile $@ + +# Purge everything. +.PHONY: maintainer-clean dist-clean pristine sanity distclean +maintainer-clean dist-clean pristine sanity distclean: + -rm -rf $(DIRS) + -rm -f config.cache config.log config.status environment.mak makefile + +# This makes any missing directories +.PHONY: dirs +MISSING_DIRS:= $(filter-out $(wildcard $(SUBDIRS)),$(SUBDIRS)) +dirs: +ifneq ($(words $(MISSING_DIRS)),0) + @mkdir $(MISSING_DIRS) +else + @echo > /dev/null +endif +ifeq ($(HAVE_C9X),yes) + -@rm include/inttypes.h > /dev/null 2>&1 +else + @cp $(SRCDIR)/buildlib/inttypes.h.in include/inttypes.h +endif diff --git a/tools/dsync-0.0/buildlib/manpage.mak b/tools/dsync-0.0/buildlib/manpage.mak new file mode 100644 index 00000000..cfa5fc1a --- /dev/null +++ b/tools/dsync-0.0/buildlib/manpage.mak @@ -0,0 +1,27 @@ +# -*- make -*- + +# This installs man pages into the doc directory + +# Input +# $(SOURCE) - The documents to use + +# All output is writtin to files in the build doc directory + +# See defaults.mak for information about LOCAL + +# Some local definitions +LOCAL := manpage-$(firstword $(SOURCE)) +$(LOCAL)-LIST := $(addprefix $(DOC)/,$(SOURCE)) + +# Install generation hooks +doc: $($(LOCAL)-LIST) +veryclean: veryclean/$(LOCAL) + +$($(LOCAL)-LIST) : $(DOC)/% : % + echo Installing man page $< to $(@D) + cp $< $(@D) + +# Clean rule +.PHONY: veryclean/$(LOCAL) +veryclean/$(LOCAL): + -rm -rf $($(@F)-LIST) diff --git a/tools/dsync-0.0/buildlib/mkChangeLog b/tools/dsync-0.0/buildlib/mkChangeLog new file mode 100755 index 00000000..c54a4333 
--- /dev/null +++ b/tools/dsync-0.0/buildlib/mkChangeLog @@ -0,0 +1,13 @@ +#!/bin/sh + +NAMES="`sed -ne 's/^.*CVS:\([^ ]\+\) \([^<]\+\) <\([^>]*\)>/\ + -u '\''\1:\2:\3'\''/gp' AUTHORS`" +OPTIONS="-l 78" + +# Generate the standard ChangeLog +echo CVSIGNORE=po rcs2log $OPTIONS $NAMES +eval CVSIGNORE=po rcs2log $OPTIONS $NAMES >> ChangeLog + +# Generate the po ChangeLog +echo rcs2log $OPTIONS $NAMES po +eval rcs2log $OPTIONS $NAMES po >> po/ChangeLog diff --git a/tools/dsync-0.0/buildlib/program.mak b/tools/dsync-0.0/buildlib/program.mak new file mode 100644 index 00000000..fe0d30de --- /dev/null +++ b/tools/dsync-0.0/buildlib/program.mak @@ -0,0 +1,50 @@ +# -*- make -*- + +# This creates a program + +# Input +# $(SOURCE) - The source code to use +# $(PROGRAM) - The name of the program +# $(SLIBS) - Shared libs to link against +# $(LIB_MAKES) - Shared libary make files to depend on - to ensure we get +# remade when the shared library version increases. + +# See defaults.mak for information about LOCAL + +# Some local definitions +LOCAL := $(PROGRAM) +$(LOCAL)-OBJS := $(addprefix $(OBJ)/,$(addsuffix .o,$(notdir $(basename $(SOURCE))))) +$(LOCAL)-DEP := $(addprefix $(DEP)/,$(addsuffix .o.d,$(notdir $(basename $(SOURCE))))) +$(LOCAL)-BIN := $(BIN)/$(PROGRAM) +$(LOCAL)-SLIBS := $(SLIBS) +$(LOCAL)-MKS := $(addprefix $(BASE)/,$(LIB_MAKES)) + +# Install the command hooks +program: $(BIN)/$(PROGRAM) +clean: clean/$(LOCAL) +veryclean: veryclean/$(LOCAL) + +# The clean rules +.PHONY: clean/$(LOCAL) veryclean/$(LOCAL) +clean/$(LOCAL): + -rm -f $($(@F)-OBJS) $($(@F)-DEP) +veryclean/$(LOCAL): clean/$(LOCAL) + -rm -f $($(@F)-BIN) + +# The binary build rule +$($(LOCAL)-BIN): $($(LOCAL)-OBJS) $($(LOCAL)-MKS) + echo Building program $@ + $(CXX) $(CXXFLAGS) $(LDFLAGS) $(LFLAGS) -o $@ $(filter %.o,$^) $($(@F)-SLIBS) $(LEFLAGS) + +# Compilation rules +vpath %.cc $(SUBDIRS) +$(OBJ)/%.o: %.cc + echo Compiling $< to $@ + $(CXX) -c $(INLINEDEPFLAG) $(CPPFLAGS) $(CXXFLAGS) -o $@ $< + $(DoDep) 
+ +# Include the dependencies that are available +The_DFiles = $(wildcard $($(LOCAL)-DEP)) +ifneq ($(words $(The_DFiles)),0) +include $(The_DFiles) +endif diff --git a/tools/dsync-0.0/buildlib/sizetable b/tools/dsync-0.0/buildlib/sizetable new file mode 100644 index 00000000..b6dbca3c --- /dev/null +++ b/tools/dsync-0.0/buildlib/sizetable @@ -0,0 +1,19 @@ +# +# This file lists common architectures for cross-compilation (CPUs, not +# OSs), and the endian-ness and relative type sizes. It is not needed for +# native compilation. +# +# If you wish to cross-compile APT, and your architecture is not listed +# here, you should add it, and submit it by email to the APT team at +# . +# +# This is used primarily for the MD5 algorithm. +# The format is:- +# CPU ':' endian sizeof: char, int, short, long +i386: little 1 4 2 4 +alpha: little 1 4 2 8 +sparc: big 1 4 2 4 +m68k: big 1 4 2 4 +powerpc: big 1 4 2 4 +mipsel: little 1 4 2 4 +x86_64: little 1 4 2 8 diff --git a/tools/dsync-0.0/buildlib/staticlibrary.mak b/tools/dsync-0.0/buildlib/staticlibrary.mak new file mode 100644 index 00000000..998ca5bf --- /dev/null +++ b/tools/dsync-0.0/buildlib/staticlibrary.mak @@ -0,0 +1,54 @@ +# -*- make -*- + +# This creates a static library. 
+ +# Input +# $(SOURCE) - The source code to use +# $(HEADERS) - Exported header files and private header files +# $(LIBRARY) - The name of the library without lib or .so + +# All output is writtin to .o files in the build directory + +# See defaults.mak for information about LOCAL + +# Some local definitions +LOCAL := lib$(LIBRARY).a +$(LOCAL)-OBJS := $(addprefix $(OBJ)/,$(addsuffix .o,$(notdir $(basename $(SOURCE))))) +$(LOCAL)-DEP := $(addprefix $(DEP)/,$(addsuffix .o.d,$(notdir $(basename $(SOURCE))))) +$(LOCAL)-HEADERS := $(addprefix $(INCLUDE)/,$(HEADERS)) +$(LOCAL)-LIB := $(LIB)/lib$(LIBRARY).a + +# Install the command hooks +headers: $($(LOCAL)-HEADERS) +library: $($(LOCAL)-LIB) +clean: clean/$(LOCAL) +veryclean: veryclean/$(LOCAL) + +# The clean rules +.PHONY: clean/$(LOCAL) veryclean/$(LOCAL) +clean/$(LOCAL): + -rm -f $($(@F)-OBJS) $($(@F)-DEP) +veryclean/$(LOCAL): clean/$(LOCAL) + -rm -f $($(@F)-HEADERS) $($(@F)-LIB) + +# Build rules for the two symlinks +.PHONY: $($(LOCAL)-LIB) + +# The binary build rule +$($(LOCAL)-LIB): $($(LOCAL)-HEADERS) $($(LOCAL)-OBJS) + echo Building library $@ + -rm $@ > /dev/null 2>&1 + $(AR) cq $@ $(filter %.o,$^) + +# Compilation rules +vpath %.cc $(SUBDIRS) +$(OBJ)/%.o: %.cc + echo Compiling $< to $@ + $(CXX) -c $(INLINEDEPFLAG) $(CPPFLAGS) $(CXXFLAGS) -o $@ $< + $(DoDep) + +# Include the dependencies that are available +The_DFiles = $(wildcard $($(LOCAL)-DEP)) +ifneq ($(words $(The_DFiles)),0) +include $(The_DFiles) +endif diff --git a/tools/dsync-0.0/buildlib/tools.m4 b/tools/dsync-0.0/buildlib/tools.m4 new file mode 100644 index 00000000..06f8770f --- /dev/null +++ b/tools/dsync-0.0/buildlib/tools.m4 @@ -0,0 +1,10 @@ +# tl_CHECK_TOOL_PREFIX will work _BEFORE_ AC_CANONICAL_HOST, etc., has been +# called. It should be called again after these have been called. +# +# Basically we want to check if the host alias specified by the user is +# different from the build alias. 
The rules work like this:- +# +# If host is not specified, it defaults to NONOPT +# If build is not specified, it defaults to NONOPT +# If nonopt is not specified, we guess all other values + diff --git a/tools/dsync-0.0/buildlib/yodl_manpage.mak b/tools/dsync-0.0/buildlib/yodl_manpage.mak new file mode 100644 index 00000000..a5f436f6 --- /dev/null +++ b/tools/dsync-0.0/buildlib/yodl_manpage.mak @@ -0,0 +1,42 @@ +# -*- make -*- + +# This handles man pages in YODL format. We convert to the respective +# output in the source directory then copy over to the final dest. This +# means yodl is only needed if compiling from CVS + +# Input +# $(SOURCE) - The documents to use, in the form foo.sect, ie apt-cache.8 +# the yodl files are called apt-cache.8.yo + +# See defaults.mak for information about LOCAL + +# Some local definitions +ifdef YODL_MAN + +LOCAL := yodl-manpage-$(firstword $(SOURCE)) +$(LOCAL)-LIST := $(SOURCE) + +# Install generation hooks +doc: $($(LOCAL)-LIST) +veryclean: veryclean/$(LOCAL) + +$($(LOCAL)-LIST) :: % : %.yo + echo Creating man page $@ + yodl2man -o $@ $< + +# Clean rule +.PHONY: veryclean/$(LOCAL) +veryclean/$(LOCAL): + -rm -rf $($(@F)-LIST) + +else + +# Strip from the source list any man pages we dont have compiled already +SOURCE := $(wildcard $(SOURCE)) + +endif + +# Chain to the manpage rule +ifneq ($(words $(SOURCE)),0) +include $(MANPAGE_H) +endif diff --git a/tools/dsync-0.0/cmdline/dsync-cdimage.cc b/tools/dsync-0.0/cmdline/dsync-cdimage.cc new file mode 100644 index 00000000..74e9128f --- /dev/null +++ b/tools/dsync-0.0/cmdline/dsync-cdimage.cc @@ -0,0 +1,174 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: dsync-cdimage.cc,v 1.2 1999/12/26 06:59:00 jgg Exp $ +/* ###################################################################### + + DSync CD Image - CD Image transfer program + + This implements the DSync CD transfer method. 
This method is optimized + to reconstruct a CD from a mirror of the CD's contents and the original + ISO image. + + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +using namespace std; + /*}}}*/ + +// Externs /*{{{*/ +ostream c0out(cout.rdbuf()); +ostream c1out(cout.rdbuf()); +ostream c2out(cout.rdbuf()); +ofstream devnull("/dev/null"); +unsigned int ScreenWidth = 80; + /*}}}*/ + +// DoGenerate - Generate the checksum list /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool DoGenerate(CommandLine &CmdL) +{ + return true; +} + /*}}}*/ +// DoAggregate - Generate aggregated file records /*{{{*/ +// --------------------------------------------------------------------- +/* This takes a file list with already generated rsync checksums and builds + aggregated file lists for each checksum record */ +bool DoAggregate(CommandLine &CmdL) +{ + if (CmdL.FileList[1] == 0) + return _error->Error("You must specify a file name"); + + // Open the file + dsMMapIO IO(CmdL.FileList[1]); + if (_error->PendingError() == true) + return false; + + dsFList List; + if (List.Step(IO) == false || List.Tag != dsFList::tHeader) + return _error->Error("Unable to read header"); + + string Dir; + string File; + while (List.Step(IO) == true) + { + if (List.Tag == dsFList::tDirStart) + { + Dir = List.Dir.Name; + continue; + } + + if (List.Entity != 0) + { + File = List.Entity->Name; + continue; + } + + if (List.Tag == dsFList::tRSyncChecksum) + { + RSyncMatch Match(List.RChk); + } + + if (List.Tag == dsFList::tTrailer) + break; + } + + return true; +} + /*}}}*/ + +// ShowHelp - Show the help screen /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool ShowHelp(CommandLine &CmdL) +{ + cout << PACKAGE << ' ' << VERSION << " for " << ARCHITECTURE << + " compiled on " << 
__DATE__ << " " << __TIME__ << endl; + + cout << + "Usage: dsync-cdimage [options] command [file]\n" + "\n" + "dsync-cdimage is a tool for replicating CD images from a mirror of\n" + "their contents.\n" + "\n" + "Commands:\n" + " generate - Build a file+checksum index\n" + " help - This help text\n" + " verify - Compare the index against files in the current directory\n" + "\n" + "Options:\n" + " -h This help text.\n" + " -q Loggable output - no progress indicator\n" + " -qq No output except for errors\n" + " -c=? Read this configuration file\n" + " -o=? Set an arbitary configuration option, ie -o dir::cache=/tmp\n" + "See the dsync-cdimage(1) and dsync.conf(5) manual\n" + "pages for more information." << endl; + return 100; +} + /*}}}*/ + +int main(int argc, const char *argv[]) +{ + CommandLine::Args Args[] = { + {'h',"help","help",0}, + {'q',"quiet","quiet",CommandLine::IntLevel}, + {'q',"silent","quiet",CommandLine::IntLevel}, + {'v',"verbose","verbose",CommandLine::IntLevel}, + {'c',"config-file",0,CommandLine::ConfigFile}, + {'o',"option",0,CommandLine::ArbItem}, + {0,0,0,0}}; + CommandLine::Dispatch Cmds[] = {{"generate",&DoGenerate}, + {"help",&ShowHelp}, + {"aggregate",&DoAggregate}, + {0,0}}; + CommandLine CmdL(Args,_config); + if (CmdL.Parse(argc,argv) == false) + { + _error->DumpErrors(); + return 100; + } + + // See if the help should be shown + if (_config->FindB("help") == true || + CmdL.FileSize() == 0) + return ShowHelp(CmdL); + + // Setup the output streams + c0out.rdbuf(cout.rdbuf()); + c1out.rdbuf(cout.rdbuf()); + c2out.rdbuf(cout.rdbuf()); + if (_config->FindI("quiet",0) > 0) + c0out.rdbuf(devnull.rdbuf()); + if (_config->FindI("quiet",0) > 1) + c1out.rdbuf(devnull.rdbuf()); + + // Setup the signals +/* signal(SIGWINCH,SigWinch); + SigWinch(0);*/ + + // Match the operation + CmdL.DispatchArg(Cmds); + + // Print any errors or warnings found during parsing + if (_error->empty() == false) + { + + bool Errors = _error->PendingError(); + 
_error->DumpErrors(); + return Errors == true?100:0; + } + + return 0; +} diff --git a/tools/dsync-0.0/cmdline/dsync-flist.cc b/tools/dsync-0.0/cmdline/dsync-flist.cc new file mode 100644 index 00000000..e9ebb289 --- /dev/null +++ b/tools/dsync-0.0/cmdline/dsync-flist.cc @@ -0,0 +1,1097 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: dsync-flist.cc,v 1.27 1999/12/26 06:59:00 jgg Exp $ +/* ###################################################################### + + Dsync FileList is a tool to manipulate and generate the dsync file + listing + + Several usefull functions are provided, the most notable is to generate + the file list and to dump it. There is also a function to compare the + file list against a local directory tree. + + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync-flist.h" +#endif + +#include "dsync-flist.h" +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +using namespace std; + + /*}}}*/ + +// Externs /*{{{*/ +ostream c0out(cout.rdbuf()); +ostream c1out(cout.rdbuf()); +ostream c2out(cout.rdbuf()); +ofstream devnull("/dev/null"); +unsigned int ScreenWidth = 80; + /*}}}*/ + +// Progress::Progress - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +Progress::Progress() +{ + Quiet = false; + if (_config->FindI("quiet",0) > 0) + Quiet = true; + DirCount = 0; + FileCount = 0; + LinkCount = 0; + Bytes = 0; + CkSumBytes = 0; + gettimeofday(&StartTime,0); +} + /*}}}*/ +// Progress::Done - Clear the progress meter /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void Progress::Done() +{ + if (Quiet == false) + c0out << '\r' << BlankLine << '\r' << flush; + BlankLine[0] = 0; +} + /*}}}*/ +// Progress::ElaspedTime - Return the time that has elapsed 
/*{{{*/ +// --------------------------------------------------------------------- +/* Computes the time difference with maximum accuracy */ +double Progress::ElapsedTime() +{ + // Compute the CPS and elapsed time + struct timeval Now; + gettimeofday(&Now,0); + + return Now.tv_sec - StartTime.tv_sec + (Now.tv_usec - + StartTime.tv_usec)/1000000.0; +} + /*}}}*/ +// Progress::Update - Update the meter /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void Progress::Update(const char *Directory) +{ + LastCount = DirCount+LinkCount+FileCount; + + if (Quiet == true) + return; + + // Put the number of files and bytes at the end of the meter + char S[1024]; + if (ScreenWidth > sizeof(S)-1) + ScreenWidth = sizeof(S)-1; + + unsigned int Len = snprintf(S,sizeof(S),"|%lu %sb", + DirCount+LinkCount+FileCount, + SizeToStr(Bytes).c_str()); + + memmove(S + (ScreenWidth - Len),S,Len+1); + memset(S,' ',ScreenWidth - Len); + + // Put the directory name at the front, possibly shortened + if (Directory == 0 || Directory[0] == 0) + S[snprintf(S,sizeof(S),"")] = ' '; + else + { + // If the path is too long fix it and prefix it with '...' + if (strlen(Directory) >= ScreenWidth - Len - 1) + { + S[snprintf(S,sizeof(S),"%s",Directory + + strlen(Directory) - ScreenWidth + Len + 1)] = ' '; + S[0] = '.'; S[1] = '.'; S[2] = '.'; + } + else + S[snprintf(S,sizeof(S),"%s",Directory)] = ' '; + } + + strcpy(LastLine,S); + c0out << S << '\r' << flush; + memset(BlankLine,' ',strlen(S)); + BlankLine[strlen(S)] = 0; +} + /*}}}*/ +// Progress::Stats - Show a statistics report /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void Progress::Stats(bool CkSum) +{ + // Display some interesting statistics + double Elapsed = ElapsedTime(); + c1out << DirCount << " directories, " << FileCount << + " files and " << LinkCount << " links (" << + (DirCount+FileCount+LinkCount) << "). 
"; + if (CkSum == true) + { + if (CkSumBytes == Bytes) + c1out << "Total Size is " << SizeToStr(Bytes) << "b. "; + else + c1out << SizeToStr(CkSumBytes) << '/' << + SizeToStr(Bytes) << "b hashed."; + } + else + c1out << "Total Size is " << SizeToStr(Bytes) << "b. "; + + c1out << endl; + c1out << "Elapsed time " << TimeToStr((long)Elapsed) << + " (" << SizeToStr((DirCount+FileCount+LinkCount)/Elapsed) << + " files/sec) "; + if (CkSumBytes != 0) + c1out << " (" << SizeToStr(CkSumBytes/Elapsed) << "b/s hash)"; + c1out << endl; +} + /*}}}*/ + +// ListGenerator::ListGenerator - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +ListGenerator::ListGenerator() +{ + Act = !_config->FindB("noact",false); + StripDepth = _config->FindI("FileList::CkSum-PathStrip",0); + Verbose = false; + if (_config->FindI("verbose",0) > 0) + Verbose = true; + DB = 0; + DBIO = 0; + + // Set RSync checksum limits + MinRSyncSize = _config->FindI("FileList::MinRSyncSize",0); + if (MinRSyncSize == 0) + MinRSyncSize = 1; + if (_config->FindB("FileList::RSync-Hashes",false) == false) + MinRSyncSize = 0; + + // Load the rsync filter + if (RSyncFilter.LoadFilter(_config->Tree("FList::RSync-Filter")) == false) + return; + + // Load the clean filter + if (RemoveFilter.LoadFilter(_config->Tree("FList::Clean-Filter")) == false) + return; +} + /*}}}*/ +// ListGenerator::~ListGenerator - Destructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +ListGenerator::~ListGenerator() +{ + delete DB; + delete DBIO; +} + /*}}}*/ +// ListGenerator::Visit - Collect statistics about the tree /*{{{*/ +// --------------------------------------------------------------------- +/* */ +int ListGenerator::Visit(const char *Directory,const char *File, + struct stat const &Stat) +{ + if (Prog.DirCount+Prog.LinkCount+Prog.FileCount - Prog.LastCount > 100 || + File == 0) + Prog.Update(Directory); + + // Ignore directory enters + if (File 
== 0) + return 0; + + // Increment our counters + if (S_ISDIR(Stat.st_mode) != 0) + Prog.DirCount++; + else + { + if (S_ISLNK(Stat.st_mode) != 0) + Prog.LinkCount++; + else + Prog.FileCount++; + } + + // Normal file + if (S_ISREG(Stat.st_mode) != 0) + Prog.Bytes += Stat.st_size; + + // Look for files to erase + if (S_ISDIR(Stat.st_mode) == 0 && + RemoveFilter.Test(Directory,File) == false) + { + Prog.Hide(); + c1out << "Unlinking " << Directory << File << endl; + Prog.Show(); + + if (Act == true && unlink(File) != 0) + { + _error->Errno("unlink","Failed to remove %s%s",Directory,File); + return -1; + } + + return 1; + } + + return 0; +} + /*}}}*/ +// ListGenerator::EmitMD5 - Perform md5 lookup caching /*{{{*/ +// --------------------------------------------------------------------- +/* This looks up the file in the cache to see if it is one we already + know the hash too */ +bool ListGenerator::EmitMD5(const char *Dir,const char *File, + struct stat const &St,unsigned char MD5[16], + unsigned int Tag,unsigned int Flag) +{ + if ((IO->Header.Flags[Tag] & Flag) != Flag) + return true; + + // Lookup the md5 in the old file list + if (DB != 0 && (DBIO->Header.Flags[Tag] & Flag) == Flag) + { + // Do a lookup and make sure the timestamps match + dsFList List; + bool Hit = false; + const char *iDir = Dir; + unsigned int Strip = StripDepth; + while (true) + { + if (DB->Lookup(*DBIO,iDir,File,List) == true && List.Entity != 0) + { + if ((signed)(List.Entity->ModTime + List.Head.Epoch) == St.st_mtime) + Hit = true; + break; + } + + if (Strip == 0) + break; + + Strip--; + for (; *iDir != 0 && *iDir != '/'; iDir++); + if (*iDir == 0 || iDir[1] == 0) + break; + iDir++; + } + + if (Hit == true) + { + /* Both hardlinks and normal files have md5s, also check that the + sizes match */ + if (List.File != 0 && List.File->Size == (unsigned)St.st_size) + { + memcpy(MD5,List.File->MD5,sizeof(List.File->MD5)); + return true; + } + } + } + + Prog.CkSumBytes += St.st_size; + + if (Verbose 
== true) + { + Prog.Hide(); + c1out << "MD5 " << Dir << File << endl; + Prog.Show(); + } + + return dsGenFileList::EmitMD5(Dir,File,St,MD5,Tag,Flag); +} + /*}}}*/ +// ListGenerator::NeedsRSync - Check if a file is rsyncable /*{{{*/ +// --------------------------------------------------------------------- +/* This checks the rsync filter list and the rsync size limit*/ +bool ListGenerator::NeedsRSync(const char *Dir,const char *File, + dsFList::NormalFile &F) +{ + if (MinRSyncSize == 0) + return false; + + if (F.Size <= MinRSyncSize) + return false; + + if (RSyncFilter.Test(Dir,File) == false) + return false; + + /* Add it to the counters, EmitMD5 will not be called if rsync checksums + are being built. */ + Prog.CkSumBytes += F.Size; + if (Verbose == true) + { + Prog.Hide(); + c1out << "RSYNC " << Dir << File << endl; + Prog.Show(); + } + + return true; +} + /*}}}*/ + +// Compare::Compare - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +Compare::Compare() +{ + Verbose = false; + if (_config->FindI("verbose",0) > 0) + Verbose = true; + Act = !_config->FindB("noact",false); + DoDelete = _config->FindB("delete",false); +} + /*}}}*/ +// Compare::Visit - Collect statistics about the tree /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool Compare::Visit(dsFList &List,string Dir) +{ + if (Prog.DirCount+Prog.LinkCount+Prog.FileCount - Prog.LastCount > 100 || + List.Tag == dsFList::tDirStart) + Prog.Update(Dir.c_str()); + + // Increment our counters + if (List.Tag == dsFList::tDirectory) + Prog.DirCount++; + else + { + if (List.Tag == dsFList::tSymlink) + Prog.LinkCount++; + + if (List.Tag == dsFList::tNormalFile || + List.Tag == dsFList::tHardLink || + List.Tag == dsFList::tDeviceSpecial) + Prog.FileCount++; + } + + // Normal file + if (List.File != 0) + Prog.Bytes += List.File->Size; + + return true; +} + /*}}}*/ +// Compare::PrintPath - Print out a path string /*{{{*/ +// 
--------------------------------------------------------------------- +/* This handles the absolute paths that can occure while processing */ +void Compare::PrintPath(ostream &out,string Dir,string Name) +{ + if (Name[0] != '/') + out << Dir << Name << endl; + else + out << string(Name,Base.length()) << endl; +} + /*}}}*/ + +// LookupPath - Find a full path within the database /*{{{*/ +// --------------------------------------------------------------------- +/* This does the necessary path simplification and symlink resolution + to locate the path safely. The file must exist locally inorder to + resolve the local symlinks. */ +bool LookupPath(const char *Path,dsFList &List,dsFileListDB &DB, + dsFList::IO &IO) +{ + char Buffer[2024]; + strcpy(Buffer,Path); + + if (SimplifyPath(Buffer) == false || + ResolveLink(Buffer,sizeof(Buffer)) == false) + return false; + + // Strip off the final component name + char *I = Buffer + strlen(Buffer); + for (; I != Buffer && (*I == '/' || *I == 0); I--); + for (; I != Buffer && *I != '/'; I--); + if (I != Buffer) + { + memmove(I+1,I,strlen(I) + 1); + I++; + *I = 0; + I++; + if (DB.Lookup(IO,Buffer,I,List) == false) + return false; + } + else + { + if (DB.Lookup(IO,"",I,List) == false) + return false; + } + + return true; +} + /*}}}*/ +// PrintMD5 - Prints the MD5 of a file in the form similar to md5sum /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void PrintMD5(dsFList &List,const char *Dir,const char *File = 0) +{ + if (List.File == 0 || + List.Head.Flags[List.Tag] & dsFList::NormalFile::FlMD5 == 0) + return; + + char S[16*2+1]; + for (unsigned int I = 0; I != 16; I++) + sprintf(S+2*I,"%02x",List.File->MD5[I]); + S[16*2] = 0; + if (File == 0) + cout << S << " " << Dir << List.File->Name << endl; + else + cout << S << " " << File << endl; +} + /*}}}*/ + +// DoGenerate - The Generate Command /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool 
DoGenerate(CommandLine &CmdL) +{ + ListGenerator Gen; + if (_error->PendingError() == true) + return false; + + // Load the filter list + if (Gen.Filter.LoadFilter(_config->Tree("FileList::Filter")) == false) + return false; + + // Load the delay filter list + if (Gen.PreferFilter.LoadFilter(_config->Tree("FileList::Prefer-Filter")) == false) + return false; + + // Determine the ordering to use + string Ord = _config->Find("FileList::Order","tree"); + if (stringcasecmp(Ord,"tree") == 0) + Gen.Type = dsGenFileList::Tree; + else + { + if (stringcasecmp(Ord,"breadth") == 0) + Gen.Type = dsGenFileList::Breadth; + else + { + if (stringcasecmp(Ord,"depth") == 0) + Gen.Type = dsGenFileList::Depth; + else + return _error->Error("Invalid ordering %s, must be tree, breadth or detph",Ord.c_str()); + } + } + + if (CmdL.FileList[1] == 0) + return _error->Error("You must specify a file name"); + + string List = CmdL.FileList[1]; + + // Open the original file to pull cached Check Sums out of + if (FileExists(List) == true && + _config->FindB("FileList::MD5-Hashes",false) == true) + { + Gen.DBIO = new dsMMapIO(List); + if (_error->PendingError() == true) + return false; + Gen.DB = new dsFileListDB; + if (Gen.DB->Generate(*Gen.DBIO) == false) + return false; + } + + // Sub scope to close the file + { + FdIO IO(List + ".new",FileFd::WriteEmpty); + + // Set the flags for the list + if (_config->FindB("FileList::MD5-Hashes",false) == true) + { + IO.Header.Flags[dsFList::tNormalFile] |= dsFList::NormalFile::FlMD5; + IO.Header.Flags[dsFList::tHardLink] |= dsFList::HardLink::FlMD5; + } + if (_config->FindB("FileList::Permissions",false) == true) + { + IO.Header.Flags[dsFList::tDirectory] |= dsFList::Directory::FlPerm; + IO.Header.Flags[dsFList::tNormalFile] |= dsFList::NormalFile::FlPerm; + IO.Header.Flags[dsFList::tHardLink] |= dsFList::HardLink::FlPerm; + } + if (_config->FindB("FileList::Ownership",false) == true) + { + IO.Header.Flags[dsFList::tDirectory] |= 
dsFList::Directory::FlOwner; + IO.Header.Flags[dsFList::tNormalFile] |= dsFList::NormalFile::FlOwner; + IO.Header.Flags[dsFList::tSymlink] |= dsFList::Symlink::FlOwner; + IO.Header.Flags[dsFList::tDeviceSpecial] |= dsFList::DeviceSpecial::FlOwner; + IO.Header.Flags[dsFList::tHardLink] |= dsFList::HardLink::FlOwner; + } + + if (Gen.Go("./",IO) == false) + return false; + Gen.Prog.Done(); + Gen.Prog.Stats(_config->FindB("FileList::MD5-Hashes",false)); + + delete Gen.DB; + Gen.DB = 0; + delete Gen.DBIO; + Gen.DBIO = 0; + } + + // Just in case :> + if (_error->PendingError() == true) + return false; + + // Swap files + bool OldExists = FileExists(List); + if (OldExists == true && rename(List.c_str(),(List + "~").c_str()) != 0) + return _error->Errno("rename","Unable to rename %s to %s~",List.c_str(),List.c_str()); + if (rename((List + ".new").c_str(),List.c_str()) != 0) + return _error->Errno("rename","Unable to rename %s.new to %s",List.c_str(),List.c_str()); + if (OldExists == true && unlink((List + "~").c_str()) != 0) + return _error->Errno("unlink","Unable to unlink %s~",List.c_str()); + + return true; +} + /*}}}*/ +// DoDump - Dump the contents of a file list /*{{{*/ +// --------------------------------------------------------------------- +/* This displays a short one line dump of each record in the file */ +bool DoDump(CommandLine &CmdL) +{ + if (CmdL.FileList[1] == 0) + return _error->Error("You must specify a file name"); + + // Open the file + dsMMapIO IO(CmdL.FileList[1]); + if (_error->PendingError() == true) + return false; + + dsFList List; + unsigned long CountDir = 0; + unsigned long CountFile = 0; + unsigned long CountLink = 0; + unsigned long CountLinkReal = 0; + unsigned long NumFiles = 0; + unsigned long NumDirs = 0; + unsigned long NumLinks = 0; + double Bytes = 0; + + while (List.Step(IO) == true) + { + if (List.Print(cout) == false) + return false; + + switch (List.Tag) + { + case dsFList::tDirMarker: + case dsFList::tDirStart: + case 
dsFList::tDirectory: + { + CountDir += List.Dir.Name.length(); + if (List.Tag == dsFList::tDirectory) + NumDirs++; + break; + } + + case dsFList::tHardLink: + case dsFList::tNormalFile: + { + CountFile += List.File->Name.length(); + NumFiles++; + Bytes += List.File->Size; + break; + } + + case dsFList::tSymlink: + { + CountFile += List.SLink.Name.length(); + CountLink += List.SLink.To.length(); + + unsigned int Tmp = List.SLink.To.length(); + if ((List.SLink.Compress & (1<<7)) == (1<<7)) + Tmp -= List.SLink.Name.length(); + Tmp -= List.SLink.Compress & 0x7F; + CountLinkReal += Tmp; + NumLinks++; + break; + } + } + if (List.Tag == dsFList::tTrailer) + break; + } + cout << "String Sizes: Dirs=" << CountDir << " Files=" << CountFile << + " Links=" << CountLink << " (" << CountLinkReal << ")"; + cout << " Total=" << CountDir+CountFile+CountLink << endl; + cout << "Entries: Dirs=" << NumDirs << " Files=" << NumFiles << + " Links=" << NumLinks << " Total=" << NumDirs+NumFiles+NumLinks << endl; + cout << "Totals " << SizeToStr(Bytes) << "b." 
<< endl; + + return true; +} + /*}}}*/ +// DoMkHardLinks - Generate hardlinks for duplicated files /*{{{*/ +// --------------------------------------------------------------------- +/* This scans the archive for any duplicated files, it uses the MD5 of each + file and searches a map for another match then links the two */ +struct Md5Cmp +{ + unsigned char MD5[16]; + int operator <(const Md5Cmp &rhs) const {return memcmp(MD5,rhs.MD5,sizeof(MD5)) < 0;}; + int operator <=(const Md5Cmp &rhs) const {return memcmp(MD5,rhs.MD5,sizeof(MD5)) <= 0;}; + int operator >=(const Md5Cmp &rhs) const {return memcmp(MD5,rhs.MD5,sizeof(MD5)) >= 0;}; + int operator >(const Md5Cmp &rhs) const {return memcmp(MD5,rhs.MD5,sizeof(MD5)) > 0;}; + int operator ==(const Md5Cmp &rhs) const {return memcmp(MD5,rhs.MD5,sizeof(MD5)) == 0;}; + + Md5Cmp(unsigned char Md[16]) {memcpy(MD5,Md,sizeof(MD5));}; +}; + +struct Location +{ + string Dir; + string File; + + Location() {}; + Location(string Dir,string File) : Dir(Dir), File(File) {}; +}; + +bool DoMkHardLinks(CommandLine &CmdL) +{ + if (CmdL.FileList[1] == 0) + return _error->Error("You must specify a file name"); + + // Open the file + dsMMapIO IO(CmdL.FileList[1]); + if (_error->PendingError() == true) + return false; + + dsFList List; + if (List.Step(IO) == false || List.Tag != dsFList::tHeader) + return _error->Error("Unable to read header"); + + // Make sure we have hashes + if ((IO.Header.Flags[dsFList::tNormalFile] & + dsFList::NormalFile::FlMD5) == 0 || + (IO.Header.Flags[dsFList::tHardLink] & + dsFList::HardLink::FlMD5) == 0) + return _error->Error("The file list must contain MD5 hashes"); + + string LastDir; + double Savings = 0; + unsigned long Hits = 0; + bool Act = !_config->FindB("noact",false); + map Map; + while (List.Step(IO) == true) + { + // Entering a new directory, just store it.. + if (List.Tag == dsFList::tDirStart) + { + LastDir = List.Dir.Name; + continue; + } + + /* Handle normal file entities. 
Pre-existing hard links we treat + exactly like a normal file, if two hard link chains are identical + one will be destroyed and its items placed on the other + automatcially */ + if (List.File != 0) + { + map::const_iterator I = Map.find(Md5Cmp(List.File->MD5)); + if (I == Map.end()) + { + Map[Md5Cmp(List.File->MD5)] = Location(LastDir,List.File->Name); + continue; + } + + // Compute full file names for both + string FileA = (*I).second.Dir + (*I).second.File; + struct stat StA; + string FileB = LastDir + List.File->Name; + struct stat StB; + + // Stat them + if (lstat(FileA.c_str(),&StA) != 0) + { + _error->Warning("Unable to stat %s",FileA.c_str()); + continue; + } + if (lstat(FileB.c_str(),&StB) != 0) + { + _error->Warning("Unable to stat %s",FileB.c_str()); + continue; + } + + // Verify they are on the same filesystem + if (StA.st_dev != StB.st_dev || StA.st_size != StB.st_size) + continue; + + // And not merged.. + if (StA.st_ino == StB.st_ino) + continue; + + c1out << "Dup " << FileA << endl; + c1out << " " << FileB << endl; + + // Relink the file and copy the mod time from the oldest one. + if (Act == true) + { + if (unlink(FileB.c_str()) != 0) + return _error->Errno("unlink","Failed to unlink %s",FileB.c_str()); + if (link(FileA.c_str(),FileB.c_str()) != 0) + return _error->Errno("link","Failed to link %s to %s",FileA.c_str(),FileB.c_str()); + if (StB.st_mtime > StA.st_mtime) + { + struct utimbuf Time; + Time.actime = Time.modtime = StB.st_mtime; + if (utime(FileB.c_str(),&Time) != 0) + _error->Warning("Unable to set mod time for %s",FileB.c_str()); + } + } + + // Counters + Savings += List.File->Size; + Hits++; + + continue; + } + + if (List.Tag == dsFList::tTrailer) + break; + } + + cout << "Total space saved by merging " << + SizeToStr(Savings) << "b. " << Hits << " files affected." 
<< endl; + return true; +} + /*}}}*/ +// DoLookup - Lookup a single file in the listing /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool DoLookup(CommandLine &CmdL) +{ + if (CmdL.FileSize() < 4) + return _error->Error("You must specify a file name, directory name and a entry"); + + // Open the file + dsMMapIO IO(CmdL.FileList[1]); + if (_error->PendingError() == true) + return false; + + // Index it + dsFileListDB DB; + if (DB.Generate(IO) == false) + return false; + + dsFList List; + if (DB.Lookup(IO,CmdL.FileList[2],CmdL.FileList[3],List) == false) + return _error->Error("Unable to locate item"); + List.Print(cout); + return true; +} + /*}}}*/ +// DoMD5Cache - Lookup a stream of files in the listing /*{{{*/ +// --------------------------------------------------------------------- +/* This takes a list of files names and prints out their MD5s, if possible + data is used from the cache to save IO */ +bool DoMD5Cache(CommandLine &CmdL) +{ + struct timeval Start; + gettimeofday(&Start,0); + + if (CmdL.FileList[1] == 0) + return _error->Error("You must specify a file name"); + + // Open the file + dsMMapIO IO(CmdL.FileList[1]); + if (_error->PendingError() == true) + return false; + + dsFList List; + if (List.Step(IO) == false || List.Tag != dsFList::tHeader) + return _error->Error("Unable to read header"); + + // Make sure we have hashes + if ((IO.Header.Flags[dsFList::tNormalFile] & + dsFList::NormalFile::FlMD5) == 0 || + (IO.Header.Flags[dsFList::tHardLink] & + dsFList::HardLink::FlMD5) == 0) + return _error->Error("The file list must contain MD5 hashes"); + + // Index it + dsFileListDB DB; + if (DB.Generate(IO) == false) + return false; + + // Counters + double Bytes = 0; + double MD5Bytes = 0; + unsigned long Files = 0; + unsigned long Errors = 0; + + while (!cin == false) + { + char Buf2[200]; + cin.getline(Buf2,sizeof(Buf2)); + if (Buf2[0] == 0) + continue; + Files++; + + // Stat the file + struct stat St; + if 
(stat(Buf2,&St) != 0) + { + cout << " " << Buf2 << "(stat)" << endl; + Errors++; + continue; + } + + // Lookup in the cache and make sure the file has not changed + if (LookupPath(Buf2,List,DB,IO) == false || + (signed)(List.Entity->ModTime + List.Head.Epoch) != St.st_mtime || + (List.File != 0 && List.File->Size != (unsigned)St.st_size)) + { + _error->DumpErrors(); + + // Open the file and hash it + MD5Summation Sum; + FileFd Fd(Buf2,FileFd::ReadOnly); + if (_error->PendingError() == true) + { + cout << " " << Buf2 << "(open)" << endl; + continue; + } + + if (Sum.AddFD(Fd.Fd(),Fd.Size()) == false) + { + cout << " " << Buf2 << "(md5)" << endl; + continue; + } + + // Store the new hash + List.Tag = dsFList::tNormalFile; + Sum.Result().Value(List.File->MD5); + List.File->Size = (unsigned)St.st_size; + + MD5Bytes += List.File->Size; + } + + PrintMD5(List,0,Buf2); + Bytes += List.File->Size; + } + + // Print out a summary + struct timeval Now; + gettimeofday(&Now,0); + double Delta = Now.tv_sec - Start.tv_sec + (Now.tv_usec - Start.tv_usec)/1000000.0; + cerr << Files << " files, " << SizeToStr(MD5Bytes) << "/" << + SizeToStr(Bytes) << " MD5'd, " << TimeToStr((unsigned)Delta) << endl;; + + return true; +} + /*}}}*/ +// DoMD5Dump - Dump the md5 list /*{{{*/ +// --------------------------------------------------------------------- +/* This displays a short one line dump of each record in the file */ +bool DoMD5Dump(CommandLine &CmdL) +{ + if (CmdL.FileList[1] == 0) + return _error->Error("You must specify a file name"); + + // Open the file + dsMMapIO IO(CmdL.FileList[1]); + if (_error->PendingError() == true) + return false; + + dsFList List; + if (List.Step(IO) == false || List.Tag != dsFList::tHeader) + return _error->Error("Unable to read header"); + + // Make sure we have hashes + if ((IO.Header.Flags[dsFList::tNormalFile] & + dsFList::NormalFile::FlMD5) == 0 || + (IO.Header.Flags[dsFList::tHardLink] & + dsFList::HardLink::FlMD5) == 0) + return _error->Error("The 
file list must contain MD5 hashes"); + + string Dir; + while (List.Step(IO) == true) + { + if (List.Tag == dsFList::tDirStart) + { + Dir = List.Dir.Name; + continue; + } + + PrintMD5(List,Dir.c_str()); + + if (List.Tag == dsFList::tTrailer) + break; + } + return true; +} + /*}}}*/ +// DoVerify - Verify the local tree against a file list /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool DoVerify(CommandLine &CmdL) +{ + if (CmdL.FileList[1] == 0) + return _error->Error("You must specify a file name"); + + // Open the file + dsMMapIO IO(CmdL.FileList[1]); + if (_error->PendingError() == true) + return false; + + /* Set the hashing type, we can either do a full verify or only a date + check verify */ + Compare Comp; + if (_config->FindB("FileList::MD5-Hashes",false) == true) + Comp.HashLevel = dsDirCompare::Md5Always; + else + Comp.HashLevel = dsDirCompare::Md5Date; + + // Scan the file list + if (Comp.Process(".",IO) == false) + return false; + Comp.Prog.Done(); + + // Report stats + Comp.Prog.Stats((IO.Header.Flags[dsFList::tNormalFile] & dsFList::NormalFile::FlMD5) != 0 || + (IO.Header.Flags[dsFList::tHardLink] & dsFList::HardLink::FlMD5) != 0); + + return true; +} + /*}}}*/ +// SigWinch - Window size change signal handler /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void SigWinch(int) +{ + // Riped from GNU ls +#ifdef TIOCGWINSZ + struct winsize ws; + + if (ioctl(1, TIOCGWINSZ, &ws) != -1 && ws.ws_col >= 5) + ScreenWidth = ws.ws_col - 1; + if (ScreenWidth > 250) + ScreenWidth = 250; +#endif +} + /*}}}*/ +// ShowHelp - Show the help screen /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool ShowHelp(CommandLine &CmdL) +{ + cout << PACKAGE << ' ' << VERSION << " for " << ARCHITECTURE << + " compiled on " << __DATE__ << " " << __TIME__ << endl; + + cout << + "Usage: dsync-flist [options] command [file]\n" + "\n" + "dsync-flist is a tool 
for manipulating dsync binary file lists.\n" + "It can generate the lists and check them against a tree.\n" + "\n" + "Commands:\n" + " generate - Build a file list\n" + " help - This help text\n" + " dump - Display the contents of the list\n" + " md5sums - Print out 'indices' file, suitable for use with md5sum\n" + " md5cache - Print out md5sums of the files given on stdin\n" + " link-dups - Look for duplicate files\n" + " lookup - Display a single file record\n" + " verify - Compare the file list against the local directory\n" + "\n" + "Options:\n" + " -h This help text.\n" + " -q Loggable output - no progress indicator\n" + " -qq No output except for errors\n" + " -i=? Include pattern\n" + " -e=? Exclude pattern\n" + " -c=? Read this configuration file\n" + " -o=? Set an arbitary configuration option, ie -o dir::cache=/tmp\n" + "See the dsync-flist(1) and dsync.conf(5) manual\n" + "pages for more information." << endl; + return 100; +} + /*}}}*/ + +int main(int argc, const char *argv[]) +{ + CommandLine::Args Args[] = { + {'h',"help","help",0}, + {'q',"quiet","quiet",CommandLine::IntLevel}, + {'q',"silent","quiet",CommandLine::IntLevel}, + {'i',"include","FileList::Filter:: + ",CommandLine::HasArg}, + {'e',"exclude","FileList::Filter:: - ",CommandLine::HasArg}, + {'n',"no-act","noact",0}, + {'v',"verbose","verbose",CommandLine::IntLevel}, + {0,"delete","delete",0}, + {0,"prefer-include","FileList::Prefer-Filter:: + ",CommandLine::HasArg}, + {0,"prefer-exclude","FileList::Prefer-Filter:: - ",CommandLine::HasArg}, + {0,"pi","FileList::Prefer-Filter:: + ",CommandLine::HasArg}, + {0,"pe","FileList::Prefer-Filter:: - ",CommandLine::HasArg}, + {0,"clean-include","FList::Clean-Filter:: + ",CommandLine::HasArg}, + {0,"clean-exclude","FList::Clean-Filter:: - ",CommandLine::HasArg}, + {0,"ci","FList::Clean-Filter:: + ",CommandLine::HasArg}, + {0,"ce","FList::Clean-Filter:: - ",CommandLine::HasArg}, + {0,"rsync-include","FList::RSync-Filter:: + ",CommandLine::HasArg}, + 
{0,"rsync-exclude","FList::RSync-Filter:: - ",CommandLine::HasArg}, + {0,"ri","FList::RSync-Filter:: + ",CommandLine::HasArg}, + {0,"re","FList::RSync-Filter:: - ",CommandLine::HasArg}, + {0,"md5","FileList::MD5-Hashes",0}, + {0,"rsync","FileList::RSync-Hashes",0}, + {0,"rsync-min","FileList::MinRSyncSize",CommandLine::HasArg}, + {0,"perm","FileList::Permissions",0}, + {0,"owner","FileList::Ownership",0}, + {0,"order","FileList::Order",CommandLine::HasArg}, + {'c',"config-file",0,CommandLine::ConfigFile}, + {'o',"option",0,CommandLine::ArbItem}, + {0,0,0,0}}; + CommandLine::Dispatch Cmds[] = {{"generate",&DoGenerate}, + {"help",&ShowHelp}, + {"dump",&DoDump}, + {"link-dups",&DoMkHardLinks}, + {"md5sums",&DoMD5Dump}, + {"md5cache",&DoMD5Cache}, + {"lookup",&DoLookup}, + {"verify",&DoVerify}, + {0,0}}; + CommandLine CmdL(Args,_config); + if (CmdL.Parse(argc,argv) == false) + { + _error->DumpErrors(); + return 100; + } + + // See if the help should be shown + if (_config->FindB("help") == true || + CmdL.FileSize() == 0) + return ShowHelp(CmdL); + + // Setup the output streams +/* c0out.rdbuf(cout.rdbuf()); + c1out.rdbuf(cout.rdbuf()); + c2out.rdbuf(cout.rdbuf()); */ + if (_config->FindI("quiet",0) > 0) + c0out.rdbuf(devnull.rdbuf()); + if (_config->FindI("quiet",0) > 1) + c1out.rdbuf(devnull.rdbuf()); + + // Setup the signals + signal(SIGWINCH,SigWinch); + SigWinch(0); + + // Match the operation + CmdL.DispatchArg(Cmds); + + // Print any errors or warnings found during parsing + if (_error->empty() == false) + { + + bool Errors = _error->PendingError(); + _error->DumpErrors(); + return Errors == true?100:0; + } + + return 0; +} diff --git a/tools/dsync-0.0/cmdline/dsync-flist.h b/tools/dsync-0.0/cmdline/dsync-flist.h new file mode 100644 index 00000000..aebfd08d --- /dev/null +++ b/tools/dsync-0.0/cmdline/dsync-flist.h @@ -0,0 +1,206 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: dsync-flist.h,v 1.5 1999/12/26 06:59:00 jgg Exp $ +/* 
###################################################################### + + Some header declarations.. + + ##################################################################### */ + /*}}}*/ +#ifndef DSYNC_FLIST_H +#define DSYNC_FLIST_H + +#ifdef __GNUG__ +#pragma interface "dsync-flist.h" +#endif + +#include +#include +#include +#include + +#include +#include +#include +using namespace std; + +extern ostream c0out; +extern ostream c1out; +extern ostream c2out; +extern ofstream devnull; +extern unsigned int ScreenWidth; + +class FdIO : public dsFList::IO +{ + FileFd Fd; + public: + + virtual bool Read(void *Buf,unsigned long Len) {return Fd.Read(Buf,Len);}; + virtual bool Write(const void *Buf,unsigned long Len) {return Fd.Write(Buf,Len);}; + virtual bool Seek(unsigned long Bytes) {return Fd.Seek(Bytes);}; + virtual unsigned long Tell() {return Fd.Tell();}; + + FdIO(string File,FileFd::OpenMode Mode) : Fd(File,Mode) {}; +}; + +class Progress +{ + bool Quiet; + + char LastLine[300]; + char BlankLine[300]; + + public: + + // Counters + unsigned long DirCount; + unsigned long FileCount; + unsigned long LinkCount; + unsigned long LastCount; + double Bytes; + double CkSumBytes; + struct timeval StartTime; + + double ElapsedTime(); + void Done(); + void Update(const char *Dir); + void Stats(bool Md5); + + inline void Hide() + { + if (Quiet == false) + c0out << '\r' << BlankLine << '\r'; + }; + inline void Show() + { + if (Quiet == false) + c0out << LastLine << '\r' << flush; + }; + + Progress(); + ~Progress() {Done();}; +}; + +class ListGenerator : public dsGenFileList +{ + protected: + bool Act; + bool Verbose; + unsigned long MinRSyncSize; + unsigned int StripDepth; + + virtual int Visit(const char *Directory,const char *File, + struct stat const &Stat); + virtual bool EmitMD5(const char *Dir,const char *File, + struct stat const &St,unsigned char MD5[16], + unsigned int Tag,unsigned int Flag); + virtual bool NeedsRSync(const char *Dir,const char *File, + 
dsFList::NormalFile &F); + + public: + + // Md5 Cache + dsFileListDB *DB; + dsMMapIO *DBIO; + Progress Prog; + + dsFileFilter RemoveFilter; + dsFileFilter RSyncFilter; + + ListGenerator(); + ~ListGenerator(); +}; + +class Compare : public dsDirCorrect +{ + protected: + + bool Verbose; + bool Act; + bool DoDelete; + + virtual bool Visit(dsFList &List,string Dir); + void PrintPath(ostream &out,string Dir,string Name); + + // Display status information + virtual bool GetNew(dsFList &List,string Dir) + { + Prog.Hide(); + c1out << "N "; + PrintPath(c1out,Dir,List.Entity->Name); + Prog.Show(); + return !Act || dsDirCorrect::GetNew(List,Dir); + }; + virtual bool Delete(string Dir,const char *Name,bool Now = false) + { + Prog.Hide(); + c1out << "D "; + PrintPath(c1out,Dir,Name); + Prog.Show(); + return !Act || !DoDelete || dsDirCorrect::Delete(Dir,Name); + }; + virtual bool GetChanged(dsFList &List,string Dir) + { + Prog.Hide(); + c1out << "C "; + PrintPath(c1out,Dir,List.Entity->Name); + Prog.Show(); + return !Act || dsDirCorrect::GetChanged(List,Dir); + }; + virtual bool SetTime(dsFList &List,string Dir) + { + if (Verbose == false) + return !Act || dsDirCorrect::SetTime(List,Dir); + + Prog.Hide(); + c1out << "T "; + PrintPath(c1out,Dir,List.Entity->Name); + Prog.Show(); + return !Act || dsDirCorrect::SetTime(List,Dir); + }; + virtual bool SetPerm(dsFList &List,string Dir) + { + if (Verbose == false) + return !Act || dsDirCorrect::SetPerm(List,Dir); + Prog.Hide(); + c1out << "P "; + PrintPath(c1out,Dir,List.Entity->Name); + Prog.Show(); + return !Act || dsDirCorrect::SetPerm(List,Dir); + }; + virtual bool SetOwners(dsFList &List,string Dir) + { + if (Verbose == false) + return !Act || dsDirCorrect::SetOwners(List,Dir); + Prog.Hide(); + c1out << "O "; + PrintPath(c1out,Dir,List.Entity->Name); + Prog.Show(); + return !Act || dsDirCorrect::SetOwners(List,Dir); + }; + virtual bool CheckHash(dsFList &List,string Dir,unsigned char MD5[16]) + { + Prog.CkSumBytes += 
List.File->Size; + + if (Verbose == true) + { + Prog.Hide(); + c1out << "H "; + PrintPath(c1out,Dir,List.Entity->Name); + Prog.Show(); + } + return dsDirCompare::CheckHash(List,Dir,MD5); + } + + public: + + Progress Prog; + + Compare(); +}; + +// Path utilities +bool SimplifyPath(char *Buffer); +bool ResolveLink(char *Buffer,unsigned long Max); + +#endif diff --git a/tools/dsync-0.0/cmdline/makefile b/tools/dsync-0.0/cmdline/makefile new file mode 100644 index 00000000..b07669c8 --- /dev/null +++ b/tools/dsync-0.0/cmdline/makefile @@ -0,0 +1,18 @@ +# -*- make -*- +BASE=.. +SUBDIR=cmdline + +# Bring in the default rules +include ../buildlib/defaults.mak + +# Program to test the File Filter +PROGRAM=dsync-flist +SLIBS = -ldsync +SOURCE = dsync-flist.cc path-utils.cc +include $(PROGRAM_H) + +# Program to test the File Filter +PROGRAM=dsync-cdimage +SLIBS = -ldsync +SOURCE = dsync-cdimage.cc +include $(PROGRAM_H) diff --git a/tools/dsync-0.0/cmdline/path-utils.cc b/tools/dsync-0.0/cmdline/path-utils.cc new file mode 100644 index 00000000..7944dc6d --- /dev/null +++ b/tools/dsync-0.0/cmdline/path-utils.cc @@ -0,0 +1,227 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: path-utils.cc,v 1.2 1999/03/22 02:52:46 jgg Exp $ +/* ###################################################################### + + Misc utility functions for dsync-flist to make use of. + + ##################################################################### */ + /*}}}*/ +#include "dsync-flist.h" +#include +#include +#include +#include + +// SimplifyPath - Short function to remove relative path components /*{{{*/ +// --------------------------------------------------------------------- +/* This short function removes relative path components such as ./ and ../ + from the path and removes double // as well. 
It works by seperating + the path into a list of components and then removing any un-needed + compoments */ +bool SimplifyPath(char *Buffer) +{ + // Create a list of path compoments + char *Pos[100]; + unsigned CurPos = 0; + Pos[CurPos] = Buffer; + CurPos++; + for (char *I = Buffer; *I != 0;) + { + if (*I == '/') + { + *I = 0; + I++; + Pos[CurPos] = I; + CurPos++; + } + else + I++; + } + + // Strip //, ./ and ../ + for (unsigned I = 0; I != CurPos; I++) + { + if (Pos[I] == 0) + continue; + + // Double slash + if (Pos[I][0] == 0) + { + if (I != 0) + Pos[I] = 0; + continue; + } + + // Dot slash + if (Pos[I][0] == '.' && Pos[I][1] == 0) + { + Pos[I] = 0; + continue; + } + + // Dot dot slash + if (Pos[I][0] == '.' && Pos[I][1] == '.' && Pos[I][2] == 0) + { + Pos[I] = 0; + unsigned J = I; + for (; Pos[J] == 0 && J != 0; J--); + if (Pos[J] == 0) + return _error->Error("Invalid path, too many ../s"); + Pos[J] = 0; + continue; + } + } + + // Recombine the path into full path + for (unsigned I = 0; I != CurPos; I++) + { + if (Pos[I] == 0) + continue; + memmove(Buffer,Pos[I],strlen(Pos[I])); + Buffer += strlen(Pos[I]); + + if (I + 1 != CurPos) + *Buffer++ = '/'; + } + *Buffer = 0; + + return true; +} + /*}}}*/ +// ResolveLink - Resolve a file into an unsymlinked path /*{{{*/ +// --------------------------------------------------------------------- +/* The returned path is a path that accesses the same file without + traversing a symlink, the memory buffer used should be twice as large + as the largest path. 
It uses an LRU cache of past lookups to speed things + up, just don't change directores :> */ +struct Cache +{ + string Dir; + string Trans; + unsigned long Age; +}; +static Cache DirCache[400]; +static unsigned long CacheAge = 0; +bool ResolveLink(char *Buffer,unsigned long Max) +{ + if (Buffer[0] == 0 || (Buffer[0] == '/' && Buffer[1] == 0)) + return true; + + // Lookup in the cache + Cache *Entry = 0; + for (int I = 0; I != 400; I++) + { + // Store an empty entry + if (DirCache[I].Dir.empty() == true) + { + Entry = &DirCache[I]; + Entry->Age = 0; + continue; + } + + // Store the LRU entry + if (Entry != 0 && Entry->Age > DirCache[I].Age) + Entry = &DirCache[I]; + + if (DirCache[I].Dir != Buffer || DirCache[I].Trans.empty() == true) + continue; + strcpy(Buffer,DirCache[I].Trans.c_str()); + DirCache[I].Age = CacheAge++; + return true; + } + + // Prepare the cache for our new entry + if (Entry != 0 && Buffer[strlen(Buffer) - 1] == '/') + { + Entry->Age = CacheAge++; + Entry->Dir = Buffer; + } + else + Entry = 0; + + // Resolve any symlinks + unsigned Counter = 0; + while (1) + { + Counter++; + if (Counter > 50) + return _error->Error("Exceeded allowed symlink depth"); + + // Strip off the final component name + char *I = Buffer + strlen(Buffer); + for (; I != Buffer && (*I == '/' || *I == 0); I--); + for (; I != Buffer && *I != '/'; I--); + if (I != Buffer) + I++; + + if (strlen(I) == 0) + break; + + + /* We need to remove the final slash in the directory component for + readlink to work right */ + char *End = 0; + if (I[strlen(I) - 1] == '/') + { + End = I + strlen(I) - 1; + *End = 0; + } + + int Res = readlink(Buffer,I,Max - (I - Buffer) - 2); + + // If it is a link then read the link dest over the final component + if (Res > 0) + { + I[Res] = 0; + + // Absolute path.. + if (*I == '/') + memmove(Buffer,I,strlen(I)+1); + + // Put the slash back.. 
+ if (End != 0) + { + I[Res] = '/'; + I[Res + 1] = 0; + } + + if (SimplifyPath(Buffer) == false) + return false; + } + else + { + // Put the slash back.. + if (End != 0) + *End = '/'; + break; + } + + } + + /* Here we are abusive and move the current path component to the end + of the buffer to advoid allocating space */ + char *I = Buffer + strlen(Buffer); + for (; I != Buffer && (*I == '/' || *I == 0); I--); + for (; I != Buffer && *I != '/'; I--); + if (I != Buffer) + I++; + unsigned Len = strlen(I) + 1; + char *End = Buffer + Max - Len; + memmove(End,I,Len); + *I = 0; + + + // Recurse to deal with any links in the files path + if (ResolveLink(Buffer,Max - Len) == false) + return false; + I = Buffer + strlen(Buffer); + memmove(I,End,Len); + + // Store in the cache + if (Entry != 0) + Entry->Trans = Buffer; + + return true; +} + /*}}}*/ diff --git a/tools/dsync-0.0/configure b/tools/dsync-0.0/configure new file mode 100755 index 00000000..38db8851 --- /dev/null +++ b/tools/dsync-0.0/configure @@ -0,0 +1,7196 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.61. +# +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, +# 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + + + +# PATH needs CR +# Avoid depending upon Character Ranges. 
+as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! /bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +as_nl=' +' +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + { (exit 1); exit 1; } +fi + +# Work around bugs in pre-3.0 UWIN ksh. +for as_var in ENV MAIL MAILPATH +do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. 
+for as_var in \ + LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ + LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ + LC_TELEPHONE LC_TIME +do + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then + eval $as_var=C; export $as_var + else + ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var + fi +done + +# Required to use basename. +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# CDPATH. +$as_unset CDPATH + + +if test "x$CONFIG_SHELL" = x; then + if (eval ":") 2>/dev/null; then + as_have_required=yes +else + as_have_required=no +fi + + if test $as_have_required = yes && (eval ": +(as_func_return () { + (exit \$1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 +} + +exitcode=0 +if as_func_success; then + : +else + exitcode=1 + echo as_func_success failed. +fi + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. +fi + +if as_func_ret_success; then + : +else + exitcode=1 + echo as_func_ret_success failed. +fi + +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = \"\$1\" ); then + : +else + exitcode=1 + echo positional parameters were not saved. 
+fi + +test \$exitcode = 0) || { (exit 1); exit 1; } + +( + as_lineno_1=\$LINENO + as_lineno_2=\$LINENO + test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" && + test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; } +") 2> /dev/null; then + : +else + as_candidate_shells= + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + case $as_dir in + /*) + for as_base in sh bash ksh sh5; do + as_candidate_shells="$as_candidate_shells $as_dir/$as_base" + done;; + esac +done +IFS=$as_save_IFS + + + for as_shell in $as_candidate_shells $SHELL; do + # Try only shells that exist, to save several forks. + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { ("$as_shell") 2> /dev/null <<\_ASEOF +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + +: +_ASEOF +}; then + CONFIG_SHELL=$as_shell + as_have_required=yes + if { "$as_shell" 2> /dev/null <<\_ASEOF +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + +: +(as_func_return () { + (exit $1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 +} + +exitcode=0 +if as_func_success; then + : +else + exitcode=1 + echo as_func_success failed. 
+fi + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. +fi + +if as_func_ret_success; then + : +else + exitcode=1 + echo as_func_ret_success failed. +fi + +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = "$1" ); then + : +else + exitcode=1 + echo positional parameters were not saved. +fi + +test $exitcode = 0) || { (exit 1); exit 1; } + +( + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; } + +_ASEOF +}; then + break +fi + +fi + + done + + if test "x$CONFIG_SHELL" != x; then + for as_var in BASH_ENV ENV + do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var + done + export CONFIG_SHELL + exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} +fi + + + if test $as_have_required = no; then + echo This script requires a shell more modern than all the + echo shells that I found on your system. Please install a + echo modern shell, or manually run the script under such a + echo shell if you do have one. + { (exit 1); exit 1; } +fi + + +fi + +fi + + + +(eval "as_func_return () { + (exit \$1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 +} + +exitcode=0 +if as_func_success; then + : +else + exitcode=1 + echo as_func_success failed. +fi + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. +fi + +if as_func_ret_success; then + : +else + exitcode=1 + echo as_func_ret_success failed. +fi + +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = \"\$1\" ); then + : +else + exitcode=1 + echo positional parameters were not saved. +fi + +test \$exitcode = 0") || { + echo No shell found that supports shell functions. 
+ echo Please tell autoconf@gnu.org about your system, + echo including any error possibly output before this + echo message +} + + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line after each line using $LINENO; the second 'sed' + # does the real work. The second script uses 'N' to pair each + # line-number line with the line containing $LINENO, and appends + # trailing '-' during substitution so that $LINENO is not a special + # case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # scripts with optimization help from Paolo Bonzini. Blame Lee + # E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in +-n*) + case `echo 'x\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. 
+ *) ECHO_C='\c';; + esac;; +*) + ECHO_N='-n';; +esac + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir +fi +echo >conf$$.file +if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' +elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + + +exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. 
+LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= +SHELL=${CONFIG_SHELL-/bin/sh} + +# Identity of this package. +PACKAGE_NAME= +PACKAGE_TARNAME= +PACKAGE_VERSION= +PACKAGE_STRING= +PACKAGE_BUGREPORT= + +ac_unique_file="configure.in" +# Factoring default headers for most tests. +ac_includes_default="\ +#include +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_STAT_H +# include +#endif +#ifdef STDC_HEADERS +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include +# endif +# include +#endif +#ifdef HAVE_STRINGS_H +# include +#endif +#ifdef HAVE_INTTYPES_H +# include +#endif +#ifdef HAVE_STDINT_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif" + +ac_subst_vars='SHELL +PATH_SEPARATOR +PACKAGE_NAME +PACKAGE_TARNAME +PACKAGE_VERSION +PACKAGE_STRING +PACKAGE_BUGREPORT +exec_prefix +prefix +program_transform_name +bindir +sbindir +libexecdir +datarootdir +datadir +sysconfdir +sharedstatedir +localstatedir +includedir +oldincludedir +docdir +infodir +htmldir +dvidir +pdfdir +psdir +libdir +localedir +mandir +DEFS +ECHO_C +ECHO_N +ECHO_T +LIBS +build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +CPPFLAGS +ac_ct_CC +EXEEXT +OBJEXT +build +build_cpu +build_vendor +build_os +host +host_cpu +host_vendor +host_os +CXX +CXXFLAGS +ac_ct_CXX +AR +PTHREADLIB +CXXCPP +GREP +EGREP +HAVE_C9X +DEBIANDOC_HTML +DEBIANDOC_TEXT +YODL_MAN +LIBOBJS +LTLIBOBJS' +ac_subst_files='' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS +CXX +CXXFLAGS +CCC +CXXCPP' + + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +# The variables have the same names as the options, with +# dashes changed to underlines. 
+cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. 
+ + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. 
+ expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + { (exit 1); exit 1; }; } + ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'` + eval enable_$ac_feature=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid feature name: $ac_feature" >&2 + { (exit 1); exit 1; }; } + ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'` + eval enable_$ac_feature=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. 
+ with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + 
-mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. + with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | 
--program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid package name: $ac_package" >&2 + { (exit 1); exit 1; }; } + ac_package=`echo $ac_package | sed 's/[-.]/_/g'` + eval with_$ac_package=\$ac_optarg ;; + + -without-* | --without-*) + ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid package name: $ac_package" >&2 + { (exit 1); exit 1; }; } + ac_package=`echo $ac_package | sed 's/[-.]/_/g'` + eval with_$ac_package=no ;; + + --x) + # Obsolete; use --with-x. 
+ with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) { echo "$as_me: error: unrecognized option: $ac_option +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; } + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && + { echo "$as_me: error: invalid variable name: $ac_envvar" >&2 + { (exit 1); exit 1; }; } + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + { echo "$as_me: error: missing argument to $ac_option" >&2 + { (exit 1); exit 1; }; } +fi + +# Be sure to have absolute directory names. 
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir +do + eval ac_val=\$$ac_var + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 + { (exit 1); exit 1; }; } +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. + If a cross compiler is detected then cross compile mode will be used." >&2 + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + { echo "$as_me: error: Working directory cannot be determined" >&2 + { (exit 1); exit 1; }; } +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + { echo "$as_me: error: pwd does not report name of working directory" >&2 + { (exit 1); exit 1; }; } + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$0" || +$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$0" : 'X\(//\)[^/]' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 
2>/dev/null || +echo X"$0" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 + { (exit 1); exit 1; }; } +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || { echo "$as_me: error: $ac_msg" >&2 + { (exit 1); exit 1; }; } + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. +case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures this package to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. 
+ +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+ +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] +_ACEOF +fi + +if test -n "$ac_init_help"; then + + cat <<\_ACEOF + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. -L if you have libraries in a + nonstandard directory + LIBS libraries to pass to the linker, e.g. -l + CPPFLAGS C/C++/Objective C preprocessor flags, e.g. -I if + you have headers in a nonstandard directory + CXX C++ compiler command + CXXFLAGS C++ compiler flags + CXXCPP C++ preprocessor + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +_ACEOF +ac_status=$? 
+fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +configure +generated by GNU Autoconf 2.61 + +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, +2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. 
+This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by $as_me, which was +generated by GNU Autoconf 2.61. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + echo "PATH: $as_dir" +done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. 
+ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; + 2) + ac_configure_args1="$ac_configure_args1 '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + ac_configure_args="$ac_configure_args '$ac_arg'" + ;; + esac + done +done +$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } +$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + cat <<\_ASBOX +## ---------------- ## +## Cache variables. 
## +## ---------------- ## +_ASBOX + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 +echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + *) $as_unset $ac_var ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + cat <<\_ASBOX +## ----------------- ## +## Output variables. ## +## ----------------- ## +_ASBOX + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + cat <<\_ASBOX +## ------------------- ## +## File substitutions. ## +## ------------------- ## +_ASBOX + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + cat <<\_ASBOX +## ----------- ## +## confdefs.h. 
## +## ----------- ## +_ASBOX + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + echo "$as_me: caught signal $ac_signal" + echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer explicitly selected file to automatically selected ones. +if test -n "$CONFIG_SITE"; then + set x "$CONFIG_SITE" +elif test "x$prefix" != xNONE; then + set x "$prefix/share/config.site" "$prefix/etc/config.site" +else + set x "$ac_default_prefix/share/config.site" \ + "$ac_default_prefix/etc/config.site" +fi +shift +for ac_site_file +do + if test -r "$ac_site_file"; then + { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 +echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special + # files actually), so we avoid doing that. + if test -f "$cache_file"; then + { echo "$as_me:$LINENO: loading cache $cache_file" >&5 +echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . 
"./$cache_file";; + esac + fi +else + { echo "$as_me:$LINENO: creating cache $cache_file" >&5 +echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 +echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 +echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + { echo "$as_me:$LINENO: former value: $ac_old_val" >&5 +echo "$as_me: former value: $ac_old_val" >&2;} + { echo "$as_me:$LINENO: current value: $ac_new_val" >&5 +echo "$as_me: current value: $ac_new_val" >&2;} + ac_cache_corrupted=: + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. 
+ *) ac_configure_args="$ac_configure_args '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 +echo "$as_me: error: changes in the environment can compromise the build" >&2;} + { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 +echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} + { (exit 1); exit 1; }; } +fi + + + + + + + + + + + + + + + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +ac_aux_dir= +for ac_dir in buildlib "$srcdir"/buildlib; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + { { echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in buildlib \"$srcdir\"/buildlib" >&5 +echo "$as_me: error: cannot find install-sh or install.sh in buildlib \"$srcdir\"/buildlib" >&2;} + { (exit 1); exit 1; }; } +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. 
+ + +ac_config_headers="$ac_config_headers include/config.h:buildlib/config.h.in" + + +cat >>confdefs.h <<_ACEOF +#define VERSION "0.0" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE "dsync" +_ACEOF + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="gcc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +echo "${ECHO_T}$ac_ct_CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. 
+ shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { echo "$as_me:$LINENO: result: $CC" >&5 +echo "${ECHO_T}$CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 +echo "${ECHO_T}$ac_ct_CC" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&5 +echo "$as_me: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } + +# Provide some information about the compiler. +echo "$as_me:$LINENO: checking for C compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (ac_try="$ac_compiler --version >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler --version >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -v >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler -v >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -V >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler -V >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ echo "$as_me:$LINENO: checking for C compiler default output file name" >&5 +echo $ECHO_N "checking for C compiler default output file name... $ECHO_C" >&6; } +ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` +# +# List of possible output files, starting from the most likely. +# The algorithm is not robust to junk in `.', hence go to wildcards (a.*) +# only as a last resort. b.out is created by i960 compilers. +ac_files='a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out' +# +# The IRIX 6 linker writes into existing files which may not be +# executable, retaining their permissions. Remove them first so a +# subsequent execution test works. 
+ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { (ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi + +{ echo "$as_me:$LINENO: result: $ac_file" >&5 +echo "${ECHO_T}$ac_file" >&6; } +if test -z "$ac_file"; then + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { echo "$as_me:$LINENO: error: C compiler cannot create executables +See \`config.log' for more details." 
>&5 +echo "$as_me: error: C compiler cannot create executables +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } +fi + +ac_exeext=$ac_cv_exeext + +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ echo "$as_me:$LINENO: checking whether the C compiler works" >&5 +echo $ECHO_N "checking whether the C compiler works... $ECHO_C" >&6; } +# FIXME: These cross compiler hacks should be removed for Autoconf 3.0 +# If not cross compiling, check that we can run a simple program. +if test "$cross_compiling" != yes; then + if { ac_try='./$ac_file' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { echo "$as_me:$LINENO: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } + fi + fi +fi +{ echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } + +rm -f a.out a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 +echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6; } +{ echo "$as_me:$LINENO: result: $cross_compiling" >&5 +echo "${ECHO_T}$cross_compiling" >&6; } + +{ echo "$as_me:$LINENO: checking for suffix of executables" >&5 +echo $ECHO_N "checking for suffix of executables... 
$ECHO_C" >&6; } +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +rm -f conftest$ac_cv_exeext +{ echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 +echo "${ECHO_T}$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +{ echo "$as_me:$LINENO: checking for suffix of object files" >&5 +echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6; } +if test "${ac_cv_objext+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? 
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 +echo "${ECHO_T}$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6; } +if test "${ac_cv_c_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_compiler_gnu=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6; } +GCC=`test $ac_compiler_gnu = yes && echo yes` +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 +echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6; } +if test "${ac_cv_prog_cc_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cc_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + CFLAGS="" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cc_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 +echo "${ECHO_T}$ac_cv_prog_cc_g" >&6; } +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +{ echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5 +echo $ECHO_N "checking for $CC option to accept ISO C89... $ECHO_C" >&6; } +if test "${ac_cv_prog_cc_c89+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#include +#include +#include +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. 
It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cc_c89=$ac_arg +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { echo "$as_me:$LINENO: result: none needed" >&5 +echo "${ECHO_T}none needed" >&6; } ;; + xno) + { echo "$as_me:$LINENO: result: unsupported" >&5 +echo "${ECHO_T}unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5 +echo "${ECHO_T}$ac_cv_prog_cc_c89" >&6; } ;; +esac + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +{ echo "$as_me:$LINENO: checking for library containing strerror" >&5 +echo $ECHO_N "checking for library containing strerror... $ECHO_C" >&6; } +if test "${ac_cv_search_strerror+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_func_search_save_LIBS=$LIBS +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char strerror (); +int +main () +{ +return strerror (); + ; + return 0; +} +_ACEOF +for ac_lib in '' cposix; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_search_strerror=$ac_res +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext + if test "${ac_cv_search_strerror+set}" = set; then + break +fi +done +if test "${ac_cv_search_strerror+set}" = set; then + : +else + ac_cv_search_strerror=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_search_strerror" >&5 +echo "${ECHO_T}$ac_cv_search_strerror" >&6; } +ac_res=$ac_cv_search_strerror +if test "$ac_res" != no; then + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + { { echo "$as_me:$LINENO: error: cannot run $SHELL $ac_aux_dir/config.sub" >&5 +echo "$as_me: error: cannot run $SHELL $ac_aux_dir/config.sub" >&2;} + { (exit 1); exit 1; }; } + +{ echo "$as_me:$LINENO: checking build system type" >&5 +echo $ECHO_N "checking build system type... 
$ECHO_C" >&6; } +if test "${ac_cv_build+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + { { echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5 +echo "$as_me: error: cannot guess build type; you must specify one" >&2;} + { (exit 1); exit 1; }; } +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + { { echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&5 +echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&2;} + { (exit 1); exit 1; }; } + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_build" >&5 +echo "${ECHO_T}$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) { { echo "$as_me:$LINENO: error: invalid value of canonical build" >&5 +echo "$as_me: error: invalid value of canonical build" >&2;} + { (exit 1); exit 1; }; };; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ echo "$as_me:$LINENO: checking host system type" >&5 +echo $ECHO_N "checking host system type... 
$ECHO_C" >&6; } +if test "${ac_cv_host+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + { { echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5 +echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;} + { (exit 1); exit 1; }; } +fi + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_host" >&5 +echo "${ECHO_T}$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) { { echo "$as_me:$LINENO: error: invalid value of canonical host" >&5 +echo "$as_me: error: invalid value of canonical host" >&2;} + { (exit 1); exit 1; }; };; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { echo "$as_me:$LINENO: result: $CXX" >&5 +echo "${ECHO_T}$CXX" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_CXX+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { echo "$as_me:$LINENO: result: $ac_ct_CXX" >&5 +echo "${ECHO_T}$ac_ct_CXX" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. +echo "$as_me:$LINENO: checking for C++ compiler version" >&5 +ac_compiler=`set X $ac_compile; echo $2` +{ (ac_try="$ac_compiler --version >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler --version >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -v >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler -v >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -V >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compiler -V >&5") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + +{ echo "$as_me:$LINENO: checking whether we are using the GNU C++ compiler" >&5 +echo $ECHO_N "checking whether we are using the GNU C++ compiler... $ECHO_C" >&6; } +if test "${ac_cv_cxx_compiler_gnu+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_compiler_gnu=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_compiler_gnu=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_cxx_compiler_gnu" >&5 +echo "${ECHO_T}$ac_cv_cxx_compiler_gnu" >&6; } +GXX=`test $ac_compiler_gnu = yes && echo yes` +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ echo "$as_me:$LINENO: checking whether $CXX accepts -g" >&5 +echo $ECHO_N "checking whether $CXX accepts -g... 
$ECHO_C" >&6; } +if test "${ac_cv_prog_cxx_g+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cxx_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + CXXFLAGS="" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. 
*/ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_prog_cxx_g=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ echo "$as_me:$LINENO: result: $ac_cv_prog_cxx_g" >&5 +echo "${ECHO_T}$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. 
+set dummy ${ac_tool_prefix}ar; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_AR+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_AR="${ac_tool_prefix}ar" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { echo "$as_me:$LINENO: result: $AR" >&5 +echo "${ECHO_T}$AR" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_AR"; then + ac_ct_AR=$AR + # Extract the first word of "ar", so it can be a program name with args. +set dummy ar; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_AR+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_AR="ar" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { echo "$as_me:$LINENO: result: $ac_ct_AR" >&5 +echo "${ECHO_T}$ac_ct_AR" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + if test "x$ac_ct_AR" = x; then + AR=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +else + AR="$ac_cv_prog_AR" +fi + + +if test "$AR" = ":"; then + { { echo "$as_me:$LINENO: error: failed: Sorry I could not find ar in the path" >&5 +echo "$as_me: error: failed: Sorry I could not find ar in the path" >&2;} + { (exit 1); exit 1; }; } +fi + + +{ echo "$as_me:$LINENO: checking for pthread_create in -lpthread" >&5 +echo $ECHO_N "checking for pthread_create in -lpthread... $ECHO_C" >&6; } +if test "${ac_cv_lib_pthread_pthread_create+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpthread $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_lib_pthread_pthread_create=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_pthread_pthread_create=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_lib_pthread_pthread_create" >&5 +echo "${ECHO_T}$ac_cv_lib_pthread_pthread_create" >&6; } +if test $ac_cv_lib_pthread_pthread_create = yes; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_PTHREAD 1 +_ACEOF + PTHREADLIB="-lpthread" +fi + + + +{ echo "$as_me:$LINENO: checking system architecture" >&5 +echo $ECHO_N "checking system architecture... 
$ECHO_C" >&6; } +archset="`awk '$1 == "'$host_cpu'" { print $2 }' $srcdir/buildlib/archtable`" +if test "x$archset" = "x"; then + { { echo "$as_me:$LINENO: error: failed: use --host=" >&5 +echo "$as_me: error: failed: use --host=" >&2;} + { (exit 1); exit 1; }; } +fi +{ echo "$as_me:$LINENO: result: $archset" >&5 +echo "${ECHO_T}$archset" >&6; } +cat >>confdefs.h <<_ACEOF +#define ARCHITECTURE "$archset" +_ACEOF + + +{ echo "$as_me:$LINENO: checking for C9x integer types" >&5 +echo $ECHO_N "checking for C9x integer types... $ECHO_C" >&6; } +if test "${c9x_ints+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +int +main () +{ +uint8_t Foo1;uint16_t Foo2;uint32_t Foo3;uint64_t Foo + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + c9x_ints=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + c9x_ints=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $c9x_ints" >&5 +echo "${ECHO_T}$c9x_ints" >&6; } + + +if archline="`sed -ne 's/^'$archset':[ ]\+\(.*\)/\1/gp' $srcdir/buildlib/sizetable`"; then + + set $archline + if test "$1" = "little"; then + ac_cv_c_bigendian=no + else + ac_cv_c_bigendian=yes + fi + size_char=$2 + size_int=$3 + size_short=$4 + size_long=$5 +fi + +if test "$cross_compiling" = "yes" -a "$archline" = ""; then + { { echo "$as_me:$LINENO: error: When cross compiling" >&5 +echo "$as_me: error: When cross compiling" >&2;} + { (exit architecture must be present in sizetable); exit architecture must be present in sizetable; }; } +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +{ echo "$as_me:$LINENO: checking how to run the C++ preprocessor" >&5 +echo $ECHO_N "checking how to run the C++ preprocessor... $ECHO_C" >&6; } +if test -z "$CXXCPP"; then + if test "${ac_cv_prog_CXXCPP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. 
*/ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi + +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi + +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
+rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +{ echo "$as_me:$LINENO: result: $CXXCPP" >&5 +echo "${ECHO_T}$CXXCPP" >&6; } +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then + : +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi + +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#include +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then + # Broken: success on invalid input. +continue +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break +fi + +rm -f conftest.err conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.err conftest.$ac_ext +if $ac_preproc_ok; then + : +else + { { echo "$as_me:$LINENO: error: C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details." >&5 +echo "$as_me: error: C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; } +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +{ echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5 +echo $ECHO_N "checking for grep that handles long lines and -e... 
$ECHO_C" >&6; } +if test "${ac_cv_path_GREP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # Extract the first word of "grep ggrep" to use in msg output +if test -z "$GREP"; then +set dummy grep ggrep; ac_prog_name=$2 +if test "${ac_cv_path_GREP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_path_GREP_found=false +# Loop through the user's path and test for each of PROGNAME-LIST +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue + # Check for GNU ac_path_GREP and select it if it is found. + # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + ac_count=`expr $ac_count + 1` + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + + $ac_path_GREP_found && break 3 + done +done + +done +IFS=$as_save_IFS + + +fi + +GREP="$ac_cv_path_GREP" +if test -z "$GREP"; then + { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 +echo 
"$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} + { (exit 1); exit 1; }; } +fi + +else + ac_cv_path_GREP=$GREP +fi + + +fi +{ echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5 +echo "${ECHO_T}$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ echo "$as_me:$LINENO: checking for egrep" >&5 +echo $ECHO_N "checking for egrep... $ECHO_C" >&6; } +if test "${ac_cv_path_EGREP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + # Extract the first word of "egrep" to use in msg output +if test -z "$EGREP"; then +set dummy egrep; ac_prog_name=$2 +if test "${ac_cv_path_EGREP+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_path_EGREP_found=false +# Loop through the user's path and test for each of PROGNAME-LIST +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue + # Check for GNU ac_path_EGREP and select it if it is found. 
+ # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + ac_count=`expr $ac_count + 1` + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + + $ac_path_EGREP_found && break 3 + done +done + +done +IFS=$as_save_IFS + + +fi + +EGREP="$ac_cv_path_EGREP" +if test -z "$EGREP"; then + { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 +echo "$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} + { (exit 1); exit 1; }; } +fi + +else + ac_cv_path_EGREP=$EGREP +fi + + + fi +fi +{ echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5 +echo "${ECHO_T}$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ echo "$as_me:$LINENO: checking for ANSI C header files" >&5 +echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6; } +if test "${ac_cv_header_stdc+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_header_stdc=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_header_stdc=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then + : +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then + : +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then + : +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +rm -f conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + : +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi + + +fi +fi +{ echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 +echo "${ECHO_T}$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +cat >>confdefs.h <<\_ACEOF +#define STDC_HEADERS 1 +_ACEOF + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. 
+ + + + + + + + + +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do +as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh` +{ echo "$as_me:$LINENO: checking for $ac_header" >&5 +echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + eval "$as_ac_Header=yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + eval "$as_ac_Header=no" +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +ac_res=`eval echo '${'$as_ac_Header'}'` + { echo "$as_me:$LINENO: result: $ac_res" >&5 +echo "${ECHO_T}$ac_res" >&6; } +if test `eval echo '${'$as_ac_Header'}'` = yes; then + cat >>confdefs.h <<_ACEOF +#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + +{ echo "$as_me:$LINENO: checking whether byte ordering is bigendian" >&5 +echo $ECHO_N "checking whether byte ordering is bigendian... $ECHO_C" >&6; } +if test "${ac_cv_c_bigendian+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + # See if sys/param.h defines the BYTE_ORDER macro. +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. 
*/ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#include + +int +main () +{ +#if ! (defined BYTE_ORDER && defined BIG_ENDIAN && defined LITTLE_ENDIAN \ + && BYTE_ORDER && BIG_ENDIAN && LITTLE_ENDIAN) + bogus endian macros +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + # It does; now see whether it defined to BIG_ENDIAN or not. +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#include + +int +main () +{ +#if BYTE_ORDER != BIG_ENDIAN + not big endian +#endif + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_c_bigendian=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_c_bigendian=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # It does not; compile a test program. +if test "$cross_compiling" = yes; then + # try to guess the endianness by grepping values into an object file + ac_cv_c_bigendian=unknown + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; +short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; +void _ascii () { char *s = (char *) ascii_mm; s = (char *) ascii_ii; } +short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; +short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; +void _ebcdic () { char *s = (char *) ebcdic_mm; s = (char *) ebcdic_ii; } +int +main () +{ + _ascii (); _ebcdic (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + if grep BIGenDianSyS conftest.$ac_objext >/dev/null ; then + ac_cv_c_bigendian=yes +fi +if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then + if test "$ac_cv_c_bigendian" = unknown; then + ac_cv_c_bigendian=no + else + # finding both strings is unlikely to happen, but who knows? + ac_cv_c_bigendian=unknown + fi +fi +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ + + /* Are we little or big endian? From Harbison&Steele. */ + union + { + long int l; + char c[sizeof (long int)]; + } u; + u.l = 1; + return u.c[sizeof (long int) - 1] == 1; + + ; + return 0; +} +_ACEOF +rm -f conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_c_bigendian=no +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +ac_cv_c_bigendian=yes +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $ac_cv_c_bigendian" >&5 +echo "${ECHO_T}$ac_cv_c_bigendian" >&6; } +case $ac_cv_c_bigendian in + yes) + +cat >>confdefs.h <<\_ACEOF +#define WORDS_BIGENDIAN 1 +_ACEOF + ;; + no) + ;; + *) + { { echo "$as_me:$LINENO: error: unknown endianness +presetting ac_cv_c_bigendian=no (or yes) will help" >&5 +echo "$as_me: error: unknown endianness +presetting ac_cv_c_bigendian=no (or yes) will help" >&2;} + { (exit 1); exit 1; }; } ;; +esac + + +HAVE_C9X=yes +if test x"$c9x_ints" = x"no"; then + { echo "$as_me:$LINENO: checking for char" >&5 +echo $ECHO_N "checking for char... $ECHO_C" >&6; } +if test "${ac_cv_type_char+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +typedef char ac__type_new_; +int +main () +{ +if ((ac__type_new_ *) 0) + return 0; +if (sizeof (ac__type_new_)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_type_char=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_type_char=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $ac_cv_type_char" >&5 +echo "${ECHO_T}$ac_cv_type_char" >&6; } + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ echo "$as_me:$LINENO: checking size of char" >&5 +echo $ECHO_N "checking size of char... $ECHO_C" >&6; } +if test "${ac_cv_sizeof_char+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef char ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= 0)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_lo=0 ac_mid=0 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +$ac_includes_default + typedef char ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=$ac_mid; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo=`expr $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef char ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) < 0)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=-1 ac_mid=-1 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef char ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_lo=$ac_mid; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_hi=`expr '(' $ac_mid ')' - 1` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo= ac_hi= +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +$ac_includes_default + typedef char ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=$ac_mid +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo=`expr '(' $ac_mid ')' + 1` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in +?*) ac_cv_sizeof_char=$ac_lo;; +'') if test "$ac_cv_type_char" = yes; then + { { echo "$as_me:$LINENO: error: cannot compute sizeof (char) +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (char) +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } + else + ac_cv_sizeof_char=0 + fi ;; +esac +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef char ac__type_sizeof_; +static long int longval () { return (long int) (sizeof (ac__type_sizeof_)); } +static unsigned long int ulongval () { return (long int) (sizeof (ac__type_sizeof_)); } +#include +#include +int +main () +{ + + FILE *f = fopen ("conftest.val", "w"); + if (! 
f) + return 1; + if (((long int) (sizeof (ac__type_sizeof_))) < 0) + { + long int i = longval (); + if (i != ((long int) (sizeof (ac__type_sizeof_)))) + return 1; + fprintf (f, "%ld\n", i); + } + else + { + unsigned long int i = ulongval (); + if (i != ((long int) (sizeof (ac__type_sizeof_)))) + return 1; + fprintf (f, "%lu\n", i); + } + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +rm -f conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sizeof_char=`cat conftest.val` +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +if test "$ac_cv_type_char" = yes; then + { { echo "$as_me:$LINENO: error: cannot compute sizeof (char) +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (char) +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } + else + ac_cv_sizeof_char=0 + fi +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi +rm -f conftest.val +fi +{ echo "$as_me:$LINENO: result: $ac_cv_sizeof_char" >&5 +echo "${ECHO_T}$ac_cv_sizeof_char" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_CHAR $ac_cv_sizeof_char +_ACEOF + + + { echo "$as_me:$LINENO: checking for int" >&5 +echo $ECHO_N "checking for int... 
$ECHO_C" >&6; } +if test "${ac_cv_type_int+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +typedef int ac__type_new_; +int +main () +{ +if ((ac__type_new_ *) 0) + return 0; +if (sizeof (ac__type_new_)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_type_int=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_type_int=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $ac_cv_type_int" >&5 +echo "${ECHO_T}$ac_cv_type_int" >&6; } + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ echo "$as_me:$LINENO: checking size of int" >&5 +echo $ECHO_N "checking size of int... $ECHO_C" >&6; } +if test "${ac_cv_sizeof_int+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +$ac_includes_default + typedef int ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= 0)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_lo=0 ac_mid=0 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef int ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=$ac_mid; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo=`expr $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef int ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) < 0)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=-1 ac_mid=-1 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef int ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_lo=$ac_mid; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_hi=`expr '(' $ac_mid ')' - 1` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo= ac_hi= +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef int ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=$ac_mid +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo=`expr '(' $ac_mid ')' + 1` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in +?*) ac_cv_sizeof_int=$ac_lo;; +'') if test "$ac_cv_type_int" = yes; then + { { echo "$as_me:$LINENO: error: cannot compute sizeof (int) +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (int) +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } + else + ac_cv_sizeof_int=0 + fi ;; +esac +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef int ac__type_sizeof_; +static long int longval () { return (long int) (sizeof (ac__type_sizeof_)); } +static unsigned long int ulongval () { return (long int) (sizeof (ac__type_sizeof_)); } +#include +#include +int +main () +{ + + FILE *f = fopen ("conftest.val", "w"); + if (! f) + return 1; + if (((long int) (sizeof (ac__type_sizeof_))) < 0) + { + long int i = longval (); + if (i != ((long int) (sizeof (ac__type_sizeof_)))) + return 1; + fprintf (f, "%ld\n", i); + } + else + { + unsigned long int i = ulongval (); + if (i != ((long int) (sizeof (ac__type_sizeof_)))) + return 1; + fprintf (f, "%lu\n", i); + } + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +rm -f conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sizeof_int=`cat conftest.val` +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +if test "$ac_cv_type_int" = yes; then + { { echo "$as_me:$LINENO: error: cannot compute sizeof (int) +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (int) +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } + else + ac_cv_sizeof_int=0 + fi +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi +rm -f conftest.val +fi +{ echo "$as_me:$LINENO: result: $ac_cv_sizeof_int" >&5 +echo "${ECHO_T}$ac_cv_sizeof_int" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_INT $ac_cv_sizeof_int +_ACEOF + + + { echo "$as_me:$LINENO: checking for short" >&5 +echo $ECHO_N "checking for short... $ECHO_C" >&6; } +if test "${ac_cv_type_short+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +typedef short ac__type_new_; +int +main () +{ +if ((ac__type_new_ *) 0) + return 0; +if (sizeof (ac__type_new_)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_type_short=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_type_short=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $ac_cv_type_short" >&5 +echo "${ECHO_T}$ac_cv_type_short" >&6; } + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ echo "$as_me:$LINENO: checking size of short" >&5 +echo $ECHO_N "checking size of short... $ECHO_C" >&6; } +if test "${ac_cv_sizeof_short+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef short ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= 0)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_lo=0 ac_mid=0 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef short ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=$ac_mid; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo=`expr $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef short ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) < 0)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=-1 ac_mid=-1 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef short ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_lo=$ac_mid; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_hi=`expr '(' $ac_mid ')' - 1` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo= ac_hi= +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. 
*/ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef short ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=$ac_mid +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo=`expr '(' $ac_mid ')' + 1` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in +?*) ac_cv_sizeof_short=$ac_lo;; +'') if test "$ac_cv_type_short" = yes; then + { { echo "$as_me:$LINENO: error: cannot compute sizeof (short) +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (short) +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } + else + ac_cv_sizeof_short=0 + fi ;; +esac +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef short ac__type_sizeof_; +static long int longval () { return (long int) (sizeof (ac__type_sizeof_)); } +static unsigned long int ulongval () { return (long int) (sizeof (ac__type_sizeof_)); } +#include +#include +int +main () +{ + + FILE *f = fopen ("conftest.val", "w"); + if (! 
f) + return 1; + if (((long int) (sizeof (ac__type_sizeof_))) < 0) + { + long int i = longval (); + if (i != ((long int) (sizeof (ac__type_sizeof_)))) + return 1; + fprintf (f, "%ld\n", i); + } + else + { + unsigned long int i = ulongval (); + if (i != ((long int) (sizeof (ac__type_sizeof_)))) + return 1; + fprintf (f, "%lu\n", i); + } + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +rm -f conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sizeof_short=`cat conftest.val` +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +if test "$ac_cv_type_short" = yes; then + { { echo "$as_me:$LINENO: error: cannot compute sizeof (short) +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (short) +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } + else + ac_cv_sizeof_short=0 + fi +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi +rm -f conftest.val +fi +{ echo "$as_me:$LINENO: result: $ac_cv_sizeof_short" >&5 +echo "${ECHO_T}$ac_cv_sizeof_short" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_SHORT $ac_cv_sizeof_short +_ACEOF + + + { echo "$as_me:$LINENO: checking for long" >&5 +echo $ECHO_N "checking for long... 
$ECHO_C" >&6; } +if test "${ac_cv_type_long+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +typedef long ac__type_new_; +int +main () +{ +if ((ac__type_new_ *) 0) + return 0; +if (sizeof (ac__type_new_)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_type_long=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_type_long=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ echo "$as_me:$LINENO: result: $ac_cv_type_long" >&5 +echo "${ECHO_T}$ac_cv_type_long" >&6; } + +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ echo "$as_me:$LINENO: checking size of long" >&5 +echo $ECHO_N "checking size of long... $ECHO_C" >&6; } +if test "${ac_cv_sizeof_long+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +$ac_includes_default + typedef long ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= 0)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_lo=0 ac_mid=0 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef long ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=$ac_mid; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo=`expr $ac_mid + 1` + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid + 1` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef long ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) < 0)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=-1 ac_mid=-1 + while :; do + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef long ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) >= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_lo=$ac_mid; break +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_hi=`expr '(' $ac_mid ')' - 1` + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + ac_mid=`expr 2 '*' $ac_mid` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo= ac_hi= +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + ac_mid=`expr '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo` + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef long ac__type_sizeof_; +int +main () +{ +static int test_array [1 - 2 * !(((long int) (sizeof (ac__type_sizeof_))) <= $ac_mid)]; +test_array [0] = 0 + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + ac_hi=$ac_mid +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_lo=`expr '(' $ac_mid ')' + 1` +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in +?*) ac_cv_sizeof_long=$ac_lo;; +'') if test "$ac_cv_type_long" = yes; then + { { echo "$as_me:$LINENO: error: cannot compute sizeof (long) +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (long) +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } + else + ac_cv_sizeof_long=0 + fi ;; +esac +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default + typedef long ac__type_sizeof_; +static long int longval () { return (long int) (sizeof (ac__type_sizeof_)); } +static unsigned long int ulongval () { return (long int) (sizeof (ac__type_sizeof_)); } +#include +#include +int +main () +{ + + FILE *f = fopen ("conftest.val", "w"); + if (! f) + return 1; + if (((long int) (sizeof (ac__type_sizeof_))) < 0) + { + long int i = longval (); + if (i != ((long int) (sizeof (ac__type_sizeof_)))) + return 1; + fprintf (f, "%ld\n", i); + } + else + { + unsigned long int i = ulongval (); + if (i != ((long int) (sizeof (ac__type_sizeof_)))) + return 1; + fprintf (f, "%lu\n", i); + } + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +rm -f conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + ac_cv_sizeof_long=`cat conftest.val` +else + echo "$as_me: program exited with status $ac_status" >&5 +echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +if test "$ac_cv_type_long" = yes; then + { { echo "$as_me:$LINENO: error: cannot compute sizeof (long) +See \`config.log' for more details." >&5 +echo "$as_me: error: cannot compute sizeof (long) +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; } + else + ac_cv_sizeof_long=0 + fi +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext +fi +rm -f conftest.val +fi +{ echo "$as_me:$LINENO: result: $ac_cv_sizeof_long" >&5 +echo "${ECHO_T}$ac_cv_sizeof_long" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_LONG $ac_cv_sizeof_long +_ACEOF + + + + HAVE_C9X= + +fi + +# Extract the first word of "debiandoc2html", so it can be a program name with args. +set dummy debiandoc2html; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_DEBIANDOC_HTML+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$DEBIANDOC_HTML"; then + ac_cv_prog_DEBIANDOC_HTML="$DEBIANDOC_HTML" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_DEBIANDOC_HTML=""yes"" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + + test -z "$ac_cv_prog_DEBIANDOC_HTML" && ac_cv_prog_DEBIANDOC_HTML="""" +fi +fi +DEBIANDOC_HTML=$ac_cv_prog_DEBIANDOC_HTML +if test -n "$DEBIANDOC_HTML"; then + { echo "$as_me:$LINENO: result: $DEBIANDOC_HTML" >&5 +echo "${ECHO_T}$DEBIANDOC_HTML" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + +# Extract the first word of "debiandoc2text", so it can be a program name with args. +set dummy debiandoc2text; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_DEBIANDOC_TEXT+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$DEBIANDOC_TEXT"; then + ac_cv_prog_DEBIANDOC_TEXT="$DEBIANDOC_TEXT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_DEBIANDOC_TEXT=""yes"" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + + test -z "$ac_cv_prog_DEBIANDOC_TEXT" && ac_cv_prog_DEBIANDOC_TEXT="""" +fi +fi +DEBIANDOC_TEXT=$ac_cv_prog_DEBIANDOC_TEXT +if test -n "$DEBIANDOC_TEXT"; then + { echo "$as_me:$LINENO: result: $DEBIANDOC_TEXT" >&5 +echo "${ECHO_T}$DEBIANDOC_TEXT" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + +# Extract the first word of "yodl2man", so it can be a program name with args. 
+set dummy yodl2man; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_YODL_MAN+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$YODL_MAN"; then + ac_cv_prog_YODL_MAN="$YODL_MAN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_YODL_MAN=""yes"" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + + test -z "$ac_cv_prog_YODL_MAN" && ac_cv_prog_YODL_MAN="""" +fi +fi +YODL_MAN=$ac_cv_prog_YODL_MAN +if test -n "$YODL_MAN"; then + { echo "$as_me:$LINENO: result: $YODL_MAN" >&5 +echo "${ECHO_T}$YODL_MAN" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + +ac_config_files="$ac_config_files environment.mak:buildlib/environment.mak.in makefile:buildlib/makefile.in" + +ac_config_commands="$ac_config_commands default" + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. 
+# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 +echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + *) $as_unset $ac_var ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes (double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \). + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + test "x$cache_file" != "x/dev/null" && + { echo "$as_me:$LINENO: updating cache $cache_file" >&5 +echo "$as_me: updating cache $cache_file" >&6;} + cat confcache >$cache_file + else + { echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5 +echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. 
Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext" + ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + +: ${CONFIG_STATUS=./config.status} +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 +echo "$as_me: creating $CONFIG_STATUS" >&6;} +cat >$CONFIG_STATUS <<_ACEOF +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false +SHELL=\${CONFIG_SHELL-$SHELL} +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + + + +# PATH needs CR +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + echo "#! 
/bin/sh" >conf$$.sh + echo "exit 0" >>conf$$.sh + chmod +x conf$$.sh + if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then + PATH_SEPARATOR=';' + else + PATH_SEPARATOR=: + fi + rm -f conf$$.sh +fi + +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +as_nl=' +' +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +case $0 in + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + { (exit 1); exit 1; } +fi + +# Work around bugs in pre-3.0 UWIN ksh. +for as_var in ENV MAIL MAILPATH +do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +for as_var in \ + LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ + LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ + LC_TELEPHONE LC_TIME +do + if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then + eval $as_var=C; export $as_var + else + ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var + fi +done + +# Required to use basename. 
+if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + + +# Name of the executable. +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# CDPATH. +$as_unset CDPATH + + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line after each line using $LINENO; the second 'sed' + # does the real work. The second script uses 'N' to pair each + # line-number line with the line containing $LINENO, and appends + # trailing '-' during substitution so that $LINENO is not a special + # case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # scripts with optimization help from Paolo Bonzini. Blame Lee + # E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . 
"./$as_me.lineno" + # Exit status is that of the last command. + exit +} + + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in +-n*) + case `echo 'x\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + *) ECHO_C='\c';; + esac;; +*) + ECHO_N='-n';; +esac + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir +fi +echo >conf$$.file +if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' +elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. 
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 + +# Save the log message, to keep $[0] and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by $as_me, which was +generated by GNU Autoconf 2.61. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +cat >>$CONFIG_STATUS <<_ACEOF +# Files that config.status was made for. +config_files="$ac_config_files" +config_headers="$ac_config_headers" +config_commands="$ac_config_commands" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +ac_cs_usage="\ +\`$as_me' instantiates files from templates according to the +current configuration. + +Usage: $0 [OPTIONS] [FILE]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + -q, --quiet do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to ." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF +ac_cs_version="\\ +config.status +configured by $0, generated by GNU Autoconf 2.61, + with options \\"`echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" + +Copyright (C) 2006 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." 
+ +ac_pwd='$ac_pwd' +srcdir='$srcdir' +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +# If no file are specified by the user, then we need to provide default +# value. By we need to know if files were specified by the user. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + echo "$ac_cs_version"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + CONFIG_FILES="$CONFIG_FILES $ac_optarg" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + { echo "$as_me: error: ambiguous option: $1 +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; };; + --help | --hel | -h ) + echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) { echo "$as_me: error: unrecognized option: $1 +Try \`$0 --help' for more information." 
>&2 + { (exit 1); exit 1; }; } ;; + + *) ac_config_targets="$ac_config_targets $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF +if \$ac_cs_recheck; then + echo "running CONFIG_SHELL=$SHELL $SHELL $0 "$ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6 + CONFIG_SHELL=$SHELL + export CONFIG_SHELL + exec $SHELL "$0"$ac_configure_args \$ac_configure_extra_args --no-create --no-recursion +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "include/config.h") CONFIG_HEADERS="$CONFIG_HEADERS include/config.h:buildlib/config.h.in" ;; + "environment.mak") CONFIG_FILES="$CONFIG_FILES environment.mak:buildlib/environment.mak.in" ;; + "makefile") CONFIG_FILES="$CONFIG_FILES makefile:buildlib/makefile.in" ;; + "default") CONFIG_COMMANDS="$CONFIG_COMMANDS default" ;; + + *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 +echo "$as_me: error: invalid argument: $ac_config_target" >&2;} + { (exit 1); exit 1; }; };; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. 
+if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= + trap 'exit_status=$? + { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status +' 0 + trap '{ (exit 1); exit 1; }' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -n "$tmp" && test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || +{ + echo "$me: cannot create a temporary directory in ." >&2 + { (exit 1); exit 1; } +} + +# +# Set up the sed scripts for CONFIG_FILES section. +# + +# No need to generate the scripts if there are no CONFIG_FILES. 
+# This happens for instance when ./config.status config.h +if test -n "$CONFIG_FILES"; then + +_ACEOF + + + +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + cat >conf$$subs.sed <<_ACEOF +SHELL!$SHELL$ac_delim +PATH_SEPARATOR!$PATH_SEPARATOR$ac_delim +PACKAGE_NAME!$PACKAGE_NAME$ac_delim +PACKAGE_TARNAME!$PACKAGE_TARNAME$ac_delim +PACKAGE_VERSION!$PACKAGE_VERSION$ac_delim +PACKAGE_STRING!$PACKAGE_STRING$ac_delim +PACKAGE_BUGREPORT!$PACKAGE_BUGREPORT$ac_delim +exec_prefix!$exec_prefix$ac_delim +prefix!$prefix$ac_delim +program_transform_name!$program_transform_name$ac_delim +bindir!$bindir$ac_delim +sbindir!$sbindir$ac_delim +libexecdir!$libexecdir$ac_delim +datarootdir!$datarootdir$ac_delim +datadir!$datadir$ac_delim +sysconfdir!$sysconfdir$ac_delim +sharedstatedir!$sharedstatedir$ac_delim +localstatedir!$localstatedir$ac_delim +includedir!$includedir$ac_delim +oldincludedir!$oldincludedir$ac_delim +docdir!$docdir$ac_delim +infodir!$infodir$ac_delim +htmldir!$htmldir$ac_delim +dvidir!$dvidir$ac_delim +pdfdir!$pdfdir$ac_delim +psdir!$psdir$ac_delim +libdir!$libdir$ac_delim +localedir!$localedir$ac_delim +mandir!$mandir$ac_delim +DEFS!$DEFS$ac_delim +ECHO_C!$ECHO_C$ac_delim +ECHO_N!$ECHO_N$ac_delim +ECHO_T!$ECHO_T$ac_delim +LIBS!$LIBS$ac_delim +build_alias!$build_alias$ac_delim +host_alias!$host_alias$ac_delim +target_alias!$target_alias$ac_delim +CC!$CC$ac_delim +CFLAGS!$CFLAGS$ac_delim +LDFLAGS!$LDFLAGS$ac_delim +CPPFLAGS!$CPPFLAGS$ac_delim +ac_ct_CC!$ac_ct_CC$ac_delim +EXEEXT!$EXEEXT$ac_delim +OBJEXT!$OBJEXT$ac_delim +build!$build$ac_delim +build_cpu!$build_cpu$ac_delim +build_vendor!$build_vendor$ac_delim +build_os!$build_os$ac_delim +host!$host$ac_delim +host_cpu!$host_cpu$ac_delim +host_vendor!$host_vendor$ac_delim +host_os!$host_os$ac_delim +CXX!$CXX$ac_delim +CXXFLAGS!$CXXFLAGS$ac_delim +ac_ct_CXX!$ac_ct_CXX$ac_delim +AR!$AR$ac_delim +PTHREADLIB!$PTHREADLIB$ac_delim +CXXCPP!$CXXCPP$ac_delim +GREP!$GREP$ac_delim 
+EGREP!$EGREP$ac_delim +HAVE_C9X!$HAVE_C9X$ac_delim +DEBIANDOC_HTML!$DEBIANDOC_HTML$ac_delim +DEBIANDOC_TEXT!$DEBIANDOC_TEXT$ac_delim +YODL_MAN!$YODL_MAN$ac_delim +LIBOBJS!$LIBOBJS$ac_delim +LTLIBOBJS!$LTLIBOBJS$ac_delim +_ACEOF + + if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 66; then + break + elif $ac_last_try; then + { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed` +if test -n "$ac_eof"; then + ac_eof=`echo "$ac_eof" | sort -nru | sed 1q` + ac_eof=`expr $ac_eof + 1` +fi + +cat >>$CONFIG_STATUS <<_ACEOF +cat >"\$tmp/subs-1.sed" <<\CEOF$ac_eof +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b end +_ACEOF +sed ' +s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g +s/^/s,@/; s/!/@,|#_!!_#|/ +:n +t n +s/'"$ac_delim"'$/,g/; t +s/$/\\/; p +N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n +' >>$CONFIG_STATUS >$CONFIG_STATUS <<_ACEOF +:end +s/|#_!!_#|//g +CEOF$ac_eof +_ACEOF + + +# VPATH may cause trouble with some makes, so we remove $(srcdir), +# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). +if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=/{ +s/:*\$(srcdir):*/:/ +s/:*\${srcdir}:*/:/ +s/:*@srcdir@:*/:/ +s/^\([^=]*=[ ]*\):*/\1/ +s/:*$// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF +fi # test -n "$CONFIG_FILES" + + +for ac_tag in :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) { { echo "$as_me:$LINENO: error: Invalid tag $ac_tag." >&5 +echo "$as_me: error: Invalid tag $ac_tag." 
>&2;} + { (exit 1); exit 1; }; };; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + { { echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5 +echo "$as_me: error: cannot find input file: $ac_f" >&2;} + { (exit 1); exit 1; }; };; + esac + ac_file_inputs="$ac_file_inputs $ac_f" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input="Generated from "`IFS=: + echo $* | sed 's|^[^:]*/||;s|:[^:]*/|, |g'`" by configure." + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { echo "$as_me:$LINENO: creating $ac_file" >&5 +echo "$as_me: creating $ac_file" >&6;} + fi + + case $ac_tag in + *:-:* | *:-) cat >"$tmp/stdin";; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 
2>/dev/null || +echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + { as_dir="$ac_dir" + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || { { echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 +echo "$as_me: error: cannot create directory $as_dir" >&2;} + { (exit 1); exit 1; }; }; } + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. 
+ ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= + +case `sed -n '/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p +' $ac_file_inputs` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? 
+cat >>$CONFIG_STATUS <<_ACEOF + sed "$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s&@configure_input@&$configure_input&;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +$ac_datarootdir_hack +" $ac_file_inputs | sed -f "$tmp/subs-1.sed" >$tmp/out + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && + { echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined." >&5 +echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined." >&2;} + + rm -f "$tmp/stdin" + case $ac_file in + -) cat "$tmp/out"; rm -f "$tmp/out";; + *) rm -f "$ac_file"; mv "$tmp/out" $ac_file;; + esac + ;; + :H) + # + # CONFIG_HEADER + # +_ACEOF + +# Transform confdefs.h into a sed script `conftest.defines', that +# substitutes the proper values into config.h.in to produce config.h. +rm -f conftest.defines conftest.tail +# First, append a space to every undef/define line, to ease matching. +echo 's/$/ /' >conftest.defines +# Then, protect against being on the right side of a sed subst, or in +# an unquoted here document, in config.status. If some macros were +# called several times there might be several #defines for the same +# symbol, which is useless. But do not sort them, since the last +# AC_DEFINE must be honored. 
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +# These sed commands are passed to sed as "A NAME B PARAMS C VALUE D", where +# NAME is the cpp macro being defined, VALUE is the value it is being given. +# PARAMS is the parameter list in the macro definition--in most cases, it's +# just an empty string. +ac_dA='s,^\\([ #]*\\)[^ ]*\\([ ]*' +ac_dB='\\)[ (].*,\\1define\\2' +ac_dC=' ' +ac_dD=' ,' + +uniq confdefs.h | + sed -n ' + t rset + :rset + s/^[ ]*#[ ]*define[ ][ ]*// + t ok + d + :ok + s/[\\&,]/\\&/g + s/^\('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/ '"$ac_dA"'\1'"$ac_dB"'\2'"${ac_dC}"'\3'"$ac_dD"'/p + s/^\('"$ac_word_re"'\)[ ]*\(.*\)/'"$ac_dA"'\1'"$ac_dB$ac_dC"'\2'"$ac_dD"'/p + ' >>conftest.defines + +# Remove the space that was appended to ease matching. +# Then replace #undef with comments. This is necessary, for +# example, in the case of _POSIX_SOURCE, which is predefined and required +# on some systems where configure will not decide to define it. +# (The regexp can be short, since the line contains either #define or #undef.) +echo 's/ $// +s,^[ #]*u.*,/* & */,' >>conftest.defines + +# Break up conftest.defines: +ac_max_sed_lines=50 + +# First sed command is: sed -f defines.sed $ac_file_inputs >"$tmp/out1" +# Second one is: sed -f defines.sed "$tmp/out1" >"$tmp/out2" +# Third one will be: sed -f defines.sed "$tmp/out2" >"$tmp/out1" +# et cetera. +ac_in='$ac_file_inputs' +ac_out='"$tmp/out1"' +ac_nxt='"$tmp/out2"' + +while : +do + # Write a here document: + cat >>$CONFIG_STATUS <<_ACEOF + # First, check the format of the line: + cat >"\$tmp/defines.sed" <<\\CEOF +/^[ ]*#[ ]*undef[ ][ ]*$ac_word_re[ ]*\$/b def +/^[ ]*#[ ]*define[ ][ ]*$ac_word_re[( ]/b def +b +:def +_ACEOF + sed ${ac_max_sed_lines}q conftest.defines >>$CONFIG_STATUS + echo 'CEOF + sed -f "$tmp/defines.sed"' "$ac_in >$ac_out" >>$CONFIG_STATUS + ac_in=$ac_out; ac_out=$ac_nxt; ac_nxt=$ac_in + sed 1,${ac_max_sed_lines}d conftest.defines >conftest.tail + grep . 
conftest.tail >/dev/null || break + rm -f conftest.defines + mv conftest.tail conftest.defines +done +rm -f conftest.defines conftest.tail + +echo "ac_result=$ac_in" >>$CONFIG_STATUS +cat >>$CONFIG_STATUS <<\_ACEOF + if test x"$ac_file" != x-; then + echo "/* $configure_input */" >"$tmp/config.h" + cat "$ac_result" >>"$tmp/config.h" + if diff $ac_file "$tmp/config.h" >/dev/null 2>&1; then + { echo "$as_me:$LINENO: $ac_file is unchanged" >&5 +echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f $ac_file + mv "$tmp/config.h" $ac_file + fi + else + echo "/* $configure_input */" + cat "$ac_result" + fi + rm -f "$tmp/out12" + ;; + + :C) { echo "$as_me:$LINENO: executing $ac_file commands" >&5 +echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "default":C) make dirs ;; + + esac +done # for ac_tag + + +{ (exit 0); exit 0; } +_ACEOF +chmod +x $CONFIG_STATUS +ac_clean_files=$ac_clean_files_save + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. 
+ $ac_cs_success || { (exit 1); exit 1; } +fi + diff --git a/tools/dsync-0.0/configure.in b/tools/dsync-0.0/configure.in new file mode 100644 index 00000000..864aec02 --- /dev/null +++ b/tools/dsync-0.0/configure.in @@ -0,0 +1,101 @@ +ad +dnl Process this file with autoconf to produce a configure script. +dnl The ONLY thing this is used for is to configure for different +dnl linux architectures and configurations, it is not used to make the +dnl code more portable + +dnl You MUST have an environment that has all the POSIX functions and +dnl some of the more popular bsd/sysv ones (like select). You'll also +dnl need a C++ compiler that is semi-standard conformant, exceptions are +dnl not used but STL is. + +dnl 'make -f Makefile startup' will generate the configure file from +dnl configure.in correctly and can be run at any time + +AC_INIT(configure.in) +AC_CONFIG_AUX_DIR(buildlib) +AC_CONFIG_HEADER(include/config.h:buildlib/config.h.in) + +dnl -- SET THIS TO THE RELEASE VERSION -- +AC_DEFINE_UNQUOTED(VERSION,"0.0") +AC_DEFINE_UNQUOTED(PACKAGE,"dsync") + +AC_CHECK_TOOL_PREFIX dnl Initial guess + +dnl Check our C compiler +AC_PROG_CC +AC_ISC_POSIX + +dnl Check the host arch (build+target not needed... 
yet) +AC_CANONICAL_HOST +AC_CHECK_TOOL_PREFIX dnl recheck, in case the initial guess was wrong + +dnl Check for other programs +AC_PROG_CXX +AC_LANG_CPLUSPLUS +AC_CHECK_TOOL(AR,ar, :) + +if test "$AR" = ":"; then + AC_MSG_ERROR(failed: Sorry I could not find ar in the path) +fi + +dnl Checks for pthread +AC_CHECK_LIB(pthread, pthread_create,[AC_DEFINE(HAVE_PTHREAD) PTHREADLIB="-lpthread"]) +AC_SUBST(PTHREADLIB) + +dnl Converts the ARCH to be the same as dpkg +AC_MSG_CHECKING(system architecture) +archset="`awk '$1 == "'$host_cpu'" { print $2 }' $srcdir/buildlib/archtable`" +if test "x$archset" = "x"; then + AC_MSG_ERROR(failed: use --host=) +fi +AC_MSG_RESULT($archset) +AC_DEFINE_UNQUOTED(ARCHITECTURE,"$archset") + +dnl We use C9x types if at all possible +AC_CACHE_CHECK([for C9x integer types],c9x_ints,[ + AC_TRY_COMPILE([#include ], + [uint8_t Foo1;uint16_t Foo2;uint32_t Foo3;uint64_t Foo], + c9x_ints=yes,c9x_ints=no)]) + +dnl Check the sizes etc. of the architecture +changequote(,) +if archline="`sed -ne 's/^'$archset':[ ]\+\(.*\)/\1/gp' $srcdir/buildlib/sizetable`"; then + changequote([,]) + set $archline + if test "$1" = "little"; then + ac_cv_c_bigendian=no + else + ac_cv_c_bigendian=yes + fi + size_char=$2 + size_int=$3 + size_short=$4 + size_long=$5 +fi + +if test "$cross_compiling" = "yes" -a "$archline" = ""; then + AC_MSG_ERROR(When cross compiling, architecture must be present in sizetable) +fi +AC_C_BIGENDIAN + +dnl We do not need this if we have inttypes.. 
+HAVE_C9X=yes +if test x"$c9x_ints" = x"no"; then + AC_CHECK_SIZEOF(char,$size_char) + AC_CHECK_SIZEOF(int,$size_int) + AC_CHECK_SIZEOF(short,$size_short) + AC_CHECK_SIZEOF(long,$size_long) + + HAVE_C9X= + AC_SUBST(HAVE_C9X) +fi + +dnl Check for debiandoc +AC_CHECK_PROG(DEBIANDOC_HTML,debiandoc2html,"yes","") +AC_CHECK_PROG(DEBIANDOC_TEXT,debiandoc2text,"yes","") + +dnl Check for YODL +AC_CHECK_PROG(YODL_MAN,yodl2man,"yes","") + +AC_OUTPUT(environment.mak:buildlib/environment.mak.in makefile:buildlib/makefile.in,make dirs) diff --git a/tools/dsync-0.0/debian/changelog b/tools/dsync-0.0/debian/changelog new file mode 100644 index 00000000..247b103e --- /dev/null +++ b/tools/dsync-0.0/debian/changelog @@ -0,0 +1,18 @@ +dsync (0.0-0.2) experimental; urgency=low + + * Make it build with modern autoconf and upgrade to debhelper compat 4. + + -- Ryan Murray Sat, 10 Nov 2007 22:07:03 +0000 + +dsync (0.0-0.1) experimental; urgency=low + + * Make it build using g++-3.3. + + -- Kurt Roeckx Mon, 16 May 2005 16:04:58 +0200 + +dsync (0.0) experimental; urgency=low + + * First experimental version. + + -- Jason Gunthorpe Sun, 17 Jan 1999 19:07:53 -0700 + diff --git a/tools/dsync-0.0/debian/compat b/tools/dsync-0.0/debian/compat new file mode 100644 index 00000000..b8626c4c --- /dev/null +++ b/tools/dsync-0.0/debian/compat @@ -0,0 +1 @@ +4 diff --git a/tools/dsync-0.0/debian/control b/tools/dsync-0.0/debian/control new file mode 100644 index 00000000..70e7604f --- /dev/null +++ b/tools/dsync-0.0/debian/control @@ -0,0 +1,11 @@ +Source: dsync +Section: net +Priority: optional +Maintainer: Jason Gunthorpe +Standards-Version: 2.4.1 + +Package: dsync +Architecture: any +Depends: ${shlibs:Depends} +Description: Mirroring tool + DSync is a mirroring tool. diff --git a/tools/dsync-0.0/debian/postinst b/tools/dsync-0.0/debian/postinst new file mode 100755 index 00000000..0b7518bc --- /dev/null +++ b/tools/dsync-0.0/debian/postinst @@ -0,0 +1,6 @@ +#! 
/bin/sh +set -e + +if [ "$1" = "configure" ] ; then + ldconfig +fi diff --git a/tools/dsync-0.0/debian/rules b/tools/dsync-0.0/debian/rules new file mode 100755 index 00000000..0c508d42 --- /dev/null +++ b/tools/dsync-0.0/debian/rules @@ -0,0 +1,83 @@ +#!/usr/bin/make -f +# Made with the aid of dh_make, by Craig Small +# Sample debian/rules that uses debhelper. GNU copyright 1997 by Joey Hess. +# Some lines taken from debmake, by Christoph Lameter. +# $Id: rules,v 1.2 1999/01/18 02:38:15 jgg Exp $ + + +# Uncomment this to turn on verbose mode. +#export DH_VERBOSE=1 + +export DEB_HOST_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE) +export DEB_BUILD_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE) + +# FOR AUTOCONF 2.13 ONLY +ifeq ($(DEB_BUILD_GNU_TYPE), $(DEB_HOST_GNU_TYPE)) +# confflags += --host=$(DEB_HOST_GNU_TYPE) +else + $(error Cannot cross-compile this package out-of-the-box) +endif + +build: build-stamp +build-stamp: configure + dh_testdir + -mkdir build + cd build; ../configure + cd .. + + # Add here commands to compile the package. + make + touch build-stamp + +clean: + dh_testdir +# dh_testroot + rm -f build-stamp + rm -rf build + + # Add here commands to clean up after the build process. + -$(MAKE) clean + -$(MAKE) distclean + dh_clean + +# Build architecture-independent files here. +binary-indep: build +# We have nothing to do by default. + +# Build architecture-dependent files here. 
+binary-arch: build dsync + +dsync: build +# dh_testversion -pdsync + dh_testdir -pdsync + dh_testroot -pdsync + dh_clean -pdsync -k + dh_installdirs -pdsync usr/bin usr/doc/dsync usr/lib usr/doc/dsync + + cp build/bin/dsync-* debian/dsync/usr/bin/ + cp -a build/bin/libdsync.so.0.0.0 debian/dsync/usr/lib/ + cp -a build/bin/libdsync.so.0.0 debian/dsync/usr/lib/ + cp COPYING debian/dsync/usr/doc/dsync/copyright + + dh_installdocs -pdsync + dh_installman -pdsync + + dh_installchangelogs -pdsync + dh_strip -pdsync + dh_compress -pdsync + dh_fixperms -pdsync + dh_installdeb -pdsync + dh_makeshlibs -pdsync + dh_shlibdeps -pdsync + dh_gencontrol -pdsync + dh_md5sums -pdsync + dh_builddeb -pdsync + +source diff: + @echo >&2 'source and diff are obsolete - use dpkg-source -b'; false + +configure: + make startup + +binary: binary-indep binary-arch +.PHONY: build clean binary-indep binary-arch binary diff --git a/tools/dsync-0.0/debian/shlibs.local b/tools/dsync-0.0/debian/shlibs.local new file mode 100644 index 00000000..b75c86e9 --- /dev/null +++ b/tools/dsync-0.0/debian/shlibs.local @@ -0,0 +1 @@ +libdsync 0 diff --git a/tools/dsync-0.0/debian/substvars b/tools/dsync-0.0/debian/substvars new file mode 100644 index 00000000..e3698e34 --- /dev/null +++ b/tools/dsync-0.0/debian/substvars @@ -0,0 +1 @@ +shlibs:Depends=libc6 (>= 2.3.5-1), libgcc1 (>= 1:4.1.1-12), libstdc++6 (>= 4.1.1-12) diff --git a/tools/dsync-0.0/doc/dsync-flist.1.yo b/tools/dsync-0.0/doc/dsync-flist.1.yo new file mode 100644 index 00000000..fbb268a8 --- /dev/null +++ b/tools/dsync-0.0/doc/dsync-flist.1.yo @@ -0,0 +1,160 @@ +mailto(jgg@debian.org) +manpage(dsync-flist)(1)(17 Jan 1999)(dsync)() +manpagename(dsync)(DSync Mirroring utility -- command-line file list manipulator) + +manpagesynopsis() + dsync-flist [options] [command] [file] + +manpagedescription() + +dsync-flist is the command line tool for generating and manipulating the +dsync file list. 
It can check a previously generated list against the local
+tree and provide a report on its findings. The dsync file list is an
+optimized binary file suitable for transmission over the internet.
+
+em(command) is one of:
+itemize(
+  it() generate em(filelist)
+  it() help
+  it() dump em(filelist)
+  it() md5sums em(filelist)
+  it() md5cache em(filelist)
+  it() lookup em(filelist dir file)
+  it() link-dups em(filelist)
+  it() verify em(filelist)
+)
+
+Unless the -h, or --help option is given one of the above commands
+must be present.
+
+startdit()
+dit(bf(generate))
+bf(generate) creates a file list. It takes as an argument the location to
+write the file list to and then proceeds to recursively scan . to produce
+the list. If md5 generation is enabled bf(generate) will use the previous
+list as a cache for md5 checksums, only building new checksums if the file
+size or timestamp has changed.
+
+dit(bf(help))
+Displays the help text
+
+dit(bf(dump))
+bf(dump) shows the contents of the given file list in a short form. The first
+word is a type field and the remaining fields represent stored information.
+The possible types are F - File, D - Directory, DM - Directory Marker, DS -
+Directory Start, H - Header, S - Device Special, L - Symlink, T - Trailer.
+After this the actual fields are displayed. Mod - Modification time in
+seconds since the unix epoch, N - Entity Name, MD5 - MD5 hash, Sz - Size
+in bytes, T - Link Target, U/G - User/Group internal ID, Sig - Header
+signature, Maj - Header major number, Min - Header minor number, Epoch -
+Internal Epoch offset, Count - Flag counter.
+
+dit(bf(md5sums))
+bf(md5sums) takes the contents of the file list and displays the stored md5
+of every file and then the file name. This output can then be given to
+bf(md5sum -c) (GNU) to verify the checksums. Combined with the caching
+action of the file list generator this can make md5 indexes of large archives
+practical. 
+
+dit(bf(md5cache))
+Like bf(md5sums), bf(md5cache) displays the md5sums of the files given
+on stdin. It will use cached MD5 information if possible otherwise it will
+compute the MD5 and return that. It is necessary to run this command from the
+same directory the file list was generated in and to give filenames relative
+to that directory. Otherwise the caching mechanism will not work.
+
+dit(bf(lookup))
+bf(lookup) searches for a single entity in the list. You must specify the
+directory, ending in / and then the entity in that directory. The output is
+the same as bf(dump)
+
+dit(bf(link-dups))
+bf(link-dups) checks the entire file list for files that have duplicate
+contents and hard links them. It does this by examining the MD5 information
+from the file list and then converting the duplicated files into a hard link.
+The file chosen to be the target of all other links is the first file
+listed in the file list. The timestamp of the new link is set to be the
+largest timestamp of all the other links and the permissions and ownership
+remain as the first link. Output is two lines per combination, the first
+indicating the source file and the second the file that will be erased and
+hardlinked, a source file may occur multiple times if there are many
+duplicated copies.
+
+dit(bf(verify))
+bf(verify) checks the given file list against . and reports any deviations.
+
+enddit()
+
+manpageoptions()
+All command line options may be set using the configuration file, the
+descriptions indicate the configuration option to set. For boolean
+options you can override the config file by using something like bf(-f-),
+bf(--no-f), bf(-f=no) or several other variations.
+
+startdit()
+dit(bf(-h, --help))
+Show the help text
+
+dit(bf(-q, --quiet, --silent))
+Quiet; produces output suitable for logging, omitting progress indicators.
+More qs will produce more quiet up to a maximum of 2. You can also use
+bf(-q=#) to set the quiet level, overriding the configuration file. 
+See bf(quiet) + +dit(bf(-i, --include)) +dit(bf(-e, --exclude)) +Add a pattern to the ordered include list. See bf(FileList::Filter). + +dit(bf(-n, --no-act)) +Suppress action; No changes will be made to the local file system. This +applies to bf(generate) and bf(verify). + +dit(bf(--delete)) +Allow files to be deleted; This allows files to be erased, it effects +bf(generate) and bf(verify). See bf(delete). + +dit(bf(--pi, --perfer-include)) +dit(bf(--pe, --perfer-exclude)) +Add a pattern to the ordered prefer include list. See +bf(FileList::Prefer-Filter). + +dit(bf(--ci, --clean-include)) +dit(bf(--ce, --clean-exclude)) +Add a pattern to the ordered clean include list. Things excluded by this +filter will be erased. See bf(FileList::Clean-Filter). + +dit(bf(--md5)) +Generate md5 hashes into the list. See bf(FileList::MD5-Hashes). + +dit(bf(--perm)) +Generate file permissions into the list. See bf(FileList::Permissions). + +dit(bf(--owner)) +Generate file ownership into the list [unsupported]. See +bf(FileList::Ownership). + +dit(bf(-c, --config-file)) +Configuration File; Specify a configuration file to use. bf(apt-get) will +read the default configuration file and then this configuration file. See +bf(apt.conf(5)) for syntax information. + +dit(bf(-o, --option)) +Set a Configuration Option; This will set an arbitary configuration option. +The syntax is +verb(-o Foo::Bar=bar) + +enddit() + +manpageseealso() +dsync.conf(5) + +manpagediagnostics() +dsync-flist returns zero on normal operation, decimal 100 on error. + +manpagebugs() +See http://bugs.debian.org/dsync. If you wish to report a +bug in bf(apt-get), please see bf(/usr/doc/debian/bug-reporting.txt) +or the bf(bug(1)) command. + +manpageauthor() +dsync was written by Jason Gunthorpe . 
diff --git a/tools/dsync-0.0/doc/examples/dsync.conf b/tools/dsync-0.0/doc/examples/dsync.conf new file mode 100644 index 00000000..76139fef --- /dev/null +++ b/tools/dsync-0.0/doc/examples/dsync.conf @@ -0,0 +1,55 @@ +/* This dsync configuration file is a sample that contains all options. + It is not ment to be used as is. +*/ + +/* Each module has a set of configuration parameters. The module to use + is specified on the command line. */ +module::Foo +{ + // The base directory for the module + Root "/home/ftp/foo"; + + // Here we specify options that control generation of the file list + FileList + { + // Generation options + MD5-Hashes "yes"; + Hard-Links "yes"; + Permissions "yes"; + Ownership "yes"; + Ordering "depth"; + + /* The filter list. Items excluded by this filter are not inclued + in the file list */ + Filter + { + "+ *"; + }; + + /* The prefer filter list. Items included in the filter are prefered + over items exclued in this filter. This effects the order directories + are listed. All directories included by the filter are listed before + any directories exclued by the filter. The filter only matche + directories, not files. */ + Prefer-Filter + { + "+ *"; + }; + + // Use the specified pre-generated file list, relative to the root, + PreGenerated "dsync.list"; + }; + + // Here we specify options specific to the dsync-flist program + FList + { + /* This filter is used for archive maintinance, files Excluded by + this filter are removed from the archive, directories are never + passed through */ + Clean-Filter + { + "- core"; + "+"; + }; + }; +}; diff --git a/tools/dsync-0.0/doc/filelist.sgml b/tools/dsync-0.0/doc/filelist.sgml new file mode 100644 index 00000000..35483d19 --- /dev/null +++ b/tools/dsync-0.0/doc/filelist.sgml @@ -0,0 +1,709 @@ + + + +DSync File List Format + +Jason Gunthorpe jgg@debian.org +$Id: filelist.sgml,v 1.4 1999/11/15 07:59:49 jgg Exp $ + + + + + +Copyright © Jason Gunthorpe, 1998-1999. +

+DSync and this document are free software; you can redistribute them and/or +modify them under the terms of the GNU General Public License as published +by the Free Software Foundation; either version 2 of the License, or (at your +option) any later version. + +

+For more details, on Debian GNU/Linux systems, see the file +/usr/doc/copyright/GPL for the full license. + + + + +Introduction + + +Purpose +

+The DSync file list is a crucial part of the DSync system, it provides the +client with access to a list of files and file attributes for all the files +in a directory tree. Much information is compacted into the per-file structure +that may be used by the client in reconstructing the directory tree. In spirit +it is like the common ls-lR files that mirrors have, but in practice it is +radically different, most striking is that it is stored in a compacted binary +format and may optionally contain MD5 hashes. + +

+The file list for a directory tree may be either dynamically generated by the +server or generated only once like the ls-lR files. In fact with a static +file list it is possible to use the rsync method to transfer only the +differences in the list which is a huge boon for sites with over 50000 files +in their directory trees + +

+Internally the file list is stored as a series of directory blocks in no set +order. Each block has a relative path from the base to the directory itself +and a list of all files in that directory. Things are not stored recursively +so that the client can have fixed memory usage when managing the list. +Depending on how the generator is configured the order of the directories +may be breadth first or depth first, or perhaps completely random. The client +should make no assumptions about the ordering of anything in the file. + +

+Since the list may be generated on the fly by the server it is necessary for +it to be streamable. To this effect there will be no counts or sizes that +refer to anything outside of the current record. This assures that the +generator will be able to build a file list without negligible server side +overhead. Furthermore a focus is placed on making things as small as possible, +to this end useful items like record length indicators are omitted. This +does necessarily limit the ability to handle format changes. + + +Structure + + +Data Stream +

+The data stream is encoded as a series of variable length numbers, fixed +length numbers and strings. The use of variable length number encoding +was chosen to accommodate sites with over 100000 files, mostly below 16k, +using variable length encoding will save approximately 400k of data and still +allow some items that are very large. + +

+Numbers are coded as a series of bytes of non-fixed length, the highest bit +of each byte is 1 if the next byte is part of this number. Bytes are ordered +backwards from the least significant to the most significant in order to +simplify decoding, any omitted bits can be assumed to be 0. Clients should +decode into their largest type and fatally error if a number expands to +larger than that. All numbers are positive. + +

+Strings are coded in pascal form, with a length number preceding a series +of 8 bit characters making up the string. The strings are coded in UTF. + +

+The first records in the file should be a header record followed by any +include/exclude records to indicate how the list was generated. Following +that is the actual file list data. + +

+The records all have the same form, they start with an 8 bit tag value and +then have the raw record data after. The main header has a set of flags for +all of the records types, these flags are used to designate optional portions +of the record. For instance a +file record may not have a md5 hash or uid/gid values, those would be marked +off in the flags. Generally every non-critical value is optional. The records +and their tags are as follows: + + + 0 - Header + 1 - Directory Marker + 2 - Directory Start + 3 - Directory End + 4 - Normal File + 5 - Symlink + 6 - Device Special + 7 - Directory + 8 - Include/Exclude + 9 - User Map + 10 - Group Map + 11 - Hard Link + 12 - End Marker + 13 - RSync Checksums + 14 - Aggregate File + 15 - RSync End + + +

+The header record is placed first in the file followed by Directory records +and then by a number of file type records. The Directory Start/End are used +to indicate which directory the file records are in. The approach is to +create a bundle of file type records for each directory that are stored +non-recursively. The directory marker records are used with depth-first +traversal to create unseen directories with the proper permissions. + + + +Header +

+The header is the first record in the file and contains some information about +what will follow. + + struct Header + { + uint8 Tag; // 0 for the header + + uint32 Signature; + uint16 MajorVersion; + uint16 MinorVersion; + number Epoch; + + uint8 FlagCount; + uint32 Flags[12]; + }; + + +Signature +This field should contain the hex value 0x97E78AB which designates the file +as a DSync file list. Like all numbers it should be stored in network byte +order. + +MajorVersion +MinorVersion +These two fields designate the revision of the format. The major version +should be increased if an incompatible change is made to the structure of +the file, otherwise the minor version should reflect any changes. The current +major/minor is 0 and 0. Compatibility issues are discussed later on. + +Epoch +Inorder to encode time in a single 32 bit signed integer the format uses a +shifting epoch. Epoch is set to a time in seconds from the unix +epoch. All other times are relative to this time. +In this way we can specify any date 68 years in either direction from any +possible time. Doing so allows us to encode time using only 32 bits. The +generator should either error or truncate if a time value exceeds this +representation. This does impose the limitation that the difference between +the lowest stored date and highest stored date must be no more than 136 years. + +FlagCount +This designates the number of items in the flag array. + +Flags +Each possible record type has a flag value that is used to indicate what +items the generator emitted. There is no per-record flag in order to save +space. The flag array is indexed by the record ID. + + + + + +Directory Marker, Directory Start and Directory +

+The purpose of the directory marker record is to specify directories that +must be created before a directory start record can be processed. It is needed +to ensure the correct permissions and ownership are generated while the +contents are in transfer. + +

+A Directory Start record serves to indicate a change of directory. All further +file type records will refer to the named directory until a Directory End +record is processed marking the final modification for this directory. It is +not possible to nest directory start directives, in fact a Directory Start +record implies a Directory End record for the previously Started Directory + +

+The plain directory record is a file type record that refers to a directory +file type. All of these record types describe the same thing used in different +contexts so share the same structure. + + + struct DirMarker + { + uint8 Tag; // 1, 2 or 7 for the header + + uint32 ModTime; + uint16 Permissions; + number User; + number Group; + string Path; + }; + + +Flags [from the header] +Optional portions of the structure are Permissions (1<<0) and user/group +(1<<1). The bit is set to 1 if they are present. + +ModTime +This is the number of seconds since the file list epoch, it is the modification +date of the directory. + +Permissions +This is the standard unix permissions in the usual format. + +User +Group +These are the standard unix user/group for the directory. They are indirected +through the user/group maps described later on. + +Path +The path from the base of the file list to the directory this record describes. +However ordinary directory types have a single name relative to the last +Directory Start record. + + + + +Directory End +

+The purpose of the directory end marker is to signify that there will be no +more file type records from this directory. Directory Start and Directory +End records must be paired. The intent of this record is to allow future +expansion, NOT to allow recursive directory blocks. A Directory Start +record will imply a Directory End record if the previous was not terminated. + +

+There are no data members, it is the basic 1 item record. If the data stream +terminates with an open directory block it is assumed to be truncated and +an error issued. + + + + +Normal File +

+A normal file is a simple, regular file. It has the standard set of unix +attributes and an optional MD5 hash for integrity checking. + + struct NormalFile + { + uint8 Tag; // 4 + + uint32 ModTime; + uint16 Permissions; + number User; + number Group; + string Name; + number Size; + uint128 MD5; + }; + + +Flags [from the header] +Optional portions of the structure are Permissions (1<<0), user/group +(1<<1), and MD5 (1<<2). The bit is set to 1 if they are present. + +ModTime +This is the number of seconds since the file list epoch, it is the modification +date of the file. + +Permissions +This is the standard unix permissions in the usual format. + +User +Group +These are the standard unix user/group for the directory. They are indirected +through the user/group maps described later on. + +Name +The name of the item. It should have no pathname components and is relative +to the last Directory Start record. + +MD5 +This is a MD5 hash of the file. + +Size +This is the size of the file in bytes. + + + + +Symlink +

+This encodes a normal unix symbolic link. Symlinks do not have permissions +or size, but do have optional ownership. + + struct Symlink + { + uint8 Tag; // 5 + + uint32 ModTime; + number User; + number Group; + string Name; + uint8 Compression; + string To; + }; + + +Flags [from the header] +Optional portions of the structure are, user/group +(1<<0). The bit is set to 1 if they are present. + +ModTime +This is the number of seconds since the file list epoch, it is the modification +date of the file. + +User +Group +These are the standard unix user/group for the directory. They are indirected +through the user/group maps described later on. + +Name +The name of the item. It should have no pathname components and is relative +to the last Directory Start record. + +Compression +Common use of symlinks makes them very easy to compress, the compression +byte allows this. It is an 8 bit byte with the first 7 bits representing an +unsigned number and the 8th bit as being a flag. The first 7 bits describe +how many bytes of the last symlink should be prepended to To and if the 8th +bit is set then Name is appended to To. + +To +This is the file the symlink is pointing to. It is an absolute string taken +as is. The client may perform checking on it if desired. The string is +compressed as described in the Compression field. + + + + +Device Special +

+Device Special records encode unix device special files, which have a major +and a minor number corrisponding to some OS specific attribute. These also +encode fifo files, anything that can be created by mknod. + + struct DeviceSpecial + { + uint8 Tag; // 6 + + uint32 ModTime; + uint16 Permissions; + number User; + number Group; + number Dev; + string Name; + }; + + +Flags [from the header] +Optional portions of the structure areuser/group +(1<<0). The bit is set to 1 if they are present. + +ModTime +This is the number of seconds since the file list epoch, it is the modification +date of the file. + +Permissions +This non-optional field is used to encode the type of device and the +creation permissions. + +Dev +This is the OS specific 'dev_t' field for mknod. + +Major +Minor +These are the OS dependent device numbers. + +Name +The name of the item. It should have no pathname components and is relative +to the last Directory Start record. + +To +This is the file the symlink is pointing to. + + + + +Include and Exclude +

+The include/exclude list used to generate the file list is encoded after +the header record. It is stored as an ordered set of include/exclude records +acting as a filter. If no record matches then the pathname is assumed to +be included otherwise the first matching record decides. + + + struct IncludeExclude + { + uint8 Tag; // 8 + + uint8 Type; + string Pattern; + }; + + +Flags [from the header] +None defined. + +Type +This is the sort of rule, presently 1 is an include rule and 2 is an exclude +rule. + +Pattern +This is the textual pattern used for matching. + + + + + +User/Group Map +

+In order to properly transfer users and groups the names are converted from +a local number into a file list number and a number to name mapping. When +the remote side reads the file list it directs all UID/GID translations +through the mapping to create the real names and then does a local lookup. +This also provides some compression in the file list as large UIDs are +converted into smaller values through the mapping. + +

+The generator is expected to emit these records at any place before the IDs +are actually used. + + struct NameMap + { + uint8 Tag; // 9,10 + + number FileID; + number RealID; + string Name; + }; + + +Flags [from the header] +Optional portions of the structure are RealID (1<<0). + +FileID +This is the ID used internally in the file list, it should be monotonically +increasing each time a Map record is created so that it is small and unique. + +RealID +This is the ID used in the filesystem on the generating end. This information +may be used if the user selected to regenerate IDs without translation. + + + + +Hard Link +

+A hard link record is used to record a file that is participating in a hard +link. The only information we know about the link is the inode and device +on the local machine, so we store this information. The client will have to +reconstruct the linkages if possible. + + + struct HardLink + { + uint8 Tag; // 11 + + uint32 ModTime; + number Serial; + uint16 Permissions; + number User; + number Group; + string Name; + number Size; + uint128 MD5; + }; + + +Flags [from the header] +Optional portions of the structure are Permissions (1<<0), user/group +(1<<1), and MD5 (1<<2). The bit is set to 1 if they are present. + +ModTime +This is the number of seconds since the file list epoch, it is the modification +date of the file. + +Serial +This is the unique ID number for the hardlink. It is composed from the +device inode pair in a generator dependent way. The exact nature of the +value is unimportant, only that two hard link records with the same serial +should be linked together. It is recommended that the generator compress +hard link serial numbers into small monotonically increasing IDs. + +Permissions +This is the standard unix permissions in the usual format. + +User +Group +These are the standard unix user/group for the directory. They are indirected +through the user/group maps described later on. + +Name +The name of the item. It should have no pathname components and is relative +to the last Directory Start record. + +MD5 +This is a MD5 hash of the file. + +Size +This is the size of the file in bytes. + + + + +End Marker +

+The End Marker is the final record in the stream, if it is missing the stream +is assumed to be incomplete. + + struct Trailer + { + uint8 Tag; // 12 for the header + + uint32 Signature; + }; + + +Signature +This field should contain the hex value 0xBA87E79 which is designed to +prevent a corrupted stream as being a legitimate end marker. + + + + + +RSync Checksums +

+The checksum record contains the list of checksums for a file and represents +the start of a RSync description block which may contain RSync Checksums, +a Normal File entry or Aggregate Files records. + + struct RSyncChecksums + { + uint8 Tag; // 13 + + number BlockSize; + number FileSize; + uint160 Sums[ceil(FileSize/BlockSize)]; + }; + + +BlockSize +The size of each block in the stream in bytes. + +FileSize +The total size of the file in bytes. + +Sums +The actual checksum data. The format has the lower 32 bits as the weak +checksum and the upper 128 bits as the strong checksum. + + + + +Aggregate File +

+If the generator was given a list of included files this record will be +emitted after the rsync checksum record, once for each file. The given +paths are files that are likely to contain fragments of the larger file. + + struct AggregateFile + { + uint8 Tag; // 14 for this record + + string File; + }; + + +File +The stored filename. + + + + +RSync End +

+The purpose of the directory end marker is to signify that the RSync data +is finished. RSync blocks begin with the RSync checksum record, then are +typically followed by a Normal File record describing the name and attributes +of the file and then optionally followed by a set of Aggregate File records. + +

+There are no data members, it is the basic 1 item record. If the data stream +terminates with an open block it is assumed to be truncated and an error +issued. + + +The Client + + +Handling Compatibility +

+The format has no provision for making backwards compatible changes, even +minor ones. What was provided is a way to make a generator that is both +forwards and backwards compatible with clients, this is done by disabling +generation of unsupported items and masking them off in the flags. + +

+To deal with this a client should examine the header and determine if it has +a suitable major version, the minor version should largely be ignored. The +client should then examine the flags values and for all records it understands +ensure that no bits are masked on that it does not understand. Records that +it cannot handle should be ignored at this point. When the client is +parsing it should abort if it hits a record it does not support. + + + +Client Requirements +

+The client attempting to verify synchronization of a local file tree and a +tree described in a file list must do three things, look for extra local files, +manage the UID/GID mappings and maintain a map of hardlinks. These items +correspond to the only necessary memory usage on the client. + +

+It is expected that the client will use the timestamp, size and possibly +MD5 hash to match the local file against the remote one to decide if it +should be retrieved. + +

+Hardlinks are difficult to handle, but represent a very useful feature. The +client should track all hard links until they are associated with a local +file+inode, then all future links to that remote inode can be recreated +locally. + +

+The rsync method was invented by Andrew Tridgell and originally +implemented in the rsync program. DSync has a provision to make use of the +rsync method for transferring differences between files efficiently, +however the implementation is not as bandwidth efficient as what the rsync +program uses, emphasis is placed on generator efficiency. + +

+Primarily the rsync method makes use of a series of weak and strong +block checksums for each block in a file. Blocks are a uniform size and +are uniformly distributed about the source file. In order to minimize server +loading the checksum data is generated for the file on the server and then +sent to the client - this might optionally be done from a cached file. The +client is responsible for performing the checksumming and searching on its +end. + +

+In contrast rsync has the client send its checksums to the server and the +server sends back commands to reconstruct the file. This is more bandwidth +efficient because only one round trip is required and there is a higher chance +that more blocks will be matched and not need to be sent to the client. + +

+Furthermore a feature designed for use by CD images is provided where a file +can be specified as the aggregation of many smaller files. The aggregated +files are specified only by giving the file name. The client is expected to +read the file (probably from the network) and perform checksum searching +against the provided table. + + + + +CD Images +

+The primary and most complex use of the rsync data is for forming CD images +on the fly from a mirror and a CD source. This is extremely useful because +CD images take up a lot of space and bandwidth to mirror, while they are +merely aggregates of (possibly) already mirrored data. Using checksums +and a file listing allows the CD image to be reconstructed from any mirror +and reduces the loading on primary CD image servers. + +

+The next use of checksums is to 'freshen' a CD image during development. If +an image is already present that contains a subset of the required data the +checksums generally allow a large percentage of that data to be reused. + +

+Since the client is responsible for reconstruction and checksum searching it +is possible to perform in place reconstruction and in place initial generation +that does not require a (large!) temporary file. + + + + diff --git a/tools/dsync-0.0/doc/makefile b/tools/dsync-0.0/doc/makefile new file mode 100644 index 00000000..d2130dfe --- /dev/null +++ b/tools/dsync-0.0/doc/makefile @@ -0,0 +1,14 @@ +# -*- make -*- +BASE=.. +SUBDIR=doc + +# Bring in the default rules +include ../buildlib/defaults.mak + +# SGML Documents +SOURCE = filelist.sgml +include $(DEBIANDOC_H) + +# Man pages +SOURCE = dsync-flist.1 +include $(YODL_MANPAGE_H) diff --git a/tools/dsync-0.0/libdsync/compare.cc b/tools/dsync-0.0/libdsync/compare.cc new file mode 100644 index 00000000..95a286ba --- /dev/null +++ b/tools/dsync-0.0/libdsync/compare.cc @@ -0,0 +1,608 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: compare.cc,v 1.6 1999/12/26 06:59:00 jgg Exp $ +/* ###################################################################### + + Compare a file list with a local directory + + The first step in the compare is to read the names of each entry + in the local directory into ram. This list is the first step to + creating a delete list. Next we begin scanning the file list, checking + each entry against the dir contents, if a match is found it is removed + from the dir list and then stat'd to verify against the file list + contents. If no match is found then the entry is marked for download. + When done the local directory in ram will only contain entries that + need to be erased. 
+ + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/compare.h" +#endif + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + /*}}}*/ + +// DirCompre::dsDirCompare - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsDirCompare::dsDirCompare() : IndexSize(0), IndexAlloc(0), Indexes(0), + NameAlloc(0), Names(0), Verify(true), HashLevel(Md5Date) +{ + IndexAlloc = 1000; + Indexes = (unsigned int *)malloc(sizeof(*Indexes)*IndexAlloc); + NameAlloc = 4096*5; + Names = (char *)malloc(sizeof(*Names)*NameAlloc); + if (Names == 0 || Indexes == 0) + _error->Error("Cannot allocate memory"); +} + /*}}}*/ +// DirCompare::~dsDirCompare - Destructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsDirCompare::~dsDirCompare() +{ + free(Names); + free(Indexes); +} + /*}}}*/ +// DirCompare::LoadDir - Load all the names in the directory /*{{{*/ +// --------------------------------------------------------------------- +/* Such in every name in the directory, we store them as a packed, indexed + array of strings */ +bool dsDirCompare::LoadDir() +{ + // Scan the directory + DIR *DirSt = opendir("."); + if (DirSt == 0) + return _error->Errno("opendir","Unable to open directory %s",SafeGetCWD().c_str()); + struct dirent *Ent; + IndexSize = 0; + char *End = Names + 1; + while ((Ent = readdir(DirSt)) != 0) + { + // Skip . and .. 
+ if (strcmp(Ent->d_name,".") == 0 || + strcmp(Ent->d_name,"..") == 0) + continue; + + // Grab some more bytes in the name allocation + if ((unsigned)(NameAlloc - (End - Names)) <= strlen(Ent->d_name)+1) + { + unsigned long OldEnd = End - Names; + char *New = (char *)realloc(Names,sizeof(*Names)*NameAlloc + 4*4096); + if (New == 0) + { + closedir(DirSt); + return _error->Error("Cannot allocate memory"); + } + + Names = New; + NameAlloc += 4*4096; + End = Names + OldEnd; + } + + // Grab some more bytes in the index allocation + if (IndexSize >= IndexAlloc) + { + unsigned int *New = (unsigned int *)realloc(Indexes, + sizeof(*Indexes)*IndexAlloc + 1000); + if (New == 0) + { + closedir(DirSt); + return _error->Error("Cannot allocate memory"); + } + + Indexes = New; + IndexAlloc += 4*4096; + } + + // Store it + Indexes[IndexSize] = End - Names; + IndexSize++; + strcpy(End,Ent->d_name); + End += strlen(End) + 1; + } + + closedir(DirSt); + return true; +} + /*}}}*/ +// DirCompare::Process - Process the file list stream /*{{{*/ +// --------------------------------------------------------------------- +/* This scans over the dirs from the IO and decides what to do with them */ +bool dsDirCompare::Process(string Base,dsFList::IO &IO) +{ + // Setup the queues and store the current directory + string StartDir = SafeGetCWD(); + + // Change to the base directory + if (chdir(Base.c_str()) != 0) + return _error->Errno("chdir","Could not change to %s",Base.c_str()); + Base = SafeGetCWD(); + this->Base = Base; + + string CurDir; + dsFList List; + bool Missing = false; + while (List.Step(IO) == true) + { + if (Visit(List,CurDir) == false) + return false; + + switch (List.Tag) + { + // Handle a forward directory reference + case dsFList::tDirMarker: + { + // Ingore the root directory + if (List.Entity->Name.empty() == true) + continue; + + char S[1024]; + + snprintf(S,sizeof(S),"%s%s",Base.c_str(),List.Entity->Name.c_str()); + + /* We change the path to be absolute for the benifit of 
the + routines below */ + List.Entity->Name = S; + + // Stat the marker dir + struct stat St; + bool Res; + if (lstat(S,&St) != 0) + Res = Fetch(List,string(),0); + else + Res = Fetch(List,string(),&St); + + if (Res == false) + return false; + break; + } + + // Start a directory + case dsFList::tDirStart: + { + if (DoDelete(CurDir) == false) + return false; + if (chdir(Base.c_str()) != 0) + return _error->Errno("chdir","Could not change to %s",Base.c_str()); + + CurDir = List.Dir.Name; + Missing = false; + IndexSize = 0; + if (List.Dir.Name.empty() == false) + { + /* Instead of erroring out we just mark them as missing and + do not re-stat. This is to support the verify mode, the + actual downloader should never get this. */ + if (chdir(List.Dir.Name.c_str()) != 0) + { + if (Verify == false) + return _error->Errno("chdir","Unable to cd to %s%s.",Base.c_str(),List.Dir.Name.c_str()); + Missing = true; + } + } + + if (Missing == false) + LoadDir(); + break; + } + + // Finalize the directory + case dsFList::tDirEnd: + { + if (DoDelete(CurDir) == false) + return false; + IndexSize = 0; + if (chdir(Base.c_str()) != 0) + return _error->Errno("chdir","Could not change to %s",Base.c_str()); + break; + } + } + + // We have some sort of normal entity + if (List.Entity != 0 && List.Tag != dsFList::tDirMarker && + List.Tag != dsFList::tDirStart) + { + // See if it exists, if it does then stat it + bool Res = true; + if (Missing == true || DirExists(List.Entity->Name) == false) + Res = Fetch(List,CurDir,0); + else + { + struct stat St; + if (lstat(List.Entity->Name.c_str(),&St) != 0) + Res = Fetch(List,CurDir,0); + else + Res = Fetch(List,CurDir,&St); + } + if (Res == false) + return false; + } + + // Fini + if (List.Tag == dsFList::tTrailer) + { + if (DoDelete(CurDir) == false) + return false; + return true; + } + } + + return false; +} + /*}}}*/ +// DirCompare::DoDelete - Delete files in the delete list /*{{{*/ +// 
--------------------------------------------------------------------- +/* The delete list is created by removing names that were found till only + extra names remain */ +bool dsDirCompare::DoDelete(string Dir) +{ + for (unsigned int I = 0; I != IndexSize; I++) + { + if (Indexes[I] == 0) + continue; + if (Delete(Dir,Names + Indexes[I]) == false) + return false; + } + + return true; +} + /*}}}*/ +// DirCompare::Fetch - Fetch an entity /*{{{*/ +// --------------------------------------------------------------------- +/* This examins an entry to see what sort of fetch should be done. There + are three sorts, + New - There is no existing data + Changed - There is existing data + Meta - The data is fine but the timestamp/owner/perms might not be */ +bool dsDirCompare::Fetch(dsFList &List,string Dir,struct stat *St) +{ + if (List.Tag != dsFList::tNormalFile && List.Tag != dsFList::tDirectory && + List.Tag != dsFList::tSymlink && List.Tag != dsFList::tDeviceSpecial && + List.Tag != dsFList::tDirMarker) + return _error->Error("dsDirCompare::Fetch called for an entity " + "that it does not understand"); + + // This is a new entitiy + if (St == 0) + return GetNew(List,Dir); + + /* Check the types for a mis-match, if they do not match then + we have to erase the entity and get a new one */ + if ((S_ISREG(St->st_mode) != 0 && List.Tag != dsFList::tNormalFile) || + (S_ISDIR(St->st_mode) != 0 && (List.Tag != dsFList::tDirectory && + List.Tag != dsFList::tDirMarker)) || + (S_ISLNK(St->st_mode) != 0 && List.Tag != dsFList::tSymlink) || + ((S_ISCHR(St->st_mode) != 0 || S_ISBLK(St->st_mode) != 0 || + S_ISFIFO(St->st_mode) != 0) && List.Tag != dsFList::tDeviceSpecial)) + { + return Delete(Dir,List.Entity->Name.c_str(),true) && GetNew(List,Dir); + } + + // First we check permissions and mod time + bool ModTime = (signed)(List.Entity->ModTime + List.Head.Epoch) == St->st_mtime; + bool Perm = true; + if ((List.Head.Flags[List.Tag] & dsFList::DirEntity::FlPerm) != 0) + Perm = 
List.Entity->Permissions == (unsigned)(St->st_mode & ~S_IFMT); + + // Normal file + if (List.Tag == dsFList::tNormalFile) + { + // Size mismatch is an immedate fail + if (List.NFile.Size != (unsigned)St->st_size) + return GetChanged(List,Dir); + + // Try to check the stored MD5 + if (HashLevel == Md5Always || + (HashLevel == Md5Date && ModTime == false)) + { + if ((List.Head.Flags[List.Tag] & dsFList::NormalFile::FlMD5) != 0) + { + if (CheckHash(List,Dir,List.NFile.MD5) == true) + return FixMeta(List,Dir,*St); + else + return GetChanged(List,Dir); + } + } + + // Look at the modification time + if (ModTime == true) + return FixMeta(List,Dir,*St); + return GetChanged(List,Dir); + } + + // Check symlinks + if (List.Tag == dsFList::tSymlink) + { + char Buf[1024]; + int Res = readlink(List.Entity->Name.c_str(),Buf,sizeof(Buf)); + if (Res > 0) + Buf[Res] = 0; + + // Link is invalid + if (Res < 0 || List.SLink.To != Buf) + return GetNew(List,Dir); + + return FixMeta(List,Dir,*St); + } + + // Check directories and dev special files + if (List.Tag == dsFList::tDirectory || List.Tag == dsFList::tDeviceSpecial || + List.Tag == dsFList::tDirMarker) + return FixMeta(List,Dir,*St); + + return true; +} + /*}}}*/ +// DirCompare::DirExists - See if the entry exists in our dir table /*{{{*/ +// --------------------------------------------------------------------- +/* We look at the dir table for one that exists */ +bool dsDirCompare::DirExists(string Name) +{ + for (unsigned int I = 0; I != IndexSize; I++) + { + if (Indexes[I] == 0) + continue; + if (Name == Names + Indexes[I]) + { + Indexes[I] = 0; + return true; + } + } + return false; +} + /*}}}*/ +// DirCompare::CheckHash - Check the MD5 of a entity /*{{{*/ +// --------------------------------------------------------------------- +/* This is invoked to see of the local file we have is the file the remote + says we should have. 
*/ +bool dsDirCompare::CheckHash(dsFList &List,string Dir,unsigned char MD5[16]) +{ + // Open the file + MD5Summation Sum; + FileFd Fd(List.Entity->Name,FileFd::ReadOnly); + if (_error->PendingError() == true) + return _error->Error("MD5 generation failed for %s%s",Dir.c_str(), + List.Entity->Name.c_str()); + + if (Sum.AddFD(Fd.Fd(),Fd.Size()) == false) + return _error->Error("MD5 generation failed for %s%s",Dir.c_str(), + List.Entity->Name.c_str()); + + unsigned char MyMD5[16]; + Sum.Result().Value(MyMD5); + + return memcmp(MD5,MyMD5,sizeof(MyMD5)) == 0; +} + /*}}}*/ +// DirCompare::FixMeta - Fix timestamps, ownership and permissions /*{{{*/ +// --------------------------------------------------------------------- +/* This checks if it is necessary to correct the timestamps, ownership and + permissions of an entity */ +bool dsDirCompare::FixMeta(dsFList &List,string Dir,struct stat &St) +{ + // Check the mod time + if (List.Tag != dsFList::tSymlink) + { + if ((signed)(List.Entity->ModTime + List.Head.Epoch) != St.st_mtime) + if (SetTime(List,Dir) == false) + return false; + + // Check the permissions + if ((List.Head.Flags[List.Tag] & dsFList::DirEntity::FlPerm) != 0) + { + if (List.Entity->Permissions != (St.st_mode & ~S_IFMT)) + if (SetPerm(List,Dir) == false) + return false; + } + } + + return true; +} + /*}}}*/ + +// DirCorrect::GetNew - Create a new entry /*{{{*/ +// --------------------------------------------------------------------- +/* We cannot create files but we do generate everything else. 
*/ +bool dsDirCorrect::GetNew(dsFList &List,string Dir) +{ + if (List.Tag == dsFList::tDirectory) + { + unsigned long PermDir = 0666; + if ((List.Head.Flags[List.Tag] & dsFList::DirEntity::FlPerm) != 0) + PermDir = List.Entity->Permissions; + + if (mkdir(List.Entity->Name.c_str(),PermDir) != 0) + return _error->Errno("mkdir","Unable to create directory, %s%s", + Dir.c_str(),List.Entity->Name.c_str()); + + // Stat the newly created file for FixMeta's benifit + struct stat St; + if (lstat(List.Entity->Name.c_str(),&St) != 0) + return _error->Errno("stat","Unable to stat directory, %s%s", + Dir.c_str(),List.Entity->Name.c_str()); + + return FixMeta(List,Dir,St); + } + + if (List.Tag == dsFList::tSymlink) + { + if (symlink(List.SLink.To.c_str(),List.Entity->Name.c_str()) != 0) + return _error->Errno("symlink","Unable to create symlink, %s%s", + Dir.c_str(),List.Entity->Name.c_str()); + + // Stat the newly created file for FixMeta's benifit + struct stat St; + if (lstat(List.Entity->Name.c_str(),&St) != 0) + return _error->Errno("stat","Unable to stat directory, %s%s", + Dir.c_str(),List.Entity->Name.c_str()); + + return FixMeta(List,Dir,St); + } + + if (List.Tag == dsFList::tDeviceSpecial) + { + unsigned long PermDev; + if ((List.Head.Flags[List.Tag] & dsFList::DirEntity::FlPerm) != 0) + PermDev = List.Entity->Permissions; + else + return _error->Error("Corrupted file list"); + + if (mknod(List.Entity->Name.c_str(),PermDev,List.DevSpecial.Dev) != 0) + return _error->Errno("mkdir","Unable to create directory, %s%s", + Dir.c_str(),List.Entity->Name.c_str()); + + // Stat the newly created file for FixMeta's benifit + struct stat St; + if (lstat(List.Entity->Name.c_str(),&St) != 0) + return _error->Errno("stat","Unable to stat directory, %s%s", + Dir.c_str(),List.Entity->Name.c_str()); + return FixMeta(List,Dir,St); + } +} + /*}}}*/ +// DirCorrect::DirUnlink - Unlink a directory /*{{{*/ +// --------------------------------------------------------------------- +/* This just 
recursively unlinks stuff */ +bool dsDirCorrect::DirUnlink(const char *Path) +{ + // Record what dir we were in + struct stat Dir; + if (lstat(".",&Dir) != 0) + return _error->Errno("lstat","Unable to stat .!"); + + if (chdir(Path) != 0) + return _error->Errno("chdir","Unable to change to %s",Path); + + // Scan the directory + DIR *DirSt = opendir("."); + if (DirSt == 0) + { + chdir(".."); + return _error->Errno("opendir","Unable to open directory %s",Path); + } + + // Erase this directory + struct dirent *Ent; + while ((Ent = readdir(DirSt)) != 0) + { + // Skip . and .. + if (strcmp(Ent->d_name,".") == 0 || + strcmp(Ent->d_name,"..") == 0) + continue; + + struct stat St; + if (lstat(Ent->d_name,&St) != 0) + return _error->Errno("stat","Unable to stat %s",Ent->d_name); + if (S_ISDIR(St.st_mode) == 0) + { + // Try to unlink the file + if (unlink(Ent->d_name) != 0) + { + chdir(".."); + return _error->Errno("unlink","Unable to remove file %s",Ent->d_name); + } + } + else + { + if (DirUnlink(Ent->d_name) == false) + { + chdir(".."); + closedir(DirSt); + return false; + } + } + } + closedir(DirSt); + chdir(".."); + + /* Make sure someone didn't screw with the directory layout while we + were erasing */ + struct stat Dir2; + if (lstat(".",&Dir2) != 0) + return _error->Errno("lstat","Unable to stat .!"); + if (Dir2.st_ino != Dir.st_ino || Dir2.st_dev != Dir.st_dev) + return _error->Error("Hey! Someone is fiddling with the dir tree as I erase it!"); + + if (rmdir(Path) != 0) + return _error->Errno("rmdir","Unable to remove directory %s",Ent->d_name); + + return true; +} + /*}}}*/ +// DirCorrect::Delete - Delete an entry /*{{{*/ +// --------------------------------------------------------------------- +/* This obliterates an entity - recursively, use with caution. 
*/ +bool dsDirCorrect::Delete(string Dir,const char *Name,bool Now) +{ + struct stat St; + if (lstat(Name,&St) != 0) + return _error->Errno("stat","Unable to stat %s%s",Dir.c_str(),Name); + + if (S_ISDIR(St.st_mode) == 0) + { + if (unlink(Name) != 0) + return _error->Errno("unlink","Unable to remove %s%s",Dir.c_str(),Name); + } + else + { + if (DirUnlink(Name) == false) + return _error->Error("Unable to erase directory %s%s",Dir.c_str(),Name); + } + return true; +} + /*}}}*/ +// DirCorrect::GetChanged - Get a changed entry /*{{{*/ +// --------------------------------------------------------------------- +/* This is only called for normal files, we cannot do anything here. */ +bool dsDirCorrect::GetChanged(dsFList &List,string Dir) +{ + return true; +} + /*}}}*/ +// DirCorrect::SetTime - Change the timestamp /*{{{*/ +// --------------------------------------------------------------------- +/* This fixes the mod time of the file */ +bool dsDirCorrect::SetTime(dsFList &List,string Dir) +{ + struct utimbuf Time; + Time.actime = Time.modtime = List.Entity->ModTime + List.Head.Epoch; + if (utime(List.Entity->Name.c_str(),&Time) != 0) + return _error->Errno("utimes","Unable to change mod time for %s%s", + Dir.c_str(),List.Entity->Name.c_str()); + return true; +} + /*}}}*/ +// DirCorrect::SetPerm - Change the permissions /*{{{*/ +// --------------------------------------------------------------------- +/* This fixes the permissions */ +bool dsDirCorrect::SetPerm(dsFList &List,string Dir) +{ + if (chmod(List.Entity->Name.c_str(),List.Entity->Permissions) != 0) + return _error->Errno("chmod","Unable to change permissions for %s%s", + Dir.c_str(),List.Entity->Name.c_str()); + return true; +} + /*}}}*/ +// Dircorrect::SetOwner - Change ownership /*{{{*/ +// --------------------------------------------------------------------- +/* This fixes the file ownership */ +bool dsDirCorrect::SetOwners(dsFList &List,string Dir) +{ + return _error->Error("Ownership is not yet supported"); 
+} + /*}}}*/ + diff --git a/tools/dsync-0.0/libdsync/compare.h b/tools/dsync-0.0/libdsync/compare.h new file mode 100644 index 00000000..547137f7 --- /dev/null +++ b/tools/dsync-0.0/libdsync/compare.h @@ -0,0 +1,86 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: compare.h,v 1.3 1999/01/17 22:00:51 jgg Exp $ +/* ###################################################################### + + Compare a file list with a local directory + + The Compare class looks at the file list and then generates events + to cause the local directory tree to become syncronized with the + remote tree. + + The Correct class takes the events and applies them to the local tree. + It only applies information that is stored in the file list, another + class will have to hook the events to actually fetch files for download. + + ##################################################################### */ + /*}}}*/ +#ifndef DSYNC_COMPARE +#define DSYNC_COMPARE + +#ifdef __GNUG__ +#pragma interface "dsync/compare.h" +#endif + +#include + +class dsDirCompare +{ + unsigned int IndexSize; + unsigned int IndexAlloc; + unsigned int *Indexes; + unsigned int NameAlloc; + char *Names; + + protected: + + // Location of the tree + string Base; + + // Scan helpers + bool LoadDir(); + bool DoDelete(string Dir); + bool Fetch(dsFList &List,string Dir,struct stat *St); + bool DirExists(string Name); + virtual bool CheckHash(dsFList &List,string Dir,unsigned char MD5[16]); + virtual bool FixMeta(dsFList &List,string Dir,struct stat &St); + virtual bool Visit(dsFList &List,string Dir) {return true;}; + + // Derived classes can hook these to actuall make them do something + virtual bool GetNew(dsFList &List,string Dir) {return true;}; + virtual bool Delete(string Dir,const char *Name,bool Now = false) {return true;}; + virtual bool GetChanged(dsFList &List,string Dir) {return true;}; + virtual bool SetTime(dsFList &List,string Dir) {return true;}; + virtual bool SetPerm(dsFList &List,string Dir) 
{return true;}; + virtual bool SetOwners(dsFList &List,string Dir) {return true;}; + + public: + + bool Verify; + enum {Md5Never, Md5Date, Md5Always} HashLevel; + + bool Process(string Base,dsFList::IO &IO); + + dsDirCompare(); + virtual ~dsDirCompare(); +}; + +class dsDirCorrect : public dsDirCompare +{ + bool DirUnlink(const char *Path); + + protected: + + // Derived classes can hook these to actuall make them do something + virtual bool GetNew(dsFList &List,string Dir); + virtual bool Delete(string Dir,const char *Name,bool Now = false); + virtual bool GetChanged(dsFList &List,string Dir); + virtual bool SetTime(dsFList &List,string Dir); + virtual bool SetPerm(dsFList &List,string Dir); + virtual bool SetOwners(dsFList &List,string Dir); + + public: + +}; + +#endif diff --git a/tools/dsync-0.0/libdsync/contrib/bitmap.cc b/tools/dsync-0.0/libdsync/contrib/bitmap.cc new file mode 100644 index 00000000..87d87b7d --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/bitmap.cc @@ -0,0 +1,40 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: bitmap.cc,v 1.1 1999/11/05 05:47:06 jgg Exp $ +/* ###################################################################### + + Bitmap - A trivial class to implement an 1 bit per element boolean + vector + + This is deliberately extremely light weight so that it is fast for + the client. 
+ + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/bitmap.h" +#endif + +#include + +#include + /*}}}*/ + +// BitmapVector::BitmapVector - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* Allocate just enough bytes and 0 it */ +BitmapVector::BitmapVector(unsigned long Size) : Size(Size) +{ + Vect = new unsigned long[Bytes()]; + memset(Vect,0,Bytes()); +} + /*}}}*/ +// BitmapVector::~BitmapVector - Destructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +BitmapVector::~BitmapVector() +{ + delete [] Vect; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/contrib/bitmap.h b/tools/dsync-0.0/libdsync/contrib/bitmap.h new file mode 100644 index 00000000..9859673b --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/bitmap.h @@ -0,0 +1,49 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: bitmap.h,v 1.1 1999/11/05 05:47:06 jgg Exp $ +/* ###################################################################### + + Bitmap - A trivial class to implement an 1 bit per element boolean + vector + + This is deliberately extremely light weight so that it is fast for + the client. + + ##################################################################### */ + /*}}}*/ +#ifndef DSYNC_BITMAP +#define DSYNC_BITMAP + +#ifdef __GNUG__ +#pragma interface "dsync/bitmap.h" +#endif + +class BitmapVector +{ + unsigned long *Vect; + unsigned long Size; + + #define BITMAPVECTOR_SIZE sizeof(unsigned long)*8 + + // Compute the necessary size of the vector in bytes. 
+ inline unsigned Bytes() {return (Size + BITMAPVECTOR_SIZE - 1)/BITMAPVECTOR_SIZE;}; + + public: + + inline void Set(unsigned long Elm) + {Vect[Elm/BITMAPVECTOR_SIZE] |= 1 << (Elm%BITMAPVECTOR_SIZE);}; + inline bool Get(unsigned long Elm) + {return (Vect[Elm/BITMAPVECTOR_SIZE] & (1 << (Elm%BITMAPVECTOR_SIZE))) != 0;}; + inline void Set(unsigned long Elm,bool To) + { + if (To) + Vect[Elm/BITMAPVECTOR_SIZE] |= 1 << (Elm%BITMAPVECTOR_SIZE); + else + Vect[Elm/BITMAPVECTOR_SIZE] &= ~(1 << (Elm%BITMAPVECTOR_SIZE)); + }; + + BitmapVector(unsigned long Size); + ~BitmapVector(); +}; + +#endif diff --git a/tools/dsync-0.0/libdsync/contrib/cmndline.cc b/tools/dsync-0.0/libdsync/contrib/cmndline.cc new file mode 100644 index 00000000..8dcdfa9f --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/cmndline.cc @@ -0,0 +1,347 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: cmndline.cc,v 1.6 1999/11/17 05:59:29 jgg Exp $ +/* ###################################################################### + + Command Line Class - Sophisticated command line parser + + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/cmndline.h" +#endif +#include +#include +#include + /*}}}*/ + +// CommandLine::CommandLine - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +CommandLine::CommandLine(Args *AList,Configuration *Conf) : ArgList(AList), + Conf(Conf), FileList(0) +{ +} + /*}}}*/ +// CommandLine::~CommandLine - Destructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +CommandLine::~CommandLine() +{ + delete [] FileList; +} + /*}}}*/ +// CommandLine::Parse - Main action member /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool CommandLine::Parse(int argc,const char **argv) +{ + delete [] FileList; + FileList = new const char *[argc]; + 
const char **Files = FileList; + int I; + for (I = 1; I != argc; I++) + { + const char *Opt = argv[I]; + + // It is not an option + if (*Opt != '-') + { + *Files++ = Opt; + continue; + } + + Opt++; + + // Double dash signifies the end of option processing + if (*Opt == '-' && Opt[1] == 0) + break; + + // Single dash is a short option + if (*Opt != '-') + { + // Iterate over each letter + while (*Opt != 0) + { + // Search for the option + Args *A; + for (A = ArgList; A->end() == false && A->ShortOpt != *Opt; A++); + if (A->end() == true) + return _error->Error("Command line option '%c' [from %s] is not known.",*Opt,argv[I]); + + if (HandleOpt(I,argc,argv,Opt,A) == false) + return false; + if (*Opt != 0) + Opt++; + } + continue; + } + + Opt++; + + // Match up to a = against the list + const char *OptEnd = Opt; + Args *A; + for (; *OptEnd != 0 && *OptEnd != '='; OptEnd++); + for (A = ArgList; A->end() == false && + stringcasecmp(Opt,OptEnd,A->LongOpt) != 0; A++); + + // Failed, look for a word after the first - (no-foo) + bool PreceedMatch = false; + if (A->end() == true) + { + for (; Opt != OptEnd && *Opt != '-'; Opt++); + + if (Opt == OptEnd) + return _error->Error("Command line option %s is not understood",argv[I]); + Opt++; + + for (A = ArgList; A->end() == false && + stringcasecmp(Opt,OptEnd,A->LongOpt) != 0; A++); + + // Failed again.. + if (A->end() == true && OptEnd - Opt != 1) + return _error->Error("Command line option %s is not understood",argv[I]); + + // The option could be a single letter option prefixed by a no-.. + if (A->end() == true) + { + for (A = ArgList; A->end() == false && A->ShortOpt != *Opt; A++); + + if (A->end() == true) + return _error->Error("Command line option %s is not understood",argv[I]); + } + + // The option is not boolean + if (A->IsBoolean() == false) + return _error->Error("Command line option %s is not boolean",argv[I]); + PreceedMatch = true; + } + + // Deal with it. 
+ OptEnd--; + if (HandleOpt(I,argc,argv,OptEnd,A,PreceedMatch) == false) + return false; + } + + // Copy any remaining file names over + for (; I != argc; I++) + *Files++ = argv[I]; + *Files = 0; + + return true; +} + /*}}}*/ +// CommandLine::HandleOpt - Handle a single option including all flags /*{{{*/ +// --------------------------------------------------------------------- +/* This is a helper function for parser, it looks at a given argument + and looks for specific patterns in the string, it gets tokanized + -ruffly- like -*[yes|true|enable]-(o|longopt)[=][ ][argument] */ +bool CommandLine::HandleOpt(int &I,int argc,const char *argv[], + const char *&Opt,Args *A,bool PreceedMatch) +{ + const char *Argument = 0; + bool CertainArg = false; + int IncI = 0; + + /* Determine the possible location of an option or 0 if their is + no option */ + if (Opt[1] == 0 || (Opt[1] == '=' && Opt[2] == 0)) + { + if (I + 1 < argc && argv[I+1][0] != '-') + Argument = argv[I+1]; + + // Equals was specified but we fell off the end! 
+ if (Opt[1] == '=' && Argument == 0) + return _error->Error("Option %s requires an argument.",argv[I]); + if (Opt[1] == '=') + CertainArg = true; + + IncI = 1; + } + else + { + if (Opt[1] == '=') + { + CertainArg = true; + Argument = Opt + 2; + } + else + Argument = Opt + 1; + } + + // Option is an argument set + if ((A->Flags & HasArg) == HasArg) + { + if (Argument == 0) + return _error->Error("Option %s requires an argument.",argv[I]); + Opt += strlen(Opt); + I += IncI; + + // Parse a configuration file + if ((A->Flags & ConfigFile) == ConfigFile) + return ReadConfigFile(*Conf,Argument); + + // Arbitary item specification + if ((A->Flags & ArbItem) == ArbItem) + { + const char *J; + for (J = Argument; *J != 0 && *J != '='; J++); + if (*J == 0) + return _error->Error("Option %s: Configuration item sepecification must have an =.",argv[I]); + + // = is trailing + if (J[1] == 0) + { + if (I+1 >= argc) + return _error->Error("Option %s: Configuration item sepecification must have an =.",argv[I]); + Conf->Set(string(Argument,J-Argument),string(argv[I++ +1])); + } + else + Conf->Set(string(Argument,J-Argument),string(J+1)); + + return true; + } + + const char *I = A->ConfName; + for (; *I != 0 && *I != ' '; I++); + if (*I == ' ') + Conf->Set(string(A->ConfName,0,I-A->ConfName),string(I+1) + Argument); + else + Conf->Set(A->ConfName,string(I) + Argument); + + return true; + } + + // Option is an integer level + if ((A->Flags & IntLevel) == IntLevel) + { + // There might be an argument + if (Argument != 0) + { + char *EndPtr; + unsigned long Value = strtol(Argument,&EndPtr,10); + + // Conversion failed and the argument was specified with an =s + if (EndPtr == Argument && CertainArg == true) + return _error->Error("Option %s requires an integer argument, not '%s'",argv[I],Argument); + + // Conversion was ok, set the value and return + if (EndPtr != 0 && EndPtr != Argument && *EndPtr == 0) + { + Conf->Set(A->ConfName,Value); + Opt += strlen(Opt); + I += IncI; + return 
true; + } + } + + // Increase the level + Conf->Set(A->ConfName,Conf->FindI(A->ConfName)+1); + return true; + } + + // Option is a boolean + int Sense = -1; // -1 is unspecified, 0 is yes 1 is no + + // Look for an argument. + while (1) + { + // Look at preceeding text + char Buffer[300]; + if (Argument == 0) + { + if (PreceedMatch == false) + break; + + if (strlen(argv[I]) >= sizeof(Buffer)) + return _error->Error("Option '%s' is too long",argv[I]); + + // Skip the leading dash + const char *J = argv[I]; + for (; *J != 0 && *J == '-'; J++); + + const char *JEnd = J; + for (; *JEnd != 0 && *JEnd != '-'; JEnd++); + if (*JEnd != 0) + { + strncpy(Buffer,J,JEnd - J); + Buffer[JEnd - J] = 0; + Argument = Buffer; + CertainArg = true; + } + else + break; + } + + // Check for boolean + Sense = StringToBool(Argument); + if (Sense >= 0) + { + // Eat the argument + if (Argument != Buffer) + { + Opt += strlen(Opt); + I += IncI; + } + break; + } + + if (CertainArg == true) + return _error->Error("Sense %s is not understood, try true or false.",Argument); + + Argument = 0; + } + + // Indeterminate sense depends on the flag + if (Sense == -1) + { + if ((A->Flags & InvBoolean) == InvBoolean) + Sense = 0; + else + Sense = 1; + } + + Conf->Set(A->ConfName,Sense); + return true; +} + /*}}}*/ +// CommandLine::FileSize - Count the number of filenames /*{{{*/ +// --------------------------------------------------------------------- +/* */ +unsigned int CommandLine::FileSize() const +{ + unsigned int Count = 0; + for (const char **I = FileList; I != 0 && *I != 0; I++) + Count++; + return Count; +} + /*}}}*/ +// CommandLine::DispatchArg - Do something with the first arg /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool CommandLine::DispatchArg(Dispatch *Map,bool NoMatch) +{ + int I; + for (I = 0; Map[I].Match != 0; I++) + { + if (strcmp(FileList[0],Map[I].Match) == 0) + { + bool Res = Map[I].Handler(*this); + if (Res == false && 
_error->PendingError() == false) + _error->Error("Handler silently failed"); + return Res; + } + } + + // No matching name + if (Map[I].Match == 0) + { + if (NoMatch == true) + _error->Error("Invalid operation %s",FileList[0]); + } + + return false; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/contrib/cmndline.h b/tools/dsync-0.0/libdsync/contrib/cmndline.h new file mode 100644 index 00000000..ca8ba94d --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/cmndline.h @@ -0,0 +1,103 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: cmndline.h,v 1.2 1998/12/29 04:38:09 jgg Exp $ +/* ###################################################################### + + Command Line Class - Sophisticated command line parser + + This class provides a unified command line parser/option handliner/ + configuration mechanism. It allows the caller to specify the option + set and map the option set into the configuration class or other + special functioning. + + Filenames are stripped from the option stream and put into their + own array. + + The argument descriptor array can be initialized as: + + CommandLine::Args Args[] = + {{'q',"quiet","apt::get::quiet",CommandLine::IntLevel}, + {0,0,0,0,0}}; + + The flags mean, + HasArg - Means the argument has a value + IntLevel - Means the argument is an integer level indication, the + following -qqqq (+3) -q5 (=5) -q=5 (=5) are valid + Boolean - Means it is true/false or yes/no. + -d (true) --no-d (false) --yes-d (true) + --long (true) --no-long (false) --yes-long (true) + -d=yes (true) -d=no (false) Words like enable, disable, + true false, yes no and on off are recognized in logical + places. + InvBoolean - Same as boolean but the case with no specified sense + (first case) is set to false. + ConfigFile - Means this flag should be interprited as the name of + a config file to read in at this point in option processing. + Implies HasArg. 
+ The default, if the flags are 0 is to use Boolean + + ##################################################################### */ + /*}}}*/ +#ifndef PKGLIB_CMNDLINE_H +#define PKGLIB_CMNDLINE_H + +#ifdef __GNUG__ +#pragma interface "dsync/cmndline.h" +#endif + +#include + +class CommandLine +{ + public: + struct Args; + struct Dispatch; + + protected: + + Args *ArgList; + Configuration *Conf; + bool HandleOpt(int &I,int argc,const char *argv[], + const char *&Opt,Args *A,bool PreceedeMatch = false); + + public: + + enum AFlags + { + HasArg = (1 << 0), + IntLevel = (1 << 1), + Boolean = (1 << 2), + InvBoolean = (1 << 3), + ConfigFile = (1 << 4) | HasArg, + ArbItem = (1 << 5) | HasArg + }; + + const char **FileList; + + bool Parse(int argc,const char **argv); + void ShowHelp(); + unsigned int FileSize() const; + bool DispatchArg(Dispatch *List,bool NoMatch = true); + + CommandLine(Args *AList,Configuration *Conf); + ~CommandLine(); +}; + +struct CommandLine::Args +{ + char ShortOpt; + const char *LongOpt; + const char *ConfName; + unsigned long Flags; + + inline bool end() {return ShortOpt == 0 && LongOpt == 0;}; + inline bool IsBoolean() {return Flags == 0 || (Flags & (Boolean|InvBoolean)) != 0;}; +}; + +struct CommandLine::Dispatch +{ + const char *Match; + bool (*Handler)(CommandLine &); +}; + +#endif diff --git a/tools/dsync-0.0/libdsync/contrib/configuration.cc b/tools/dsync-0.0/libdsync/contrib/configuration.cc new file mode 100644 index 00000000..4ce52476 --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/configuration.cc @@ -0,0 +1,456 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: configuration.cc,v 1.5 1999/11/17 05:59:29 jgg Exp $ +/* ###################################################################### + + Configuration Class + + This class provides a configuration file and command line parser + for a tree-oriented configuration environment. All runtime configuration + is stored in here. 
+ + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/configuration.h" +#endif +#include +#include +#include + +#include +#include +#include +using namespace std; + /*}}}*/ + +Configuration *_config = new Configuration; + +// Configuration::Configuration - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +Configuration::Configuration() +{ + Root = new Item; +} + /*}}}*/ +// Configuration::Lookup - Lookup a single item /*{{{*/ +// --------------------------------------------------------------------- +/* This will lookup a single item by name below another item. It is a + helper function for the main lookup function */ +Configuration::Item *Configuration::Lookup(Item *Head,const char *S, + unsigned long Len,bool Create) +{ + int Res = 1; + Item *I = Head->Child; + Item **Last = &Head->Child; + + // Empty strings match nothing. They are used for lists. 
+ if (Len != 0) + { + for (; I != 0; Last = &I->Next, I = I->Next) + if ((Res = stringcasecmp(I->Tag.c_str(), I->Tag.c_str() + strlen(I->Tag.c_str()),S,S + Len)) == 0) + break; + } + else + for (; I != 0; Last = &I->Next, I = I->Next); + + if (Res == 0) + return I; + if (Create == false) + return 0; + + I = new Item; + I->Tag = string(S,Len); + I->Next = *Last; + I->Parent = Head; + *Last = I; + return I; +} + /*}}}*/ +// Configuration::Lookup - Lookup a fully scoped item /*{{{*/ +// --------------------------------------------------------------------- +/* This performs a fully scoped lookup of a given name, possibly creating + new items */ +Configuration::Item *Configuration::Lookup(const char *Name,bool Create) +{ + if (Name == 0) + return Root->Child; + + const char *Start = Name; + const char *End = Start + strlen(Name); + const char *TagEnd = Name; + Item *Itm = Root; + for (; End - TagEnd >= 2; TagEnd++) + { + if (TagEnd[0] == ':' && TagEnd[1] == ':') + { + Itm = Lookup(Itm,Start,TagEnd - Start,Create); + if (Itm == 0) + return 0; + TagEnd = Start = TagEnd + 2; + } + } + + // This must be a trailing ::, we create unique items in a list + if (End - Start == 0) + { + if (Create == false) + return 0; + } + + Itm = Lookup(Itm,Start,End - Start,Create); + return Itm; +} + /*}}}*/ +// Configuration::Find - Find a value /*{{{*/ +// --------------------------------------------------------------------- +/* */ +string Configuration::Find(const char *Name,const char *Default) +{ + Item *Itm = Lookup(Name,false); + if (Itm == 0 || Itm->Value.empty() == true) + { + if (Default == 0) + return string(); + else + return Default; + } + + return Itm->Value; +} + /*}}}*/ +// Configuration::FindFile - Find a Filename /*{{{*/ +// --------------------------------------------------------------------- +/* Directories are stored as the base dir in the Parent node and the + sub directory in sub nodes with the final node being the end filename + */ +string Configuration::FindFile(const 
char *Name,const char *Default) +{ + Item *Itm = Lookup(Name,false); + if (Itm == 0 || Itm->Value.empty() == true) + { + if (Default == 0) + return string(); + else + return Default; + } + + // Absolute path + if (Itm->Value[0] == '/' || Itm->Parent == 0) + return Itm->Value; + + // ./ is also considered absolute as is anything with ~ in it + if (Itm->Value[0] != 0 && + ((Itm->Value[0] == '.' && Itm->Value[1] == '/') || + (Itm->Value[0] == '~' && Itm->Value[1] == '/'))) + return Itm->Value; + + if (Itm->Parent->Value.end()[-1] == '/') + return Itm->Parent->Value + Itm->Value; + else + return Itm->Parent->Value + '/' + Itm->Value; +} + /*}}}*/ +// Configuration::FindDir - Find a directory name /*{{{*/ +// --------------------------------------------------------------------- +/* This is like findfile execept the result is terminated in a / */ +string Configuration::FindDir(const char *Name,const char *Default) +{ + string Res = FindFile(Name,Default); + if (Res.end()[-1] != '/') + return Res + '/'; + return Res; +} + /*}}}*/ +// Configuration::FindI - Find an integer value /*{{{*/ +// --------------------------------------------------------------------- +/* */ +int Configuration::FindI(const char *Name,int Default) +{ + Item *Itm = Lookup(Name,false); + if (Itm == 0 || Itm->Value.empty() == true) + return Default; + + char *End; + int Res = strtol(Itm->Value.c_str(),&End,0); + if (End == Itm->Value.c_str()) + return Default; + + return Res; +} + /*}}}*/ +// Configuration::FindB - Find a boolean type /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool Configuration::FindB(const char *Name,bool Default) +{ + Item *Itm = Lookup(Name,false); + if (Itm == 0 || Itm->Value.empty() == true) + return Default; + + return StringToBool(Itm->Value,Default); +} + /*}}}*/ +// Configuration::Set - Set a value /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void Configuration::Set(const char 
*Name,string Value) +{ + Item *Itm = Lookup(Name,true); + if (Itm == 0) + return; + Itm->Value = Value; +} + /*}}}*/ +// Configuration::Set - Set an integer value /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void Configuration::Set(const char *Name,int Value) +{ + Item *Itm = Lookup(Name,true); + if (Itm == 0) + return; + char S[300]; + snprintf(S,sizeof(S),"%i",Value); + Itm->Value = S; +} + /*}}}*/ +// Configuration::Exists - Returns true if the Name exists /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool Configuration::Exists(const char *Name) +{ + Item *Itm = Lookup(Name,false); + if (Itm == 0) + return false; + return true; +} + /*}}}*/ +// Configuration::Dump - Dump the config /*{{{*/ +// --------------------------------------------------------------------- +/* Dump the entire configuration space */ +void Configuration::Dump() +{ + /* Write out all of the configuration directives by walking the + configuration tree */ + const Configuration::Item *Top = _config->Tree(0); + for (; Top != 0;) + { + clog << Top->FullTag() << " \"" << Top->Value << "\";" << endl; + + if (Top->Child != 0) + { + Top = Top->Child; + continue; + } + + while (Top != 0 && Top->Next == 0) + Top = Top->Parent; + if (Top != 0) + Top = Top->Next; + } +} + /*}}}*/ + +// Configuration::Item::FullTag - Return the fully scoped tag /*{{{*/ +// --------------------------------------------------------------------- +/* */ +string Configuration::Item::FullTag() const +{ + if (Parent == 0 || Parent->Parent == 0) + return Tag; + return Parent->FullTag() + "::" + Tag; +} + /*}}}*/ + +// ReadConfigFile - Read a configuration file /*{{{*/ +// --------------------------------------------------------------------- +/* The configuration format is very much like the named.conf format + used in bind8, in fact this routine can parse most named.conf files. 
*/ +bool ReadConfigFile(Configuration &Conf,string FName) +{ + // Open the stream for reading + ifstream F(FName.c_str(),ios::in); + if (!F != 0) + return _error->Errno("ifstream::ifstream","Opening configuration file %s",FName.c_str()); + + char Buffer[300]; + string LineBuffer; + string Stack[100]; + unsigned int StackPos = 0; + + // Parser state + string ParentTag; + + int CurLine = 0; + bool InComment = false; + while (F.eof() == false) + { + F.getline(Buffer,sizeof(Buffer)); + CurLine++; + _strtabexpand(Buffer,sizeof(Buffer)); + _strstrip(Buffer); + + // Multi line comment + if (InComment == true) + { + for (const char *I = Buffer; *I != 0; I++) + { + if (*I == '*' && I[1] == '/') + { + memmove(Buffer,I+2,strlen(I+2) + 1); + InComment = false; + break; + } + } + if (InComment == true) + continue; + } + + // Discard single line comments + bool InQuote = false; + for (char *I = Buffer; *I != 0; I++) + { + if (*I == '"') + InQuote = !InQuote; + if (InQuote == true) + continue; + + if (*I == '/' && I[1] == '/') + { + *I = 0; + break; + } + } + + // Look for multi line comments + for (char *I = Buffer; *I != 0; I++) + { + if (*I == '"') + InQuote = !InQuote; + if (InQuote == true) + continue; + + if (*I == '/' && I[1] == '*') + { + InComment = true; + for (char *J = Buffer; *J != 0; J++) + { + if (*J == '*' && J[1] == '/') + { + memmove(I,J+2,strlen(J+2) + 1); + InComment = false; + break; + } + } + + if (InComment == true) + { + *I = 0; + break; + } + } + } + + // Blank + if (Buffer[0] == 0) + continue; + + // We now have a valid line fragment + for (char *I = Buffer; *I != 0;) + { + if (*I == '{' || *I == ';' || *I == '}') + { + // Put the last fragement into the buffer + char *Start = Buffer; + char *Stop = I; + for (; Start != I && isspace(*Start) != 0; Start++); + for (; Stop != Start && isspace(Stop[-1]) != 0; Stop--); + if (LineBuffer.empty() == false && Stop - Start != 0) + LineBuffer += ' '; + LineBuffer += string(Start,Stop - Start); + + // Remove the 
fragment + char TermChar = *I; + memmove(Buffer,I + 1,strlen(I + 1) + 1); + I = Buffer; + + // Move up a tag + if (TermChar == '}') + { + if (StackPos == 0) + ParentTag = string(); + else + ParentTag = Stack[--StackPos]; + } + + // Syntax Error + if (TermChar == '{' && LineBuffer.empty() == true) + return _error->Error("Syntax error %s:%u: Block starts with no name.",FName.c_str(),CurLine); + + if (LineBuffer.empty() == true) + continue; + + // Parse off the tag + string Tag; + const char *Pos = LineBuffer.c_str(); + if (ParseQuoteWord(Pos,Tag) == false) + return _error->Error("Syntax error %s:%u: Malformed Tag",FName.c_str(),CurLine); + + // Go down a level + if (TermChar == '{') + { + if (StackPos <= 100) + Stack[StackPos++] = ParentTag; + if (ParentTag.empty() == true) + ParentTag = Tag; + else + ParentTag += string("::") + Tag; + Tag = string(); + } + + // Parse off the word + string Word; + if (ParseCWord(Pos,Word) == false) + { + if (TermChar != '{') + { + Word = Tag; + Tag = ""; + } + } + + // Generate the item name + string Item; + if (ParentTag.empty() == true) + Item = Tag; + else + { + if (TermChar != '{' || Tag.empty() == false) + Item = ParentTag + "::" + Tag; + else + Item = ParentTag; + } + + // Set the item in the configuration class + Conf.Set(Item,Word); + + // Empty the buffer + LineBuffer = string(); + } + else + I++; + } + + // Store the fragment + const char *Stripd = _strstrip(Buffer); + if (*Stripd != 0 && LineBuffer.empty() == false) + LineBuffer += " "; + LineBuffer += Stripd; + } + + return true; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/contrib/configuration.h b/tools/dsync-0.0/libdsync/contrib/configuration.h new file mode 100644 index 00000000..10bd9093 --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/configuration.h @@ -0,0 +1,88 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: configuration.h,v 1.4 1999/10/24 06:53:12 jgg Exp $ +/* 
###################################################################### + + Configuration Class + + This class provides a configuration file and command line parser + for a tree-oriented configuration environment. All runtime configuration + is stored in here. + + Each configuration name is given as a fully scoped string such as + Foo::Bar + And has associated with it a text string. The Configuration class only + provides storage and lookup for this tree, other classes provide + configuration file formats (and parsers/emitters if needed). + + Most things can get by quite happily with, + cout << _config->Find("Foo::Bar") << endl; + + A special extension, support for ordered lists is provided by using the + special syntax, "block::list::" the trailing :: designates the + item as a list. To access the list you must use the tree function on + "block::list". + + ##################################################################### */ + /*}}}*/ +#ifndef PKGLIB_CONFIGURATION_H +#define PKGLIB_CONFIGURATION_H + +#ifdef __GNUG__ +#pragma interface "dsync/configuration.h" +#endif + +#include +using namespace std; + +class Configuration +{ +public: + struct Item + { + string Value; + string Tag; + Item *Parent; + Item *Child; + Item *Next; + + string FullTag() const; + + Item() : Parent(0), Child(0), Next(0) {}; + }; +private: + Item *Root; + + Item *Lookup(Item *Head,const char *S,unsigned long Len,bool Create); + Item *Lookup(const char *Name,bool Create); + + public: + + string Find(const char *Name,const char *Default = 0); + string Find(string Name,const char *Default = 0) {return Find(Name.c_str(),Default);}; + string FindFile(const char *Name,const char *Default = 0); + string FindDir(const char *Name,const char *Default = 0); + int FindI(const char *Name,int Default = 0); +// int FindI(string Name,bool Default = 0) {return FindI(Name.c_str(),Default);}; + bool FindB(const char *Name,bool Default = false); +// bool FindB(string Name,bool Default = false) {return 
FindB(Name.c_str(),Default);}; + + inline void Set(string Name,string Value) {Set(Name.c_str(),Value);}; + void Set(const char *Name,string Value); + void Set(const char *Name,int Value); + + inline bool Exists(string Name) {return Exists(Name.c_str());}; + bool Exists(const char *Name); + + inline const Item *Tree(const char *Name) {return Lookup(Name,false);}; + + void Dump(); + + Configuration(); +}; + +extern Configuration *_config; + +bool ReadConfigFile(Configuration &Conf,string File); + +#endif diff --git a/tools/dsync-0.0/libdsync/contrib/error.cc b/tools/dsync-0.0/libdsync/contrib/error.cc new file mode 100644 index 00000000..4d48b82a --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/error.cc @@ -0,0 +1,242 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: error.cc,v 1.4 1999/01/19 04:41:43 jgg Exp $ +/* ###################################################################### + + Global Erorr Class - Global error mechanism + + We use a simple STL vector to store each error record. A PendingFlag + is kept which indicates when the vector contains a Sever error. + + This source is placed in the Public Domain, do with it what you will + It was originally written by Jason Gunthorpe. + + ##################################################################### */ + /*}}}*/ +// Include Files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/error.h" +#endif + +#include + +#include +#include +#include +#include +#include +#include + +using namespace std; + /*}}}*/ + +// Global Error Object /*{{{*/ +/* If the implementation supports posix threads then the accessor function + is compiled to be thread safe otherwise a non-safe version is used. 
A + Per-Thread error object is maintained in much the same manner as libc + manages errno */ +#if _POSIX_THREADS == 1 + #include + + static pthread_key_t ErrorKey; + static bool GotKey = false; + static void ErrorDestroy(void *Obj) {delete (GlobalError *)Obj;}; + static void KeyAlloc() {GotKey = true; + pthread_key_create(&ErrorKey,ErrorDestroy);}; + + GlobalError *_GetErrorObj() + { + static pthread_once_t Once = PTHREAD_ONCE_INIT; + pthread_once(&Once,KeyAlloc); + + /* Solaris has broken pthread_once support, isn't that nice? Thus + we create a race condition for such defective systems here. */ + if (GotKey == false) + KeyAlloc(); + + void *Res = pthread_getspecific(ErrorKey); + if (Res == 0) + pthread_setspecific(ErrorKey,Res = new GlobalError); + return (GlobalError *)Res; + } +#else + GlobalError *_GetErrorObj() + { + static GlobalError *Obj = new GlobalError; + return Obj; + } +#endif + /*}}}*/ + +// GlobalError::GlobalError - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +GlobalError::GlobalError() : List(0), PendingFlag(false) +{ +} + /*}}}*/ +// GlobalError::Errno - Get part of the error string from errno /*{{{*/ +// --------------------------------------------------------------------- +/* Function indicates the stdlib function that failed and Description is + a user string that leads the text. Form is: + Description - Function (errno: strerror) + Carefull of the buffer overrun, sprintf. + */ +bool GlobalError::Errno(const char *Function,const char *Description,...) 
+{ + va_list args; + va_start(args,Description); + + // sprintf the description + char S[400]; + vsnprintf(S,sizeof(S),Description,args); + snprintf(S + strlen(S),sizeof(S) - strlen(S), + " - %s (%i %s)",Function,errno,strerror(errno)); + + // Put it on the list + Item *Itm = new Item; + Itm->Text = S; + Itm->Error = true; + Insert(Itm); + + PendingFlag = true; + + return false; +} + /*}}}*/ +// GlobalError::WarningE - Get part of the warn string from errno /*{{{*/ +// --------------------------------------------------------------------- +/* Function indicates the stdlib function that failed and Description is + a user string that leads the text. Form is: + Description - Function (errno: strerror) + Carefull of the buffer overrun, sprintf. + */ +bool GlobalError::WarningE(const char *Function,const char *Description,...) +{ + va_list args; + va_start(args,Description); + + // sprintf the description + char S[400]; + vsnprintf(S,sizeof(S),Description,args); + snprintf(S + strlen(S),sizeof(S) - strlen(S)," - %s (%i %s)",Function,errno,strerror(errno)); + + // Put it on the list + Item *Itm = new Item; + Itm->Text = S; + Itm->Error = false; + Insert(Itm); + + return false; +} + /*}}}*/ +// GlobalError::Error - Add an error to the list /*{{{*/ +// --------------------------------------------------------------------- +/* Just vsprintfs and pushes */ +bool GlobalError::Error(const char *Description,...) +{ + va_list args; + va_start(args,Description); + + // sprintf the description + char S[400]; + vsnprintf(S,sizeof(S),Description,args); + + // Put it on the list + Item *Itm = new Item; + Itm->Text = S; + Itm->Error = true; + Insert(Itm); + + PendingFlag = true; + + return false; +} + /*}}}*/ +// GlobalError::Warning - Add a warning to the list /*{{{*/ +// --------------------------------------------------------------------- +/* This doesn't set the pending error flag */ +bool GlobalError::Warning(const char *Description,...) 
+{ + va_list args; + va_start(args,Description); + + // sprintf the description + char S[400]; + vsnprintf(S,sizeof(S),Description,args); + + // Put it on the list + Item *Itm = new Item; + Itm->Text = S; + Itm->Error = false; + Insert(Itm); + + return false; +} + /*}}}*/ +// GlobalError::PopMessage - Pulls a single message out /*{{{*/ +// --------------------------------------------------------------------- +/* This should be used in a loop checking empty() each cycle. It returns + true if the message is an error. */ +bool GlobalError::PopMessage(string &Text) +{ + if (List == 0) + return false; + + bool Ret = List->Error; + Text = List->Text; + Item *Old = List; + List = List->Next; + delete Old; + + // This really should check the list to see if only warnings are left.. + if (List == 0) + PendingFlag = false; + + return Ret; +} + /*}}}*/ +// GlobalError::DumpErrors - Dump all of the errors/warns to cerr /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void GlobalError::DumpErrors() +{ + // Print any errors or warnings found + string Err; + while (empty() == false) + { + bool Type = PopMessage(Err); + if (Type == true) + std::cerr << "E: " << Err << endl; + else + cerr << "W: " << Err << endl; + } +} + /*}}}*/ +// GlobalError::Discard - Discard /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void GlobalError::Discard() +{ + while (List != 0) + { + Item *Old = List; + List = List->Next; + delete Old; + } + + PendingFlag = false; +}; + /*}}}*/ +// GlobalError::Insert - Insert a new item at the end /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void GlobalError::Insert(Item *Itm) +{ + Item **End = &List; + for (Item *I = List; I != 0; I = I->Next) + End = &I->Next; + Itm->Next = *End; + *End = Itm; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/contrib/error.h b/tools/dsync-0.0/libdsync/contrib/error.h new file mode 100644 index 
00000000..4ff988ae --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/error.h @@ -0,0 +1,90 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: error.h,v 1.2 1998/12/29 04:38:09 jgg Exp $ +/* ###################################################################### + + Global Erorr Class - Global error mechanism + + This class has a single global instance. When a function needs to + generate an error condition, such as a read error, it calls a member + in this class to add the error to a stack of errors. + + By using a stack the problem with a scheme like errno is removed and + it allows a very detailed account of what went wrong to be transmitted + to the UI for display. (Errno has problems because each function sets + errno to 0 if it didn't have an error thus eraseing erno in the process + of cleanup) + + Several predefined error generators are provided to handle common + things like errno. The general idea is that all methods return a bool. + If the bool is true then things are OK, if it is false then things + should start being undone and the stack should unwind under program + control. + + A Warning should not force the return of false. Things did not fail, but + they might have had unexpected problems. Errors are stored in a FIFO + so Pop will return the first item.. + + I have some thoughts about extending this into a more general UI<-> + Engine interface, ie allowing the Engine to say 'The disk is full' in + a dialog that says 'Panic' and 'Retry'.. The error generator functions + like errno, Warning and Error return false always so this is normal: + if (open(..)) + return _error->Errno(..); + + This source is placed in the Public Domain, do with it what you will + It was originally written by Jason Gunthorpe. 
+ + ##################################################################### */ + /*}}}*/ +#ifndef PKGLIB_ERROR_H +#define PKGLIB_ERROR_H + +#ifdef __GNUG__ +#pragma interface "dsync/error.h" +#endif + +#include +using namespace std; + +class GlobalError +{ + struct Item + { + string Text; + bool Error; + Item *Next; + }; + + Item *List; + bool PendingFlag; + void Insert(Item *I); + + public: + + // Call to generate an error from a library call. + bool Errno(const char *Function,const char *Description,...); + bool WarningE(const char *Function,const char *Description,...); + + /* A warning should be considered less severe than an error, and may be + ignored by the client. */ + bool Error(const char *Description,...); + bool Warning(const char *Description,...); + + // Simple accessors + inline bool PendingError() {return PendingFlag;}; + inline bool empty() {return List == 0;}; + bool PopMessage(string &Text); + void Discard(); + + // Usefull routine to dump to cerr + void DumpErrors(); + + GlobalError(); +}; + +// The 'extra-ansi' syntax is used to help with collisions. +GlobalError *_GetErrorObj(); +#define _error _GetErrorObj() + +#endif diff --git a/tools/dsync-0.0/libdsync/contrib/fileutl.cc b/tools/dsync-0.0/libdsync/contrib/fileutl.cc new file mode 100644 index 00000000..44f8b2a1 --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/fileutl.cc @@ -0,0 +1,534 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: fileutl.cc,v 1.5 1999/10/24 06:53:12 jgg Exp $ +/* ###################################################################### + + File Utilities + + CopyFile - Buffered copy of a single file + GetLock - dpkg compatible lock file manipulation (fcntl) + + This source is placed in the Public Domain, do with it what you will + It was originally written by Jason Gunthorpe. 
+ + ##################################################################### */ + /*}}}*/ +// Include Files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/fileutl.h" +#endif +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace std; + /*}}}*/ + +// CopyFile - Buffered copy of a file /*{{{*/ +// --------------------------------------------------------------------- +/* The caller is expected to set things so that failure causes erasure */ +bool CopyFile(FileFd &From,FileFd &To) +{ + if (From.IsOpen() == false || To.IsOpen() == false) + return false; + + // Buffered copy between fds + unsigned char *Buf = new unsigned char[64000]; + unsigned long Size = From.Size(); + while (Size != 0) + { + unsigned long ToRead = Size; + if (Size > 64000) + ToRead = 64000; + + if (From.Read(Buf,ToRead) == false || + To.Write(Buf,ToRead) == false) + { + delete [] Buf; + return false; + } + + Size -= ToRead; + } + + delete [] Buf; + return true; +} + /*}}}*/ +// GetLock - Gets a lock file /*{{{*/ +// --------------------------------------------------------------------- +/* This will create an empty file of the given name and lock it. Once this + is done all other calls to GetLock in any other process will fail with + -1. The return result is the fd of the file, the call should call + close at some time. 
*/ +int GetLock(string File,bool Errors) +{ + int FD = open(File.c_str(),O_RDWR | O_CREAT | O_TRUNC,0640); + if (FD < 0) + { + if (Errors == true) + _error->Errno("open","Could not open lock file %s",File.c_str()); + return -1; + } + + // Aquire a write lock + struct flock fl; + fl.l_type = F_WRLCK; + fl.l_whence = SEEK_SET; + fl.l_start = 0; + fl.l_len = 0; + if (fcntl(FD,F_SETLK,&fl) == -1) + { + if (errno == ENOLCK) + { + _error->Warning("Not using locking for nfs mounted lock file %s",File.c_str()); + return true; + } + if (Errors == true) + _error->Errno("open","Could not get lock %s",File.c_str()); + close(FD); + return -1; + } + + return FD; +} + /*}}}*/ +// FileExists - Check if a file exists /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool FileExists(string File) +{ + struct stat Buf; + if (stat(File.c_str(),&Buf) != 0) + return false; + return true; +} + /*}}}*/ +// SafeGetCWD - This is a safer getcwd that returns a dynamic string /*{{{*/ +// --------------------------------------------------------------------- +/* We return / on failure. */ +string SafeGetCWD() +{ + // Stash the current dir. 
+ char S[300]; + S[0] = 0; + if (getcwd(S,sizeof(S)-2) == 0) + return "/"; + unsigned int Len = strlen(S); + S[Len] = '/'; + S[Len+1] = 0; + return S; +} + /*}}}*/ +// flNotDir - Strip the directory from the filename /*{{{*/ +// --------------------------------------------------------------------- +/* */ +string flNotDir(string File) +{ + string::size_type Res = File.rfind('/'); + if (Res == string::npos) + return File; + Res++; + return string(File,Res,Res - File.length()); +} + /*}}}*/ +// flNotFile - Strip the file from the directory name /*{{{*/ +// --------------------------------------------------------------------- +/* */ +string flNotFile(string File) +{ + string::size_type Res = File.rfind('/'); + if (Res == string::npos) + return File; + Res++; + return string(File,0,Res); +} + /*}}}*/ +// flNoLink - If file is a symlink then deref it /*{{{*/ +// --------------------------------------------------------------------- +/* If the name is not a link then the returned path is the input. */ +string flNoLink(string File) +{ + struct stat St; + if (lstat(File.c_str(),&St) != 0 || S_ISLNK(St.st_mode) == 0) + return File; + if (stat(File.c_str(),&St) != 0) + return File; + + /* Loop resolving the link. 
There is no need to limit the number of + loops because the stat call above ensures that the symlink is not + circular */ + char Buffer[1024]; + string NFile = File; + while (1) + { + // Read the link + int Res; + if ((Res = readlink(NFile.c_str(),Buffer,sizeof(Buffer))) <= 0 || + (unsigned)Res >= sizeof(Buffer)) + return File; + + // Append or replace the previous path + Buffer[Res] = 0; + if (Buffer[0] == '/') + NFile = Buffer; + else + NFile = flNotFile(NFile) + Buffer; + + // See if we are done + if (lstat(NFile.c_str(),&St) != 0) + return File; + if (S_ISLNK(St.st_mode) == 0) + return NFile; + } +} + /*}}}*/ +// SetCloseExec - Set the close on exec flag /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void SetCloseExec(int Fd,bool Close) +{ + if (fcntl(Fd,F_SETFD,(Close == false)?0:FD_CLOEXEC) != 0) + { + cerr << "FATAL -> Could not set close on exec " << strerror(errno) << endl; + exit(100); + } +} + /*}}}*/ +// SetNonBlock - Set the nonblocking flag /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void SetNonBlock(int Fd,bool Block) +{ + int Flags = fcntl(Fd,F_GETFL) & (~O_NONBLOCK); + if (fcntl(Fd,F_SETFL,Flags | ((Block == false)?0:O_NONBLOCK)) != 0) + { + cerr << "FATAL -> Could not set non-blocking flag " << strerror(errno) << endl; + exit(100); + } +} + /*}}}*/ +// WaitFd - Wait for a FD to become readable /*{{{*/ +// --------------------------------------------------------------------- +/* This waits for a FD to become readable using select. It is usefull for + applications making use of non-blocking sockets. The timeout is + in seconds. 
*/ +bool WaitFd(int Fd,bool write,unsigned long timeout) +{ + fd_set Set; + struct timeval tv; + FD_ZERO(&Set); + FD_SET(Fd,&Set); + tv.tv_sec = timeout; + tv.tv_usec = 0; + if (write == true) + { + int Res; + do + { + Res = select(Fd+1,0,&Set,0,(timeout != 0?&tv:0)); + } + while (Res < 0 && errno == EINTR); + + if (Res <= 0) + return false; + } + else + { + int Res; + do + { + Res = select(Fd+1,&Set,0,0,(timeout != 0?&tv:0)); + } + while (Res < 0 && errno == EINTR); + + if (Res <= 0) + return false; + } + + return true; +} + /*}}}*/ +// ExecFork - Magical fork that sanitizes the context before execing /*{{{*/ +// --------------------------------------------------------------------- +/* This is used if you want to cleanse the environment for the forked + child, it fixes up the important signals and nukes all of the fds, + otherwise acts like normal fork. */ +int ExecFork() +{ + // Fork off the process + pid_t Process = fork(); + if (Process < 0) + { + cerr << "FATAL -> Failed to fork." << endl; + exit(100); + } + + // Spawn the subprocess + if (Process == 0) + { + // Setup the signals + signal(SIGPIPE,SIG_DFL); + signal(SIGQUIT,SIG_DFL); + signal(SIGINT,SIG_DFL); + signal(SIGWINCH,SIG_DFL); + signal(SIGCONT,SIG_DFL); + signal(SIGTSTP,SIG_DFL); + + // Close all of our FDs - just in case + for (int K = 3; K != 40; K++) + fcntl(K,F_SETFD,FD_CLOEXEC); + } + + return Process; +} + /*}}}*/ +// ExecWait - Fancy waitpid /*{{{*/ +// --------------------------------------------------------------------- +/* Waits for the given sub process. If Reap is set the no errors are + generated. 
Otherwise a failed subprocess will generate a proper descriptive + message */ +bool ExecWait(int Pid,const char *Name,bool Reap) +{ + if (Pid <= 1) + return true; + + // Wait and collect the error code + int Status; + while (waitpid(Pid,&Status,0) != Pid) + { + if (errno == EINTR) + continue; + + if (Reap == true) + return false; + + return _error->Error("Waited, for %s but it wasn't there",Name); + } + + + // Check for an error code. + if (WIFEXITED(Status) == 0 || WEXITSTATUS(Status) != 0) + { + if (Reap == true) + return false; + if (WIFSIGNALED(Status) != 0 && WTERMSIG(Status) == SIGSEGV) + return _error->Error("Sub-process %s recieved a segmentation fault.",Name); + + if (WIFEXITED(Status) != 0) + return _error->Error("Sub-process %s returned an error code (%u)",Name,WEXITSTATUS(Status)); + + return _error->Error("Sub-process %s exited unexpectedly",Name); + } + + return true; +} + /*}}}*/ + +// FileFd::Open - Open a file /*{{{*/ +// --------------------------------------------------------------------- +/* The most commonly used open mode combinations are given with Mode */ +bool FileFd::Open(string FileName,OpenMode Mode, unsigned long Perms) +{ + Close(); + Flags = AutoClose; + switch (Mode) + { + case ReadOnly: + iFd = open(FileName.c_str(),O_RDONLY); + break; + + case WriteEmpty: + { + struct stat Buf; + if (stat(FileName.c_str(),&Buf) == 0 && S_ISLNK(Buf.st_mode)) + unlink(FileName.c_str()); + iFd = open(FileName.c_str(),O_RDWR | O_CREAT | O_TRUNC,Perms); + break; + } + + case WriteExists: + iFd = open(FileName.c_str(),O_RDWR); + break; + + case WriteAny: + iFd = open(FileName.c_str(),O_RDWR | O_CREAT,Perms); + break; + } + + if (iFd < 0) + return _error->Errno("open","Could not open file %s",FileName.c_str()); + + this->FileName = FileName; + SetCloseExec(iFd,true); + return true; +} + /*}}}*/ +// FileFd::~File - Closes the file /*{{{*/ +// --------------------------------------------------------------------- +/* If the proper modes are selected then we 
close the Fd and possibly + unlink the file on error. */ +FileFd::~FileFd() +{ + Close(); +} + /*}}}*/ +// FileFd::Read - Read a bit of the file /*{{{*/ +// --------------------------------------------------------------------- +/* We are carefull to handle interruption by a signal while reading + gracefully. */ +bool FileFd::Read(void *To,unsigned long Size,bool AllowEof) +{ + int Res; + errno = 0; + do + { + Res = read(iFd,To,Size); + if (Res < 0 && errno == EINTR) + continue; + if (Res < 0) + { + Flags |= Fail; + return _error->Errno("read","Read error"); + } + + To = (char *)To + Res; + Size -= Res; + } + while (Res > 0 && Size > 0); + + if (Size == 0) + return true; + + // Eof handling + if (AllowEof == true) + { + Flags |= HitEof; + return true; + } + + Flags |= Fail; + return _error->Error("read, still have %u to read but none left",Size); +} + /*}}}*/ +// FileFd::Write - Write to the file /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool FileFd::Write(const void *From,unsigned long Size) +{ + int Res; + errno = 0; + do + { + Res = write(iFd,From,Size); + if (Res < 0 && errno == EINTR) + continue; + if (Res < 0) + { + Flags |= Fail; + return _error->Errno("write","Write error"); + } + + From = (char *)From + Res; + Size -= Res; + } + while (Res > 0 && Size > 0); + + if (Size == 0) + return true; + + Flags |= Fail; + return _error->Error("write, still have %u to write but couldn't",Size); +} + /*}}}*/ +// FileFd::Seek - Seek in the file /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool FileFd::Seek(unsigned long To) +{ + if (lseek(iFd,To,SEEK_SET) != (signed)To) + { + Flags |= Fail; + return _error->Error("Unable to seek to %u",To); + } + + return true; +} + /*}}}*/ +// FileFd::Skip - Seek in the file /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool FileFd::Skip(unsigned long Over) +{ + if (lseek(iFd,Over,SEEK_CUR) < 0) + { + 
Flags |= Fail; + return _error->Error("Unable to seek ahead %u",Over); + } + + return true; +} + /*}}}*/ +// FileFd::Truncate - Truncate the file /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool FileFd::Truncate(unsigned long To) +{ + if (ftruncate(iFd,To) != 0) + { + Flags |= Fail; + return _error->Error("Unable to truncate to %u",To); + } + + return true; +} + /*}}}*/ +// FileFd::Tell - Current seek position /*{{{*/ +// --------------------------------------------------------------------- +/* */ +unsigned long FileFd::Tell() +{ + off_t Res = lseek(iFd,0,SEEK_CUR); + if (Res == (off_t)-1) + _error->Errno("lseek","Failed to determine the current file position"); + return Res; +} + /*}}}*/ +// FileFd::Size - Return the size of the file /*{{{*/ +// --------------------------------------------------------------------- +/* */ +unsigned long FileFd::Size() +{ + struct stat Buf; + if (fstat(iFd,&Buf) != 0) + return _error->Errno("fstat","Unable to determine the file size"); + return Buf.st_size; +} + /*}}}*/ +// FileFd::Close - Close the file if the close flag is set /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool FileFd::Close() +{ + bool Res = true; + if ((Flags & AutoClose) == AutoClose) + if (iFd >= 0 && close(iFd) != 0) + Res &= _error->Errno("close","Problem closing the file"); + iFd = -1; + + if ((Flags & Fail) == Fail && (Flags & DelOnFail) == DelOnFail && + FileName.empty() == false) + if (unlink(FileName.c_str()) != 0) + Res &= _error->Warning("unlnk","Problem unlinking the file"); + return Res; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/contrib/fileutl.h b/tools/dsync-0.0/libdsync/contrib/fileutl.h new file mode 100644 index 00000000..1be6009d --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/fileutl.h @@ -0,0 +1,90 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: fileutl.h,v 1.3 1999/10/24 06:53:12 jgg Exp $ +/* 
###################################################################### + + File Utilities + + CopyFile - Buffered copy of a single file + GetLock - dpkg compatible lock file manipulation (fcntl) + FileExists - Returns true if the file exists + SafeGetCWD - Returns the CWD in a string with overrun protection + + The file class is a handy abstraction for various functions+classes + that need to accept filenames. + + This source is placed in the Public Domain, do with it what you will + It was originally written by Jason Gunthorpe. + + ##################################################################### */ + /*}}}*/ +#ifndef PKGLIB_FILEUTL_H +#define PKGLIB_FILEUTL_H + +#ifdef __GNUG__ +#pragma interface "dsync/fileutl.h" +#endif + +#include + +using namespace std; + +class FileFd +{ + protected: + int iFd; + + enum LocalFlags {AutoClose = (1<<0),Fail = (1<<1),DelOnFail = (1<<2), + HitEof = (1<<3)}; + unsigned long Flags; + string FileName; + + public: + enum OpenMode {ReadOnly,WriteEmpty,WriteExists,WriteAny}; + + bool Read(void *To,unsigned long Size,bool AllowEof = false); + bool Write(const void *From,unsigned long Size); + bool Seek(unsigned long To); + bool Skip(unsigned long To); + bool Truncate(unsigned long To); + unsigned long Tell(); + unsigned long Size(); + bool Open(string FileName,OpenMode Mode,unsigned long Perms = 0666); + bool Close(); + + // Simple manipulators + inline int Fd() {return iFd;}; + inline void Fd(int fd) {iFd = fd;}; + inline bool IsOpen() {return iFd >= 0;}; + inline bool Failed() {return (Flags & Fail) == Fail;}; + inline void EraseOnFailure() {Flags |= DelOnFail;}; + inline void OpFail() {Flags |= Fail;}; + inline bool Eof() {return (Flags & HitEof) == HitEof;}; + inline string &Name() {return FileName;}; + + FileFd(string FileName,OpenMode Mode,unsigned long Perms = 0666) : iFd(-1), + Flags(0) + { + Open(FileName,Mode,Perms); + }; + FileFd(int Fd = -1) : iFd(Fd), Flags(AutoClose) {}; + FileFd(int Fd,bool) : iFd(Fd), Flags(0) {}; + 
virtual ~FileFd(); +}; + +bool CopyFile(FileFd &From,FileFd &To); +int GetLock(string File,bool Errors = true); +bool FileExists(string File); +string SafeGetCWD(); +void SetCloseExec(int Fd,bool Close); +void SetNonBlock(int Fd,bool Block); +bool WaitFd(int Fd,bool write = false,unsigned long timeout = 0); +int ExecFork(); +bool ExecWait(int Pid,const char *Name,bool Reap = false); + +// File string manipulators +string flNotDir(string File); +string flNotFile(string File); +string flNoLink(string File); + +#endif diff --git a/tools/dsync-0.0/libdsync/contrib/md4.cc b/tools/dsync-0.0/libdsync/contrib/md4.cc new file mode 100644 index 00000000..32e0ddb0 --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/md4.cc @@ -0,0 +1,182 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: md4.cc,v 1.4 1999/11/17 05:59:29 jgg Exp $ +/* ###################################################################### + + MD4Sum - MD4 Message Digest Algorithm. + + This code implements the MD4 message-digest algorithm. See RFC 1186. + + Ripped shamelessly from RSync which ripped it shamelessly from Samba. + Code is covered under the GPL >=2 and has been changed to have a C++ + interface and use the local configuration stuff. + + Copyright (C) Andrew Tridgell 1997-1998. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ + ##################################################################### */ + /*}}}*/ +// Include Files /*{{{*/ +#include + +#include +#include +#include + /*}}}*/ + +// byteSwap - Swap bytes in a buffer /*{{{*/ +// --------------------------------------------------------------------- +/* Swap n 32 bit longs in given buffer */ +#ifdef WORDS_BIGENDIAN +static void byteSwap(uint32_t *buf, unsigned words) +{ + uint8_t *p = (uint8_t *)buf; + + do + { + *buf++ = (uint32_t)((unsigned)p[3] << 8 | p[2]) << 16 | + ((unsigned)p[1] << 8 | p[0]); + p += 4; + } while (--words); +} +#else +#define byteSwap(buf,words) +#endif + /*}}}*/ +// InitMD4 - Init the MD4 buffer /*{{{*/ +// --------------------------------------------------------------------- +/* */ +void InitMD4(unsigned char MD4[16]) +{ + uint32_t X[4] = {0x67452301,0xefcdab89,0x98badcfe,0x10325476}; + byteSwap(X,4); + memcpy(MD4,X,16); +} + /*}}}*/ +// ComputeMD4 - Compute the MD4 hash of a buffer /*{{{*/ +// --------------------------------------------------------------------- +/* The buffer *must* be an even multiple of 64 bytes long. 
The resulting + hash is placed in the output buffer in */ +#define F(X,Y,Z) (((X)&(Y)) | ((~(X))&(Z))) +#define G(X,Y,Z) (((X)&(Y)) | ((X)&(Z)) | ((Y)&(Z))) +#define H(X,Y,Z) ((X)^(Y)^(Z)) +#define lshift(x,s) (((x)<<(s)) | ((x)>>(32-(s)))) + +#define ROUND1(a,b,c,d,k,s) a = lshift(a + F(b,c,d) + X[k], s) +#define ROUND2(a,b,c,d,k,s) a = lshift(a + G(b,c,d) + X[k] + 0x5A827999,s) +#define ROUND3(a,b,c,d,k,s) a = lshift(a + H(b,c,d) + X[k] + 0x6ED9EBA1,s) + +void ComputeMD4(unsigned char MD4[16],unsigned char const *Start, + unsigned const char *End) +{ + uint32_t X[16]; + uint32_t A,B,C,D; + + // Prepare the sum state + memcpy(X,MD4,16); + byteSwap(X,4); + A = X[0]; + B = X[1]; + C = X[2]; + D = X[3]; + + for (; End - Start >= 64; Start += 64) + { + uint32_t AA, BB, CC, DD; + + memcpy(X,Start,sizeof(X)); + byteSwap(X,16); + + AA = A; BB = B; CC = C; DD = D; + + ROUND1(A,B,C,D, 0, 3); ROUND1(D,A,B,C, 1, 7); + ROUND1(C,D,A,B, 2, 11); ROUND1(B,C,D,A, 3, 19); + ROUND1(A,B,C,D, 4, 3); ROUND1(D,A,B,C, 5, 7); + ROUND1(C,D,A,B, 6, 11); ROUND1(B,C,D,A, 7, 19); + ROUND1(A,B,C,D, 8, 3); ROUND1(D,A,B,C, 9, 7); + ROUND1(C,D,A,B, 10, 11); ROUND1(B,C,D,A, 11, 19); + ROUND1(A,B,C,D, 12, 3); ROUND1(D,A,B,C, 13, 7); + ROUND1(C,D,A,B, 14, 11); ROUND1(B,C,D,A, 15, 19); + + ROUND2(A,B,C,D, 0, 3); ROUND2(D,A,B,C, 4, 5); + ROUND2(C,D,A,B, 8, 9); ROUND2(B,C,D,A, 12, 13); + ROUND2(A,B,C,D, 1, 3); ROUND2(D,A,B,C, 5, 5); + ROUND2(C,D,A,B, 9, 9); ROUND2(B,C,D,A, 13, 13); + ROUND2(A,B,C,D, 2, 3); ROUND2(D,A,B,C, 6, 5); + ROUND2(C,D,A,B, 10, 9); ROUND2(B,C,D,A, 14, 13); + ROUND2(A,B,C,D, 3, 3); ROUND2(D,A,B,C, 7, 5); + ROUND2(C,D,A,B, 11, 9); ROUND2(B,C,D,A, 15, 13); + + ROUND3(A,B,C,D, 0, 3); ROUND3(D,A,B,C, 8, 9); + ROUND3(C,D,A,B, 4, 11); ROUND3(B,C,D,A, 12, 15); + ROUND3(A,B,C,D, 2, 3); ROUND3(D,A,B,C, 10, 9); + ROUND3(C,D,A,B, 6, 11); ROUND3(B,C,D,A, 14, 15); + ROUND3(A,B,C,D, 1, 3); ROUND3(D,A,B,C, 9, 9); + ROUND3(C,D,A,B, 5, 11); ROUND3(B,C,D,A, 13, 15); + ROUND3(A,B,C,D, 3, 3); 
ROUND3(D,A,B,C, 11, 9); + ROUND3(C,D,A,B, 7, 11); ROUND3(B,C,D,A, 15, 15); + + A += AA; + B += BB; + C += CC; + D += DD; + } + X[0] = A; + X[1] = B; + X[2] = C; + X[3] = D; + + byteSwap(X,4); + memcpy(MD4,X,16); +} + /*}}}*/ +// ComputeMD4Final - Finalize the MD4, length and pad /*{{{*/ +// --------------------------------------------------------------------- +/* This does the final round of MD4, Start->End will be padded to be + congruent to 0 mod 64 and TotalLen appended. */ +void ComputeMD4Final(unsigned char MD4[16],unsigned char const *Start, + unsigned char const *End,unsigned long TotalLen) +{ + if (End - Start >= 64) + { + ComputeMD4(MD4,Start,End - ((End - Start)%64)); + Start = End - ((End - Start)%64); + } + + uint8_t Buf[128]; + uint32_t Len = TotalLen*8; + + // Create the partial end buffer, padded to be 448%512 bits long + memset(Buf,0,128); + if (Start != End) + memcpy(Buf,Start,End - Start); + Buf[End-Start] = 0x80; + + // Append the 32 bit length into the 64 bit field + if (End-Start <= 55) + { + memcpy(Buf+56,&Len,sizeof(Len)); + byteSwap((uint32_t *)(Buf+56),1); + ComputeMD4(MD4,Buf,Buf+64); + } + else + { + memcpy(Buf+120,&Len,sizeof(Len)); + byteSwap((uint32_t *)(Buf+120),1); + ComputeMD4(MD4,Buf,Buf+128); + } +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/contrib/md4.h b/tools/dsync-0.0/libdsync/contrib/md4.h new file mode 100644 index 00000000..d2b2e900 --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/md4.h @@ -0,0 +1,21 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: md4.h,v 1.2 1999/11/17 04:07:17 jgg Exp $ +/* ###################################################################### + + MD4 - MD4 Message Digest Algorithm. 
+ + This is a simple function to compute the MD4 of + + ##################################################################### */ + /*}}}*/ +#ifndef DSYNC_MD4_H +#define DSYNC_MD4_H + +void InitMD4(unsigned char MD4[16]); +void ComputeMD4(unsigned char MD4[16],unsigned char const *Start, + unsigned const char *End); +void ComputeMD4Final(unsigned char MD4[16],unsigned char const *Start, + unsigned char const *End,unsigned long TotalLen); + +#endif diff --git a/tools/dsync-0.0/libdsync/contrib/md5.cc b/tools/dsync-0.0/libdsync/contrib/md5.cc new file mode 100644 index 00000000..4066ae7f --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/md5.cc @@ -0,0 +1,357 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: md5.cc,v 1.5 1999/11/17 04:13:49 jgg Exp $ +/* ###################################################################### + + MD5Sum - MD5 Message Digest Algorithm. + + This code implements the MD5 message-digest algorithm. The algorithm is + due to Ron Rivest. This code was written by Colin Plumb in 1993, no + copyright is claimed. This code is in the public domain; do with it what + you wish. + + Equivalent code is available from RSA Data Security, Inc. This code has + been tested against that, and is equivalent, except that you don't need to + include two pages of legalese with every copy. + + To compute the message digest of a chunk of bytes, instantiate the class, + and repeatedly call one of the Add() members. When finished the Result + method will return the Hash and finalize the value. + + Changed so as no longer to depend on Colin Plumb's `usual.h' header + definitions; now uses stuff from dpkg's config.h. + - Ian Jackson . + + Changed into a C++ interface and made work with APT's config.h. + - Jason Gunthorpe + + Still in the public domain. + + The classes use arrays of char that are a specific size. We cast those + arrays to uint8_t's and go from there. 
This allows us to advoid using + the uncommon inttypes.h in a public header or internally newing memory. + In theory if C9x becomes nicely accepted + + ##################################################################### */ + /*}}}*/ +// Include Files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/md5.h" +#endif + +#include +#include + +#include +#include +#include +#include +#include + /*}}}*/ + +// byteSwap - Swap bytes in a buffer /*{{{*/ +// --------------------------------------------------------------------- +/* Swap n 32 bit longs in given buffer */ +#ifdef WORDS_BIGENDIAN +static void byteSwap(uint32_t *buf, unsigned words) +{ + uint8_t *p = (uint8_t *)buf; + + do + { + *buf++ = (uint32_t)((unsigned)p[3] << 8 | p[2]) << 16 | + ((unsigned)p[1] << 8 | p[0]); + p += 4; + } while (--words); +} +#else +#define byteSwap(buf,words) +#endif + /*}}}*/ +// MD5Transform - Alters an existing MD5 hash /*{{{*/ +// --------------------------------------------------------------------- +/* The core of the MD5 algorithm, this alters an existing MD5 hash to + reflect the addition of 16 longwords of new data. Add blocks + the data and converts bytes into longwords for this routine. */ + +// The four core functions - F1 is optimized somewhat +// #define F1(x, y, z) (x & y | ~x & z) +#define F1(x, y, z) (z ^ (x & (y ^ z))) +#define F2(x, y, z) F1(z, x, y) +#define F3(x, y, z) (x ^ y ^ z) +#define F4(x, y, z) (y ^ (x | ~z)) + +// This is the central step in the MD5 algorithm. 
+#define MD5STEP(f,w,x,y,z,in,s) \ + (w += f(x,y,z) + in, w = (w<>(32-s)) + x) + +static void MD5Transform(uint32_t buf[4], uint32_t const in[16]) +{ + register uint32_t a, b, c, d; + + a = buf[0]; + b = buf[1]; + c = buf[2]; + d = buf[3]; + + MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); + MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); + MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); + MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); + MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); + MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); + MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); + MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); + MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); + MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); + MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); + MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); + MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); + MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); + MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); + MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); + + MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); + MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); + MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); + MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); + MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); + MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); + MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); + MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); + MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); + MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); + MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); + MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); + MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); + MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); + MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); + MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); + + MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); + MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); + MD5STEP(F3, c, d, 
a, b, in[11] + 0x6d9d6122, 16); + MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); + MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); + MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); + MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); + MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); + MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); + MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); + MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); + MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); + MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); + MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); + MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); + MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); + + MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); + MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); + MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); + MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); + MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); + MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); + MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); + MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); + MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); + MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); + MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); + MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); + MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); + MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); + MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); + MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); + + buf[0] += a; + buf[1] += b; + buf[2] += c; + buf[3] += d; +} + /*}}}*/ +// MD5SumValue::MD5SumValue - Constructs the summation from a string /*{{{*/ +// --------------------------------------------------------------------- +/* The string form of a MD5 is a 32 character hex number */ +MD5SumValue::MD5SumValue(string Str) +{ + memset(Sum,0,sizeof(Sum)); + Set(Str); +} + /*}}}*/ +// MD5SumValue::MD5SumValue - Default constructor /*{{{*/ +// 
--------------------------------------------------------------------- +/* Sets the value to 0 */ +MD5SumValue::MD5SumValue() +{ + memset(Sum,0,sizeof(Sum)); +} + /*}}}*/ +// MD5SumValue::Set - Set the sum from a string /*{{{*/ +// --------------------------------------------------------------------- +/* Converts the hex string into a set of chars */ +bool MD5SumValue::Set(string Str) +{ + return Hex2Num(Str.c_str(),Str.c_str()+strlen(Str.c_str()),Sum,sizeof(Sum)); +} + /*}}}*/ +// MD5SumValue::Value - Convert the number into a string /*{{{*/ +// --------------------------------------------------------------------- +/* Converts the set of chars into a hex string in lower case */ +string MD5SumValue::Value() const +{ + char Conv[16] = {'0','1','2','3','4','5','6','7','8','9','a','b', + 'c','d','e','f'}; + char Result[33]; + Result[32] = 0; + + // Convert each char into two letters + int J = 0; + int I = 0; + for (; I != 32; J++, I += 2) + { + Result[I] = Conv[Sum[J] >> 4]; + Result[I + 1] = Conv[Sum[J] & 0xF]; + } + + return string(Result); +} + /*}}}*/ +// MD5SumValue::operator == - Comparitor /*{{{*/ +// --------------------------------------------------------------------- +/* Call memcmp on the buffer */ +bool MD5SumValue::operator ==(const MD5SumValue &rhs) const +{ + return memcmp(Sum,rhs.Sum,sizeof(Sum)) == 0; +} + /*}}}*/ +// MD5Summation::MD5Summation - Initialize the summer /*{{{*/ +// --------------------------------------------------------------------- +/* This assigns the deep magic initial values */ +MD5Summation::MD5Summation() +{ + uint32_t *buf = (uint32_t *)Buf; + uint32_t *bytes = (uint32_t *)Bytes; + + buf[0] = 0x67452301; + buf[1] = 0xefcdab89; + buf[2] = 0x98badcfe; + buf[3] = 0x10325476; + + bytes[0] = 0; + bytes[1] = 0; + Done = false; +} + /*}}}*/ +// MD5Summation::Add - 'Add' a data set to the hash /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool MD5Summation::Add(const unsigned char 
*data,unsigned long len) +{ + if (Done == true) + return false; + + uint32_t *buf = (uint32_t *)Buf; + uint32_t *bytes = (uint32_t *)Bytes; + uint32_t *in = (uint32_t *)In; + + // Update byte count and carry (this could be done with a long long?) + uint32_t t = bytes[0]; + if ((bytes[0] = t + len) < t) + bytes[1]++; + + // Space available (at least 1) + t = 64 - (t & 0x3f); + if (t > len) + { + memcpy((unsigned char *)in + 64 - t,data,len); + return true; + } + + // First chunk is an odd size + memcpy((unsigned char *)in + 64 - t,data,t); + byteSwap(in, 16); + MD5Transform(buf,in); + data += t; + len -= t; + + // Process data in 64-byte chunks + while (len >= 64) + { + memcpy(in,data,64); + byteSwap(in,16); + MD5Transform(buf,in); + data += 64; + len -= 64; + } + + // Handle any remaining bytes of data. + memcpy(in,data,len); + + return true; +} + /*}}}*/ +// MD5Summation::AddFD - Add the contents of a FD to the hash /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool MD5Summation::AddFD(int Fd,unsigned long Size) +{ + unsigned char Buf[64*64]; + int Res = 0; + while (Size != 0) + { + Res = read(Fd,Buf,MIN(Size,sizeof(Buf))); + if (Res < 0 || (unsigned)Res != MIN(Size,sizeof(Buf))) + return false; + Size -= Res; + Add(Buf,Res); + } + return true; +} + /*}}}*/ +// MD5Summation::Result - Returns the value of the sum /*{{{*/ +// --------------------------------------------------------------------- +/* Because this must add in the last bytes of the series it prevents anyone + from calling add after. */ +MD5SumValue MD5Summation::Result() +{ + uint32_t *buf = (uint32_t *)Buf; + uint32_t *bytes = (uint32_t *)Bytes; + uint32_t *in = (uint32_t *)In; + + if (Done == false) + { + // Number of bytes in In + int count = bytes[0] & 0x3f; + unsigned char *p = (unsigned char *)in + count; + + // Set the first char of padding to 0x80. There is always room. 
+ *p++ = 0x80; + + // Bytes of padding needed to make 56 bytes (-8..55) + count = 56 - 1 - count; + + // Padding forces an extra block + if (count < 0) + { + memset(p,0,count + 8); + byteSwap(in, 16); + MD5Transform(buf,in); + p = (unsigned char *)in; + count = 56; + } + + memset(p, 0, count); + byteSwap(in, 14); + + // Append length in bits and transform + in[14] = bytes[0] << 3; + in[15] = bytes[1] << 3 | bytes[0] >> 29; + MD5Transform(buf,in); + byteSwap(buf,4); + Done = true; + } + + MD5SumValue V; + memcpy(V.Sum,buf,16); + return V; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/contrib/md5.h b/tools/dsync-0.0/libdsync/contrib/md5.h new file mode 100644 index 00000000..6fb39ad9 --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/md5.h @@ -0,0 +1,75 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: md5.h,v 1.4 1999/10/24 06:53:12 jgg Exp $ +/* ###################################################################### + + MD5SumValue - Storage for a MD5Sum + MD5Summation - MD5 Message Digest Algorithm. + + This is a C++ interface to a set of MD5Sum functions. The class can + store a MD5Sum in 16 bytes of memory. + + A MD5Sum is used to generate a (hopefully) unique 16 byte number for a + block of data. This can be used to gaurd against corruption of a file. + MD5 should not be used for tamper protection, use SHA or something more + secure. + + There are two classes because computing a MD5 is not a continual + operation unless 64 byte blocks are used. Also the summation requires an + extra 18*4 bytes to operate. 
+ + ##################################################################### */ + /*}}}*/ +#ifndef APTPKG_MD5_H +#define APTPKG_MD5_H + +#ifdef __GNUG__ +#pragma interface "dsync/md5.h" +#endif + +#include +using namespace std; + +class MD5Summation; + +class MD5SumValue +{ + friend class MD5Summation; + unsigned char Sum[4*4]; + + public: + + // Accessors + bool operator ==(const MD5SumValue &rhs) const; + string Value() const; + inline void Value(unsigned char S[16]) + {for (int I = 0; I != sizeof(Sum); I++) S[I] = Sum[I];}; + inline operator string() const {return Value();}; + bool Set(string Str); + inline void Set(unsigned char S[16]) + {for (int I = 0; I != sizeof(Sum); I++) Sum[I] = S[I];}; + + MD5SumValue(string Str); + MD5SumValue(); +}; + +class MD5Summation +{ + unsigned char Buf[4*4]; + unsigned char Bytes[2*4]; + unsigned char In[16*4]; + bool Done; + + public: + + bool Add(const unsigned char *Data,unsigned long Size); + inline bool Add(const char *Data) {return Add((unsigned char *)Data,strlen(Data));}; + bool AddFD(int Fd,unsigned long Size); + inline bool Add(const unsigned char *Beg,const unsigned char *End) + {return Add(Beg,End-Beg);}; + MD5SumValue Result(); + + MD5Summation(); +}; + +#endif diff --git a/tools/dsync-0.0/libdsync/contrib/mmap.cc b/tools/dsync-0.0/libdsync/contrib/mmap.cc new file mode 100644 index 00000000..2d25ce83 --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/mmap.cc @@ -0,0 +1,279 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: mmap.cc,v 1.3 1999/10/24 06:53:12 jgg Exp $ +/* ###################################################################### + + MMap Class - Provides 'real' mmap or a faked mmap using read(). + + MMap cover class. + + Some broken versions of glibc2 (libc6) have a broken definition + of mmap that accepts a char * -- all other systems (and libc5) use + void *. 
We can't safely do anything here that would be portable, so + libc6 generates warnings -- which should be errors, g++ isn't properly + strict. + + The configure test notes that some OS's have broken private mmap's + so on those OS's we can't use mmap. This means we have to use + configure to test mmap and can't rely on the POSIX + _POSIX_MAPPED_FILES test. + + ##################################################################### */ + /*}}}*/ +// Include Files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/mmap.h" +#endif + +#define _BSD_SOURCE +#include +#include + +#include +#include +#include +#include + /*}}}*/ + +// MMap::MMap - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +MMap::MMap(FileFd &F,unsigned long Flags) : Flags(Flags), iSize(0), + Base(0) +{ + if ((Flags & NoImmMap) != NoImmMap) + Map(F); +} + /*}}}*/ +// MMap::MMap - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +MMap::MMap(unsigned long Flags) : Flags(Flags), iSize(0), + Base(0) +{ +} + /*}}}*/ +// MMap::~MMap - Destructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +MMap::~MMap() +{ + Close(); +} + /*}}}*/ +// MMap::Map - Perform the mapping /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool MMap::Map(FileFd &Fd) +{ + iSize = Fd.Size(); + + // Set the permissions. + int Prot = PROT_READ; + int Map = MAP_SHARED; + if ((Flags & ReadOnly) != ReadOnly) + Prot |= PROT_WRITE; + if ((Flags & Public) != Public) + Map = MAP_PRIVATE; + + if (iSize == 0) + return _error->Error("Can't mmap an empty file"); + + // Map it. 
+ Base = mmap(0,iSize,Prot,Map,Fd.Fd(),0); + if (Base == (void *)-1) + return _error->Errno("mmap","Couldn't make mmap of %u bytes",iSize); + + return true; +} + /*}}}*/ +// MMap::Close - Close the map /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool MMap::Close(bool DoSync) +{ + if ((Flags & UnMapped) == UnMapped || Base == 0 || iSize == 0) + return true; + + if (DoSync == true) + Sync(); + + if (munmap((char *)Base,iSize) != 0) + _error->Warning("Unable to munmap"); + + iSize = 0; + return true; +} + /*}}}*/ +// MMap::Sync - Syncronize the map with the disk /*{{{*/ +// --------------------------------------------------------------------- +/* This is done in syncronous mode - the docs indicate that this will + not return till all IO is complete */ +bool MMap::Sync() +{ + if ((Flags & UnMapped) == UnMapped) + return true; + +#ifdef _POSIX_SYNCHRONIZED_IO + if ((Flags & ReadOnly) != ReadOnly) + if (msync((char *)Base,iSize,MS_SYNC) != 0) + return _error->Errno("msync","Unable to write mmap"); +#endif + return true; +} + /*}}}*/ +// MMap::Sync - Syncronize a section of the file to disk /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool MMap::Sync(unsigned long Start,unsigned long Stop) +{ + if ((Flags & UnMapped) == UnMapped) + return true; + +#ifdef _POSIX_SYNCHRONIZED_IO + unsigned long PSize = sysconf(_SC_PAGESIZE); + if ((Flags & ReadOnly) != ReadOnly) + if (msync((char *)Base+(int)(Start/PSize)*PSize,Stop - Start,MS_SYNC) != 0) + return _error->Errno("msync","Unable to write mmap"); +#endif + return true; +} + /*}}}*/ + +// DynamicMMap::DynamicMMap - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +DynamicMMap::DynamicMMap(FileFd &F,unsigned long Flags,unsigned long WorkSpace) : + MMap(F,Flags | NoImmMap), Fd(&F), WorkSpace(WorkSpace) +{ + if (_error->PendingError() == true) + return; + + unsigned long EndOfFile = 
Fd->Size(); + Fd->Seek(WorkSpace); + char C = 0; + Fd->Write(&C,sizeof(C)); + Map(F); + iSize = EndOfFile; +} + /*}}}*/ +// DynamicMMap::DynamicMMap - Constructor for a non-file backed map /*{{{*/ +// --------------------------------------------------------------------- +/* This is just a fancy malloc really.. */ +DynamicMMap::DynamicMMap(unsigned long Flags,unsigned long WorkSpace) : + MMap(Flags | NoImmMap | UnMapped), Fd(0), WorkSpace(WorkSpace) +{ + if (_error->PendingError() == true) + return; + + Base = new unsigned char[WorkSpace]; + iSize = 0; +} + /*}}}*/ +// DynamicMMap::~DynamicMMap - Destructor /*{{{*/ +// --------------------------------------------------------------------- +/* We truncate the file to the size of the memory data set */ +DynamicMMap::~DynamicMMap() +{ + if (Fd == 0) + { + delete [] (unsigned char *)Base; + return; + } + + unsigned long EndOfFile = iSize; + Sync(); + iSize = WorkSpace; + Close(false); + ftruncate(Fd->Fd(),EndOfFile); + Fd->Close(); +} + /*}}}*/ +// DynamicMMap::RawAllocate - Allocate a raw chunk of unaligned space /*{{{*/ +// --------------------------------------------------------------------- +/* This allocates a block of memory aligned to the given size */ +unsigned long DynamicMMap::RawAllocate(unsigned long Size,unsigned long Aln) +{ + unsigned long Result = iSize; + if (Aln != 0) + Result += Aln - (iSize%Aln); + + iSize = Result + Size; + + // Just in case error check + if (Result + Size > WorkSpace) + { + _error->Error("Dynamic MMap ran out of room"); + return 0; + } + + return Result; +} + /*}}}*/ +// DynamicMMap::Allocate - Pooled aligned allocation /*{{{*/ +// --------------------------------------------------------------------- +/* This allocates an Item of size ItemSize so that it is aligned to its + size in the file. 
*/ +unsigned long DynamicMMap::Allocate(unsigned long ItemSize) +{ + // Look for a matching pool entry + Pool *I; + Pool *Empty = 0; + for (I = Pools; I != Pools + PoolCount; I++) + { + if (I->ItemSize == 0) + Empty = I; + if (I->ItemSize == ItemSize) + break; + } + + // No pool is allocated, use an unallocated one + if (I == Pools + PoolCount) + { + // Woops, we ran out, the calling code should allocate more. + if (Empty == 0) + { + _error->Error("Ran out of allocation pools"); + return 0; + } + + I = Empty; + I->ItemSize = ItemSize; + I->Count = 0; + } + + // Out of space, allocate some more + if (I->Count == 0) + { + I->Count = 20*1024/ItemSize; + I->Start = RawAllocate(I->Count*ItemSize,ItemSize); + } + + I->Count--; + unsigned long Result = I->Start; + I->Start += ItemSize; + return Result/ItemSize; +} + /*}}}*/ +// DynamicMMap::WriteString - Write a string to the file /*{{{*/ +// --------------------------------------------------------------------- +/* Strings are not aligned to anything */ +unsigned long DynamicMMap::WriteString(const char *String, + unsigned long Len) +{ + unsigned long Result = iSize; + // Just in case error check + if (Result + Len > WorkSpace) + { + _error->Error("Dynamic MMap ran out of room"); + return 0; + } + + if (Len == (unsigned long)-1) + Len = strlen(String); + iSize += Len + 1; + memcpy((char *)Base + Result,String,Len); + ((char *)Base)[Result + Len] = 0; + return Result; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/contrib/mmap.h b/tools/dsync-0.0/libdsync/contrib/mmap.h new file mode 100644 index 00000000..d4a2580d --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/mmap.h @@ -0,0 +1,103 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: mmap.h,v 1.2 1999/10/24 06:53:12 jgg Exp $ +/* ###################################################################### + + MMap Class - Provides 'real' mmap or a faked mmap using read(). 
+ + The purpose of this code is to provide a generic way for clients to + access the mmap function. In enviroments that do not support mmap + from file fd's this function will use read and normal allocated + memory. + + Writing to a public mmap will always fully comit all changes when the + class is deleted. Ie it will rewrite the file, unless it is readonly + + The DynamicMMap class is used to help the on-disk data structure + generators. It provides a large allocated workspace and members + to allocate space from the workspace in an effecient fashion. + + This source is placed in the Public Domain, do with it what you will + It was originally written by Jason Gunthorpe. + + ##################################################################### */ + /*}}}*/ +#ifndef PKGLIB_MMAP_H +#define PKGLIB_MMAP_H + +#ifdef __GNUG__ +#pragma interface "dsync/mmap.h" +#endif + +#include +#include + +/* This should be a 32 bit type, larger tyes use too much ram and smaller + types are too small. Where ever possible 'unsigned long' should be used + instead of this internal type */ +typedef unsigned int map_ptrloc; + +class MMap +{ + protected: + + unsigned long Flags; + unsigned long iSize; + void *Base; + + bool Map(FileFd &Fd); + bool Close(bool DoSync = true); + + public: + + enum OpenFlags {NoImmMap = (1<<0),Public = (1<<1),ReadOnly = (1<<2), + UnMapped = (1<<3)}; + + // Simple accessors + inline operator void *() {return Base;}; + inline void *Data() {return Base;}; + inline unsigned long Size() {return iSize;}; + + // File manipulators + bool Sync(); + bool Sync(unsigned long Start,unsigned long Stop); + + MMap(FileFd &F,unsigned long Flags); + MMap(unsigned long Flags); + virtual ~MMap(); +}; + +class DynamicMMap : public MMap +{ + public: + + // This is the allocation pool structure + struct Pool + { + unsigned long ItemSize; + unsigned long Start; + unsigned long Count; + }; + + protected: + + FileFd *Fd; + unsigned long WorkSpace; + Pool *Pools; + unsigned int 
PoolCount; + + public: + + // Allocation + unsigned long RawAllocate(unsigned long Size,unsigned long Aln = 0); + unsigned long Allocate(unsigned long ItemSize); + unsigned long WriteString(const char *String,unsigned long Len = (unsigned long)-1); + inline unsigned long WriteString(string S) {return WriteString(S.c_str());}; + void UsePools(Pool &P,unsigned int Count) {Pools = &P; PoolCount = Count;}; + + DynamicMMap(FileFd &F,unsigned long Flags,unsigned long WorkSpace = 2*1024*1024); + DynamicMMap(unsigned long Flags,unsigned long WorkSpace = 2*1024*1024); + virtual ~DynamicMMap(); +}; + +#endif diff --git a/tools/dsync-0.0/libdsync/contrib/slidingwindow.cc b/tools/dsync-0.0/libdsync/contrib/slidingwindow.cc new file mode 100644 index 00000000..ad6cdd21 --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/slidingwindow.cc @@ -0,0 +1,110 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: slidingwindow.cc,v 1.1 1999/11/05 05:47:06 jgg Exp $ +/* ###################################################################### + + Sliding Window - Implements a sliding buffer over a file. + + It would be possible to implement an alternate version if + _POSIX_MAPPED_FILES is not defined.. 
+ + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/slidingwindow.h" +#endif + +#include +#include + +#include +#include + /*}}}*/ + +// SlidingWindow::SlidingWindow - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +SlidingWindow::SlidingWindow(FileFd &Fd,unsigned long MnSize) : Buffer(0), + MinSize(MnSize), Fd(Fd) +{ + Offset = 0; + Left = 0; + PageSize = sysconf(_SC_PAGESIZE); + + if (MinSize < 1024*1024) + MinSize = 1024*1024; + MinSize = Align(MinSize); +} + /*}}}*/ +// SlidingWindow::~SlidingWindow - Destructor /*{{{*/ +// --------------------------------------------------------------------- +/* Just unmap the mapping */ +SlidingWindow::~SlidingWindow() +{ + if (Buffer != 0) + { + if (munmap((char *)Buffer,Size) != 0) + _error->Warning("Unable to munmap"); + } +} + /*}}}*/ +// SlidingWindow::Extend - Make Start - End longer /*{{{*/ +// --------------------------------------------------------------------- +/* Start == End when the file is exhausted, false is an IO error. */ +bool SlidingWindow::Extend(unsigned char *&Start,unsigned char *&End) +{ + unsigned long Remainder = 0; + + // Restart + if (Start == 0 || Buffer == 0) + { + Offset = 0; + Left = Fd.Size(); + } + else + { + if (AlignDn((unsigned long)(Start - Buffer)) == 0) + return _error->Error("SlidingWindow::Extend called with too small a 'Start'"); + + // Scanning is finished. 
+ if (Left < (off_t)Size) + { + End = Start; + return true; + } + + Offset += AlignDn((unsigned long)(Start - Buffer)); + Left -= AlignDn((unsigned long)(Start - Buffer)); + Remainder = (Start - Buffer) % PageSize; + } + + // Release the old region + if (Buffer != 0) + { + if (munmap((char *)Buffer,Size) != 0) + return _error->Errno("munmap","Unable to munmap"); + Buffer = 0; + } + + // Maximize the amount that can be mapped + if (Left < (off_t)MinSize) + Size = Align(Left); + else + Size = MinSize; + + // Map it + Buffer = (unsigned char *)mmap(0,Size,PROT_READ,MAP_PRIVATE,Fd.Fd(),Offset); + if (Buffer == (unsigned char *)-1) + return _error->Errno("mmap","Couldn't make mmap %lu->%lu bytes",(unsigned long)Offset, + Size); + + // Reposition + if (Left < (off_t)Size) + End = Buffer + Left; + else + End = Buffer + Size; + Start = Buffer + Remainder; + return true; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/contrib/slidingwindow.h b/tools/dsync-0.0/libdsync/contrib/slidingwindow.h new file mode 100644 index 00000000..40a78752 --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/slidingwindow.h @@ -0,0 +1,57 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: slidingwindow.h,v 1.2 1999/11/15 07:59:49 jgg Exp $ +/* ###################################################################### + + Sliding Window - Implements a sliding buffer over a file. + + The buffer can be of arbitary size and where possible mmap is used + to optimize IO. + + To use, init the class and then call Extend with a 0 input pointer + to receive the first block and then call extend with Start <= End + to get the next block. If Start != End then Start will be returned + with a new value, but pointing at the same byte, that is the new + region will contain the subregion Start -> End(o) but with a new + length End-Start, End != End(o). + + After the file has been exhausted Start == End will be returned, but + the old region Start -> End(o) will remain valid. 
+ + ##################################################################### */ + /*}}}*/ +#ifndef SLIDING_WINDOW_H +#define SLIDING_WINDOW_H + +#ifdef __GNUG__ +#pragma interface "dsync/slidingwindow.h" +#endif + +#include +#include + +class SlidingWindow +{ + unsigned char *Buffer; + unsigned long Size; + unsigned long MinSize; + FileFd &Fd; + unsigned long PageSize; + off_t Offset; + off_t Left; + + inline unsigned long Align(off_t V) const {return ((V % PageSize) == 0)?V:V + PageSize - (V % PageSize);}; + inline unsigned long Align(unsigned long V) const {return ((V % PageSize) == 0)?V:V + PageSize - (V % PageSize);}; + inline unsigned long AlignDn(off_t V) const {return ((V % PageSize) == 0)?V:V - (V % PageSize);}; + inline unsigned long AlignDn(unsigned long V) const {return ((V % PageSize) == 0)?V:V - (V % PageSize);}; + + public: + + // Make the distance Start - End longer if possible + bool Extend(unsigned char *&Start,unsigned char *&End); + + SlidingWindow(FileFd &Fd,unsigned long MinSize = 0); + ~SlidingWindow(); +}; + +#endif diff --git a/tools/dsync-0.0/libdsync/contrib/strutl.cc b/tools/dsync-0.0/libdsync/contrib/strutl.cc new file mode 100644 index 00000000..5b49c319 --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/strutl.cc @@ -0,0 +1,853 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: strutl.cc,v 1.4 1999/10/24 06:53:12 jgg Exp $ +/* ###################################################################### + + String Util - Some usefull string functions. + + These have been collected from here and there to do all sorts of usefull + things to strings. They are usefull in file parsers, URI handlers and + especially in APT methods. 
+ + This source is placed in the Public Domain, do with it what you will + It was originally written by Jason Gunthorpe + + ##################################################################### */ + /*}}}*/ +// Includes /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/strutl.h" +#endif + +#include +#include + +#include +#include +#include +#include +#include + /*}}}*/ + +// strstrip - Remove white space from the front and back of a string /*{{{*/ +// --------------------------------------------------------------------- +/* This is handy to use when parsing a file. It also removes \n's left + over from fgets and company */ +char *_strstrip(char *String) +{ + for (;*String != 0 && (*String == ' ' || *String == '\t'); String++); + + if (*String == 0) + return String; + + char *End = String + strlen(String) - 1; + for (;End != String - 1 && (*End == ' ' || *End == '\t' || *End == '\n' || + *End == '\r'); End--); + End++; + *End = 0; + return String; +}; + /*}}}*/ +// strtabexpand - Converts tabs into 8 spaces /*{{{*/ +// --------------------------------------------------------------------- +/* */ +char *_strtabexpand(char *String,size_t Len) +{ + for (char *I = String; I != I + Len && *I != 0; I++) + { + if (*I != '\t') + continue; + if (I + 8 > String + Len) + { + *I = 0; + return String; + } + + /* Assume the start of the string is 0 and find the next 8 char + division */ + int Len; + if (String == I) + Len = 1; + else + Len = 8 - ((String - I) % 8); + Len -= 2; + if (Len <= 0) + { + *I = ' '; + continue; + } + + memmove(I + Len,I + 1,strlen(I) + 1); + for (char *J = I; J + Len != I; *I = ' ', I++); + } + return String; +} + /*}}}*/ +// ParseQuoteWord - Parse a single word out of a string /*{{{*/ +// --------------------------------------------------------------------- +/* This grabs a single word, converts any % escaped characters to their + proper values and advances the pointer. Double quotes are understood + and striped out as well. 
This is for URI/URL parsing. */ +bool ParseQuoteWord(const char *&String,string &Res) +{ + // Skip leading whitespace + const char *C = String; + for (;*C != 0 && *C == ' '; C++); + if (*C == 0) + return false; + + // Jump to the next word + for (;*C != 0 && isspace(*C) == 0; C++) + { + if (*C == '"') + { + for (C++;*C != 0 && *C != '"'; C++); + if (*C == 0) + return false; + } + } + + // Now de-quote characters + char Buffer[1024]; + char Tmp[3]; + const char *Start = String; + char *I; + for (I = Buffer; I < Buffer + sizeof(Buffer) && Start != C; I++) + { + if (*Start == '%' && Start + 2 < C) + { + Tmp[0] = Start[1]; + Tmp[1] = Start[2]; + Tmp[2] = 0; + *I = (char)strtol(Tmp,0,16); + Start += 3; + continue; + } + if (*Start != '"') + *I = *Start; + else + I--; + Start++; + } + *I = 0; + Res = Buffer; + + // Skip ending white space + for (;*C != 0 && isspace(*C) != 0; C++); + String = C; + return true; +} + /*}}}*/ +// ParseCWord - Parses a string like a C "" expression /*{{{*/ +// --------------------------------------------------------------------- +/* This expects a series of space seperated strings enclosed in ""'s. + It concatenates the ""'s into a single string. 
*/ +bool ParseCWord(const char *String,string &Res) +{ + // Skip leading whitespace + const char *C = String; + for (;*C != 0 && *C == ' '; C++); + if (*C == 0) + return false; + + char Buffer[1024]; + char *Buf = Buffer; + if (strlen(String) >= sizeof(Buffer)) + return false; + + for (; *C != 0; C++) + { + if (*C == '"') + { + for (C++; *C != 0 && *C != '"'; C++) + *Buf++ = *C; + + if (*C == 0) + return false; + + continue; + } + + if (C != String && isspace(*C) != 0 && isspace(C[-1]) != 0) + continue; + if (isspace(*C) == 0) + return false; + *Buf++ = ' '; + } + *Buf = 0; + Res = Buffer; + return true; +} + /*}}}*/ +// QuoteString - Convert a string into quoted from /*{{{*/ +// --------------------------------------------------------------------- +/* */ +string QuoteString(string Str,const char *Bad) +{ + string Res; + for (string::iterator I = Str.begin(); I != Str.end(); I++) + { + if (strchr(Bad,*I) != 0 || isprint(*I) == 0 || + *I <= 0x20 || *I >= 0x7F) + { + char Buf[10]; + sprintf(Buf,"%%%02x",(int)*I); + Res += Buf; + } + else + Res += *I; + } + return Res; +} + /*}}}*/ +// DeQuoteString - Convert a string from quoted from /*{{{*/ +// --------------------------------------------------------------------- +/* This undoes QuoteString */ +string DeQuoteString(string Str) +{ + string Res; + for (string::iterator I = Str.begin(); I != Str.end(); I++) + { + if (*I == '%' && I + 2 < Str.end()) + { + char Tmp[3]; + Tmp[0] = I[1]; + Tmp[1] = I[2]; + Tmp[2] = 0; + Res += (char)strtol(Tmp,0,16); + I += 2; + continue; + } + else + Res += *I; + } + return Res; +} + + /*}}}*/ +// SizeToStr - Convert a long into a human readable size /*{{{*/ +// --------------------------------------------------------------------- +/* A max of 4 digits are shown before conversion to the next highest unit. 
+ The max length of the string will be 5 chars unless the size is > 10 + YottaBytes (E24) */ +string SizeToStr(double Size) +{ + char S[300]; + double ASize; + if (Size >= 0) + ASize = Size; + else + ASize = -1*Size; + + /* bytes, KiloBytes, MegaBytes, GigaBytes, TeraBytes, PetaBytes, + ExaBytes, ZettaBytes, YottaBytes */ + char Ext[] = {'\0','k','M','G','T','P','E','Z','Y'}; + int I = 0; + while (I <= 8) + { + if (ASize < 100 && I != 0) + { + sprintf(S,"%.1f%c",ASize,Ext[I]); + break; + } + + if (ASize < 10000) + { + sprintf(S,"%.0f%c",ASize,Ext[I]); + break; + } + ASize /= 1000.0; + I++; + } + + return S; +} + /*}}}*/ +// TimeToStr - Convert the time into a string /*{{{*/ +// --------------------------------------------------------------------- +/* Converts a number of seconds to a hms format */ +string TimeToStr(unsigned long Sec) +{ + char S[300]; + + while (1) + { + if (Sec > 60*60*24) + { + sprintf(S,"%lid %lih%lim%lis",Sec/60/60/24,(Sec/60/60) % 24,(Sec/60) % 60,Sec % 60); + break; + } + + if (Sec > 60*60) + { + sprintf(S,"%lih%lim%lis",Sec/60/60,(Sec/60) % 60,Sec % 60); + break; + } + + if (Sec > 60) + { + sprintf(S,"%lim%lis",Sec/60,Sec % 60); + break; + } + + sprintf(S,"%lis",Sec); + break; + } + + return S; +} + /*}}}*/ +// SubstVar - Substitute a string for another string /*{{{*/ +// --------------------------------------------------------------------- +/* This replaces all occurances of Subst with Contents in Str. 
*/ +string SubstVar(string Str,string Subst,string Contents) +{ + string::size_type Pos = 0; + string::size_type OldPos = 0; + string Temp; + + while (OldPos < Str.length() && + (Pos = Str.find(Subst,OldPos)) != string::npos) + { + Temp += string(Str,OldPos,Pos) + Contents; + OldPos = Pos + Subst.length(); + } + + if (OldPos == 0) + return Str; + + return Temp + string(Str,OldPos); +} + /*}}}*/ +// URItoFileName - Convert the uri into a unique file name /*{{{*/ +// --------------------------------------------------------------------- +/* This converts a URI into a safe filename. It quotes all unsafe characters + and converts / to _ and removes the scheme identifier. The resulting + file name should be unique and never occur again for a different file */ +string URItoFileName(string URI) +{ + // Nuke 'sensitive' items + ::URI U(URI); + U.User = string(); + U.Password = string(); + U.Access = ""; + + // "\x00-\x20{}|\\\\^\\[\\]<>\"\x7F-\xFF"; + URI = QuoteString(U,"\\|{}[]<>\"^~_=!@#$%^&*"); + string::iterator J = URI.begin(); + for (; J != URI.end(); J++) + if (*J == '/') + *J = '_'; + return URI; +} + /*}}}*/ +// Base64Encode - Base64 Encoding routine for short strings /*{{{*/ +// --------------------------------------------------------------------- +/* This routine performs a base64 transformation on a string. It was ripped + from wget and then patched and bug fixed. + + This spec can be found in rfc2045 */ +string Base64Encode(string S) +{ + // Conversion table. + static char tbl[64] = {'A','B','C','D','E','F','G','H', + 'I','J','K','L','M','N','O','P', + 'Q','R','S','T','U','V','W','X', + 'Y','Z','a','b','c','d','e','f', + 'g','h','i','j','k','l','m','n', + 'o','p','q','r','s','t','u','v', + 'w','x','y','z','0','1','2','3', + '4','5','6','7','8','9','+','/'}; + + // Pre-allocate some space + string Final; + Final.reserve((4*S.length() + 2)/3 + 2); + + /* Transform the 3x8 bits to 4x6 bits, as required by + base64. 
*/ + for (string::const_iterator I = S.begin(); I < S.end(); I += 3) + { + char Bits[3] = {0,0,0}; + Bits[0] = I[0]; + if (I + 1 < S.end()) + Bits[1] = I[1]; + if (I + 2 < S.end()) + Bits[2] = I[2]; + + Final += tbl[Bits[0] >> 2]; + Final += tbl[((Bits[0] & 3) << 4) + (Bits[1] >> 4)]; + + if (I + 1 >= S.end()) + break; + + Final += tbl[((Bits[1] & 0xf) << 2) + (Bits[2] >> 6)]; + + if (I + 2 >= S.end()) + break; + + Final += tbl[Bits[2] & 0x3f]; + } + + /* Apply the padding elements, this tells how many bytes the remote + end should discard */ + if (S.length() % 3 == 2) + Final += '='; + if (S.length() % 3 == 1) + Final += "=="; + + return Final; +} + /*}}}*/ +// stringcmp - Arbitary string compare /*{{{*/ +// --------------------------------------------------------------------- +/* This safely compares two non-null terminated strings of arbitary + length */ +int stringcmp(const char *A,const char *AEnd,const char *B,const char *BEnd) +{ + for (; A != AEnd && B != BEnd; A++, B++) + if (*A != *B) + break; + + if (A == AEnd && B == BEnd) + return 0; + if (A == AEnd) + return 1; + if (B == BEnd) + return -1; + if (*A < *B) + return -1; + return 1; +} + /*}}}*/ +// stringcasecmp - Arbitary case insensitive string compare /*{{{*/ +// --------------------------------------------------------------------- +/* */ +int stringcasecmp(const char *A,const char *AEnd,const char *B,const char *BEnd) +{ + for (; A != AEnd && B != BEnd; A++, B++) + if (toupper(*A) != toupper(*B)) + break; + + if (A == AEnd && B == BEnd) + return 0; + if (A == AEnd) + return 1; + if (B == BEnd) + return -1; + if (toupper(*A) < toupper(*B)) + return -1; + return 1; +} + /*}}}*/ +// LookupTag - Lookup the value of a tag in a taged string /*{{{*/ +// --------------------------------------------------------------------- +/* The format is like those used in package files and the method + communication system */ +string LookupTag(string Message,const char *Tag,const char *Default) +{ + // Look for a 
matching tag. + int Length = strlen(Tag); + for (string::iterator I = Message.begin(); I + Length < Message.end(); I++) + { + // Found the tag + const char *i = Message.c_str() + (I - Message.begin()); + if (I[Length] == ':' && stringcasecmp(i,i+Length,Tag) == 0) + { + // Find the end of line and strip the leading/trailing spaces + string::iterator J; + I += Length + 1; + for (; isspace(*I) != 0 && I < Message.end(); I++); + for (J = I; *J != '\n' && J < Message.end(); J++); + for (; J > I && isspace(J[-1]) != 0; J--); + + return string(i,J-I); + } + + for (; *I != '\n' && I < Message.end(); I++); + } + + // Failed to find a match + if (Default == 0) + return string(); + return Default; +} + /*}}}*/ +// StringToBool - Converts a string into a boolean /*{{{*/ +// --------------------------------------------------------------------- +/* This inspects the string to see if it is true or if it is false and + then returns the result. Several varients on true/false are checked. */ +int StringToBool(string Text,int Default) +{ + char *End; + int Res = strtol(Text.c_str(),&End,0); + if (End != Text.c_str() && Res >= 0 && Res <= 1) + return Res; + + // Check for positives + if (strcasecmp(Text.c_str(),"no") == 0 || + strcasecmp(Text.c_str(),"false") == 0 || + strcasecmp(Text.c_str(),"without") == 0 || + strcasecmp(Text.c_str(),"off") == 0 || + strcasecmp(Text.c_str(),"disable") == 0) + return 0; + + // Check for negatives + if (strcasecmp(Text.c_str(),"yes") == 0 || + strcasecmp(Text.c_str(),"true") == 0 || + strcasecmp(Text.c_str(),"with") == 0 || + strcasecmp(Text.c_str(),"on") == 0 || + strcasecmp(Text.c_str(),"enable") == 0) + return 1; + + return Default; +} + /*}}}*/ +// TimeRFC1123 - Convert a time_t into RFC1123 format /*{{{*/ +// --------------------------------------------------------------------- +/* This converts a time_t into a string time representation that is + year 2000 complient and timezone neutral */ +string TimeRFC1123(time_t Date) +{ + struct tm Conv = 
*gmtime(&Date); + char Buf[300]; + + const char *Day[] = {"Sun","Mon","Tue","Wed","Thu","Fri","Sat"}; + const char *Month[] = {"Jan","Feb","Mar","Apr","May","Jun","Jul", + "Aug","Sep","Oct","Nov","Dec"}; + + sprintf(Buf,"%s, %02i %s %i %02i:%02i:%02i GMT",Day[Conv.tm_wday], + Conv.tm_mday,Month[Conv.tm_mon],Conv.tm_year+1900,Conv.tm_hour, + Conv.tm_min,Conv.tm_sec); + return Buf; +} + /*}}}*/ +// ReadMessages - Read messages from the FD /*{{{*/ +// --------------------------------------------------------------------- +/* This pulls full messages from the input FD into the message buffer. + It assumes that messages will not pause during transit so no + fancy buffering is used. */ +bool ReadMessages(int Fd, vector &List) +{ + char Buffer[4000]; + char *End = Buffer; + + while (1) + { + int Res = read(Fd,End,sizeof(Buffer) - (End-Buffer)); + if (Res < 0 && errno == EINTR) + continue; + + // Process is dead, this is kind of bad.. + if (Res == 0) + return false; + + // No data + if (Res <= 0) + return true; + + End += Res; + + // Look for the end of the message + for (char *I = Buffer; I + 1 < End; I++) + { + if (I[0] != '\n' || I[1] != '\n') + continue; + + // Pull the message out + string Message(Buffer,0,I-Buffer); + + // Fix up the buffer + for (; I < End && *I == '\n'; I++); + End -= I-Buffer; + memmove(Buffer,I,End-Buffer); + I = Buffer; + + List.push_back(Message); + } + if (End == Buffer) + return true; + + if (WaitFd(Fd) == false) + return false; + } +} + /*}}}*/ +// MonthConv - Converts a month string into a number /*{{{*/ +// --------------------------------------------------------------------- +/* This was lifted from the boa webserver which lifted it from 'wn-v1.07' + Made it a bit more robust with a few touppers though. 
*/ +static int MonthConv(char *Month) +{ + switch (toupper(*Month)) + { + case 'A': + return toupper(Month[1]) == 'P'?3:7; + case 'D': + return 11; + case 'F': + return 1; + case 'J': + if (toupper(Month[1]) == 'A') + return 0; + return toupper(Month[2]) == 'N'?5:6; + case 'M': + return toupper(Month[2]) == 'R'?2:4; + case 'N': + return 10; + case 'O': + return 9; + case 'S': + return 8; + + // Pretend it is January.. + default: + return 0; + } +} + /*}}}*/ +// timegm - Internal timegm function if gnu is not available /*{{{*/ +// --------------------------------------------------------------------- +/* Ripped this evil little function from wget - I prefer the use of + GNU timegm if possible as this technique will have interesting problems + with leap seconds, timezones and other. + + Converts struct tm to time_t, assuming the data in tm is UTC rather + than local timezone (mktime assumes the latter). + + Contributed by Roger Beeman , with the help of + Mark Baushke and the rest of the Gurus at CISCO. */ +#ifndef __USE_MISC // glib sets this +static time_t timegm(struct tm *t) +{ + time_t tl, tb; + + tl = mktime (t); + if (tl == -1) + return -1; + tb = mktime (gmtime (&tl)); + return (tl <= tb ? (tl + (tl - tb)) : (tl - (tb - tl))); +} +#endif + /*}}}*/ +// StrToTime - Converts a string into a time_t /*{{{*/ +// --------------------------------------------------------------------- +/* This handles all 3 populare time formats including RFC 1123, RFC 1036 + and the C library asctime format. It requires the GNU library function + 'timegm' to convert a struct tm in UTC to a time_t. 
For some bizzar + reason the C library does not provide any such function :<*/ +bool StrToTime(string Val,time_t &Result) +{ + struct tm Tm; + char Month[10]; + const char *I = Val.c_str(); + + // Skip the day of the week + for (;*I != 0 && *I != ' '; I++); + + // Handle RFC 1123 time + if (sscanf(I," %d %3s %d %d:%d:%d GMT",&Tm.tm_mday,Month,&Tm.tm_year, + &Tm.tm_hour,&Tm.tm_min,&Tm.tm_sec) != 6) + { + // Handle RFC 1036 time + if (sscanf(I," %d-%3s-%d %d:%d:%d GMT",&Tm.tm_mday,Month, + &Tm.tm_year,&Tm.tm_hour,&Tm.tm_min,&Tm.tm_sec) == 6) + Tm.tm_year += 1900; + else + { + // asctime format + if (sscanf(I," %3s %d %d:%d:%d %d",Month,&Tm.tm_mday, + &Tm.tm_hour,&Tm.tm_min,&Tm.tm_sec,&Tm.tm_year) != 6) + return false; + } + } + + Tm.tm_isdst = 0; + Tm.tm_mon = MonthConv(Month); + Tm.tm_year -= 1900; + + // Convert to local time and then to GMT + Result = timegm(&Tm); + return true; +} + /*}}}*/ +// StrToNum - Convert a fixed length string to a number /*{{{*/ +// --------------------------------------------------------------------- +/* This is used in decoding the crazy fixed length string headers in + tar and ar files. 
*/ +bool StrToNum(const char *Str,unsigned long &Res,unsigned Len,unsigned Base) +{ + char S[30]; + if (Len >= sizeof(S)) + return false; + memcpy(S,Str,Len); + S[Len] = 0; + + // All spaces is a zero + Res = 0; + unsigned I; + for (I = 0; S[I] == ' '; I++); + if (S[I] == 0) + return true; + + char *End; + Res = strtoul(S,&End,Base); + if (End == S) + return false; + + return true; +} + /*}}}*/ +// HexDigit - Convert a hex character into an integer /*{{{*/ +// --------------------------------------------------------------------- +/* Helper for Hex2Num */ +static int HexDigit(int c) +{ + if (c >= '0' && c <= '9') + return c - '0'; + if (c >= 'a' && c <= 'f') + return c - 'a' + 10; + if (c >= 'A' && c <= 'F') + return c - 'A' + 10; + return 0; +} + /*}}}*/ +// Hex2Num - Convert a long hex number into a buffer /*{{{*/ +// --------------------------------------------------------------------- +/* The length of the buffer must be exactly 1/2 the length of the string. */ +bool Hex2Num(const char *Start,const char *End,unsigned char *Num, + unsigned int Length) +{ + if (End - Start != (signed)(Length*2)) + return false; + + // Convert each digit. 
We store it in the same order as the string + int J = 0; + for (const char *I = Start; I < End;J++, I += 2) + { + if (isxdigit(*I) == 0 || isxdigit(I[1]) == 0) + return false; + + Num[J] = HexDigit(I[0]) << 4; + Num[J] += HexDigit(I[1]); + } + + return true; +} + /*}}}*/ + +// URI::CopyFrom - Copy from an object /*{{{*/ +// --------------------------------------------------------------------- +/* This parses the URI into all of its components */ +void URI::CopyFrom(string U) +{ + string::const_iterator I = U.begin(); + + // Locate the first colon, this seperates the scheme + for (; I < U.end() && *I != ':' ; I++); + string::const_iterator FirstColon = I; + + /* Determine if this is a host type URI with a leading double // + and then search for the first single / */ + string::const_iterator SingleSlash = I; + if (I + 3 < U.end() && I[1] == '/' && I[2] == '/') + SingleSlash += 3; + for (; SingleSlash < U.end() && *SingleSlash != '/'; SingleSlash++); + if (SingleSlash > U.end()) + SingleSlash = U.end(); + + // We can now write the access and path specifiers + Access = string(U,0,FirstColon - U.begin()); + if (SingleSlash != U.end()) + Path = string(U,SingleSlash - U.begin()); + if (Path.empty() == true) + Path = "/"; + + // Now we attempt to locate a user:pass@host fragment + if (FirstColon[1] == '/' && FirstColon[2] == '/') + FirstColon += 3; + else + FirstColon += 1; + if (FirstColon >= U.end()) + return; + + if (FirstColon > SingleSlash) + FirstColon = SingleSlash; + + // Find the colon... 
+ I = FirstColon + 1; + if (I > SingleSlash) + I = SingleSlash; + for (; I < SingleSlash && *I != ':'; I++); + string::const_iterator SecondColon = I; + + // Search for the @ after the colon + for (; I < SingleSlash && *I != '@'; I++); + string::const_iterator At = I; + + // Now write the host and user/pass + if (At == SingleSlash) + { + if (FirstColon < SingleSlash) + Host = string(U,FirstColon - U.begin(),SingleSlash - FirstColon); + } + else + { + Host = string(U,At - U.begin() + 1,SingleSlash - At - 1); + User = string(U,FirstColon - U.begin(),SecondColon - FirstColon); + if (SecondColon < At) + Password = string(U,SecondColon - U.begin() + 1,At - SecondColon - 1); + } + + // Now we parse off a port number from the hostname + Port = 0; + string::size_type Pos = Host.rfind(':'); + if (Pos == string::npos) + return; + + Port = atoi(string(Host,Pos+1).c_str()); + Host = string(Host,0,Pos); +} + /*}}}*/ +// URI::operator string - Convert the URI to a string /*{{{*/ +// --------------------------------------------------------------------- +/* */ +URI::operator string() +{ + string Res; + + if (Access.empty() == false) + Res = Access + ':'; + + if (Host.empty() == false) + { + if (Access.empty() == false) + Res += "//"; + + if (User.empty() == false) + { + Res += User; + if (Password.empty() == false) + Res += ":" + Password; + Res += "@"; + } + + Res += Host; + if (Port != 0) + { + char S[30]; + sprintf(S,":%u",Port); + Res += S; + } + } + + if (Path.empty() == false) + { + if (Path[0] != '/') + Res += "/" + Path; + else + Res += Path; + } + + return Res; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/contrib/strutl.h b/tools/dsync-0.0/libdsync/contrib/strutl.h new file mode 100644 index 00000000..e1e5adac --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/strutl.h @@ -0,0 +1,78 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: strutl.h,v 1.2 1999/10/24 06:53:12 jgg Exp $ +/* 
###################################################################### + + String Util - These are some usefull string functions + + _strstrip is a function to remove whitespace from the front and end + of a string. + + This source is placed in the Public Domain, do with it what you will + It was originally written by Jason Gunthorpe + + ##################################################################### */ + /*}}}*/ +#ifndef STRUTL_H +#define STRUTL_H + +#ifdef __GNUG__ +#pragma interface "dsync/strutl.h" +#endif + +#include +#include +#include +#include + +using namespace std; + +char *_strstrip(char *String); +char *_strtabexpand(char *String,size_t Len); +bool ParseQuoteWord(const char *&String,string &Res); +bool ParseCWord(const char *String,string &Res); +string QuoteString(string Str,const char *Bad); +string DeQuoteString(string Str); +string SizeToStr(double Bytes); +string TimeToStr(unsigned long Sec); +string SubstVar(string Str,string Subst,string Contents); +string Base64Encode(string Str); +string URItoFileName(string URI); +string TimeRFC1123(time_t Date); +bool StrToTime(string Val,time_t &Result); +string LookupTag(string Message,const char *Tag,const char *Default = 0); +int StringToBool(string Text,int Default = -1); +bool ReadMessages(int Fd, vector &List); +bool StrToNum(const char *Str,unsigned long &Res,unsigned Len,unsigned Base = 0); +bool Hex2Num(const char *Start,const char *End,unsigned char *Num, + unsigned int Length); + +int stringcmp(const char *A,const char *AEnd,const char *B,const char *BEnd); +inline int stringcmp(const char *A,const char *AEnd,const char *B) {return stringcmp(A,AEnd,B,B+strlen(B));}; +inline int stringcmp(string A,const char *B) {return stringcmp(A.c_str(),A.c_str()+strlen(A.c_str()),B,B+strlen(B));}; +int stringcasecmp(const char *A,const char *AEnd,const char *B,const char *BEnd); +inline int stringcasecmp(const char *A,const char *AEnd,const char *B) {return stringcasecmp(A,AEnd,B,B+strlen(B));}; +inline 
int stringcasecmp(string A,const char *B) {return stringcasecmp(A.c_str(),A.c_str()+strlen(A.c_str()),B,B+strlen(B));}; + +class URI +{ + void CopyFrom(string From); + + public: + + string Access; + string User; + string Password; + string Host; + string Path; + unsigned int Port; + + operator string(); + inline void operator =(string From) {CopyFrom(From);}; + inline bool empty() {return Access.empty();}; + + URI(string Path) {CopyFrom(Path);}; + URI() : Port(0) {}; +}; + +#endif diff --git a/tools/dsync-0.0/libdsync/contrib/system.h b/tools/dsync-0.0/libdsync/contrib/system.h new file mode 100644 index 00000000..13434fe4 --- /dev/null +++ b/tools/dsync-0.0/libdsync/contrib/system.h @@ -0,0 +1,56 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: system.h,v 1.2 1999/01/19 04:41:43 jgg Exp $ +/* ###################################################################### + + System Header - Usefull private definitions + + This source is placed in the Public Domain, do with it what you will + It was originally written by Brian C. White. + + ##################################################################### */ + /*}}}*/ +// Private header +#ifndef SYSTEM_H +#define SYSTEM_H + +// MIN_VAL(SINT16) will return -0x8000 and MAX_VAL(SINT16) = 0x7FFF +#define MIN_VAL(t) (((t)(-1) > 0) ? (t)( 0) : (t)(((1L<<(sizeof(t)*8-1)) ))) +#define MAX_VAL(t) (((t)(-1) > 0) ? (t)(-1) : (t)(((1L<<(sizeof(t)*8-1))-1))) + +// Min/Max functions +#if defined(__HIGHC__) +#define MIN(x,y) _min(x,y) +#define MAX(x,y) _max(x,y) +#endif + +// GNU C++ has a min/max operator +#if defined(__GNUG__) +#define MIN(A,B) ((A) ? (B)) +#endif + +/* Templates tend to mess up existing code that uses min/max because of the + strict matching requirements */ +#if !defined(MIN) +#define MIN(A,B) ((A) < (B)?(A):(B)) +#define MAX(A,B) ((A) > (B)?(A):(B)) +#endif + +/* Bound functions, bound will return the value b within the limits a-c + bounv will change b so that it is within the limits of a-c. 
*/ +#define _bound(a,b,c) MIN(c,MAX(b,a)) +#define _boundv(a,b,c) b = _bound(a,b,c) +#define ABS(a) (((a) < (0)) ?-(a) : (a)) + +/* Usefull count macro, use on an array of things and it will return the + number of items in the array */ +#define _count(a) (sizeof(a)/sizeof(a[0])) + +// Flag Macros +#define FLAG(f) (1L << (f)) +#define SETFLAG(v,f) ((v) |= FLAG(f)) +#define CLRFLAG(v,f) ((v) &=~FLAG(f)) +#define CHKFLAG(v,f) ((v) & FLAG(f) ? true : false) + +#endif diff --git a/tools/dsync-0.0/libdsync/filefilter.cc b/tools/dsync-0.0/libdsync/filefilter.cc new file mode 100644 index 00000000..0eca9b47 --- /dev/null +++ b/tools/dsync-0.0/libdsync/filefilter.cc @@ -0,0 +1,150 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: filefilter.cc,v 1.4 1999/08/05 03:22:55 jgg Exp $ +/* ###################################################################### + + File Filter - Regular Expression maching filter + + The idea for this was stolen shamelessly from rsync. + + Doesn't work: + dsync-flist -e binary-alpha -i binary-all -i binary-i386 generate /tmp/listing + + And various other incantations like that. 
+ + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/filefilter.h" +#endif + +#include +#include + +#include +using namespace std; + /*}}}*/ + +// FileFilter::dsFileFilter - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsFileFilter::dsFileFilter() : List(0) +{ +} + /*}}}*/ +// FileFilter::~dsFileFilter - Destructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsFileFilter::~dsFileFilter() +{ + while (List != 0) + { + Item *Tmp = List; + List = Tmp->Next; + delete Tmp; + } +} + /*}}}*/ +// FileFilter::Test - Test a directory and file /*{{{*/ +// --------------------------------------------------------------------- +/* This will return true if the named entity is included by the filter, false + otherwise. By default all entries are included. */ +bool dsFileFilter::Test(const char *Directory,const char *File) +{ + for (Item *I = List; I != 0; I = I->Next) + { + bool Res = I->Test(Directory,File); + if (Res == false) + continue; + + if (I->Type == Item::Include) + return true; + + if (I->Type == Item::Exclude) + return false; + } + + return true; +} + /*}}}*/ +// FileFilter::LoadFilter - Load the filter list from the configuration /*{{{*/ +// --------------------------------------------------------------------- +/* When given the root of a configuration tree this will parse that sub-tree + as an ordered list of include/exclude directives. 
Each value in the list + must be prefixed with a + or a - indicating include/exclude */ +bool dsFileFilter::LoadFilter(Configuration::Item const *Top) +{ + if (Top != 0) + Top = Top->Child; + + // Advance to the end of the list + Item **End = &List; + for (; *End != 0; End = &(*End)->Next); + + for (; Top != 0;) + { + Item *New = new Item; + + // Decode the type + if (Top->Value[0] == '+') + New->Type = Item::Include; + else + { + if (Top->Value[0] == '-') + New->Type = Item::Exclude; + else + { + delete New; + return _error->Error("Malformed filter directive %s",Top->Tag.c_str()); + } + } + + // Strip off the +/- indicator + unsigned int Count = 1; + for (const char *I = Top->Value.c_str() + 1; I < Top->Value.c_str() + strlen(Top->Value.c_str()) && + isspace(*I); I++) + Count++; + New->Pattern = string(Top->Value,Count); + + // Set flags + New->Flags = 0; + if (New->Pattern == "*") + New->Flags |= Item::MatchAll; + if (New->Pattern.find('/') != string::npos) + New->Flags |= Item::MatchPath; + + // Link it into the list + New->Next = 0; + *End = New; + End = &New->Next; + + Top = Top->Next; + } + return true; +} + /*}}}*/ +// FileFilter::Item::Test - Test a single item /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFileFilter::Item::Test(const char *Directory,const char *File) +{ + // Catch all + if ((Flags & MatchAll) == MatchAll) + return true; + + // Append the direcotry + if ((Flags & MatchPath) == MatchPath) + { + char S[1024]; + if (strlen(Directory) + strlen(File) > sizeof(S)) + return _error->Error("File field overflow"); + strcpy(S,Directory); + strcat(S,File); + + return fnmatch(Pattern.c_str(),S,FNM_PATHNAME) == 0; + } + + return fnmatch(Pattern.c_str(),File,FNM_PATHNAME) == 0; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/filefilter.h b/tools/dsync-0.0/libdsync/filefilter.h new file mode 100644 index 00000000..ff13143b --- /dev/null +++ b/tools/dsync-0.0/libdsync/filefilter.h @@ -0,0 +1,60 @@ +// 
-*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: filefilter.h,v 1.2 1998/12/30 05:36:41 jgg Exp $ +/* ###################################################################### + + File Filter - Regular Expression maching filter + + This implements an ordered include/exclude filter list that can be used + to filter filenames. + + Pattern matching is done identically to rsync, the key points are: + - Patterns containing / are matched against the whole path, otherwise + only the file name is used. + - Patterns that end in a / only match directories + - Wildcards supported by fnmatch (?*[) + + ##################################################################### */ + /*}}}*/ +#ifndef DSYNC_FILEFILTER +#define DSYNC_FILEFILTER + +#ifdef __GNUG__ +#pragma interface "dsync/filefilter.h" +#endif + +#include +#include + +class dsFileFilter +{ + protected: + + struct Item + { + enum {Include, Exclude} Type; + string Pattern; + + // Various flags. + enum {MatchAll = (1<<0), MatchPath = (1<<1)}; + unsigned long Flags; + + Item *Next; + + bool Test(const char *Directory,const char *File); + }; + Item *List; + + public: + + // Members to see if the filter hits or misses + bool Test(const char *Directory,const char *File); + + // Load the filter from a configuration space + bool LoadFilter(Configuration::Item const *Root); + + dsFileFilter(); + ~dsFileFilter(); +}; + +#endif diff --git a/tools/dsync-0.0/libdsync/filelist.cc b/tools/dsync-0.0/libdsync/filelist.cc new file mode 100644 index 00000000..7711c13f --- /dev/null +++ b/tools/dsync-0.0/libdsync/filelist.cc @@ -0,0 +1,867 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: filelist.cc,v 1.14 1999/12/26 06:59:00 jgg Exp $ +/* ###################################################################### + + File List Structures + + This module has a large group of services all relating to the binary + file list. 
Each individual record type has an read and write function + that can be used to store it into a unpacked structure. + + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/filelist.h" +#endif + +#include +#include +#include + +#include +#include +#include +using namespace std; + /*}}}*/ + +// FList::Step - Step to the next record /*{{{*/ +// --------------------------------------------------------------------- +/* This is an encompassing function to read a single record of any type + from the IO */ +bool dsFList::Step(IO &IO) +{ + if (!(_error->PendingError() == false && IO.ReadInt(Tag,1) == true)) + return false; + + Entity = 0; + File = 0; + + switch (Tag) + { + case dsFList::tHeader: + Head.Tag = Tag; + Head.Read(IO); + IO.Header = Head; + break; + + case dsFList::tDirMarker: + case dsFList::tDirStart: + case dsFList::tDirectory: + Dir.Tag = Tag; + Entity = &Dir; + return Dir.Read(IO); + + case dsFList::tNormalFile: + NFile.Tag = Tag; + Entity = &NFile; + File = &NFile; + return NFile.Read(IO); + + case dsFList::tSymlink: + SLink.Tag = Tag; + Entity = &SLink; + return SLink.Read(IO); + + case dsFList::tDeviceSpecial: + DevSpecial.Tag = Tag; + Entity = &DevSpecial; + return DevSpecial.Read(IO); + + case dsFList::tFilter: + Filt.Tag = Tag; + return Filt.Read(IO); + + case dsFList::tUidMap: + UMap.Tag = Tag; + return UMap.Read(IO); + + case dsFList::tGidMap: + UMap.Tag = Tag; + return UMap.Read(IO); + + case dsFList::tHardLink: + HLink.Tag = Tag; + Entity = &HLink; + File = &HLink; + return HLink.Read(IO); + + case dsFList::tTrailer: + Trail.Tag = Tag; + return Trail.Read(IO); + + case dsFList::tRSyncChecksum: + RChk.Tag = Tag; + return RChk.Read(IO); + + case dsFList::tAggregateFile: + AgFile.Tag = Tag; + return AgFile.Read(IO); + + case tRSyncEnd: + case tDirEnd: + return true; + + default: + return _error->Error("Corrupted file list"); + } + return true; +} 
+ /*}}}*/ +// FList::Print - Print out the record /*{{{*/ +// --------------------------------------------------------------------- +/* This simply displays the record */ +bool dsFList::Print(ostream &out) +{ + char S[1024]; + switch (Tag) + { + case tHeader: + { + snprintf(S,sizeof(S),"H Sig=%lx Maj=%lu Min=%lu Epoch=%lu Count=%lu\n", + Head.Signature,Head.MajorVersion,Head.MinorVersion, + Head.Epoch,Head.FlagCount); + out << S; + break; + } + + case tDirMarker: + case tDirStart: + case tDirectory: + { + if (Tag == tDirMarker) + snprintf(S,sizeof(S),"DM Mod=%lu", + Dir.ModTime+Head.Epoch); + if (Tag == tDirStart) + snprintf(S,sizeof(S),"DS Mod=%lu", + Dir.ModTime+Head.Epoch); + if (Tag == tDirectory) + snprintf(S,sizeof(S),"D Mod=%lu", + Dir.ModTime+Head.Epoch); + out << S; + if ((Head.Flags[Tag] & Directory::FlPerm) != 0) + { + snprintf(S,sizeof(S)," Perm=%lo",Dir.Permissions); + out << S; + } + + if ((Head.Flags[Tag] & Directory::FlOwner) != 0) + { + snprintf(S,sizeof(S)," U=%lu G=%lu",Dir.User,Dir.Group); + out << S; + } + + snprintf(S,sizeof(S)," N='%s'\n",Dir.Name.c_str()); + out << S; + break; + } + + case tDirEnd: + out << "DE" << endl; + break; + + case tHardLink: + case tNormalFile: + { + snprintf(S,sizeof(S),"F Mod=%lu",File->ModTime+Head.Epoch); + out << S; + if ((Head.Flags[Tag] & NormalFile::FlPerm) != 0) + { + snprintf(S,sizeof(S)," Perm=%lo",File->Permissions); + out << S; + } + if ((Head.Flags[Tag] & NormalFile::FlOwner) != 0) + { + snprintf(S,sizeof(S)," U=%lu G=%lu",File->User,File->Group); + out << S; + } + if ((Head.Flags[Tag] & NormalFile::FlMD5) != 0) + { + char S[16*2+1]; + for (unsigned int I = 0; I != 16; I++) + sprintf(S+2*I,"%02x",File->MD5[I]); + S[16*2] = 0; + out << " MD5=" << S; + } + + if (Tag == tHardLink) + out << " Ser=" << HLink.Serial; + snprintf(S,sizeof(S)," Sz=%lu N='%s'\n",File->Size,File->Name.c_str()); + out << S; + + break; + } + + case tDeviceSpecial: + { + snprintf(S,sizeof(S),"S 
Mod=%lu",DevSpecial.ModTime+Head.Epoch); + out << S; + if ((Head.Flags[Tag] & DeviceSpecial::FlPerm) != 0) + { + snprintf(S,sizeof(S)," Perm=%lo",DevSpecial.Permissions); + out << S; + } + if ((Head.Flags[Tag] & DeviceSpecial::FlOwner) != 0) + { + snprintf(S,sizeof(S)," U=%lu G=%lu",DevSpecial.User,DevSpecial.Group); + out << S; + } + snprintf(S,sizeof(S)," N='%s'\n",DevSpecial.Name.c_str()); + out << S; + break; + } + + case tSymlink: + { + snprintf(S,sizeof(S),"L Mod=%lu",SLink.ModTime+Head.Epoch); + out << S; + if ((Head.Flags[Tag] & Symlink::FlOwner) != 0) + { + snprintf(S,sizeof(S)," U=%lu G=%lu",SLink.User,SLink.Group); + out << S; + } + + snprintf(S,sizeof(S)," N='%s' T='%s'\n",SLink.Name.c_str(),SLink.To.c_str()); + out << S; + break; + } + + case dsFList::tTrailer: + { + snprintf(S,sizeof(S),"T Sig=%lx\n",Trail.Signature); + out << S; + break; + } + + case dsFList::tRSyncChecksum: + { + snprintf(S,sizeof(S),"RC BlockSize=%lu FileSize=%lu\n",RChk.BlockSize,RChk.FileSize); + out << S; + break; + } + + case dsFList::tAggregateFile: + { + snprintf(S,sizeof(S),"RAG File='%s'\n",AgFile.File.c_str()); + break; + } + + case tRSyncEnd: + out << "RSE" << endl; + break; + + default: + return _error->Error("Unknown tag %u",Tag); + } + return true; +} + /*}}}*/ + +// IO::IO - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsFList::IO::IO() +{ + NoStrings = false; +} + /*}}}*/ +// IO::ReadNum - Read a variable byte number coded with WriteNum /*{{{*/ +// --------------------------------------------------------------------- +/* Read a variable byte encoded number, see WriteNum */ +bool dsFList::IO::ReadNum(unsigned long &Number) +{ + unsigned int I = 0; + Number = 0; + while (1) + { + unsigned char Byte = 0; + if (Read(&Byte,1) == false) + return false; + Number |= (Byte & 0x7F) << 7*I; + if ((Byte & (1<<7)) == 0) + return true; + I++; + } +} + /*}}}*/ +// IO::WriteNum - Write a variable byte number /*{{{*/ +// 
--------------------------------------------------------------------- +/* This encodes the given number into a variable number of bytes and writes + it to the stream. This is done by encoding it in 7 bit chunks and using + the 8th bit as a continuation flag */ +bool dsFList::IO::WriteNum(unsigned long Number) +{ + unsigned char Bytes[10]; + unsigned int I = 0; + while (1) + { + Bytes[I] = Number & 0x7F; + Number >>= 7; + if (Number != 0) + Bytes[I] |= (1<<7); + else + break; + I++; + } + return Write(Bytes,I+1); +} + /*}}}*/ +// IO::ReadInt - Read an unsigned int written by WriteInt /*{{{*/ +// --------------------------------------------------------------------- +/* Read an unsigned integer of a given number of bytes, see WriteInt */ +bool dsFList::IO::ReadInt(unsigned long &Number,unsigned char Count) +{ + unsigned char Bytes[8]; + if (Read(&Bytes,Count) == false) + return false; + + Number = 0; + for (unsigned int I = 0; I != Count; I++) + Number |= (Bytes[I] << I*8); + return true; +} + /*}}}*/ +// IO::WriteInt - Write an unsigned int with a number of bytes /*{{{*/ +// --------------------------------------------------------------------- +/* This writes the number of bytes in least-significant-byte first order */ +bool dsFList::IO::WriteInt(unsigned long Number,unsigned char Count) +{ + unsigned char Bytes[8]; + for (unsigned int I = 0; I != Count; I++) + Bytes[I] = (Number >> I*8); + return Write(Bytes,Count); +} + /*}}}*/ +// IO::ReadInt - Read an signed int written by WriteInt /*{{{*/ +// --------------------------------------------------------------------- +/* Read a signed integer of a given number of bytes, see WriteInt */ +bool dsFList::IO::ReadInt(signed long &Number,unsigned char Count) +{ + unsigned char Bytes[8]; + if (Read(&Bytes,Count) == false) + return false; + + Number = 0; + for (unsigned int I = 0; I != Count; I++) + Number |= (Bytes[I] << I*8); + return true; +} + /*}}}*/ +// IO::WriteInt - Write an signed int with a number of bytes /*{{{*/ 
+// --------------------------------------------------------------------- +/* This writes the number of bytes in least-significant-byte first order */ +bool dsFList::IO::WriteInt(signed long Number,unsigned char Count) +{ + unsigned char Bytes[8]; + for (unsigned int I = 0; I != Count; I++) + Bytes[I] = (Number >> I*8); + return Write(Bytes,Count); +} + /*}}}*/ +// IO::ReadString - Read a string written by WriteString /*{{{*/ +// --------------------------------------------------------------------- +/* If NoStrings is set then the string is not allocated into memory, this + saves time when scanning a file */ +bool dsFList::IO::ReadString(string &Foo) +{ + char S[1024]; + unsigned long Len; + if (ReadNum(Len) == false) + return false; + if (Len >= sizeof(S)) + return _error->Error("String buffer too small"); + if (Read(S,Len) == false) + return false; + S[Len] = 0; + + if (NoStrings == false) + Foo = S; + else + Foo = string(); + + return true; +} + /*}}}*/ +// IO::WriteString - Write a string to the stream /*{{{*/ +// --------------------------------------------------------------------- +/* Write a string, we encode a Number contianing the length and then the + string itself */ +bool dsFList::IO::WriteString(string const &Foo) +{ + return WriteNum(Foo.length()) && Write(Foo.c_str(),strlen(Foo.c_str())); +} + /*}}}*/ + +// Header::Header - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* The constructor sets the current signature and version information */ +dsFList::Header::Header() : Signature(0x97E78AB), MajorVersion(0), + MinorVersion(1) +{ + Tag = dsFList::tHeader; + FlagCount = _count(Flags); + memset(Flags,0,sizeof(Flags)); + + Epoch = (unsigned long)time(0); +} + /*}}}*/ +// Header::Read - Read the coded header /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::Header::Read(IO &IO) +{ + // Read the contents + if ((IO.ReadInt(Signature,4) && + 
IO.ReadInt(MajorVersion,2) && IO.ReadInt(MinorVersion,2) && + IO.ReadNum(Epoch) && IO.ReadInt(FlagCount,1)) == false) + return false; + + unsigned long RealFlagCount = FlagCount; + if (FlagCount > _count(Flags)) + FlagCount = _count(Flags); + + // Read the flag array + for (unsigned int I = 0; I != RealFlagCount; I++) + { + unsigned long Jnk; + if (I >= FlagCount) + { + if (IO.ReadInt(Jnk,4) == false) + return false; + } + else + { + if (IO.ReadInt(Flags[I],4) == false) + return false; + } + } + + return true; +} + /*}}}*/ +// Header::Write - Write the coded header /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::Header::Write(IO &IO) +{ + FlagCount = _count(Flags); + + // Write the contents + if ((IO.WriteInt(Tag,1) && IO.WriteInt(Signature,4) && + IO.WriteInt(MajorVersion,2) && IO.WriteInt(MinorVersion,2) && + IO.WriteNum(Epoch) && IO.WriteInt(FlagCount,1)) == false) + return false; + + // Write the flag array + for (unsigned int I = 0; I != FlagCount; I++) + if (IO.WriteInt(Flags[I],4) == false) + return false; + return true; +} + /*}}}*/ +// Directory::Read - Read a coded directory record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::Directory::Read(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + + if ((IO.ReadInt(ModTime,4)) == false) + return false; + if ((F & FlPerm) == FlPerm && IO.ReadInt(Permissions,2) == false) + return false; + if ((F & FlOwner) == FlOwner && (IO.ReadNum(User) && + IO.ReadNum(Group)) == false) + return false; + if (IO.ReadString(Name) == false) + return false; + return true; +} + /*}}}*/ +// Directory::Write - Write a compacted directory record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::Directory::Write(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + + if ((IO.WriteInt(Tag,1) && IO.WriteInt(ModTime,4)) == false) + return false; + if ((F & FlPerm) == 
FlPerm && IO.WriteInt(Permissions,2) == false) + return false; + if ((F & FlOwner) == FlOwner && (IO.WriteNum(User) && + IO.WriteNum(Group)) == false) + return false; + if (IO.WriteString(Name) == false) + return false; + return true; +} + /*}}}*/ +// NormalFile::Read - Read the compacted file record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::NormalFile::Read(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + + if ((IO.ReadInt(ModTime,4)) == false) + return false; + if ((F & FlPerm) == FlPerm && IO.ReadInt(Permissions,2) == false) + return false; + if ((F & FlOwner) == FlOwner && (IO.ReadNum(User) && + IO.ReadNum(Group)) == false) + return false; + if ((IO.ReadString(Name) && IO.ReadNum(Size)) == false) + return false; + if ((F & FlMD5) == FlMD5 && IO.Read(&MD5,16) == false) + return false; + + return true; +} + /*}}}*/ +// NormalFile::write - Write the compacted file record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::NormalFile::Write(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + + if ((IO.WriteInt(Tag,1) && IO.WriteInt(ModTime,4)) == false) + return false; + if ((F & FlPerm) == FlPerm && IO.WriteInt(Permissions,2) == false) + return false; + if ((F & FlOwner) == FlOwner && (IO.WriteNum(User) && + IO.WriteNum(Group)) == false) + return false; + if ((IO.WriteString(Name) && IO.WriteNum(Size)) == false) + return false; + if ((F & FlMD5) == FlMD5 && IO.Write(&MD5,16) == false) + return false; + + return true; +} + /*}}}*/ +// Symlink::Read - Read a compacted symlink record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::Symlink::Read(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + + if ((IO.ReadInt(ModTime,4)) == false) + return false; + if ((F & FlOwner) == FlOwner && (IO.ReadNum(User) && + IO.ReadNum(Group)) == false) + return false; + if ((IO.ReadString(Name) && 
IO.ReadInt(Compress,1) && + IO.ReadString(To)) == false) + return false; + + // Decompress the string + if (Compress != 0) + { + if ((Compress & (1<<7)) == (1<<7)) + To += Name; + if ((Compress & 0x7F) != 0) + To = string(IO.LastSymlink,0,Compress & 0x7F) + To; + } + + IO.LastSymlink = To; + return true; +} + /*}}}*/ +// Symlink::Write - Write a compacted symlink record /*{{{*/ +// --------------------------------------------------------------------- +/* This performs the symlink compression described in the file list + document. */ +bool dsFList::Symlink::Write(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + + if ((IO.WriteInt(Tag,1) && IO.WriteInt(ModTime,4)) == false) + return false; + if ((F & FlOwner) == FlOwner && (IO.WriteNum(User) && + IO.WriteNum(Group)) == false) + return false; + + if (IO.WriteString(Name) == false) + return false; + + // Attempt to remove the trailing text + bool Trail = false; + if (To.length() >= Name.length()) + { + unsigned int I = To.length() - Name.length(); + for (unsigned int J = 0; I < To.length(); I++, J++) + if (To[I] != Name[J]) + break; + if (I == To.length()) + Trail = true; + } + + // Compress the symlink target + Compress = 0; + unsigned int Len = To.length(); + if (Trail == true) + Len -= Name.length(); + for (; Compress < Len && Compress < IO.LastSymlink.length() && + Compress < 0x7F; Compress++) + if (To[Compress] != IO.LastSymlink[Compress]) + break; + + // Set the trail flag + if (Trail == true) + Compress |= (1<<7); + + // Write the compresion byte + if (IO.WriteInt(Compress,1) == false) + return false; + + // Write the data string + if (Trail == true) + { + if (IO.WriteString(string(To,Compress & 0x7F,To.length() - Name.length() - (Compress & 0x7F))) == false) + return false; + } + else + { + if (IO.WriteString(string(To,Compress,To.length() - Compress)) == false) + return false; + } + + IO.LastSymlink = To; + + return true; +} + /*}}}*/ +// DeviceSpecial::Read - Read a compacted device special record 
/*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::DeviceSpecial::Read(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + + if ((IO.ReadInt(ModTime,4)) == false) + return false; + if (IO.ReadInt(Permissions,2) == false) + return false; + if ((F & FlOwner) == FlOwner && (IO.ReadNum(User) && + IO.ReadNum(Group)) == false) + return false; + if ((IO.ReadNum(Dev) && IO.ReadString(Name)) == false) + return false; + return true; +} + /*}}}*/ +// DeviceSpecial::Write - Write a compacted device special record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::DeviceSpecial::Write(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + + if ((IO.WriteInt(Tag,1) && IO.WriteInt(ModTime,4)) == false) + return false; + if (IO.WriteInt(Permissions,2) == false) + return false; + if ((F & FlOwner) == FlOwner && (IO.WriteNum(User) && + IO.WriteNum(Group)) == false) + return false; + if ((IO.WriteNum(Dev) && IO.WriteString(Name)) == false) + return false; + return true; +} + /*}}}*/ +// Filter::Read - Read a compacted filter record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::Filter::Read(IO &IO) +{ + if ((IO.ReadInt(Type,1) && + IO.ReadString(Pattern)) == false) + return false; + return true; +} + /*}}}*/ +// Filter::Write - Write a compacted filter record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::Filter::Write(IO &IO) +{ + if ((IO.WriteInt(Tag,1) && IO.WriteInt(Type,1) && + IO.WriteString(Pattern)) == false) + return false; + return true; +} + /*}}}*/ +// UidGidMap::Read - Read a compacted Uid/Gid map record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::UidGidMap::Read(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + if ((IO.ReadNum(FileID)) == false) + return false; + + if ((F & FlRealID) == 
FlRealID && IO.ReadNum(RealID) == false) + return false; + if (IO.ReadString(Name) == false) + return false; + return true; +} + /*}}}*/ +// UidGidMap::Write - Write a compacted Uid/Gid map record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::UidGidMap::Write(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + if ((IO.WriteInt(Tag,1) && IO.WriteNum(FileID)) == false) + return false; + + if ((F & FlRealID) == FlRealID && IO.WriteNum(RealID) == false) + return false; + if (IO.WriteString(Name) == false) + return false; + return true; +} + /*}}}*/ +// HardLink::Read - Read the compacted link record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::HardLink::Read(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + + if ((IO.ReadInt(ModTime,4) && IO.ReadNum(Serial)) == false) + return false; + if ((F & FlPerm) == FlPerm && IO.ReadInt(Permissions,2) == false) + return false; + if ((F & FlOwner) == FlOwner && (IO.ReadNum(User) && + IO.ReadNum(Group)) == false) + return false; + if ((IO.ReadString(Name) && IO.ReadNum(Size)) == false) + return false; + if ((F & FlMD5) == FlMD5 && IO.Read(&MD5,16) == false) + return false; + + return true; +} + /*}}}*/ +// HardLink::Write - Write the compacted file record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::HardLink::Write(IO &IO) +{ + unsigned long F = IO.Header.Flags[Tag]; + + if ((IO.WriteInt(Tag,1) && IO.WriteInt(ModTime,4) && + IO.ReadNum(Serial)) == false) + return false; + if ((F & FlPerm) == FlPerm && IO.WriteInt(Permissions,2) == false) + return false; + if ((F & FlOwner) == FlOwner && (IO.WriteNum(User) && + IO.WriteNum(Group)) == false) + return false; + if ((IO.WriteString(Name) && IO.WriteNum(Size)) == false) + return false; + if ((F & FlMD5) == FlMD5 && IO.Write(&MD5,16) == false) + return false; + + return true; +} + /*}}}*/ +// 
Trailer::Trailer - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsFList::Trailer::Trailer() : Tag(dsFList::tTrailer), Signature(0xBA87E79) +{ +} + /*}}}*/ +// Trailer::Read - Read a compacted tail record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::Trailer::Read(IO &IO) +{ + if (IO.ReadInt(Signature,4) == false) + return false; + return true; +} + /*}}}*/ +// Trailer::Write - Write a compacted tail record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::Trailer::Write(IO &IO) +{ + if ((IO.WriteInt(Tag,1) && + IO.WriteInt(Signature,4)) == false) + return false; + return true; +} + /*}}}*/ +// RSyncChecksum::RSyncChecksum - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsFList::RSyncChecksum::RSyncChecksum() : Tag(dsFList::tRSyncChecksum), + Sums(0) +{ +} + /*}}}*/ +// RSyncChecksum::~RSyncChecksum - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsFList::RSyncChecksum::~RSyncChecksum() +{ + delete [] Sums; +} + /*}}}*/ +// RSyncChecksum::Read - Read a compacted device special record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::RSyncChecksum::Read(IO &IO) +{ + if ((IO.ReadNum(BlockSize) && IO.ReadNum(FileSize)) == false) + return false; + + // Read in the checksum table + delete [] Sums; + Sums = new unsigned char[(FileSize + BlockSize-1)/BlockSize*20]; + if (IO.Read(Sums,(FileSize + BlockSize-1)/BlockSize*20) == false) + return false; + + return true; +} + /*}}}*/ +// RSyncChecksum::Write - Write a compacted device special record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::RSyncChecksum::Write(IO &IO) +{ + if ((IO.WriteInt(Tag,1) && IO.WriteNum(BlockSize) && + 
IO.WriteNum(FileSize)) == false) + return false; + + if (IO.Write(Sums,(FileSize + BlockSize-1)/BlockSize*20) == false) + return false; + return true; +} + /*}}}*/ +// AggregateFile::Read - Read a aggregate file record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::AggregateFile::Read(IO &IO) +{ + return IO.ReadString(File); +} + /*}}}*/ +// AggregateFile::Write - Write a compacted filter record /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsFList::AggregateFile::Write(IO &IO) +{ + if ((IO.WriteInt(Tag,1) && IO.WriteString(File)) == false) + return false; + return true; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/filelist.h b/tools/dsync-0.0/libdsync/filelist.h new file mode 100644 index 00000000..430d089a --- /dev/null +++ b/tools/dsync-0.0/libdsync/filelist.h @@ -0,0 +1,224 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: filelist.h,v 1.10 1999/12/26 06:59:00 jgg Exp $ +/* ###################################################################### + + File List structures + + These structures represent the uncompacted binary records from the + file list file. Functions are provided to compact and decompact these + structures for reading and writing. + + The dsFList class can be instantiated to get get a general 'all records' + storage. It also has a member to read the next record from the IO and + to print out a record summary. + + Be sure to read filelist.sgml which contains the precise meaning of + the feilds and the compaction technique used. 
+ + ##################################################################### */ + /*}}}*/ +#ifndef DSYNC_FILELIST +#define DSYNC_FILELIST + +#ifdef __GNUG__ +#pragma interface "dsync/filelist.h" +#endif + +#include +using namespace std; + +class dsFList +{ + public: + + class IO; + + struct Header + { + unsigned long Tag; + unsigned long Signature; + unsigned long MajorVersion; + unsigned long MinorVersion; + unsigned long Epoch; + + unsigned long FlagCount; + unsigned long Flags[15]; + + bool Read(IO &IO); + bool Write(IO &IO); + + Header(); + }; + + struct DirEntity + { + unsigned long Tag; + signed long ModTime; + unsigned long Permissions; + unsigned long User; + unsigned long Group; + string Name; + + enum EntFlags {FlPerm = (1<<0), FlOwner = (1<<1)}; + + /* You know what? egcs-2.91.60 will not call the destructor for Name + if this in not here. I can't reproduce this in a simpler context + either. - Jgg [time passes] serious egcs bug, it was mislinking + the string classes :< */ + ~DirEntity() {}; + }; + + struct Directory : public DirEntity + { + bool Read(IO &IO); + bool Write(IO &IO); + }; + + struct NormalFile : public DirEntity + { + unsigned long Size; + unsigned char MD5[16]; + + enum Flags {FlMD5 = (1<<2)}; + + bool Read(IO &IO); + bool Write(IO &IO); + }; + + struct Symlink : public DirEntity + { + unsigned long Compress; + string To; + + bool Read(IO &IO); + bool Write(IO &IO); + }; + + struct DeviceSpecial : public DirEntity + { + unsigned long Dev; + + bool Read(IO &IO); + bool Write(IO &IO); + }; + + struct Filter + { + unsigned long Tag; + unsigned long Type; + string Pattern; + + enum Types {Include=1, Exclude=2}; + + bool Read(IO &IO); + bool Write(IO &IO); + }; + + struct UidGidMap + { + unsigned long Tag; + unsigned long FileID; + unsigned long RealID; + string Name; + + enum Flags {FlRealID = (1<<0)}; + + bool Read(IO &IO); + bool Write(IO &IO); + }; + + struct HardLink : public NormalFile + { + unsigned long Serial; + + bool Read(IO &IO); + 
bool Write(IO &IO); + }; + + struct Trailer + { + unsigned long Tag; + unsigned long Signature; + + bool Read(IO &IO); + bool Write(IO &IO); + Trailer(); + }; + + struct RSyncChecksum + { + unsigned long Tag; + unsigned long BlockSize; + unsigned long FileSize; + + // Array of 160 bit values (20 bytes) stored in Network byte order + unsigned char *Sums; + + bool Read(IO &IO); + bool Write(IO &IO); + RSyncChecksum(); + ~RSyncChecksum(); + }; + + struct AggregateFile + { + unsigned long Tag; + string File; + + bool Read(IO &IO); + bool Write(IO &IO); + }; + + + enum Types {tHeader=0, tDirMarker=1, tDirStart=2, tDirEnd=3, tNormalFile=4, + tSymlink=5, tDeviceSpecial=6, tDirectory=7, tFilter=8, + tUidMap=9, tGidMap=10, tHardLink=11, tTrailer=12, tRSyncChecksum=13, + tAggregateFile=14, tRSyncEnd=15}; + + unsigned long Tag; + Header Head; + Directory Dir; + NormalFile NFile; + Symlink SLink; + DeviceSpecial DevSpecial; + Filter Filt; + UidGidMap UMap; + HardLink HLink; + Trailer Trail; + DirEntity *Entity; + NormalFile *File; + RSyncChecksum RChk; + AggregateFile AgFile; + + bool Step(IO &IO); + bool Print(ostream &out); +}; + +class dsFList::IO +{ + public: + + string LastSymlink; + dsFList::Header Header; + bool NoStrings; + + virtual bool Read(void *Buf,unsigned long Len) = 0; + virtual bool Write(const void *Buf,unsigned long Len) = 0; + virtual bool Seek(unsigned long Bytes) = 0; + virtual unsigned long Tell() = 0; + + bool ReadNum(unsigned long &Number); + bool WriteNum(unsigned long Number); + bool ReadInt(unsigned long &Number,unsigned char Count); + bool WriteInt(unsigned long Number,unsigned char Count); + bool ReadInt(signed long &Number,unsigned char Count); + bool WriteInt(signed long Number,unsigned char Count); + bool ReadString(string &Foo); + bool WriteString(string const &Foo); + + IO(); + virtual ~IO() {}; +}; + +#endif diff --git a/tools/dsync-0.0/libdsync/filelistdb.cc b/tools/dsync-0.0/libdsync/filelistdb.cc new file mode 100644 index 
00000000..74bf4110 --- /dev/null +++ b/tools/dsync-0.0/libdsync/filelistdb.cc @@ -0,0 +1,166 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: filelistdb.cc,v 1.4 1999/02/27 08:00:05 jgg Exp $ +/* ###################################################################### + + File List Database + + The mmap class should probably go someplace else.. + + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/filelistdb.h" +#endif + +#include +#include + /*}}}*/ + +// FileListDB::dsFileListDB - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsFileListDB::dsFileListDB() +{ +} + /*}}}*/ +// FileListDB::Generate - Build the directory map /*{{{*/ +// --------------------------------------------------------------------- +/* This sucks the offset of every directory record into a stl map for + quick lookup. */ +bool dsFileListDB::Generate(dsFList::IO &IO) +{ + // Iterate over the file + dsFList List; + while (List.Step(IO) == true) + { + // Record the current location so we can jump to it + unsigned long Pos = IO.Tell(); + string LastSymlink = IO.LastSymlink; + + if (List.Tag == dsFList::tTrailer) + return true; + + // We only index directory start records + if (List.Tag != dsFList::tDirStart) + continue; + + // Store it in the map + Location &Loc = Map[List.Dir.Name]; + Loc.Offset = Pos; + Loc.LastSymlink = LastSymlink; + } + + return false; +} + /*}}}*/ +// FileListDB::Lookup - Find a directory and file /*{{{*/ +// --------------------------------------------------------------------- +/* We use a caching scheme, if the last lookup is in the same directory + we do not re-seek but mearly look at the next entries till termination + then wraps around. In the case of a largely unchanged directory this + gives huge speed increases. 
*/ +bool dsFileListDB::Lookup(dsFList::IO &IO,const char *Dir,const char *File, + dsFList &List) +{ + map::const_iterator I = Map.find(Dir); + if (I == Map.end()) + return false; + + // See if we should reseek + bool Restart = true; + if (LastDir != Dir || LastDir.empty() == true) + { + Restart = false; + IO.LastSymlink = I->second.LastSymlink; + if (IO.Seek(I->second.Offset) == false) + return false; + LastDir = Dir; + } + + List.Head = IO.Header; + while (List.Step(IO) == true) + { + // Oops, ran out of directories + if (List.Tag == dsFList::tDirEnd || + List.Tag == dsFList::tDirStart || + List.Tag == dsFList::tTrailer) + { + if (Restart == false) + { + LastDir = string(); + return false; + } + + Restart = false; + IO.LastSymlink = I->second.LastSymlink; + if (IO.Seek(I->second.Offset) == false) + return false; + LastDir = Dir; + + continue; + } + + // Skip over non directory contents + if (List.Tag == dsFList::tDirMarker || + List.Tag == dsFList::tDirEnd || + List.Tag == dsFList::tDirStart || + List.Entity == 0) + continue; + + if (List.Entity->Name == File) + return true; + } + return false; +} + /*}}}*/ + +// MMapIO::dsMMapIO - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsMMapIO::dsMMapIO(string File) : Fd(File,FileFd::ReadOnly), + Map(Fd,MMap::Public | MMap::ReadOnly) +{ + Pos = 0; +} + /*}}}*/ +// MMapIO::Read - Read bytes from the map /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsMMapIO::Read(void *Buf,unsigned long Len) +{ + if (Pos + Len > Map.Size()) + return _error->Error("Attempt to read past end of mmap"); + memcpy(Buf,(unsigned char *)Map.Data() + Pos,Len); + Pos += Len; + return true; +} + /*}}}*/ +// MMapIO::Write - Write bytes (fail) /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsMMapIO::Write(const void *Buf,unsigned long Len) +{ + return _error->Error("Attempt to write to read only 
mmap"); +} + /*}}}*/ +// MMapIO::Seek - Jump to a spot /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsMMapIO::Seek(unsigned long Bytes) +{ + if (Bytes > Map.Size()) + return _error->Error("Attempt to seek past end of mmap"); + Pos = Bytes; + return true; +} + /*}}}*/ +// MMapIO::Tell - Return the current location /*{{{*/ +// --------------------------------------------------------------------- +/* */ +unsigned long dsMMapIO::Tell() +{ + return Pos; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/filelistdb.h b/tools/dsync-0.0/libdsync/filelistdb.h new file mode 100644 index 00000000..95942570 --- /dev/null +++ b/tools/dsync-0.0/libdsync/filelistdb.h @@ -0,0 +1,63 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: filelistdb.h,v 1.2 1999/01/10 07:34:05 jgg Exp $ +/* ###################################################################### + + File List DB + + This scans a file list and generates a searchable list of all + directories in the list. It can then do a lookup of a given file, + directory pair. + + The memory mapped IO class is recommended for use with the DB class + for speed. 
+ + ##################################################################### */ + /*}}}*/ +#ifndef DSYNC_FILELISTDB +#define DSYNC_FILELISTDB + +#ifdef __GNUG__ +#pragma interface "dsync/filelistdb.h" +#endif + +#include +#include +#include + +class dsFileListDB +{ + struct Location + { + unsigned long Offset; + string LastSymlink; + }; + + dsFList::IO *IO; + map Map; + string LastDir; + public: + + bool Generate(dsFList::IO &IO); + bool Lookup(dsFList::IO &IO,const char *Dir,const char *File,dsFList &List); + + dsFileListDB(); +}; + +class dsMMapIO : public dsFList::IO +{ + FileFd Fd; + MMap Map; + unsigned long Pos; + + public: + + virtual bool Read(void *Buf,unsigned long Len); + virtual bool Write(const void *Buf,unsigned long Len); + virtual bool Seek(unsigned long Bytes); + virtual unsigned long Tell(); + + dsMMapIO(string File); +}; + +#endif diff --git a/tools/dsync-0.0/libdsync/genfilelist.cc b/tools/dsync-0.0/libdsync/genfilelist.cc new file mode 100644 index 00000000..7c5b10a0 --- /dev/null +++ b/tools/dsync-0.0/libdsync/genfilelist.cc @@ -0,0 +1,574 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: genfilelist.cc,v 1.10 1999/12/26 06:59:01 jgg Exp $ +/* ###################################################################### + + Generate File List + + File list generation can be done with modification to the generation + order, ordering can be done by depth, breadth or by tree with and + a fitler can be applied to delay a directory till the end of processing. + + The emitter simply generates the necessary structure and writes it to + the IO. The client can hook some of the functions to provide progress + reporting and md5 caching if so desired. 
+ + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/genfilelist.h" +#endif + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + /*}}}*/ + +// GenFileList::dsGenFileList - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsGenFileList::dsGenFileList() : IO(0), Type(Tree) +{ +} + /*}}}*/ +// GenFileList::~dsGenFileList - Destructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +dsGenFileList::~dsGenFileList() +{ +} + /*}}}*/ +// GenFileList::Go - Generate the list /*{{{*/ +// --------------------------------------------------------------------- +/* This invokes the proper recursive directory scanner to build the file + names. Depth and Breath use a queue */ +bool dsGenFileList::Go(string Base,dsFList::IO &IO) +{ + // Setup the queues and store the current directory + string StartDir = SafeGetCWD(); + Queue.erase(Queue.begin(),Queue.end()); + DelayQueue.erase(Queue.begin(),Queue.end()); + + struct stat St; + if (stat(Base.c_str(),&St) != 0) + return _error->Errno("stat","Could not stat the base directory"); + + // Begin + this->IO = &IO; + IO.Header.Write(IO); + + switch (Type) + { + case Depth: + { + // Change to the base directory + if (chdir(Base.c_str()) != 0) + return _error->Errno("chdir","Could not change to %s",Base.c_str()); + Base = SafeGetCWD(); + + char Cwd[1024]; + Cwd[0] = 0; + if (DirDepthFirst(Cwd) == false) + { + chdir(StartDir.c_str()); + return false; + } + + // Now deal with the delay list + while (DelayQueue.empty() == false) + { + // Get the first delayed directory + string Dir = DelayQueue.front(); + DelayQueue.pop_front(); + + // Change to it and emit it. 
+ strcpy(Cwd,Dir.c_str()); + chdir(Base.c_str()); + chdir(Cwd); + if (DirDepthFirst(Cwd) == false) + { + chdir(StartDir.c_str()); + return false; + } + } + + break; + } + + case Tree: + case Breadth: + { + // Change to the base directory + if (chdir(Base.c_str()) != 0) + return _error->Errno("chdir","Could not change to %s",Base.c_str()); + Base = SafeGetCWD(); + + Queue.push_back(""); + while (Queue.empty() == false || DelayQueue.empty() == false) + { + if (DirTree() == false) + { + chdir(StartDir.c_str()); + return false; + } + + chdir(Base.c_str()); + } + break; + } + + default: + return _error->Error("Internal Error"); + }; + + chdir(StartDir.c_str()); + + dsFList::Trailer Trail; + return Trail.Write(IO); +} + /*}}}*/ +// GenFileList::DirDepthFirst - Depth first directory ordering /*{{{*/ +// --------------------------------------------------------------------- +/* */ +bool dsGenFileList::DirDepthFirst(char *CurDir) +{ + // Scan the directory, first pass is to descend into the sub directories + DIR *DirSt = opendir("."); + if (DirSt == 0) + return _error->Errno("opendir","Unable to open direcotry %s",CurDir); + struct dirent *Ent; + bool EmittedThis = false; + struct stat St; + while ((Ent = readdir(DirSt)) != 0) + { + // Skip . and .. 
+ if (strcmp(Ent->d_name,".") == 0 || + strcmp(Ent->d_name,"..") == 0) + continue; + + if (lstat(Ent->d_name,&St) != 0) + { + closedir(DirSt); + return _error->Errno("stat","Could not stat %s%s",CurDir,Ent->d_name); + } + + // it is a directory + if (S_ISDIR(St.st_mode) != 0) + { + char S[1024]; + snprintf(S,sizeof(S),"%s/",Ent->d_name); + + // Check the Filter + if (Filter.Test(CurDir,S) == false) + continue; + + // Emit a directory marker record for this directory + if (EmittedThis == false) + { + EmittedThis = true; + + if (lstat(".",&St) != 0) + { + closedir(DirSt); + return _error->Errno("stat","Could not stat %s",CurDir); + } + + if (DirectoryMarker(CurDir,St) == false) + { + closedir(DirSt); + return false; + } + } + + // Check the delay filter + if (PreferFilter.Test(CurDir,S) == false) + { + snprintf(S,sizeof(S),"%s%s/",CurDir,Ent->d_name); + DelayQueue.push_back(S); + continue; + } + + // Append the new directory to CurDir and decend + char *End = CurDir + strlen(CurDir); + strcat(End,S); + if (chdir(S) != 0) + { + closedir(DirSt); + return _error->Errno("chdir","Could not chdir to %s%s",CurDir,S); + } + + // Recurse + if (DirDepthFirst(CurDir) == false) + { + closedir(DirSt); + return false; + } + + if (chdir("..") != 0) + { + closedir(DirSt); + return _error->Errno("chdir","Could not chdir to %s%s",CurDir,S); + } + + // Chop off the directory we added to the current dir + *End = 0; + } + } + rewinddir(DirSt); + + // Begin emitting this directory + if (lstat(".",&St) != 0) + { + closedir(DirSt); + return _error->Errno("stat","Could not stat %s",CurDir); + } + + if (EnterDir(CurDir,St) == false) + { + closedir(DirSt); + return false; + } + + while ((Ent = readdir(DirSt)) != 0) + { + // Skip . and .. 
+ if (strcmp(Ent->d_name,".") == 0 || + strcmp(Ent->d_name,"..") == 0) + continue; + + struct stat St; + if (lstat(Ent->d_name,&St) != 0) + { + closedir(DirSt); + return _error->Errno("stat","Could not stat %s%s",CurDir,Ent->d_name); + } + + // it is a directory + if (S_ISDIR(St.st_mode) != 0) + { + char S[1024]; + snprintf(S,sizeof(S),"%s/",Ent->d_name); + + // Check the Filter + if (Filter.Test(CurDir,S) == false) + continue; + } + else + { + // Check the Filter + if (Filter.Test(CurDir,Ent->d_name) == false) + continue; + } + + if (DoFile(CurDir,Ent->d_name,St) == false) + { + closedir(DirSt); + return false; + } + } + closedir(DirSt); + + if (LeaveDir(CurDir) == false) + return false; + + return true; +} + /*}}}*/ +// GenFileList::DirTree - Breadth/Tree directory ordering /*{{{*/ +// --------------------------------------------------------------------- +/* Breadth ordering does all of the dirs at each depth before proceeding + to the next depth. We just treat the list as a queue to get this + effect. Tree ordering does things in a more normal recursive fashion, + we treat the queue as a stack to get that effect. */ +bool dsGenFileList::DirTree() +{ + string Dir; + if (Queue.empty() == false) + { + Dir = Queue.front(); + Queue.pop_front(); + } + else + { + Dir = DelayQueue.front(); + DelayQueue.pop_front(); + } + + struct stat St; + if (Dir.empty() == false && chdir(Dir.c_str()) != 0 || stat(".",&St) != 0) + return _error->Errno("chdir","Could not change to %s",Dir.c_str()); + + if (EnterDir(Dir.c_str(),St) == false) + return false; + + // Scan the directory + DIR *DirSt = opendir("."); + if (DirSt == 0) + return _error->Errno("opendir","Unable to open direcotry %s",Dir.c_str()); + struct dirent *Ent; + while ((Ent = readdir(DirSt)) != 0) + { + // Skip . and .. 
+ if (strcmp(Ent->d_name,".") == 0 || + strcmp(Ent->d_name,"..") == 0) + continue; + + if (lstat(Ent->d_name,&St) != 0) + { + closedir(DirSt); + return _error->Errno("stat","Could not stat %s%s",Dir.c_str(),Ent->d_name); + } + + // It is a directory + if (S_ISDIR(St.st_mode) != 0) + { + char S[1024]; + snprintf(S,sizeof(S),"%s/",Ent->d_name); + + // Check the Filter + if (Filter.Test(Dir.c_str(),S) == false) + continue; + + // Check the delay filter + if (PreferFilter.Test(Dir.c_str(),S) == false) + { + snprintf(S,sizeof(S),"%s%s/",Dir.c_str(),Ent->d_name); + if (Type == Tree) + DelayQueue.push_front(S); + else + DelayQueue.push_back(S); + continue; + } + + snprintf(S,sizeof(S),"%s%s/",Dir.c_str(),Ent->d_name); + + if (Type == Tree) + Queue.push_front(S); + else + Queue.push_back(S); + } + else + { + // Check the Filter + if (Filter.Test(Dir.c_str(),Ent->d_name) == false) + continue; + } + + if (DoFile(Dir.c_str(),Ent->d_name,St) == false) + { + closedir(DirSt); + return false; + } + } + closedir(DirSt); + + if (LeaveDir(Dir.c_str()) == false) + return false; + + return true; +} + /*}}}*/ + +// GenFileList::EnterDir - Called when a directory is entered /*{{{*/ +// --------------------------------------------------------------------- +/* This is called to start a directory block the current working dir + should be set to the directory entered. 
This emits the directory start + record */ +bool dsGenFileList::EnterDir(const char *Dir,struct stat const &St) +{ + if (Visit(Dir,0,St) != 0) + return false; + + dsFList::Directory D; + D.Tag = dsFList::tDirStart; + D.ModTime = St.st_mtime - IO->Header.Epoch; + D.Permissions = St.st_mode & ~S_IFMT; + D.Name = Dir; + return EmitOwner(St,D.User,D.Group,D.Tag,dsFList::Directory::FlOwner) && + D.Write(*IO); +} + /*}}}*/ +// GenFileList::LeaveDir - Called when a directory is left /*{{{*/ +// --------------------------------------------------------------------- +/* Don't do anything for now */ +bool dsGenFileList::LeaveDir(const char *Dir) +{ + return true; +} + /*}}}*/ +// GenFileList::DirectoryMarker - Called when a dir is skipped /*{{{*/ +// --------------------------------------------------------------------- +/* This is used by the depth first ordering, when a dir is temporarily + skipped over this function is called to emit a marker */ +bool dsGenFileList::DirectoryMarker(const char *Dir, + struct stat const &St) +{ + dsFList::Directory D; + D.Tag = dsFList::tDirMarker; + D.ModTime = St.st_mtime - IO->Header.Epoch; + D.Permissions = St.st_mode & ~S_IFMT; + D.Name = Dir; + return EmitOwner(St,D.User,D.Group,D.Tag,dsFList::Directory::FlOwner) && + D.Write(*IO); +} + /*}}}*/ +// GenFileList::DoFile - This does all other items in a directory /*{{{*/ +// --------------------------------------------------------------------- +/* The different file types are emitted as perscribed by the file list + document */ +bool dsGenFileList::DoFile(const char *Dir,const char *File, + struct stat const &St) +{ + int Res = Visit(Dir,File,St); + if (Res < 0) + return false; + if (Res > 0) + return true; + + // Regular file + if (S_ISREG(St.st_mode) != 0) + { + dsFList::NormalFile F; + + F.Tag = dsFList::tNormalFile; + F.ModTime = St.st_mtime - IO->Header.Epoch; + F.Permissions = St.st_mode & ~S_IFMT; + F.Name = File; + F.Size = St.st_size; + + if 
(EmitOwner(St,F.User,F.Group,F.Tag,dsFList::NormalFile::FlOwner) == false) + return false; + + // See if we need to emit rsync checksums + if (NeedsRSync(Dir,File,F) == true) + { + dsFList::RSyncChecksum Ck; + if (EmitRSync(Dir,File,St,F,Ck) == false) + return false; + + // Write out the file record, the checksums and the end marker + return F.Write(*IO) && Ck.Write(*IO); + } + else + { + if (EmitMD5(Dir,File,St,F.MD5,F.Tag, + dsFList::NormalFile::FlMD5) == false) + return false; + + return F.Write(*IO); + } + } + + // Directory + if (S_ISDIR(St.st_mode) != 0) + { + dsFList::Directory D; + D.Tag = dsFList::tDirectory; + D.ModTime = St.st_mtime - IO->Header.Epoch; + D.Permissions = St.st_mode & ~S_IFMT; + D.Name = File; + return EmitOwner(St,D.User,D.Group,D.Tag,dsFList::Directory::FlOwner) && + D.Write(*IO); + } + + // Link + if (S_ISLNK(St.st_mode) != 0) + { + dsFList::Symlink L; + L.Tag = dsFList::tSymlink; + L.ModTime = St.st_mtime - IO->Header.Epoch; + L.Name = File; + + char Buf[1024]; + int Res = readlink(File,Buf,sizeof(Buf)); + if (Res <= 0) + return _error->Errno("readlink","Unable to read symbolic link"); + Buf[Res] = 0; + L.To = Buf; + + return EmitOwner(St,L.User,L.Group,L.Tag,dsFList::Symlink::FlOwner) && + L.Write(*IO); + } + + // Block special file + if (S_ISCHR(St.st_mode) != 0 || S_ISBLK(St.st_mode) != 0 || + S_ISFIFO(St.st_mode) != 0) + { + dsFList::DeviceSpecial D; + D.Tag = dsFList::tDeviceSpecial; + D.ModTime = St.st_mtime - IO->Header.Epoch; + D.Permissions = St.st_mode & ~S_IFMT; + D.Dev = St.st_dev; + D.Name = File; + + return EmitOwner(St,D.User,D.Group,D.Tag,dsFList::DeviceSpecial::FlOwner) && + D.Write(*IO); + } + + return _error->Error("File %s%s is not a known type",Dir,File); +} + /*}}}*/ +// GenFileList::EmitOwner - Set the entitiy ownership /*{{{*/ +// --------------------------------------------------------------------- +/* This emits the necessary UID/GID mapping records and sets the feilds + in */ +bool 
dsGenFileList::EmitOwner(struct stat const &St,unsigned long &UID, + unsigned long &GID,unsigned int Tag, + unsigned int Flag) +{ + if ((IO->Header.Flags[Tag] & Flag) != Flag) + return true; + + return _error->Error("UID/GID storage is not supported yet"); +} + /*}}}*/ +// GenFileList::EmitMd5 - Generate the md5 hash for the file /*{{{*/ +// --------------------------------------------------------------------- +/* This uses the MD5 class to generate the md5 hash for the entry. */ +bool dsGenFileList::EmitMD5(const char *Dir,const char *File, + struct stat const &St,unsigned char MD5[16], + unsigned int Tag,unsigned int Flag) +{ + if ((IO->Header.Flags[Tag] & Flag) != Flag) + return true; + + // Open the file + MD5Summation Sum; + FileFd Fd(File,FileFd::ReadOnly); + if (_error->PendingError() == true) + return _error->Error("MD5 generation failed for %s%s",Dir,File); + + if (Sum.AddFD(Fd.Fd(),Fd.Size()) == false) + return _error->Error("MD5 generation failed for %s%s",Dir,File); + + Sum.Result().Value(MD5); + + return true; +} + /*}}}*/ +// GenFileList::EmitRSync - Emit a RSync checksum record /*{{{*/ +// --------------------------------------------------------------------- +/* This just generates the checksum into the memory structure. 
*/ +bool dsGenFileList::EmitRSync(const char *Dir,const char *File, + struct stat const &St,dsFList::NormalFile &F, + dsFList::RSyncChecksum &Ck) +{ + FileFd Fd(File,FileFd::ReadOnly); + if (_error->PendingError() == true) + return _error->Error("RSync Checksum generation failed for %s%s",Dir,File); + + if (GenerateRSync(Fd,Ck,F.MD5) == false) + return _error->Error("RSync Checksum generation failed for %s%s",Dir,File); + + return true; +} + /*}}}*/ diff --git a/tools/dsync-0.0/libdsync/genfilelist.h b/tools/dsync-0.0/libdsync/genfilelist.h new file mode 100644 index 00000000..65f54c14 --- /dev/null +++ b/tools/dsync-0.0/libdsync/genfilelist.h @@ -0,0 +1,74 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: genfilelist.h,v 1.5 1999/12/26 06:59:01 jgg Exp $ +/* ###################################################################### + + Generate File List + + This class is responsible for generating the file list. It is fairly + simple and direct. One hook is provided to allow a derived class to + cache md5 generation. + + The file list format is documented in the filelist.sgml document. 
+ + ##################################################################### */ + /*}}}*/ +#ifndef DSYNC_GENFILELIST +#define DSYNC_GENFILELIST + +#ifdef __GNUG__ +#pragma interface "dsync/genfilelist.h" +#endif + +#include +#include +#include + +class dsGenFileList +{ + protected: + + list Queue; + list DelayQueue; + dsFList::IO *IO; + + // Hooks + virtual int Visit(const char *Directory,const char *File, + struct stat const &Stat) {return 0;}; + + // Directory handlers + bool DirDepthFirst(char *CurDir); + bool DirTree(); + + // Emitters + bool EnterDir(const char *Dir,struct stat const &St); + bool LeaveDir(const char *Dir); + bool DirectoryMarker(const char *Dir,struct stat const &St); + bool DoFile(const char *Dir,const char *File,struct stat const &St); + + bool EmitOwner(struct stat const &St,unsigned long &UID, + unsigned long &GID,unsigned int Tag,unsigned int Flag); + virtual bool EmitMD5(const char *Dir,const char *File, + struct stat const &St,unsigned char MD5[16], + unsigned int Tag,unsigned int Flag); + + virtual bool NeedsRSync(const char *Dir,const char *File, + dsFList::NormalFile &F) {return false;}; + virtual bool EmitRSync(const char *Dir,const char *File, + struct stat const &St,dsFList::NormalFile &F, + dsFList::RSyncChecksum &Ck); + + public: + + // Configurable things + enum {Depth,Breadth,Tree} Type; + dsFileFilter Filter; + dsFileFilter PreferFilter; + + bool Go(string Base,dsFList::IO &IO); + + dsGenFileList(); + virtual ~dsGenFileList(); +}; + +#endif diff --git a/tools/dsync-0.0/libdsync/makefile b/tools/dsync-0.0/libdsync/makefile new file mode 100644 index 00000000..7ce9b91e --- /dev/null +++ b/tools/dsync-0.0/libdsync/makefile @@ -0,0 +1,38 @@ +# -*- make -*- +BASE=.. 
+SUBDIR=libdsync + +# Header location +SUBDIRS = contrib +HEADER_TARGETDIRS = dsync + +# Bring in the default rules +include ../buildlib/defaults.mak + +# The library name +LIBRARY=dsync +MAJOR=0.0 +MINOR=0 +SLIBS=$(PTHREADLIB) + +# Source code for the contributed non-core things +SOURCE = contrib/error.cc contrib/fileutl.cc contrib/strutl.cc \ + contrib/configuration.cc contrib/cmndline.cc \ + contrib/md5.cc contrib/md4.cc contrib/mmap.cc contrib/bitmap.cc \ + contrib/slidingwindow.cc + +# Source code for the main library +SOURCE+= filefilter.cc genfilelist.cc filelist.cc filelistdb.cc compare.cc \ + rsync-algo.cc + +# Public header files +HEADERS = error.h configuration.h cmndline.h md5.h md4.h fileutl.h mmap.h \ + filefilter.h genfilelist.h filelist.h filelistdb.h compare.h \ + strutl.h bitmap.h slidingwindow.h rsync-algo.h + +HEADERS := $(addprefix dsync/,$(HEADERS)) + +# Private header files +HEADERS+= system.h + +include $(LIBRARY_H) diff --git a/tools/dsync-0.0/libdsync/rsync-algo.cc b/tools/dsync-0.0/libdsync/rsync-algo.cc new file mode 100644 index 00000000..7b513b32 --- /dev/null +++ b/tools/dsync-0.0/libdsync/rsync-algo.cc @@ -0,0 +1,205 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: rsync-algo.cc,v 1.3 1999/12/26 06:59:01 jgg Exp $ +/* ###################################################################### + + RSync Algorithrim + + The RSync algorithim is attributed to Andrew Tridgell and is a means + for matching blocks between two streams. The algorithrim implemented + here differs slightly in its structure and is carefully optimized to be + able to operate on very large files effectively. + + We rely on the RSync rolling weak checksum routine and the MD4 strong + checksum routine. This implementation requires a uniform block size + for each run. 
+ + ##################################################################### */ + /*}}}*/ +// Include files /*{{{*/ +#ifdef __GNUG__ +#pragma implementation "dsync/rsync-algo.h" +#endif + +#include +#include +#include +#include +#include + +#include +#include +#include + /*}}}*/ + +// RollingChecksum - Compute the checksum perscribed by rsync /*{{{*/ +// --------------------------------------------------------------------- +/* */ +static inline unsigned long RollingChecksum(unsigned char *Start, + unsigned char *End) +{ + unsigned long A = 0; + unsigned long B = 0; + + /* A = sum(X[i],j,k) B = sum((k-j+1)*X[i],j,k); + Which reduces to the recurrence, B = sum(A[I],j,k); */ + for (; Start != End; Start++) + { + A += *Start; + B += A; + } + + return (A & 0xFFFF) | (B << 16); +} + /*}}}*/ +// GenerateRSync - Compute the rsync blocks for a file /*{{{*/ +// --------------------------------------------------------------------- +/* This function generates the RSync checksums for each uniform block in + the file. 
*/ +bool GenerateRSync(FileFd &Fd,dsFList::RSyncChecksum &Ck, + unsigned char OutMD5[16], + unsigned long BlockSize) +{ + SlidingWindow Wind(Fd); + MD5Summation MD5; + + Ck.Tag = dsFList::tRSyncChecksum; + Ck.BlockSize = BlockSize; + Ck.FileSize = Fd.Size(); + + // Allocate sum storage space + delete [] Ck.Sums; + Ck.Sums = new unsigned char[(Ck.FileSize + BlockSize-1)/BlockSize*20]; + + // Slide over the file + unsigned char *Start = 0; + unsigned char *End = 0; + unsigned char *Sum = Ck.Sums; + unsigned char *SumEnd = Sum + (Ck.FileSize + BlockSize-1)/BlockSize*20; + while (Sum < SumEnd) + { + // Tail little bit of the file + if ((unsigned)(End - Start) < BlockSize) + { + unsigned char *OldEnd = End; + if (Wind.Extend(Start,End) == false) + return false; + + // The file is very small, pretend this is the last block + if ((unsigned)(End - Start) < BlockSize && End != Start) + { + OldEnd = End; + End = Start; + } + + // All Done + if (Start == End) + { + /* The last block is rather artifical but can be of use in some + cases. Just remember not to insert it into the hash + search table!! 
*/ + *(uint32_t *)Sum = htonl(0xDEADBEEF); + InitMD4(Sum+4); + ComputeMD4Final(Sum+4,Start,OldEnd,OldEnd-Start); + MD5.Add(Start,OldEnd); + Sum += 20; + break; + } + } + + // Compute the checksums + MD5.Add(Start,Start+BlockSize); + *(uint32_t *)Sum = htonl(RollingChecksum(Start,Start+BlockSize)); + InitMD4(Sum+4); + ComputeMD4Final(Sum+4,Start,Start+BlockSize,BlockSize); + Sum += 20; + + Start += BlockSize; + } + + if (Sum != SumEnd) + return _error->Error("Size Mismatch generating checksums"); + + MD5.Result().Value(OutMD5); + + return true; +} + /*}}}*/ + +// RSyncMatch::RSyncMatch - Constructor /*{{{*/ +// --------------------------------------------------------------------- +/* This generates the btree and hash table for looking up checksums */ +RSyncMatch::RSyncMatch(dsFList::RSyncChecksum const &Ck) : Fast(1 << 16), + Ck(Ck) +{ + Indexes = 0; + unsigned int Blocks = (Ck.FileSize + Ck.BlockSize-1)/Ck.BlockSize; + + // Drop the last partial block from the hashing + if (Blocks < 3) + return; + Blocks--; + + // Setup the index table + Indexes = new uint32_t *[Blocks]; + IndexesEnd = Indexes + Blocks; + + // Ready the checksum pointers + unsigned char *Sum = Ck.Sums; + unsigned char *SumEnd = Sum + Blocks*20; + for (uint32_t **I = Indexes; Sum < SumEnd; Sum += 20) + { + *I++ = (uint32_t *)Sum; + } + + // Sort them + qsort(Indexes,Blocks,sizeof(*Indexes),Sort); + + // Generate the hash table + unsigned int Cur = 0; + Hashes[Cur] = Indexes; + for (uint32_t **I = Indexes; I != IndexesEnd; I++) + { + printf("%x\n",**I); + Fast.Set((**I) >> 16); + while (((**I) >> 24) > Cur) + Hashes[Cur++] = I; + } + while (Cur <= 256) + Hashes[Cur++] = IndexesEnd; + + for (unsigned int Cur = 1; Cur != 255; Cur++) + { + printf("%u %p %x\n",Hashes[Cur] - Hashes[Cur-1],Hashes[Cur],**Hashes[Cur] >> 24); + } +} + /*}}}*/ +// RSyncMatch::~RSyncMatch - Destructor /*{{{*/ +// --------------------------------------------------------------------- +/* */ +RSyncMatch::~RSyncMatch() +{ + delete 
[] Indexes; +} + /*}}}*/ +// RSyncMatch::Sort - QSort function /*{{{*/ +// --------------------------------------------------------------------- +/* */ +int RSyncMatch::Sort(const void *L,const void *R) +{ + if (**(uint32_t **)L == **(uint32_t **)R) + return 0; + if (**(uint32_t **)L > **(uint32_t **)R) + return 1; + return -1; +} + /*}}}*/ +bool RSyncMatch::Scan(FileFd &Fd) +{ + for (unsigned int Cur = 1; Cur != 256; Cur++) + { + printf("%u %p\n",Hashes[Cur] - Hashes[Cur-1],Hashes[Cur]); + } + + return true; +} diff --git a/tools/dsync-0.0/libdsync/rsync-algo.h b/tools/dsync-0.0/libdsync/rsync-algo.h new file mode 100644 index 00000000..1a9711d1 --- /dev/null +++ b/tools/dsync-0.0/libdsync/rsync-algo.h @@ -0,0 +1,59 @@ +// -*- mode: cpp; mode: fold -*- +// Description /*{{{*/ +// $Id: rsync-algo.h,v 1.3 1999/12/26 06:59:01 jgg Exp $ +/* ###################################################################### + + RSync Algorithrim + + The RSync algorithim is attributed to Andrew Tridgell and is a means + for matching blocks between two streams. The algorithrim implemented + here differs slightly in its structure and is carefully optimized to be + able to operate on very large files effectively. + + We rely on the RSync rolling weak checksum routine and the MD4 strong + checksum routine. This implementation requires a uniform block size + for each run. 
+ + ##################################################################### */ + /*}}}*/ +#ifndef DSYNC_RSYNC_ALGO_H +#define DSYNC_RSYNC_ALGO_H + +#ifdef __GNUG__ +#pragma interface "dsync/rsync-algo.h" +#endif + +#include +#include +#include + +#include + +class RSyncMatch +{ + uint32_t **Indexes; + uint32_t **IndexesEnd; + uint32_t **Hashes[257]; + BitmapVector Fast; + dsFList::RSyncChecksum const &Ck; + + static int Sort(const void *L,const void *R); + + protected: + + virtual bool Hit(unsigned long Block,off_t SrcOff, + const unsigned char *Data) {return true;}; + + public: + + bool Scan(FileFd &Fd); + + RSyncMatch(dsFList::RSyncChecksum const &Ck); + virtual ~RSyncMatch(); +}; + +bool GenerateRSync(FileFd &Fd,dsFList::RSyncChecksum &Ck, + unsigned char MD5[16], + unsigned long BlockSize = 8*1024); + +#endif diff --git a/tools/dsync-0.0/test/fftest.cc b/tools/dsync-0.0/test/fftest.cc new file mode 100644 index 00000000..aa4adb7a --- /dev/null +++ b/tools/dsync-0.0/test/fftest.cc @@ -0,0 +1,32 @@ +#include +#include +#include + +int main(int argc, const char *argv[]) +{ + CommandLine::Args Args[] = { + {'i',"include","filter:: + ",CommandLine::HasArg}, + {'e',"exclude","filter:: - ",CommandLine::HasArg}, + {'c',"config-file",0,CommandLine::ConfigFile}, + {'o',"option",0,CommandLine::ArbItem}, + {0,0,0,0}}; + CommandLine CmdL(Args,_config); + if (CmdL.Parse(argc,argv) == false) + { + _error->DumpErrors(); + return 100; + } + + _config->Dump(); + + dsFileFilter Filt; + if (Filt.LoadFilter(_config->Tree("filter")) == false) + { + _error->DumpErrors(); + return 100; + } + + cout << "Test: " << Filt.Test(CmdL.FileList[0],CmdL.FileList[1]) << endl; + + return 0; +} diff --git a/tools/dsync-0.0/test/makefile b/tools/dsync-0.0/test/makefile new file mode 100644 index 00000000..f64d691a --- /dev/null +++ b/tools/dsync-0.0/test/makefile @@ -0,0 +1,18 @@ +# -*- make -*- +BASE=.. 
+SUBDIR=test + +# Bring in the default rules +include ../buildlib/defaults.mak + +# Program to test the File Filter +PROGRAM=fftest +SLIBS = -ldsync +SOURCE = fftest.cc +include $(PROGRAM_H) + +# Program to test the File Filter +PROGRAM=pathtest +SLIBS = -ldsync +SOURCE = pathtest.cc +include $(PROGRAM_H) diff --git a/tools/dsync-0.0/test/pathtest.cc b/tools/dsync-0.0/test/pathtest.cc new file mode 100644 index 00000000..e95c7943 --- /dev/null +++ b/tools/dsync-0.0/test/pathtest.cc @@ -0,0 +1,221 @@ +#include +#include +#include +#include +#include + +// SimplifyPath - Short function to remove relative path components /*{{{*/ +// --------------------------------------------------------------------- +/* This short function removes relative path components such as ./ and ../ + from the path and removes double // as well. It works by seperating + the path into a list of components and then removing any un-needed + compoments */ +bool SimplifyPath(char *Buffer) +{ + // Create a list of path compoments + char *Pos[100]; + unsigned CurPos = 0; + Pos[CurPos] = Buffer; + CurPos++; + for (char *I = Buffer; *I != 0;) + { + if (*I == '/') + { + *I = 0; + I++; + Pos[CurPos] = I; + CurPos++; + } + else + I++; + } + + // Strip //, ./ and ../ + for (unsigned I = 0; I != CurPos; I++) + { + if (Pos[I] == 0) + continue; + + // Double slash + if (Pos[I][0] == 0) + { + if (I != 0) + Pos[I] = 0; + continue; + } + + // Dot slash + if (Pos[I][0] == '.' && Pos[I][1] == 0) + { + Pos[I] = 0; + continue; + } + + // Dot dot slash + if (Pos[I][0] == '.' && Pos[I][1] == '.' 
&& Pos[I][2] == 0) + { + Pos[I] = 0; + unsigned J = I; + for (; Pos[J] == 0 && J != 0; J--); + if (Pos[J] == 0) + return _error->Error("Invalid path, too many ../s"); + Pos[J] = 0; + continue; + } + } + + // Recombine the path into full path + for (unsigned I = 0; I != CurPos; I++) + { + if (Pos[I] == 0) + continue; + memmove(Buffer,Pos[I],strlen(Pos[I])); + Buffer += strlen(Pos[I]); + + if (I + 1 != CurPos) + *Buffer++ = '/'; + } + *Buffer = 0; + + return true; +} + /*}}}*/ +// ResolveLink - Resolve a file into an unsymlinked path /*{{{*/ +// --------------------------------------------------------------------- +/* The returned path is a path that accesses the same file without + traversing a symlink, the memory buffer used should be twice as large + as the largest path. It uses an LRU cache of past lookups to speed things + up, just don't change directores :> */ +struct Cache +{ + string Dir; + string Trans; + unsigned long Age; +}; +static Cache DirCache[400]; +static unsigned long CacheAge = 0; +bool ResolveLink(char *Buffer,unsigned long Max) +{ + if (Buffer[0] == 0 || (Buffer[0] == '/' && Buffer[1] == 0)) + return true; + + // Lookup in the cache + Cache *Entry = 0; + for (int I = 0; I != 400; I++) + { + // Store an empty entry + if (DirCache[I].Dir.empty() == true) + { + Entry = &DirCache[I]; + Entry->Age = 0; + continue; + } + + // Store the LRU entry + if (Entry != 0 && Entry->Age > DirCache[I].Age) + Entry = &DirCache[I]; + + if (DirCache[I].Dir != Buffer || DirCache[I].Trans.empty() == true) + continue; + strcpy(Buffer,DirCache[I].Trans.c_str()); + DirCache[I].Age = CacheAge++; + return true; + } + + // Prepare the cache for our new entry + if (Entry != 0 && Buffer[strlen(Buffer) - 1] == '/') + { + Entry->Age = CacheAge++; + Entry->Dir = Buffer; + } + else + Entry = 0; + + // Resolve any symlinks + unsigned Counter = 0; + while (1) + { + Counter++; + if (Counter > 50) + return _error->Error("Exceeded allowed symlink depth"); + + // Strip off the final 
component name + char *I = Buffer + strlen(Buffer); + for (; I != Buffer && (*I == '/' || *I == 0); I--); + for (; I != Buffer && *I != '/'; I--); + if (I != Buffer) + I++; + + // If it is a link then read the link dest over the final component + int Res = readlink(Buffer,I,Max - (I - Buffer)); + if (Res > 0) + { + I[Res] = 0; + + // Absolute path.. + if (*I == '/') + memmove(Buffer,I,strlen(I)+1); + + if (SimplifyPath(Buffer) == false) + return false; + } + else + break; + } + + /* Here we are abusive and move the current path component to the end + of the buffer to advoid allocating space */ + char *I = Buffer + strlen(Buffer); + for (; I != Buffer && (*I == '/' || *I == 0); I--); + for (; I != Buffer && *I != '/'; I--); + if (I != Buffer) + I++; + unsigned Len = strlen(I) + 1; + char *End = Buffer + Max - Len; + memmove(End,I,Len); + *I = 0; + + // Recurse to deal with any links in the files path + if (ResolveLink(Buffer,Max - Len) == false) + return false; + I = Buffer + strlen(Buffer); + memmove(I,End,Len); + + // Store in the cache + if (Entry != 0) + Entry->Trans = Buffer; + + return true; +} + /*}}}*/ + +int main(int argc,char *argv[]) +{ + char Buf[1024*4]; +// strcpy(Buf,argv[1]); + while (!cin == false) + { + char Buf2[200]; + cin.getline(Buf2,sizeof(Buf2)); + strcpy(Buf,Buf2); + + if (ResolveLink(Buf,sizeof(Buf)) == false) + _error->DumpErrors(); + else + { +/* struct stat StA; + struct stat StB; + if (stat(Buf,&StA) != 0 || stat(Buf2,&StB) != 0) + { + cerr << Buf << ',' << Buf2 << endl; + cerr << "Stat failure" << endl; + } + + if (StA.st_ino != StB.st_ino) + cerr << "Inode mismatch" << endl;*/ + + cout << Buf << endl; + } + } + return 0; +}