From: Torsten Werner Date: Tue, 22 Mar 2011 15:50:39 +0000 (+0100) Subject: Merge branch 'contents' X-Git-Url: https://git.decadent.org.uk/gitweb/?a=commitdiff_plain;h=87bcdbc7dffd6230cb4d49942fbd87ead6f2fffe;hp=af60b693abaafebb13a0cbaf87b3360cdac6d352;p=dak.git Merge branch 'contents' --- diff --git a/config/debian/cron.dinstall b/config/debian/cron.dinstall index 40451c81..27200dd1 100755 --- a/config/debian/cron.dinstall +++ b/config/debian/cron.dinstall @@ -172,7 +172,7 @@ GO=( stage $GO GO=( - FUNC="merkel1" + FUNC="qa1" TIME="init" ARGS="" ERR="false" @@ -436,14 +436,6 @@ GO=( ) stage $GO & -GO=( - FUNC="merkel2" - TIME="merkel projectb push" - ARGS="" - ERR="false" -) -stage $GO & - GO=( FUNC="mirrorpush" TIME="mirrorpush" @@ -479,8 +471,8 @@ stage $GO & rm -f "${LOCK_BRITNEY}" GO=( - FUNC="merkel3" - TIME="merkel ddaccessible sync" + FUNC="ddaccess" + TIME="ddaccessible sync" ARGS="" ERR="false" ) diff --git a/config/debian/cron.hourly b/config/debian/cron.hourly index 73ba6afc..c20f4d1d 100755 --- a/config/debian/cron.hourly +++ b/config/debian/cron.hourly @@ -30,4 +30,7 @@ cat removals.822 >> removals-full.822 $base/dak/tools/queue_rss.py -q $queuedir/new -o $webdir/rss/ -d $base/misc -l $base/log/ $base/dak/tools/removals.pl $configdir/removalsrss.rc > $webdir/rss/removals.rss +# Tell ries to sync its tree +ssh -o Batchmode=yes -o ConnectTimeout=30 -o SetupTimeout=30 -2 -i ${base}/s3kr1t/pushddmirror dak@ries.debian.org sync + $scriptsdir/generate-di diff --git a/config/debian/dak.conf b/config/debian/dak.conf index 97aaa01d..b888a606 100644 --- a/config/debian/dak.conf +++ b/config/debian/dak.conf @@ -451,11 +451,9 @@ Queue-Report DB { - Name "projectb"; - Host ""; - Port 5433; + Service "projectb"; // PoolSize should be at least ThreadCount + 1 - PoolSize 17; + PoolSize 5; // MaxOverflow shouldn't exceed postgresql.conf's max_connections - PoolSize MaxOverflow 13; // should be false for encoding == SQL_ASCII diff --git a/config/debian/dak.conf-etc b/config/debian/dak.conf-etc index b0f6a50e..5336a8df 100644 --- a/config/debian/dak.conf-etc +++ b/config/debian/dak.conf-etc @@ -7,5 +7,12 @@ Config DakConfig "/srv/ftp-master.debian.org/dak/config/debian/dak.conf"; AptConfig "/srv/ftp-master.debian.org/dak/config/debian/apt.conf"; } + ries.debian.org + { + AllowLocalConfig "false"; + DatabaseHostname "ftp-master"; + DakConfig "/srv/ftp-master.debian.org/dak/config/debian/dak.conf"; + AptConfig "/srv/ftp-master.debian.org/dak/config/debian/apt.conf"; + } } diff --git a/config/debian/dinstall.functions b/config/debian/dinstall.functions index abece803..e4de479e 100644 --- a/config/debian/dinstall.functions +++ b/config/debian/dinstall.functions @@ -38,27 +38,11 @@ function onerror() { ######################################################################## # pushing merkels QA user, part one -function merkel1() { - log "Telling merkels QA user that we start dinstall" +function qa1() { + log "Telling QA user that we start dinstall" ssh -2 -i ~dak/.ssh/push_merkel_qa -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 qa@qa.debian.org sleep 1 } -# Create the postgres dump files -function pgdump_pre() { - log "Creating pre-daily-cron-job backup of $PGDATABASE database..." - pg_dump > $base/backup/dump_pre_$(date +%Y.%m.%d-%H:%M:%S) -} - -function pgdump_post() { - log "Creating post-daily-cron-job backup of $PGDATABASE database..." 
- cd $base/backup - POSTDUMP=$(date +%Y.%m.%d-%H:%M:%S) - pg_dump > $base/backup/dump_$POSTDUMP - #pg_dumpall --globals-only > $base/backup/dumpall_$POSTDUMP - ln -sf $base/backup/dump_$POSTDUMP current - #ln -sf $base/backup/dumpall_$POSTDUMP currentall -} - # Updating various files function updates() { log "Updating Bugs docu, Mirror list and mailing-lists.txt" @@ -444,18 +428,10 @@ function bts() { dak bts-categorize } -function merkel2() { - # Push dak@merkel so it syncs the projectb there. Returns immediately, the sync runs detached - log "Trigger merkel/flotows $PGDATABASE sync" - ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_projectb dak@merkel.debian.org sleep 1 - # Also trigger flotow, the ftpmaster test box - ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_flotow_projectb dak@flotow.debconf.org sleep 1 -} - -function merkel3() { - # Push dak@merkel to tell it to sync the dd accessible parts. Returns immediately, the sync runs detached - log "Trigger merkels dd accessible parts sync" - ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_ddaccess dak@merkel.debian.org sleep 1 +function ddaccess() { + # Tell our dd accessible mirror to sync itself up. Including ftp dir. + log "Trigger dd accessible parts sync including ftp dir" + ssh -o Batchmode=yes -o ConnectTimeout=30 -o SetupTimeout=30 -2 -i ${base}/s3kr1t/pushddmirror dak@ries.debian.org pool } function mirrorpush() { diff --git a/config/homedir/syncdd.sh b/config/homedir/syncdd.sh new file mode 100755 index 00000000..652c29a0 --- /dev/null +++ b/config/homedir/syncdd.sh @@ -0,0 +1,121 @@ +#!/bin/bash + +# Copyright (C) 2011 Joerg Jaspert +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; version 2. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + + +set -e +set -u +set -E + +export LANG=C +export LC_ALL=C + +export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars +. $SCRIPTVARS + +EXTRA="" + +check_commandline() { + while [ $# -gt 0 ]; do + case "$1" in + sync) + EXTRA="--exclude ftp/" + ;; + pool) + ;; + *) + echo "Unknown option ${1} ignored" + ;; + esac + shift # Check next set of parameters. + done +} + +if [ $# -gt 0 ]; then + ORIGINAL_COMMAND=$* +else + ORIGINAL_COMMAND="" +fi + +SSH_ORIGINAL_COMMAND=${SSH_ORIGINAL_COMMAND:-""} +if [ -n "${SSH_ORIGINAL_COMMAND}" ]; then + set "nothing" "${SSH_ORIGINAL_COMMAND}" + shift + check_commandline $* +fi + +if [ -n "${ORIGINAL_COMMAND}" ]; then + set ${ORIGINAL_COMMAND} + check_commandline $* +fi + + +cleanup() { + rm -f "${HOME}/sync.lock" +} +trap cleanup EXIT TERM HUP INT QUIT + +# not using $lockdir as thats inside the rsync dir, and --delete would +# kick the lock away. Yes we could exclude it, but wth bother? +# +# Also, NEVER use --delete-excluded! 
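+# (--delete-excluded removes excluded paths from the destination; with the
+#  "sync" argument EXTRA above adds "--exclude ftp/", so it would wipe the
+#  whole ftp tree on the mirror.)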
+if lockfile -r3 ${HOME}/sync.lock; then
+    cd $base/
+    rsync -aH -B8192 \
+        --exclude backup/*.xz \
+        --exclude backup/dump* \
+        ${EXTRA} \
+        --exclude mirror \
+        --exclude morgue/ \
+        --exclude=lost+found/ \
+        --exclude .da-backup.trace \
+        --delete \
+        --delete-after \
+        --timeout 3600 \
+        -e 'ssh -o ConnectTimeout=30 -o SetupTimeout=30' \
+        ftpmaster-sync:/srv/ftp-master.debian.org/ .
+
+    cd $public/
+    rsync -aH -B8192 \
+        --exclude mirror \
+        --exclude rsync/ \
+        --exclude=lost+found/ \
+        --exclude .da-backup.trace \
+        --exclude web-users/ \
+        --delete \
+        --delete-after \
+        --timeout 3600 \
+        -e 'ssh -o ConnectTimeout=30 -o SetupTimeout=30' \
+        ftpmaster-sync2:/srv/ftp.debian.org/ .
+
+else
+    echo "Couldn't get the lock, not syncing"
+    exit 0
+fi
+
+
+## ftpmaster-sync is defined in .ssh/config as:
+# Host ftpmaster-sync
+# Hostname franck.debian.org
+# User dak
+# IdentityFile ~dak/.ssh/syncftpmaster
+# ForwardX11 no
+# ForwardAgent no
+# StrictHostKeyChecking yes
+# PasswordAuthentication no
+# BatchMode yes
+
+## ftpmaster-sync2 is the same, just a second ssh key
diff --git a/dak/admin.py b/dak/admin.py
index c7d770ba..218eea57 100755
--- a/dak/admin.py
+++ b/dak/admin.py
@@ -343,10 +343,13 @@ def show_config(command):
     if mode == 'db':
         connstr = ""
-        if cnf["DB::Host"]:
+        if cnf.has_key("DB::Service"):
+            # Service mode
+            connstr = "postgresql://service=%s" % cnf["DB::Service"]
+        elif cnf.has_key("DB::Host"):
             # TCP/IP
             connstr = "postgres://%s" % cnf["DB::Host"]
-            if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+            if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
                 connstr += ":%s" % cnf["DB::Port"]
             connstr += "/%s" % cnf["DB::Name"]
         else:
@@ -356,12 +359,17 @@
             connstr += "?port=%s" % cnf["DB::Port"]
         print connstr
     elif mode == 'db-shell':
-        e = ['PGDATABASE']
-        print "PGDATABASE=%s" % cnf["DB::Name"]
-        if cnf["DB::Host"]:
+        e = []
+        if cnf.has_key("DB::Service"):
+            e.append('PGSERVICE')
+            print "PGSERVICE=%s" % cnf["DB::Service"]
+        if cnf.has_key("DB::Name"):
+            e.append('PGDATABASE')
+            print "PGDATABASE=%s" % cnf["DB::Name"]
+        if cnf.has_key("DB::Host"):
             print "PGHOST=%s" % cnf["DB::Host"]
             e.append('PGHOST')
-        if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+        if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
             print "PGPORT=%s" % cnf["DB::Port"]
             e.append('PGPORT')
         print "export " + " ".join(e)
diff --git a/dak/clean_suites.py b/dak/clean_suites.py
index c6a0a777..ab1e76d7 100755
--- a/dak/clean_suites.py
+++ b/dak/clean_suites.py
@@ -254,7 +254,8 @@ def clean(now_date, delete_date, max_delete, session):
     q = session.execute("""
 SELECT s.id, f.filename FROM source s, files f
   WHERE f.last_used <= :deletedate
-        AND s.file = f.id""", {'deletedate': delete_date})
+        AND s.file = f.id
+        AND s.id NOT IN (SELECT src_id FROM extra_src_references)""", {'deletedate': delete_date})
     for s in q.fetchall():
         Logger.log(["delete source", s[1], s[0]])
         if not Options["No-Action"]:
diff --git a/daklib/dbconn.py b/daklib/dbconn.py
index 6cd84de3..1968dd0c 100755
--- a/daklib/dbconn.py
+++ b/daklib/dbconn.py
@@ -493,7 +493,7 @@ class DBBinary(ORMObject):
     def properties(self):
         return ['package', 'version', 'maintainer', 'source', 'architecture', \
             'poolfile', 'binarytype', 'fingerprint', 'install_date', \
-            'suites_count', 'binary_id', 'contents_count']
+            'suites_count', 'binary_id', 'contents_count', 'extra_sources']

     def not_null_constraints(self):
         return ['package', 'version', 'maintainer', 'source', 'poolfile', \
@@ -2465,6 +2465,16 @@ def add_deb_to_db(u, filename, session=None):
     bin.source_id = bin_sources[0].source_id

+    if entry.has_key("built-using"):
+        for srcname, version in entry["built-using"]:
+            exsources = get_sources_from_name(srcname, version, session=session)
+            if len(exsources) != 1:
+                raise NoSourceFieldError, "Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
+                                          (srcname, version, bin.package, bin.version, entry["architecture"],
+                                           filename, bin.binarytype, u.pkg.changes["fingerprint"])
+
+            bin.extra_sources.append(exsources[0])
+
     # Add and flush object so it has an ID
     session.add(bin)
@@ -2849,6 +2859,7 @@ class DBConn(object):
             'changes_pending_files_map',
             'changes_pending_source_files',
             'changes_pool_files',
+            'extra_src_references',
             # TODO: the maintainer column in table override should be removed.
             'override',
             'suite_architectures',
@@ -2942,7 +2953,9 @@ class DBConn(object):
                 fingerprint = relation(Fingerprint),
                 install_date = self.tbl_binaries.c.install_date,
                 suites = relation(Suite, secondary=self.tbl_bin_associations,
-                    backref=backref('binaries', lazy='dynamic'))),
+                    backref=backref('binaries', lazy='dynamic')),
+                extra_sources = relation(DBSource, secondary=self.tbl_extra_src_references,
+                    backref=backref('extra_binary_references', lazy='dynamic'))),
                 extension = validator)

         mapper(BinaryACL, self.tbl_binary_acl,
@@ -3164,16 +3177,18 @@ class DBConn(object):
     def __createconn(self):
         from config import Config
         cnf = Config()
-        if cnf["DB::Host"]:
+        if cnf.has_key("DB::Service"):
+            connstr = "postgresql://service=%s" % cnf["DB::Service"]
+        elif cnf.has_key("DB::Host"):
             # TCP/IP
-            connstr = "postgres://%s" % cnf["DB::Host"]
-            if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+            connstr = "postgresql://%s" % cnf["DB::Host"]
+            if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
                 connstr += ":%s" % cnf["DB::Port"]
             connstr += "/%s" % cnf["DB::Name"]
         else:
             # Unix Socket
-            connstr = "postgres:///%s" % cnf["DB::Name"]
-            if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+            connstr = "postgresql:///%s" % cnf["DB::Name"]
+            if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
                 connstr += "?port=%s" % cnf["DB::Port"]

         engine_args = { 'echo': self.debug }
@@ -3185,6 +3200,20 @@ class DBConn(object):
            cnf['DB::Unicode'] == 'false':
             engine_args['use_native_unicode'] = False

+        # Monkey patch a new dialect in in order to support service= syntax
+        import sqlalchemy.dialects.postgresql
+        from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
+        class PGDialect_psycopg2_dak(PGDialect_psycopg2):
+            def create_connect_args(self, url):
+                if str(url).startswith('postgresql://service='):
+                    # Eww
+                    servicename = str(url)[21:]
+                    return (['service=%s' % servicename], {})
+                else:
+                    return PGDialect_psycopg2.create_connect_args(self, url)
+
+        sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak
+
         self.db_pg = create_engine(connstr, **engine_args)
         self.db_meta = MetaData()
         self.db_meta.bind = self.db_pg
diff --git a/daklib/queue.py b/daklib/queue.py
index 646d89c5..dfbe3685 100755
--- a/daklib/queue.py
+++ b/daklib/queue.py
@@ -772,6 +772,30 @@ class Upload(object):
             if not re_valid_pkg_name.match(prov):
                 self.rejects.append("%s: Invalid Provides field content %s." % (f, prov))

+        # If there is a Built-Using field, we need to check we can find the
+        # exact source version
+        built_using = control.Find("Built-Using")
+        if built_using:
+            try:
+                entry["built-using"] = []
+                for dep in apt_pkg.parse_depends(built_using):
+                    bu_s, bu_v, bu_e = dep[0]
+                    # Check that it's an exact match dependency and we have
+                    # some form of version
+                    if bu_e != "=" or len(bu_v) < 1:
+                        self.rejects.append("%s: Built-Using contains non strict dependency (%s %s %s)" % (f, bu_s, bu_e, bu_v))
+                    else:
+                        # Find the source id for this version
+                        bu_so = get_sources_from_name(bu_s, version=bu_v, session = session)
+                        if len(bu_so) != 1:
+                            self.rejects.append("%s: Built-Using (%s = %s): Cannot find source package" % (f, bu_s, bu_v))
+                        else:
+                            entry["built-using"].append( (bu_so[0].source, bu_so[0].version, ) )
+
+            except ValueError, e:
+                self.rejects.append("%s: Cannot parse Built-Using field: %s" % (f, str(e)))
+
+
         # Check the section & priority match those given in the .changes (non-fatal)
         if control.Find("Section") and entry["section"] != "" \
            and entry["section"] != control.Find("Section"):
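
For reference, the Built-Using handling added above reduces to: parse the field
with apt_pkg.parse_depends() and accept only exact "=" relations whose source
versions are known. A minimal standalone sketch (Python 2, matching the dak
codebase of the time; the sample field value and the known_sources stub stand
in for get_sources_from_name() and are invented for illustration):

    import apt_pkg
    apt_pkg.init()

    def check_built_using(built_using, known_sources):
        # Returns (references, rejects) for a Built-Using field value.
        # known_sources is a stand-in for the database lookup done by
        # get_sources_from_name() in daklib/dbconn.py.
        references = []
        rejects = []
        for dep in apt_pkg.parse_depends(built_using):
            bu_s, bu_v, bu_e = dep[0]   # (source, version, relation)
            if bu_e != "=" or len(bu_v) < 1:
                rejects.append("non strict dependency (%s %s %s)" % (bu_s, bu_e, bu_v))
            elif (bu_s, bu_v) not in known_sources:
                rejects.append("(%s = %s): cannot find source package" % (bu_s, bu_v))
            else:
                references.append((bu_s, bu_v))
        return references, rejects

    refs, rejs = check_built_using("gcc-4.5 (= 4.5.2-4), eglibc (= 2.11.2-10)",
                                   [("gcc-4.5", "4.5.2-4"), ("eglibc", "2.11.2-10")])
    print refs   # [('gcc-4.5', '4.5.2-4'), ('eglibc', '2.11.2-10')]
    print rejs   # []

Each accepted reference is recorded in the new extra_src_references table via
bin.extra_sources, which is why clean_suites.py above now skips sources that
still appear there: a source referenced through Built-Using stays in the pool
even after its own last_used date has passed.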