stage $GO
GO=(
- FUNC="merkel1"
+ FUNC="qa1"
TIME="init"
ARGS=""
ERR="false"
)
stage $GO &
-GO=(
- FUNC="merkel2"
- TIME="merkel projectb push"
- ARGS=""
- ERR="false"
-)
-stage $GO &
-
GO=(
FUNC="mirrorpush"
TIME="mirrorpush"
rm -f "${LOCK_BRITNEY}"
GO=(
- FUNC="merkel3"
- TIME="merkel ddaccessible sync"
+ FUNC="ddaccess"
+ TIME="ddaccessible sync"
ARGS=""
ERR="false"
)
$base/dak/tools/queue_rss.py -q $queuedir/new -o $webdir/rss/ -d $base/misc -l $base/log/
$base/dak/tools/removals.pl $configdir/removalsrss.rc > $webdir/rss/removals.rss
+# Tell ries to sync its tree
+ssh -o Batchmode=yes -o ConnectTimeout=30 -o SetupTimeout=30 -2 -i ${base}/s3kr1t/pushddmirror dak@ries.debian.org sync
+
$scriptsdir/generate-di
DB
{
- Name "projectb";
- Host "";
- Port 5433;
+ Service "projectb";
// PoolSize should be at least ThreadCount + 1
- PoolSize 17;
+ PoolSize 5;
// MaxOverflow shouldn't exceed postgresql.conf's max_connections - PoolSize
MaxOverflow 13;
// should be false for encoding == SQL_ASCII
DakConfig "/srv/ftp-master.debian.org/dak/config/debian/dak.conf";
AptConfig "/srv/ftp-master.debian.org/dak/config/debian/apt.conf";
}
+ ries.debian.org
+ {
+ AllowLocalConfig "false";
+ DatabaseHostname "ftp-master";
+ DakConfig "/srv/ftp-master.debian.org/dak/config/debian/dak.conf";
+ AptConfig "/srv/ftp-master.debian.org/dak/config/debian/apt.conf";
+ }
}
########################################################################
# pushing the QA machine, part one
-function merkel1() {
-    log "Telling merkels QA user that we start dinstall"
+# The remote command is just 'sleep 1' -- presumably a forced command on
+# qa.debian.org does the actual work and the connection itself is the
+# "dinstall is starting" signal; verify against the remote authorized_keys.
+function qa1() {
+    log "Telling QA user that we start dinstall"
    ssh -2 -i ~dak/.ssh/push_merkel_qa -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 qa@qa.debian.org sleep 1
}
-# Create the postgres dump files
-function pgdump_pre() {
- log "Creating pre-daily-cron-job backup of $PGDATABASE database..."
- pg_dump > $base/backup/dump_pre_$(date +%Y.%m.%d-%H:%M:%S)
-}
-
-function pgdump_post() {
- log "Creating post-daily-cron-job backup of $PGDATABASE database..."
- cd $base/backup
- POSTDUMP=$(date +%Y.%m.%d-%H:%M:%S)
- pg_dump > $base/backup/dump_$POSTDUMP
- #pg_dumpall --globals-only > $base/backup/dumpall_$POSTDUMP
- ln -sf $base/backup/dump_$POSTDUMP current
- #ln -sf $base/backup/dumpall_$POSTDUMP currentall
-}
-
# Updating various files
function updates() {
log "Updating Bugs docu, Mirror list and mailing-lists.txt"
dak bts-categorize
}
-function merkel2() {
- # Push dak@merkel so it syncs the projectb there. Returns immediately, the sync runs detached
- log "Trigger merkel/flotows $PGDATABASE sync"
- ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_projectb dak@merkel.debian.org sleep 1
- # Also trigger flotow, the ftpmaster test box
- ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_flotow_projectb dak@flotow.debconf.org sleep 1
-}
-
-function merkel3() {
- # Push dak@merkel to tell it to sync the dd accessible parts. Returns immediately, the sync runs detached
- log "Trigger merkels dd accessible parts sync"
- ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_ddaccess dak@merkel.debian.org sleep 1
+function ddaccess() {
+    # Tell our dd accessible mirror to sync itself up. Including ftp dir.
+    # The remote argument 'pool' selects the full sync (no ftp/ exclude) in
+    # check_commandline() of the sync script this change adds on ries.
+    # NOTE(review): '-o SetupTimeout=30' is a Debian-patched OpenSSH option,
+    # not in upstream ssh_config(5) -- confirm the client ssh supports it.
+    log "Trigger dd accessible parts sync including ftp dir"
+    ssh -o Batchmode=yes -o ConnectTimeout=30 -o SetupTimeout=30 -2 -i ${base}/s3kr1t/pushddmirror dak@ries.debian.org pool
+}
function mirrorpush() {
--- /dev/null
+#!/bin/bash
+
+# Copyright (C) 2011 Joerg Jaspert <joerg@debian.org>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+
+set -e
+set -u
+set -E
+
+export LANG=C
+export LC_ALL=C
+
+export SCRIPTVARS=/srv/ftp-master.debian.org/dak/config/debian/vars
+. $SCRIPTVARS
+
+EXTRA=""
+
+# Map the requested mode(s) onto rsync options:
+#   sync -> EXTRA="--exclude ftp/" (dd-accessible parts only)
+#   pool -> full tree including ftp/ (EXTRA stays empty)
+# Unknown words are warned about and ignored rather than aborting the push.
+check_commandline() {
+    while [ $# -gt 0 ]; do
+        case "$1" in
+            sync)
+                EXTRA="--exclude ftp/"
+                ;;
+            pool)
+                ;;
+            *)
+                echo "Unknown option ${1} ignored"
+                ;;
+        esac
+        shift # Check next set of parameters.
+    done
+}
+
+# The requested mode may arrive directly on the command line or -- when the
+# script runs as an ssh forced command -- via SSH_ORIGINAL_COMMAND.
+if [ $# -gt 0 ]; then
+    ORIGINAL_COMMAND=$*
+else
+    ORIGINAL_COMMAND=""
+fi
+
+SSH_ORIGINAL_COMMAND=${SSH_ORIGINAL_COMMAND:-""}
+if [ -n "${SSH_ORIGINAL_COMMAND}" ]; then
+    # The 'nothing' placeholder plus the following shift presumably guard
+    # set(1) against a command string starting with '-'; $* is then left
+    # unquoted on purpose so the forced-command string word-splits into
+    # separate arguments for check_commandline.
+    set "nothing" "${SSH_ORIGINAL_COMMAND}"
+    shift
+    check_commandline $*
+fi
+
+if [ -n "${ORIGINAL_COMMAND}" ]; then
+    set ${ORIGINAL_COMMAND}
+    check_commandline $*
+fi
+
+
+# Remove the lock on every exit path; paired with the lockfile call below.
+cleanup() {
+    rm -f "${HOME}/sync.lock"
+}
+trap cleanup EXIT TERM HUP INT QUIT
+
+# not using $lockdir as that's inside the rsync dir, and --delete would
+# kick the lock away. Yes we could exclude it, but why bother?
+#
+# Also, NEVER use --delete-excluded!
+# lockfile -r3: retry the lock a few times, then give up and skip this
+# run (a previous sync is presumably still going).
+if lockfile -r3 ${HOME}/sync.lock; then
+    cd $base/
+    # ${EXTRA} is deliberately unquoted: it expands to nothing or to the
+    # two words "--exclude ftp/" set by check_commandline above.
+    # NOTE(review): '-o SetupTimeout=30' in the -e ssh command is a
+    # Debian-patched OpenSSH option, not upstream -- confirm support.
+    rsync -aH -B8192 \
+        --exclude backup/*.xz \
+        --exclude backup/dump* \
+        ${EXTRA} \
+        --exclude mirror \
+        --exclude morgue/ \
+        --exclude=lost+found/ \
+        --exclude .da-backup.trace \
+        --delete \
+        --delete-after \
+        --timeout 3600 \
+        -e 'ssh -o ConnectTimeout=30 -o SetupTimeout=30' \
+        ftpmaster-sync:/srv/ftp-master.debian.org/ .
+
+    cd $public/
+    rsync -aH -B8192 \
+        --exclude mirror \
+        --exclude rsync/ \
+        --exclude=lost+found/ \
+        --exclude .da-backup.trace \
+        --exclude web-users/ \
+        --delete \
+        --delete-after \
+        --timeout 3600 \
+        -e 'ssh -o ConnectTimeout=30 -o SetupTimeout=30' \
+        ftpmaster-sync2:/srv/ftp.debian.org/ .
+
+else
+    echo "Couldn't get the lock, not syncing"
+    exit 0
+fi
+
+
+## ftpmaster-sync is defined in .ssh/config as:
+# Host ftpmaster-sync
+# Hostname franck.debian.org
+# User dak
+# IdentityFile ~dak/.ssh/syncftpmaster
+# ForwardX11 no
+# ForwardAgent no
+# StrictHostKeyChecking yes
+# PasswordAuthentication no
+# BatchMode yes
+
+## ftpmaster-sync2 is the same, just a second ssh key
if mode == 'db':
connstr = ""
- if cnf["DB::Host"]:
+ if cnf.has_key("DB::Service"):
+ # Service mode
+ connstr = "postgresql://service=%s" % cnf["DB::Service"]
+ elif cnf.has_key("DB::Host"):
# TCP/IP
connstr = "postgres://%s" % cnf["DB::Host"]
- if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+ if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
connstr += ":%s" % cnf["DB::Port"]
connstr += "/%s" % cnf["DB::Name"]
else:
connstr += "?port=%s" % cnf["DB::Port"]
print connstr
elif mode == 'db-shell':
- e = ['PGDATABASE']
- print "PGDATABASE=%s" % cnf["DB::Name"]
- if cnf["DB::Host"]:
+ e = []
+ if cnf.has_key("DB::Service"):
+ e.append('PGSERVICE')
+ print "PGSERVICE=%s" % cnf["DB::Service"]
+ if cnf.has_key("DB::Name"):
+ e.append('PGDATABASE')
+ print "PGDATABASE=%s" % cnf["DB::Name"]
+ if cnf.has_key("DB::Host"):
print "PGHOST=%s" % cnf["DB::Host"]
e.append('PGHOST')
- if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+ if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
print "PGPORT=%s" % cnf["DB::Port"]
e.append('PGPORT')
print "export " + " ".join(e)
q = session.execute("""
SELECT s.id, f.filename FROM source s, files f
WHERE f.last_used <= :deletedate
- AND s.file = f.id""", {'deletedate': delete_date})
+ AND s.file = f.id
+ AND s.id NOT IN (SELECT src_id FROM extra_src_references)""", {'deletedate': delete_date})
for s in q.fetchall():
Logger.log(["delete source", s[1], s[0]])
if not Options["No-Action"]:
def properties(self):
return ['package', 'version', 'maintainer', 'source', 'architecture', \
'poolfile', 'binarytype', 'fingerprint', 'install_date', \
- 'suites_count', 'binary_id', 'contents_count']
+ 'suites_count', 'binary_id', 'contents_count', 'extra_sources']
def not_null_constraints(self):
return ['package', 'version', 'maintainer', 'source', 'poolfile', \
bin.source_id = bin_sources[0].source_id
+ if entry.has_key("built-using"):
+ for srcname, version in entry["built-using"]:
+ exsources = get_sources_from_name(srcname, version, session=session)
+ if len(exsources) != 1:
+ raise NoSourceFieldError, "Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
+ (srcname, version, bin.package, bin.version, entry["architecture"],
+ filename, bin.binarytype, u.pkg.changes["fingerprint"])
+
+ bin.extra_sources.append(exsources[0])
+
# Add and flush object so it has an ID
session.add(bin)
'changes_pending_files_map',
'changes_pending_source_files',
'changes_pool_files',
+ 'extra_src_references',
# TODO: the maintainer column in table override should be removed.
'override',
'suite_architectures',
fingerprint = relation(Fingerprint),
install_date = self.tbl_binaries.c.install_date,
suites = relation(Suite, secondary=self.tbl_bin_associations,
- backref=backref('binaries', lazy='dynamic'))),
+ backref=backref('binaries', lazy='dynamic')),
+ extra_sources = relation(DBSource, secondary=self.tbl_extra_src_references,
+ backref=backref('extra_binary_references', lazy='dynamic'))),
extension = validator)
mapper(BinaryACL, self.tbl_binary_acl,
def __createconn(self):
from config import Config
cnf = Config()
- if cnf["DB::Host"]:
+ if cnf.has_key("DB::Service"):
+ connstr = "postgresql://service=%s" % cnf["DB::Service"]
+ elif cnf.has_key("DB::Host"):
# TCP/IP
- connstr = "postgres://%s" % cnf["DB::Host"]
- if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+ connstr = "postgresql://%s" % cnf["DB::Host"]
+ if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
connstr += ":%s" % cnf["DB::Port"]
connstr += "/%s" % cnf["DB::Name"]
else:
# Unix Socket
- connstr = "postgres:///%s" % cnf["DB::Name"]
- if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+ connstr = "postgresql:///%s" % cnf["DB::Name"]
+ if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
connstr += "?port=%s" % cnf["DB::Port"]
engine_args = { 'echo': self.debug }
cnf['DB::Unicode'] == 'false':
engine_args['use_native_unicode'] = False
+        # Monkey patch a new dialect in, in order to support the service=
+        # syntax in connection URLs.
+        import sqlalchemy.dialects.postgresql
+        from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
+        class PGDialect_psycopg2_dak(PGDialect_psycopg2):
+            def create_connect_args(self, url):
+                # Translate our pseudo-URL into a libpq 'service=NAME' DSN.
+                if str(url).startswith('postgresql://service='):
+                    # Eww -- 21 == len('postgresql://service=')
+                    servicename = str(url)[21:]
+                    return (['service=%s' % servicename], {})
+                else:
+                    return PGDialect_psycopg2.create_connect_args(self, url)
+
+        # NOTE(review): this rebinds the dialect process-wide, so every
+        # postgresql:// engine created afterwards uses the patched class.
+        sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak
+
self.db_pg = create_engine(connstr, **engine_args)
self.db_meta = MetaData()
self.db_meta.bind = self.db_pg
if not re_valid_pkg_name.match(prov):
self.rejects.append("%s: Invalid Provides field content %s." % (f, prov))
+ # If there is a Built-Using field, we need to check we can find the
+ # exact source version
+ built_using = control.Find("Built-Using")
+ if built_using:
+ try:
+ entry["built-using"] = []
+ for dep in apt_pkg.parse_depends(built_using):
+ bu_s, bu_v, bu_e = dep[0]
+ # Check that it's an exact match dependency and we have
+ # some form of version
+ if bu_e != "=" or len(bu_v) < 1:
+ self.rejects.append("%s: Built-Using contains non strict dependency (%s %s %s)" % (f, bu_s, bu_e, bu_v))
+ else:
+ # Find the source id for this version
+ bu_so = get_sources_from_name(bu_s, version=bu_v, session = session)
+ if len(bu_so) != 1:
+ self.rejects.append("%s: Built-Using (%s = %s): Cannot find source package" % (f, bu_s, bu_v))
+ else:
+ entry["built-using"].append( (bu_so[0].source, bu_so[0].version, ) )
+
+ except ValueError, e:
+ self.rejects.append("%s: Cannot parse Built-Using field: %s" % (f, str(e)))
+
+
# Check the section & priority match those given in the .changes (non-fatal)
if control.Find("Section") and entry["section"] != "" \
and entry["section"] != control.Find("Section"):