From: Frank Lichtenheld Date: Sat, 31 Oct 2009 12:47:16 +0000 (+0000) Subject: Merge commit 'mhy/master' into process-upload X-Git-Url: https://git.decadent.org.uk/gitweb/?a=commitdiff_plain;h=d78cd5b22422f0a9d4660970b2b506c39b6139f8;hp=a2fafa4a1a2a97fd18ec98483c245e48b6500a33;p=dak.git Merge commit 'mhy/master' into process-upload --- diff --git a/dak/clean_queues.py b/dak/clean_queues.py index a5b15427..f30d7f12 100755 --- a/dak/clean_queues.py +++ b/dak/clean_queues.py @@ -83,10 +83,10 @@ def init (cnf): os.chdir(incoming) # Remove a file to the morgue -def remove (f): +def remove (from_dir, f): fname = os.path.basename(f) if os.access(f, os.R_OK): - Logger.log(["move file to morgue", fname, del_dir]) + Logger.log(["move file to morgue", from_dir, fname, del_dir]) if Options["Verbose"]: print "Removing '%s' (to '%s')." % (fname, del_dir) if Options["No-Action"]: @@ -106,11 +106,11 @@ def remove (f): # [Used for Incoming/REJECT] # def flush_old (): - Logger.log(["check Incoming/REJECT for old files"]) + Logger.log(["check Incoming/REJECT for old files", os.getcwd()]) for f in os.listdir('.'): if os.path.isfile(f): if os.stat(f)[stat.ST_MTIME] < delete_date: - remove(f) + remove('Incoming/REJECT', f) else: if Options["Verbose"]: print "Skipping, too new, '%s'." % (os.path.basename(f)) @@ -122,7 +122,7 @@ def flush_orphans (): all_files = {} changes_files = [] - Logger.log(["check Incoming for old orphaned files"]) + Logger.log(["check Incoming for old orphaned files", os.getcwd()]) # Build up the list of all files in the directory for i in os.listdir('.'): if os.path.isfile(i): @@ -163,7 +163,7 @@ def flush_orphans (): # a .dsc) and should be deleted if old enough. for f in all_files.keys(): if os.stat(f)[stat.ST_MTIME] < delete_date: - remove(f) + remove('Incoming', f) else: if Options["Verbose"]: print "Skipping, too new, '%s'." % (os.path.basename(f)) diff --git a/dak/clean_suites.py b/dak/clean_suites.py index 0d3b4732..99f0c8b4 100755 --- a/dak/clean_suites.py +++ b/dak/clean_suites.py @@ -338,7 +338,7 @@ def clean_queue_build(now_date, delete_date, max_delete, session): our_delete_date = now_date - timedelta(seconds = int(cnf["Clean-Suites::QueueBuildStayOfExecution"])) count = 0 - for qf in session.query(QueueBuild).filter(QueueBuild.last_used <= our_delete_date): + for qf in session.query(BuildQueueFile).filter(BuildQueueFile.last_used <= our_delete_date): if not os.path.exists(qf.filename): utils.warn("%s (from queue_build) doesn't exist." % (qf.filename)) continue diff --git a/dak/dakdb/update22.py b/dak/dakdb/update22.py new file mode 100755 index 00000000..7234555e --- /dev/null +++ b/dak/dakdb/update22.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python +# coding=utf8 + +""" +Clean up queue SQL + +@contact: Debian FTP Master +@copyright: 2009 Mark Hymers +@license: GNU General Public License version 2 or later +""" + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+ +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +################################################################################ + + +################################################################################ + +import psycopg2 +import time +import os +import datetime +import traceback + +from daklib.dak_exceptions import DBUpdateError +from daklib.config import Config + +################################################################################ + +def do_update(self): + print "Splitting up queues and fixing general design mistakes" + + try: + c = self.db.cursor() + + cnf = Config() + + print "Adding build_queue table" + c.execute("""CREATE TABLE build_queue ( + id SERIAL PRIMARY KEY, + queue_name TEXT NOT NULL UNIQUE, + path TEXT NOT NULL, + copy_files BOOL DEFAULT FALSE NOT NULL)""") + + print "Adding policy_queue table" + c.execute("""CREATE TABLE policy_queue ( + id SERIAL PRIMARY KEY, + queue_name TEXT NOT NULL UNIQUE, + path TEXT NOT NULL)""") + + print "Copying queues" + queues = {} + c.execute("""SELECT queue.id, queue.queue_name, queue.path, queue.copy_pool_files FROM queue""") + + for q in c.fetchall(): + queues[q[0]] = q[1] + if q[1] in ['accepted', 'buildd']: + # Move to build_queue_table + c.execute("""INSERT INTO build_queue (queue_name, path, copy_files) + VALUES ('%s', '%s', '%s')""" % (q[1], q[2], q[3])) + + else: + # Move to policy_queue_table + c.execute("""INSERT INTO policy_queue (queue_name, path) + VALUES ('%s', '%s')""" % (q[1], q[2])) + + + print "Fixing up build_queue_files" + c.execute("""ALTER TABLE queue_files DROP CONSTRAINT queue_files_queueid_fkey""") + c.execute("""ALTER TABLE queue_files RENAME TO build_queue_files""") + c.execute("""ALTER TABLE build_queue_files RENAME COLUMN queueid TO build_queue_id""") + + c.execute("""UPDATE build_queue_files + SET build_queue_id = (SELECT build_queue.id FROM build_queue + WHERE build_queue.queue_name = + (SELECT queue.queue_name FROM queue + WHERE queue.id = build_queue_files.build_queue_id))""") + + c.execute("""ALTER TABLE build_queue_files + ADD CONSTRAINT build_queue_files_build_queue_id_fkey + FOREIGN KEY (build_queue_id) + REFERENCES build_queue(id) + ON DELETE CASCADE""") + + + c.execute("""ALTER TABLE suite DROP CONSTRAINT suite_policy_queue_fkey""") + + c.execute("""UPDATE suite + SET policy_queue_id = (SELECT policy_queue.id FROM policy_queue + WHERE policy_queue.queue_name = + (SELECT queue.queue_name FROM queue + WHERE queue.id = suite.policy_queue_id))""") + + c.execute("""ALTER TABLE suite + ADD CONSTRAINT suite_policy_queue_fkey + FOREIGN KEY (policy_queue_id) + REFERENCES policy_queue (id) + ON DELETE RESTRICT""") + + c.execute("""ALTER TABLE known_changes DROP CONSTRAINT known_changes_approved_for_fkey""") + c.execute("""ALTER TABLE known_changes DROP CONSTRAINT known_changes_in_queue_fkey""") + + c.execute("""UPDATE known_changes + SET in_queue = (SELECT policy_queue.id FROM policy_queue + WHERE policy_queue.queue_name = + (SELECT queue.queue_name FROM queue + WHERE queue.id = known_changes.in_queue))""") + + c.execute("""ALTER TABLE known_changes + ADD CONSTRAINT known_changes_in_queue_fkey + FOREIGN KEY (in_queue) + REFERENCES policy_queue (id) + ON DELETE RESTRICT""") + + + + c.execute("""UPDATE known_changes + SET approved_for = (SELECT policy_queue.id FROM policy_queue + WHERE policy_queue.queue_name = + (SELECT 
queue.queue_name FROM queue + WHERE queue.id = known_changes.approved_for))""") + + c.execute("""ALTER TABLE known_changes + ADD CONSTRAINT known_changes_approved_for_fkey + FOREIGN KEY (in_queue) + REFERENCES policy_queue (id) + ON DELETE RESTRICT""") + + c.execute("""ALTER TABLE suite_queue_copy RENAME TO suite_build_queue_copy""") + + c.execute("""ALTER TABLE suite_build_queue_copy DROP CONSTRAINT suite_queue_copy_queue_fkey""") + + c.execute("""ALTER TABLE suite_build_queue_copy RENAME COLUMN queue TO build_queue_id""") + + c.execute("""UPDATE suite_build_queue_copy + SET build_queue_id = (SELECT build_queue.id FROM build_queue + WHERE build_queue.queue_name = + (SELECT queue.queue_name FROM queue + WHERE queue.id = suite_build_queue_copy.build_queue_id))""") + + c.execute("""ALTER TABLE suite_build_queue_copy + ADD CONSTRAINT suite_build_queue_copy_build_queue_id_fkey + FOREIGN KEY (build_queue_id) + REFERENCES build_queue (id) + ON DELETE RESTRICT""") + + c.execute("""DROP TABLE changes_pending_files""") + + c.execute("""CREATE TABLE changes_pending_files ( + id SERIAL PRIMARY KEY, + filename TEXT NOT NULL UNIQUE, + size BIGINT NOT NULL, + md5sum TEXT NOT NULL, + sha1sum TEXT NOT NULL, + sha256sum TEXT NOT NULL )""") + + c.execute("""CREATE TABLE changes_pending_files_map ( + file_id INT4 NOT NULL REFERENCES changes_pending_files (id), + change_id INT4 NOT NULL REFERENCES known_changes (id), + + PRIMARY KEY (file_id, change_id))""") + + c.execute("""CREATE TABLE changes_pending_source ( + id SERIAL PRIMARY KEY, + change_id INT4 NOT NULL REFERENCES known_changes (id), + source TEXT NOT NULL, + version DEBVERSION NOT NULL, + maintainer_id INT4 NOT NULL REFERENCES maintainer (id), + changedby_id INT4 NOT NULL REFERENCES maintainer (id), + sig_fpr INT4 NOT NULL REFERENCES fingerprint (id), + dm_upload_allowed BOOL NOT NULL DEFAULT FALSE )""") + + c.execute("""CREATE TABLE changes_pending_source_files ( + pending_source_id INT4 REFERENCES changes_pending_source (id) NOT NULL, + pending_file_id INT4 REFERENCES changes_pending_files (id) NOT NULL, + + PRIMARY KEY (pending_source_id, pending_file_id) )""") + + c.execute("""CREATE TABLE changes_pending_binaries ( + id SERIAL PRIMARY KEY, + change_id INT4 NOT NULL REFERENCES known_changes (id), + package TEXT NOT NULL, + version DEBVERSION NOT NULL, + architecture_id INT4 REFERENCES architecture (id) NOT NULL, + source_id INT4 REFERENCES source (id), + pending_source_id INT4 REFERENCES changes_pending_source (id), + pending_file_id INT4 REFERENCES changes_pending_files (id), + + UNIQUE (package, version, architecture_id), + CHECK (source_id IS NOT NULL or pending_source_id IS NOT NULL ) )""") + + print "Getting rid of old queue table" + c.execute("""DROP TABLE queue""") + + print "Moving known_changes table" + c.execute("""ALTER TABLE known_changes RENAME TO changes""") + + print "Sorting out permissions" + + for t in ['build_queue', 'policy_queue', 'build_queue_files', + 'changes_pending_binaries', 'changes_pending_source_files', + 'changes_pending_source', 'changes_pending_files', + 'changes_pool_files', 'suite_build_queue_copy']: + c.execute("GRANT SELECT ON %s TO public" % t) + c.execute("GRANT ALL ON %s TO ftpmaster" % t) + + for s in ['queue_files_id_seq', 'build_queue_id_seq', + 'changes_pending_source_id_seq', + 'changes_pending_binaries_id_seq', + 'changes_pending_files_id_seq', + 'changes_pending_source_id_seq', + 'known_changes_id_seq', + 'policy_queue_id_seq']: + c.execute("GRANT USAGE ON %s TO ftpmaster" % s) + + print 
"Committing" + c.execute("UPDATE config SET value = '22' WHERE name = 'db_revision'") + self.db.commit() + + except psycopg2.InternalError, msg: + self.db.rollback() + raise DBUpdateError, "Unable to apply queue_build 21, rollback issued. Error message : %s" % (str(msg)) diff --git a/dak/import_known_changes.py b/dak/import_known_changes.py index cdb1d3af..c8d5bf96 100755 --- a/dak/import_known_changes.py +++ b/dak/import_known_changes.py @@ -32,7 +32,7 @@ import sys import os import logging import threading -from daklib.dbconn import DBConn,get_knownchange +from daklib.dbconn import DBConn, get_dbchange from daklib.config import Config import apt_pkg from daklib.dak_exceptions import DBUpdateError, InvalidDscError, ChangesUnicodeError @@ -218,7 +218,7 @@ class ChangesGenerator(threading.Thread): continue count += 1 - if not get_knownchange(changesfile, self.session): + if not get_dbchange(changesfile, self.session): to_import = ChangesToImport(dirpath, changesfile, count) if self.die: return diff --git a/dak/process_upload.py b/dak/process_upload.py index cf1594a3..97117b8a 100755 --- a/dak/process_upload.py +++ b/dak/process_upload.py @@ -125,21 +125,53 @@ Checks Debian packages from Incoming ## pu: create files for BTS ## pu: create entry in queue_build ## pu: check overrides -import errno + +# Integrity checks +## GPG +## Parsing changes (check for duplicates) +## Parse dsc +## file list checks + +# New check layout (TODO: Implement) +## Permission checks +### suite mappings +### ACLs +### version checks (suite) +### override checks + +## Source checks +### copy orig +### unpack +### BTS changelog +### src contents +### lintian +### urgency log + +## Binary checks +### timestamps +### control checks +### src relation check +### contents + +## Database insertion (? copy from stuff) +### BYHAND / NEW / Policy queues +### Pool + +## Queue builds + +from errno import EACCES, EAGAIN import fcntl import os import sys -#from datetime import datetime import traceback import apt_pkg +from sqlalchemy.orm.exc import NoResultFound from daklib import daklog from daklib.queue import * from daklib.queue_install import * from daklib import utils from daklib.dbconn import * -#from daklib.dak_exceptions import * -#from daklib.regexes import re_default_answer, re_issource, re_fdnic from daklib.urgencylog import UrgencyLog from daklib.summarystats import SummaryStats from daklib.holding import Holding @@ -164,13 +196,14 @@ def usage (exit_code=0): ############################################################################### -def action(u): +def action(u, dbc): cnf = Config() holding = Holding() + session = DBConn().session() # changes["distribution"] may not exist in corner cases # (e.g. unreadable changes files) - if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], DictType): + if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], dict): u.pkg.changes["distribution"] = {} (summary, short_summary) = u.build_summaries() @@ -193,6 +226,8 @@ def action(u): if Options["Automatic"]: answer = 'R' else: + # Are we headed for NEW / BYHAND / AUTOBYHAND? + # Note that policy queues are no longer handled here qu = determine_target(u) if qu: print "%s for %s\n%s%s" % ( qu.upper(), ", ".join(u.pkg.changes["distribution"].keys()), pi, summary) @@ -205,10 +240,38 @@ def action(u): if Options["Automatic"]: answer = queuekey else: - print "ACCEPT\n" + pi + summary, - prompt = "[A]ccept, Skip, Quit ?" 
- if Options["Automatic"]: - answer = 'A' + # TODO: FIX THIS BY HAVING ADDED TO changes TABLE earlier + try: + dbc = session.query(DBChange).filter_by(changesname=os.path.basename(u.pkg.changes_file)).one() + except NoResultFound, e: + dbc = None + + # Does suite have a policy_queue configured + divert = False + for s in u.pkg.changes["distribution"].keys(): + suite = get_suite(s, session) + if suite.policy_queue: + if not dbc or dbc.approved_for_id != su.policy_queue.policy_queue_id: + # This routine will check whether the upload is a binary + # upload when the source is already in the target suite. If + # so, we skip the policy queue, otherwise we go there. + divert = package_to_suite(u, suite.suite_name, session=session) + if divert: + print "%s for %s\n%s%s" % ( su.policy_queue.queue_name.upper(), + ", ".join(u.pkg.changes["distribution"].keys()), + pi, summary) + queuekey = "P" + prompt = "[P]olicy, Skip, Quit ?" + policyqueue = su.policy_queue + if Options["Automatic"]: + answer = 'P' + break + + if not divert: + print "ACCEPT\n" + pi + summary, + prompt = "[A]ccept, Skip, Quit ?" + if Options["Automatic"]: + answer = 'A' while prompt.find(answer) == -1: answer = utils.our_raw_input(prompt) @@ -217,8 +280,6 @@ def action(u): answer = m.group(1) answer = answer[:1].upper() - session = DBConn().session() - if answer == 'R': os.chdir(u.pkg.directory) u.do_reject(0, pi) @@ -227,6 +288,10 @@ def action(u): u.accept(summary, short_summary, session) u.check_override() u.remove() + elif answer == 'P': + u.pkg.add_known_changes(holding.holding_dir, session) + package_to_queue(u, summary, short_summary, policyqueue, perms=0664, announce=None) + u.remove() elif answer == queuekey: u.pkg.add_known_changes(holding.holding_dir, session) QueueInfo[qu]["process"](u, summary, short_summary, session) @@ -252,6 +317,9 @@ def process_it(changes_file): holding = Holding() + # TODO: Actually implement using pending* tables so that we don't lose track + # of what is where + u = Upload() u.pkg.changes_file = changes_file u.pkg.directory = os.getcwd() diff --git a/dak/update_db.py b/dak/update_db.py index 3d79c9f8..27b6ad8f 100755 --- a/dak/update_db.py +++ b/dak/update_db.py @@ -45,7 +45,7 @@ from daklib.dak_exceptions import DBUpdateError ################################################################################ Cnf = None -required_database_schema = 21 +required_database_schema = 22 ################################################################################ diff --git a/daklib/changes.py b/daklib/changes.py index cab9217b..4d23f9fa 100755 --- a/daklib/changes.py +++ b/daklib/changes.py @@ -179,7 +179,7 @@ class Changes(object): @session_wrapper def remove_known_changes(self, session=None): - session.delete(get_knownchange(self.changes_file, session)) + session.delete(get_dbchange(self.changes_file, session)) def mark_missing_fields(self): """add "missing" in fields which we will require for the known_changes table""" diff --git a/daklib/dbconn.py b/daklib/dbconn.py index 95e30c7c..7add55ff 100755 --- a/daklib/dbconn.py +++ b/daklib/dbconn.py @@ -50,8 +50,6 @@ from sqlalchemy import types as sqltypes from sqlalchemy.exc import * from sqlalchemy.orm.exc import NoResultFound -# Only import Config until Queue stuff is changed to store its config -# in the database from config import Config from singleton import Singleton from textutils import fix_maintainer @@ -432,6 +430,132 @@ __all__.append('BinaryACLMap') ################################################################################ +class 
BuildQueue(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.queue_name + + def add_file_from_pool(self, poolfile): + """Copies a file into the pool. Assumes that the PoolFile object is + attached to the same SQLAlchemy session as the Queue object is. + + The caller is responsible for committing after calling this function.""" + poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:] + + # Check if we have a file of this name or this ID already + for f in self.queuefiles: + if f.fileid is not None and f.fileid == poolfile.file_id or \ + f.poolfile.filename == poolfile_basename: + # In this case, update the QueueFile entry so we + # don't remove it too early + f.lastused = datetime.now() + DBConn().session().object_session(pf).add(f) + return f + + # Prepare QueueFile object + qf = QueueFile() + qf.queue_id = self.queue_id + qf.lastused = datetime.now() + qf.filename = dest + + targetpath = qf.fullpath + queuepath = os.path.join(self.path, poolfile_basename) + + try: + if self.copy_pool_files: + # We need to copy instead of symlink + import utils + utils.copy(targetfile, queuepath) + # NULL in the fileid field implies a copy + qf.fileid = None + else: + os.symlink(targetfile, queuepath) + qf.fileid = poolfile.file_id + except OSError: + return None + + # Get the same session as the PoolFile is using and add the qf to it + DBConn().session().object_session(poolfile).add(qf) + + return qf + + +__all__.append('BuildQueue') + +@session_wrapper +def get_queue(queuename, session=None): + """ + Returns Queue object for given C{queue name}, creating it if it does not + exist. + + @type queuename: string + @param queuename: The name of the queue + + @type session: Session + @param session: Optional SQLA session object (a temporary one will be + generated if not supplied) + + @rtype: Queue + @return: Queue object for the given queue + """ + + q = session.query(Queue).filter_by(queue_name=queuename) + + try: + return q.one() + except NoResultFound: + return None + +__all__.append('get_queue') + +################################################################################ + +class BuildQueueFile(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % (self.filename, self.queue_id) + +__all__.append('BuildQueueFile') + +################################################################################ + +class ChangePendingBinary(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.change_pending_binary_id + +__all__.append('ChangePendingBinary') + +################################################################################ + +class ChangePendingFile(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.change_pending_file_id + +__all__.append('ChangePendingFile') + +################################################################################ + +class ChangePendingSource(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.change_pending_source_id + +__all__.append('ChangePendingSource') + +################################################################################ + class Component(object): def __init__(self, *args, **kwargs): pass @@ -1124,19 +1248,19 @@ __all__.append('KeyringACLMap') ################################################################################ -class KnownChange(object): +class DBChange(object): def __init__(self, *args, 
**kwargs): pass def __repr__(self): - return '' % self.changesname + return '' % self.changesname -__all__.append('KnownChange') +__all__.append('DBChange') @session_wrapper -def get_knownchange(filename, session=None): +def get_dbchange(filename, session=None): """ - returns knownchange object for given C{filename}. + returns DBChange object for given C{filename}. @type archive: string @param archive: the name of the arhive @@ -1149,25 +1273,14 @@ def get_knownchange(filename, session=None): @return: Archive object for the given name (None if not present) """ - q = session.query(KnownChange).filter_by(changesname=filename) + q = session.query(DBChange).filter_by(changesname=filename) try: return q.one() except NoResultFound: return None -__all__.append('get_knownchange') - -################################################################################ - -class KnownChangePendingFile(object): - def __init__(self, *args, **kwargs): - pass - - def __repr__(self): - return '' % self.known_change_pending_file_id - -__all__.append('KnownChangePendingFile') +__all__.append('get_dbchange') ################################################################################ @@ -1537,6 +1650,17 @@ __all__.append('insert_pending_content_paths') ################################################################################ +class PolicyQueue(object): + def __init__(self, *args, **kwargs): + pass + + def __repr__(self): + return '' % self.queue_name + +__all__.append('PolicyQueue') + +################################################################################ + class Priority(object): def __init__(self, *args, **kwargs): pass @@ -1607,99 +1731,6 @@ __all__.append('get_priorities') ################################################################################ -class Queue(object): - def __init__(self, *args, **kwargs): - pass - - def __repr__(self): - return '' % self.queue_name - - def add_file_from_pool(self, poolfile): - """Copies a file into the pool. Assumes that the PoolFile object is - attached to the same SQLAlchemy session as the Queue object is. - - The caller is responsible for committing after calling this function.""" - poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:] - - # Check if we have a file of this name or this ID already - for f in self.queuefiles: - if f.fileid is not None and f.fileid == poolfile.file_id or \ - f.poolfile.filename == poolfile_basename: - # In this case, update the QueueFile entry so we - # don't remove it too early - f.lastused = datetime.now() - DBConn().session().object_session(pf).add(f) - return f - - # Prepare QueueFile object - qf = QueueFile() - qf.queue_id = self.queue_id - qf.lastused = datetime.now() - qf.filename = dest - - targetpath = qf.fullpath - queuepath = os.path.join(self.path, poolfile_basename) - - try: - if self.copy_pool_files: - # We need to copy instead of symlink - import utils - utils.copy(targetfile, queuepath) - # NULL in the fileid field implies a copy - qf.fileid = None - else: - os.symlink(targetfile, queuepath) - qf.fileid = poolfile.file_id - except OSError: - return None - - # Get the same session as the PoolFile is using and add the qf to it - DBConn().session().object_session(poolfile).add(qf) - - return qf - - -__all__.append('Queue') - -@session_wrapper -def get_queue(queuename, session=None): - """ - Returns Queue object for given C{queue name}, creating it if it does not - exist. 
- - @type queuename: string - @param queuename: The name of the queue - - @type session: Session - @param session: Optional SQLA session object (a temporary one will be - generated if not supplied) - - @rtype: Queue - @return: Queue object for the given queue - """ - - q = session.query(Queue).filter_by(queue_name=queuename) - - try: - return q.one() - except NoResultFound: - return None - -__all__.append('get_queue') - -################################################################################ - -class QueueFile(object): - def __init__(self, *args, **kwargs): - pass - - def __repr__(self): - return '' % (self.filename, self.queue_id) - -__all__.append('QueueFile') - -################################################################################ - class Section(object): def __init__(self, *args, **kwargs): pass @@ -2459,18 +2490,24 @@ class DBConn(Singleton): self.tbl_binaries = Table('binaries', self.db_meta, autoload=True) self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True) self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True) + self.tbl_build_queue = Table('build_queue', self.db_meta, autoload=True) + self.tbl_build_queue_files = Table('build_queue_files', self.db_meta, autoload=True) self.tbl_component = Table('component', self.db_meta, autoload=True) self.tbl_config = Table('config', self.db_meta, autoload=True) self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True) self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True) self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True) + self.tbl_changes_pending_binary = Table('changes_pending_binaries', self.db_meta, autoload=True) self.tbl_changes_pending_files = Table('changes_pending_files', self.db_meta, autoload=True) + self.tbl_changes_pending_files_map = Table('changes_pending_files_map', self.db_meta, autoload=True) + self.tbl_changes_pending_source = Table('changes_pending_source', self.db_meta, autoload=True) + self.tbl_changes_pending_source_files = Table('changes_pending_source_files', self.db_meta, autoload=True) self.tbl_changes_pool_files = Table('changes_pool_files', self.db_meta, autoload=True) self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True) self.tbl_files = Table('files', self.db_meta, autoload=True) self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True) self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True) - self.tbl_known_changes = Table('known_changes', self.db_meta, autoload=True) + self.tbl_changes = Table('changes', self.db_meta, autoload=True) self.tbl_keyring_acl_map = Table('keyring_acl_map', self.db_meta, autoload=True) self.tbl_location = Table('location', self.db_meta, autoload=True) self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True) @@ -2478,9 +2515,8 @@ class DBConn(Singleton): self.tbl_override = Table('override', self.db_meta, autoload=True) self.tbl_override_type = Table('override_type', self.db_meta, autoload=True) self.tbl_pending_content_associations = Table('pending_content_associations', self.db_meta, autoload=True) + self.tbl_policy_queue = Table('policy_queue', self.db_meta, autoload=True) self.tbl_priority = Table('priority', self.db_meta, autoload=True) - self.tbl_queue = Table('queue', self.db_meta, autoload=True) - self.tbl_queue_files = Table('queue_files', self.db_meta, autoload=True) self.tbl_section = Table('section', self.db_meta, autoload=True) self.tbl_source = 
Table('source', self.db_meta, autoload=True) self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True) @@ -2490,7 +2526,7 @@ class DBConn(Singleton): self.tbl_suite = Table('suite', self.db_meta, autoload=True) self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True) self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True) - self.tbl_suite_queue_copy = Table('suite_queue_copy', self.db_meta, autoload=True) + self.tbl_suite_build_queue_copy = Table('suite_build_queue_copy', self.db_meta, autoload=True) self.tbl_uid = Table('uid', self.db_meta, autoload=True) self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True) @@ -2509,6 +2545,12 @@ class DBConn(Singleton): binary_id = self.tbl_bin_associations.c.bin, binary = relation(DBBinary))) + mapper(BuildQueue, self.tbl_build_queue, + properties = dict(queue_id = self.tbl_build_queue.c.id)) + + mapper(BuildQueueFile, self.tbl_build_queue_files, + properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'), + poolfile = relation(PoolFile, backref='buildqueueinstances'))) mapper(DBBinary, self.tbl_binaries, properties = dict(binary_id = self.tbl_binaries.c.id, @@ -2570,16 +2612,36 @@ class DBConn(Singleton): properties = dict(keyring_name = self.tbl_keyrings.c.name, keyring_id = self.tbl_keyrings.c.id)) - mapper(KnownChange, self.tbl_known_changes, - properties = dict(known_change_id = self.tbl_known_changes.c.id, + mapper(DBChange, self.tbl_changes, + properties = dict(change_id = self.tbl_changes.c.id, poolfiles = relation(PoolFile, secondary=self.tbl_changes_pool_files, backref="changeslinks"), - files = relation(KnownChangePendingFile, backref="changesfile"))) - - mapper(KnownChangePendingFile, self.tbl_changes_pending_files, - properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.id)) - + files = relation(ChangePendingFile, + secondary=self.tbl_changes_pending_files_map, + backref="changesfile"), + in_queue_id = self.tbl_changes.c.in_queue, + in_queue = relation(PolicyQueue, + primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)), + approved_for_id = self.tbl_changes.c.approved_for)) + + mapper(ChangePendingBinary, self.tbl_changes_pending_binary, + properties = dict(change_pending_binary_id = self.tbl_changes_pending_binary.c.id)) + + mapper(ChangePendingFile, self.tbl_changes_pending_files, + properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id)) + + mapper(ChangePendingSource, self.tbl_changes_pending_source, + properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id, + change = relation(DBChange), + maintainer = relation(Maintainer, + primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)), + changedby = relation(Maintainer, + primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)), + fingerprint = relation(Fingerprint), + source_files = relation(ChangePendingFile, + secondary=self.tbl_changes_pending_source_files, + backref="pending_sources"))) mapper(KeyringACLMap, self.tbl_keyring_acl_map, properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id, keyring = relation(Keyring, backref="keyring_acl_map"), @@ -2615,16 +2677,12 @@ class DBConn(Singleton): properties = dict(overridetype = self.tbl_override_type.c.type, overridetype_id = self.tbl_override_type.c.id)) + mapper(PolicyQueue, self.tbl_policy_queue, + properties = dict(policy_queue_id = 
self.tbl_policy_queue.c.id)) + mapper(Priority, self.tbl_priority, properties = dict(priority_id = self.tbl_priority.c.id)) - mapper(Queue, self.tbl_queue, - properties = dict(queue_id = self.tbl_queue.c.id)) - - mapper(QueueFile, self.tbl_queue_files, - properties = dict(queue = relation(Queue, backref='queuefiles'), - poolfile = relation(PoolFile, backref='queueinstances'))) - mapper(Section, self.tbl_section, properties = dict(section_id = self.tbl_section.c.id)) @@ -2672,8 +2730,8 @@ class DBConn(Singleton): mapper(Suite, self.tbl_suite, properties = dict(suite_id = self.tbl_suite.c.id, - policy_queue = relation(Queue), - copy_queues = relation(Queue, secondary=self.tbl_suite_queue_copy))) + policy_queue = relation(PolicyQueue), + copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy))) mapper(SuiteArchitecture, self.tbl_suite_architectures, properties = dict(suite_id = self.tbl_suite_architectures.c.suite, diff --git a/daklib/queue.py b/daklib/queue.py index effbb4e0..15a83958 100755 --- a/daklib/queue.py +++ b/daklib/queue.py @@ -438,12 +438,6 @@ class Upload(object): self.pkg.changes["chopversion"] = re_no_epoch.sub('', self.pkg.changes["version"]) self.pkg.changes["chopversion2"] = re_no_revision.sub('', self.pkg.changes["chopversion"]) - # Check there isn't already a changes file of the same name in one - # of the queue directories. - base_filename = os.path.basename(filename) - if get_knownchange(base_filename): - self.rejects.append("%s: a file with this name already exists." % (base_filename)) - # Check the .changes is non-empty if not self.pkg.files: self.rejects.append("%s: nothing to do (Files field is empty)." % (base_filename)) @@ -822,8 +816,8 @@ class Upload(object): session = DBConn().session() try: - changes = session.query(KnownChange).filter_by(changesname=base_filename).one() - if not changes.approved_for: + dbc = session.query(DBChange).filter_by(changesname=base_filename).one() + if dbc.in_queue is not None and dbc.in_queue.queue_name != 'unchecked': self.rejects.append("%s file already known to dak" % base_filename) except NoResultFound, e: # not known, good diff --git a/daklib/queue_install.py b/daklib/queue_install.py index c8fa39e0..d6b651b2 100644 --- a/daklib/queue_install.py +++ b/daklib/queue_install.py @@ -31,60 +31,37 @@ from daklib import utils from daklib.dbconn import * from daklib.config import Config -############################################################################### - -def determine_target(u): - cnf = Config() - - queues = [ "New", "Autobyhand", "Byhand" ] - if cnf.FindB("Dinstall::SecurityQueueHandling"): - queues += [ "Unembargo", "Embargo" ] - else: - queues += [ "OldStableUpdate", "StableUpdate" ] - - target = None - for q in queues: - if QueueInfo[q]["is"](u): - target = q - break - - return target - ################################################################################ -def package_to_suite(u, suite): +def package_to_suite(u, suite_name, session): if not u.pkg.changes["distribution"].has_key(suite): return False ret = True if not u.pkg.changes["architecture"].has_key("source"): - s = DBConn().session() - q = s.query(SrcAssociation.sa_id) + q = session.query(SrcAssociation.sa_id) q = q.join(Suite).filter_by(suite_name=suite) q = q.join(DBSource).filter_by(source=u.pkg.changes['source']) q = q.filter_by(version=u.pkg.changes['version']).limit(1) # NB: Careful, this logic isn't what you would think it is - # Source is already in {old-,}proposed-updates so no need to hold - # Instead, we don't move to 
the holding area, we just do an ACCEPT + # Source is already in the target suite so no need to go to policy + # Instead, we don't move to the policy area, we just do an ACCEPT if q.count() > 0: ret = False - s.close() - return ret -def package_to_queue(u, summary, short_summary, queue, perms=0660, build=True, announce=None): +def package_to_queue(u, summary, short_summary, queue, perms=0660, announce=None): cnf = Config() - dir = cnf["Dir::Queue::%s" % queue] + dir = queue.path - print "Moving to %s holding area" % queue.upper() - u.logger.log(["Moving to %s" % queue, u.pkg.changes_file]) + print "Moving to %s policy queue" % queue.queue_name.upper() + u.logger.log(["Moving to %s" % queue.queue_name, u.pkg.changes_file]) u.move_to_dir(dir, perms=perms) - if build: - get_or_set_queue(queue.lower()).autobuild_upload(u.pkg, dir) + # TODO: Put building logic in here? We used to take a build=bool argument # Check for override disparities u.check_override() @@ -100,64 +77,49 @@ def package_to_queue(u, summary, short_summary, queue, perms=0660, build=True, a ################################################################################ -def is_unembargo(u): - session = DBConn().session() - cnf = Config() - - q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes) - if q.rowcount > 0: - session.close() - return True - - oldcwd = os.getcwd() - os.chdir(cnf["Dir::Queue::Disembargo"]) - disdir = os.getcwd() - os.chdir(oldcwd) - - ret = False - - if u.pkg.directory == disdir: - if u.pkg.changes["architecture"].has_key("source"): - session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes) - session.commit() - - ret = True - - session.close() - - return ret - -def queue_unembargo(u, summary, short_summary, session=None): - return package_to_queue(u, summary, short_summary, "Unembargoed", - perms=0660, build=True, announce='process-unchecked.accepted') - -################################################################################ - -def is_embargo(u): - # if embargoed queues are enabled always embargo - return True - -def queue_embargo(u, summary, short_summary, session=None): - return package_to_queue(u, summary, short_summary, "Unembargoed", - perms=0660, build=True, announce='process-unchecked.accepted') - -################################################################################ - -def is_stableupdate(u): - return package_to_suite(u, 'proposed-updates') - -def do_stableupdate(u, summary, short_summary, session=None): - return package_to_queue(u, summary, short_summary, "ProposedUpdates", - perms=0664, build=False, announce=None) - -################################################################################ - -def is_oldstableupdate(u): - return package_to_suite(u, 'oldstable-proposed-updates') - -def do_oldstableupdate(u, summary, short_summary, session=None): - return package_to_queue(u, summary, short_summary, "OldProposedUpdates", - perms=0664, build=False, announce=None) +# TODO: This logic needs to be replaced with policy queues before we upgrade +# security master + +#def is_unembargo(u): +# session = DBConn().session() +# cnf = Config() +# +# q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes) +# if q.rowcount > 0: +# session.close() +# return True +# +# oldcwd = os.getcwd() +# os.chdir(cnf["Dir::Queue::Disembargo"]) +# disdir = os.getcwd() +# os.chdir(oldcwd) +# +# ret = False +# +# if u.pkg.directory == 
disdir: +# if u.pkg.changes["architecture"].has_key("source"): +# session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes) +# session.commit() +# +# ret = True +# +# session.close() +# +# return ret +# +#def queue_unembargo(u, summary, short_summary, session=None): +# return package_to_queue(u, summary, short_summary, "Unembargoed", +# perms=0660, build=True, announce='process-unchecked.accepted') +# +################################################################################# +# +#def is_embargo(u): +# # if embargoed queues are enabled always embargo +# return True +# +#def queue_embargo(u, summary, short_summary, session=None): +# return package_to_queue(u, summary, short_summary, "Unembargoed", +# perms=0660, build=True, announce='process-unchecked.accepted') ################################################################################ @@ -275,12 +237,22 @@ def acknowledge_new(u, summary, short_summary, session=None): # q-unapproved hax0ring QueueInfo = { - "New": { "is": is_new, "process": acknowledge_new }, - "Autobyhand" : { "is" : is_autobyhand, "process": do_autobyhand }, - "Byhand" : { "is": is_byhand, "process": do_byhand }, - "OldStableUpdate" : { "is": is_oldstableupdate, - "process": do_oldstableupdate }, - "StableUpdate" : { "is": is_stableupdate, "process": do_stableupdate }, - "Unembargo" : { "is": is_unembargo, "process": queue_unembargo }, - "Embargo" : { "is": is_embargo, "process": queue_embargo }, + "new": { "is": is_new, "process": acknowledge_new }, + "autobyhand" : { "is" : is_autobyhand, "process": do_autobyhand }, + "byhand" : { "is": is_byhand, "process": do_byhand }, } + +def determine_target(u): + cnf = Config() + + # Statically handled queues + target = None + + for q in QueueInfo.keys(): + if QueueInfo[q]["is"](u): + target = q + + return target + +############################################################################### + diff --git a/docs/NEWS b/docs/NEWS deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/README.new-incoming b/docs/README.new-incoming deleted file mode 100644 index 8ebd0e2f..00000000 --- a/docs/README.new-incoming +++ /dev/null @@ -1,123 +0,0 @@ -[An updated version of the proposal sent to debian-devel-announce@l.d.o. - Debian-specific, but useful as a general overview of New Incoming.] - - New Incoming System - =================== - -This document outlines the new system for handling Incoming -directories on ftp-master and non-US. - -The old system: ---------------- - - o incoming was a world writable directory - - o incoming was available to everyone through http://incoming.debian.org/ - - o incoming was processed once a day by dinstall - - o uploads in incoming had to have been there > 24 hours before they - were REJECTed. If they were processed before that and had - problems they were SKIPped (with no notification to the maintainer - and/or uploader). - -The new system: ---------------- - - o There's 4 incoming directories: - - @ "unchecked" - where uploads from Queue Daemons and maintainers - initially go. - - @ "accepted" - where accepted packages stay until the daily - dinstall run. - - @ "new" - where NEW packages (and their dependents[1]) requiring - human processing go after being automatically - checked by dinstall. - - @ "byhand" - where BYHAND packages (and their dependents[1]) - requiring human intervention go after being - automatically checked by dinstall. 
- - In addition there's 3 support directories: - - @ "reject" - where rejected uploads go - - @ "done" - where the .changes files for packages that have been - installed go. - - @ "holding" - a temporary working area for dinstall to hold - packages while checking them. - - o Packages in 'unchecked' are automatically checked every 15 minutes - and are either: REJECT, ACCEPT, NEW or BYHAND. - - o Only 'unchecked' is locally world-writeable. The others are all, - of course, locally world-readable but only 'accepted' and 'byhand' - are publicly visible on http://incoming.debian.org/ - - o 'accepted' and 'byhand' are made available to the auto-builders so - they can build out of them. - - o 'accepted' is processed once a day as before. - - o Maintainer/uploader & list notification and bug closures are - changed to be done for ACCEPTs, not INSTALLs. - [Rationale: this reduces the load both on our list server and our - BTS server; it also gives people better notice of uploads to - avoid duplication of work especially, for example, in the case of - NMUs.] - [NB: see [3] for clarifications of when mails are sent.] - -Why: ----- - - o Security (no more replaceable file races) - o Integrity (new http://i.d.o contains only signed (+installable) uploads[2]) - o Needed for crypto-in-main integration - o Allows safe auto-building out of accepted - o Allows previously-prohibitively-expensive checks to be added to dinstall - o Much faster feedback on packages; no more 48 hour waits before - finding out your package has been REJECTed. - -What breaks: ------------- - - o people who upload packages but then want to retract or replace the - upload. - - * solution: mostly "Don't do that then"; i.e. test your uploads - properly. Uploads can still be replaced, simply by uploading a - higher versioned replacement. Total retraction is harder but - usually only relevant for NEW packages. - -================================================================================ - -[1] For versions of dependents meaning: binaries compiled from the - source of BYHAND or NEW uploads. Due to dak's fascist - source-must-exist checking, these binaries must be held back until - the BYHAND/NEW uploads are processed. - -[2] When this mail was initially written there was still at least one - upload queue which will accept unsigned uploads from any - source. [I've since discovered it's been deactivated, but not, - AFAIK because it allowed unsigned uploads.] - -[3] - --> reject - / - / -unchecked -----------------------------[*]------> accepted ---------------> pool - \ ^ ^ - | / / - |--> new -- / - | |[4] / - | V / - |--> byhand --/ - -[4] This is a corner case, included for completeness, ignore - it. 
[Boring details: NEW trumps BYHAND, so it's possible for a - upload with both BYHAND and NEW components to go from 'unchecked' - -> 'new' -> 'byhand' -> 'accepted'] - diff --git a/docs/manpages/Makefile b/docs/manpages/Makefile deleted file mode 100644 index 75cf3cc0..00000000 --- a/docs/manpages/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/make -f - -SGMLMANPAGES = check-overrides.1.sgml clean-suites.1.sgml control-overrides.1.sgml control-suite.1.sgml import-users-from-passwd.1.sgml ls.1.sgml make-maintainers.1.sgml override.1.sgml poolize.1.sgml process-accepted.1.sgml process-new.1.sgml rm.1.sgml - -MANPAGES = $(patsubst %.sgml, dak_%, $(SGMLMANPAGES)) - - -all: $(MANPAGES) - -dak_%: %.sgml - docbook2man $< > /dev/null - -clean: - rm -f $(MANPAGES) manpage.links manpage.log manpage.refs diff --git a/docs/manpages/check-overrides.1.sgml b/docs/manpages/check-overrides.1.sgml deleted file mode 100644 index a4a7c146..00000000 --- a/docs/manpages/check-overrides.1.sgml +++ /dev/null @@ -1,61 +0,0 @@ - - -%dakent; - -]> - - - &dak-docinfo; - - - dak_check-overrides - 1 - - - - - dak check-overrides - Utility to alter or display the contents of a suite - - - - - - dak check-overrides - - - - - Description</> - <para> - <command>dak check-overrides</command> is a cruft checker for overrides. - </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Notes</> - - <Para>dak check-overrides is not a good idea with New Incoming as it doesn't take into account queue/accepted. You can minimize the impact of this by running it immediately after 'dak process-accepted' but that's still racy because 'dak process-new' doesn't lock with 'dak process-accepted'. A better long term fix is the evil plan for accepted to be in the DB.</> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak check-overrides</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/clean-suites.1.sgml b/docs/manpages/clean-suites.1.sgml deleted file mode 100644 index 621bbc34..00000000 --- a/docs/manpages/clean-suites.1.sgml +++ /dev/null @@ -1,82 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_clean-suites</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak clean-suites</> - <refpurpose>Utility to clean out old packages</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak clean-suites</> - <arg><option><replaceable>options</replaceable></></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak clean-suites</command> is a utility to clean out old packages. It will clean out any binary packages not referenced by a suite and any source packages not referenced by a suite and not referenced by any binary packages. Cleaning is not actual deletion, but rather, removal of packages from the pool to a 'morgue' directory. The 'morgue' directory is split into dated sub-directories to keep things sane in big archives. 
- </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <variablelist> - <VarListEntry><term><option>-n/--no-action</option></> - <ListItem> - <Para>Don't actually clean any packages.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <refsect1> - <title>Configuration - dak clean-suites uses dak's configuration file. It follows the typical ISC configuration format as seen in ISC tools like bind 8 and dhcpd. Apart from being able to alter the defaults for command line options, the following configuration items are used: - - - Clean-Suites::StayOfExecution - - This is the number of seconds unreferenced packages are left before being cleaned. - - - - Clean-Suites::MorgueSubDir - - If not blank, this is the subdirectory in the morgue used to hold removed packages. - - - - - - Diagnostics</> - <para> - <command>dak clean-suites</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/control-overrides.1.sgml b/docs/manpages/control-overrides.1.sgml deleted file mode 100644 index 26440ad4..00000000 --- a/docs/manpages/control-overrides.1.sgml +++ /dev/null @@ -1,98 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_control-overrides</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak control-overrides</> - <refpurpose>Utility to manipulate the packages overrides</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak control-overrides</> - <arg><option><replaceable>options</replaceable></option></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak control-overrides</command> is the command line tool to handle override files. Override files can be listed or updated. - </para> - </refsect1> - <RefSect1><Title>Options</> - - <VariableList> - <varlistentry> - <term><option>-a/--add</option></term> - <listitem> - <para>Add entries to the override DB. 
Changes and deletions are ignored.</para> - </listitem> - </varlistentry> - - <VarListEntry><term><option>-c/--component=<replaceable>component</replaceable></option></> - <ListItem><Para>Uses the override DB for the component listed.</para> - </listitem> - </VarListEntry> - - <varlistentry> - <term><option>-h/--help</option></term> - <listitem> - <para>Display usage help and then exit.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-l/--list</option></term> - <listitem> - <para>Lists the override DB to stdout.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-q/--quiet</option></term> - <listitem> - <para>Be less verbose about what has been done.</para> - </listitem> - </varlistentry> - - <VarListEntry><term><option>-s/--suite=<replaceable>suite</replaceable></option></> - <ListItem><Para>Uses the override DB for the suite listed.</para></listitem> - </varlistentry> - - <VarListEntry><term><option>-S/--set</option></term> - <ListItem><Para>Set the override DB to the provided input.</PARA></LISTITEM> - </VarListEntry> - - <varlistentry> - <term><option>-t/--type=<replaceable>type</replaceable></option></term> - <listitem> - <para>Uses the override DB for the type listed. Possible values are: <literal>deb</literal>, <literal>udeb</literal> and <literal>dsc</literal>.</para> - </listitem> - </varlistentry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak control-overrides</command> returns zero on normal operation, non-zero on error. - </para> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/control-suite.1.sgml b/docs/manpages/control-suite.1.sgml deleted file mode 100644 index 12c89c5a..00000000 --- a/docs/manpages/control-suite.1.sgml +++ /dev/null @@ -1,82 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_control-suite</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak control-suite</> - <refpurpose>Utility to alter or display the contents of a suite</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak control-suite</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>file...</replaceable></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak control-suite</command> is a utility to alter or display the contents of a suite. Input for alterations is taken either from filename(s) supplied or stdin. The format for both input and output is lines each with a whitespace separated list of: <literal>package</literal>, <literal>version</literal> and <literal>architecture</literal>. 
- </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-a/--add=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Add to the suite.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-l/--list=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>List the contents of the suite.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-r/--remove=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Remove from the suite.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-s/--set=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Set the suite to exactly the input.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak control-suite</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/dak.ent b/docs/manpages/dak.ent deleted file mode 100644 index 1860e8e5..00000000 --- a/docs/manpages/dak.ent +++ /dev/null @@ -1,20 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> - -<!-- Boiler plate docinfo section --> -<!ENTITY dak-docinfo " - <docinfo> - <address><email>james@nocrew.org</email></address> - <author><firstname>James</firstname> <surname>Troup</surname></author> - <copyright><year>2000-2001</year> <holder>James Troup</holder></copyright> - <date>15 January 2001</date> - </docinfo> -"> - -<!-- Boiler plate Author section --> -<!ENTITY manauthor " - <RefSect1><Title>Author - - dak was written by James Troup james@nocrew.org. - - -"> diff --git a/docs/manpages/import-users-from-passwd.1.sgml b/docs/manpages/import-users-from-passwd.1.sgml deleted file mode 100644 index 0fd48511..00000000 --- a/docs/manpages/import-users-from-passwd.1.sgml +++ /dev/null @@ -1,94 +0,0 @@ - - -%dakent; - -]> - - - &dak-docinfo; - - - dak_import-users-from-passwd - 1 - - - - - dak import-users-from-passwd - Utility to sync PostgreSQL users with system users - - - - - - dak import-users-from-passwd - - - - - Description</> - <para> - <command>dak import-users-from-passwd</command> is a utility to sync PostgreSQL's user database with the system's users. It is designed to allow the use of 'peer sameuser' authentication. It simply adds any users in the password file into PostgreSQL's pg_user table if they are already not there. It will also warn you about users who are in the pg_user table but not in the password file. - </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-n/--no-action<replaceable></replaceable></option></> - <ListItem> - <Para>Don't actually do anything.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-q/--quiet<replaceable></replaceable></option></> - <ListItem> - <Para>Be quiet, i.e. display as little output as possible.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-v/--verbose</option></> - <ListItem> - <Para>Be verbose, i.e. 
display more output than normal.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <refsect1> - <title>Configuration - dak import-users-from-passwd uses dak's configuration file. It follows the typical ISC configuration format as seen in ISC tools like bind 8 and dhcpd. Apart from being able to alter the defaults for command line options, the following configuration items are used: - - - Import-Users-From-Passwd::ValidGID - - Each user's primary GID is compared with this, if it's not blank. If they match, the user will be processed, if they don't, the user will be skipped. - - - - Import-Users-From-Passwd::KnownPostgresUsers - - This is a comma-separated list of users who are in PostgreSQL's pg_user table but are not in the password file. - - - - - - Diagnostics</> - <para> - <command>dak import-users-from-passwd</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/ls.1.sgml b/docs/manpages/ls.1.sgml deleted file mode 100644 index c7c4f29a..00000000 --- a/docs/manpages/ls.1.sgml +++ /dev/null @@ -1,104 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_ls</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak ls</> - <refpurpose>Utility to display information about packages</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak ls</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>package</replaceable></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak ls</command> is a utility to display information about packages, specificaly what suites they are in and for which architectures. - </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-a/--architecture=<replaceable>architecture</replaceable></option></> - <ListItem> - <Para>Only list package information for the listed architecture(s).</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-b/--binary-type=<replaceable>binary type</replaceable></option></> - <ListItem> - <Para>Only show package information for the binary type ('deb' or 'udeb').</PARA> - </LISTITEM> - </VarListEntry> - - <varlistentry><term><option>-c/--component=<replaceable>component</replaceable></option></term> - <listitem> - <para>Only list package information for the listed component(s).</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-g/--greaterorequal</option></term> - <term><option>-G/--greaterthan</option></term> - <listitem> - <para>Determine the highest version of each package in the target suite (which is forced to just unstable if one was not specificed) and, in addition to the normal output, also print a line suitable for sending in a reply to a buildd as a 'dep-wait' command. For <option>-g/--greaterorequal</option>, the versioned dependency is a >= one, e.g. <literallayout>dep-retry libgdbm-dev (>= 1.8.3-2)</literallayout></para> - <para>And for <option>-G/--greaterthan</option>, the versioned dependency is a >> one, e.g. 
<literallayout>dep-retry libflac-dev (>> 1.1.0-10)</literallayout></para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-r/--regex</option></term> - <listitem> - <para>Treat the <replaceable>package</replaceable> argument as a regex, rather than doing an exact search.</para> - </listitem> - </varlistentry> - - <VarListEntry><term><option>-s/--suite=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Only list package information for the suite(s) listed.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-S/--source-and-binary</option></> - <ListItem> - <Para>For each package which is a source package, also show information about the binary packages it produces.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak ls</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/make-maintainers.1.sgml b/docs/manpages/make-maintainers.1.sgml deleted file mode 100644 index 8cc324ce..00000000 --- a/docs/manpages/make-maintainers.1.sgml +++ /dev/null @@ -1,85 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_make-maintainers</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak make-maintainers</> - <refpurpose>Utility to generate an index of packages' maintainers</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak make-maintainers</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>extra file...</replaceable></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak make-maintainers</command> is a utility to generate an index of packages' maintainers. The output format is: -<literallayout>package~version maintainer</literallayout> - The input format of extra files can be either this form or the old style, which is similar but lacks the version number, i.e.: -<literallayout>package maintainer</literallayout> - dak make-maintainers will auto-detect the layout of each extra file. If the extra file is in the old-style format, the records in it are assumed to supersede any that were seen earlier (i.e. either from earlier extra files or generated from the SQL). - </Para> - <para> - dak make-maintainers determines the maintainer of a package by comparing suite priority (see 'Configuration') and then version number. - </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <variablelist> - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <refsect1> - <title>Configuration - dak make-maintainers uses dak's configuration file. It follows the typical ISC configuration format as seen in ISC tools like bind 8 and dhcpd. Apart from being able to alter the defaults for command line options, the following configuration items are used: - - - Suite::<SUITE>::Priority - - Suite priority overrides the version checks dak make-maintainers does.
A package in a higher priority suite overrides versions in lower priority suites even if the version number in the higher priority suite is older. - - - - - - - New versus Old output format - Converting the new output format to the old output format is easy with some simple sed + awk, e.g. -sed -e "s/~[^ ]*\([ ]\)/\1/" | awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}' - - - - Diagnostics</> - <para> - <command>dak make-maintainers</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/override.1.sgml b/docs/manpages/override.1.sgml deleted file mode 100644 index 12afac55..00000000 --- a/docs/manpages/override.1.sgml +++ /dev/null @@ -1,87 +0,0 @@ -<!-- -*- mode: sgml -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_override</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak override</> - <refpurpose>Make micromodifications or queries to the overrides table</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak override</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>package</replaceable></arg> - <arg><option><replaceable>section</replaceable></></arg> - <arg><option><replaceable>priority</replaceable></></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak override</command> makes micromodifications to, or queries, the overrides table. - </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-h/--help</option></> - <ListItem> - <Para>Show help and then exit.</PARA> - </LISTITEM> - </VarListEntry> - <VarListEntry><term><option>-d/--done=<replaceable>BUG#</replaceable></option></> - <ListItem> - <Para>Close the listed bugs as part of adjusting the overrides.</PARA> - </LISTITEM> - </VarListEntry> - <VarListEntry><term><option>-n/--no-action</option></> - <ListItem> - <Para>Show what dak override would do but make no changes.</PARA> - </LISTITEM> - </VarListEntry> - <VarListEntry><term><option>-s/--suite=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Affect the overrides in the suite listed. The default is <literal>unstable</literal>.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Common use</> - <para> - <command>dak override</command> when invoked with only a package name will tell you what section and priority the given package has. - </PARA> - <para> - <command>dak override</command> when invoked with a package and one or two other values will set the section and/or priority to the values given. You may use a single period ('.') to represent "do not change", or you can omit the value you do not want to change. - </PARA> - </RefSect1> - <RefSect1><Title>Notes</> - - <Para><command>dak override</command> essentially lets you do what <command>dak control-overrides</command> does, only on the microscopic rather than the macroscopic scale. Use with care.</> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak override</command> returns zero on normal operation, non-zero on error.
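As a quick illustration of the two common-use cases described above (querying an override, then setting one), a sketch of the invocation syntax only; the package name 'hello' and the section value 'utils' are placeholders, and the '-s' option and the '.' convention are taken from the option list and Common use text above:

    # Query the current section and priority of a (placeholder) package:
    dak override -s unstable hello
    # Set its section to 'utils' and leave the priority untouched ('.' means "do not change"):
    dak override -s unstable hello utils .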
- </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/process-accepted.1.sgml b/docs/manpages/process-accepted.1.sgml deleted file mode 100644 index 1f3cf4e6..00000000 --- a/docs/manpages/process-accepted.1.sgml +++ /dev/null @@ -1,100 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_process-accepted</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak process-accepted</> - <refpurpose>Installs packages from accepted</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak process-accepted</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>changes_file</replaceable></arg> - <arg><option><replaceable>...</replaceable></option></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak process-accepted</command> is the program which installs packages from the accepted directory into the distribution. - </PARA></REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - - <varlistentry> - <term><option>-a/--automatic</option></term> - <listitem> - <para>Run automatically; i.e. perform the default action if it's possible to do so without user interaction. Intended for use in cron jobs and the like.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-h/--help</option></term> - <listitem> - <para>Display usage help and then exit.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-m/--manual-reject=<replaceable>message</replaceable></option></term> - <listitem> - <para>Perform a manual rejection of the package. The <replaceable>message</replaceable> is included in the rejection notice sent to the uploader. If no <replaceable>message</replaceable> is given, an editor will be spawned so one can be added to the rejection notice. - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-n/--no-action</option></term> - <listitem> - <para>Don't actually install anything; just show what would be done.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-p/--no-lock</option></term> - <listitem> - <para>Don't check the lockfile. Obviously dangerous and should only be used for cron jobs (if at all).</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-v/--version</option></term> - <listitem> - <para>Display the version number and then exit.</para> - </listitem> - </varlistentry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak process-accepted</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - <refsect1> - <title>Acknowledgements - dak process-accepted is based very heavily on dinstall, written by Guy Maor maor@debian.org; in fact it started out life as a dinstall clone.
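For completeness, a minimal dry-run sketch based on the synopsis and the -n/--no-action option described above; the .changes filename is a placeholder:

    # Show what would be installed from accepted, without actually doing it:
    dak process-accepted -n example_1.0-1_amd64.changes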
- - - &manauthor; - - diff --git a/docs/manpages/process-new.1.sgml b/docs/manpages/process-new.1.sgml deleted file mode 100644 index f99c6cfc..00000000 --- a/docs/manpages/process-new.1.sgml +++ /dev/null @@ -1,95 +0,0 @@ - - -%dakent; - -]> - - - &dak-docinfo; - - - dak_process-new - 1 - - - - - dak process-new - Processes BYHAND and NEW packages - - - - - - dak process-new - - changes_file - - - - - Description</> - <para> - <command>dak process-new</command> is the program which processes BYHAND and NEW packages. - </PARA></REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - - <varlistentry> - <term><option>-a/--automatic</option></term> - <listitem> - <para>Run automatically; i.e. perform the default action if it's possible to do so without user interaction. Intended for use in cron jobs and the like.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-h/--help</option></term> - <listitem> - <para>Display usage help and then exit.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-m/--manual-reject=<replaceable>message</replaceable></option></term> - <listitem> - <para>Perform a manual rejection of the package. The <replaceable>message</replaceable> is included in the rejection notice sent to the uploader. If no <replaceable>message</replaceable> is given, an editor will be spawned so one can be added to the rejection notice. - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-n/--no-action</option></term> - <listitem> - <para>Don't actually install anything; just show what would be done.</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-p/--no-lock</option></term> - <listitem> - <para>Don't check the lockfile. Obviously dangerous and should only be used for cron jobs (if at all).</para> - </listitem> - </varlistentry> - - <varlistentry> - <term><option>-v/--version</option></term> - <listitem> - <para>Display the version number and then exit.</para> - </listitem> - </varlistentry> - - </VariableList> - </RefSect1> - - <RefSect1><Title>Diagnostics</> - <para> - <command>dak process-new</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/docs/manpages/rm.1.sgml b/docs/manpages/rm.1.sgml deleted file mode 100644 index 5b2eaf93..00000000 --- a/docs/manpages/rm.1.sgml +++ /dev/null @@ -1,215 +0,0 @@ -<!-- -*- mode: sgml; mode: fold -*- --> -<!doctype refentry PUBLIC "-//OASIS//DTD DocBook V3.1//EN" [ - -<!ENTITY % dakent SYSTEM "dak.ent"> -%dakent; - -]> - -<refentry> - &dak-docinfo; - - <refmeta> - <refentrytitle>dak_rm</> - <manvolnum>1</> - </refmeta> - - <!-- Man page title --> - <refnamediv> - <refname>dak rm</> - <refpurpose>Utility to remove/add packages from suites</> - </refnamediv> - - <!-- Arguments --> - <refsynopsisdiv> - <cmdsynopsis> - <command>dak rm</> - <arg><option><replaceable>options</replaceable></></arg> - <arg choice="plain"><replaceable>package</replaceable></arg> - <arg><option><replaceable>...</replaceable></option></arg> - </cmdsynopsis> - </refsynopsisdiv> - - <RefSect1><Title>Description</> - <para> - <command>dak rm</command> is the command line tool to add and remove package sets from suites with enforced logging, optional bug closing and override updates.
- </PARA> - </REFSECT1> - - <RefSect1><Title>Options</> - - <VariableList> - <VarListEntry><term><option>-a/--architecture=<replaceable>architecture</replaceable></option></> - <ListItem> - <Para>Restrict the packages being considered to the architecture(s) listed.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-b/--binary</option></> - <ListItem> - <Para>Only look at binary packages.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-c/--component=<replaceable>component</replaceable></option></> - <ListItem> - <Para>Restrict the packages being considered to those found in the component(s) listed. The default is <literal>main</literal>.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-C/--carbon-copy=<replaceable>[ bug number | 'package' | email address ]</replaceable></option></> - <ListItem> - <Para>Carbon copy the bug closing mail to the address(es) given. If the removal was not requested by the maintainer, this option should always be used to inform the maintainer of the package's removal. 3 types of address are accepted.</PARA> - <itemizedlist> - <listitem> - <para>number - assumed to be a bug number, and expanded to nnnnn@bugs.debian.org.</para> - </listitem> - <listitem> - <para>'<literal>package</literal>' - carbon copy package@package.debian.org for each package given as an argument.</para> - </listitem> - <listitem> - <para>anything containing '@' - assumed to be an email address, and carbon copied as is.</para> - </listitem> - </itemizedlist> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-d/--done=<replaceable>done</replaceable></option></> - <ListItem> - <Para>Close the bug(s) listed on successful completion.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-m/--reason=<replaceable>reason</replaceable></option></> - <ListItem> - <Para>The reason for the removal or addition of the package(s). This is a required option; if not provided an editor will be spawned so the reason can be added there.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-n/--no-action</option></> - <ListItem> - <Para>Don't actually do anything; just show what would be done.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-p/--partial</option></> - <ListItem> - <Para>Partial removal of a package, so the package is not removed from the overrides. This option is implied by <option>-a/--architecture</option>.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-R/--rdep-check</option></> - <ListItem> - <Para>Check the reverse dependencies (and build-dependencies) of the packages that are to be removed and warn if anything will break.</PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-s/--suite=<replaceable>suite</replaceable></option></> - <ListItem> - <Para>Only add/remove the packages from the suite(s) listed. The default is <literal>unstable</literal></PARA> - </LISTITEM> - </VarListEntry> - - <VarListEntry><term><option>-S/--source-only</option></> - <ListItem> - <Para>Only look at source packages.</PARA> - </LISTITEM> - </VarListEntry> - - </VariableList> - </RefSect1> - - <refsect1> - <title>How packages are chosen - There are 3 methods for selecting packages. - - - Source + Binaries. (default) - In this mode dak rm will assume each of the package(s) passed as arguments are source packages and will also remove any binary packages built from these source packages. - - - Binary only. 
- Only binary packages are searched; source packages are ignored. This mode is chosen by use of the -b/--binary switch. - This should only be used for orphaned binary packages (i.e. those no longer built by source packages); otherwise, in any system (e.g. Debian) which has auto-building, pointless (and uninstallable) recompiles will be triggered. - - - Source only. - Only source packages are searched; binary packages are ignored. This mode is chosen by use of the -S/--source-only switch. - - - - - - Configuration - dak rm uses dak's configuration file. It follows the typical ISC configuration format as seen in ISC tools like bind 8 and dhcpd. Apart from being able to alter the defaults for command line options, the following configuration items are used: - - - Rm::MyEmailAddress - - This is used as the From: line for bug closing mails as per the -d/--done command line switch. It, obviously, must be a valid RFC-822 email address. - - - - Rm::LogFile - - This is the (absolute) file name of the logfile that dak rm unconditionally writes to. It cannot be empty or point to an invalid file. - - - - - - - Examples - The first example is of a source+binaries package removal. - - -$ dak rm -d 68136 -m "Requested by tbm@; confirmed with maintainer. Superseded by libgmp2" gmp1 -Working... done. -Will remove the following packages from unstable: - - gmp1 | 1.3.2-8.2 | source, alpha, hppa, arm, i386, m68k, powerpc, sparc - gmp1-dev | 1.3.2-8.2 | alpha, hppa, arm, i386, m68k, powerpc, sparc - - -------------------- Reason ------------------- -Requested by tbm@; confirmed with maintainer. Superseded by libgmp2 ----------------------------------------------- - -Continue (y/N)? y - Deleting... done. -$ - - - The second example is of a binary-only multi-package removal. - - -$ dak rm -d 82562 -m "Requested by paul@; NBS." -b libgtkextra{5,9,10} -Working... done. -Will remove the following packages from unstable: - -libgtkextra10 | 0.99.10-2 | alpha, i386, m68k, powerpc, sparc -libgtkextra5 | 0.99.5-1 | alpha, i386, m68k, powerpc, sparc -libgtkextra9 | 0.99.9-1 | alpha, i386, m68k, powerpc, sparc - -Will also close bugs: 82562 - -------------------- Reason ------------------- -Requested by paul@; NBS. ----------------------------------------------- - -Continue (y/N)? y - Deleting... done. -$ - - - - - Diagnostics</> - <para> - <command>dak rm</command> returns zero on normal operation, non-zero on error. - </PARA> - </RefSect1> - - &manauthor; - -</refentry> diff --git a/web/index.html b/web/index.html index 1b684159..9f0adcae 100644 --- a/web/index.html +++ b/web/index.html @@ -112,22 +112,6 @@ <p>The source is managed in git and is available from: <a href="http://ftp-master.debian.org/git/">http://ftp-master.debian.org/git/</a></p> - - <p>The old bzr tree is obsolete and no longer available. All - information in it is now in git.</p> - - <p>The old CVS tree is obsolete but still available for historical purposes. - It's at <strong>:pserver:anonymous@cvs.debian.org:/cvs/dak</strong>; - the module is 'dak' and the login password is blank.
- The old CVS repository can be <a href="http://cvs.debian.org/?cvsroot=dak">browsed</a> - courtesy of viewcvs.</p> - - <p>You can also install the <a href="http://packages.debian.org/unstable/devel/dak">dak Package</a> - if you want to look at it and maybe run your own copy.</p> - - <p>The <strong>really</strong> old dinstall scripts are still available - from <strong>:pserver:anonymous@cvs.debian.org:/cvs/ftp-maint</strong>; - the modules are 'dinstall' and 'masterfiles'.</p> </div> <div id="new"> diff --git a/web/x.png b/web/x.png deleted file mode 100644 index b759b0a3..00000000 Binary files a/web/x.png and /dev/null differ diff --git a/web/x4.png b/web/x4.png deleted file mode 100644 index bc376286..00000000 Binary files a/web/x4.png and /dev/null differ diff --git a/web/x5.png b/web/x5.png deleted file mode 100644 index 022f519c..00000000 Binary files a/web/x5.png and /dev/null differ