# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import commands
+import codecs
import datetime
import email.Header
import os
import pwd
+import grp
import select
import socket
import shutil
import re
import email as modemail
import subprocess
+import ldap
+import errno
+import daklib.config as config
+import daklib.daksubprocess
from dbconn import DBConn, get_architecture, get_component, get_suite, \
get_override_type, Keyring, session_wrapper, \
- get_active_keyring_paths, get_primary_keyring_path, \
- get_suite_architectures, get_or_set_metadatakey, DBSource
+ get_active_keyring_paths, \
+ get_suite_architectures, get_or_set_metadatakey, DBSource, \
+ Component, Override, OverrideType
from sqlalchemy import desc
from dak_exceptions import *
from gpg import SignedFile
from textutils import fix_maintainer
from regexes import re_html_escaping, html_escaping, re_single_line_field, \
re_multi_line_field, re_srchasver, re_taint_free, \
- re_gpg_uid, re_re_mark, re_whitespace_comment, re_issource, \
- re_is_orig_source, re_build_dep_arch
+ re_re_mark, re_whitespace_comment, re_issource, \
+ re_build_dep_arch, re_parse_maintainer
from formats import parse_format, validate_changes_format
from srcformats import get_format_from_string
################################################################################
default_config = "/etc/dak/dak.conf" #: default dak config, defines host properties
-default_apt_config = "/etc/dak/apt.conf" #: default apt config, not normally used
alias_cache = None #: Cache for email alias checks
key_uid_email_cache = {} #: Cache for email addresses from gpg key uids
# code in lenny's Python. This also affects commands.getoutput and
# commands.getstatus.
def dak_getstatusoutput(cmd):
- pipe = subprocess.Popen(cmd, shell=True, universal_newlines=True,
+ pipe = daklib.daksubprocess.Popen(cmd, shell=True, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = pipe.stdout.read()
# Expand default component
if component == "":
- comp = get_component(section, session)
- if comp is None:
- component = "main"
- else:
- component = comp.component_name
+ component = "main"
return (section, component)
"-----BEGIN PGP SIGNATURE-----".
"""
- changes_in = open_file(filename)
- content = changes_in.read()
- changes_in.close()
+ with open_file(filename) as changes_in:
+ content = changes_in.read()
try:
unicode(content, 'utf-8')
except UnicodeError:
missingfields.append(keyword)
if len(missingfields):
- raise ParseChangesError("Missing mandantory field(s) in changes file (policy 5.5): %s" % (missingfields))
+ raise ParseChangesError("Missing mandatory field(s) in changes file (policy 5.5): %s" % (missingfields))
return changes
################################################################################
-def create_hash(where, files, hashname, hashfunc):
- """
- create_hash extends the passed files dict with the given hash by
- iterating over all files on disk and passing them to the hashing
- function given.
- """
-
- rejmsg = []
- for f in files.keys():
- try:
- file_handle = open_file(f)
- except CantOpenError:
- rejmsg.append("Could not open file %s for checksumming" % (f))
- continue
-
- files[f][hash_key(hashname)] = hashfunc(file_handle)
-
- file_handle.close()
- return rejmsg
-
-################################################################################
-
-def check_hash(where, files, hashname, hashfunc):
- """
- check_hash checks the given hash in the files dict against the actual
- files on disk. The hash values need to be present consistently in
- all file entries. It does not modify its input in any way.
- """
-
- rejmsg = []
- for f in files.keys():
- file_handle = None
- try:
- try:
- file_handle = open_file(f)
-
- # Check for the hash entry, to not trigger a KeyError.
- if not files[f].has_key(hash_key(hashname)):
- rejmsg.append("%s: misses %s checksum in %s" % (f, hashname,
- where))
- continue
-
- # Actually check the hash for correctness.
- if hashfunc(file_handle) != files[f][hash_key(hashname)]:
- rejmsg.append("%s: %s check failed in %s" % (f, hashname,
- where))
- except CantOpenError:
- # TODO: This happens when the file is in the pool.
- # warn("Cannot open file %s" % f)
- continue
- finally:
- if file_handle:
- file_handle.close()
- return rejmsg
-
-################################################################################
-
-def check_size(where, files):
- """
- check_size checks the file sizes in the passed files dict against the
- files on disk.
- """
-
- rejmsg = []
- for f in files.keys():
- try:
- entry = os.stat(f)
- except OSError as exc:
- if exc.errno == 2:
- # TODO: This happens when the file is in the pool.
- continue
- raise
-
- actual_size = entry[stat.ST_SIZE]
- size = int(files[f]["size"])
- if size != actual_size:
- rejmsg.append("%s: actual file size (%s) does not match size (%s) in %s"
- % (f, actual_size, size, where))
- return rejmsg
-
-################################################################################
-
-def check_dsc_files(dsc_filename, dsc=None, dsc_files=None):
+def check_dsc_files(dsc_filename, dsc, dsc_files):
"""
Verify that the files listed in the Files field of the .dsc are
those expected given the announced Format.
"""
rejmsg = []
- # Parse the file if needed
- if dsc is None:
- dsc = parse_changes(dsc_filename, signing_rules=1, dsc_file=1);
-
- if dsc_files is None:
- dsc_files = build_file_list(dsc, is_a_dsc=1)
-
# Ensure .dsc lists proper set of source files according to the format
# announced
has = defaultdict(lambda: 0)
(r'tar.gz', ('native_tar_gz', 'native_tar')),
(r'debian\.tar\.(gz|bz2|xz)', ('debian_tar',)),
(r'orig\.tar\.(gz|bz2|xz)', ('orig_tar',)),
+ (r'orig\.tar\.(gz|bz2|xz)\.asc', ('orig_tar_sig',)),
(r'tar\.(gz|bz2|xz)', ('native_tar',)),
(r'orig-.+\.tar\.(gz|bz2|xz)', ('more_orig_tar',)),
+ (r'orig-.+\.tar\.(gz|bz2|xz)\.asc', ('more_orig_tar_sig',)),
)
- for f in dsc_files.keys():
+ for f in dsc_files:
m = re_issource.match(f)
if not m:
rejmsg.append("%s: %s in Files field not recognised as source."
reject("%s: unexpected source file '%s'" % (dsc_filename, f))
# Check for multiple files
- for file_type in ('orig_tar', 'native_tar', 'debian_tar', 'debian_diff'):
+ for file_type in ('orig_tar', 'orig_tar_sig', 'native_tar', 'debian_tar', 'debian_diff'):
if has[file_type] > 1:
rejmsg.append("%s: lists multiple %s" % (dsc_filename, file_type))
################################################################################
-def check_hash_fields(what, manifest):
- """
- check_hash_fields ensures that there are no checksum fields in the
- given dict that we do not know about.
- """
-
- rejmsg = []
- hashes = map(lambda x: x[0], known_hashes)
- for field in manifest:
- if field.startswith("checksums-"):
- hashname = field.split("-",1)[1]
- if hashname not in hashes:
- rejmsg.append("Unsupported checksum field for %s "\
- "in %s" % (hashname, what))
- return rejmsg
-
-################################################################################
-
-def _ensure_changes_hash(changes, format, version, files, hashname, hashfunc):
- if format >= version:
- # The version should contain the specified hash.
- func = check_hash
-
- # Import hashes from the changes
- rejmsg = parse_checksums(".changes", files, changes, hashname)
- if len(rejmsg) > 0:
- return rejmsg
- else:
- # We need to calculate the hash because it can't possibly
- # be in the file.
- func = create_hash
- return func(".changes", files, hashname, hashfunc)
-
-# We could add the orig which might be in the pool to the files dict to
-# access the checksums easily.
-
-def _ensure_dsc_hash(dsc, dsc_files, hashname, hashfunc):
- """
- ensure_dsc_hashes' task is to ensure that each and every *present* hash
- in the dsc is correct, i.e. identical to the changes file and if necessary
- the pool. The latter task is delegated to check_hash.
- """
-
- rejmsg = []
- if not dsc.has_key('Checksums-%s' % (hashname,)):
- return rejmsg
- # Import hashes from the dsc
- parse_checksums(".dsc", dsc_files, dsc, hashname)
- # And check it...
- rejmsg.extend(check_hash(".dsc", dsc_files, hashname, hashfunc))
- return rejmsg
-
-################################################################################
-
-def parse_checksums(where, files, manifest, hashname):
- rejmsg = []
- field = 'checksums-%s' % hashname
- if not field in manifest:
- return rejmsg
- for line in manifest[field].split('\n'):
- if not line:
- break
- clist = line.strip().split(' ')
- if len(clist) == 3:
- checksum, size, checkfile = clist
- else:
- rejmsg.append("Cannot parse checksum line [%s]" % (line))
- continue
- if not files.has_key(checkfile):
- # TODO: check for the file's entry in the original files dict, not
- # the one modified by (auto)byhand and other weird stuff
- # rejmsg.append("%s: not present in files but in checksums-%s in %s" %
- # (file, hashname, where))
- continue
- if not files[checkfile]["size"] == size:
- rejmsg.append("%s: size differs for files and checksums-%s entry "\
- "in %s" % (checkfile, hashname, where))
- continue
- files[checkfile][hash_key(hashname)] = checksum
- for f in files.keys():
- if not files[f].has_key(hash_key(hashname)):
- rejmsg.append("%s: no entry in checksums-%s in %s" % (f, hashname, where))
- return rejmsg
-
-################################################################################
-
# Dropped support for 1.4 and ``buggy dchanges 3.4'' (?!) compared to di.pl
def build_file_list(changes, is_a_dsc=0, field="files", hashname="md5sum"):
################################################################################
-# see http://bugs.debian.org/619131
-def build_package_list(dsc, session = None):
- if not dsc.has_key("package-list"):
- return {}
-
- packages = {}
+def send_mail (message, filename="", whitelists=None):
+ """sendmail wrapper, takes _either_ a message string or a file as arguments
- for line in dsc["package-list"].split("\n"):
- if not line:
- break
-
- fields = line.split()
- name = fields[0]
- package_type = fields[1]
- (section, component) = extract_component_from_section(fields[2])
- priority = fields[3]
-
- # Validate type if we have a session
- if session and get_override_type(package_type, session) is None:
- # Maybe just warn and ignore? exit(1) might be a bit hard...
- utils.fubar("invalid type (%s) in Package-List." % (package_type))
-
- if name not in packages or packages[name]["type"] == "dsc":
- packages[name] = dict(priority=priority, section=section, type=package_type, component=component, files=[])
-
- return packages
-
-################################################################################
-
-def send_mail (message, filename=""):
- """sendmail wrapper, takes _either_ a message string or a file as arguments"""
+ @type whitelists: list of (str or None)
+ @param whitelists: path to whitelists. C{None} or an empty list whitelists
+ everything, otherwise an address is whitelisted if it is
+ included in any of the lists.
+ In addition a global whitelist can be specified in
+ Dinstall::MailWhiteList.
+ """
maildir = Cnf.get('Dir::Mail')
if maildir:
path = os.path.join(maildir, datetime.datetime.now().isoformat())
path = find_next_free(path)
- fh = open(path, 'w')
- print >>fh, message,
- fh.close()
+ with open(path, 'w') as fh:
+ print >>fh, message,
# Check whether we're supposed to be sending mail
if Cnf.has_key("Dinstall::Options::No-Mail") and Cnf["Dinstall::Options::No-Mail"]:
os.write (fd, message)
os.close (fd)
- if Cnf.has_key("Dinstall::MailWhiteList") and \
- Cnf["Dinstall::MailWhiteList"] != "":
- message_in = open_file(filename)
- message_raw = modemail.message_from_file(message_in)
- message_in.close();
+ if whitelists is None or None in whitelists:
+ whitelists = []
+ if Cnf.get('Dinstall::MailWhiteList', ''):
+ whitelists.append(Cnf['Dinstall::MailWhiteList'])
+ if len(whitelists) != 0:
+ with open_file(filename) as message_in:
+ message_raw = modemail.message_from_file(message_in)
whitelist = [];
- whitelist_in = open_file(Cnf["Dinstall::MailWhiteList"])
- try:
+ for path in whitelists:
+ with open_file(path, 'r') as whitelist_in:
for line in whitelist_in:
if not re_whitespace_comment.match(line):
if re_re_mark.match(line):
whitelist.append(re.compile(re_re_mark.sub("", line.strip(), 1)))
else:
whitelist.append(re.compile(re.escape(line.strip())))
- finally:
- whitelist_in.close()
# Fields to check.
fields = ["To", "Bcc", "Cc"]
mail_whitelisted = 1
break
if not mail_whitelisted:
- print "Skipping %s since it's not in %s" % (item, Cnf["Dinstall::MailWhiteList"])
+ print "Skipping {0} since it's not whitelisted".format(item)
continue
match.append(item)
################################################################################
-def poolify (source, component):
- if component:
- component += '/'
+def poolify (source, component=None):
if source[:3] == "lib":
- return component + source[:4] + '/' + source + '/'
+ return source[:4] + '/' + source + '/'
else:
- return component + source[:1] + '/' + source + '/'
+ return source[:1] + '/' + source + '/'
################################################################################
dest_dir = dest
else:
dest_dir = os.path.dirname(dest)
- if not os.path.exists(dest_dir):
+ if not os.path.lexists(dest_dir):
umask = os.umask(00000)
os.makedirs(dest_dir, 0o2775)
os.umask(umask)
if os.path.exists(dest) and os.path.isdir(dest):
dest += '/' + os.path.basename(src)
# Don't overwrite unless forced to
- if os.path.exists(dest):
+ if os.path.lexists(dest):
if not overwrite:
fubar("Can't move %s to %s - file already exists." % (src, dest))
else:
if os.path.exists(dest) and os.path.isdir(dest):
dest += '/' + os.path.basename(src)
# Don't overwrite unless forced to
- if os.path.exists(dest):
+ if os.path.lexists(dest):
if not overwrite:
raise FileExistsError
else:
################################################################################
-def where_am_i ():
- res = socket.getfqdn()
- database_hostname = Cnf.get("Config::" + res + "::DatabaseHostname")
- if database_hostname:
- return database_hostname
- else:
- return res
-
def which_conf_file ():
if os.getenv('DAK_CONFIG'):
return os.getenv('DAK_CONFIG')
homedir = os.getenv("HOME")
confpath = os.path.join(homedir, "/etc/dak.conf")
if os.path.exists(confpath):
- apt_pkg.ReadConfigFileISC(Cnf,confpath)
+ apt_pkg.read_config_file_isc(Cnf,confpath)
# We are still in here, so there is no local config file or we do
# not allow local files. Do the normal stuff.
return default_config
-def which_apt_conf_file ():
- res = socket.getfqdn()
- # In case we allow local config files per user, try if one exists
- if Cnf.find_b("Config::" + res + "::AllowLocalConfig"):
- homedir = os.getenv("HOME")
- confpath = os.path.join(homedir, "/etc/dak.conf")
- if os.path.exists(confpath):
- apt_pkg.ReadConfigFileISC(Cnf,default_config)
-
- if Cnf.get("Config::" + res + "::AptConfig"):
- return Cnf["Config::" + res + "::AptConfig"]
- else:
- return default_apt_config
-
-def which_alias_file():
- hostname = socket.getfqdn()
- aliasfn = '/var/lib/misc/'+hostname+'/forward-alias'
- if os.path.exists(aliasfn):
- return aliasfn
- else:
- return None
-
################################################################################
def TemplateSubst(subst_map, filename):
""" Perform a substition of template """
- templatefile = open_file(filename)
- template = templatefile.read()
+ with open_file(filename) as templatefile:
+ template = templatefile.read()
for k, v in subst_map.iteritems():
template = template.replace(k, str(v))
- templatefile.close()
return template
################################################################################
################################################################################
-def cc_fix_changes (changes):
- o = changes.get("architecture", "")
- if o:
- del changes["architecture"]
- changes["architecture"] = {}
- for j in o.split():
- changes["architecture"][j] = 1
-
-def changes_compare (a, b):
- """ Sort by source name, source version, 'have source', and then by filename """
- try:
- a_changes = parse_changes(a)
- except:
- return -1
-
- try:
- b_changes = parse_changes(b)
- except:
- return 1
-
- cc_fix_changes (a_changes)
- cc_fix_changes (b_changes)
-
- # Sort by source name
- a_source = a_changes.get("source")
- b_source = b_changes.get("source")
- q = cmp (a_source, b_source)
- if q:
- return q
-
- # Sort by source version
- a_version = a_changes.get("version", "0")
- b_version = b_changes.get("version", "0")
- q = apt_pkg.version_compare(a_version, b_version)
- if q:
- return q
-
- # Sort by 'have source'
- a_has_source = a_changes["architecture"].get("source")
- b_has_source = b_changes["architecture"].get("source")
- if a_has_source and not b_has_source:
- return -1
- elif b_has_source and not a_has_source:
- return 1
-
- # Fall back to sort by filename
- return cmp(a, b)
-
-################################################################################
-
def find_next_free (dest, too_many=100):
extra = 0
orig_dest = dest
- while os.path.exists(dest) and extra < too_many:
+ while os.path.lexists(dest) and extra < too_many:
dest = orig_dest + '.' + repr(extra)
extra += 1
if extra >= too_many:
################################################################################
-def validate_changes_file_arg(filename, require_changes=1):
- """
- 'filename' is either a .changes or .dak file. If 'filename' is a
- .dak file, it's changed to be the corresponding .changes file. The
- function then checks if the .changes file a) exists and b) is
- readable and returns the .changes filename if so. If there's a
- problem, the next action depends on the option 'require_changes'
- argument:
-
- - If 'require_changes' == -1, errors are ignored and the .changes
- filename is returned.
- - If 'require_changes' == 0, a warning is given and 'None' is returned.
- - If 'require_changes' == 1, a fatal error is raised.
-
- """
- error = None
-
- orig_filename = filename
- if filename.endswith(".dak"):
- filename = filename[:-4]+".changes"
-
- if not filename.endswith(".changes"):
- error = "invalid file type; not a changes file"
- else:
- if not os.access(filename,os.R_OK):
- if os.path.exists(filename):
- error = "permission denied"
- else:
- error = "file not found"
-
- if error:
- if require_changes == 1:
- fubar("%s: %s." % (orig_filename, error))
- elif require_changes == 0:
- warn("Skipping %s - %s" % (orig_filename, error))
- return None
- else: # We only care about the .dak file
- return filename
- else:
- return filename
-
-################################################################################
-
-def real_arch(arch):
- return (arch != "source" and arch != "all")
-
-################################################################################
-
def join_with_commas_and(list):
if len(list) == 0: return "nothing"
if len(list) == 1: return list[0]
################################################################################
-def split_args (s, dwim=1):
+def split_args (s, dwim=True):
"""
Split command line arguments which can be separated by either commas
or whitespace. If dwim is set, it will complain about string ending
################################################################################
-def gpgv_get_status_output(cmd, status_read, status_write):
- """
- Our very own version of commands.getouputstatus(), hacked to support
- gpgv's status fd.
- """
-
- cmd = ['/bin/sh', '-c', cmd]
- p2cread, p2cwrite = os.pipe()
- c2pread, c2pwrite = os.pipe()
- errout, errin = os.pipe()
- pid = os.fork()
- if pid == 0:
- # Child
- os.close(0)
- os.close(1)
- os.dup(p2cread)
- os.dup(c2pwrite)
- os.close(2)
- os.dup(errin)
- for i in range(3, 256):
- if i != status_write:
- try:
- os.close(i)
- except:
- pass
- try:
- os.execvp(cmd[0], cmd)
- finally:
- os._exit(1)
-
- # Parent
- os.close(p2cread)
- os.dup2(c2pread, c2pwrite)
- os.dup2(errout, errin)
-
- output = status = ""
- while 1:
- i, o, e = select.select([c2pwrite, errin, status_read], [], [])
- more_data = []
- for fd in i:
- r = os.read(fd, 8196)
- if len(r) > 0:
- more_data.append(fd)
- if fd == c2pwrite or fd == errin:
- output += r
- elif fd == status_read:
- status += r
- else:
- fubar("Unexpected file descriptor [%s] returned from select\n" % (fd))
- if not more_data:
- pid, exit_status = os.waitpid(pid, 0)
- try:
- os.close(status_write)
- os.close(status_read)
- os.close(c2pread)
- os.close(c2pwrite)
- os.close(p2cwrite)
- os.close(errin)
- os.close(errout)
- except:
- pass
- break
-
- return output, status, exit_status
-
-################################################################################
-
-def process_gpgv_output(status):
- # Process the status-fd output
- keywords = {}
- internal_error = ""
- for line in status.split('\n'):
- line = line.strip()
- if line == "":
- continue
- split = line.split()
- if len(split) < 2:
- internal_error += "gpgv status line is malformed (< 2 atoms) ['%s'].\n" % (line)
- continue
- (gnupg, keyword) = split[:2]
- if gnupg != "[GNUPG:]":
- internal_error += "gpgv status line is malformed (incorrect prefix '%s').\n" % (gnupg)
- continue
- args = split[2:]
- if keywords.has_key(keyword) and keyword not in [ "NODATA", "SIGEXPIRED", "KEYEXPIRED" ]:
- internal_error += "found duplicate status token ('%s').\n" % (keyword)
- continue
- else:
- keywords[keyword] = args
-
- return (keywords, internal_error)
-
-################################################################################
-
-def retrieve_key (filename, keyserver=None, keyring=None):
- """
- Retrieve the key that signed 'filename' from 'keyserver' and
- add it to 'keyring'. Returns nothing on success, or an error message
- on error.
- """
-
- # Defaults for keyserver and keyring
- if not keyserver:
- keyserver = Cnf["Dinstall::KeyServer"]
- if not keyring:
- keyring = get_primary_keyring_path()
-
- # Ensure the filename contains no shell meta-characters or other badness
- if not re_taint_free.match(filename):
- return "%s: tainted filename" % (filename)
-
- # Invoke gpgv on the file
- status_read, status_write = os.pipe()
- cmd = "gpgv --status-fd %s --keyring /dev/null %s" % (status_write, filename)
- (_, status, _) = gpgv_get_status_output(cmd, status_read, status_write)
-
- # Process the status-fd output
- (keywords, internal_error) = process_gpgv_output(status)
- if internal_error:
- return internal_error
-
- if not keywords.has_key("NO_PUBKEY"):
- return "didn't find expected NO_PUBKEY in gpgv status-fd output"
-
- fingerprint = keywords["NO_PUBKEY"][0]
- # XXX - gpg sucks. You can't use --secret-keyring=/dev/null as
- # it'll try to create a lockfile in /dev. A better solution might
- # be a tempfile or something.
- cmd = "gpg --no-default-keyring --secret-keyring=%s --no-options" \
- % (Cnf["Dinstall::SigningKeyring"])
- cmd += " --keyring %s --keyserver %s --recv-key %s" \
- % (keyring, keyserver, fingerprint)
- (result, output) = commands.getstatusoutput(cmd)
- if (result != 0):
- return "'%s' failed with exit code %s" % (cmd, result)
-
- return ""
-
-################################################################################
-
def gpg_keyring_args(keyrings=None):
if not keyrings:
keyrings = get_active_keyring_paths()
return " ".join(["--keyring %s" % x for x in keyrings])
-################################################################################
-@session_wrapper
-def check_signature (sig_filename, data_filename="", keyrings=None, autofetch=None, session=None):
- """
- Check the signature of a file and return the fingerprint if the
- signature is valid or 'None' if it's not. The first argument is the
- filename whose signature should be checked. The second argument is a
- reject function and is called when an error is found. The reject()
- function must allow for two arguments: the first is the error message,
- the second is an optional prefix string. It's possible for reject()
- to be called more than once during an invocation of check_signature().
- The third argument is optional and is the name of the files the
- detached signature applies to. The fourth argument is optional and is
- a *list* of keyrings to use. 'autofetch' can either be None, True or
- False. If None, the default behaviour specified in the config will be
- used.
- """
-
- rejects = []
-
- # Ensure the filename contains no shell meta-characters or other badness
- if not re_taint_free.match(sig_filename):
- rejects.append("!!WARNING!! tainted signature filename: '%s'." % (sig_filename))
- return (None, rejects)
-
- if data_filename and not re_taint_free.match(data_filename):
- rejects.append("!!WARNING!! tainted data filename: '%s'." % (data_filename))
- return (None, rejects)
-
- if not keyrings:
- keyrings = [ x.keyring_name for x in session.query(Keyring).filter(Keyring.active == True).all() ]
-
- # Autofetch the signing key if that's enabled
- if autofetch == None:
- autofetch = Cnf.get("Dinstall::KeyAutoFetch")
- if autofetch:
- error_msg = retrieve_key(sig_filename)
- if error_msg:
- rejects.append(error_msg)
- return (None, rejects)
-
- # Build the command line
- status_read, status_write = os.pipe()
- cmd = "gpgv --status-fd %s %s %s %s" % (
- status_write, gpg_keyring_args(keyrings), sig_filename, data_filename)
-
- # Invoke gpgv on the file
- (output, status, exit_status) = gpgv_get_status_output(cmd, status_read, status_write)
-
- # Process the status-fd output
- (keywords, internal_error) = process_gpgv_output(status)
-
- # If we failed to parse the status-fd output, let's just whine and bail now
- if internal_error:
- rejects.append("internal error while performing signature check on %s." % (sig_filename))
- rejects.append(internal_error, "")
- rejects.append("Please report the above errors to the Archive maintainers by replying to this mail.", "")
- return (None, rejects)
-
- # Now check for obviously bad things in the processed output
- if keywords.has_key("KEYREVOKED"):
- rejects.append("The key used to sign %s has been revoked." % (sig_filename))
- if keywords.has_key("BADSIG"):
- rejects.append("bad signature on %s." % (sig_filename))
- if keywords.has_key("ERRSIG") and not keywords.has_key("NO_PUBKEY"):
- rejects.append("failed to check signature on %s." % (sig_filename))
- if keywords.has_key("NO_PUBKEY"):
- args = keywords["NO_PUBKEY"]
- if len(args) >= 1:
- key = args[0]
- rejects.append("The key (0x%s) used to sign %s wasn't found in the keyring(s)." % (key, sig_filename))
- if keywords.has_key("BADARMOR"):
- rejects.append("ASCII armour of signature was corrupt in %s." % (sig_filename))
- if keywords.has_key("NODATA"):
- rejects.append("no signature found in %s." % (sig_filename))
- if keywords.has_key("EXPKEYSIG"):
- args = keywords["EXPKEYSIG"]
- if len(args) >= 1:
- key = args[0]
- rejects.append("Signature made by expired key 0x%s" % (key))
- if keywords.has_key("KEYEXPIRED") and not keywords.has_key("GOODSIG"):
- args = keywords["KEYEXPIRED"]
- expiredate=""
- if len(args) >= 1:
- timestamp = args[0]
- if timestamp.count("T") == 0:
- try:
- expiredate = time.strftime("%Y-%m-%d", time.gmtime(float(timestamp)))
- except ValueError:
- expiredate = "unknown (%s)" % (timestamp)
- else:
- expiredate = timestamp
- rejects.append("The key used to sign %s has expired on %s" % (sig_filename, expiredate))
-
- if len(rejects) > 0:
- return (None, rejects)
-
- # Next check gpgv exited with a zero return code
- if exit_status:
- rejects.append("gpgv failed while checking %s." % (sig_filename))
- if status.strip():
- rejects.append(prefix_multi_line_string(status, " [GPG status-fd output:] "))
- else:
- rejects.append(prefix_multi_line_string(output, " [GPG output:] "))
- return (None, rejects)
-
- # Sanity check the good stuff we expect
- if not keywords.has_key("VALIDSIG"):
- rejects.append("signature on %s does not appear to be valid [No VALIDSIG]." % (sig_filename))
- else:
- args = keywords["VALIDSIG"]
- if len(args) < 1:
- rejects.append("internal error while checking signature on %s." % (sig_filename))
- else:
- fingerprint = args[0]
- if not keywords.has_key("GOODSIG"):
- rejects.append("signature on %s does not appear to be valid [No GOODSIG]." % (sig_filename))
- if not keywords.has_key("SIG_ID"):
- rejects.append("signature on %s does not appear to be valid [No SIG_ID]." % (sig_filename))
-
- # Finally ensure there's not something we don't recognise
- known_keywords = dict(VALIDSIG="",SIG_ID="",GOODSIG="",BADSIG="",ERRSIG="",
- SIGEXPIRED="",KEYREVOKED="",NO_PUBKEY="",BADARMOR="",
- NODATA="",NOTATION_DATA="",NOTATION_NAME="",KEYEXPIRED="",POLICY_URL="")
-
- for keyword in keywords.keys():
- if not known_keywords.has_key(keyword):
- rejects.append("found unknown status token '%s' from gpgv with args '%r' in %s." % (keyword, keywords[keyword], sig_filename))
-
- if len(rejects) > 0:
- return (None, rejects)
- else:
- return (fingerprint, [])
-
################################################################################
def gpg_get_key_addresses(fingerprint):
if addresses != None:
return addresses
addresses = list()
- cmd = "gpg --no-default-keyring %s --fingerprint %s" \
- % (gpg_keyring_args(), fingerprint)
- (result, output) = commands.getstatusoutput(cmd)
- if result == 0:
+ try:
+ with open(os.devnull, "wb") as devnull:
+ output = daklib.daksubprocess.check_output(
+ ["gpg", "--no-default-keyring"] + gpg_keyring_args().split() +
+ ["--with-colons", "--list-keys", fingerprint], stderr=devnull)
+ except subprocess.CalledProcessError:
+ pass
+ else:
for l in output.split('\n'):
- m = re_gpg_uid.match(l)
- if m:
- addresses.append(m.group(1))
+ parts = l.split(':')
+ if parts[0] not in ("uid", "pub"):
+ continue
+ try:
+ uid = parts[9]
+ except IndexError:
+ continue
+ try:
+ # Do not use unicode_escape, because it is locale-specific
+ uid = codecs.decode(uid, "string_escape").decode("utf-8")
+ except UnicodeDecodeError:
+ uid = uid.decode("latin1") # does not fail
+ m = re_parse_maintainer.match(uid)
+ if not m:
+ continue
+ address = m.group(2)
+ address = address.encode("utf8") # dak still uses bytes
+ if address.endswith('@debian.org'):
+ # prefer @debian.org addresses
+ # TODO: maybe not hardcode the domain
+ addresses.insert(0, address)
+ else:
+ addresses.append(address)
key_uid_email_cache[fingerprint] = addresses
return addresses
################################################################################
+def get_logins_from_ldap(fingerprint='*'):
+    """retrieve the logins (uids) from LDAP, keyed by key fingerprint, that match the given fingerprint pattern ('*' matches all)"""
+
+ LDAPDn = Cnf['Import-LDAP-Fingerprints::LDAPDn']
+ LDAPServer = Cnf['Import-LDAP-Fingerprints::LDAPServer']
+ l = ldap.open(LDAPServer)
+ l.simple_bind_s('','')
+ Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
+ '(keyfingerprint=%s)' % fingerprint,
+ ['uid', 'keyfingerprint'])
+ login = {}
+ for elem in Attrs:
+ login[elem[1]['keyFingerPrint'][0]] = elem[1]['uid'][0]
+ return login
+
+################################################################################
+
+def get_users_from_ldap():
+ """retrieve login and user names from LDAP"""
+
+ LDAPDn = Cnf['Import-LDAP-Fingerprints::LDAPDn']
+ LDAPServer = Cnf['Import-LDAP-Fingerprints::LDAPServer']
+ l = ldap.open(LDAPServer)
+ l.simple_bind_s('','')
+ Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
+ '(uid=*)', ['uid', 'cn', 'mn', 'sn'])
+ users = {}
+ for elem in Attrs:
+ elem = elem[1]
+ name = []
+ for k in ('cn', 'mn', 'sn'):
+ try:
+ if elem[k][0] != '-':
+ name.append(elem[k][0])
+ except KeyError:
+ pass
+ users[' '.join(name)] = elem['uid'][0]
+ return users
+
+################################################################################
+
def clean_symlink (src, dest, root):
"""
Relativize an absolute symlink from 'src' -> 'dest' relative to 'root'.
################################################################################
-def temp_filename(directory=None, prefix="dak", suffix=""):
+def temp_filename(directory=None, prefix="dak", suffix="", mode=None, group=None):
"""
Return a secure and unique filename by pre-creating it.
- If 'directory' is non-null, it will be the directory the file is pre-created in.
- If 'prefix' is non-null, the filename will be prefixed with it, default is dak.
- If 'suffix' is non-null, the filename will end with it.
- Returns a pair (fd, name).
+ @type directory: str
+ @param directory: If non-null it will be the directory the file is pre-created in.
+
+ @type prefix: str
+ @param prefix: The filename will be prefixed with this string
+
+ @type suffix: str
+ @param suffix: The filename will end with this string
+
+ @type mode: int
+ @param mode: If set the file will get chmodded to those permissions
+
+ @type group: str
+ @param group: If set the file will get chgrped to the specified group.
+
+ @rtype: tuple
+ @return: Returns a pair (fd, name)
"""
- return tempfile.mkstemp(suffix, prefix, directory)
+ (tfd, tfname) = tempfile.mkstemp(suffix, prefix, directory)
+ # mkstemp pre-creates the file; only adjust permissions when asked to
+ if mode:
+ os.chmod(tfname, mode)
+ if group:
+ # -1 keeps the current owner; only the group is changed
+ gid = grp.getgrnam(group).gr_gid
+ os.chown(tfname, -1, gid)
+ return (tfd, tfname)
################################################################################
-def temp_dirname(parent=None, prefix="dak", suffix=""):
+def temp_dirname(parent=None, prefix="dak", suffix="", mode=None, group=None):
"""
Return a secure and unique directory by pre-creating it.
- If 'parent' is non-null, it will be the directory the directory is pre-created in.
- If 'prefix' is non-null, the filename will be prefixed with it, default is dak.
- If 'suffix' is non-null, the filename will end with it.
- Returns a pathname to the new directory
+ @type parent: str
+ @param parent: If non-null it will be the directory the directory is pre-created in.
+
+ @type prefix: str
+ @param prefix: The filename will be prefixed with this string
+
+ @type suffix: str
+ @param suffix: The filename will end with this string
+
+ @type mode: int
+ @param mode: If set the directory will get chmodded to those permissions
+
+ @type group: str
+ @param group: If set the directory will get chgrped to the specified group.
+
+ @rtype: str
+ @return: Returns the pathname of the new directory
+
"""
- return tempfile.mkdtemp(suffix, prefix, parent)
+ tfname = tempfile.mkdtemp(suffix, prefix, parent)
+ # mkdtemp pre-creates the directory; only adjust permissions when asked to
+ if mode:
+ os.chmod(tfname, mode)
+ if group:
+ # -1 keeps the current owner; only the group is changed
+ gid = grp.getgrnam(group).gr_gid
+ os.chown(tfname, -1, gid)
+ return tfname
################################################################################
################################################################################
-apt_pkg.init()
-
-Cnf = apt_pkg.Configuration()
-if not os.getenv("DAK_TEST"):
- apt_pkg.read_config_file_isc(Cnf,default_config)
-
-if which_conf_file() != default_config:
- apt_pkg.read_config_file_isc(Cnf,which_conf_file())
+# Module-wide dak configuration, now loaded centrally via daklib.config
+# instead of being parsed here with apt_pkg.
+Cnf = config.Config().Cnf
################################################################################
def parse_wnpp_bug_file(file = "/srv/ftp-master.debian.org/scripts/masterfiles/wnpp_rm"):
"""
- Parses the wnpp bug list available at http://qa.debian.org/data/bts/wnpp_rm
+ Parses the wnpp bug list available at https://qa.debian.org/data/bts/wnpp_rm
Well, actually it parsed a local copy, but let's document the source
somewhere ;)
lines = f.readlines()
except IOError as e:
print "Warning: Couldn't open %s; don't know about WNPP bugs, so won't close any." % file
- lines = []
+ lines = []
wnpp = {}
for line in lines:
if (result != 0):
fubar("Gunzip invocation failed!\n%s\n" % (output), result)
packages = open_file(temp_file)
- Packages = apt_pkg.ParseTagFile(packages)
+ Packages = apt_pkg.TagFile(packages)
os.unlink(temp_file)
return Packages
try:
print >>tmp, text,
tmp.close()
- subprocess.check_call([editor, tmp.name])
+ daklib.daksubprocess.check_call([editor, tmp.name])
return open(tmp.name, 'r').read()
finally:
os.unlink(tmp.name)
################################################################################
-def check_reverse_depends(removals, suite, arches=None, session=None, cruft=False):
+def check_reverse_depends(removals, suite, arches=None, session=None, cruft=False, quiet=False, include_arch_all=True):
+ # quiet suppresses the printed breakage report; include_arch_all controls
+ # whether arch:all packages (and Build-Depends-Indep) are considered.
dbsuite = get_suite(suite, session)
+ # overrides may live in a different suite (e.g. unstable for experimental)
+ overridesuite = dbsuite
+ if dbsuite.overridesuite is not None:
+ overridesuite = get_suite(dbsuite.overridesuite, session)
dep_problem = 0
p2c = {}
- all_broken = {}
+ # source -> package -> set(architectures) with broken Depends
+ all_broken = defaultdict(lambda: defaultdict(set))
if arches:
all_arches = set(arches)
else:
- all_arches = set([x.arch_string for x in get_suite_architectures(suite)])
+ all_arches = set(x.arch_string for x in get_suite_architectures(suite))
all_arches -= set(["source", "all"])
+ # set of removed packages; extended below with virtual packages that are
+ # only provided by removed packages (the caller's list is not mutated)
+ removal_set = set(removals)
metakey_d = get_or_set_metadatakey("Depends", session)
metakey_p = get_or_set_metadatakey("Provides", session)
params = {
'metakey_d_id': metakey_d.key_id,
'metakey_p_id': metakey_p.key_id,
}
- for architecture in all_arches | set(['all']):
+ if include_arch_all:
+ rdep_architectures = all_arches | set(['all'])
+ else:
+ rdep_architectures = all_arches
+ for architecture in rdep_architectures:
deps = {}
sources = {}
virtual_packages = {}
params['arch_id'] = get_architecture(architecture, session).arch_id
statement = '''
- SELECT b.id, b.package, s.source, c.name as component,
+ SELECT b.package, s.source, c.name as component,
(SELECT bmd.value FROM binaries_metadata bmd WHERE bmd.bin_id = b.id AND bmd.key_id = :metakey_d_id) AS depends,
(SELECT bmp.value FROM binaries_metadata bmp WHERE bmp.bin_id = b.id AND bmp.key_id = :metakey_p_id) AS provides
FROM binaries b
JOIN bin_associations ba ON b.id = ba.bin AND ba.suite = :suite_id
JOIN source s ON b.source = s.id
- JOIN files f ON b.file = f.id
- JOIN location l ON f.location = l.id
- JOIN component c ON l.component = c.id
+ JOIN files_archive_map af ON b.file = af.file_id
+ JOIN component c ON af.component_id = c.id
WHERE b.architecture = :arch_id'''
- query = session.query('id', 'package', 'source', 'component', 'depends', 'provides'). \
+ query = session.query('package', 'source', 'component', 'depends', 'provides'). \
from_statement(statement).params(params)
- for binary_id, package, source, component, depends, provides in query:
+ for package, source, component, depends, provides in query:
sources[package] = source
p2c[package] = component
if depends is not None:
# If a virtual package is only provided by the to-be-removed
# packages, treat the virtual package as to-be-removed too.
- for virtual_pkg in virtual_packages.keys():
- if virtual_packages[virtual_pkg] == 0:
- removals.append(virtual_pkg)
+ removal_set.update(virtual_pkg for virtual_pkg in virtual_packages if not virtual_packages[virtual_pkg])
# Check binary dependencies (Depends)
- for package in deps.keys():
+ for package in deps:
if package in removals: continue
- parsed_dep = []
try:
- parsed_dep += apt_pkg.ParseDepends(deps[package])
+ parsed_dep = apt_pkg.parse_depends(deps[package])
except ValueError as e:
print "Error for package %s: %s" % (package, e)
+ parsed_dep = []
for dep in parsed_dep:
# Check for partial breakage. If a package has a ORed
# dependency, there is only a dependency problem if all
source = sources[package]
if component != "main":
source = "%s/%s" % (source, component)
- all_broken.setdefault(source, {}).setdefault(package, set()).add(architecture)
+ all_broken[source][package].add(architecture)
dep_problem = 1
- if all_broken:
+ if all_broken and not quiet:
if cruft:
print " - broken Depends:"
else:
print
# Check source dependencies (Build-Depends and Build-Depends-Indep)
- all_broken.clear()
+ # reuse the name for the second pass: source -> set(broken build-deps)
+ all_broken = defaultdict(set)
metakey_bd = get_or_set_metadatakey("Build-Depends", session)
metakey_bdi = get_or_set_metadatakey("Build-Depends-Indep", session)
+ if include_arch_all:
+ metakey_ids = (metakey_bd.key_id, metakey_bdi.key_id)
+ else:
+ metakey_ids = (metakey_bd.key_id,)
+
params = {
'suite_id': dbsuite.suite_id,
- 'metakey_ids': (metakey_bd.key_id, metakey_bdi.key_id),
+ 'metakey_ids': metakey_ids,
}
statement = '''
- SELECT s.id, s.source, string_agg(sm.value, ', ') as build_dep
+ SELECT s.source, string_agg(sm.value, ', ') as build_dep
FROM source s
JOIN source_metadata sm ON s.id = sm.src_id
WHERE s.id in
- (SELECT source FROM src_associations
+ (SELECT src FROM newest_src_association
WHERE suite = :suite_id)
AND sm.key_id in :metakey_ids
GROUP BY s.id, s.source'''
- query = session.query('id', 'source', 'build_dep').from_statement(statement). \
+ query = session.query('source', 'build_dep').from_statement(statement). \
params(params)
- for source_id, source, build_dep in query:
+ for source, build_dep in query:
if source in removals: continue
parsed_dep = []
if build_dep is not None:
# Remove [arch] information since we want to see breakage on all arches
build_dep = re_build_dep_arch.sub("", build_dep)
try:
- parsed_dep += apt_pkg.ParseDepends(build_dep)
+ parsed_dep = apt_pkg.parse_src_depends(build_dep)
except ValueError as e:
print "Error for source %s: %s" % (source, e)
for dep in parsed_dep:
if dep_package in removals:
unsat += 1
if unsat == len(dep):
- component = DBSource.get(source_id, session).get_component_name()
+ # look up the source's component via its 'dsc' override in the
+ # override suite; strip any "/contrib" or "/non-free" suffix first
+ component, = session.query(Component.component_name) \
+ .join(Component.overrides) \
+ .filter(Override.suite == overridesuite) \
+ .filter(Override.package == re.sub('/(contrib|non-free)$', '', source)) \
+ .join(Override.overridetype).filter(OverrideType.overridetype == 'dsc') \
+ .first()
+ key = source
if component != "main":
- source = "%s/%s" % (source, component)
- all_broken.setdefault(source, set()).add(pp_deps(dep))
+ key = "%s/%s" % (source, component)
+ all_broken[key].add(pp_deps(dep))
dep_problem = 1
- if all_broken:
+ if all_broken and not quiet:
if cruft:
print " - broken Build-Depends:"
else:
print
return dep_problem
+
+################################################################################
+
+def parse_built_using(control):
+ """source packages referenced via Built-Using
+
+ @type control: dict-like
+ @param control: control file to take Built-Using field from
+
+ @rtype: list of (str, str)
+ @return: list of (source_name, source_version) pairs
+ """
+ built_using = control.get('Built-Using', None)
+ if built_using is None:
+ return []
+
+ bu = []
+ for dep in apt_pkg.parse_depends(built_using):
+ # Built-Using entries must be exactly "source (= version)":
+ # no alternatives and only strict '=' relations.
+ # NOTE(review): assert is stripped under "python -O"; these checks
+ # would then silently pass -- confirm whether that is acceptable.
+ assert len(dep) == 1, 'Alternatives are not allowed in Built-Using field'
+ source_name, source_version, comp = dep[0]
+ assert comp == '=', 'Built-Using must contain strict dependencies'
+ bu.append((source_name, source_version))
+
+ return bu
+
+################################################################################
+
+def is_in_debug_section(control):
+ """binary package is a debug package
+
+ @type control: dict-like
+ @param control: control file of binary package
+
+ @rtype: Boolean
+ @return: True if the binary package is a debug package
+ """
+ # Section may be "component/section" (e.g. "non-free/debug"); keep only
+ # the part after the first slash.
+ section = control['Section'].split('/', 1)[-1]
+ auto_built_package = control.get("Auto-Built-Package")
+ return section == "debug" and auto_built_package == "debug-symbols"