################################################################################
def recheck(upload, session):
- files = upload.pkg.files
-
- cnf = Config()
- for f in files.keys():
- # The .orig.tar.gz can disappear out from under us is it's a
- # duplicate of one in the archive.
- if not files.has_key(f):
- continue
- # Check that the source still exists
- if files[f]["type"] == "deb":
- source_version = files[f]["source version"]
- source_package = files[f]["source package"]
- if not upload.pkg.changes["architecture"].has_key("source") \
- and not upload.source_exists(source_package, source_version, upload.pkg.changes["distribution"].keys()):
- source_epochless_version = re_no_epoch.sub('', source_version)
- dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
- found = 0
- for q in ["Accepted", "Embargoed", "Unembargoed", "Newstage"]:
- if cnf.has_key("Dir::Queue::%s" % (q)):
- if os.path.exists(cnf["Dir::Queue::%s" % (q)] + '/' + dsc_filename):
- found = 1
- if not found:
- upload.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, f))
-
- # Version and file overwrite checks
- if files[f]["type"] == "deb":
- upload.check_binary_against_db(f, session)
- elif files[f]["type"] == "dsc":
- upload.check_source_against_db(f, session)
- upload.check_dsc_against_db(f, session)
-
+ upload.recheck(session)
if len(upload.rejects) > 0:
answer = "XXX"
if Options["No-Action"] or Options["Automatic"] or Options["Trainee"]:
answer = 'S'
- print "REJECT\n" + upload.rejects.join("\n"),
+ print "REJECT\n%s" % '\n'.join(upload.rejects)
prompt = "[R]eject, Skip, Quit ?"
while prompt.find(answer) == -1:
answer = answer[:1].upper()
if answer == 'R':
- upload.do_reject(manual=0, reject_message=upload.rejects.join("\n"))
+ upload.do_reject(manual=0, reject_message='\n'.join(upload.rejects))
os.unlink(upload.pkg.changes_file[:-8]+".dak")
return 0
elif answer == 'S':
elif answer == 'E' and not Options["Trainee"]:
new = edit_overrides (new, upload, session)
elif answer == 'M' and not Options["Trainee"]:
+ upload.pkg.remove_known_changes()
aborted = upload.do_reject(manual=1,
reject_message=Options["Manual-Reject"],
note=get_new_comments(changes.get("source", ""), session=session))
print """Usage: dak process-new [OPTION]... [CHANGES]...
-a, --automatic automatic run
-h, --help show this help and exit.
- -C, --comments-dir=DIR use DIR as comments-dir, for [o-]p-u-new
-m, --manual-reject=MSG manual reject with `msg'
-n, --no-action don't do anything
-t, --trainee FTP Trainee mode
finally:
os.unlink(path)
- # def move_to_dir (upload, dest, perms=0660, changesperms=0664):
- # utils.move (upload.pkg.changes_file, dest, perms=changesperms)
- # file_keys = upload.pkg.files.keys()
- # for f in file_keys:
- # utils.move (f, dest, perms=perms)
-
- # def is_source_in_queue_dir(qdir):
- # entries = [ x for x in os.listdir(qdir) if x.startswith(Upload.pkg.changes["source"])
- # and x.endswith(".changes") ]
- # for entry in entries:
- # # read the .dak
- # u = queue.Upload(Cnf)
- # u.pkg.changes_file = os.path.join(qdir, entry)
- # u.update_vars()
- # if not u.pkg.changes["architecture"].has_key("source"):
- # # another binary upload, ignore
- # continue
- # if Upload.pkg.changes["version"] != u.pkg.changes["version"]:
- # # another version, ignore
- # continue
- # # found it!
- # return True
- # return False
-
- # def move_to_holding(suite, queue_dir):
- # print "Moving to %s holding area." % (suite.upper(),)
- # if Options["No-Action"]:
- # return
- # Logger.log(["Moving to %s" % (suite,), Upload.pkg.changes_file])
- # Upload.dump_vars(queue_dir)
- # move_to_dir(queue_dir, perms=0664)
- # os.unlink(Upload.pkg.changes_file[:-8]+".dak")
-
def _accept(upload):
if Options["No-Action"]:
return
upload.accept(summary, short_summary, targetdir=Config()["Dir::Queue::Newstage"])
os.unlink(upload.pkg.changes_file[:-8]+".dak")
- # def do_accept_stableupdate(upload,suite, q):
- # cnf = Config()
- # queue_dir = cnf["Dir::Queue::%s" % (q,)]
- # if not upload.pkg.changes["architecture"].has_key("source"):
- # # It is not a sourceful upload. So its source may be either in p-u
- # # holding, in new, in accepted or already installed.
- # if is_source_in_queue_dir(queue_dir):
- # # It's in p-u holding, so move it there.
- # print "Binary-only upload, source in %s." % (q,)
- # move_to_holding(suite, queue_dir)
- # elif Upload.source_exists(Upload.pkg.changes["source"],
- # Upload.pkg.changes["version"]):
- # # dak tells us that there is source available. At time of
- # # writing this means that it is installed, so put it into
- # # accepted.
- # print "Binary-only upload, source installed."
- # Logger.log([utils.getusername(), "PUNEW ACCEPT: %s" % (Upload.pkg.changes_file)])
- # _accept()
- # elif is_source_in_queue_dir(Cnf["Dir::Queue::Accepted"]):
- # # The source is in accepted, the binary cleared NEW: accept it.
- # print "Binary-only upload, source in accepted."
- # Logger.log([utils.getusername(), "PUNEW ACCEPT: %s" % (Upload.pkg.changes_file)])
- # _accept()
- # elif is_source_in_queue_dir(Cnf["Dir::Queue::New"]):
- # # It's in NEW. We expect the source to land in p-u holding
- # # pretty soon.
- # print "Binary-only upload, source in new."
- # move_to_holding(suite, queue_dir)
- # elif is_source_in_queue_dir(Cnf["Dir::Queue::Newstage"]):
- # # It's in newstage. Accept into the holding area
- # print "Binary-only upload, source in newstage."
- # Logger.log([utils.getusername(), "PUNEW ACCEPT: %s" % (Upload.pkg.changes_file)])
- # _accept()
- # else:
- # # No case applicable. Bail out. Return will cause the upload
- # # to be skipped.
- # print "ERROR"
- # print "Stable update failed. Source not found."
- # return
- # else:
- # # We are handling a sourceful upload. Move to accepted if currently
- # # in p-u holding and to p-u holding otherwise.
- # if is_source_in_queue_dir(queue_dir):
- # print "Sourceful upload in %s, accepting." % (q,)
- # _accept()
- # else:
- # move_to_holding(suite, queue_dir)
-
def do_accept(upload):
print "ACCEPT"
cnf = Config()
if not Options["No-Action"]:
(summary, short_summary) = upload.build_summaries()
- # if cnf.FindB("Dinstall::SecurityQueueHandling"):
- # upload.dump_vars(cnf["Dir::Queue::Embargoed"])
- # move_to_dir(cnf["Dir::Queue::Embargoed"])
- # upload.queue_build("embargoed", cnf["Dir::Queue::Embargoed"])
- # # Check for override disparities
- # upload.Subst["__SUMMARY__"] = summary
- # else:
- # Stable updates need to be copied to proposed-updates holding
- # area instead of accepted. Sourceful uploads need to go
- # to it directly, binaries only if the source has not yet been
- # accepted into p-u.
- for suite, q in [("proposed-updates", "ProposedUpdates"),
- ("oldstable-proposed-updates", "OldProposedUpdates")]:
- if not upload.pkg.changes["distribution"].has_key(suite):
- continue
- utils.fubar("stable accept not supported yet")
- # return do_accept_stableupdate(suite, q)
- # Just a normal upload, accept it...
- _accept(upload)
-
- def check_status(files):
- new = byhand = 0
- for f in files.keys():
- if files[f]["type"] == "byhand":
- byhand = 1
- elif files[f].has_key("new"):
- new = 1
- return (new, byhand)
+
+ if cnf.FindB("Dinstall::SecurityQueueHandling"):
+ upload.dump_vars(cnf["Dir::Queue::Embargoed"])
+ upload.move_to_dir(cnf["Dir::Queue::Embargoed"])
+ upload.queue_build("embargoed", cnf["Dir::Queue::Embargoed"])
+ # Check for override disparities
+ upload.Subst["__SUMMARY__"] = summary
+ else:
+ # Just a normal upload, accept it...
+ _accept(upload)
def do_pkg(changes_file, session):
u = Upload()
################################################################################
- # def do_comments(dir, opref, npref, line, fn):
- # for comm in [ x for x in os.listdir(dir) if x.startswith(opref) ]:
- # lines = open("%s/%s" % (dir, comm)).readlines()
- # if len(lines) == 0 or lines[0] != line + "\n": continue
- # changes_files = [ x for x in os.listdir(".") if x.startswith(comm[7:]+"_")
- # and x.endswith(".changes") ]
- # changes_files = sort_changes(changes_files)
- # for f in changes_files:
- # f = utils.validate_changes_file_arg(f, 0)
- # if not f: continue
- # print "\n" + f
- # fn(f, "".join(lines[1:]))
-
- # if opref != npref and not Options["No-Action"]:
- # newcomm = npref + comm[len(opref):]
- # os.rename("%s/%s" % (dir, comm), "%s/%s" % (dir, newcomm))
-
- # ################################################################################
-
- # def comment_accept(changes_file, comments):
- # Upload.pkg.changes_file = changes_file
- # Upload.init_vars()
- # Upload.update_vars()
- # Upload.update_subst()
- # files = Upload.pkg.files
-
- # if not recheck():
- # return # dak wants to REJECT, crap
-
- # (new, byhand) = check_status(files)
- # if not new and not byhand:
- # do_accept()
-
- # ################################################################################
-
- # def comment_reject(changes_file, comments):
- # Upload.pkg.changes_file = changes_file
- # Upload.init_vars()
- # Upload.update_vars()
- # Upload.update_subst()
-
- # if not recheck():
- # pass # dak has its own reasons to reject as well, which is fine
-
- # reject(comments)
- # print "REJECT\n" + reject_message,
- # if not Options["No-Action"]:
- # Upload.do_reject(0, reject_message)
- # os.unlink(Upload.pkg.changes_file[:-8]+".dak")
-
- ################################################################################
-
def main():
global Options, Logger, Sections, Priorities
Arguments = [('a',"automatic","Process-New::Options::Automatic"),
('h',"help","Process-New::Options::Help"),
- ('C',"comments-dir","Process-New::Options::Comments-Dir", "HasArg"),
('m',"manual-reject","Process-New::Options::Manual-Reject", "HasArg"),
('t',"trainee","Process-New::Options::Trainee"),
('n',"no-action","Process-New::Options::No-Action")]
- for i in ["automatic", "help", "manual-reject", "no-action", "version", "comments-dir", "trainee"]:
+ for i in ["automatic", "help", "manual-reject", "no-action", "version", "trainee"]:
if not cnf.has_key("Process-New::Options::%s" % (i)):
cnf["Process-New::Options::%s" % (i)] = ""
changes_files = apt_pkg.ParseCommandLine(cnf.Cnf,Arguments,sys.argv)
- if len(changes_files) == 0 and not cnf.get("Process-New::Options::Comments-Dir",""):
+ if len(changes_files) == 0:
changes_files = utils.get_changes_files(cnf["Dir::Queue::New"])
Options = cnf.SubTree("Process-New::Options")
# Kill me now? **FIXME**
cnf["Dinstall::Options::No-Mail"] = ""
- # commentsdir = cnf.get("Process-New::Options::Comments-Dir","")
- # if commentsdir:
- # if changes_files != []:
- # sys.stderr.write("Can't specify any changes files if working with comments-dir")
- # sys.exit(1)
- # do_comments(commentsdir, "ACCEPT.", "ACCEPTED.", "OK", comment_accept)
- # do_comments(commentsdir, "REJECT.", "REJECTED.", "NOTOK", comment_reject)
- # else:
- if True:
- for changes_file in changes_files:
- changes_file = utils.validate_changes_file_arg(changes_file, 0)
- if not changes_file:
- continue
- print "\n" + changes_file
-
- do_pkg (changes_file, session)
+ for changes_file in changes_files:
+ changes_file = utils.validate_changes_file_arg(changes_file, 0)
+ if not changes_file:
+ continue
+ print "\n" + changes_file
+
+ do_pkg (changes_file, session)
end()
################################################################################
+import os
import apt_pkg
import socket
################################################################################
- default_config = "/etc/dak/dak.conf"
+ default_config = "/etc/dak/dak.conf" #: default dak config, defines host properties
-def which_conf_file(Cnf):
- res = socket.gethostbyaddr(socket.gethostname())
- if Cnf.get("Config::" + res[0] + "::DakConfig"):
- return Cnf["Config::" + res[0] + "::DakConfig"]
+def which_conf_file():
+ if os.getenv("DAK_CONFIG"):
+ return os.getenv("DAK_CONFIG")
else:
return default_config
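
A minimal sketch (the path is made up) of how the new DAK_CONFIG override can be exercised, assuming daklib.config.Config is the class initialised below:

    import os
    from daklib.config import Config

    # Hypothetical test configuration; set before the first Config() is constructed.
    os.environ["DAK_CONFIG"] = "/srv/dak-test/dak.conf"
    cnf = Config()   # which_conf_file() now prefers DAK_CONFIG over /etc/dak/dak.conf
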
self.Cnf = apt_pkg.newConfiguration()
- apt_pkg.ReadConfigFileISC(self.Cnf, default_config)
+ apt_pkg.ReadConfigFileISC(self.Cnf, which_conf_file())
# Check whether our dak.conf was the real one or
# just a pointer to our main one
################################################################################
import os
+ import re
import psycopg2
import traceback
################################################################################
+ class BinContents(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<BinContents (%s, %s)>' % (self.binary, self.filename)
+
+ __all__.append('BinContents')
+
+ ################################################################################
+
class DBBinary(object):
def __init__(self, *args, **kwargs):
pass
################################################################################
+ class BinaryACL(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<BinaryACL %s>' % self.binary_acl_id
+
+ __all__.append('BinaryACL')
+
+ ################################################################################
+
+ class BinaryACLMap(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<BinaryACLMap %s>' % self.binary_acl_map_id
+
+ __all__.append('BinaryACLMap')
+
+ ################################################################################
+
class Component(object):
def __init__(self, *args, **kwargs):
pass
################################################################################
- class ContentFilename(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<ContentFilename %s>' % self.filename
-
- __all__.append('ContentFilename')
-
@session_wrapper
def get_or_set_contents_file_id(filename, session=None):
"""
# Insert paths
pathcache = {}
for fullpath in fullpaths:
- # Get the necessary IDs ...
- (path, file) = os.path.split(fullpath)
-
- filepath_id = get_or_set_contents_path_id(path, session)
- filename_id = get_or_set_contents_file_id(file, session)
-
- pathcache[fullpath] = (filepath_id, filename_id)
+ if fullpath.startswith( './' ):
+ fullpath = fullpath[2:]
- for fullpath, dat in pathcache.items():
- ca = ContentAssociation()
- ca.binary_id = binary_id
- ca.filepath_id = dat[0]
- ca.filename_id = dat[1]
- session.add(ca)
+ session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )", { 'filename': fullpath, 'id': binary_id} )
- # Only commit if we set up the session ourself
+ session.commit()
if privatetrans:
- session.commit()
session.close()
- else:
- session.flush()
-
return True
except:
__all__.append('Fingerprint')
+ @session_wrapper
+ def get_fingerprint(fpr, session=None):
+ """
+ Returns Fingerprint object for given fpr.
+
+ @type fpr: string
+ @param fpr: The fpr to find / add
+
+ @type session: SQLAlchemy
+ @param session: Optional SQL session object (a temporary one will be
+ generated if not supplied).
+
+ @rtype: Fingerprint
+ @return: the Fingerprint object for the given fpr or None
+ """
+
+ q = session.query(Fingerprint).filter_by(fingerprint=fpr)
+
+ try:
+ ret = q.one()
+ except NoResultFound:
+ ret = None
+
+ return ret
+
+ __all__.append('get_fingerprint')
+
@session_wrapper
def get_or_set_fingerprint(fpr, session=None):
"""
################################################################################
+ # Helper routine for Keyring class
+ def get_ldap_name(entry):
+ name = []
+ for k in ["cn", "mn", "sn"]:
+ ret = entry.get(k)
+ if ret and ret[0] != "" and ret[0] != "-":
+ name.append(ret[0])
+ return " ".join(name)
+
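
As a quick illustration of the helper (hypothetical entry, shaped like a python-ldap result where every attribute value is a list):

    entry = {"cn": ["John"], "mn": ["-"], "sn": ["Doe"]}
    print get_ldap_name(entry)   # "John Doe" - the "-" placeholder middle name is dropped
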
+ ################################################################################
+
class Keyring(object):
+ gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
+ " --with-colons --fingerprint --fingerprint"
+
+ keys = {}
+ fpr_lookup = {}
+
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
return '<Keyring %s>' % self.keyring_name
+ def de_escape_gpg_str(self, str):
+ esclist = re.split(r'(\\x..)', str)
+ for x in range(1,len(esclist),2):
+ esclist[x] = "%c" % (int(esclist[x][2:],16))
+ return "".join(esclist)
+
+ def load_keys(self, keyring):
+ import email.Utils
+
+ if not self.keyring_id:
+ raise Exception('Must be initialized with database information')
+
+ k = os.popen(self.gpg_invocation % keyring, "r")
+ key = None
+ signingkey = False
+
+ for line in k.xreadlines():
+ field = line.split(":")
+ if field[0] == "pub":
+ key = field[4]
+ (name, addr) = email.Utils.parseaddr(field[9])
+ name = re.sub(r"\s*[(].*[)]", "", name)
+ if name == "" or addr == "" or "@" not in addr:
+ name = field[9]
+ addr = "invalid-uid"
+ name = self.de_escape_gpg_str(name)
+ self.keys[key] = {"email": addr}
+ if name != "":
+ self.keys[key]["name"] = name
+ self.keys[key]["aliases"] = [name]
+ self.keys[key]["fingerprints"] = []
+ signingkey = True
+ elif key and field[0] == "sub" and len(field) >= 12:
+ signingkey = ("s" in field[11])
+ elif key and field[0] == "uid":
+ (name, addr) = email.Utils.parseaddr(field[9])
+ if name and name not in self.keys[key]["aliases"]:
+ self.keys[key]["aliases"].append(name)
+ elif signingkey and field[0] == "fpr":
+ self.keys[key]["fingerprints"].append(field[9])
+ self.fpr_lookup[field[9]] = key
+
+ def import_users_from_ldap(self, session):
+ import ldap
+ cnf = Config()
+
+ LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
+ LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
+
+ l = ldap.open(LDAPServer)
+ l.simple_bind_s("","")
+ Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
+ "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
+ ["uid", "keyfingerprint", "cn", "mn", "sn"])
+
+ ldap_fin_uid_id = {}
+
+ byuid = {}
+ byname = {}
+
+ for i in Attrs:
+ entry = i[1]
+ uid = entry["uid"][0]
+ name = get_ldap_name(entry)
+ fingerprints = entry["keyFingerPrint"]
+ keyid = None
+ for f in fingerprints:
+ key = self.fpr_lookup.get(f, None)
+ if key not in self.keys:
+ continue
+ self.keys[key]["uid"] = uid
+
+ if keyid != None:
+ continue
+ keyid = get_or_set_uid(uid, session).uid_id
+ byuid[keyid] = (uid, name)
+ byname[uid] = (keyid, name)
+
+ return (byname, byuid)
+
+ def generate_users_from_keyring(self, format, session):
+ byuid = {}
+ byname = {}
+ any_invalid = False
+ for x in self.keys.keys():
+ if self.keys[x]["email"] == "invalid-uid":
+ any_invalid = True
+ self.keys[x]["uid"] = format % "invalid-uid"
+ else:
+ uid = format % self.keys[x]["email"]
+ keyid = get_or_set_uid(uid, session).uid_id
+ byuid[keyid] = (uid, self.keys[x]["name"])
+ byname[uid] = (keyid, self.keys[x]["name"])
+ self.keys[x]["uid"] = uid
+
+ if any_invalid:
+ uid = format % "invalid-uid"
+ keyid = get_or_set_uid(uid, session).uid_id
+ byuid[keyid] = (uid, "ungeneratable user id")
+ byname[uid] = (keyid, "ungeneratable user id")
+
+ return (byname, byuid)
+
__all__.append('Keyring')
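
A rough sketch of how an import tool might drive the new Keyring helpers; the keyring path and uid format string are invented for the example, and it assumes the keyrings table row stores the on-disk keyring path:

    session = DBConn().session()
    k = get_keyring("/srv/keyring.debian.org/keyrings/debian-keyring.gpg", session)  # hypothetical path
    if k is not None:
        k.load_keys(k.keyring_name)                                     # parse gpg --with-colons output
        (byname, byuid) = k.generate_users_from_keyring("%s", session)  # map key emails onto uid records
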
@session_wrapper
- def get_or_set_keyring(keyring, session=None):
+ def get_keyring(keyring, session=None):
"""
- If C{keyring} does not have an entry in the C{keyrings} table yet, create one
- and return the new Keyring
+ If C{keyring} does not have an entry in the C{keyrings} table yet, return None
If C{keyring} already has an entry, simply return the existing Keyring
@type keyring: string
try:
return q.one()
except NoResultFound:
- obj = Keyring(keyring_name=keyring)
- session.add(obj)
- session.commit_or_flush()
- return obj
+ return None
+
+ __all__.append('get_keyring')
+
+ ################################################################################
- __all__.append('get_or_set_keyring')
+ class KeyringACLMap(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<KeyringACLMap %s>' % self.keyring_acl_map_id
+
+ __all__.append('KeyringACLMap')
################################################################################
+class KnownChange(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<KnownChange %s>' % self.changesname
+
+__all__.append('KnownChange')
+
+@session_wrapper
+def get_knownchange(filename, session=None):
+ """
+ Returns the KnownChange object for the given C{filename}.
+
+ @type filename: string
+ @param filename: the name of the .changes file to look up
+
+ @type session: Session
+ @param session: Optional SQLA session object (a temporary one will be
+ generated if not supplied)
+
+ @rtype: KnownChange
+ @return: KnownChange object for the given filename (None if not present)
+
+ """
+ q = session.query(KnownChange).filter_by(changesname=filename)
+
+ try:
+ return q.one()
+ except NoResultFound:
+ return None
+
+__all__.append('get_knownchange')
+
+################################################################################
class Location(object):
def __init__(self, *args, **kwargs):
pass
################################################################################
+ class SourceACL(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<SourceACL %s>' % self.source_acl_id
+
+ __all__.append('SourceACL')
+
+ ################################################################################
+
class SrcAssociation(object):
def __init__(self, *args, **kwargs):
pass
################################################################################
+ class UploadBlock(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
+
+ __all__.append('UploadBlock')
+
+ ################################################################################
+
class DBConn(Singleton):
"""
database module init.
self.tbl_archive = Table('archive', self.db_meta, autoload=True)
self.tbl_bin_associations = Table('bin_associations', self.db_meta, autoload=True)
self.tbl_binaries = Table('binaries', self.db_meta, autoload=True)
+ self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True)
+ self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True)
self.tbl_component = Table('component', self.db_meta, autoload=True)
self.tbl_config = Table('config', self.db_meta, autoload=True)
self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True)
self.tbl_files = Table('files', self.db_meta, autoload=True)
self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True)
self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True)
+ self.tbl_known_changes = Table('known_changes', self.db_meta, autoload=True)
+ self.tbl_keyring_acl_map = Table('keyring_acl_map', self.db_meta, autoload=True)
self.tbl_location = Table('location', self.db_meta, autoload=True)
self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True)
self.tbl_new_comments = Table('new_comments', self.db_meta, autoload=True)
self.tbl_queue_build = Table('queue_build', self.db_meta, autoload=True)
self.tbl_section = Table('section', self.db_meta, autoload=True)
self.tbl_source = Table('source', self.db_meta, autoload=True)
+ self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True)
self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True)
self.tbl_src_format = Table('src_format', self.db_meta, autoload=True)
self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True)
self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True)
self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True)
self.tbl_uid = Table('uid', self.db_meta, autoload=True)
+ self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True)
def __setupmappers(self):
mapper(Architecture, self.tbl_architecture,
binary_id = self.tbl_bin_associations.c.bin,
binary = relation(DBBinary)))
+
mapper(DBBinary, self.tbl_binaries,
properties = dict(binary_id = self.tbl_binaries.c.id,
package = self.tbl_binaries.c.package,
binassociations = relation(BinAssociation,
primaryjoin=(self.tbl_binaries.c.id==self.tbl_bin_associations.c.bin))))
+ mapper(BinaryACL, self.tbl_binary_acl,
+ properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
+
+ mapper(BinaryACLMap, self.tbl_binary_acl_map,
+ properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
+ fingerprint = relation(Fingerprint, backref="binary_acl_map"),
+ architecture = relation(Architecture)))
+
mapper(Component, self.tbl_component,
properties = dict(component_id = self.tbl_component.c.id,
component_name = self.tbl_component.c.name))
mapper(DBConfig, self.tbl_config,
properties = dict(config_id = self.tbl_config.c.id))
- mapper(ContentAssociation, self.tbl_content_associations,
- properties = dict(ca_id = self.tbl_content_associations.c.id,
- filename_id = self.tbl_content_associations.c.filename,
- filename = relation(ContentFilename),
- filepath_id = self.tbl_content_associations.c.filepath,
- filepath = relation(ContentFilepath),
- binary_id = self.tbl_content_associations.c.binary_pkg,
- binary = relation(DBBinary)))
-
-
- mapper(ContentFilename, self.tbl_content_file_names,
- properties = dict(cafilename_id = self.tbl_content_file_names.c.id,
- filename = self.tbl_content_file_names.c.file))
-
- mapper(ContentFilepath, self.tbl_content_file_paths,
- properties = dict(cafilepath_id = self.tbl_content_file_paths.c.id,
- filepath = self.tbl_content_file_paths.c.path))
-
mapper(DSCFile, self.tbl_dsc_files,
properties = dict(dscfile_id = self.tbl_dsc_files.c.id,
source_id = self.tbl_dsc_files.c.source,
uid_id = self.tbl_fingerprint.c.uid,
uid = relation(Uid),
keyring_id = self.tbl_fingerprint.c.keyring,
- keyring = relation(Keyring)))
+ keyring = relation(Keyring),
+ source_acl = relation(SourceACL),
+ binary_acl = relation(BinaryACL)))
mapper(Keyring, self.tbl_keyrings,
properties = dict(keyring_name = self.tbl_keyrings.c.name,
keyring_id = self.tbl_keyrings.c.id))
+ mapper(KnownChange, self.tbl_known_changes,
+ properties = dict(known_change_id = self.tbl_known_changes.c.id))
+
+ mapper(KeyringACLMap, self.tbl_keyring_acl_map,
+ properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
+ keyring = relation(Keyring, backref="keyring_acl_map"),
+ architecture = relation(Architecture)))
+
mapper(Location, self.tbl_location,
properties = dict(location_id = self.tbl_location.c.id,
component_id = self.tbl_location.c.component,
properties = dict(overridetype = self.tbl_override_type.c.type,
overridetype_id = self.tbl_override_type.c.id))
- mapper(PendingContentAssociation, self.tbl_pending_content_associations,
- properties = dict(pca_id = self.tbl_pending_content_associations.c.id,
- filepath_id = self.tbl_pending_content_associations.c.filepath,
- filepath = relation(ContentFilepath),
- filename_id = self.tbl_pending_content_associations.c.filename,
- filename = relation(ContentFilename)))
-
mapper(Priority, self.tbl_priority,
properties = dict(priority_id = self.tbl_priority.c.id))
srcfiles = relation(DSCFile,
primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
srcassociations = relation(SrcAssociation,
- primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source))))
+ primaryjoin=(self.tbl_source.c.id==self.tbl_src_associations.c.source)),
+ srcuploaders = relation(SrcUploader)))
+
+ mapper(SourceACL, self.tbl_source_acl,
+ properties = dict(source_acl_id = self.tbl_source_acl.c.id))
mapper(SrcAssociation, self.tbl_src_associations,
properties = dict(sa_id = self.tbl_src_associations.c.id,
properties = dict(uid_id = self.tbl_uid.c.id,
fingerprint = relation(Fingerprint)))
+ mapper(UploadBlock, self.tbl_upload_blocks,
+ properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
+ fingerprint = relation(Fingerprint, backref="uploadblocks"),
+ uid = relation(Uid, backref="uploadblocks")))
+
## Connection functions
def __createconn(self):
from config import Config
###############################################################################
- def lookup_uid_from_fingerprint(fpr, session):
- uid = None
- uid_name = ""
- # This is a stupid default, but see the comments below
- is_dm = False
-
- user = get_uid_from_fingerprint(fpr, session)
-
- if user is not None:
- uid = user.uid
- if user.name is None:
- uid_name = ''
- else:
- uid_name = user.name
-
- # Check the relevant fingerprint (which we have to have)
- for f in user.fingerprint:
- if f.fingerprint == fpr:
- is_dm = f.keyring.debian_maintainer
- break
-
- return (uid, uid_name, is_dm)
+ def check_status(files):
+ new = byhand = 0
+ for f in files.keys():
+ if files[f]["type"] == "byhand":
+ byhand = 1
+ elif files[f].has_key("new"):
+ new = 1
+ return (new, byhand)
###############################################################################
self.pkg.reset()
def package_info(self):
- msg = ''
-
- if len(self.rejects) > 0:
- msg += "Reject Reasons:\n"
- msg += "\n".join(self.rejects)
+ """
+ Format various messages from this Upload to send to the maintainer.
+ """
- if len(self.warnings) > 0:
- msg += "Warnings:\n"
- msg += "\n".join(self.warnings)
+ msgs = (
+ ('Reject Reasons', self.rejects),
+ ('Warnings', self.warnings),
+ ('Notes', self.notes),
+ )
- if len(self.notes) > 0:
- msg += "Notes:\n"
- msg += "\n".join(self.notes)
+ msg = ''
+ for title, messages in msgs:
+ if messages:
+ msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
return msg
# Check there isn't already a changes file of the same name in one
# of the queue directories.
base_filename = os.path.basename(filename)
- for d in [ "Accepted", "Byhand", "Done", "New", "ProposedUpdates", "OldProposedUpdates" ]:
- if os.path.exists(os.path.join(Cnf["Dir::Queue::%s" % (d) ], base_filename)):
- self.rejects.append("%s: a file with this name already exists in the %s directory." % (base_filename, d))
+ if get_knownchange(base_filename):
+ self.rejects.append("%s: a file with this name already exists." % (base_filename))
# Check the .changes is non-empty
if not self.pkg.files:
'OldProposedUpdates', 'Embargoed', 'Unembargoed')
for queue in queues:
- if 'Dir::Queue::%s' % directory not in cnf:
+ if not cnf.get('Dir::Queue::%s' % queue):
continue
queuefile_path = os.path.join(
- cnf['Dir::Queue::%s' % directory], filename
+ cnf['Dir::Queue::%s' % queue], filename
)
if not os.path.exists(queuefile_path):
def check_lintian(self):
cnf = Config()
+ # Don't reject binary uploads
+ if not self.pkg.changes['architecture'].has_key('source'):
+ return
+
# Only check some distributions
valid_dist = False
for dist in ('unstable', 'experimental'):
elif etag in lintiantags['error']:
# The tag is overriden - but is not allowed to be
self.rejects.append("%s: Overriden tag %s found, but this tag may not be overwritten." % (epackage, etag))
- log("overidden tag is overridden", etag)
+ log("ftpmaster does not allow tag to be overridable", etag)
else:
# Tag is known, it is not overriden, direct reject.
self.rejects.append("%s: Found lintian output: '%s %s', automatically rejected package." % (epackage, etag, etext))
- log("auto rejecting", etag)
# Now tell if they *might* override it.
if etag in lintiantags['warning']:
+ log("auto rejecting", "overridable", etag)
self.rejects.append("%s: If you have a good reason, you may override this lintian tag." % (epackage))
+ else:
+ log("auto rejecting", "not overridable", etag)
###########################################################################
def check_urgency(self):
except:
self.rejects.append("%s: deb contents timestamp check failed [%s: %s]" % (filename, sys.exc_type, sys.exc_value))
+ def check_if_upload_is_sponsored(self, uid_email, uid_name):
+ if uid_email in [self.pkg.changes["maintaineremail"], self.pkg.changes["changedbyemail"]]:
+ sponsored = False
+ elif uid_name in [self.pkg.changes["maintainername"], self.pkg.changes["changedbyname"]]:
+ sponsored = False
+ if uid_name == "":
+ sponsored = True
+ else:
+ sponsored = True
+ if ("source" in self.pkg.changes["architecture"] and uid_email and utils.is_email_alias(uid_email)):
+ sponsor_addresses = utils.gpg_get_key_addresses(self.pkg.changes["fingerprint"])
+ if (self.pkg.changes["maintaineremail"] not in sponsor_addresses and
+ self.pkg.changes["changedbyemail"] not in sponsor_addresses):
+ self.pkg.changes["sponsoremail"] = uid_email
+
+ return sponsored
+
+
+ ###########################################################################
+ # check_signed_by_key checks
###########################################################################
+
+ def check_signed_by_key(self):
+ """Ensure the .changes is signed by an authorized uploader."""
+ session = DBConn().session()
+
+ # First of all we check that the person has proper upload permissions
+ # and that this upload isn't blocked
+ fpr = get_fingerprint(self.pkg.changes['fingerprint'], session=session)
+
+ if fpr is None:
+ self.rejects.append("Cannot find fingerprint %s" % self.pkg.changes["fingerprint"])
+ return
+
+ # TODO: Check that import-keyring adds UIDs properly
+ if not fpr.uid:
+ self.rejects.append("Cannot find uid for fingerprint %s. Please contact ftpmaster@debian.org" % fpr.fingerprint)
+ return
+
+ # Check that the fingerprint which uploaded has permission to do so
+ self.check_upload_permissions(fpr, session)
+
+ # Check that this package is not in a transition
+ self.check_transition(session)
+
+ session.close()
+
+
+ def check_upload_permissions(self, fpr, session):
+ # Check any one-off upload blocks
+ self.check_upload_blocks(fpr, session)
+
+ # Start with DM as a special case
+ # DM is a special case unfortunately, so we check it first
+ # (keys with no source access get more access than DMs in one
+ # way; DMs can only upload for their packages whether source
+ # or binary, whereas keys with no access might be able to
+ # upload some binaries)
+ if fpr.source_acl.access_level == 'dm':
+ self.check_dm_upload(fpr, session)
+ else:
+ # Check source-based permissions for other types
+ if self.pkg.changes["architecture"].has_key("source"):
+ if fpr.source_acl.access_level is None:
+ rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
+ rej += '\nPlease contact ftpmaster if you think this is incorrect'
+ self.rejects.append(rej)
+ return
+ else:
+ # If not a DM, we allow full upload rights
+ uid_email = "%s@debian.org" % (fpr.uid.uid)
+ self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
+
+
+ # Check binary upload permissions
+ # By this point we know that DMs can't have got here unless they
+ # are allowed to deal with the package concerned so just apply
+ # normal checks
+ if fpr.binary_acl.access_level == 'full':
+ return
+
+ # Otherwise we're in the map case
+ tmparches = self.pkg.changes["architecture"].copy()
+ tmparches.pop('source', None)
+
+ for bam in fpr.binary_acl_map:
+ tmparches.pop(bam.architecture.arch_string, None)
+
+ if len(tmparches.keys()) > 0:
+ if fpr.binary_reject:
+ rej = ".changes file contains files of architectures not permitted for fingerprint %s" % fpr.fingerprint
+ rej += "\narchitectures involved are: ", ",".join(tmparches.keys())
+ self.rejects.append(rej)
+ else:
+ # TODO: This is where we'll implement reject vs throw away binaries later
+ rej = "Uhm. I'm meant to throw away the binaries now but that's not implemented yet"
+ rej += "\nPlease complain to ftpmaster@debian.org as this shouldn't have been turned on"
+ rej += "\nFingerprint: %s", (fpr.fingerprint)
+ self.rejects.append(rej)
+
+
+ def check_upload_blocks(self, fpr, session):
+ """Check whether any upload blocks apply to this source, source
+ version, uid / fpr combination"""
+
+ def block_rej_template(fb):
+ rej = 'Manual upload block in place for package %s' % fb.source
+ if fb.version is not None:
+ rej += ', version %s' % fb.version
+ return rej
+
+ for fb in session.query(UploadBlock).filter_by(source = self.pkg.changes['source']).all():
+ # version is None if the block applies to all versions
+ if fb.version is None or fb.version == self.pkg.changes['version']:
+ # Check both fpr and uid - either is enough to cause a reject
+ if fb.fpr is not None:
+ if fb.fpr.fingerprint == fpr.fingerprint:
+ self.rejects.append(block_rej_template(fb) + ' for fingerprint %s\nReason: %s' % (fpr.fingerprint, fb.reason))
+ if fb.uid is not None:
+ if fb.uid == fpr.uid:
+ self.rejects.append(block_rej_template(fb) + ' for uid %s\nReason: %s' % (fb.uid.uid, fb.reason))
+
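
For reference, a sketch of the row shape this check expects, using only the attributes read above (source, version, reason, plus a fingerprint or uid); the values are invented:

    block = UploadBlock()
    block.source = "hello"                       # hypothetical source package
    block.version = None                         # None blocks every version of the package
    block.reason = "awaiting ftpmaster review"   # echoed back in the reject message
    # attach either the offending Fingerprint or the Uid; matching either one triggers the reject
    session.add(block)
    session.commit()
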
+
+ def check_dm_upload(self, fpr, session):
+ # Quoth the GR (http://www.debian.org/vote/2007/vote_003):
+ ## none of the uploaded packages are NEW
+ rej = False
+ for f in self.pkg.files.keys():
+ if self.pkg.files[f].has_key("byhand"):
+ self.rejects.append("%s may not upload BYHAND file %s" % (uid, f))
+ rej = True
+ if self.pkg.files[f].has_key("new"):
+ self.rejects.append("%s may not upload NEW file %s" % (uid, f))
+ rej = True
+
+ if rej:
+ return
+
+ ## the most recent version of the package uploaded to unstable or
+ ## experimental includes the field "DM-Upload-Allowed: yes" in the source
+ ## section of its control file
+ q = session.query(DBSource).filter_by(source=self.pkg.changes["source"])
+ q = q.join(SrcAssociation)
+ q = q.join(Suite).filter(Suite.suite_name.in_(['unstable', 'experimental']))
+ q = q.order_by(desc('source.version')).limit(1)
+
+ r = q.all()
+
+ if len(r) != 1:
+ rej = "Could not find existing source package %s in unstable or experimental and this is a DM upload" % self.pkg.changes["source"]
+ self.rejects.append(rej)
+ return
+
+ r = r[0]
+ if not r.dm_upload_allowed:
+ rej = "Source package %s does not have 'DM-Upload-Allowed: yes' in its most recent version (%s)" % (self.pkg.changes["source"], r.version)
+ self.rejects.append(rej)
+ return
+
+ ## the Maintainer: field of the uploaded .changes file corresponds with
+ ## the owner of the key used (ie, non-developer maintainers may not sponsor
+ ## uploads)
+ if self.check_if_upload_is_sponsored(fpr.uid.uid, fpr.uid.name):
+ self.rejects.append("%s (%s) is not authorised to sponsor uploads" % (fpr.uid.uid, fpr.fingerprint))
+
+ ## the most recent version of the package uploaded to unstable or
+ ## experimental lists the uploader in the Maintainer: or Uploaders: fields (ie,
+ ## non-developer maintainers cannot NMU or hijack packages)
+
+ # srcuploaders includes the maintainer
+ accept = False
+ for sup in r.srcuploaders:
+ (rfc822, rfc2047, name, email) = sup.maintainer.get_split_maintainer()
+ # Eww - I hope we never have two people with the same name in Debian
+ if email == fpr.uid.uid or name == fpr.uid.name:
+ accept = True
+ break
+
+ if not accept:
+ self.rejects.append("%s is not in Maintainer or Uploaders of source package %s" % (fpr.uid.uid, self.pkg.changes["source"]))
+ return
+
+ ## none of the packages are being taken over from other source packages
+ for b in self.pkg.changes["binary"].keys():
+ for suite in self.pkg.changes["distribution"].keys():
+ q = session.query(DBSource)
+ q = q.join(DBBinary).filter_by(package=b)
+ q = q.join(BinAssociation).join(Suite).filter_by(suite_name=suite)
+
+ for s in q.all():
+ if s.source != self.pkg.changes["source"]:
+ self.rejects.append("%s may not hijack %s from source package %s in suite %s" % (fpr.uid.uid, b, s, suite))
+
+
+
def check_transition(self, session):
cnf = Config()
return
###########################################################################
- def check_signed_by_key(self):
- """Ensure the .changes is signed by an authorized uploader."""
- session = DBConn().session()
-
- self.check_transition(session)
-
- (uid, uid_name, is_dm) = lookup_uid_from_fingerprint(self.pkg.changes["fingerprint"], session=session)
-
- # match claimed name with actual name:
- if uid is None:
- # This is fundamentally broken but need us to refactor how we get
- # the UIDs/Fingerprints in order for us to fix it properly
- uid, uid_email = self.pkg.changes["fingerprint"], uid
- may_nmu, may_sponsor = 1, 1
- # XXX by default new dds don't have a fingerprint/uid in the db atm,
- # and can't get one in there if we don't allow nmu/sponsorship
- elif is_dm is False:
- # If is_dm is False, we allow full upload rights
- uid_email = "%s@debian.org" % (uid)
- may_nmu, may_sponsor = 1, 1
- else:
- # Assume limited upload rights unless we've discovered otherwise
- uid_email = uid
- may_nmu, may_sponsor = 0, 0
-
- if uid_email in [self.pkg.changes["maintaineremail"], self.pkg.changes["changedbyemail"]]:
- sponsored = 0
- elif uid_name in [self.pkg.changes["maintainername"], self.pkg.changes["changedbyname"]]:
- sponsored = 0
- if uid_name == "": sponsored = 1
- else:
- sponsored = 1
- if ("source" in self.pkg.changes["architecture"] and
- uid_email and utils.is_email_alias(uid_email)):
- sponsor_addresses = utils.gpg_get_key_addresses(self.pkg.changes["fingerprint"])
- if (self.pkg.changes["maintaineremail"] not in sponsor_addresses and
- self.pkg.changes["changedbyemail"] not in sponsor_addresses):
- self.pkg.changes["sponsoremail"] = uid_email
-
- if sponsored and not may_sponsor:
- self.rejects.append("%s is not authorised to sponsor uploads" % (uid))
-
- if not sponsored and not may_nmu:
- should_reject = True
- highest_sid, highest_version = None, None
-
- # XXX: This reimplements in SQLA what existed before but it's fundamentally fucked
- # It ignores higher versions with the dm_upload_allowed flag set to false
- # I'm keeping the existing behaviour for now until I've gone back and
- # checked exactly what the GR says - mhy
- for si in get_sources_from_name(source=self.pkg.changes['source'], dm_upload_allowed=True, session=session):
- if highest_version is None or apt_pkg.VersionCompare(si.version, highest_version) == 1:
- highest_sid = si.source_id
- highest_version = si.version
-
- if highest_sid is None:
- self.rejects.append("Source package %s does not have 'DM-Upload-Allowed: yes' in its most recent version" % self.pkg.changes["source"])
- else:
- for sup in session.query(SrcUploader).join(DBSource).filter_by(source_id=highest_sid):
- (rfc822, rfc2047, name, email) = sup.maintainer.get_split_maintainer()
- if email == uid_email or name == uid_name:
- should_reject = False
- break
-
- if should_reject is True:
- self.rejects.append("%s is not in Maintainer or Uploaders of source package %s" % (uid, self.pkg.changes["source"]))
-
- for b in self.pkg.changes["binary"].keys():
- for suite in self.pkg.changes["distribution"].keys():
- q = session.query(DBSource)
- q = q.join(DBBinary).filter_by(package=b)
- q = q.join(BinAssociation).join(Suite).filter_by(suite_name=suite)
-
- for s in q.all():
- if s.source != self.pkg.changes["source"]:
- self.rejects.append("%s may not hijack %s from source package %s in suite %s" % (uid, b, s, suite))
-
- for f in self.pkg.files.keys():
- if self.pkg.files[f].has_key("byhand"):
- self.rejects.append("%s may not upload BYHAND file %s" % (uid, f))
- if self.pkg.files[f].has_key("new"):
- self.rejects.append("%s may not upload NEW file %s" % (uid, f))
-
- session.close()
-
+ # End check_signed_by_key checks
###########################################################################
+
def build_summaries(self):
""" Build a summary of changes the upload introduces. """
if actual_size != int(dsc_entry["size"]):
self.rejects.append("size for %s doesn't match %s." % (found, file))
+ ################################################################################
+ # This is used by process-new and process-holding to recheck a changes file
+ # at the time we're running. It mainly wraps various other internal functions
+ # and is similar to accepted_checks - these should probably be tidied up
+ # and combined
+ def recheck(self, session):
+ cnf = Config()
+ for f in self.pkg.files.keys():
+ # The .orig.tar.gz can disappear out from under us if it's a
+ # duplicate of one in the archive.
+ if not self.pkg.files.has_key(f):
+ continue
+
+ entry = self.pkg.files[f]
+
+ # Check that the source still exists
+ if entry["type"] == "deb":
+ source_version = entry["source version"]
+ source_package = entry["source package"]
+ if not self.pkg.changes["architecture"].has_key("source") \
+ and not source_exists(source_package, source_version, self.pkg.changes["distribution"].keys(), session):
+ source_epochless_version = re_no_epoch.sub('', source_version)
+ dsc_filename = "%s_%s.dsc" % (source_package, source_epochless_version)
+ found = False
+ for q in ["Accepted", "Embargoed", "Unembargoed", "Newstage"]:
+ if cnf.has_key("Dir::Queue::%s" % (q)):
+ if os.path.exists(cnf["Dir::Queue::%s" % (q)] + '/' + dsc_filename):
+ found = True
+ if not found:
+ self.rejects.append("no source found for %s %s (%s)." % (source_package, source_version, f))
+
+ # Version and file overwrite checks
+ if entry["type"] == "deb":
+ self.check_binary_against_db(f, session)
+ elif entry["type"] == "dsc":
+ self.check_source_against_db(f, session)
+ self.check_dsc_against_db(f, session)
+
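
The calling convention is the one process-new uses above; a minimal sketch, assuming `upload` is an Upload whose package data has already been loaded and an open session:

    session = DBConn().session()
    upload.recheck(session)
    if len(upload.rejects) > 0:
        print "REJECT\n%s" % '\n'.join(upload.rejects)
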
################################################################################
def accepted_checks(self, overwrite_checks, session):
# Recheck anything that relies on the database; since that's not
from textutils import fix_maintainer
from regexes import re_html_escaping, html_escaping, re_single_line_field, \
re_multi_line_field, re_srchasver, re_taint_free, \
- re_gpg_uid, re_re_mark, re_whitespace_comment, re_issource
+ re_gpg_uid, re_re_mark, re_whitespace_comment, re_issource, \
+ re_is_orig_source
from formats import parse_format, validate_changes_format
from srcformats import get_format_from_string
try:
try:
file_handle = open_file(f)
-
+
# Check for the hash entry, to not trigger a KeyError.
if not files[f].has_key(hash_key(hashname)):
rejmsg.append("%s: misses %s checksum in %s" % (f, hashname,
where))
continue
-
+
# Actually check the hash for correctness.
if hashfunc(file_handle) != files[f][hash_key(hashname)]:
rejmsg.append("%s: %s check failed in %s" % (f, hashname,
return res[0]
def which_conf_file ():
- res = socket.gethostbyaddr(socket.gethostname())
- # In case we allow local config files per user, try if one exists
- if Cnf.FindB("Config::" + res[0] + "::AllowLocalConfig"):
- homedir = os.getenv("HOME")
- confpath = os.path.join(homedir, "/etc/dak.conf")
- if os.path.exists(confpath):
- apt_pkg.ReadConfigFileISC(Cnf,default_config)
-
- # We are still in here, so there is no local config file or we do
- # not allow local files. Do the normal stuff.
- if Cnf.get("Config::" + res[0] + "::DakConfig"):
- return Cnf["Config::" + res[0] + "::DakConfig"]
+ if os.getenv("DAK_CONFIG"):
+ return os.getenv("DAK_CONFIG")
else:
- return default_config
+ res = socket.gethostbyaddr(socket.gethostname())
+ # In case we allow local config files per user, try if one exists
+ if Cnf.FindB("Config::" + res[0] + "::AllowLocalConfig"):
+ homedir = os.getenv("HOME")
+ confpath = os.path.join(homedir, "etc/dak.conf")
+ if os.path.exists(confpath):
+ apt_pkg.ReadConfigFileISC(Cnf,confpath)
+
+ # We are still in here, so there is no local config file or we do
+ # not allow local files. Do the normal stuff.
+ if Cnf.get("Config::" + res[0] + "::DakConfig"):
+ return Cnf["Config::" + res[0] + "::DakConfig"]
+ else:
+ return default_config
def which_apt_conf_file ():
res = socket.gethostbyaddr(socket.gethostname())