import commands
import signal
+from daklib.gpg import SignedFile
+
try:
    # python >= 2.6
    import json
except ImportError:
    # python <= 2.5
    import simplejson as json
sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version in ["0.5", "0.6"]:
+if sa_major_version in ["0.5", "0.6", "0.7"]:
from sqlalchemy.databases import postgres
postgres.ischema_names['debversion'] = DebVersion
else:
- raise Exception("dak only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
+ raise Exception("dak only ported to SQLA versions 0.5 to 0.7. See daklib/dbconn.py")
################################################################################
################################################################################
+class ArchiveFile(object):
+    def __init__(self, archive=None, component=None, file=None):
+        self.archive = archive
+        self.component = component
+        self.file = file
+
+    @property
+    def path(self):
+        return os.path.join(self.archive.path, 'pool', self.component.component_name, self.file.filename)
+
+__all__.append('ArchiveFile')
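
The path property just joins the archive root, the literal 'pool' directory, the component name and the pool filename. A minimal sketch of that composition, using hypothetical stand-in objects rather than mapped rows:

    import os

    class _Stub(object):
        # hypothetical stand-in for the mapped Archive/Component/PoolFile rows
        def __init__(self, **kw):
            self.__dict__.update(kw)

    af = _Stub(archive=_Stub(path='/srv/archive'),
               component=_Stub(component_name='main'),
               file=_Stub(filename='p/pkg/pkg_1.0.dsc'))
    # mirrors ArchiveFile.path above
    print os.path.join(af.archive.path, 'pool', af.component.component_name, af.file.filename)
    # -> /srv/archive/pool/main/p/pkg/pkg_1.0.dsc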
+
+################################################################################
+
class BinContents(ORMObject):
def __init__(self, file = None, binary = None):
self.file = file
class DBBinary(ORMObject):
def __init__(self, package = None, source = None, version = None, \
maintainer = None, architecture = None, poolfile = None, \
- binarytype = 'deb'):
+ binarytype = 'deb', fingerprint=None):
self.package = package
self.source = source
self.version = version
self.architecture = architecture
self.poolfile = poolfile
self.binarytype = binarytype
+ self.fingerprint = fingerprint
@property
def pkid(self):
@rtype: text
@return: stanza text of the control section.
'''
- import apt_inst
+ import utils
fullpath = self.poolfile.fullpath
deb_file = open(fullpath, 'r')
- stanza = apt_inst.debExtractControl(deb_file)
+ stanza = utils.deb_extract_control(deb_file)
deb_file.close()
return stanza
# Prepare BuildQueueFile object
qf = BuildQueueFile()
qf.build_queue_id = self.queue_id
- qf.lastused = datetime.now()
qf.filename = poolfile_basename
targetpath = poolfile.fullpath
__all__.append('get_component')
+@session_wrapper
+def get_mapped_component(component_name, session=None):
+    """Get component after applying mappings
+
+    Evaluate component mappings from ComponentMappings in dak.conf for the
+    given component name.
+
+    @todo: ansgar wants to get rid of this. It's currently only used for
+           the security archive
+
+    @type component_name: str
+    @param component_name: component name
+
+    @param session: database session
+
+    @rtype: L{daklib.dbconn.Component} or C{None}
+    @return: component after applying maps or C{None}
+    """
+    cnf = Config()
+    for m in cnf.value_list("ComponentMappings"):
+        (src, dst) = m.split()
+        if component_name == src:
+            component_name = dst
+    component = session.query(Component).filter_by(component_name=component_name).first()
+    return component
+
+__all__.append('get_mapped_component')
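
ComponentMappings entries are plain "source destination" pairs and are applied in list order, so a rewritten name can be rewritten again by a later entry. A hedged sketch of just the rewriting step, with a made-up mappings list standing in for dak.conf and no database session:

    def _map_component_name(component_name, mappings):
        # same loop as get_mapped_component above, minus the final DB lookup
        for m in mappings:
            (src, dst) = m.split()
            if component_name == src:
                component_name = dst
        return component_name

    assert _map_component_name('contrib', ['contrib non-free']) == 'non-free'
    assert _map_component_name('a', ['a b', 'b c']) == 'c'  # entries chain in order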
+
@session_wrapper
def get_component_names(session=None):
"""
@property
def fullpath(self):
-        return os.path.join(self.location.path, self.filename)
+        session = object_session(self)
+        af = session.query(ArchiveFile).join(Archive).filter(ArchiveFile.file == self).first()
+        return af.path
+
+    @property
+    def component(self):
+        session = object_session(self)
+        component_id = session.query(ArchiveFile.component_id).filter(ArchiveFile.file == self) \
+                              .group_by(ArchiveFile.component_id).one()
+        return session.query(Component).get(component_id)
+
+    @property
+    def basename(self):
+        return os.path.basename(self.filename)
def is_valid(self, filesize = -1, md5sum = None):
return self.filesize == long(filesize) and self.md5sum == md5sum
def properties(self):
return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
- 'sha256sum', 'location', 'source', 'binary', 'last_used']
+ 'sha256sum', 'source', 'binary', 'last_used']
def not_null_constraints(self):
- return ['filename', 'md5sum', 'location']
+ return ['filename', 'md5sum']
def identical_to(self, filename):
"""
################################################################################
+class PolicyQueueUpload(object):
+    def __cmp__(self, other):
+        ret = cmp(self.changes.source, other.changes.source)
+        if ret == 0:
+            ret = apt_pkg.version_compare(self.changes.version, other.changes.version)
+        if ret == 0:
+            if self.source is not None and other.source is None:
+                ret = -1
+            elif self.source is None and other.source is not None:
+                ret = 1
+        if ret == 0:
+            ret = cmp(self.changes.changesname, other.changes.changesname)
+        return ret
+
+__all__.append('PolicyQueueUpload')
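
__cmp__ gives uploads in a policy queue a stable order: by source package name, then by version under Debian's comparison rules, with an upload that carries source sorting before a binary-only upload of the same version, and the .changes file name as the final tie-breaker. A small illustration of the version step, assuming python-apt is installed:

    import apt_pkg
    apt_pkg.init()

    # '~' sorts before anything, including the end of the string,
    # so a release candidate precedes the final version
    assert apt_pkg.version_compare('1.0~rc1-1', '1.0-1') < 0
    assert apt_pkg.version_compare('1.0-1', '1.0-1') == 0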
+
+################################################################################
+
+class PolicyQueueByhandFile(object):
+    pass
+
+__all__.append('PolicyQueueByhandFile')
+
+################################################################################
+
class Priority(ORMObject):
def __init__(self, priority = None, level = None):
self.priority = priority
################################################################################
-from debian.debfile import Deb822
-
-# Temporary Deb822 subclass to fix bugs with : handling; see #597249
-class Dak822(Deb822):
- def _internal_parser(self, sequence, fields=None):
- # The key is non-whitespace, non-colon characters before any colon.
- key_part = r"^(?P<key>[^: \t\n\r\f\v]+)\s*:\s*"
- single = re.compile(key_part + r"(?P<data>\S.*?)\s*$")
- multi = re.compile(key_part + r"$")
- multidata = re.compile(r"^\s(?P<data>.+?)\s*$")
-
- wanted_field = lambda f: fields is None or f in fields
-
- if isinstance(sequence, basestring):
- sequence = sequence.splitlines()
-
- curkey = None
- content = ""
- for line in self.gpg_stripped_paragraph(sequence):
- m = single.match(line)
- if m:
- if curkey:
- self[curkey] = content
-
- if not wanted_field(m.group('key')):
- curkey = None
- continue
-
- curkey = m.group('key')
- content = m.group('data')
- continue
-
- m = multi.match(line)
- if m:
- if curkey:
- self[curkey] = content
-
- if not wanted_field(m.group('key')):
- curkey = None
- continue
-
- curkey = m.group('key')
- content = ""
- continue
-
- m = multidata.match(line)
- if m:
- content += '\n' + line # XXX not m.group('data')?
- continue
-
- if curkey:
- self[curkey] = content
-
-
class DBSource(ORMObject):
def __init__(self, source = None, version = None, maintainer = None, \
- changedby = None, poolfile = None, install_date = None):
+ changedby = None, poolfile = None, install_date = None, fingerprint = None):
self.source = source
self.version = version
self.maintainer = maintainer
self.changedby = changedby
self.poolfile = poolfile
self.install_date = install_date
+ self.fingerprint = fingerprint
@property
def pkid(self):
def not_null_constraints(self):
return ['source', 'version', 'install_date', 'maintainer', \
- 'changedby', 'poolfile', 'install_date']
+ 'changedby', 'poolfile']
def read_control_fields(self):
'''
@return: fields is the dsc information in a dictionary form
'''
fullpath = self.poolfile.fullpath
-        fields = Dak822(open(self.poolfile.fullpath, 'r'))
+        contents = open(fullpath, 'r').read()
+        signed_file = SignedFile(contents, keyrings=[], require_signature=False)
+        fields = apt_pkg.TagSection(signed_file.contents)
return fields
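
read_control_fields now strips any OpenPGP armor itself rather than relying on the removed Dak822 class: SignedFile(..., require_signature=False) yields the payload of signed files and, as the method relies on, passes unsigned input through, and apt_pkg.TagSection parses the resulting stanza. A minimal sketch of the parsing half on an in-memory stanza, assuming python-apt:

    import apt_pkg

    stanza = "Source: example\nVersion: 1.0-1\nBuild-Depends: debhelper (>= 8)\n"
    fields = apt_pkg.TagSection(stanza)
    assert fields['Source'] == 'example'
    assert fields['Version'] == '1.0-1'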
metadata = association_proxy('key', 'value')
if suite != "any":
# source must exist in 'suite' or a suite that is enhanced by 'suite'
s = get_suite(suite, session)
- enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
- considered_suites = [ vc.reference for vc in enhances_vcs ]
- considered_suites.append(s)
+    if s:
+        enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
+        considered_suites = [ vc.reference for vc in enhances_vcs ]
+        considered_suites.append(s)
-    q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))
+        q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))
if q.count() > 0:
continue
# Find source id
bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+
+    # If we couldn't find anything and the upload contains Arch: source,
+    # fall back to trying the source package and version from the upload.
+    # This maintains backwards compatibility with previous dak behaviour
+    # and deals with slightly broken binary debs which don't properly
+    # declare their source package name.
+    if len(bin_sources) == 0:
+        if u.pkg.changes["architecture"].has_key("source") \
+           and u.pkg.dsc.has_key("source") and u.pkg.dsc.has_key("version"):
+            bin_sources = get_sources_from_name(u.pkg.dsc["source"], u.pkg.dsc["version"], session=session)
+
+    # If we couldn't find a source here, we reject.
+    # TODO: Fix this so that it doesn't kill process-upload and instead just
+    #       performs a reject.  To be honest, we should probably spot this
+    #       *much* earlier than here
if len(bin_sources) != 1:
raise NoSourceFieldError("Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
(bin.package, bin.version, entry["architecture"],
else:
return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()
+    @property
+    def path(self):
+        return os.path.join(self.archive.path, 'dists', self.suite_name)
+
__all__.append('Suite')
@session_wrapper
'external_overrides',
'extra_src_references',
'files',
+ 'files_archive_map',
'fingerprint',
'keyrings',
'keyring_acl_map',
'override',
'override_type',
'policy_queue',
+ 'policy_queue_upload',
+ 'policy_queue_upload_binaries_map',
+ 'policy_queue_byhand_file',
'priority',
'section',
'source',
'any_associations_source',
'bin_associations_binaries',
'binaries_suite_arch',
- 'binfiles_suite_component_arch',
'changelogs',
'file_arch_suite',
'newest_all_associations',
mapper(Architecture, self.tbl_architecture,
properties = dict(arch_id = self.tbl_architecture.c.id,
suites = relation(Suite, secondary=self.tbl_suite_architectures,
- order_by='suite_name',
- backref=backref('architectures', order_by='arch_string'))),
+ order_by=self.tbl_suite.c.suite_name,
+ backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
extension = validator)
mapper(Archive, self.tbl_archive,
properties = dict(archive_id = self.tbl_archive.c.id,
archive_name = self.tbl_archive.c.name))
+ mapper(ArchiveFile, self.tbl_files_archive_map,
+ properties = dict(archive = relation(Archive, backref='files'),
+ component = relation(Component),
+ file = relation(PoolFile, backref='archives')))
+
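
The new files_archive_map mapper gives Archive a files backref and PoolFile an archives backref, so pool contents can be walked per archive. A hedged sketch of a query through those relations; the session handling and the archive name are assumptions for illustration:

    def pool_paths(session, archive_name='ftp-master'):
        # archive.files comes from the backref on the ArchiveFile mapper above
        archive = session.query(Archive).filter_by(archive_name=archive_name).one()
        return [af.path for af in archive.files]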
mapper(BuildQueue, self.tbl_build_queue,
- properties = dict(queue_id = self.tbl_build_queue.c.id))
+ properties = dict(queue_id = self.tbl_build_queue.c.id,
+ suite = relation(Suite, primaryjoin=(self.tbl_build_queue.c.suite_id==self.tbl_suite.c.id))))
mapper(BuildQueueFile, self.tbl_build_queue_files,
properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
arch_id = self.tbl_binaries.c.architecture,
architecture = relation(Architecture),
poolfile_id = self.tbl_binaries.c.file,
- poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
+ poolfile = relation(PoolFile),
binarytype = self.tbl_binaries.c.type,
fingerprint_id = self.tbl_binaries.c.sig_fpr,
fingerprint = relation(Fingerprint),
mapper(PoolFile, self.tbl_files,
properties = dict(file_id = self.tbl_files.c.id,
- filesize = self.tbl_files.c.size,
- location_id = self.tbl_files.c.location,
- location = relation(Location,
- # using lazy='dynamic' in the back
- # reference because we have A LOT of
- # files in one location
- backref=backref('files', lazy='dynamic'))),
+ filesize = self.tbl_files.c.size),
extension = validator)
mapper(Fingerprint, self.tbl_fingerprint,
mapper(PolicyQueue, self.tbl_policy_queue,
properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+ mapper(PolicyQueueUpload, self.tbl_policy_queue_upload,
+ properties = dict(
+ changes = relation(DBChange),
+ policy_queue = relation(PolicyQueue, backref='uploads'),
+ target_suite = relation(Suite),
+ source = relation(DBSource),
+ binaries = relation(DBBinary, secondary=self.tbl_policy_queue_upload_binaries_map),
+ ))
+
+ mapper(PolicyQueueByhandFile, self.tbl_policy_queue_byhand_file,
+ properties = dict(
+ upload = relation(PolicyQueueUpload, backref='byhand'),
+ )
+ )
+
mapper(Priority, self.tbl_priority,
properties = dict(priority_id = self.tbl_priority.c.id))
version = self.tbl_source.c.version,
maintainer_id = self.tbl_source.c.maintainer,
poolfile_id = self.tbl_source.c.file,
- poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
+ poolfile = relation(PoolFile),
fingerprint_id = self.tbl_source.c.sig_fpr,
fingerprint = relation(Fingerprint),
changedby_id = self.tbl_source.c.changedby,
copy_queues = relation(BuildQueue,
secondary=self.tbl_suite_build_queue_copy),
srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
- backref=backref('suites', lazy='dynamic'))),
+ backref=backref('suites', lazy='dynamic')),
+ archive = relation(Archive, backref='suites')),
extension = validator)
mapper(Uid, self.tbl_uid,