################################################################################
+import apt_pkg
import os
from os.path import normpath
import re
import psycopg2
import traceback
import commands
+import signal
try:
# python >= 2.6
from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
Text, ForeignKey
from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
- backref, MapperExtension, EXT_CONTINUE, object_mapper
+ backref, MapperExtension, EXT_CONTINUE, object_mapper, clear_mappers
from sqlalchemy import types as sqltypes
+from sqlalchemy.orm.collections import attribute_mapped_collection
+from sqlalchemy.ext.associationproxy import association_proxy
# Don't remove this, we re-export the exceptions to scripts which import us
from sqlalchemy.exc import *
# in the database
from config import Config
from textutils import fix_maintainer
-from dak_exceptions import DBUpdateError, NoSourceFieldError
+from dak_exceptions import DBUpdateError, NoSourceFieldError, FileExistsError
# suppress some deprecation warnings in squeeze related to sqlalchemy
import warnings
warnings.filterwarnings('ignore', \
"The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
SADeprecationWarning)
-# TODO: sqlalchemy needs some extra configuration to correctly reflect
-# the ind_deb_contents_* indexes - we ignore the warnings at the moment
-warnings.filterwarnings("ignore", 'Predicate of partial index', SAWarning)
+warnings.filterwarnings('ignore', \
+ "Predicate of partial index .* ignored during reflection", \
+ SAWarning)
################################################################################
return None
sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version in ["0.5", "0.6"]:
+if sa_major_version in ["0.5", "0.6", "0.7"]:
from sqlalchemy.databases import postgres
postgres.ischema_names['debversion'] = DebVersion
else:
- raise Exception("dak only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
+ raise Exception("dak only ported to SQLA versions 0.5 to 0.7. See daklib/dbconn.py")
################################################################################
# list
value = len(value)
elif hasattr(value, 'count'):
- # query
+ # query (but not during validation)
+ if self.in_validation:
+ continue
value = value.count()
else:
raise KeyError('Do not understand property %s.' % property)
validation_message = \
"Validation failed because property '%s' must not be empty in object\n%s"
+ in_validation = False
+
def validate(self):
'''
This function validates the not NULL constraints as returned by
getattr(self, property + '_id') is not None:
continue
if not hasattr(self, property) or getattr(self, property) is None:
- raise DBUpdateError(self.validation_message % \
- (property, str(self)))
+ # str(self) might trigger a second flush and lead to races
+ self.in_validation = True
+ message = self.validation_message % (property, str(self))
+ self.in_validation = False
+ raise DBUpdateError(message)
@classmethod
@session_wrapper
################################################################################
+class ArchiveFile(object):
+ def __init__(self, archive=None, component=None, file=None):
+ self.archive = archive
+ self.component = component
+ self.file = file
+ @property
+ def path(self):
+ return os.path.join(self.archive.path, 'pool', self.component.component_name, self.file.filename)
+
+__all__.append('ArchiveFile')
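+
+# Example (illustrative values): a file stored in an archive rooted at
+# '/srv/ftp-master.debian.org/ftp', in component 'main', with pool filename
+# 'p/pkg/pkg_1.0.dsc' resolves via ArchiveFile.path to
+# '/srv/ftp-master.debian.org/ftp/pool/main/p/pkg/pkg_1.0.dsc'.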
+
+################################################################################
+
class BinContents(ORMObject):
def __init__(self, file = None, binary = None):
self.file = file
################################################################################
+def subprocess_setup():
+ # Python installs a SIGPIPE handler by default. This is usually not what
+ # non-Python subprocesses expect.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
class DBBinary(ORMObject):
def __init__(self, package = None, source = None, version = None, \
maintainer = None, architecture = None, poolfile = None, \
- binarytype = 'deb'):
+ binarytype = 'deb', fingerprint=None):
self.package = package
self.source = source
self.version = version
self.architecture = architecture
self.poolfile = poolfile
self.binarytype = binarytype
+ self.fingerprint = fingerprint
+
+ @property
+ def pkid(self):
+ return self.binary_id
def properties(self):
return ['package', 'version', 'maintainer', 'source', 'architecture', \
'poolfile', 'binarytype', 'fingerprint', 'install_date', \
- 'suites_count', 'binary_id', 'contents_count']
+ 'suites_count', 'binary_id', 'contents_count', 'extra_sources']
def not_null_constraints(self):
return ['package', 'version', 'maintainer', 'source', 'poolfile', \
'binarytype']
- def get_component_name(self):
- return self.poolfile.location.component.component_name
+ metadata = association_proxy('key', 'value')
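+ # 'metadata' proxies the 'key' relation mapped below (a dict of
+ # MetadataKey -> BinaryMetadata objects), so that e.g.
+ # binary.metadata[metadata_key] reads and writes the underlying 'value'
+ # column directly; import_metadata_into_db() below relies on this.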
def scan_contents(self):
'''
Yields the contents of the package. Only regular files are yielded and
- the path names are normalized after converting them from either utf-8 or
- iso8859-1 encoding.
+ the path names are normalized after converting them from either utf-8
+ or iso8859-1 encoding. It yields the string ' <EMPTY PACKAGE>' if the
+ package does not contain any regular file.
'''
fullpath = self.poolfile.fullpath
- dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE)
+ dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE,
+ preexec_fn = subprocess_setup)
tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
for member in tar.getmembers():
- if member.isfile():
+ if not member.isdir():
+ name = normpath(member.name)
+ # enforce proper utf-8 encoding
try:
- name = member.name.decode('utf-8')
+ name.decode('utf-8')
except UnicodeDecodeError:
- name = member.name.decode('iso8859-1')
- yield normpath(name)
+ name = name.decode('iso8859-1').encode('utf-8')
+ yield name
tar.close()
dpkg.stdout.close()
dpkg.wait()
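+
+ # Sketch of use (names illustrative):
+ #   for name in binary.scan_contents():
+ #       print name    # e.g. 'usr/bin/foo', 'usr/share/doc/foo/copyright'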
+ def read_control(self):
+ '''
+ Reads the control information from a binary.
+
+ @rtype: text
+ @return: stanza text of the control section.
+ '''
+ import utils
+ fullpath = self.poolfile.fullpath
+ deb_file = open(fullpath, 'r')
+ stanza = utils.deb_extract_control(deb_file)
+ deb_file.close()
+
+ return stanza
+
+ def read_control_fields(self):
+ '''
+ Reads the control information from a binary and returns it
+ as a dictionary.
+
+ @rtype: dict
+ @return: fields of the control section as a dictionary.
+ '''
+ stanza = self.read_control()
+ return apt_pkg.TagSection(stanza)
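+
+ # Sketch of use (names illustrative, assuming a valid .deb in the pool):
+ #   fields = binary.read_control_fields()
+ #   print fields['Package'], fields['Version']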
+
__all__.append('DBBinary')
@session_wrapper
if binary is None:
return None
else:
- return binary.get_component_name()
+ return binary.poolfile.component.component_name
__all__.append('get_component_by_package_suite')
################################################################################
-MINIMAL_APT_CONF="""
-Dir
-{
- ArchiveDir "%(archivepath)s";
- OverrideDir "%(overridedir)s";
- CacheDir "%(cachedir)s";
-};
-
-Default
-{
- Packages::Compress ". bzip2 gzip";
- Sources::Compress ". bzip2 gzip";
- DeLinkLimit 0;
- FileMode 0664;
-}
-
-bindirectory "incoming"
-{
- Packages "Packages";
- Contents " ";
-
- BinOverride "override.sid.all3";
- BinCacheDB "packages-accepted.db";
-
- FileList "%(filelist)s";
-
- PathPrefix "";
- Packages::Extensions ".deb .udeb";
-};
-
-bindirectory "incoming/"
-{
- Sources "Sources";
- BinOverride "override.sid.all3";
- SrcOverride "override.sid.all3.src";
- FileList "%(filelist)s";
-};
-"""
-
class BuildQueue(object):
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
return '<BuildQueue %s>' % self.queue_name
- def write_metadata(self, starttime, force=False):
- # Do we write out metafiles?
- if not (force or self.generate_metadata):
- return
-
- session = DBConn().session().object_session(self)
-
- fl_fd = fl_name = ac_fd = ac_name = None
- tempdir = None
- arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
- startdir = os.getcwd()
-
- try:
- # Grab files we want to include
- newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
- # Write file list with newer files
- (fl_fd, fl_name) = mkstemp()
- for n in newer:
- os.write(fl_fd, '%s\n' % n.fullpath)
- os.close(fl_fd)
-
- cnf = Config()
-
- # Write minimal apt.conf
- # TODO: Remove hardcoding from template
- (ac_fd, ac_name) = mkstemp()
- os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
- 'filelist': fl_name,
- 'cachedir': cnf["Dir::Cache"],
- 'overridedir': cnf["Dir::Override"],
- })
- os.close(ac_fd)
-
- # Run apt-ftparchive generate
- os.chdir(os.path.dirname(ac_name))
- os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
-
- # Run apt-ftparchive release
- # TODO: Eww - fix this
- bname = os.path.basename(self.path)
- os.chdir(self.path)
- os.chdir('..')
-
- # We have to remove the Release file otherwise it'll be included in the
- # new one
- try:
- os.unlink(os.path.join(bname, 'Release'))
- except OSError:
- pass
-
- os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
-
- # Crude hack with open and append, but this whole section is and should be redone.
- if self.notautomatic:
- release=open("Release", "a")
- release.write("NotAutomatic: yes")
- release.close()
-
- # Sign if necessary
- if self.signingkey:
- keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
- if cnf.has_key("Dinstall::SigningPubKeyring"):
- keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
-
- os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
-
- # Move the files if we got this far
- os.rename('Release', os.path.join(bname, 'Release'))
- if self.signingkey:
- os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
-
- # Clean up any left behind files
- finally:
- os.chdir(startdir)
- if fl_fd:
- try:
- os.close(fl_fd)
- except OSError:
- pass
-
- if fl_name:
- try:
- os.unlink(fl_name)
- except OSError:
- pass
-
- if ac_fd:
- try:
- os.close(ac_fd)
- except OSError:
- pass
-
- if ac_name:
- try:
- os.unlink(ac_name)
- except OSError:
- pass
-
- def clean_and_update(self, starttime, Logger, dryrun=False):
- """WARNING: This routine commits for you"""
- session = DBConn().session().object_session(self)
-
- if self.generate_metadata and not dryrun:
- self.write_metadata(starttime)
-
- # Grab files older than our execution time
- older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
-
- for o in older:
- killdb = False
- try:
- if dryrun:
- Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
- else:
- Logger.log(["I: Removing %s from the queue" % o.fullpath])
- os.unlink(o.fullpath)
- killdb = True
- except OSError, e:
- # If it wasn't there, don't worry
- if e.errno == ENOENT:
- killdb = True
- else:
- # TODO: Replace with proper logging call
- Logger.log(["E: Could not remove %s" % o.fullpath])
-
- if killdb:
- session.delete(o)
-
- session.commit()
-
- for f in os.listdir(self.path):
- if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
- continue
-
- try:
- r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
- except NoResultFound:
- fp = os.path.join(self.path, f)
- if dryrun:
- Logger.log(["I: Would remove unused link %s" % fp])
- else:
- Logger.log(["I: Removing unused link %s" % fp])
- try:
- os.unlink(fp)
- except OSError:
- Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
-
- def add_file_from_pool(self, poolfile):
- """Copies a file into the pool. Assumes that the PoolFile object is
- attached to the same SQLAlchemy session as the Queue object is.
-
- The caller is responsible for committing after calling this function."""
- poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-
- # Check if we have a file of this name or this ID already
- for f in self.queuefiles:
- if f.fileid is not None and f.fileid == poolfile.file_id or \
- f.poolfile.filename == poolfile_basename:
- # In this case, update the BuildQueueFile entry so we
- # don't remove it too early
- f.lastused = datetime.now()
- DBConn().session().object_session(poolfile).add(f)
- return f
-
- # Prepare BuildQueueFile object
- qf = BuildQueueFile()
- qf.build_queue_id = self.queue_id
- qf.lastused = datetime.now()
- qf.filename = poolfile_basename
-
- targetpath = poolfile.fullpath
- queuepath = os.path.join(self.path, poolfile_basename)
-
- try:
- if self.copy_files:
- # We need to copy instead of symlink
- import utils
- utils.copy(targetpath, queuepath)
- # NULL in the fileid field implies a copy
- qf.fileid = None
- else:
- os.symlink(targetpath, queuepath)
- qf.fileid = poolfile.file_id
- except OSError:
- return None
-
- # Get the same session as the PoolFile is using and add the qf to it
- DBConn().session().object_session(poolfile).add(qf)
-
- return qf
-
-
__all__.append('BuildQueue')
-@session_wrapper
-def get_build_queue(queuename, session=None):
- """
- Returns BuildQueue object for given C{queue name}, creating it if it does not
- exist.
-
- @type queuename: string
- @param queuename: The name of the queue
-
- @type session: Session
- @param session: Optional SQLA session object (a temporary one will be
- generated if not supplied)
-
- @rtype: BuildQueue
- @return: BuildQueue object for the given queue
- """
-
- q = session.query(BuildQueue).filter_by(queue_name=queuename)
-
- try:
- return q.one()
- except NoResultFound:
- return None
-
-__all__.append('get_build_queue')
-
-################################################################################
-
-class BuildQueueFile(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
-
- @property
- def fullpath(self):
- return os.path.join(self.buildqueue.path, self.filename)
-
-
-__all__.append('BuildQueueFile')
-
-################################################################################
-
-class ChangePendingBinary(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<ChangePendingBinary %s>' % self.change_pending_binary_id
-
-__all__.append('ChangePendingBinary')
-
-################################################################################
-
-class ChangePendingFile(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<ChangePendingFile %s>' % self.change_pending_file_id
-
-__all__.append('ChangePendingFile')
-
-################################################################################
-
-class ChangePendingSource(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<ChangePendingSource %s>' % self.change_pending_source_id
-
-__all__.append('ChangePendingSource')
-
################################################################################
class Component(ORMObject):
def properties(self):
return ['component_name', 'component_id', 'description', \
- 'location_count', 'meets_dfsg', 'overrides_count']
+ 'meets_dfsg', 'overrides_count']
def not_null_constraints(self):
return ['component_name']
__all__.append('get_component')
+@session_wrapper
+def get_mapped_component(component_name, session=None):
+ """get component after mappings
+
+ Evaluate component mappings from ComponentMappings in dak.conf for the
+ given component name.
+
+ @todo: ansgar wants to get rid of this. It's currently only used for
+ the security archive
+
+ @type component_name: str
+ @param component_name: component name
+
+ @param session: database session
+
+ @rtype: L{daklib.dbconn.Component} or C{None}
+ @return: component after applying maps or C{None}
+ """
+ cnf = Config()
+ for m in cnf.value_list("ComponentMappings"):
+ (src, dst) = m.split()
+ if component_name == src:
+ component_name = dst
+ component = session.query(Component).filter_by(component_name=component_name).first()
+ return component
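+
+ # Example: a (hypothetical) dak.conf entry of the form
+ #   ComponentMappings { "oldcomp newcomp"; };
+ # makes get_mapped_component('oldcomp') return the Component row for
+ # 'newcomp'; any other name is looked up unchanged.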
+
+__all__.append('get_mapped_component')
+
+@session_wrapper
+def get_component_names(session=None):
+ """
+ Returns list of strings of component names.
+
+ @rtype: list
+ @return: list of strings of component names
+ """
+
+ return [ x.component_name for x in session.query(Component).all() ]
+
+__all__.append('get_component_names')
+
################################################################################
class DBConfig(object):
################################################################################
+class ExternalOverride(ORMObject):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<ExternalOverride %s = %s: %s>' % (self.package, self.key, self.value)
+
+__all__.append('ExternalOverride')
+
+################################################################################
+
class PoolFile(ORMObject):
- def __init__(self, filename = None, location = None, filesize = -1, \
+ def __init__(self, filename = None, filesize = -1, \
md5sum = None):
self.filename = filename
- self.location = location
self.filesize = filesize
self.md5sum = md5sum
@property
def fullpath(self):
- return os.path.join(self.location.path, self.filename)
+ session = object_session(self)
+ af = session.query(ArchiveFile).join(Archive).filter(ArchiveFile.file == self).first()
+ return af.path
+
+ @property
+ def component(self):
+ session = object_session(self)
+ component_id = session.query(ArchiveFile.component_id).filter(ArchiveFile.file == self) \
+ .group_by(ArchiveFile.component_id).one()
+ return session.query(Component).get(component_id)
+
+ @property
+ def basename(self):
+ return os.path.basename(self.filename)
def is_valid(self, filesize = -1, md5sum = None):
return self.filesize == long(filesize) and self.md5sum == md5sum
def properties(self):
return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
- 'sha256sum', 'location', 'source', 'binary', 'last_used']
+ 'sha256sum', 'source', 'binary', 'last_used']
def not_null_constraints(self):
- return ['filename', 'md5sum', 'location']
-
-__all__.append('PoolFile')
-
-@session_wrapper
-def check_poolfile(filename, filesize, md5sum, location_id, session=None):
- """
- Returns a tuple:
- (ValidFileFound [boolean], PoolFile object or None)
+ return ['filename', 'md5sum']
- @type filename: string
- @param filename: the filename of the file to check against the DB
-
- @type filesize: int
- @param filesize: the size of the file to check against the DB
-
- @type md5sum: string
- @param md5sum: the md5sum of the file to check against the DB
-
- @type location_id: int
- @param location_id: the id of the location to look in
-
- @rtype: tuple
- @return: Tuple of length 2.
- - If valid pool file found: (C{True}, C{PoolFile object})
- - If valid pool file not found:
- - (C{False}, C{None}) if no file found
- - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
- """
-
- poolfile = session.query(Location).get(location_id). \
- files.filter_by(filename=filename).first()
- valid = False
- if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
- valid = True
-
- return (valid, poolfile)
-
-__all__.append('check_poolfile')
-
-# TODO: the implementation can trivially be inlined at the place where the
-# function is called
-@session_wrapper
-def get_poolfile_by_id(file_id, session=None):
- """
- Returns a PoolFile objects or None for the given id
+ def identical_to(self, filename):
+ """
+ Compare size and hash with the given file.
- @type file_id: int
- @param file_id: the id of the file to look for
+ @rtype: bool
+ @return: True if the given file has the same size and hash as this object; False otherwise
+ """
+ st = os.stat(filename)
+ if self.filesize != st.st_size:
+ return False
- @rtype: PoolFile or None
- @return: either the PoolFile object or None
- """
+ with open(filename, "r") as f:
+ sha256sum = apt_pkg.sha256sum(f)
+ if sha256sum != self.sha256sum:
+ return False
- return session.query(PoolFile).get(file_id)
+ return True
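+
+ # Sketch of intended use (names illustrative): callers that are about to
+ # overwrite a pool file can guard with
+ #   if not poolfile.identical_to(upload_path):
+ #       raise FileExistsError(...)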
-__all__.append('get_poolfile_by_id')
+__all__.append('PoolFile')
@session_wrapper
def get_poolfile_like_name(filename, session=None):
__all__.append('get_poolfile_like_name')
-@session_wrapper
-def add_poolfile(filename, datadict, location_id, session=None):
- """
- Add a new file to the pool
-
- @type filename: string
- @param filename: filename
-
- @type datadict: dict
- @param datadict: dict with needed data
-
- @type location_id: int
- @param location_id: database id of the location
-
- @rtype: PoolFile
- @return: the PoolFile object created
- """
- poolfile = PoolFile()
- poolfile.filename = filename
- poolfile.filesize = datadict["size"]
- poolfile.md5sum = datadict["md5sum"]
- poolfile.sha1sum = datadict["sha1sum"]
- poolfile.sha256sum = datadict["sha256sum"]
- poolfile.location_id = location_id
-
- session.add(poolfile)
- # Flush to get a file id (NB: This is not a commit)
- session.flush()
-
- return poolfile
-
-__all__.append('add_poolfile')
-
################################################################################
class Fingerprint(ORMObject):
key = None
signingkey = False
- for line in k.xreadlines():
+ for line in k:
field = line.split(":")
if field[0] == "pub":
key = field[4]
__all__.append('get_keyring')
+@session_wrapper
+def get_active_keyring_paths(session=None):
+ """
+ @rtype: list
+ @return: list of active keyring paths
+ """
+ return [ x.keyring_name for x in session.query(Keyring).filter(Keyring.active == True).order_by(desc(Keyring.priority)).all() ]
+
+__all__.append('get_active_keyring_paths')
+
+@session_wrapper
+def get_primary_keyring_path(session=None):
+ """
+ Get the full path to the highest priority active keyring
+
+ @rtype: str or None
+ @return: path to the active keyring with the highest priority or None if no
+ keyring is configured
+ """
+ keyrings = get_active_keyring_paths()
+
+ if len(keyrings) > 0:
+ return keyrings[0]
+ else:
+ return None
+
+__all__.append('get_primary_keyring_path')
+
################################################################################
class KeyringACLMap(object):
def __repr__(self):
return '<DBChange %s>' % self.changesname
- def clean_from_queue(self):
- session = DBConn().session().object_session(self)
-
- # Remove changes_pool_files entries
- self.poolfiles = []
-
- # Remove changes_pending_files references
- self.files = []
-
- # Clear out of queue
- self.in_queue = None
- self.approved_for_id = None
-
__all__.append('DBChange')
@session_wrapper
################################################################################
-class Location(ORMObject):
- def __init__(self, path = None, component = None):
- self.path = path
- self.component = component
- # the column 'type' should go away, see comment at mapper
- self.archive_type = 'pool'
-
- def properties(self):
- return ['path', 'location_id', 'archive_type', 'component', \
- 'files_count']
-
- def not_null_constraints(self):
- return ['path', 'archive_type']
-
-__all__.append('Location')
-
-@session_wrapper
-def get_location(location, component=None, archive=None, session=None):
- """
- Returns Location object for the given combination of location, component
- and archive
-
- @type location: string
- @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
-
- @type component: string
- @param component: the component name (if None, no restriction applied)
-
- @type archive: string
- @param archive: the archive name (if None, no restriction applied)
-
- @rtype: Location / None
- @return: Either a Location object or None if one can't be found
- """
-
- q = session.query(Location).filter_by(path=location)
-
- if archive is not None:
- q = q.join(Archive).filter_by(archive_name=archive)
-
- if component is not None:
- q = q.join(Component).filter_by(component_name=component)
-
- try:
- return q.one()
- except NoResultFound:
- return None
-
-__all__.append('get_location')
-
-################################################################################
-
class Maintainer(ORMObject):
def __init__(self, name = None):
self.name = name
################################################################################
-class DebContents(object):
+class PolicyQueue(object):
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
- return '<DebConetnts %s: %s>' % (self.package.package,self.file)
-
-__all__.append('DebContents')
+ return '<PolicyQueue %s>' % self.queue_name
+__all__.append('PolicyQueue')
-class UdebContents(object):
- def __init__(self, *args, **kwargs):
- pass
+@session_wrapper
+def get_policy_queue(queuename, session=None):
+ """
+ Returns PolicyQueue object for given C{queue name}
- def __repr__(self):
- return '<UdebConetnts %s: %s>' % (self.package.package,self.file)
+ @type queuename: string
+ @param queuename: The name of the queue
-__all__.append('UdebContents')
-
-class PendingBinContents(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<PendingBinContents %s>' % self.contents_id
-
-__all__.append('PendingBinContents')
-
-def insert_pending_content_paths(package,
- is_udeb,
- fullpaths,
- session=None):
- """
- Make sure given paths are temporarily associated with given
- package
-
- @type package: dict
- @param package: the package to associate with should have been read in from the binary control file
- @type fullpaths: list
- @param fullpaths: the list of paths of the file being associated with the binary
- @type session: SQLAlchemy session
- @param session: Optional SQLAlchemy session. If this is passed, the caller
- is responsible for ensuring a transaction has begun and committing the
- results or rolling back based on the result code. If not passed, a commit
- will be performed at the end of the function
-
- @return: True upon success, False if there is a problem
- """
-
- privatetrans = False
-
- if session is None:
- session = DBConn().session()
- privatetrans = True
-
- try:
- arch = get_architecture(package['Architecture'], session)
- arch_id = arch.arch_id
-
- # Remove any already existing recorded files for this package
- q = session.query(PendingBinContents)
- q = q.filter_by(package=package['Package'])
- q = q.filter_by(version=package['Version'])
- q = q.filter_by(architecture=arch_id)
- q.delete()
-
- for fullpath in fullpaths:
-
- if fullpath.startswith( "./" ):
- fullpath = fullpath[2:]
-
- pca = PendingBinContents()
- pca.package = package['Package']
- pca.version = package['Version']
- pca.file = fullpath
- pca.architecture = arch_id
-
- if isudeb:
- pca.type = 8 # gross
- else:
- pca.type = 7 # also gross
- session.add(pca)
-
- # Only commit if we set up the session ourself
- if privatetrans:
- session.commit()
- session.close()
- else:
- session.flush()
-
- return True
- except Exception, e:
- traceback.print_exc()
-
- # Only rollback if we set up the session ourself
- if privatetrans:
- session.rollback()
- session.close()
-
- return False
-
-__all__.append('insert_pending_content_paths')
-
-################################################################################
-
-class PolicyQueue(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<PolicyQueue %s>' % self.queue_name
-
-__all__.append('PolicyQueue')
-
-@session_wrapper
-def get_policy_queue(queuename, session=None):
- """
- Returns PolicyQueue object for given C{queue name}
-
- @type queuename: string
- @param queuename: The name of the queue
-
- @type session: Session
- @param session: Optional SQLA session object (a temporary one will be
- generated if not supplied)
+ @type session: Session
+ @param session: Optional SQLA session object (a temporary one will be
+ generated if not supplied)
@rtype: PolicyQueue
@return: PolicyQueue object for the given queue
__all__.append('get_policy_queue')
-@session_wrapper
-def get_policy_queue_from_path(pathname, session=None):
- """
- Returns PolicyQueue object for given C{path name}
-
- @type queuename: string
- @param queuename: The path
-
- @type session: Session
- @param session: Optional SQLA session object (a temporary one will be
- generated if not supplied)
+################################################################################
- @rtype: PolicyQueue
- @return: PolicyQueue object for the given queue
- """
+class PolicyQueueUpload(object):
+ def __cmp__(self, other):
+ ret = cmp(self.changes.source, other.changes.source)
+ if ret == 0:
+ ret = apt_pkg.version_compare(self.changes.version, other.changes.version)
+ if ret == 0:
+ if self.source is not None and other.source is None:
+ ret = -1
+ elif self.source is None and other.source is not None:
+ ret = 1
+ if ret == 0:
+ ret = cmp(self.changes.changesname, other.changes.changesname)
+ return ret
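+
+ # Given this __cmp__, sorted(uploads) orders by source name, then version
+ # (apt_pkg.version_compare), sourceful before binary-only, then by the
+ # .changes filename.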
+
+__all__.append('PolicyQueueUpload')
- q = session.query(PolicyQueue).filter_by(path=pathname)
+################################################################################
- try:
- return q.one()
- except NoResultFound:
- return None
+class PolicyQueueByhandFile(object):
+ pass
-__all__.append('get_policy_queue_from_path')
+__all__.append('PolicyQueueByhandFile')
################################################################################
################################################################################
+class SrcContents(ORMObject):
+ def __init__(self, file = None, source = None):
+ self.file = file
+ self.source = source
+
+ def properties(self):
+ return ['file', 'source']
+
+__all__.append('SrcContents')
+
+################################################################################
+
+from debian.deb822 import Deb822
+
+# Temporary Deb822 subclass to fix bugs with : handling; see #597249
+class Dak822(Deb822):
+ def _internal_parser(self, sequence, fields=None):
+ # The key is non-whitespace, non-colon characters before any colon.
+ key_part = r"^(?P<key>[^: \t\n\r\f\v]+)\s*:\s*"
+ single = re.compile(key_part + r"(?P<data>\S.*?)\s*$")
+ multi = re.compile(key_part + r"$")
+ multidata = re.compile(r"^\s(?P<data>.+?)\s*$")
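+ # Illustration of the three patterns above:
+ #   'Package: foo'  -> single:    key='Package', data='foo'
+ #   'Description:'  -> multi:     key='Description', data starts empty
+ #   ' continued'    -> multidata: appended to the current field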
+
+ wanted_field = lambda f: fields is None or f in fields
+
+ if isinstance(sequence, basestring):
+ sequence = sequence.splitlines()
+
+ curkey = None
+ content = ""
+ for line in self.gpg_stripped_paragraph(sequence):
+ m = single.match(line)
+ if m:
+ if curkey:
+ self[curkey] = content
+
+ if not wanted_field(m.group('key')):
+ curkey = None
+ continue
+
+ curkey = m.group('key')
+ content = m.group('data')
+ continue
+
+ m = multi.match(line)
+ if m:
+ if curkey:
+ self[curkey] = content
+
+ if not wanted_field(m.group('key')):
+ curkey = None
+ continue
+
+ curkey = m.group('key')
+ content = ""
+ continue
+
+ m = multidata.match(line)
+ if m:
+ content += '\n' + line # XXX not m.group('data')?
+ continue
+
+ if curkey:
+ self[curkey] = content
+
+
class DBSource(ORMObject):
def __init__(self, source = None, version = None, maintainer = None, \
- changedby = None, poolfile = None, install_date = None):
+ changedby = None, poolfile = None, install_date = None, fingerprint = None):
self.source = source
self.version = version
self.maintainer = maintainer
self.changedby = changedby
self.poolfile = poolfile
self.install_date = install_date
+ self.fingerprint = fingerprint
+
+ @property
+ def pkid(self):
+ return self.source_id
def properties(self):
return ['source', 'source_id', 'maintainer', 'changedby', \
'fingerprint', 'poolfile', 'version', 'suites_count', \
- 'install_date', 'binaries_count']
+ 'install_date', 'binaries_count', 'uploaders_count']
def not_null_constraints(self):
return ['source', 'version', 'install_date', 'maintainer', \
- 'changedby', 'poolfile', 'install_date']
+ 'changedby', 'poolfile']
+
+ def read_control_fields(self):
+ '''
+ Reads the control information from a dsc
+
+ @rtype: Dak822
+ @return: fields of the dsc stanza as a dictionary-like object
+ '''
+ fullpath = self.poolfile.fullpath
+ fields = Dak822(open(fullpath, 'r'))
+ return fields
+
+ metadata = association_proxy('key', 'value')
+
+ def scan_contents(self):
+ '''
+ Returns a set of names of non-directory entries. The path names are
+ normalized after converting them from either utf-8 or iso8859-1
+ encoding.
+ '''
+ fullpath = self.poolfile.fullpath
+ from daklib.contents import UnpackedSource
+ unpacked = UnpackedSource(fullpath)
+ fileset = set()
+ for name in unpacked.get_all_filenames():
+ # enforce proper utf-8 encoding
+ try:
+ name.decode('utf-8')
+ except UnicodeDecodeError:
+ name = name.decode('iso8859-1').encode('utf-8')
+ fileset.add(name)
+ return fileset
__all__.append('DBSource')
q = session.query(DBSource).filter_by(source=source). \
filter(DBSource.version.in_([source_version, orig_source_version]))
if suite != "any":
- # source must exist in suite X, or in some other suite that's
- # mapped to X, recursively... silent-maps are counted too,
- # unreleased-maps aren't.
- maps = cnf.ValueList("SuiteMappings")[:]
- maps.reverse()
- maps = [ m.split() for m in maps ]
- maps = [ (x[1], x[2]) for x in maps
- if x[0] == "map" or x[0] == "silent-map" ]
- s = [suite]
- for (from_, to) in maps:
- if from_ in s and to not in s:
- s.append(to)
-
- q = q.filter(DBSource.suites.any(Suite.suite_name.in_(s)))
+ # source must exist in 'suite' or a suite that is enhanced by 'suite'
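+ # (e.g., hypothetically, if 'experimental' has an 'Enhances' version
+ # check referencing 'unstable', a source in 'unstable' also satisfies
+ # the check for 'experimental')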
+ s = get_suite(suite, session)
+ if s:
+ enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
+ considered_suites = [ vc.reference for vc in enhances_vcs ]
+ considered_suites.append(s)
+
+ q = q.filter(DBSource.suites.any(Suite.suite_id.in_([ x.suite_id for x in considered_suites ])))
if q.count() > 0:
continue
__all__.append('get_source_in_suite')
-################################################################################
-
@session_wrapper
-def add_dsc_to_db(u, filename, session=None):
- entry = u.pkg.files[filename]
- source = DBSource()
- pfs = []
-
- source.source = u.pkg.dsc["source"]
- source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
- source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
- source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
- source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
- source.install_date = datetime.now().date()
-
- dsc_component = entry["component"]
- dsc_location_id = entry["location id"]
-
- source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
-
- # Set up a new poolfile if necessary
- if not entry.has_key("files id") or not entry["files id"]:
- filename = entry["pool name"] + filename
- poolfile = add_poolfile(filename, entry, dsc_location_id, session)
- session.flush()
- pfs.append(poolfile)
- entry["files id"] = poolfile.file_id
-
- source.poolfile_id = entry["files id"]
- session.add(source)
-
- suite_names = u.pkg.changes["distribution"].keys()
- source.suites = session.query(Suite). \
- filter(Suite.suite_name.in_(suite_names)).all()
-
- # Add the source files to the DB (files and dsc_files)
- dscfile = DSCFile()
- dscfile.source_id = source.source_id
- dscfile.poolfile_id = entry["files id"]
- session.add(dscfile)
-
- for dsc_file, dentry in u.pkg.dsc_files.items():
- df = DSCFile()
- df.source_id = source.source_id
-
- # If the .orig tarball is already in the pool, it's
- # files id is stored in dsc_files by check_dsc().
- files_id = dentry.get("files id", None)
-
- # Find the entry in the files hash
- # TODO: Bail out here properly
- dfentry = None
- for f, e in u.pkg.files.items():
- if f == dsc_file:
- dfentry = e
- break
-
- if files_id is None:
- filename = dfentry["pool name"] + dsc_file
-
- (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
- # FIXME: needs to check for -1/-2 and or handle exception
- if found and obj is not None:
- files_id = obj.file_id
- pfs.append(obj)
-
- # If still not found, add it
- if files_id is None:
- # HACK: Force sha1sum etc into dentry
- dentry["sha1sum"] = dfentry["sha1sum"]
- dentry["sha256sum"] = dfentry["sha256sum"]
- poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
- pfs.append(poolfile)
- files_id = poolfile.file_id
- else:
- poolfile = get_poolfile_by_id(files_id, session)
- if poolfile is None:
- utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
- pfs.append(poolfile)
-
- df.poolfile_id = files_id
- session.add(df)
-
- # Add the src_uploaders to the DB
- uploader_ids = [source.maintainer_id]
- if u.pkg.dsc.has_key("uploaders"):
- for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
- up = up.strip()
- uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
-
- added_ids = {}
- for up_id in uploader_ids:
- if added_ids.has_key(up_id):
- import utils
- utils.warn("Already saw uploader %s for source %s" % (up_id, source.source))
- continue
-
- added_ids[up_id]=1
-
- su = SrcUploader()
- su.maintainer_id = up_id
- su.source_id = source.source_id
- session.add(su)
-
- session.flush()
-
- return source, dsc_component, dsc_location_id, pfs
-
-__all__.append('add_dsc_to_db')
-
-@session_wrapper
-def add_deb_to_db(u, filename, session=None):
+def import_metadata_into_db(obj, session=None):
"""
- Contrary to what you might expect, this routine deals with both
- debs and udebs. That info is in 'dbtype', whilst 'type' is
- 'deb' for both of them
+ This routine works on either DBBinary or DBSource objects and imports
+ their metadata into the database
"""
- cnf = Config()
- entry = u.pkg.files[filename]
-
- bin = DBBinary()
- bin.package = entry["package"]
- bin.version = entry["version"]
- bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
- bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
- bin.arch_id = get_architecture(entry["architecture"], session).arch_id
- bin.binarytype = entry["dbtype"]
-
- # Find poolfile id
- filename = entry["pool name"] + filename
- fullpath = os.path.join(cnf["Dir::Pool"], filename)
- if not entry.get("location id", None):
- entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
-
- if entry.get("files id", None):
- poolfile = get_poolfile_by_id(bin.poolfile_id)
- bin.poolfile_id = entry["files id"]
- else:
- poolfile = add_poolfile(filename, entry, entry["location id"], session)
- bin.poolfile_id = entry["files id"] = poolfile.file_id
-
- # Find source id
- bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
- if len(bin_sources) != 1:
- raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
- (bin.package, bin.version, entry["architecture"],
- filename, bin.binarytype, u.pkg.changes["fingerprint"])
-
- bin.source_id = bin_sources[0].source_id
-
- # Add and flush object so it has an ID
- session.add(bin)
-
- suite_names = u.pkg.changes["distribution"].keys()
- bin.suites = session.query(Suite). \
- filter(Suite.suite_name.in_(suite_names)).all()
-
- session.flush()
+ fields = obj.read_control_fields()
+ for k in fields.keys():
+ try:
+ # Try raw ASCII
+ val = str(fields[k])
+ except UnicodeEncodeError:
+ # Fall back to UTF-8
+ try:
+ val = fields[k].encode('utf-8')
+ except UnicodeEncodeError:
+ # Finally try iso8859-1
+ val = fields[k].encode('iso8859-1')
+ # Otherwise we allow the exception to percolate up and we cause
+ # a reject as someone is playing silly buggers
- # Deal with contents - disabled for now
- #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
- #if not contents:
- # print "REJECT\nCould not determine contents of package %s" % bin.package
- # session.rollback()
- # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
+ obj.metadata[get_or_set_metadatakey(k, session)] = val
- return poolfile
+ session.commit_or_flush()
-__all__.append('add_deb_to_db')
+__all__.append('import_metadata_into_db')
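+
+# Typical call (sketch): after a DBBinary or DBSource has been added and
+# flushed,
+#   import_metadata_into_db(binary, session=session)
+# fills binaries_metadata/source_metadata from the control stanza via the
+# 'metadata' association proxy.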
################################################################################
################################################################################
-class SrcUploader(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<SrcUploader %s>' % self.uploader_id
-
-__all__.append('SrcUploader')
-
-################################################################################
-
SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
('SuiteID', 'suite_id'),
('Version', 'version'),
'overrides_count']
def not_null_constraints(self):
- return ['suite_name', 'version']
+ return ['suite_name']
def __eq__(self, val):
if isinstance(val, str):
return session.query(DBSource).filter_by(source = source). \
with_parent(self)
+ def get_overridesuite(self):
+ if self.overridesuite is None:
+ return self
+ else:
+ return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()
+
+ @property
+ def path(self):
+ return os.path.join(self.archive.path, 'dists', self.suite_name)
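+
+ # e.g. for an archive rooted at '/srv/ftp-master.debian.org/ftp' and
+ # suite 'unstable' this yields
+ # '/srv/ftp-master.debian.org/ftp/dists/unstable' (illustrative paths).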
+
__all__.append('Suite')
@session_wrapper
################################################################################
-# TODO: should be removed because the implementation is too trivial
@session_wrapper
def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
"""
- Returns list of Architecture objects for given C{suite} name
+ Returns list of Architecture objects for given C{suite} name. The list is
+ empty if the suite does not exist.
@type suite: str
@param suite: Suite name to search for
@return: list of Architecture objects for the given name (may be empty)
"""
- return get_suite(suite, session).get_architectures(skipsrc, skipall)
+ s = get_suite(suite, session)
+ if s is None:
+ return []
+ return s.get_architectures(skipsrc, skipall)
__all__.append('get_suite_architectures')
################################################################################
-class SuiteSrcFormat(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<SuiteSrcFormat (%s, %s)>' % (self.suite_id, self.src_format_id)
-
-__all__.append('SuiteSrcFormat')
-
-@session_wrapper
-def get_suite_src_formats(suite, session=None):
- """
- Returns list of allowed SrcFormat for C{suite}.
-
- @type suite: str
- @param suite: Suite name to search for
-
- @type session: Session
- @param session: Optional SQL session object (a temporary one will be
- generated if not supplied)
-
- @rtype: list
- @return: the list of allowed source formats for I{suite}
- """
-
- q = session.query(SrcFormat)
- q = q.join(SuiteSrcFormat)
- q = q.join(Suite).filter_by(suite_name=suite)
- q = q.order_by('format_name')
-
- return q.all()
-
-__all__.append('get_suite_src_formats')
-
-################################################################################
-
class Uid(ORMObject):
def __init__(self, uid = None, name = None):
self.uid = uid
################################################################################
+class MetadataKey(ORMObject):
+ def __init__(self, key = None):
+ self.key = key
+
+ def properties(self):
+ return ['key']
+
+ def not_null_constraints(self):
+ return ['key']
+
+__all__.append('MetadataKey')
+
+@session_wrapper
+def get_or_set_metadatakey(keyname, session=None):
+ """
+ Returns MetadataKey object for given keyname.
+
+ If no matching keyname is found, a row is inserted.
+
+ @type keyname: string
+ @param keyname: The keyname to add
+
+ @type session: SQLAlchemy
+ @param session: Optional SQL session object (a temporary one will be
+ generated if not supplied). If not passed, a commit will be performed at
+ the end of the function, otherwise the caller is responsible for committing.
+
+ @rtype: MetadataKey
+ @return: the metadatakey object for the given keyname
+ """
+
+ q = session.query(MetadataKey).filter_by(key=keyname)
+
+ try:
+ ret = q.one()
+ except NoResultFound:
+ ret = MetadataKey(keyname)
+ session.add(ret)
+ session.commit_or_flush()
+
+ return ret
+
+__all__.append('get_or_set_metadatakey')
+
+################################################################################
+
+class BinaryMetadata(ORMObject):
+ def __init__(self, key = None, value = None, binary = None):
+ self.key = key
+ self.value = value
+ self.binary = binary
+
+ def properties(self):
+ return ['binary', 'key', 'value']
+
+ def not_null_constraints(self):
+ return ['value']
+
+__all__.append('BinaryMetadata')
+
+################################################################################
+
+class SourceMetadata(ORMObject):
+ def __init__(self, key = None, value = None, source = None):
+ self.key = key
+ self.value = value
+ self.source = source
+
+ def properties(self):
+ return ['source', 'key', 'value']
+
+ def not_null_constraints(self):
+ return ['value']
+
+__all__.append('SourceMetadata')
+
+################################################################################
+
+class VersionCheck(ORMObject):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def properties(self):
+ #return ['suite_id', 'check', 'reference_id']
+ return ['check']
+
+ def not_null_constraints(self):
+ return ['suite', 'check', 'reference']
+
+__all__.append('VersionCheck')
+
+@session_wrapper
+def get_version_checks(suite_name, check = None, session = None):
+ suite = get_suite(suite_name, session)
+ if not suite:
+ # Make sure that what we return is iterable so that list comprehensions
+ # involving this don't cause a traceback
+ return []
+ q = session.query(VersionCheck).filter_by(suite=suite)
+ if check:
+ q = q.filter_by(check=check)
+ return q.all()
+
+__all__.append('get_version_checks')
+
+################################################################################
+
class DBConn(object):
"""
database module init.
self.__createconn()
def __setuptables(self):
- tables_with_primary = (
+ tables = (
'architecture',
'archive',
'bin_associations',
+ 'bin_contents',
'binaries',
+ 'binaries_metadata',
'binary_acl',
'binary_acl_map',
'build_queue',
- 'build_queue_files',
'changelogs_text',
'changes',
'component',
'config',
- 'changes_pending_binaries',
- 'changes_pending_files',
- 'changes_pending_source',
'dsc_files',
+ 'external_overrides',
+ 'extra_src_references',
'files',
+ 'files_archive_map',
'fingerprint',
'keyrings',
'keyring_acl_map',
- 'location',
'maintainer',
+ 'metadata_keys',
'new_comments',
+ # TODO: the maintainer column in table override should be removed.
+ 'override',
'override_type',
- 'pending_bin_contents',
'policy_queue',
+ 'policy_queue_upload',
+ 'policy_queue_upload_binaries_map',
+ 'policy_queue_byhand_file',
'priority',
'section',
'source',
'source_acl',
+ 'source_metadata',
'src_associations',
+ 'src_contents',
'src_format',
'src_uploaders',
'suite',
- 'uid',
- 'upload_blocks',
- )
-
- tables_no_primary = (
- 'changes_pending_files_map',
- 'changes_pending_source_files',
- 'changes_pool_files',
- 'deb_contents',
- # TODO: the maintainer column in table override should be removed.
- 'override',
'suite_architectures',
- 'suite_src_formats',
'suite_build_queue_copy',
- 'udeb_contents',
+ 'suite_src_formats',
+ 'uid',
+ 'upload_blocks',
+ 'version_check',
)
views = (
'almost_obsolete_all_associations',
'almost_obsolete_src_associations',
'any_associations_source',
- 'bin_assoc_by_arch',
'bin_associations_binaries',
'binaries_suite_arch',
- 'binfiles_suite_component_arch',
'changelogs',
'file_arch_suite',
'newest_all_associations',
'suite_arch_by_name',
)
- # Sqlalchemy version 0.5 fails to reflect the SERIAL type
- # correctly and that is why we have to use a workaround. It can
- # be removed as soon as we switch to version 0.6.
- for table_name in tables_with_primary:
+ for table_name in tables:
table = Table(table_name, self.db_meta, \
- Column('id', Integer, primary_key = True), \
autoload=True, useexisting=True)
setattr(self, 'tbl_%s' % table_name, table)
- for table_name in tables_no_primary:
- table = Table(table_name, self.db_meta, autoload=True)
- setattr(self, 'tbl_%s' % table_name, table)
-
- # bin_contents needs special attention until update #41 has been
- # applied
- self.tbl_bin_contents = Table('bin_contents', self.db_meta, \
- Column('file', Text, primary_key = True),
- Column('binary_id', Integer, ForeignKey('binaries.id'), \
- primary_key = True),
- autoload=True, useexisting=True)
-
for view_name in views:
view = Table(view_name, self.db_meta, autoload=True)
setattr(self, 'view_%s' % view_name, view)
mapper(Architecture, self.tbl_architecture,
properties = dict(arch_id = self.tbl_architecture.c.id,
suites = relation(Suite, secondary=self.tbl_suite_architectures,
- order_by='suite_name',
- backref=backref('architectures', order_by='arch_string'))),
+ order_by=self.tbl_suite.c.suite_name,
+ backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
extension = validator)
mapper(Archive, self.tbl_archive,
properties = dict(archive_id = self.tbl_archive.c.id,
archive_name = self.tbl_archive.c.name))
- mapper(PendingBinContents, self.tbl_pending_bin_contents,
- properties = dict(contents_id =self.tbl_pending_bin_contents.c.id,
- filename = self.tbl_pending_bin_contents.c.filename,
- package = self.tbl_pending_bin_contents.c.package,
- version = self.tbl_pending_bin_contents.c.version,
- arch = self.tbl_pending_bin_contents.c.arch,
- otype = self.tbl_pending_bin_contents.c.type))
-
- mapper(DebContents, self.tbl_deb_contents,
- properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
- package=self.tbl_deb_contents.c.package,
- suite=self.tbl_deb_contents.c.suite,
- arch=self.tbl_deb_contents.c.arch,
- section=self.tbl_deb_contents.c.section,
- filename=self.tbl_deb_contents.c.filename))
-
- mapper(UdebContents, self.tbl_udeb_contents,
- properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
- package=self.tbl_udeb_contents.c.package,
- suite=self.tbl_udeb_contents.c.suite,
- arch=self.tbl_udeb_contents.c.arch,
- section=self.tbl_udeb_contents.c.section,
- filename=self.tbl_udeb_contents.c.filename))
+ mapper(ArchiveFile, self.tbl_files_archive_map,
+ properties = dict(archive = relation(Archive, backref='files'),
+ component = relation(Component),
+ file = relation(PoolFile, backref='archives')))
mapper(BuildQueue, self.tbl_build_queue,
- properties = dict(queue_id = self.tbl_build_queue.c.id))
-
- mapper(BuildQueueFile, self.tbl_build_queue_files,
- properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
- poolfile = relation(PoolFile, backref='buildqueueinstances')))
+ properties = dict(queue_id = self.tbl_build_queue.c.id,
+ suite = relation(Suite, primaryjoin=(self.tbl_build_queue.c.suite_id==self.tbl_suite.c.id))))
mapper(DBBinary, self.tbl_binaries,
properties = dict(binary_id = self.tbl_binaries.c.id,
arch_id = self.tbl_binaries.c.architecture,
architecture = relation(Architecture),
poolfile_id = self.tbl_binaries.c.file,
- poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
+ poolfile = relation(PoolFile),
binarytype = self.tbl_binaries.c.type,
fingerprint_id = self.tbl_binaries.c.sig_fpr,
fingerprint = relation(Fingerprint),
install_date = self.tbl_binaries.c.install_date,
suites = relation(Suite, secondary=self.tbl_bin_associations,
- backref=backref('binaries', lazy='dynamic'))),
+ backref=backref('binaries', lazy='dynamic')),
+ extra_sources = relation(DBSource, secondary=self.tbl_extra_src_references,
+ backref=backref('extra_binary_references', lazy='dynamic')),
+ key = relation(BinaryMetadata, cascade='all',
+ collection_class=attribute_mapped_collection('key'))),
extension = validator)
mapper(BinaryACL, self.tbl_binary_acl,
poolfile_id = self.tbl_dsc_files.c.file,
poolfile = relation(PoolFile)))
+ mapper(ExternalOverride, self.tbl_external_overrides,
+ properties = dict(
+ suite_id = self.tbl_external_overrides.c.suite,
+ suite = relation(Suite),
+ component_id = self.tbl_external_overrides.c.component,
+ component = relation(Component)))
+
mapper(PoolFile, self.tbl_files,
properties = dict(file_id = self.tbl_files.c.id,
- filesize = self.tbl_files.c.size,
- location_id = self.tbl_files.c.location,
- location = relation(Location,
- # using lazy='dynamic' in the back
- # reference because we have A LOT of
- # files in one location
- backref=backref('files', lazy='dynamic'))),
+ filesize = self.tbl_files.c.size),
extension = validator)
mapper(Fingerprint, self.tbl_fingerprint,
mapper(DBChange, self.tbl_changes,
properties = dict(change_id = self.tbl_changes.c.id,
- poolfiles = relation(PoolFile,
- secondary=self.tbl_changes_pool_files,
- backref="changeslinks"),
seen = self.tbl_changes.c.seen,
source = self.tbl_changes.c.source,
binaries = self.tbl_changes.c.binaries,
maintainer = self.tbl_changes.c.maintainer,
changedby = self.tbl_changes.c.changedby,
date = self.tbl_changes.c.date,
- version = self.tbl_changes.c.version,
- files = relation(ChangePendingFile,
- secondary=self.tbl_changes_pending_files_map,
- backref="changesfile"),
- in_queue_id = self.tbl_changes.c.in_queue,
- in_queue = relation(PolicyQueue,
- primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
- approved_for_id = self.tbl_changes.c.approved_for))
-
- mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
- properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
-
- mapper(ChangePendingFile, self.tbl_changes_pending_files,
- properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
- filename = self.tbl_changes_pending_files.c.filename,
- size = self.tbl_changes_pending_files.c.size,
- md5sum = self.tbl_changes_pending_files.c.md5sum,
- sha1sum = self.tbl_changes_pending_files.c.sha1sum,
- sha256sum = self.tbl_changes_pending_files.c.sha256sum))
-
- mapper(ChangePendingSource, self.tbl_changes_pending_source,
- properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
- change = relation(DBChange),
- maintainer = relation(Maintainer,
- primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
- changedby = relation(Maintainer,
- primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
- fingerprint = relation(Fingerprint),
- source_files = relation(ChangePendingFile,
- secondary=self.tbl_changes_pending_source_files,
- backref="pending_sources")))
-
+ version = self.tbl_changes.c.version))
mapper(KeyringACLMap, self.tbl_keyring_acl_map,
properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
keyring = relation(Keyring, backref="keyring_acl_map"),
architecture = relation(Architecture)))
- mapper(Location, self.tbl_location,
- properties = dict(location_id = self.tbl_location.c.id,
- component_id = self.tbl_location.c.component,
- component = relation(Component, backref='location'),
- archive_id = self.tbl_location.c.archive,
- archive = relation(Archive),
- # FIXME: the 'type' column is old cruft and
- # should be removed in the future.
- archive_type = self.tbl_location.c.type),
- extension = validator)
-
mapper(Maintainer, self.tbl_maintainer,
properties = dict(maintainer_id = self.tbl_maintainer.c.id,
maintains_sources = relation(DBSource, backref='maintainer',
overridetype_id = self.tbl_override_type.c.id))
mapper(PolicyQueue, self.tbl_policy_queue,
- properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+ properties = dict(policy_queue_id = self.tbl_policy_queue.c.id,
+ suite = relation(Suite, primaryjoin=(self.tbl_policy_queue.c.suite_id == self.tbl_suite.c.id))))
+
+ mapper(PolicyQueueUpload, self.tbl_policy_queue_upload,
+ properties = dict(
+ changes = relation(DBChange),
+ policy_queue = relation(PolicyQueue, backref='uploads'),
+ target_suite = relation(Suite),
+ source = relation(DBSource),
+ binaries = relation(DBBinary, secondary=self.tbl_policy_queue_upload_binaries_map),
+ ))
+
+ mapper(PolicyQueueByhandFile, self.tbl_policy_queue_byhand_file,
+ properties = dict(
+ upload = relation(PolicyQueueUpload, backref='byhand'),
+ )
+ )
mapper(Priority, self.tbl_priority,
properties = dict(priority_id = self.tbl_priority.c.id))
version = self.tbl_source.c.version,
maintainer_id = self.tbl_source.c.maintainer,
poolfile_id = self.tbl_source.c.file,
- poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
+ poolfile = relation(PoolFile),
fingerprint_id = self.tbl_source.c.sig_fpr,
fingerprint = relation(Fingerprint),
changedby_id = self.tbl_source.c.changedby,
primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
suites = relation(Suite, secondary=self.tbl_src_associations,
backref=backref('sources', lazy='dynamic')),
- srcuploaders = relation(SrcUploader)),
+ uploaders = relation(Maintainer,
+ secondary=self.tbl_src_uploaders),
+ key = relation(SourceMetadata, cascade='all',
+ collection_class=attribute_mapped_collection('key'))),
extension = validator)
mapper(SourceACL, self.tbl_source_acl,
properties = dict(src_format_id = self.tbl_src_format.c.id,
format_name = self.tbl_src_format.c.format_name))
- mapper(SrcUploader, self.tbl_src_uploaders,
- properties = dict(uploader_id = self.tbl_src_uploaders.c.id,
- source_id = self.tbl_src_uploaders.c.source,
- source = relation(DBSource,
- primaryjoin=(self.tbl_src_uploaders.c.source==self.tbl_source.c.id)),
- maintainer_id = self.tbl_src_uploaders.c.maintainer,
- maintainer = relation(Maintainer,
- primaryjoin=(self.tbl_src_uploaders.c.maintainer==self.tbl_maintainer.c.id))))
-
mapper(Suite, self.tbl_suite,
properties = dict(suite_id = self.tbl_suite.c.id,
- policy_queue = relation(PolicyQueue),
+ policy_queue = relation(PolicyQueue, primaryjoin=(self.tbl_suite.c.policy_queue_id == self.tbl_policy_queue.c.id)),
copy_queues = relation(BuildQueue,
- secondary=self.tbl_suite_build_queue_copy)),
+ secondary=self.tbl_suite_build_queue_copy),
+ srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
+ backref=backref('suites', lazy='dynamic')),
+ archive = relation(Archive, backref='suites')),
extension = validator)
- mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
- properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
- suite = relation(Suite, backref='suitesrcformats'),
- src_format_id = self.tbl_suite_src_formats.c.src_format,
- src_format = relation(SrcFormat)))
-
mapper(Uid, self.tbl_uid,
properties = dict(uid_id = self.tbl_uid.c.id,
fingerprint = relation(Fingerprint)),
backref=backref('contents', lazy='dynamic', cascade='all')),
file = self.tbl_bin_contents.c.file))
+ mapper(SrcContents, self.tbl_src_contents,
+ properties = dict(
+ source = relation(DBSource,
+ backref=backref('contents', lazy='dynamic', cascade='all')),
+ file = self.tbl_src_contents.c.file))
+
+ mapper(MetadataKey, self.tbl_metadata_keys,
+ properties = dict(
+ key_id = self.tbl_metadata_keys.c.key_id,
+ key = self.tbl_metadata_keys.c.key))
+
+ mapper(BinaryMetadata, self.tbl_binaries_metadata,
+ properties = dict(
+ binary_id = self.tbl_binaries_metadata.c.bin_id,
+ binary = relation(DBBinary),
+ key_id = self.tbl_binaries_metadata.c.key_id,
+ key = relation(MetadataKey),
+ value = self.tbl_binaries_metadata.c.value))
+
+ mapper(SourceMetadata, self.tbl_source_metadata,
+ properties = dict(
+ source_id = self.tbl_source_metadata.c.src_id,
+ source = relation(DBSource),
+ key_id = self.tbl_source_metadata.c.key_id,
+ key = relation(MetadataKey),
+ value = self.tbl_source_metadata.c.value))
+
+ mapper(VersionCheck, self.tbl_version_check,
+ properties = dict(
+ suite_id = self.tbl_version_check.c.suite,
+ suite = relation(Suite, primaryjoin=self.tbl_version_check.c.suite==self.tbl_suite.c.id),
+ reference_id = self.tbl_version_check.c.reference,
+ reference = relation(Suite, primaryjoin=self.tbl_version_check.c.reference==self.tbl_suite.c.id, lazy='joined')))
+
## Connection functions
def __createconn(self):
from config import Config
cnf = Config()
- if cnf["DB::Host"]:
+ if cnf.has_key("DB::Service"):
+ connstr = "postgresql://service=%s" % cnf["DB::Service"]
+ elif cnf.has_key("DB::Host"):
# TCP/IP
- connstr = "postgres://%s" % cnf["DB::Host"]
- if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+ connstr = "postgresql://%s" % cnf["DB::Host"]
+ if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
connstr += ":%s" % cnf["DB::Port"]
connstr += "/%s" % cnf["DB::Name"]
else:
# Unix Socket
- connstr = "postgres:///%s" % cnf["DB::Name"]
- if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
+ connstr = "postgresql:///%s" % cnf["DB::Name"]
+ if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
connstr += "?port=%s" % cnf["DB::Port"]
- if not cnf.has_key('DB::PoolSize'):
- cnf['DB::PoolSize'] = '5'
- if not cnf.has_key('DB::MaxOverflow'):
- cnf['DB::MaxOverflow'] = '10'
-
- self.db_pg = create_engine(connstr, echo=self.debug,
- pool_size=int(cnf['DB::PoolSize']),
- max_overflow=int(cnf['DB::MaxOverflow']))
- self.db_meta = MetaData()
- self.db_meta.bind = self.db_pg
- self.db_smaker = sessionmaker(bind=self.db_pg,
- autoflush=True,
- autocommit=False)
-
- self.__setuptables()
- self.__setupmappers()
-
- def session(self):
- return self.db_smaker()
+
+ engine_args = { 'echo': self.debug }
+ if cnf.has_key('DB::PoolSize'):
+ engine_args['pool_size'] = int(cnf['DB::PoolSize'])
+ if cnf.has_key('DB::MaxOverflow'):
+ engine_args['max_overflow'] = int(cnf['DB::MaxOverflow'])
+ if sa_major_version == '0.6' and cnf.has_key('DB::Unicode') and \
+ cnf['DB::Unicode'] == 'false':
+ engine_args['use_native_unicode'] = False
+
+ # Monkey patch in a new dialect in order to support the service= syntax
+ import sqlalchemy.dialects.postgresql
+ from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
+ class PGDialect_psycopg2_dak(PGDialect_psycopg2):
+ def create_connect_args(self, url):
+ if str(url).startswith('postgresql://service='):
+ # Eww
+ servicename = str(url)[21:]
+ return (['service=%s' % servicename], {})
+ else:
+ return PGDialect_psycopg2.create_connect_args(self, url)
+
+ sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak
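+ # With this in place a (hypothetical) dak.conf setting
+ #   DB::Service "projectb";
+ # yields the URL 'postgresql://service=projectb', and psycopg2/libpq then
+ # resolve the connection parameters from pg_service.conf.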
+
+ try:
+ self.db_pg = create_engine(connstr, **engine_args)
+ self.db_meta = MetaData()
+ self.db_meta.bind = self.db_pg
+ self.db_smaker = sessionmaker(bind=self.db_pg,
+ autoflush=True,
+ autocommit=False)
+
+ self.__setuptables()
+ self.__setupmappers()
+
+ except OperationalError as e:
+ import utils
+ utils.fubar("Cannot connect to database (%s)" % str(e))
+
+ self.pid = os.getpid()
+
+ def session(self, work_mem = 0):
+ '''
+ Returns a new session object. If a work_mem parameter is provided, a new
+ transaction is started and the work_mem parameter is set for this
+ transaction. The work_mem parameter is measured in MB. A default value
+ will be used if the parameter is not set.
+ '''
+ # reinitialize DBConn in new processes
+ if self.pid != os.getpid():
+ clear_mappers()
+ self.__createconn()
+ session = self.db_smaker()
+ if work_mem > 0:
+ session.execute("SET LOCAL work_mem TO '%d MB'" % work_mem)
+ return session
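+
+# Usage sketch: DBConn().session(work_mem = 1024) returns a session whose
+# current transaction runs with work_mem set to 1024 MB (value illustrative).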
__all__.append('DBConn')