5 @contact: Debian FTPMaster <ftpmaster@debian.org>
6 @copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
7 @copyright: 2008-2009 Mark Hymers <mhy@debian.org>
8 @copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
9 @copyright: 2009 Mike O'Connor <stew@debian.org>
10 @license: GNU General Public License version 2 or later
13 # This program is free software; you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation; either version 2 of the License, or
16 # (at your option) any later version.
18 # This program is distributed in the hope that it will be useful,
19 # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 # GNU General Public License for more details.
23 # You should have received a copy of the GNU General Public License
24 # along with this program; if not, write to the Free Software
25 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 ################################################################################
29 # < mhy> I need a funny comment
30 # < sgran> two peanuts were walking down a dark street
31 # < sgran> one was a-salted
32 # * mhy looks up the definition of "funny"
34 ################################################################################
38 from os.path import normpath
45 from daklib.gpg import SignedFile
52 import simplejson as json
54 from datetime import datetime, timedelta
55 from errno import ENOENT
56 from tempfile import mkstemp, mkdtemp
57 from subprocess import Popen, PIPE
58 from tarfile import TarFile
60 from inspect import getargspec
63 from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
65 from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
66 backref, MapperExtension, EXT_CONTINUE, object_mapper, clear_mappers
67 from sqlalchemy import types as sqltypes
68 from sqlalchemy.orm.collections import attribute_mapped_collection
69 from sqlalchemy.ext.associationproxy import association_proxy
71 # Don't remove this, we re-export the exceptions to scripts which import us
72 from sqlalchemy.exc import *
73 from sqlalchemy.orm.exc import NoResultFound
75 # Only import Config until Queue stuff is changed to store its config
77 from config import Config
78 from textutils import fix_maintainer
79 from dak_exceptions import DBUpdateError, NoSourceFieldError, FileExistsError
# suppress some deprecation warnings in squeeze related to sqlalchemy
# NOTE(review): the trailing category argument (presumably SADeprecationWarning)
# and closing parenthesis of both filterwarnings() calls are not visible in
# this copy of the file -- recover from upstream before editing.
warnings.filterwarnings('ignore', \
    "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
warnings.filterwarnings('ignore', \
    "Predicate of partial index .* ignored during reflection", \
91 ################################################################################
# Patch in support for the debversion field type so that it works during
# NOTE(review): upstream wraps the next two assignments in a try/except so
# that SQLAlchemy 0.5 (which has no UserDefinedType) falls back to
# TypeEngine; the try/except lines are not visible in this copy -- confirm.
# that is for sqlalchemy 0.6
UserDefinedType = sqltypes.UserDefinedType
# this one for sqlalchemy 0.5
UserDefinedType = sqltypes.TypeEngine
class DebVersion(UserDefinedType):
    """SQLAlchemy column type for PostgreSQL's 'debversion' type."""

    def get_col_spec(self):
        # NOTE(review): body not visible in this copy -- presumably returns
        # the column type name 'debversion'; confirm against upstream.

    def bind_processor(self, dialect):
        # NOTE(review): body not visible in this copy -- presumably no bind
        # conversion (returns None); confirm against upstream.

    # ' = None' is needed for sqlalchemy 0.5:
    def result_processor(self, dialect, coltype = None):
        # NOTE(review): body not visible in this copy -- presumably no result
        # conversion (returns None); confirm against upstream.
# Register the debversion type with the postgres dialect so that table
# reflection recognises it.
sa_major_version = sqlalchemy.__version__[0:3]
if sa_major_version in ["0.5", "0.6", "0.7"]:
    from sqlalchemy.databases import postgres
    postgres.ischema_names['debversion'] = DebVersion
# NOTE(review): an 'else:' line belongs before the raise below (otherwise the
# exception would be unconditional); it is not visible in this copy.
raise Exception("dak only ported to SQLA versions 0.5 to 0.7. See daklib/dbconn.py")
121 ################################################################################
123 __all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion']
125 ################################################################################
def session_wrapper(fn):
    """
    Wrapper around common ".., session=None):" handling. If the wrapped
    function is called without passing 'session', we create a local one
    and destroy it when the function ends.
    Also attaches a commit_or_flush method to the session; if we created a
    local session, this is a synonym for session.commit(), otherwise it is a
    synonym for session.flush().

    NOTE(review): several lines of this decorator are not visible in this
    copy (the elif branch header, the else before the flush assignment, the
    try/finally around fn(), session.close(), and the final 'return wrapped').
    Recover the full text from upstream before editing any logic.
    """
    def wrapped(*args, **kwargs):
        private_transaction = False

        # Find the session object
        session = kwargs.get('session')

        if len(args) <= len(getargspec(fn)[0]) - 1:
            # No session specified as last argument or in kwargs
            private_transaction = True
            session = kwargs['session'] = DBConn().session()

        # Session is last argument in args
        # NOTE(review): the 'elif' header for this branch is not visible.
        session = args[-1] = DBConn().session()
        private_transaction = True

        if private_transaction:
            session.commit_or_flush = session.commit
        # NOTE(review): an 'else:' belongs before the next line -- confirm.
        session.commit_or_flush = session.flush

        return fn(*args, **kwargs)

        if private_transaction:
            # We created a session; close it.

    wrapped.__doc__ = fn.__doc__
    wrapped.func_name = fn.func_name
174 __all__.append('session_wrapper')
176 ################################################################################
class ORMObject(object):
    """
    ORMObject is a base class for all ORM classes mapped by SQLalchemy. All
    derived classes must implement the properties() method.

    NOTE(review): numerous lines of this class (method headers, else/continue
    branches, a 'data = {}' initialisation) are not visible in this copy of
    the file.  The code below is kept byte-for-byte and only annotated --
    recover the full text from upstream before changing any logic.
    """

    def properties(self):
        """
        This method should be implemented by all derived classes and returns a
        list of the important properties. The properties 'created' and
        'modified' will be added automatically. A suffix '_count' should be
        added to properties that are lists or query objects. The most important
        property name should be returned as the first element in the list
        because it is used by repr().
        """

    # NOTE(review): presumably the body of a 'json(self)' method; its 'def'
    # header is not visible in this copy -- confirm against upstream.
    """
    Returns a JSON representation of the object based on the properties
    returned from the properties() method.
    """
    # add created and modified
    all_properties = self.properties() + ['created', 'modified']
    for property in all_properties:
        # check for list or query
        if property[-6:] == '_count':
            real_property = property[:-6]
            if not hasattr(self, real_property):
            value = getattr(self, real_property)
            if hasattr(value, '__len__'):
            elif hasattr(value, 'count'):
                # query (but not during validation)
                if self.in_validation:
                value = value.count()
            raise KeyError('Do not understand property %s.' % property)
        if not hasattr(self, property):
        value = getattr(self, property)
        elif isinstance(value, ORMObject):
            # use repr() for ORMObject types
        # we want a string for all other types because json cannot
        data[property] = value
    return json.dumps(data)

    # NOTE(review): 'def classname(self):' header not visible in this copy.
    """
    Returns the name of the class.
    """
    return type(self).__name__

    # NOTE(review): 'def __repr__(self):' header not visible in this copy.
    """
    Returns a short string representation of the object using the first
    element from the properties() method.
    """
    primary_property = self.properties()[0]
    value = getattr(self, primary_property)
    return '<%s %s>' % (self.classname(), str(value))

    # NOTE(review): 'def __str__(self):' header not visible in this copy.
    """
    Returns a human readable form of the object using the properties()
    """
    return '<%s %s>' % (self.classname(), self.json())

    def not_null_constraints(self):
        """
        Returns a list of properties that must be not NULL. Derived classes
        should override this method if needed.
        """

    # Format string used when a NOT NULL validation fails.
    validation_message = \
        "Validation failed because property '%s' must not be empty in object\n%s"

    # Set while str() is computed for a validation error; guards json()
    # against issuing a query (and a 2nd flush) during validation.
    in_validation = False

    # NOTE(review): 'def validate(self):' header not visible in this copy.
    """
    This function validates the not NULL constraints as returned by
    not_null_constraints(). It raises the DBUpdateError exception if
    """
    for property in self.not_null_constraints():
        # TODO: It is a bit awkward that the mapper configuration allow
        # directly setting the numeric _id columns. We should get rid of it
        if hasattr(self, property + '_id') and \
            getattr(self, property + '_id') is not None:
        if not hasattr(self, property) or getattr(self, property) is None:
            # str() might lead to races due to a 2nd flush
            self.in_validation = True
            message = self.validation_message % (property, str(self))
            self.in_validation = False
            raise DBUpdateError(message)

    # NOTE(review): the '@classmethod' decorator for get() is not visible here.
    def get(cls, primary_key, session = None):
        """
        This is a support function that allows getting an object by its primary
        Architecture.get(3[, session])
        instead of the more verbose
        session.query(Architecture).get(3)
        """
        return session.query(cls).get(primary_key)

    def session(self, replace = False):
        """
        Returns the current session that is associated with the object. May
        return None is object is in detached state.
        """
        return object_session(self)

    def clone(self, session = None):
        """
        Clones the current object in a new session and returns the new clone. A
        fresh session is created if the optional session parameter is not
        provided. The function will fail if a session is provided and has
        RATIONALE: SQLAlchemy's session is not thread safe. This method clones
        an existing object to allow several threads to work with their own
        instances of an ORMObject.
        WARNING: Only persistent (committed) objects can be cloned. Changes
        made to the original object that are not committed yet will get lost.
        The session of the new object will always be rolled back to avoid
        """
        if self.session() is None:
            raise RuntimeError( \
                'Method clone() failed for detached object:\n%s' % self)
        self.session().flush()
        mapper = object_mapper(self)
        primary_key = mapper.primary_key_from_instance(self)
        object_class = self.__class__
        # NOTE(review): an 'if session is None:' guard belongs before the
        # next line (matching the 'elif' below); it is not visible here.
        session = DBConn().session()
        elif len(session.new) + len(session.dirty) + len(session.deleted) > 0:
            raise RuntimeError( \
                'Method clone() failed due to unflushed changes in session.')
        new_object = session.query(object_class).get(primary_key)
        if new_object is None:
            raise RuntimeError( \
                'Method clone() failed for non-persistent object:\n%s' % self)
        # NOTE(review): the trailing 'return new_object' is not visible here.
351 __all__.append('ORMObject')
353 ################################################################################
class Validator(MapperExtension):
    """
    This class calls the validate() method for each instance for the
    'before_update' and 'before_insert' events. A global object validator is
    used for configuring the individual mappers.
    """

    def before_update(self, mapper, connection, instance):
        # NOTE(review): body not visible in this copy -- presumably calls
        # instance.validate() and returns EXT_CONTINUE; confirm upstream.

    def before_insert(self, mapper, connection, instance):
        # NOTE(review): body not visible in this copy -- presumably calls
        # instance.validate() and returns EXT_CONTINUE; confirm upstream.
370 validator = Validator()
372 ################################################################################
class Architecture(ORMObject):
    """An architecture known to the archive (e.g. 'amd64' or 'source')."""

    def __init__(self, arch_string=None, description=None):
        self.arch_string = arch_string
        self.description = description

    def __eq__(self, val):
        # Allow comparing directly against a plain architecture-name string.
        if isinstance(val, str):
            return self.arch_string == val
        # Anything else: defer to the default comparison machinery.
        return NotImplemented

    def __ne__(self, val):
        # Mirror of __eq__ for string operands.
        if isinstance(val, str):
            return self.arch_string != val
        return NotImplemented

    def properties(self):
        # The first entry is used by ORMObject.__repr__().
        return ['arch_string', 'arch_id', 'suites_count']

    def not_null_constraints(self):
        return ['arch_string']
397 __all__.append('Architecture')
def get_architecture(architecture, session=None):
    """
    Returns database id for given C{architecture}.

    @type architecture: string
    @param architecture: The name of the architecture

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @return: Architecture object for the given arch (None if not present)
    """
    q = session.query(Architecture).filter_by(arch_string=architecture)
    # NOTE(review): the 'try:' / 'return q.one()' / 'return None' lines of
    # the usual one-or-None lookup pattern are not visible in this copy --
    # recover from upstream before editing.
    except NoResultFound:
424 # TODO: should be removed because the implementation is too trivial
def get_architecture_suites(architecture, session=None):
    """
    Return the list of Suite objects for the given C{architecture} name.

    @type architecture: str
    @param architecture: Architecture name to search for

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @return: list of Suite objects for the given name (may be empty)
    """
    # Look the architecture up first, then follow its 'suites' relation.
    arch = get_architecture(architecture, session)
    return arch.suites
443 __all__.append('get_architecture_suites')
445 ################################################################################
class Archive(object):
    """ORM class for a row of the 'archive' table."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): body not visible in this copy (presumably 'pass').

    # NOTE(review): the 'def __repr__(self):' header is not visible here.
    return '<Archive %s>' % self.archive_name
454 __all__.append('Archive')
def get_archive(archive, session=None):
    """
    returns database id for given C{archive}.

    @type archive: string
    @param archive: the name of the archive

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @return: Archive object for the given name (None if not present)
    """
    # Archive names are stored lower-case.
    archive = archive.lower()

    q = session.query(Archive).filter_by(archive_name=archive)
    # NOTE(review): the 'try:' / 'return q.one()' / 'return None' lines are
    # not visible in this copy -- recover from upstream before editing.
    except NoResultFound:
481 __all__.append('get_archive')
483 ################################################################################
class ArchiveFile(object):
    """Associates a pool file with the archive and component it lives in."""
    def __init__(self, archive=None, component=None, file=None):
        self.archive = archive
        self.component = component
        # NOTE(review): 'self.file = file' and the '@property / def
        # fullpath(self):' header below are not visible in this copy.
    # Full on-disk path: <archive path>/pool/<component name>/<filename>.
    return os.path.join(self.archive.path, 'pool', self.component.component_name, self.file.filename)
494 __all__.append('ArchiveFile')
496 ################################################################################
class BinContents(ORMObject):
    """A single path name contained in a binary package."""
    def __init__(self, file = None, binary = None):
        # NOTE(review): body not visible in this copy -- presumably assigns
        # self.file and self.binary; confirm against upstream.

    def properties(self):
        # First entry is used by ORMObject.__repr__().
        return ['file', 'binary']
506 __all__.append('BinContents')
508 ################################################################################
def subprocess_setup():
    """Restore the default SIGPIPE disposition in a child process.

    Python installs its own SIGPIPE handler, which most non-Python
    subprocesses do not expect; pass this as preexec_fn to Popen so the
    child gets the conventional SIG_DFL behaviour.
    """
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
class DBBinary(ORMObject):
    """ORM class for a binary package (a row of the 'binaries' table).

    NOTE(review): several lines of this class are not visible in this copy
    (e.g. 'self.source = source' in __init__, the pkid property header,
    parts of scan_contents/read_control).  Code is kept byte-for-byte and
    only annotated -- recover the full text from upstream before editing.
    """

    def __init__(self, package = None, source = None, version = None, \
        maintainer = None, architecture = None, poolfile = None, \
        binarytype = 'deb', fingerprint=None):
        self.package = package
        self.version = version
        self.maintainer = maintainer
        self.architecture = architecture
        self.poolfile = poolfile
        self.binarytype = binarytype
        self.fingerprint = fingerprint

    # NOTE(review): a property header (presumably '@property / def
    # pkid(self):') is not visible above this line.
    return self.binary_id

    def properties(self):
        # First entry ('package') is used by ORMObject.__repr__().
        return ['package', 'version', 'maintainer', 'source', 'architecture', \
            'poolfile', 'binarytype', 'fingerprint', 'install_date', \
            'suites_count', 'binary_id', 'contents_count', 'extra_sources']

    def not_null_constraints(self):
        # NOTE(review): the continuation line of this list is not visible.
        return ['package', 'version', 'maintainer', 'source', 'poolfile', \

    # Dict-style proxy onto the binary's metadata key/value rows.
    metadata = association_proxy('key', 'value')

    def get_component_name(self):
        # The component is derived from the pool file's location.
        return self.poolfile.location.component.component_name

    def scan_contents(self):
        """
        Yields the contents of the package. Only regular files are yielded and
        the path names are normalized after converting them from either utf-8
        or iso8859-1 encoding. It yields the string ' <EMPTY PACKAGE>' if the
        package does not contain any regular file.
        """
        fullpath = self.poolfile.fullpath
        # Stream the data.tar out of the .deb without unpacking to disk.
        dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE,
            preexec_fn = subprocess_setup)
        tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
        for member in tar.getmembers():
            if not member.isdir():
                name = normpath(member.name)
                # enforce proper utf-8 encoding
                # NOTE(review): the 'try:' line and utf-8 decode attempt are
                # not visible in this copy.
                except UnicodeDecodeError:
                    name = name.decode('iso8859-1').encode('utf-8')
                # NOTE(review): the 'yield' and the empty-package fallback
                # are not visible in this copy.

    def read_control(self):
        """
        Reads the control information from a binary.

        @return: stanza text of the control section.
        """
        fullpath = self.poolfile.fullpath
        deb_file = open(fullpath, 'r')
        stanza = utils.deb_extract_control(deb_file)
        # NOTE(review): 'deb_file.close()' and 'return stanza' are not
        # visible in this copy -- as shown, the file handle would leak.

    def read_control_fields(self):
        """
        Reads the control information from a binary and return

        @return: fields of the control section as a dictionary.
        """
        stanza = self.read_control()
        return apt_pkg.TagSection(stanza)
597 __all__.append('DBBinary')
def get_suites_binary_in(package, session=None):
    """
    Return the list of Suite objects that contain a binary of the given
    C{package} name.

    @param package: DBBinary package name to search for

    @return: list of Suite objects for the given package
    """
    query = session.query(Suite)
    query = query.filter(Suite.binaries.any(DBBinary.package == package))
    return query.all()
613 __all__.append('get_suites_binary_in')
def get_component_by_package_suite(package, suite_list, arch_list=[], session=None):
    """
    Returns the component name of the newest binary package in suite_list or
    None if no package is found. The result can be optionally filtered by a list
    of architecture names.

    @param package: DBBinary package name to search for

    @type suite_list: list of str
    @param suite_list: list of suite_name items

    @type arch_list: list of str
    @param arch_list: optional list of arch_string items that defaults to []

    @rtype: str or NoneType
    @return: name of component or None

    NOTE(review): arch_list uses a mutable default; it is only read here,
    but arch_list=None with a local default would be safer upstream.
    """
    q = session.query(DBBinary).filter_by(package = package). \
        join(DBBinary.suites).filter(Suite.suite_name.in_(suite_list))
    if len(arch_list) > 0:
        q = q.join(DBBinary.architecture). \
            filter(Architecture.arch_string.in_(arch_list))
    # Newest version wins.
    binary = q.order_by(desc(DBBinary.version)).first()
    # NOTE(review): the 'if binary is None: return None / else:' lines are
    # not visible in this copy -- recover from upstream before editing.
    return binary.get_component_name()
646 __all__.append('get_component_by_package_suite')
648 ################################################################################
class BinaryACL(object):
    """ORM class for a row of the 'binary_acl' table."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): body not visible in this copy (presumably 'pass').

    # NOTE(review): the 'def __repr__(self):' header is not visible here.
    return '<BinaryACL %s>' % self.binary_acl_id
657 __all__.append('BinaryACL')
659 ################################################################################
class BinaryACLMap(object):
    """ORM class for a row of the 'binary_acl_map' table."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): body not visible in this copy (presumably 'pass').

    # NOTE(review): the 'def __repr__(self):' header is not visible here.
    return '<BinaryACLMap %s>' % self.binary_acl_map_id
668 __all__.append('BinaryACLMap')
670 ################################################################################
675 ArchiveDir "%(archivepath)s";
676 OverrideDir "%(overridedir)s";
677 CacheDir "%(cachedir)s";
682 Packages::Compress ". bzip2 gzip";
683 Sources::Compress ". bzip2 gzip";
688 bindirectory "incoming"
693 BinOverride "override.sid.all3";
694 BinCacheDB "packages-accepted.db";
696 FileList "%(filelist)s";
699 Packages::Extensions ".deb .udeb";
702 bindirectory "incoming/"
705 BinOverride "override.sid.all3";
706 SrcOverride "override.sid.all3.src";
707 FileList "%(filelist)s";
class BuildQueue(object):
    """ORM class for a build queue (e.g. the buildd 'accepted' queue).

    NOTE(review): many lines of this class are not visible in this copy
    (try/except and else branches, loop headers, returns, cleanup code).
    Everything below is kept byte-for-byte and only annotated -- recover
    the full text from upstream before changing any logic.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): body not visible in this copy (presumably 'pass').

    # NOTE(review): the 'def __repr__(self):' header is not visible here.
    return '<BuildQueue %s>' % self.queue_name

    def write_metadata(self, starttime, force=False):
        """Regenerate Packages/Sources/Release metadata for this queue."""
        # Do we write out metafiles?
        if not (force or self.generate_metadata):
            # NOTE(review): the early 'return' is not visible here.

        session = DBConn().session().object_session(self)

        fl_fd = fl_name = ac_fd = ac_name = None
        # All architectures except 'source', space-separated for Release.
        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
        startdir = os.getcwd()

        # Grab files we want to include
        newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
        newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
        # Write file list with newer files
        (fl_fd, fl_name) = mkstemp()
        # NOTE(review): the 'for n in newer:' loop header is not visible.
        os.write(fl_fd, '%s\n' % n.fullpath)

        # Write minimal apt.conf
        # TODO: Remove hardcoding from template
        (ac_fd, ac_name) = mkstemp()
        # NOTE(review): the remaining dict entries ('filelist', ...) and the
        # closing parentheses of this call are not visible in this copy.
        os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
            'cachedir': cnf["Dir::Cache"],
            'overridedir': cnf["Dir::Override"],

        # Run apt-ftparchive generate
        os.chdir(os.path.dirname(ac_name))
        os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))

        # Run apt-ftparchive release
        # TODO: Eww - fix this
        bname = os.path.basename(self.path)

        # We have to remove the Release file otherwise it'll be included in the
        os.unlink(os.path.join(bname, 'Release'))

        os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))

        # Crude hack with open and append, but this whole section is and should be redone.
        if self.notautomatic:
            release=open("Release", "a")
            release.write("NotAutomatic: yes\n")

        # Sign if necessary.
        # NOTE(review): the surrounding signing condition is not visible here.
        keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
        if cnf.has_key("Dinstall::SigningPubKeyring"):
            keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]

        os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))

        # Move the files if we got this far
        os.rename('Release', os.path.join(bname, 'Release'))
        os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))

        # Clean up any left behind files

    def clean_and_update(self, starttime, Logger, dryrun=False):
        """WARNING: This routine commits for you"""
        session = DBConn().session().object_session(self)

        if self.generate_metadata and not dryrun:
            self.write_metadata(starttime)

        # Grab files older than our execution time
        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
        older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()

        # NOTE(review): the 'for o in older:' loop, the dryrun branch, and
        # the try/except around the unlink are not visible in this copy.
        Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
        Logger.log(["I: Removing %s from the queue" % o.fullpath])
        os.unlink(o.fullpath)
        # If it wasn't there, don't worry
        if e.errno == ENOENT:
        # TODO: Replace with proper logging call
        Logger.log(["E: Could not remove %s" % o.fullpath])

        # Remove apt-ftparchive output files that no longer have a DB entry.
        for f in os.listdir(self.path):
            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
            if not self.contains_filename(f):
                fp = os.path.join(self.path, f)
                Logger.log(["I: Would remove unused link %s" % fp])
                Logger.log(["I: Removing unused link %s" % fp])
        Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])

    def contains_filename(self, filename):
        """
        @returns True if filename is supposed to be in the queue; False otherwise
        """
        session = DBConn().session().object_session(self)
        if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
            # NOTE(review): 'return True' not visible in this copy.
        elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
            # NOTE(review): 'return True' and a final 'return False' are not
            # visible in this copy.

    def add_file_from_pool(self, poolfile):
        """Copies a file into the pool. Assumes that the PoolFile object is
        attached to the same SQLAlchemy session as the Queue object is.
        The caller is responsible for committing after calling this function."""
        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]

        # Check if we have a file of this name or this ID already
        for f in self.queuefiles:
            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
               (f.poolfile is not None and f.poolfile.filename == poolfile_basename):
                # In this case, update the BuildQueueFile entry so we
                # don't remove it too early
                f.lastused = datetime.now()
                DBConn().session().object_session(poolfile).add(f)
                # NOTE(review): a 'return f' is not visible here.

        # Prepare BuildQueueFile object
        qf = BuildQueueFile()
        qf.build_queue_id = self.queue_id
        qf.filename = poolfile_basename

        targetpath = poolfile.fullpath
        queuepath = os.path.join(self.path, poolfile_basename)

        # NOTE(review): the 'try:' and the copy-vs-symlink branch headers
        # are not visible in this copy.
        # We need to copy instead of symlink
        utils.copy(targetpath, queuepath)
        # NULL in the fileid field implies a copy
        os.symlink(targetpath, queuepath)
        qf.fileid = poolfile.file_id
        except FileExistsError:
            if not poolfile.identical_to(queuepath):
                # NOTE(review): the error handling is not visible here.

        # Get the same session as the PoolFile is using and add the qf to it
        DBConn().session().object_session(poolfile).add(qf)

    def add_changes_from_policy_queue(self, policyqueue, changes):
        """
        Copies a changes from a policy queue together with its poolfiles.

        @type policyqueue: PolicyQueue
        @param policyqueue: policy queue to copy the changes from

        @type changes: DBChange
        @param changes: changes to copy to this build queue
        """
        for policyqueuefile in changes.files:
            self.add_file_from_policy_queue(policyqueue, policyqueuefile)
        for poolfile in changes.poolfiles:
            self.add_file_from_pool(poolfile)

    def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
        """
        Copies a file from a policy queue.
        Assumes that the policyqueuefile is attached to the same SQLAlchemy
        session as the Queue object is. The caller is responsible for
        committing after calling this function.

        @type policyqueue: PolicyQueue
        @param policyqueue: policy queue to copy the file from

        @type policyqueuefile: ChangePendingFile
        @param policyqueuefile: file to be added to the build queue
        """
        session = DBConn().session().object_session(policyqueuefile)

        # Is the file already there?
        # NOTE(review): the 'try:' line is not visible in this copy.
        f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
        f.lastused = datetime.now()
        # NOTE(review): an early 'return' is not visible here.
        except NoResultFound:
            pass # continue below

        # We have to add the file.
        f = BuildQueuePolicyFile()
        # NOTE(review): 'f.build_queue = self' is not visible here.
        f.file = policyqueuefile
        f.filename = policyqueuefile.filename

        source = os.path.join(policyqueue.path, policyqueuefile.filename)
        # NOTE(review): the 'target = ...' assignment and the 'try:' line
        # are not visible in this copy.
        # Always copy files from policy queues as they might move around.
        utils.copy(source, target)
        except FileExistsError:
            if not policyqueuefile.identical_to(target):
982 __all__.append('BuildQueue')
def get_build_queue(queuename, session=None):
    """
    Returns BuildQueue object for given C{queue name}, creating it if it does not

    @type queuename: string
    @param queuename: The name of the queue

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @return: BuildQueue object for the given queue
    """
    q = session.query(BuildQueue).filter_by(queue_name=queuename)
    # NOTE(review): the 'try:' / 'return q.one()' / 'return None' lines are
    # not visible in this copy -- recover from upstream before editing.
    except NoResultFound:
1008 __all__.append('get_build_queue')
1010 ################################################################################
class BuildQueueFile(object):
    """
    BuildQueueFile represents a file in a build queue coming from a pool.
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): body not visible in this copy (presumably 'pass').

    # NOTE(review): the 'def __repr__(self):' header is not visible here.
    return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)

    # NOTE(review): an '@property / def fullpath(self):' header is not
    # visible above this line.
    return os.path.join(self.buildqueue.path, self.filename)
1028 __all__.append('BuildQueueFile')
1030 ################################################################################
class BuildQueuePolicyFile(object):
    """
    BuildQueuePolicyFile represents a file in a build queue that comes from a
    policy queue (and not a pool).
    """

    def __init__(self, *args, **kwargs):
        # NOTE(review): body not visible in this copy (presumably 'pass').

    #def filename(self):
    #    return self.file.filename

    # NOTE(review): an '@property / def fullpath(self):' header is not
    # visible above this line.
    return os.path.join(self.build_queue.path, self.filename)
1049 __all__.append('BuildQueuePolicyFile')
1051 ################################################################################
class ChangePendingBinary(object):
    """ORM class for a row of the 'changes_pending_binaries' table."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): body not visible in this copy (presumably 'pass').

    # NOTE(review): the 'def __repr__(self):' header is not visible here.
    return '<ChangePendingBinary %s>' % self.change_pending_binary_id
1060 __all__.append('ChangePendingBinary')
1062 ################################################################################
class ChangePendingFile(object):
    """ORM class for a row of the 'changes_pending_files' table."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): body not visible in this copy (presumably 'pass').

    # NOTE(review): the 'def __repr__(self):' header is not visible here.
    return '<ChangePendingFile %s>' % self.change_pending_file_id

    def identical_to(self, filename):
        """
        compare size and hash with the given file

        @return: true if the given file has the same size and hash as this object; false otherwise
        """
        st = os.stat(filename)
        if self.size != st.st_size:
            # NOTE(review): a 'return False' is not visible here.

        f = open(filename, "r")
        sha256sum = apt_pkg.sha256sum(f)
        if sha256sum != self.sha256sum:
            # NOTE(review): the close/return lines are not visible in this
            # copy -- as shown, the file handle would leak.
1089 __all__.append('ChangePendingFile')
1091 ################################################################################
class ChangePendingSource(object):
    """ORM class for a row of the 'changes_pending_source' table."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): body not visible in this copy (presumably 'pass').

    # NOTE(review): the 'def __repr__(self):' header is not visible here.
    return '<ChangePendingSource %s>' % self.change_pending_source_id
1100 __all__.append('ChangePendingSource')
1102 ################################################################################
class Component(ORMObject):
    """An archive component such as 'main' or 'contrib'."""

    def __init__(self, component_name=None):
        self.component_name = component_name

    def __eq__(self, val):
        # Allow comparing directly against a plain component-name string.
        if isinstance(val, str):
            return self.component_name == val
        # Anything else: defer to the default comparison machinery.
        return NotImplemented

    def __ne__(self, val):
        # Mirror of __eq__ for string operands.
        if isinstance(val, str):
            return self.component_name != val
        return NotImplemented

    def properties(self):
        # The first entry is used by ORMObject.__repr__().
        return ['component_name', 'component_id', 'description', \
            'location_count', 'meets_dfsg', 'overrides_count']

    def not_null_constraints(self):
        return ['component_name']
1128 __all__.append('Component')
def get_component(component, session=None):
    """
    Returns database id for given C{component}.

    @type component: string
    @param component: The name of the override type

    @return: the database id for the given component
    """
    # Component names are stored lower-case.
    component = component.lower()

    q = session.query(Component).filter_by(component_name=component)
    # NOTE(review): the 'try:' / 'return q.one()' / 'return None' lines are
    # not visible in this copy -- recover from upstream before editing.
    except NoResultFound:
1151 __all__.append('get_component')
def get_mapped_component(component_name, session=None):
    """get component after mappings

    Evaluate component mappings from ComponentMappings in dak.conf for the
    given component name.

    @todo: ansgar wants to get rid of this. It's currently only used for
    the security archive

    @type component_name: str
    @param component_name: component name

    @param session: database session

    @rtype: L{daklib.dbconn.Component} or C{None}
    @return: component after applying maps or C{None}
    """
    # NOTE(review): a 'cnf = Config()' assignment is not visible in this copy.
    for m in cnf.value_list("ComponentMappings"):
        # Each mapping is a "src dst" pair; mappings are applied in order.
        (src, dst) = m.split()
        if component_name == src:
            component_name = dst
    component = session.query(Component).filter_by(component_name=component_name).first()
    # NOTE(review): the trailing 'return component' is not visible here.
1179 __all__.append('get_mapped_component')
def get_component_names(session=None):
    """
    Return the names of all known components.

    @return: list of strings of component names
    """
    names = []
    for component in session.query(Component).all():
        names.append(component.component_name)
    return names
1192 __all__.append('get_component_names')
1194 ################################################################################
class DBConfig(object):
    """ORM class for a row of the 'config' table."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): body not visible in this copy (presumably 'pass').

    # NOTE(review): the 'def __repr__(self):' header is not visible here.
    return '<DBConfig %s>' % self.name
1203 __all__.append('DBConfig')
1205 ################################################################################
def get_or_set_contents_file_id(filename, session=None):
    """
    Returns database id for given filename.

    If no matching file is found, a row is inserted.

    @type filename: string
    @param filename: The filename
    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for committing.

    @return: the database id for the given component
    """
    q = session.query(ContentFilename).filter_by(filename=filename)

    # NOTE(review): the 'try:' line is not visible in this copy.
        ret = q.one().cafilename_id
    except NoResultFound:
        # Not there yet: insert a new row.
        cf = ContentFilename()
        cf.filename = filename
        # NOTE(review): a 'session.add(cf)' is not visible in this copy.
        session.commit_or_flush()
        ret = cf.cafilename_id

    # NOTE(review): the trailing 'return ret' is not visible in this copy.
1238 __all__.append('get_or_set_contents_file_id')
def get_contents(suite, overridetype, section=None, session=None):
    """
    Returns contents for a suite / overridetype combination, limiting
    to a section if not None.

    @type suite: Suite
    @param suite: Suite object

    @type overridetype: OverrideType
    @param overridetype: OverrideType object

    @type section: Section
    @param section: Optional section object to limit results to

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: ResultsProxy
    @return: ResultsProxy object set up to return tuples of (filename, section,
    package, arch_id)
    """

    # find me all of the contents for a given suite
    # NOTE(review): this excerpt elides part of the SELECT column list
    # after 'fn,'.
    contents_q = """SELECT (p.path||'/'||n.file) AS fn,
                    FROM content_associations c join content_file_paths p ON (c.filepath=p.id)
                    JOIN content_file_names n ON (c.filename=n.id)
                    JOIN binaries b ON (b.id=c.binary_pkg)
                    JOIN override o ON (o.package=b.package)
                    JOIN section s ON (s.id=o.section)
                    WHERE o.suite = :suiteid AND o.type = :overridetypeid
                    AND b.type=:overridetypename"""

    vals = {'suiteid': suite.suite_id,
            'overridetypeid': overridetype.overridetype_id,
            'overridetypename': overridetype.overridetype}

    if section is not None:
        contents_q += " AND s.id = :sectionid"
        vals['sectionid'] = section.section_id

    contents_q += " ORDER BY fn"

    return session.execute(contents_q, vals)

__all__.append('get_contents')
1291 ################################################################################
class ContentFilepath(object):
    """ORM class for a row of the C{content_file_paths} table."""
    # The excerpt dropped the empty __init__ body and the __repr__ header;
    # restored to the standard boilerplate used by the sibling ORM classes.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<ContentFilepath %s>' % self.filepath
1300 __all__.append('ContentFilepath')
def get_or_set_contents_path_id(filepath, session=None):
    """
    Returns database id for given path.

    If no matching file is found, a row is inserted.

    @type filepath: string
    @param filepath: The filepath

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.

    @rtype: int
    @return: the database id for the given path
    """
    q = session.query(ContentFilepath).filter_by(filepath=filepath)

    # NOTE(review): this excerpt elides the 'try:' opener, session.add()
    # and the final return around the lines below.
        ret = q.one().cafilepath_id
    except NoResultFound:
        cf = ContentFilepath()
        cf.filepath = filepath
        session.commit_or_flush()
        ret = cf.cafilepath_id

__all__.append('get_or_set_contents_path_id')
1336 ################################################################################
class ContentAssociation(object):
    """ORM class for a row of the C{content_associations} table."""
    # The excerpt dropped the empty __init__ body and the __repr__ header;
    # restored to the standard boilerplate used by the sibling ORM classes.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<ContentAssociation %s>' % self.ca_id
1345 __all__.append('ContentAssociation')
def insert_content_paths(binary_id, fullpaths, session=None):
    """
    Make sure given path is associated with given binary id

    @type binary_id: int
    @param binary_id: the id of the binary
    @type fullpaths: list
    @param fullpaths: the list of paths of the file being associated with the binary
    @type session: SQLAlchemy session
    @param session: Optional SQLAlchemy session. If this is passed, the caller
    is responsible for ensuring a transaction has begun and committing the
    results or rolling back based on the result code. If not passed, a commit
    will be performed at the end of the function, otherwise the caller is
    responsible for commiting.

    @return: True upon success
    """
    privatetrans = False
    # NOTE(review): this excerpt elides the 'if session is None:' guard and
    # the try/commit/return plumbing around the code below.
        session = DBConn().session()

    def generate_path_dicts():
        # Normalise a leading './' so paths are stored relative to the root.
        for fullpath in fullpaths:
            if fullpath.startswith( './' ):
                fullpath = fullpath[2:]

            yield {'filename':fullpath, 'id': binary_id }

    for d in generate_path_dicts():
        # NOTE(review): the execute() argument list is truncated in this
        # excerpt (the bind-parameter dict 'd' is elided).
        session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
        traceback.print_exc()

        # Only rollback if we set up the session ourself

__all__.append('insert_content_paths')
1400 ################################################################################
class DSCFile(object):
    """ORM class for a row of the C{dsc_files} table."""
    # The excerpt dropped the empty __init__ body and the __repr__ header;
    # restored to the standard boilerplate used by the sibling ORM classes.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<DSCFile %s>' % self.dscfile_id
1409 __all__.append('DSCFile')
def get_dscfiles(dscfile_id=None, source_id=None, poolfile_id=None, session=None):
    """
    Returns a list of DSCFiles which may be empty

    @type dscfile_id: int (optional)
    @param dscfile_id: the dscfile_id of the DSCFiles to find

    @type source_id: int (optional)
    @param source_id: the source id related to the DSCFiles to find

    @type poolfile_id: int (optional)
    @param poolfile_id: the poolfile id related to the DSCFiles to find

    @rtype: list
    @return: Possibly empty list of DSCFiles
    """
    q = session.query(DSCFile)

    if dscfile_id is not None:
        q = q.filter_by(dscfile_id=dscfile_id)

    if source_id is not None:
        q = q.filter_by(source_id=source_id)

    if poolfile_id is not None:
        q = q.filter_by(poolfile_id=poolfile_id)

    # NOTE(review): this excerpt elides the final 'return q.all()'.

__all__.append('get_dscfiles')
1444 ################################################################################
class ExternalOverride(ORMObject):
    # ORM class for a row of the C{external_overrides} table.
    def __init__(self, *args, **kwargs):
        # NOTE(review): this excerpt elides the empty body ('pass') and the
        # __repr__ def line that the return below belongs to.
        return '<ExternalOverride %s = %s: %s>' % (self.package, self.key, self.value)

__all__.append('ExternalOverride')
1455 ################################################################################
class PoolFile(ORMObject):
    # ORM class for a file stored in the pool.
    # NOTE(review): several lines are elided in this excerpt -- the
    # __init__ continuation (md5sum parameter), @property decorators, and
    # the def headers for fullpath/basename.
    def __init__(self, filename = None, location = None, filesize = -1, \
        self.filename = filename
        self.location = location
        self.filesize = filesize
        self.md5sum = md5sum

        # fullpath: first ArchiveFile row referencing this pool file.
        session = DBConn().session().object_session(self)
        af = session.query(ArchiveFile).join(Archive).filter(ArchiveFile.file == self).first()

    def component(self):
        # Single component this file belongs to; one() enforces uniqueness
        # across archives.
        session = DBConn().session().object_session(self)
        component_id = session.query(ArchiveFile.component_id).filter(ArchiveFile.file == self) \
            .group_by(ArchiveFile.component_id).one()
        return session.query(Component).get(component_id)

        # basename of the pool filename (body of an elided property).
        return os.path.basename(self.filename)
def is_valid(self, filesize = -1, md5sum = None):
    """
    Check whether the given size and md5sum match this pool file's records.

    @rtype: bool
    @return: True if both size and md5sum match, False otherwise
    """
    # int() replaces the Python-2-only long(): behaviour is identical for
    # any file size in Python 2 (ints auto-promote) and it keeps the code
    # Python 3 compatible.
    return self.filesize == int(filesize) and self.md5sum == md5sum
def properties(self):
    """Attribute names exposed for generic ORM handling of PoolFile rows."""
    exposed = [
        'filename', 'file_id', 'filesize', 'md5sum',
        'sha1sum', 'sha256sum', 'source', 'binary', 'last_used',
    ]
    return exposed
def not_null_constraints(self):
    """Columns the database schema requires to be non-NULL for a PoolFile."""
    required = ['filename', 'md5sum']
    return required
def identical_to(self, filename):
    """
    compare size and hash with the given file

    @rtype: bool
    @return: true if the given file has the same size and hash as this object; false otherwise
    """
    st = os.stat(filename)
    if self.filesize != st.st_size:
        # NOTE(review): this excerpt elides the 'return False' here.

    # NOTE(review): the file object opened below is never closed in the
    # visible code -- consider a 'with' block; confirm in the full source.
    f = open(filename, "r")
    sha256sum = apt_pkg.sha256sum(f)
    if sha256sum != self.sha256sum:
        # NOTE(review): this excerpt elides 'return False' and the final
        # 'return True'.

__all__.append('PoolFile')
def check_poolfile(filename, filesize, md5sum, location_id, session=None):
    """
    Returns a tuple:
    (ValidFileFound [boolean], PoolFile object or None)

    @type filename: string
    @param filename: the filename of the file to check against the DB

    @type filesize: int
    @param filesize: the size of the file to check against the DB

    @type md5sum: string
    @param md5sum: the md5sum of the file to check against the DB

    @type location_id: int
    @param location_id: the id of the location to look in

    @rtype: tuple
    @return: Tuple of length 2.
                 - If valid pool file found: (C{True}, C{PoolFile object})
                 - If valid pool file not found:
                     - (C{False}, C{None}) if no file found
                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
    """
    poolfile = session.query(Location).get(location_id). \
        files.filter_by(filename=filename).first()
    # NOTE(review): this excerpt elides the 'valid = False' initialisation
    # and the 'valid = True' branch body.
    if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):

    return (valid, poolfile)

__all__.append('check_poolfile')
# TODO: the implementation can trivially be inlined at the place where the
# function is called
def get_poolfile_by_id(file_id, session=None):
    """
    Returns a PoolFile objects or None for the given id

    @type file_id: int
    @param file_id: the id of the file to look for

    @rtype: PoolFile or None
    @return: either the PoolFile object or None
    """
    return session.query(PoolFile).get(file_id)

__all__.append('get_poolfile_by_id')
def get_poolfile_like_name(filename, session=None):
    """
    Returns an array of PoolFile objects which are like the given name

    @type filename: string
    @param filename: the filename of the file to check against the DB

    @rtype: array
    @return: array of PoolFile objects
    """
    # TODO: There must be a way of properly using bind parameters with %FOO%
    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))

    # NOTE(review): this excerpt elides the final 'return q.all()'.

__all__.append('get_poolfile_like_name')
def add_poolfile(filename, datadict, location_id, session=None):
    """
    Add a new file to the pool

    @type filename: string
    @param filename: filename

    @type datadict: dict
    @param datadict: dict with needed data

    @type location_id: int
    @param location_id: database id of the location

    @rtype: PoolFile
    @return: the PoolFile object created
    """
    poolfile = PoolFile()
    poolfile.filename = filename
    poolfile.filesize = datadict["size"]
    poolfile.md5sum = datadict["md5sum"]
    poolfile.sha1sum = datadict["sha1sum"]
    poolfile.sha256sum = datadict["sha256sum"]
    poolfile.location_id = location_id

    session.add(poolfile)
    # Flush to get a file id (NB: This is not a commit)
    # NOTE(review): this excerpt elides the session.flush() call and the
    # final 'return poolfile'.

__all__.append('add_poolfile')
1618 ################################################################################
class Fingerprint(ORMObject):
    # ORM class for a row of the C{fingerprint} table.
    def __init__(self, fingerprint = None):
        self.fingerprint = fingerprint

    def properties(self):
        # NOTE(review): this excerpt elides the closing element(s) of the
        # returned list.
        return ['fingerprint', 'fingerprint_id', 'keyring', 'uid', \

    def not_null_constraints(self):
        return ['fingerprint']

__all__.append('Fingerprint')
def get_fingerprint(fpr, session=None):
    """
    Returns Fingerprint object for given fpr.

    @type fpr: string
    @param fpr: The fpr to find / add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied).

    @rtype: Fingerprint
    @return: the Fingerprint object for the given fpr or None
    """
    q = session.query(Fingerprint).filter_by(fingerprint=fpr)

    # NOTE(review): this excerpt elides the try/return plumbing
    # (q.one() on success, None on NoResultFound).
    except NoResultFound:

__all__.append('get_fingerprint')
def get_or_set_fingerprint(fpr, session=None):
    """
    Returns Fingerprint object for given fpr.

    If no matching fpr is found, a row is inserted.

    @type fpr: string
    @param fpr: The fpr to find / add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.
    A flush will be performed either way.

    @rtype: Fingerprint
    @return: the Fingerprint object for the given fpr
    """
    q = session.query(Fingerprint).filter_by(fingerprint=fpr)

    # NOTE(review): this excerpt elides the 'try:' opener and the returns
    # around the insert-on-miss logic below.
    except NoResultFound:
        fingerprint = Fingerprint()
        fingerprint.fingerprint = fpr
        session.add(fingerprint)
        session.commit_or_flush()

__all__.append('get_or_set_fingerprint')
1695 ################################################################################
# Helper routine for Keyring class
def get_ldap_name(entry):
    # Build a display name from the cn/mn/sn LDAP attributes, skipping
    # empty or "-" values.
    # NOTE(review): this excerpt elides the 'name = []' initialisation,
    # the per-attribute lookup that binds 'ret', and the append.
    for k in ["cn", "mn", "sn"]:
        if ret and ret[0] != "" and ret[0] != "-":
    return " ".join(name)
1706 ################################################################################
class Keyring(object):
    # Command template for listing keys with fingerprints via gpg's
    # machine-readable colon output; %s is the keyring path.
    gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
        " --with-colons --fingerprint --fingerprint"

    def __init__(self, *args, **kwargs):
        # NOTE(review): this excerpt elides class attributes (keys /
        # fpr_lookup / keyring_id), the empty __init__ body, and the
        # __repr__ def line that the return below belongs to.
        return '<Keyring %s>' % self.keyring_name
def de_escape_gpg_str(self, txt):
    """Replace gpg \\xNN escape sequences in *txt* with the characters they encode."""
    # Splitting with a capturing group yields alternating literal/escape
    # chunks; the \xNN escapes sit at the odd indices.
    pieces = re.split(r'(\\x..)', txt)
    decoded = []
    for idx, piece in enumerate(pieces):
        if idx % 2:
            decoded.append("%c" % int(piece[2:], 16))
        else:
            decoded.append(piece)
    return "".join(decoded)
def parse_address(self, uid):
    """parses uid and returns a tuple of real name and email address"""
    # email.Utils is the Python 2 spelling (email.utils in Python 3).
    (name, address) = email.Utils.parseaddr(uid)
    # Strip any parenthesised comment, then decode gpg \xNN escapes.
    name = re.sub(r"\s*[(].*[)]", "", name)
    name = self.de_escape_gpg_str(name)
    # NOTE(review): this excerpt elides a line or two here (presumably
    # quoting names that contain a '.') -- confirm against the full source.
    return (name, address)
def load_keys(self, keyring):
    """Populate self.keys and self.fpr_lookup by parsing gpg's colon output
    for the given keyring file (see doc/DETAILS in GnuPG for the format)."""
    if not self.keyring_id:
        raise Exception('Must be initialized with database information')

    # NOTE(review): this excerpt elides the loop header over k's output
    # lines and the 'pub' record bookkeeping that binds 'key' and creates
    # self.keys[key].
    k = os.popen(self.gpg_invocation % keyring, "r")

        field = line.split(":")
        if field[0] == "pub":
            (name, addr) = self.parse_address(field[9])
            self.keys[key]["email"] = addr
            self.keys[key]["name"] = name
            self.keys[key]["fingerprints"] = []
        elif key and field[0] == "sub" and len(field) >= 12:
            # Field 12 carries the capability flags; 's' marks a signing subkey.
            signingkey = ("s" in field[11])
        elif key and field[0] == "uid":
            (name, addr) = self.parse_address(field[9])
            if "email" not in self.keys[key] and "@" in addr:
                self.keys[key]["email"] = addr
                self.keys[key]["name"] = name
        elif signingkey and field[0] == "fpr":
            self.keys[key]["fingerprints"].append(field[9])
            self.fpr_lookup[field[9]] = key
def import_users_from_ldap(self, session):
    """Look up key owners in the configured LDAP directory and record their
    uids; returns (byname, byuid) mappings."""
    # NOTE(review): this excerpt elides the ldap import / cnf setup and the
    # loop header over the search results that binds 'entry', as well as
    # the byuid/byname initialisation.
    LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
    LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]

    # Anonymous bind is sufficient for this read-only search.
    l = ldap.open(LDAPServer)
    l.simple_bind_s("","")
    Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
           "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
           ["uid", "keyfingerprint", "cn", "mn", "sn"])

    ldap_fin_uid_id = {}

        uid = entry["uid"][0]
        name = get_ldap_name(entry)
        fingerprints = entry["keyFingerPrint"]
        for f in fingerprints:
            key = self.fpr_lookup.get(f, None)
            if key not in self.keys:
                self.keys[key]["uid"] = uid

            keyid = get_or_set_uid(uid, session).uid_id
            byuid[keyid] = (uid, name)
            byname[uid] = (keyid, name)

    return (byname, byuid)
def generate_users_from_keyring(self, format, session):
    """Create or look up Uid entries for every loaded key, deriving the uid
    from the key's email via *format*; returns (byname, byuid) mappings."""
    # NOTE(review): this excerpt elides the byuid/byname/any_invalid
    # initialisation and parts of the branch structure (the else that the
    # indented block below belongs to, and the final invalid-uid guard).
    for x in self.keys.keys():
        if "email" not in self.keys[x]:
            # Keys without an email address get a placeholder uid.
            self.keys[x]["uid"] = format % "invalid-uid"

            uid = format % self.keys[x]["email"]
            keyid = get_or_set_uid(uid, session).uid_id
            byuid[keyid] = (uid, self.keys[x]["name"])
            byname[uid] = (keyid, self.keys[x]["name"])
            self.keys[x]["uid"] = uid

        uid = format % "invalid-uid"
        keyid = get_or_set_uid(uid, session).uid_id
        byuid[keyid] = (uid, "ungeneratable user id")
        byname[uid] = (keyid, "ungeneratable user id")

    return (byname, byuid)

__all__.append('Keyring')
def get_keyring(keyring, session=None):
    """
    If C{keyring} does not have an entry in the C{keyrings} table yet, return None
    If C{keyring} already has an entry, simply return the existing Keyring

    @type keyring: string
    @param keyring: the keyring name

    @rtype: Keyring
    @return: the Keyring object for this keyring
    """
    q = session.query(Keyring).filter_by(keyring_name=keyring)

    # NOTE(review): this excerpt elides the try/return plumbing
    # (q.one() on success, None on NoResultFound).
    except NoResultFound:

__all__.append('get_keyring')
def get_active_keyring_paths(session=None):
    """
    Returns paths of all active keyrings, highest priority first.

    @rtype: list
    @return: list of active keyring paths
    """
    return [ x.keyring_name for x in session.query(Keyring).filter(Keyring.active == True).order_by(desc(Keyring.priority)).all() ]

__all__.append('get_active_keyring_paths')
def get_primary_keyring_path(session=None):
    """
    Get the full path to the highest priority active keyring

    @rtype: str or None
    @return: path to the active keyring with the highest priority or None if no
             keyring is configured
    """
    # NOTE(review): the session argument is not forwarded here; a fresh
    # session is opened inside get_active_keyring_paths -- confirm this is
    # intentional against the full source.
    keyrings = get_active_keyring_paths()

    if len(keyrings) > 0:
        # NOTE(review): this excerpt elides the 'return keyrings[0]' /
        # 'return None' tail of the function.

__all__.append('get_primary_keyring_path')
1880 ################################################################################
class KeyringACLMap(object):
    """ORM class for a row of the C{keyring_acl_map} table."""
    # The excerpt dropped the empty __init__ body and the __repr__ header;
    # restored to the standard boilerplate used by the sibling ORM classes.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<KeyringACLMap %s>' % self.keyring_acl_map_id
1889 __all__.append('KeyringACLMap')
1891 ################################################################################
class DBChange(object):
    # ORM class for a row of the C{changes} table.
    def __init__(self, *args, **kwargs):
        # NOTE(review): this excerpt elides the empty body ('pass') and the
        # __repr__ def line that the return below belongs to.
        return '<DBChange %s>' % self.changesname

    def clean_from_queue(self):
        # Detach this changes entry from any policy queue it sits in.
        session = DBConn().session().object_session(self)

        # Remove changes_pool_files entries

        # Remove changes_pending_files references

        # Clear out of queue
        self.in_queue = None
        self.approved_for_id = None

__all__.append('DBChange')
def get_dbchange(filename, session=None):
    """
    returns DBChange object for given C{filename}.

    @type filename: string
    @param filename: the name of the file

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: DBChange
    @return: DBChange object for the given filename (C{None} if not present)
    """
    q = session.query(DBChange).filter_by(changesname=filename)

    # NOTE(review): this excerpt elides the try/return plumbing
    # (q.one() on success, None on NoResultFound).
    except NoResultFound:

__all__.append('get_dbchange')
1940 ################################################################################
class Location(ORMObject):
    # ORM class for a row of the C{location} table.
    def __init__(self, path = None, component = None):
        # NOTE(review): this excerpt elides 'self.path = path'.
        self.component = component
        # the column 'type' should go away, see comment at mapper
        self.archive_type = 'pool'

    def properties(self):
        # NOTE(review): this excerpt elides the closing element(s) of the
        # returned list.
        return ['path', 'location_id', 'archive_type', 'component', \

    def not_null_constraints(self):
        return ['path', 'archive_type']

__all__.append('Location')
def get_location(location, component=None, archive=None, session=None):
    """
    Returns Location object for the given combination of location, component
    and archive.

    @type location: string
    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}

    @type component: string
    @param component: the component name (if None, no restriction applied)

    @type archive: string
    @param archive: the archive name (if None, no restriction applied)

    @rtype: Location / None
    @return: Either a Location object or None if one can't be found
    """
    q = session.query(Location).filter_by(path=location)

    if archive is not None:
        q = q.join(Archive).filter_by(archive_name=archive)

    if component is not None:
        q = q.join(Component).filter_by(component_name=component)

    # NOTE(review): this excerpt elides the try/return plumbing
    # (q.one() on success, None on NoResultFound).
    except NoResultFound:

__all__.append('get_location')
1992 ################################################################################
class Maintainer(ORMObject):
    # ORM class for a row of the C{maintainer} table.
    def __init__(self, name = None):
        # NOTE(review): this excerpt elides 'self.name = name'.

    def properties(self):
        return ['name', 'maintainer_id']

    def not_null_constraints(self):
        # NOTE(review): this excerpt elides the returned list (presumably
        # ['name']) -- confirm against the full source.

    def get_split_maintainer(self):
        # Empty tuple for unnamed maintainers; otherwise delegate to
        # fix_maintainer for the (rfc822, rfc2047, name, email) split.
        if not hasattr(self, 'name') or self.name is None:
            return ('', '', '', '')

        return fix_maintainer(self.name.strip())

__all__.append('Maintainer')
def get_or_set_maintainer(name, session=None):
    """
    Returns Maintainer object for given maintainer name.

    If no matching maintainer name is found, a row is inserted.

    @type name: string
    @param name: The maintainer name to add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.
    A flush will be performed either way.

    @rtype: Maintainer
    @return: the Maintainer object for the given maintainer
    """
    q = session.query(Maintainer).filter_by(name=name)
    # NOTE(review): this excerpt elides the 'try:' opener and the returns
    # around the insert-on-miss logic below.
    except NoResultFound:
        maintainer = Maintainer()
        maintainer.name = name
        session.add(maintainer)
        session.commit_or_flush()

__all__.append('get_or_set_maintainer')
def get_maintainer(maintainer_id, session=None):
    """
    Return the name of the maintainer behind C{maintainer_id} or None if that
    maintainer_id is invalid.

    @type maintainer_id: int
    @param maintainer_id: the id of the maintainer

    @rtype: Maintainer
    @return: the Maintainer with this C{maintainer_id}
    """
    return session.query(Maintainer).get(maintainer_id)

__all__.append('get_maintainer')
2063 ################################################################################
class NewComment(object):
    """ORM class for a row of the C{new_comments} table."""
    # The excerpt dropped the empty __init__ body and the __repr__ header;
    # restored to the standard boilerplate used by the sibling ORM classes.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '''<NewComment for '%s %s' (%s)>''' % (self.package, self.version, self.comment_id)
2072 __all__.append('NewComment')
def has_new_comment(package, version, session=None):
    """
    Returns true if the given combination of C{package}, C{version} has a comment.

    @type package: string
    @param package: name of the package

    @type version: string
    @param version: package version

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: boolean
    @return: true/false
    """
    q = session.query(NewComment)
    q = q.filter_by(package=package)
    q = q.filter_by(version=version)

    return bool(q.count() > 0)

__all__.append('has_new_comment')
def get_new_comments(package=None, version=None, comment_id=None, session=None):
    """
    Returns (possibly empty) list of NewComment objects for the given
    parameters

    @type package: string (optional)
    @param package: name of the package

    @type version: string (optional)
    @param version: package version

    @type comment_id: int (optional)
    @param comment_id: An id of a comment

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: A (possibly empty) list of NewComment objects will be returned
    """
    q = session.query(NewComment)
    if package is not None: q = q.filter_by(package=package)
    if version is not None: q = q.filter_by(version=version)
    if comment_id is not None: q = q.filter_by(comment_id=comment_id)

    # NOTE(review): this excerpt elides the final 'return q.all()'.

__all__.append('get_new_comments')
2133 ################################################################################
class Override(ORMObject):
    # ORM class for a row of the C{override} table.
    def __init__(self, package = None, suite = None, component = None, overridetype = None, \
        section = None, priority = None):
        self.package = package
        # NOTE(review): this excerpt elides 'self.suite = suite'.
        self.component = component
        self.overridetype = overridetype
        self.section = section
        self.priority = priority

    def properties(self):
        # NOTE(review): this excerpt elides the closing element(s) of the
        # returned list.
        return ['package', 'suite', 'component', 'overridetype', 'section', \

    def not_null_constraints(self):
        return ['package', 'suite', 'component', 'overridetype', 'section']

__all__.append('Override')
def get_override(package, suite=None, component=None, overridetype=None, session=None):
    """
    Returns Override object for the given parameters

    @type package: string
    @param package: The name of the package

    @type suite: string, list or None
    @param suite: The name of the suite (or suites if a list) to limit to. If
                  None, don't limit.  Defaults to None.

    @type component: string, list or None
    @param component: The name of the component (or components if a list) to
                      limit to. If None, don't limit.  Defaults to None.

    @type overridetype: string, list or None
    @param overridetype: The name of the overridetype (or overridetypes if a list) to
                         limit to. If None, don't limit.  Defaults to None.

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: A (possibly empty) list of Override objects will be returned
    """
    q = session.query(Override)
    q = q.filter_by(package=package)

    # Each optional filter accepts a scalar or a list; scalars are
    # normalised to single-element lists before the IN filter.
    if suite is not None:
        if not isinstance(suite, list): suite = [suite]
        q = q.join(Suite).filter(Suite.suite_name.in_(suite))

    if component is not None:
        if not isinstance(component, list): component = [component]
        q = q.join(Component).filter(Component.component_name.in_(component))

    if overridetype is not None:
        if not isinstance(overridetype, list): overridetype = [overridetype]
        q = q.join(OverrideType).filter(OverrideType.overridetype.in_(overridetype))

    # NOTE(review): this excerpt elides the final 'return q.all()'.

__all__.append('get_override')
2202 ################################################################################
class OverrideType(ORMObject):
    # ORM class for a row of the C{override_type} table.
    def __init__(self, overridetype = None):
        self.overridetype = overridetype

    def properties(self):
        # Attributes exposed for generic ORM handling.
        return ['overridetype', 'overridetype_id', 'overrides_count']

    def not_null_constraints(self):
        # Columns required to be non-NULL.
        return ['overridetype']

__all__.append('OverrideType')
def get_override_type(override_type, session=None):
    """
    Returns OverrideType object for given C{override type}.

    @type override_type: string
    @param override_type: The name of the override type

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: int
    @return: the database id for the given override type
    """
    q = session.query(OverrideType).filter_by(overridetype=override_type)

    # NOTE(review): this excerpt elides the try/return plumbing
    # (q.one() on success, None on NoResultFound).
    except NoResultFound:

__all__.append('get_override_type')
2241 ################################################################################
class PolicyQueue(object):
    """ORM class for a row of the C{policy_queue} table."""
    # The excerpt dropped the empty __init__ body and the __repr__ header;
    # restored to the standard boilerplate used by the sibling ORM classes.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<PolicyQueue %s>' % self.queue_name
2250 __all__.append('PolicyQueue')
def get_policy_queue(queuename, session=None):
    """
    Returns PolicyQueue object for given C{queue name}

    @type queuename: string
    @param queuename: The name of the queue

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: PolicyQueue
    @return: PolicyQueue object for the given queue
    """
    q = session.query(PolicyQueue).filter_by(queue_name=queuename)

    # NOTE(review): this excerpt elides the try/return plumbing
    # (q.one() on success, None on NoResultFound).
    except NoResultFound:

__all__.append('get_policy_queue')
def get_policy_queue_from_path(pathname, session=None):
    """
    Returns PolicyQueue object for given C{path name}

    @type pathname: string
    @param pathname: The path

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: PolicyQueue
    @return: PolicyQueue object for the given queue
    """
    q = session.query(PolicyQueue).filter_by(path=pathname)

    # NOTE(review): this excerpt elides the try/return plumbing
    # (q.one() on success, None on NoResultFound).
    except NoResultFound:

__all__.append('get_policy_queue_from_path')
2302 ################################################################################
class PolicyQueueUpload(object):
    # One upload sitting in a policy queue.
    def __cmp__(self, other):
        # Python 2 three-way ordering: source name, then version, then
        # source-vs-binary, then changes file name.
        ret = cmp(self.changes.source, other.changes.source)
        # NOTE(review): this excerpt elides the 'if ret == 0:' guards
        # between the comparison stages and the branch bodies/returns.
        ret = apt_pkg.version_compare(self.changes.version, other.changes.version)
        if self.source is not None and other.source is None:
        elif self.source is None and other.source is not None:
        ret = cmp(self.changes.changesname, other.changes.changesname)

__all__.append('PolicyQueueUpload')
2320 ################################################################################
class PolicyQueueByhandFile(object):
    # ORM class for a byhand file attached to a policy-queue upload.
    # NOTE(review): this excerpt elides the class body (presumably 'pass').

__all__.append('PolicyQueueByhandFile')
2327 ################################################################################
class Priority(ORMObject):
    # ORM class for a row of the C{priority} table.
    def __init__(self, priority = None, level = None):
        self.priority = priority
        # NOTE(review): this excerpt elides 'self.level = level'.

    def properties(self):
        return ['priority', 'priority_id', 'level', 'overrides_count']

    def not_null_constraints(self):
        return ['priority', 'level']

    def __eq__(self, val):
        # Allow comparing a Priority directly against its name string.
        if isinstance(val, str):
            return (self.priority == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.priority != val)
        # This signals to use the normal comparison operator
        return NotImplemented

__all__.append('Priority')
def get_priority(priority, session=None):
    """
    Returns Priority object for given C{priority name}.

    @type priority: string
    @param priority: The name of the priority

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Priority
    @return: Priority object for the given priority
    """
    q = session.query(Priority).filter_by(priority=priority)

    # NOTE(review): this excerpt elides the try/return plumbing
    # (q.one() on success, None on NoResultFound).
    except NoResultFound:

__all__.append('get_priority')
def get_priorities(session=None):
    """
    Returns dictionary of priority names -> id mappings

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: dictionary
    @return: dictionary of priority names -> id mappings
    """
    # NOTE(review): this excerpt elides 'ret = {}', the loop header over
    # q.all() and the final 'return ret'.
    q = session.query(Priority)
        ret[x.priority] = x.priority_id

__all__.append('get_priorities')
2401 ################################################################################
class Section(ORMObject):
    # ORM class for a row of the C{section} table.
    def __init__(self, section = None):
        self.section = section

    def properties(self):
        return ['section', 'section_id', 'overrides_count']

    def not_null_constraints(self):
        # NOTE(review): this excerpt elides the returned list (presumably
        # ['section']) -- confirm against the full source.

    def __eq__(self, val):
        # Allow comparing a Section directly against its name string.
        if isinstance(val, str):
            return (self.section == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.section != val)
        # This signals to use the normal comparison operator
        return NotImplemented

__all__.append('Section')
def get_section(section, session=None):
    """
    Returns Section object for given C{section name}.

    @type section: string
    @param section: The name of the section

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Section
    @return: Section object for the given section name
    """
    q = session.query(Section).filter_by(section=section)

    # NOTE(review): this excerpt elides the try/return plumbing
    # (q.one() on success, None on NoResultFound).
    except NoResultFound:

__all__.append('get_section')
def get_sections(session=None):
    """
    Returns dictionary of section names -> id mappings

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: dictionary
    @return: dictionary of section names -> id mappings
    """
    # NOTE(review): this excerpt elides 'ret = {}', the loop header over
    # q.all() and the final 'return ret'.
    q = session.query(Section)
        ret[x.section] = x.section_id

__all__.append('get_sections')
2474 ################################################################################
class SrcContents(ORMObject):
    # ORM class for a row of the C{src_contents} table.
    def __init__(self, file = None, source = None):
        # NOTE(review): this excerpt elides 'self.file = file'.
        self.source = source

    def properties(self):
        return ['file', 'source']

__all__.append('SrcContents')
2486 ################################################################################
class DBSource(ORMObject):
    # ORM class for a row of the C{source} table.
    def __init__(self, source = None, version = None, maintainer = None, \
        changedby = None, poolfile = None, install_date = None, fingerprint = None):
        self.source = source
        self.version = version
        self.maintainer = maintainer
        self.changedby = changedby
        self.poolfile = poolfile
        self.install_date = install_date
        self.fingerprint = fingerprint

        # NOTE(review): this excerpt elides the property/def header that
        # the return below belongs to (an id accessor).
        return self.source_id

    def properties(self):
        return ['source', 'source_id', 'maintainer', 'changedby', \
            'fingerprint', 'poolfile', 'version', 'suites_count', \
            'install_date', 'binaries_count', 'uploaders_count']

    def not_null_constraints(self):
        return ['source', 'version', 'install_date', 'maintainer', \
            'changedby', 'poolfile']

    def read_control_fields(self):
        """
        Reads the control information from a dsc

        @rtype: tuple
        @return: fields is the dsc information in a dictionary form
        """
        fullpath = self.poolfile.fullpath
        # NOTE(review): the file object below is never closed in the
        # visible code, and the final 'return fields' is elided.
        contents = open(fullpath, 'r').read()
        signed_file = SignedFile(contents, keyrings=[], require_signature=False)
        fields = apt_pkg.TagSection(signed_file.contents)

    # Proxy to source metadata key/value rows.
    metadata = association_proxy('key', 'value')

    def get_component_name(self):
        # Component is derived through the pool file's location.
        return self.poolfile.location.component.component_name

    def scan_contents(self):
        """
        Returns a set of names for non directories. The path names are
        normalized after converting them from either utf-8 or iso8859-1
        encoding.
        """
        fullpath = self.poolfile.fullpath
        from daklib.contents import UnpackedSource
        unpacked = UnpackedSource(fullpath)
        # NOTE(review): this excerpt elides the result-set initialisation,
        # the 'try:' opener below, and the normpath/add/return tail.
        for name in unpacked.get_all_filenames():
            # enforce proper utf-8 encoding
                name.decode('utf-8')
            except UnicodeDecodeError:
                name = name.decode('iso8859-1').encode('utf-8')

__all__.append('DBSource')
def source_exists(source, source_version, suites = ["any"], session=None):
    """
    Ensure that source exists somewhere in the archive for the binary
    upload being processed.
      1. exact match => 1.0-3
      2. bin-only NMU => 1.0-3+b1 , 1.0-3.1+b1

    @type source: string
    @param source: source name

    @type source_version: string
    @param source_version: expected source version

    @type suites: list
    @param suites: list of suites to check in, default I{any}

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: int
    @return: returns 1 if a source with expected version is found, otherwise 0
    """
    # Strip a binNMU suffix (e.g. "+b1") so that 1.0-3+b1 also matches a
    # source package at version 1.0-3.
    from daklib.regexes import re_bin_only_nmu
    orig_source_version = re_bin_only_nmu.sub('', source_version)

    for suite in suites:
        # Accept either the exact version or the un-binNMUed one.
        q = session.query(DBSource).filter_by(source=source). \
            filter(DBSource.version.in_([source_version, orig_source_version]))

        # source must exist in 'suite' or a suite that is enhanced by 'suite'
        s = get_suite(suite, session)
        # Suites declaring an 'Enhances' version check against 's' are also
        # acceptable homes for the source package.
        enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
        considered_suites = [ vc.reference for vc in enhances_vcs ]
        considered_suites.append(s)

        q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))

    # No source found so return not ok

__all__.append('source_exists')
def get_suites_source_in(source, session=None):
    """
    Returns list of Suite objects which given C{source} name is in

    @type source: str
    @param source: DBSource package name to search for

    @rtype: list
    @return: list of Suite objects for the given source
    """
    return session.query(Suite).filter(Suite.sources.any(source=source)).all()

__all__.append('get_suites_source_in')
def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None):
    """
    Returns list of DBSource objects for given C{source} name and other parameters

    @type source: str
    @param source: DBSource package name to search for

    @type version: str or None
    @param version: DBSource version name to search for or None if not applicable

    @type dm_upload_allowed: bool
    @param dm_upload_allowed: If None, no effect. If True or False, only
    return packages with that dm_upload_allowed setting

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: list of DBSource objects for the given name (may be empty)
    """
    q = session.query(DBSource).filter_by(source=source)

    # Optional narrowing filters; each is skipped when the caller passed None.
    if version is not None:
        q = q.filter_by(version=version)

    if dm_upload_allowed is not None:
        q = q.filter_by(dm_upload_allowed=dm_upload_allowed)

    # Bug fix: the assembled query was never executed/returned.
    return q.all()

__all__.append('get_sources_from_name')
# FIXME: This function fails badly if it finds more than 1 source package and
# its implementation is trivial enough to be inlined.
def get_source_in_suite(source, suite, session=None):
    """
    Returns a DBSource object for a combination of C{source} and C{suite}.

      - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
      - B{suite} - a suite name, eg. I{unstable}

    @type source: string
    @param source: source package name

    @type suite: string
    @param suite: the suite name

    @rtype: string
    @return: the version for I{source} in I{suite}
    """
    q = get_suite(suite, session).get_sources(source)
    # Relies on Query.one() semantics: NoResultFound means the source is not
    # present in this suite.
    except NoResultFound:

__all__.append('get_source_in_suite')
def import_metadata_into_db(obj, session=None):
    """
    This routine works on either DBBinary or DBSource objects and imports
    their metadata into the database
    """
    fields = obj.read_control_fields()
    for k in fields.keys():
        # Prefer storing values as plain (ascii) strings.
        val = str(fields[k])
        except UnicodeEncodeError:
            # Fall back to UTF-8
            val = fields[k].encode('utf-8')
            except UnicodeEncodeError:
                # Finally try iso8859-1
                val = fields[k].encode('iso8859-1')
            # Otherwise we allow the exception to percolate up and we cause
            # a reject as someone is playing silly buggers

        obj.metadata[get_or_set_metadatakey(k, session)] = val

    session.commit_or_flush()

__all__.append('import_metadata_into_db')
################################################################################

def split_uploaders(uploaders_list):
    """
    Split the Uploaders field into the individual uploaders and yield each of
    them. Beware: email addresses might contain commas.
    """
    # A naive split(",") would also split inside real names/addresses, so
    # first mark only the separators that follow a closing '>' with a tab,
    # then split on those tabs.
    for uploader in re.sub(">[ ]*,", ">\t", uploaders_list).split("\t"):
        yield uploader.strip()
def add_dsc_to_db(u, filename, session=None):
    # Register an uploaded .dsc: create the DBSource row, its pool file(s)
    # and dsc_files entries, suite associations and the uploaders list.
    # Returns (source, component, location id, list of new pool files).
    # NOTE(review): this excerpt elides several statements of the original
    # (e.g. creation of 'source', 'pfs' and 'dscfile') — consult VCS.
    entry = u.pkg.files[filename]

    source.source = u.pkg.dsc["source"]
    source.version = u.pkg.dsc["version"]      # NB: not files[file]["version"], that has no epoch
    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
    # If Changed-By isn't available, fall back to maintainer
    if u.pkg.changes.has_key("changed-by"):
        source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
        # presumably the fallback below sits under an 'else:' branch — confirm
        source.changedby_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
    source.install_date = datetime.now().date()

    dsc_component = entry["component"]
    dsc_location_id = entry["location id"]

    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")

    # Set up a new poolfile if necessary
    if not entry.has_key("files id") or not entry["files id"]:
        filename = entry["pool name"] + filename
        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
        pfs.append(poolfile)
        entry["files id"] = poolfile.file_id

    source.poolfile_id = entry["files id"]

    suite_names = u.pkg.changes["distribution"].keys()
    source.suites = session.query(Suite). \
        filter(Suite.suite_name.in_(suite_names)).all()

    # Add the source files to the DB (files and dsc_files)
    dscfile.source_id = source.source_id
    dscfile.poolfile_id = entry["files id"]
    session.add(dscfile)

    for dsc_file, dentry in u.pkg.dsc_files.items():
        df.source_id = source.source_id

        # If the .orig tarball is already in the pool, it's
        # files id is stored in dsc_files by check_dsc().
        files_id = dentry.get("files id", None)

        # Find the entry in the files hash
        # TODO: Bail out here properly
        for f, e in u.pkg.files.items():
            # (loop body elided in this excerpt)

        if files_id is None:
            filename = dfentry["pool name"] + dsc_file

            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
            # FIXME: needs to check for -1/-2 and or handle exception
            if found and obj is not None:
                files_id = obj.file_id

            # If still not found, add it
            if files_id is None:
                # HACK: Force sha1sum etc into dentry
                dentry["sha1sum"] = dfentry["sha1sum"]
                dentry["sha256sum"] = dfentry["sha256sum"]
                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
                pfs.append(poolfile)
                files_id = poolfile.file_id

        poolfile = get_poolfile_by_id(files_id, session)
        if poolfile is None:
            utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
        pfs.append(poolfile)

        df.poolfile_id = files_id

    # Add the src_uploaders to the DB
    # Refresh so source.maintainer is populated before seeding uploaders.
    session.refresh(source)
    source.uploaders = [source.maintainer]
    if u.pkg.dsc.has_key("uploaders"):
        for up in split_uploaders(u.pkg.dsc["uploaders"]):
            source.uploaders.append(get_or_set_maintainer(up, session))

    return source, dsc_component, dsc_location_id, pfs

__all__.append('add_dsc_to_db')
def add_deb_to_db(u, filename, session=None):
    """
    Contrary to what you might expect, this routine deals with both
    debs and udebs. That info is in 'dbtype', whilst 'type' is
    'deb' for both of them
    """
    entry = u.pkg.files[filename]

    bin.package = entry["package"]
    bin.version = entry["version"]
    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
    bin.binarytype = entry["dbtype"]

    # Find the pool file; create its location record on first sight.
    filename = entry["pool name"] + filename
    fullpath = os.path.join(cnf["Dir::Pool"], filename)
    if not entry.get("location id", None):
        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id

    if entry.get("files id", None):
        # NOTE(review): bin.poolfile_id is read here before it is assigned on
        # the next line — looks like it should be entry["files id"]; confirm.
        poolfile = get_poolfile_by_id(bin.poolfile_id)
        bin.poolfile_id = entry["files id"]
        # presumably under an 'else:' branch in the full source — confirm
        poolfile = add_poolfile(filename, entry, entry["location id"], session)
        bin.poolfile_id = entry["files id"] = poolfile.file_id

    # Find source id
    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)

    # If we couldn't find anything and the upload contains Arch: source,
    # fall back to trying the source package, source version uploaded
    # This maintains backwards compatibility with previous dak behaviour
    # and deals with slightly broken binary debs which don't properly
    # declare their source package name
    if len(bin_sources) == 0:
        if u.pkg.changes["architecture"].has_key("source") \
           and u.pkg.dsc.has_key("source") and u.pkg.dsc.has_key("version"):
            bin_sources = get_sources_from_name(u.pkg.dsc["source"], u.pkg.dsc["version"], session=session)

    # If we couldn't find a source here, we reject
    # TODO: Fix this so that it doesn't kill process-upload and instead just
    #       performs a reject.  To be honest, we should probably spot this
    #       *much* earlier than here
    if len(bin_sources) != 1:
        raise NoSourceFieldError("Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
                                  (bin.package, bin.version, entry["architecture"],
                                   filename, bin.binarytype, u.pkg.changes["fingerprint"]))

    bin.source_id = bin_sources[0].source_id

    if entry.has_key("built-using"):
        # Every Built-Using reference must resolve to exactly one source.
        for srcname, version in entry["built-using"]:
            exsources = get_sources_from_name(srcname, version, session=session)
            if len(exsources) != 1:
                raise NoSourceFieldError("Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
                                          (srcname, version, bin.package, bin.version, entry["architecture"],
                                           filename, bin.binarytype, u.pkg.changes["fingerprint"]))

            bin.extra_sources.append(exsources[0])

    # Add and flush object so it has an ID
    suite_names = u.pkg.changes["distribution"].keys()
    bin.suites = session.query(Suite). \
        filter(Suite.suite_name.in_(suite_names)).all()

    # Deal with contents - disabled for now
    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
    #    print "REJECT\nCould not determine contents of package %s" % bin.package
    #    session.rollback()
    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)

    return bin, poolfile

__all__.append('add_deb_to_db')
################################################################################

class SourceACL(object):
    """Per-source upload ACL entry (table 'source_acl')."""

    def __init__(self, *args, **kwargs):
        # Attributes are populated by the SQLAlchemy mapper.
        pass

    def __repr__(self):
        # Bug fix: the 'def __repr__(self):' line was missing, leaving a
        # bare return statement in the class body.
        return '<SourceACL %s>' % self.source_acl_id
# Export SourceACL as part of this module's public API.
__all__.append('SourceACL')

################################################################################
class SrcFormat(object):
    """Known source package format (table 'src_format'), e.g. '3.0 (quilt)'."""

    def __init__(self, *args, **kwargs):
        # Attributes are populated by the SQLAlchemy mapper.
        pass

    def __repr__(self):
        # Bug fix: the 'def __repr__(self):' line was missing, leaving a
        # bare return statement in the class body.
        return '<SrcFormat %s>' % (self.format_name)
# Export SrcFormat as part of this module's public API.
__all__.append('SrcFormat')

################################################################################
# Mapping of human-readable labels to Suite attribute names; used to render
# a "Label: value" summary of a suite.
SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                 ('SuiteID', 'suite_id'),
                 ('Version', 'version'),
                 ('Origin', 'origin'),
                 ('Description', 'description'),
                 ('Untouchable', 'untouchable'),
                 ('Announce', 'announce'),
                 ('Codename', 'codename'),
                 ('OverrideCodename', 'overridecodename'),
                 ('ValidTime', 'validtime'),
                 ('Priority', 'priority'),
                 ('NotAutomatic', 'notautomatic'),
                 ('CopyChanges', 'copychanges'),
                 ('OverrideSuite', 'overridesuite')]
# Why the heck don't we have any UNIQUE constraints in table suite?
# TODO: Add UNIQUE constraints for appropriate columns.
class Suite(ORMObject):
    # ORM class for table 'suite' (e.g. unstable, testing).

    def __init__(self, suite_name = None, version = None):
        self.suite_name = suite_name
        self.version = version

    def properties(self):
        return ['suite_name', 'version', 'sources_count', 'binaries_count', \

    def not_null_constraints(self):
        return ['suite_name']

    def __eq__(self, val):
        # Allow comparing a Suite directly against its name string.
        if isinstance(val, str):
            return (self.suite_name == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.suite_name != val)
        # This signals to use the normal comparison operator
        return NotImplemented

    # Render one "Label: value" line per SUITE_FIELDS entry.
    # NOTE(review): the enclosing method def (likely 'details') and the
    # initialisation of 'ret' are elided in this excerpt.
    for disp, field in SUITE_FIELDS:
        val = getattr(self, field, None)
        ret.append("%s: %s" % (disp, val))
    return "\n".join(ret)

    def get_architectures(self, skipsrc=False, skipall=False):
        """
        Returns list of Architecture objects

        @type skipsrc: boolean
        @param skipsrc: Whether to skip returning the 'source' architecture entry

        @type skipall: boolean
        @param skipall: Whether to skip returning the 'all' architecture entry

        @rtype: list
        @return: list of Architecture objects for the given name (may be empty)
        """
        q = object_session(self).query(Architecture).with_parent(self)
        # presumably each filter below is guarded by 'if skipsrc:' /
        # 'if skipall:' in the full source — confirm
        q = q.filter(Architecture.arch_string != 'source')
        q = q.filter(Architecture.arch_string != 'all')
        return q.order_by(Architecture.arch_string).all()

    def get_sources(self, source):
        """
        Returns a query object representing DBSource that is part of C{suite}.

          - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}

        @type source: string
        @param source: source package name

        @rtype: sqlalchemy.orm.query.Query
        @return: a query of DBSource
        """
        session = object_session(self)
        return session.query(DBSource).filter_by(source = source). \

    def get_overridesuite(self):
        # Resolve the suite whose overrides apply to this one.
        if self.overridesuite is None:
        return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()

    # NOTE(review): enclosing 'path' property def elided in this excerpt.
    return os.path.join(self.archive.path, 'dists', self.suite_name)

__all__.append('Suite')
def get_suite(suite, session=None):
    """
    Returns Suite object for given C{suite name}.

    @type suite: string
    @param suite: The name of the suite

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Suite
    @return: Suite object for the requested suite name (None if not present)
    """
    q = session.query(Suite).filter_by(suite_name=suite)
    # A missing suite is reported as None rather than raising.
    except NoResultFound:

__all__.append('get_suite')
################################################################################

def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
    """
    Returns list of Architecture objects for given C{suite} name. The list is
    empty if suite does not exist.

    @type suite: str
    @param suite: Suite name to search for

    @type skipsrc: boolean
    @param skipsrc: Whether to skip returning the 'source' architecture entry

    @type skipall: boolean
    @param skipall: Whether to skip returning the 'all' architecture entry

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: list of Architecture objects for the given name (may be empty)
    """
    # Delegate to Suite.get_architectures; a missing suite makes get_suite
    # return None, whose AttributeError is turned into an empty result.
    return get_suite(suite, session).get_architectures(skipsrc, skipall)
    except AttributeError:

__all__.append('get_suite_architectures')
################################################################################

class Uid(ORMObject):
    """ORM class for table 'uid' (a key's user id)."""

    def __init__(self, uid = None, name = None):
        # Bug fix: both constructor arguments were previously discarded.
        self.uid = uid
        self.name = name

    def __eq__(self, val):
        # Allow comparing a Uid directly against its uid string.
        if isinstance(val, str):
            return (self.uid == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.uid != val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def properties(self):
        return ['uid', 'name', 'fingerprint']

    def not_null_constraints(self):
        # NOTE(review): return value reconstructed (line elided in excerpt).
        return ['uid']

__all__.append('Uid')
def get_or_set_uid(uidname, session=None):
    """
    Returns uid object for given uidname.

    If no matching uidname is found, a row is inserted.

    @type uidname: string
    @param uidname: The uid to add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied).  If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.

    @rtype: Uid
    @return: the uid object for the given uidname
    """
    q = session.query(Uid).filter_by(uid=uidname)
    # Insert a new row when the uid does not exist yet.
    except NoResultFound:
        session.commit_or_flush()

__all__.append('get_or_set_uid')
def get_uid_from_fingerprint(fpr, session=None):
    # Look up the Uid owning the given fingerprint string; the join makes
    # filter_by apply to the Fingerprint table's 'fingerprint' column.
    q = session.query(Uid)
    q = q.join(Fingerprint).filter_by(fingerprint=fpr)
    except NoResultFound:

__all__.append('get_uid_from_fingerprint')
################################################################################

class UploadBlock(object):
    """Block on uploads of one source package (table 'upload_blocks')."""

    def __init__(self, *args, **kwargs):
        # Attributes are populated by the SQLAlchemy mapper.
        pass

    def __repr__(self):
        # Bug fix: the 'def __repr__(self):' line was missing, leaving a
        # bare return statement in the class body.
        return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
# Export UploadBlock as part of this module's public API.
__all__.append('UploadBlock')

################################################################################
class MetadataKey(ORMObject):
    """Name of a control-file field stored as package metadata."""

    def __init__(self, key = None):
        # Bug fix: the 'key' argument was previously discarded.
        self.key = key

    def properties(self):
        # NOTE(review): return value reconstructed (line elided in excerpt).
        return ['key']

    def not_null_constraints(self):
        return ['key']

__all__.append('MetadataKey')
def get_or_set_metadatakey(keyname, session=None):
    """
    Returns MetadataKey object for given keyname.

    If no matching keyname is found, a row is inserted.

    @type keyname: string
    @param keyname: The keyname to add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied).  If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.

    @rtype: MetadataKey
    @return: the metadatakey object for the given keyname
    """
    q = session.query(MetadataKey).filter_by(key=keyname)
    # Create the key on first use.
    except NoResultFound:
        ret = MetadataKey(keyname)
        session.commit_or_flush()

__all__.append('get_or_set_metadatakey')
################################################################################

class BinaryMetadata(ORMObject):
    """One key/value metadata pair attached to a binary package."""

    def __init__(self, key = None, value = None, binary = None):
        # Bug fix: 'key' and 'value' were previously discarded.
        self.key = key
        self.value = value
        self.binary = binary

    def properties(self):
        return ['binary', 'key', 'value']

    def not_null_constraints(self):
        # NOTE(review): return value reconstructed (line elided in excerpt).
        return ['key', 'value']

__all__.append('BinaryMetadata')
################################################################################

class SourceMetadata(ORMObject):
    """One key/value metadata pair attached to a source package."""

    def __init__(self, key = None, value = None, source = None):
        # Bug fix: 'key' and 'value' were previously discarded.
        self.key = key
        self.value = value
        self.source = source

    def properties(self):
        return ['source', 'key', 'value']

    def not_null_constraints(self):
        # NOTE(review): return value reconstructed (line elided in excerpt).
        return ['key', 'value']

__all__.append('SourceMetadata')
################################################################################

class VersionCheck(ORMObject):
    """Version constraint between two suites (table 'version_check')."""

    def __init__(self, *args, **kwargs):
        # Attributes are populated by the SQLAlchemy mapper.
        pass

    def properties(self):
        #return ['suite_id', 'check', 'reference_id']
        # NOTE(review): return value reconstructed (line elided in excerpt).
        return ['check']

    def not_null_constraints(self):
        return ['suite', 'check', 'reference']

__all__.append('VersionCheck')
def get_version_checks(suite_name, check = None, session = None):
    # Return the VersionCheck rows for the given suite, optionally narrowed
    # to one check type (e.g. 'Enhances', 'MustBeNewerThan').
    suite = get_suite(suite_name, session)

    # Make sure that what we return is iterable so that list comprehensions
    # involving this don't cause a traceback
    q = session.query(VersionCheck).filter_by(suite=suite)
    # presumably applied only under 'if check:' in the full source — confirm
    q = q.filter_by(check=check)

__all__.append('get_version_checks')

################################################################################
3289 class DBConn(object):
3291 database module init.
3295 def __init__(self, *args, **kwargs):
3296 self.__dict__ = self.__shared_state
3298 if not getattr(self, 'initialised', False):
3299 self.initialised = True
3300 self.debug = kwargs.has_key('debug')
3303 def __setuptables(self):
3310 'binaries_metadata',
3314 'build_queue_files',
3315 'build_queue_policy_files',
3320 'changes_pending_binaries',
3321 'changes_pending_files',
3322 'changes_pending_source',
3323 'changes_pending_files_map',
3324 'changes_pending_source_files',
3325 'changes_pool_files',
3327 'external_overrides',
3328 'extra_src_references',
3330 'files_archive_map',
3338 # TODO: the maintainer column in table override should be removed.
3342 'policy_queue_upload',
3343 'policy_queue_upload_binaries_map',
3344 'policy_queue_byhand_file',
3355 'suite_architectures',
3356 'suite_build_queue_copy',
3357 'suite_src_formats',
3364 'almost_obsolete_all_associations',
3365 'almost_obsolete_src_associations',
3366 'any_associations_source',
3367 'bin_associations_binaries',
3368 'binaries_suite_arch',
3371 'newest_all_associations',
3372 'newest_any_associations',
3374 'newest_src_association',
3375 'obsolete_all_associations',
3376 'obsolete_any_associations',
3377 'obsolete_any_by_all_associations',
3378 'obsolete_src_associations',
3380 'src_associations_bin',
3381 'src_associations_src',
3382 'suite_arch_by_name',
3385 for table_name in tables:
3386 table = Table(table_name, self.db_meta, \
3387 autoload=True, useexisting=True)
3388 setattr(self, 'tbl_%s' % table_name, table)
3390 for view_name in views:
3391 view = Table(view_name, self.db_meta, autoload=True)
3392 setattr(self, 'view_%s' % view_name, view)
3394 def __setupmappers(self):
3395 mapper(Architecture, self.tbl_architecture,
3396 properties = dict(arch_id = self.tbl_architecture.c.id,
3397 suites = relation(Suite, secondary=self.tbl_suite_architectures,
3398 order_by=self.tbl_suite.c.suite_name,
3399 backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
3400 extension = validator)
3402 mapper(Archive, self.tbl_archive,
3403 properties = dict(archive_id = self.tbl_archive.c.id,
3404 archive_name = self.tbl_archive.c.name))
3406 mapper(ArchiveFile, self.tbl_files_archive_map,
3407 properties = dict(archive = relation(Archive, backref='files'),
3408 component = relation(Component),
3409 file = relation(PoolFile, backref='archives')))
3411 mapper(BuildQueue, self.tbl_build_queue,
3412 properties = dict(queue_id = self.tbl_build_queue.c.id,
3413 suite = relation(Suite, primaryjoin=(self.tbl_build_queue.c.suite_id==self.tbl_suite.c.id))))
3415 mapper(BuildQueueFile, self.tbl_build_queue_files,
3416 properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
3417 poolfile = relation(PoolFile, backref='buildqueueinstances')))
3419 mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
3421 build_queue = relation(BuildQueue, backref='policy_queue_files'),
3422 file = relation(ChangePendingFile, lazy='joined')))
3424 mapper(DBBinary, self.tbl_binaries,
3425 properties = dict(binary_id = self.tbl_binaries.c.id,
3426 package = self.tbl_binaries.c.package,
3427 version = self.tbl_binaries.c.version,
3428 maintainer_id = self.tbl_binaries.c.maintainer,
3429 maintainer = relation(Maintainer),
3430 source_id = self.tbl_binaries.c.source,
3431 source = relation(DBSource, backref='binaries'),
3432 arch_id = self.tbl_binaries.c.architecture,
3433 architecture = relation(Architecture),
3434 poolfile_id = self.tbl_binaries.c.file,
3435 poolfile = relation(PoolFile),
3436 binarytype = self.tbl_binaries.c.type,
3437 fingerprint_id = self.tbl_binaries.c.sig_fpr,
3438 fingerprint = relation(Fingerprint),
3439 install_date = self.tbl_binaries.c.install_date,
3440 suites = relation(Suite, secondary=self.tbl_bin_associations,
3441 backref=backref('binaries', lazy='dynamic')),
3442 extra_sources = relation(DBSource, secondary=self.tbl_extra_src_references,
3443 backref=backref('extra_binary_references', lazy='dynamic')),
3444 key = relation(BinaryMetadata, cascade='all',
3445 collection_class=attribute_mapped_collection('key'))),
3446 extension = validator)
3448 mapper(BinaryACL, self.tbl_binary_acl,
3449 properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
3451 mapper(BinaryACLMap, self.tbl_binary_acl_map,
3452 properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
3453 fingerprint = relation(Fingerprint, backref="binary_acl_map"),
3454 architecture = relation(Architecture)))
3456 mapper(Component, self.tbl_component,
3457 properties = dict(component_id = self.tbl_component.c.id,
3458 component_name = self.tbl_component.c.name),
3459 extension = validator)
3461 mapper(DBConfig, self.tbl_config,
3462 properties = dict(config_id = self.tbl_config.c.id))
3464 mapper(DSCFile, self.tbl_dsc_files,
3465 properties = dict(dscfile_id = self.tbl_dsc_files.c.id,
3466 source_id = self.tbl_dsc_files.c.source,
3467 source = relation(DBSource),
3468 poolfile_id = self.tbl_dsc_files.c.file,
3469 poolfile = relation(PoolFile)))
3471 mapper(ExternalOverride, self.tbl_external_overrides,
3473 suite_id = self.tbl_external_overrides.c.suite,
3474 suite = relation(Suite),
3475 component_id = self.tbl_external_overrides.c.component,
3476 component = relation(Component)))
3478 mapper(PoolFile, self.tbl_files,
3479 properties = dict(file_id = self.tbl_files.c.id,
3480 filesize = self.tbl_files.c.size),
3481 extension = validator)
3483 mapper(Fingerprint, self.tbl_fingerprint,
3484 properties = dict(fingerprint_id = self.tbl_fingerprint.c.id,
3485 uid_id = self.tbl_fingerprint.c.uid,
3486 uid = relation(Uid),
3487 keyring_id = self.tbl_fingerprint.c.keyring,
3488 keyring = relation(Keyring),
3489 source_acl = relation(SourceACL),
3490 binary_acl = relation(BinaryACL)),
3491 extension = validator)
3493 mapper(Keyring, self.tbl_keyrings,
3494 properties = dict(keyring_name = self.tbl_keyrings.c.name,
3495 keyring_id = self.tbl_keyrings.c.id))
3497 mapper(DBChange, self.tbl_changes,
3498 properties = dict(change_id = self.tbl_changes.c.id,
3499 poolfiles = relation(PoolFile,
3500 secondary=self.tbl_changes_pool_files,
3501 backref="changeslinks"),
3502 seen = self.tbl_changes.c.seen,
3503 source = self.tbl_changes.c.source,
3504 binaries = self.tbl_changes.c.binaries,
3505 architecture = self.tbl_changes.c.architecture,
3506 distribution = self.tbl_changes.c.distribution,
3507 urgency = self.tbl_changes.c.urgency,
3508 maintainer = self.tbl_changes.c.maintainer,
3509 changedby = self.tbl_changes.c.changedby,
3510 date = self.tbl_changes.c.date,
3511 version = self.tbl_changes.c.version,
3512 files = relation(ChangePendingFile,
3513 secondary=self.tbl_changes_pending_files_map,
3514 backref="changesfile"),
3515 in_queue_id = self.tbl_changes.c.in_queue,
3516 in_queue = relation(PolicyQueue,
3517 primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
3518 approved_for_id = self.tbl_changes.c.approved_for))
3520 mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
3521 properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
3523 mapper(ChangePendingFile, self.tbl_changes_pending_files,
3524 properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
3525 filename = self.tbl_changes_pending_files.c.filename,
3526 size = self.tbl_changes_pending_files.c.size,
3527 md5sum = self.tbl_changes_pending_files.c.md5sum,
3528 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
3529 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
3531 mapper(ChangePendingSource, self.tbl_changes_pending_source,
3532 properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
3533 change = relation(DBChange),
3534 maintainer = relation(Maintainer,
3535 primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
3536 changedby = relation(Maintainer,
3537 primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
3538 fingerprint = relation(Fingerprint),
3539 source_files = relation(ChangePendingFile,
3540 secondary=self.tbl_changes_pending_source_files,
3541 backref="pending_sources")))
3544 mapper(KeyringACLMap, self.tbl_keyring_acl_map,
3545 properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
3546 keyring = relation(Keyring, backref="keyring_acl_map"),
3547 architecture = relation(Architecture)))
3549 mapper(Location, self.tbl_location,
3550 properties = dict(location_id = self.tbl_location.c.id,
3551 component_id = self.tbl_location.c.component,
3552 component = relation(Component, backref='location'),
3553 archive_id = self.tbl_location.c.archive,
3554 archive = relation(Archive),
3555 # FIXME: the 'type' column is old cruft and
3556 # should be removed in the future.
3557 archive_type = self.tbl_location.c.type),
3558 extension = validator)
3560 mapper(Maintainer, self.tbl_maintainer,
3561 properties = dict(maintainer_id = self.tbl_maintainer.c.id,
3562 maintains_sources = relation(DBSource, backref='maintainer',
3563 primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.maintainer)),
3564 changed_sources = relation(DBSource, backref='changedby',
3565 primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.changedby))),
3566 extension = validator)
3568 mapper(NewComment, self.tbl_new_comments,
3569 properties = dict(comment_id = self.tbl_new_comments.c.id))
3571 mapper(Override, self.tbl_override,
3572 properties = dict(suite_id = self.tbl_override.c.suite,
3573 suite = relation(Suite, \
3574 backref=backref('overrides', lazy='dynamic')),
3575 package = self.tbl_override.c.package,
3576 component_id = self.tbl_override.c.component,
3577 component = relation(Component, \
3578 backref=backref('overrides', lazy='dynamic')),
3579 priority_id = self.tbl_override.c.priority,
3580 priority = relation(Priority, \
3581 backref=backref('overrides', lazy='dynamic')),
3582 section_id = self.tbl_override.c.section,
3583 section = relation(Section, \
3584 backref=backref('overrides', lazy='dynamic')),
3585 overridetype_id = self.tbl_override.c.type,
3586 overridetype = relation(OverrideType, \
3587 backref=backref('overrides', lazy='dynamic'))))
3589 mapper(OverrideType, self.tbl_override_type,
3590 properties = dict(overridetype = self.tbl_override_type.c.type,
3591 overridetype_id = self.tbl_override_type.c.id))
3593 mapper(PolicyQueue, self.tbl_policy_queue,
3594 properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
3596 mapper(PolicyQueueUpload, self.tbl_policy_queue_upload,
3598 changes = relation(DBChange),
3599 policy_queue = relation(PolicyQueue, backref='uploads'),
3600 target_suite = relation(Suite),
3601 source = relation(DBSource),
3602 binaries = relation(DBBinary, secondary=self.tbl_policy_queue_upload_binaries_map),
3605 mapper(PolicyQueueByhandFile, self.tbl_policy_queue_byhand_file,
3607 upload = relation(PolicyQueueUpload, backref='byhand'),
3611 mapper(Priority, self.tbl_priority,
3612 properties = dict(priority_id = self.tbl_priority.c.id))
3614 mapper(Section, self.tbl_section,
3615 properties = dict(section_id = self.tbl_section.c.id,
3616 section=self.tbl_section.c.section))
3618 mapper(DBSource, self.tbl_source,
3619 properties = dict(source_id = self.tbl_source.c.id,
3620 version = self.tbl_source.c.version,
3621 maintainer_id = self.tbl_source.c.maintainer,
3622 poolfile_id = self.tbl_source.c.file,
3623 poolfile = relation(PoolFile),
3624 fingerprint_id = self.tbl_source.c.sig_fpr,
3625 fingerprint = relation(Fingerprint),
3626 changedby_id = self.tbl_source.c.changedby,
3627 srcfiles = relation(DSCFile,
3628 primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
3629 suites = relation(Suite, secondary=self.tbl_src_associations,
3630 backref=backref('sources', lazy='dynamic')),
3631 uploaders = relation(Maintainer,
3632 secondary=self.tbl_src_uploaders),
3633 key = relation(SourceMetadata, cascade='all',
3634 collection_class=attribute_mapped_collection('key'))),
3635 extension = validator)
3637 mapper(SourceACL, self.tbl_source_acl,
3638 properties = dict(source_acl_id = self.tbl_source_acl.c.id))
3640 mapper(SrcFormat, self.tbl_src_format,
3641 properties = dict(src_format_id = self.tbl_src_format.c.id,
3642 format_name = self.tbl_src_format.c.format_name))
3644 mapper(Suite, self.tbl_suite,
3645 properties = dict(suite_id = self.tbl_suite.c.id,
3646 policy_queue = relation(PolicyQueue),
3647 copy_queues = relation(BuildQueue,
3648 secondary=self.tbl_suite_build_queue_copy),
3649 srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
3650 backref=backref('suites', lazy='dynamic')),
3651 archive = relation(Archive, backref='suites')),
3652 extension = validator)
3654 mapper(Uid, self.tbl_uid,
3655 properties = dict(uid_id = self.tbl_uid.c.id,
3656 fingerprint = relation(Fingerprint)),
3657 extension = validator)
3659 mapper(UploadBlock, self.tbl_upload_blocks,
3660 properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
3661 fingerprint = relation(Fingerprint, backref="uploadblocks"),
3662 uid = relation(Uid, backref="uploadblocks")))
3664 mapper(BinContents, self.tbl_bin_contents,
3666 binary = relation(DBBinary,
3667 backref=backref('contents', lazy='dynamic', cascade='all')),
3668 file = self.tbl_bin_contents.c.file))
3670 mapper(SrcContents, self.tbl_src_contents,
3672 source = relation(DBSource,
3673 backref=backref('contents', lazy='dynamic', cascade='all')),
3674 file = self.tbl_src_contents.c.file))
3676 mapper(MetadataKey, self.tbl_metadata_keys,
3678 key_id = self.tbl_metadata_keys.c.key_id,
3679 key = self.tbl_metadata_keys.c.key))
3681 mapper(BinaryMetadata, self.tbl_binaries_metadata,
3683 binary_id = self.tbl_binaries_metadata.c.bin_id,
3684 binary = relation(DBBinary),
3685 key_id = self.tbl_binaries_metadata.c.key_id,
3686 key = relation(MetadataKey),
3687 value = self.tbl_binaries_metadata.c.value))
3689 mapper(SourceMetadata, self.tbl_source_metadata,
3691 source_id = self.tbl_source_metadata.c.src_id,
3692 source = relation(DBSource),
3693 key_id = self.tbl_source_metadata.c.key_id,
3694 key = relation(MetadataKey),
3695 value = self.tbl_source_metadata.c.value))
3697 mapper(VersionCheck, self.tbl_version_check,
3699 suite_id = self.tbl_version_check.c.suite,
3700 suite = relation(Suite, primaryjoin=self.tbl_version_check.c.suite==self.tbl_suite.c.id),
3701 reference_id = self.tbl_version_check.c.reference,
3702 reference = relation(Suite, primaryjoin=self.tbl_version_check.c.reference==self.tbl_suite.c.id, lazy='joined')))
    ## Connection functions
    def __createconn(self):
        """
        Create the database connection and initialise the ORM.

        Builds a PostgreSQL connection string from the ``DB::*``
        configuration tree (service name, host/port, or local socket),
        installs a psycopg2 dialect subclass so ``service=`` URLs work,
        creates the engine, metadata and sessionmaker, and finally sets
        up the table and mapper definitions.

        On connection failure the process is aborted via utils.fubar().

        NOTE(review): several lines of this method appear to be elided
        from this view (e.g. the binding of ``cnf``, a ``try:`` opener,
        an ``else:`` branch and the sessionmaker keyword arguments); the
        NOTE markers below flag each apparent gap — confirm against the
        full source.
        """
        from config import Config
        # NOTE(review): presumably ``cnf = Config()`` is elided here — verify.
        if cnf.has_key("DB::Service"):
            # Connect via a pg_service.conf service name.
            connstr = "postgresql://service=%s" % cnf["DB::Service"]
        elif cnf.has_key("DB::Host"):
            # TCP connection: host, optional port, then database name.
            connstr = "postgresql://%s" % cnf["DB::Host"]
            if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
                connstr += ":%s" % cnf["DB::Port"]
            connstr += "/%s" % cnf["DB::Name"]
            # NOTE(review): an ``else:`` branch (local/unix-socket case) appears
            # to be elided before the next statement — the ?port form below is
            # the socket-style URL; confirm placement against the full source.
            connstr = "postgresql:///%s" % cnf["DB::Name"]
            if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
                connstr += "?port=%s" % cnf["DB::Port"]

        # Engine options: echo SQL when debugging; pool sizing is optional.
        engine_args = { 'echo': self.debug }
        if cnf.has_key('DB::PoolSize'):
            engine_args['pool_size'] = int(cnf['DB::PoolSize'])
        if cnf.has_key('DB::MaxOverflow'):
            engine_args['max_overflow'] = int(cnf['DB::MaxOverflow'])
        # SQLAlchemy 0.6 only: allow disabling native unicode via config.
        if sa_major_version == '0.6' and cnf.has_key('DB::Unicode') and \
            cnf['DB::Unicode'] == 'false':
            engine_args['use_native_unicode'] = False

        # Monkey patch a new dialect in in order to support service= syntax
        import sqlalchemy.dialects.postgresql
        from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
        class PGDialect_psycopg2_dak(PGDialect_psycopg2):
            def create_connect_args(self, url):
                # For service= URLs, hand psycopg2 the raw DSN string;
                # [21:] strips the 'postgresql://service=' prefix.
                if str(url).startswith('postgresql://service='):
                    servicename = str(url)[21:]
                    return (['service=%s' % servicename], {})
                return PGDialect_psycopg2.create_connect_args(self, url)
        sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak

        # NOTE(review): a ``try:`` opener appears to be elided before the
        # engine/metadata setup below (the ``except`` further down has no
        # visible matching ``try``) — confirm against the full source.
        self.db_pg   = create_engine(connstr, **engine_args)
        self.db_meta = MetaData()
        self.db_meta.bind = self.db_pg
        # NOTE(review): the remaining sessionmaker keyword arguments and the
        # closing parenthesis appear to be elided from this view.
        self.db_smaker = sessionmaker(bind=self.db_pg,
        self.__setuptables()
        self.__setupmappers()
        except OperationalError as e:
            # Cannot reach the database: abort the whole process.
            utils.fubar("Cannot connect to database (%s)" % str(e))

        # Remember the creating pid so session() can detect use after fork().
        self.pid = os.getpid()
    def session(self, work_mem = 0):
        """
        Returns a new session object. If a work_mem parameter is provided a new
        transaction is started and the work_mem parameter is set for this
        transaction. The work_mem parameter is measured in MB. A default value
        will be used if the parameter is not set.

        NOTE(review): several statements of this method appear to be elided
        from this view (the reinitialisation body of the pid check, a
        ``work_mem > 0`` guard before the SET LOCAL, and the final
        ``return session``) — confirm against the full source.
        """
        # reinitialize DBConn in new processes
        # (self.pid was recorded at connection time; a differing value means
        # we are running in a fork() child and must not reuse the parent's
        # connection pool.)
        if self.pid != os.getpid():
            # NOTE(review): the actual reinitialisation statements appear to be
            # elided here; the assignment below is likely at method level in
            # the full source, not inside this if — verify.
            session = self.db_smaker()
        # Raise the per-transaction working memory; value is in MB.
        session.execute("SET LOCAL work_mem TO '%d MB'" % work_mem)
# Publish DBConn as part of the module's exported API.
__all__ += ['DBConn']