5 @contact: Debian FTPMaster <ftpmaster@debian.org>
6 @copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
7 @copyright: 2008-2009 Mark Hymers <mhy@debian.org>
8 @copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
9 @copyright: 2009 Mike O'Connor <stew@debian.org>
10 @license: GNU General Public License version 2 or later
13 # This program is free software; you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation; either version 2 of the License, or
16 # (at your option) any later version.
18 # This program is distributed in the hope that it will be useful,
19 # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 # GNU General Public License for more details.
23 # You should have received a copy of the GNU General Public License
24 # along with this program; if not, write to the Free Software
25 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 ################################################################################
29 # < mhy> I need a funny comment
30 # < sgran> two peanuts were walking down a dark street
31 # < sgran> one was a-salted
32 # * mhy looks up the definition of "funny"
34 ################################################################################
37 from os.path import normpath
48 import simplejson as json
50 from datetime import datetime, timedelta
51 from errno import ENOENT
52 from tempfile import mkstemp, mkdtemp
53 from subprocess import Popen, PIPE
54 from tarfile import TarFile
56 from inspect import getargspec
59 from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
61 from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
62 backref, MapperExtension, EXT_CONTINUE, object_mapper, clear_mappers
63 from sqlalchemy import types as sqltypes
64 from sqlalchemy.orm.collections import attribute_mapped_collection
65 from sqlalchemy.ext.associationproxy import association_proxy
67 # Don't remove this, we re-export the exceptions to scripts which import us
68 from sqlalchemy.exc import *
69 from sqlalchemy.orm.exc import NoResultFound
71 # Only import Config until Queue stuff is changed to store its config
73 from config import Config
74 from textutils import fix_maintainer
75 from dak_exceptions import DBUpdateError, NoSourceFieldError
77 # suppress some deprecation warnings in squeeze related to sqlalchemy
79 warnings.filterwarnings('ignore', \
80 "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
84 ################################################################################
86 # Patch in support for the debversion field type so that it works during
# NOTE(review): the two UserDefinedType assignments below are alternative
# bindings for different SQLAlchemy releases; the guard (presumably a
# try/except on the 0.6 attribute) selecting between them appears to be
# truncated in this excerpt — confirm against the full file.
90 # that is for sqlalchemy 0.6
91 UserDefinedType = sqltypes.UserDefinedType
93 # this one for sqlalchemy 0.5
94 UserDefinedType = sqltypes.TypeEngine
# Column type exposing PostgreSQL's 'debversion' type to the ORM.
# Method bodies are not visible here; only the signatures remain.
96 class DebVersion(UserDefinedType):
97 def get_col_spec(self):
100 def bind_processor(self, dialect):
103 # ' = None' is needed for sqlalchemy 0.5:
104 def result_processor(self, dialect, coltype = None):
# Register 'debversion' for schema reflection. Anything other than
# SQLAlchemy 0.5/0.6 is rejected outright.
107 sa_major_version = sqlalchemy.__version__[0:3]
108 if sa_major_version in ["0.5", "0.6"]:
109 from sqlalchemy.databases import postgres
110 postgres.ischema_names['debversion'] = DebVersion
112 raise Exception("dak only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
114 ################################################################################
116 __all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion']
118 ################################################################################
# Decorator: ensures the wrapped function always receives a 'session'
# (creating a private one when the caller passed none) and attaches
# session.commit_or_flush — commit() for a private session, flush() for a
# caller-owned one. NOTE(review): the docstring delimiters and the
# try/finally that closes a privately created session appear to be
# truncated in this excerpt.
120 def session_wrapper(fn):
122     Wrapper around common ".., session=None):" handling. If the wrapped
123     function is called without passing 'session', we create a local one
124     and destroy it when the function ends.
126     Also attaches a commit_or_flush method to the session; if we created a
127     local session, this is a synonym for session.commit(), otherwise it is a
128     synonym for session.flush().
131 def wrapped(*args, **kwargs):
132 private_transaction = False
134 # Find the session object
135 session = kwargs.get('session')
# Compare positional-arg count against the wrapped function's declared
# parameters to decide whether 'session' was supplied positionally.
138 if len(args) <= len(getargspec(fn)[0]) - 1:
139 # No session specified as last argument or in kwargs
140 private_transaction = True
141 session = kwargs['session'] = DBConn().session()
143 # Session is last argument in args
147 session = args[-1] = DBConn().session()
148 private_transaction = True
150 if private_transaction:
151 session.commit_or_flush = session.commit
153 session.commit_or_flush = session.flush
156 return fn(*args, **kwargs)
158 if private_transaction:
159 # We created a session; close it.
# Preserve the wrapped function's metadata (pre-functools.wraps style).
162 wrapped.__doc__ = fn.__doc__
163 wrapped.func_name = fn.func_name
167 __all__.append('session_wrapper')
169 ################################################################################
# Common base class for all mapped ORM classes: provides json()/repr()
# built from the subclass's properties() list, plus NOT NULL validation
# and a thread-oriented clone() helper. NOTE(review): several method
# headers (e.g. 'def json(self):', '__repr__', '__str__') and branches
# are not visible in this excerpt.
171 class ORMObject(object):
173     ORMObject is a base class for all ORM classes mapped by SQLalchemy. All
174     derived classes must implement the properties() method.
177 def properties(self):
179         This method should be implemented by all derived classes and returns a
180         list of the important properties. The properties 'created' and
181         'modified' will be added automatically. A suffix '_count' should be
182         added to properties that are lists or query objects. The most important
183         property name should be returned as the first element in the list
184         because it is used by repr().
# json(): serialize the properties() list (plus created/modified) to a
# JSON string; '_count' suffixed entries are resolved to len()/count().
190         Returns a JSON representation of the object based on the properties
191         returned from the properties() method.
194 # add created and modified
195 all_properties = self.properties() + ['created', 'modified']
196 for property in all_properties:
197 # check for list or query
198 if property[-6:] == '_count':
199 real_property = property[:-6]
200 if not hasattr(self, real_property):
202 value = getattr(self, real_property)
203 if hasattr(value, '__len__'):
206 elif hasattr(value, 'count'):
207 # query (but not during validation)
# Skip issuing a count() query while validate() is running to avoid a
# second flush — see in_validation below.
208 if self.in_validation:
210 value = value.count()
212 raise KeyError('Do not understand property %s.' % property)
214 if not hasattr(self, property):
217 value = getattr(self, property)
221 elif isinstance(value, ORMObject):
222 # use repr() for ORMObject types
225 # we want a string for all other types because json cannot
228 data[property] = value
229 return json.dumps(data)
233         Returns the name of the class.
235 return type(self).__name__
239         Returns a short string representation of the object using the first
240         element from the properties() method.
242 primary_property = self.properties()[0]
243 value = getattr(self, primary_property)
244 return '<%s %s>' % (self.classname(), str(value))
248         Returns a human readable form of the object using the properties()
251 return '<%s %s>' % (self.classname(), self.json())
253 def not_null_constraints(self):
255         Returns a list of properties that must be not NULL. Derived classes
256         should override this method if needed.
# Class-level template for validation failure messages; in_validation is
# a re-entrancy flag read by json() above.
260 validation_message = \
261 "Validation failed because property '%s' must not be empty in object\n%s"
263 in_validation = False
267         This function validates the not NULL constraints as returned by
268         not_null_constraints(). It raises the DBUpdateError exception if
271 for property in self.not_null_constraints():
272 # TODO: It is a bit awkward that the mapper configuration allow
273 # directly setting the numeric _id columns. We should get rid of it
# A populated foreign-key '<property>_id' column satisfies the
# constraint even if the relation attribute itself is unset.
275 if hasattr(self, property + '_id') and \
276 getattr(self, property + '_id') is not None:
278 if not hasattr(self, property) or getattr(self, property) is None:
279 # str() might lead to races due to a 2nd flush
280 self.in_validation = True
281 message = self.validation_message % (property, str(self))
282 self.in_validation = False
283 raise DBUpdateError(message)
# get(): classmethod convenience wrapper around session.query(cls).get().
287 def get(cls, primary_key, session = None):
289         This is a support function that allows getting an object by its primary
292         Architecture.get(3[, session])
294         instead of the more verbose
296         session.query(Architecture).get(3)
298 return session.query(cls).get(primary_key)
300 def session(self, replace = False):
302         Returns the current session that is associated with the object. May
303         return None is object is in detached state.
306 return object_session(self)
308 def clone(self, session = None):
310         Clones the current object in a new session and returns the new clone. A
311         fresh session is created if the optional session parameter is not
312         provided. The function will fail if a session is provided and has
315         RATIONALE: SQLAlchemy's session is not thread safe. This method clones
316         an existing object to allow several threads to work with their own
317         instances of an ORMObject.
319         WARNING: Only persistent (committed) objects can be cloned. Changes
320         made to the original object that are not committed yet will get lost.
321         The session of the new object will always be rolled back to avoid
325 if self.session() is None:
326 raise RuntimeError( \
327 'Method clone() failed for detached object:\n%s' % self)
328 self.session().flush()
329 mapper = object_mapper(self)
330 primary_key = mapper.primary_key_from_instance(self)
331 object_class = self.__class__
# NOTE(review): the 'if session is None:' guard preceding this line
# appears to be truncated in this excerpt.
333 session = DBConn().session()
334 elif len(session.new) + len(session.dirty) + len(session.deleted) > 0:
335 raise RuntimeError( \
336 'Method clone() failed due to unflushed changes in session.')
337 new_object = session.query(object_class).get(primary_key)
339 if new_object is None:
340 raise RuntimeError( \
341 'Method clone() failed for non-persistent object:\n%s' % self)
344 __all__.append('ORMObject')
346 ################################################################################
# MapperExtension hooking validate() into before_update/before_insert so
# NOT NULL constraints are checked before every flush. Method bodies are
# not visible in this excerpt (presumably 'instance.validate(); return
# EXT_CONTINUE' — confirm against the full file).
348 class Validator(MapperExtension):
350     This class calls the validate() method for each instance for the
351     'before_update' and 'before_insert' events. A global object validator is
352     used for configuring the individual mappers.
355 def before_update(self, mapper, connection, instance):
359 def before_insert(self, mapper, connection, instance):
# Shared singleton passed to every mapper() call as its extension.
363 validator = Validator()
365 ################################################################################
class Architecture(ORMObject):
    """ORM class for the 'architecture' table.

    Instances compare equal/unequal to plain strings via their
    arch_string; any other comparison type defers to the default
    operator by returning NotImplemented.
    """

    def __init__(self, arch_string = None, description = None):
        self.arch_string = arch_string
        self.description = description

    def __eq__(self, val):
        # Allow comparing an Architecture directly against its name.
        if not isinstance(val, str):
            # Signal Python to fall back to the normal comparison.
            return NotImplemented
        return self.arch_string == val

    def __ne__(self, val):
        if not isinstance(val, str):
            return NotImplemented
        return self.arch_string != val

    def properties(self):
        # 'arch_string' comes first: ORMObject.__repr__ uses element 0.
        return ['arch_string', 'arch_id', 'suites_count']

    def not_null_constraints(self):
        return ['arch_string']
390 __all__.append('Architecture')
# Despite the summary line below, this returns an Architecture object
# (see @return), not a numeric id. The @session_wrapper decorator and
# the try/return-None lines appear to be truncated in this excerpt.
393 def get_architecture(architecture, session=None):
395     Returns Architecture object for given C{architecture}.
397     @type architecture: string
398     @param architecture: The name of the architecture
400     @type session: Session
401     @param session: Optional SQLA session object (a temporary one will be
402     generated if not supplied)
405     @return: Architecture object for the given arch (None if not present)
408 q = session.query(Architecture).filter_by(arch_string=architecture)
412 except NoResultFound:
415 __all__.append('get_architecture')
417 # TODO: should be removed because the implementation is too trivial
419 def get_architecture_suites(architecture, session=None):
421     Returns list of Suite objects for given C{architecture} name
423     @type architecture: str
424     @param architecture: Architecture name to search for
426     @type session: Session
427     @param session: Optional SQL session object (a temporary one will be
428     generated if not supplied)
431     @return: list of Suite objects for the given name (may be empty)
# NOTE(review): raises AttributeError (not an empty list) when the
# architecture is unknown, since get_architecture() then returns None.
434 return get_architecture(architecture, session).suites
436 __all__.append('get_architecture_suites')
438 ################################################################################
# Mapped class for the 'archive' table; __init__/__repr__ bodies are
# partially truncated in this excerpt.
440 class Archive(object):
441 def __init__(self, *args, **kwargs):
445 return '<Archive %s>' % self.archive_name
447 __all__.append('Archive')
# Returns an Archive object (see @return), not a numeric id. Lookup is
# case-insensitive: the name is lowercased first.
450 def get_archive(archive, session=None):
452     returns Archive object for given C{archive}.
454     @type archive: string
455     @param archive: the name of the archive
457     @type session: Session
458     @param session: Optional SQLA session object (a temporary one will be
459     generated if not supplied)
462     @return: Archive object for the given name (None if not present)
465 archive = archive.lower()
467 q = session.query(Archive).filter_by(archive_name=archive)
471 except NoResultFound:
474 __all__.append('get_archive')
476 ################################################################################
# Association object linking a contents file entry to a binary package.
478 class BinContents(ORMObject):
479 def __init__(self, file = None, binary = None):
483 def properties(self):
484 return ['file', 'binary']
486 __all__.append('BinContents')
488 ################################################################################
# Mapped class for the 'binaries' table: one row per .deb/.udeb in the
# pool, with helpers that shell out to dpkg-deb / python-apt to inspect
# the on-disk package file.
490 class DBBinary(ORMObject):
491 def __init__(self, package = None, source = None, version = None, \
492 maintainer = None, architecture = None, poolfile = None, \
494 self.package = package
496 self.version = version
497 self.maintainer = maintainer
498 self.architecture = architecture
499 self.poolfile = poolfile
500 self.binarytype = binarytype
504 return self.binary_id
506 def properties(self):
507 return ['package', 'version', 'maintainer', 'source', 'architecture', \
508 'poolfile', 'binarytype', 'fingerprint', 'install_date', \
509 'suites_count', 'binary_id', 'contents_count', 'extra_sources']
511 def not_null_constraints(self):
512 return ['package', 'version', 'maintainer', 'source', 'poolfile', \
# Proxy binary_metadata key/value rows as a plain dict-like attribute.
515 metadata = association_proxy('key', 'value')
517 def get_component_name(self):
518 return self.poolfile.location.component.component_name
# Generator: streams the package's file list via
# 'dpkg-deb --fsys-tarfile' piped into a streaming TarFile ('r|' mode).
520 def scan_contents(self):
522         Yields the contents of the package. Only regular files are yielded and
523         the path names are normalized after converting them from either utf-8
524         or iso8859-1 encoding. It yields the string ' <EMPTY PACKAGE>' if the
525         package does not contain any regular file.
527 fullpath = self.poolfile.fullpath
528 dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE)
529 tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
530 for member in tar.getmembers():
531 if not member.isdir():
532 name = normpath(member.name)
533 # enforce proper utf-8 encoding
# Fall back to latin-1 for pre-policy packages with non-UTF-8 names.
536 except UnicodeDecodeError:
537 name = name.decode('iso8859-1').encode('utf-8')
543 def read_control(self):
545         Reads the control information from a binary.
548         @return: stanza text of the control section.
551 fullpath = self.poolfile.fullpath
552 deb_file = open(fullpath, 'r')
553 stanza = apt_inst.debExtractControl(deb_file)
# NOTE(review): the close() of deb_file is presumably on a line missing
# from this excerpt — confirm the handle is not leaked.
558 def read_control_fields(self):
560         Reads the control information from a binary and return
564         @return: fields of the control section as a dictionary.
567 stanza = self.read_control()
568 return apt_pkg.TagSection(stanza)
570 __all__.append('DBBinary')
# All suites that contain any version of the named binary package.
573 def get_suites_binary_in(package, session=None):
575     Returns list of Suite objects which given C{package} name is in
578     @param package: DBBinary package name to search for
581     @return: list of Suite objects for the given package
584 return session.query(Suite).filter(Suite.binaries.any(DBBinary.package == package)).all()
586 __all__.append('get_suites_binary_in')
# NOTE(review): mutable default argument 'arch_list=[]' — harmless while
# the list is only read, but worth replacing with None in a real edit.
589 def get_component_by_package_suite(package, suite_list, arch_list=[], session=None):
591     Returns the component name of the newest binary package in suite_list or
592     None if no package is found. The result can be optionally filtered by a list
593     of architecture names.
596     @param package: DBBinary package name to search for
598     @type suite_list: list of str
599     @param suite_list: list of suite_name items
601     @type arch_list: list of str
602     @param arch_list: optional list of arch_string items that defaults to []
604     @rtype: str or NoneType
605     @return: name of component or None
608 q = session.query(DBBinary).filter_by(package = package). \
609 join(DBBinary.suites).filter(Suite.suite_name.in_(suite_list))
610 if len(arch_list) > 0:
611 q = q.join(DBBinary.architecture). \
612 filter(Architecture.arch_string.in_(arch_list))
# Highest version wins; the None-guard for 'no match' appears to be on
# lines missing from this excerpt.
613 binary = q.order_by(desc(DBBinary.version)).first()
617 return binary.get_component_name()
619 __all__.append('get_component_by_package_suite')
621 ################################################################################
# Mapped class for the 'binary_acl' table (upload permission sets);
# __init__/__repr__ bodies are partially truncated in this excerpt.
623 class BinaryACL(object):
624 def __init__(self, *args, **kwargs):
628 return '<BinaryACL %s>' % self.binary_acl_id
630 __all__.append('BinaryACL')
632 ################################################################################
# Mapped class for the 'binary_acl_map' table (fingerprint -> ACL).
634 class BinaryACLMap(object):
635 def __init__(self, *args, **kwargs):
639 return '<BinaryACLMap %s>' % self.binary_acl_map_id
641 __all__.append('BinaryACLMap')
643 ################################################################################
648 ArchiveDir "%(archivepath)s";
649 OverrideDir "%(overridedir)s";
650 CacheDir "%(cachedir)s";
655 Packages::Compress ". bzip2 gzip";
656 Sources::Compress ". bzip2 gzip";
661 bindirectory "incoming"
666 BinOverride "override.sid.all3";
667 BinCacheDB "packages-accepted.db";
669 FileList "%(filelist)s";
672 Packages::Extensions ".deb .udeb";
675 bindirectory "incoming/"
678 BinOverride "override.sid.all3";
679 SrcOverride "override.sid.all3.src";
680 FileList "%(filelist)s";
# Mapped class for the 'build_queue' table: an on-disk queue directory
# (e.g. buildd incoming) whose apt metadata dak maintains itself.
684 class BuildQueue(object):
685 def __init__(self, *args, **kwargs):
689 return '<BuildQueue %s>' % self.queue_name
# Regenerate Packages/Sources/Release in the queue directory by driving
# apt-ftparchive over a temp file list + temp apt.conf, then signing
# Release with gpg. NOTE(review): the try/finally cleanup and several
# guard lines appear to be truncated in this excerpt.
691 def write_metadata(self, starttime, force=False):
692 # Do we write out metafiles?
693 if not (force or self.generate_metadata):
696 session = DBConn().session().object_session(self)
698 fl_fd = fl_name = ac_fd = ac_name = None
700 arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
701 startdir = os.getcwd()
704 # Grab files we want to include
# "newer" = files still within their stay_of_execution grace period.
705 newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
706 newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
707 # Write file list with newer files
708 (fl_fd, fl_name) = mkstemp()
710 os.write(fl_fd, '%s\n' % n.fullpath)
715 # Write minimal apt.conf
716 # TODO: Remove hardcoding from template
717 (ac_fd, ac_name) = mkstemp()
718 os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
720 'cachedir': cnf["Dir::Cache"],
721 'overridedir': cnf["Dir::Override"],
725 # Run apt-ftparchive generate
726 os.chdir(os.path.dirname(ac_name))
# NOTE(review): os.system with interpolated values throughout this
# method — safe only while origin/label/paths come from trusted config.
727 os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
729 # Run apt-ftparchive release
730 # TODO: Eww - fix this
731 bname = os.path.basename(self.path)
735 # We have to remove the Release file otherwise it'll be included in the
738 os.unlink(os.path.join(bname, 'Release'))
742 os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
744 # Crude hack with open and append, but this whole section is and should be redone.
745 if self.notautomatic:
746 release=open("Release", "a")
747 release.write("NotAutomatic: yes")
# Sign the Release file if a signing key is configured.
752 keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
753 if cnf.has_key("Dinstall::SigningPubKeyring"):
754 keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
756 os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
758 # Move the files if we got this far
759 os.rename('Release', os.path.join(bname, 'Release'))
761 os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
763 # Clean up any left behind files
# Expire queue entries older than stay_of_execution, delete their files,
# regenerate metadata, and remove stray unreferenced files from the
# queue directory. NOTE(review): the loop headers, dryrun branches and
# try/except around os.unlink appear to be truncated in this excerpt.
790 def clean_and_update(self, starttime, Logger, dryrun=False):
791 """WARNING: This routine commits for you"""
792 session = DBConn().session().object_session(self)
794 if self.generate_metadata and not dryrun:
795 self.write_metadata(starttime)
797 # Grab files older than our execution time
798 older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
799 older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
805 Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
807 Logger.log(["I: Removing %s from the queue" % o.fullpath])
808 os.unlink(o.fullpath)
811 # If it wasn't there, don't worry
812 if e.errno == ENOENT:
815 # TODO: Replace with proper logging call
816 Logger.log(["E: Could not remove %s" % o.fullpath])
# Second pass: delete generated metadata files no longer referenced.
823 for f in os.listdir(self.path):
824 if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
827 if not self.contains_filename(f):
828 fp = os.path.join(self.path, f)
830 Logger.log(["I: Would remove unused link %s" % fp])
832 Logger.log(["I: Removing unused link %s" % fp])
836 Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
# True if the name is tracked either as a pool-backed queue file or as a
# policy-queue-backed one; the return lines are truncated here.
838 def contains_filename(self, filename):
841         @returns True if filename is supposed to be in the queue; False otherwise
843 session = DBConn().session().object_session(self)
844 if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
846 elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
# Link (or copy) a pool file into the queue directory and record it as a
# BuildQueueFile. NOTE(review): the early-return after refreshing an
# existing entry and the try/except around the copy/symlink appear to be
# truncated in this excerpt.
850 def add_file_from_pool(self, poolfile):
851         """Copies a file into the pool. Assumes that the PoolFile object is
852         attached to the same SQLAlchemy session as the Queue object is.
854         The caller is responsible for committing after calling this function."""
855 poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
857 # Check if we have a file of this name or this ID already
858 for f in self.queuefiles:
859 if f.fileid is not None and f.fileid == poolfile.file_id or \
860 f.poolfile.filename == poolfile_basename:
861 # In this case, update the BuildQueueFile entry so we
862 # don't remove it too early
863 f.lastused = datetime.now()
864 DBConn().session().object_session(poolfile).add(f)
867 # Prepare BuildQueueFile object
868 qf = BuildQueueFile()
869 qf.build_queue_id = self.queue_id
870 qf.lastused = datetime.now()
871 qf.filename = poolfile_basename
873 targetpath = poolfile.fullpath
874 queuepath = os.path.join(self.path, poolfile_basename)
878 # We need to copy instead of symlink
880 utils.copy(targetpath, queuepath)
881 # NULL in the fileid field implies a copy
884 os.symlink(targetpath, queuepath)
885 qf.fileid = poolfile.file_id
889 # Get the same session as the PoolFile is using and add the qf to it
890 DBConn().session().object_session(poolfile).add(qf)
# Convenience wrapper: copy a whole .changes (its policy-queue files
# plus its pool files) into this build queue.
894 def add_changes_from_policy_queue(self, policyqueue, changes):
896         Copies a changes from a policy queue together with its poolfiles.
898         @type policyqueue: PolicyQueue
899         @param policyqueue: policy queue to copy the changes from
901         @type changes: DBChange
902         @param changes: changes to copy to this build queue
904 for policyqueuefile in changes.files:
905 self.add_file_from_policy_queue(policyqueue, policyqueuefile)
906 for poolfile in changes.poolfiles:
907 self.add_file_from_pool(poolfile)
# Policy-queue variant of add_file_from_pool(): always copies (never
# symlinks), since policy queue contents can move around.
909 def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
911         Copies a file from a policy queue.
912         Assumes that the policyqueuefile is attached to the same SQLAlchemy
913         session as the Queue object is. The caller is responsible for
914         committing after calling this function.
916         @type policyqueue: PolicyQueue
917         @param policyqueue: policy queue to copy the file from
919         @type policyqueuefile: ChangePendingFile
920         @param policyqueuefile: file to be added to the build queue
922 session = DBConn().session().object_session(policyqueuefile)
924 # Is the file already there?
926 f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
927 f.lastused = datetime.now()
929 except NoResultFound:
930 pass # continue below
932 # We have to add the file.
933 f = BuildQueuePolicyFile()
935 f.file = policyqueuefile
936 f.filename = policyqueuefile.filename
938 source = os.path.join(policyqueue.path, policyqueuefile.filename)
941 # Always copy files from policy queues as they might move around.
943 utils.copy(source, target)
950 __all__.append('BuildQueue')
# Look up a BuildQueue by name; the try/one()/NoResultFound-return lines
# are truncated in this excerpt.
953 def get_build_queue(queuename, session=None):
955     Returns BuildQueue object for given C{queue name}, creating it if it does not
958     @type queuename: string
959     @param queuename: The name of the queue
961     @type session: Session
962     @param session: Optional SQLA session object (a temporary one will be
963     generated if not supplied)
966     @return: BuildQueue object for the given queue
969 q = session.query(BuildQueue).filter_by(queue_name=queuename)
973 except NoResultFound:
976 __all__.append('get_build_queue')
978 ################################################################################
# A file in a build queue backed by the pool; fullpath joins the owning
# queue's directory with the stored filename.
980 class BuildQueueFile(object):
982     BuildQueueFile represents a file in a build queue coming from a pool.
985 def __init__(self, *args, **kwargs):
989 return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
993 return os.path.join(self.buildqueue.path, self.filename)
996 __all__.append('BuildQueueFile')
998 ################################################################################
# A file in a build queue backed by a policy queue instead of the pool.
1000 class BuildQueuePolicyFile(object):
1002     BuildQueuePolicyFile represents a file in a build queue that comes from a
1003     policy queue (and not a pool).
1006 def __init__(self, *args, **kwargs):
1010 #def filename(self):
1011 # return self.file.filename
1015 return os.path.join(self.build_queue.path, self.filename)
1017 __all__.append('BuildQueuePolicyFile')
1019 ################################################################################
# Plain mapped classes for the change_pending_* tables; only their
# __repr__ return lines are visible in this excerpt.
1021 class ChangePendingBinary(object):
1022 def __init__(self, *args, **kwargs):
1026 return '<ChangePendingBinary %s>' % self.change_pending_binary_id
1028 __all__.append('ChangePendingBinary')
1030 ################################################################################
1032 class ChangePendingFile(object):
1033 def __init__(self, *args, **kwargs):
1037 return '<ChangePendingFile %s>' % self.change_pending_file_id
1039 __all__.append('ChangePendingFile')
1041 ################################################################################
1043 class ChangePendingSource(object):
1044 def __init__(self, *args, **kwargs):
1048 return '<ChangePendingSource %s>' % self.change_pending_source_id
1050 __all__.append('ChangePendingSource')
1052 ################################################################################
class Component(ORMObject):
    """ORM class for the 'component' table (main, contrib, ...).

    Instances compare equal/unequal to plain strings via their
    component_name; any other comparison type defers to the default
    operator by returning NotImplemented.
    """

    def __init__(self, component_name = None):
        self.component_name = component_name

    def __eq__(self, val):
        # Allow comparing a Component directly against its name.
        if not isinstance(val, str):
            # Signal Python to fall back to the normal comparison.
            return NotImplemented
        return self.component_name == val

    def __ne__(self, val):
        if not isinstance(val, str):
            return NotImplemented
        return self.component_name != val

    def properties(self):
        # 'component_name' comes first: ORMObject.__repr__ uses element 0.
        return ['component_name', 'component_id', 'description',
            'location_count', 'meets_dfsg', 'overrides_count']

    def not_null_constraints(self):
        return ['component_name']
1078 __all__.append('Component')
# Case-insensitive Component lookup; the try/one()/NoResultFound-return
# lines are truncated in this excerpt. NOTE(review): the summary says
# "database id" while typical usage suggests a Component object —
# confirm against the full function body.
1081 def get_component(component, session=None):
1083     Returns database id for given C{component}.
1085     @type component: string
1086     @param component: The name of the component
1089     @return: the database id for the given component
1092 component = component.lower()
1094 q = session.query(Component).filter_by(component_name=component)
1098 except NoResultFound:
1101 __all__.append('get_component')
1103 ################################################################################
# Mapped class for the 'config' table (key/value settings in the DB).
1105 class DBConfig(object):
1106 def __init__(self, *args, **kwargs):
1110 return '<DBConfig %s>' % self.name
1112 __all__.append('DBConfig')
1114 ################################################################################
# Get-or-create on content_file_names: returns the id for an existing
# filename row, inserting one first when missing. Relies on
# session.commit_or_flush provided by @session_wrapper (decorator line
# truncated in this excerpt) to make the new id available.
1117 def get_or_set_contents_file_id(filename, session=None):
1119     Returns database id for given filename.
1121     If no matching file is found, a row is inserted.
1123     @type filename: string
1124     @param filename: The filename
1125     @type session: SQLAlchemy
1126     @param session: Optional SQL session object (a temporary one will be
1127     generated if not supplied). If not passed, a commit will be performed at
1128     the end of the function, otherwise the caller is responsible for commiting.
1131     @return: the database id for the given filename
1134 q = session.query(ContentFilename).filter_by(filename=filename)
1137 ret = q.one().cafilename_id
1138 except NoResultFound:
1139 cf = ContentFilename()
1140 cf.filename = filename
1142 session.commit_or_flush()
1143 ret = cf.cafilename_id
1147 __all__.append('get_or_set_contents_file_id')
# Raw-SQL contents listing for a suite/overridetype, optionally narrowed
# to one section; returns the executed result proxy so callers can
# stream rows. Bound parameters (:suiteid etc.) keep the query safe.
1150 def get_contents(suite, overridetype, section=None, session=None):
1152     Returns contents for a suite / overridetype combination, limiting
1153     to a section if not None.
1156     @param suite: Suite object
1158     @type overridetype: OverrideType
1159     @param overridetype: OverrideType object
1161     @type section: Section
1162     @param section: Optional section object to limit results to
1164     @type session: SQLAlchemy
1165     @param session: Optional SQL session object (a temporary one will be
1166     generated if not supplied)
1168     @rtype: ResultsProxy
1169     @return: ResultsProxy object set up to return tuples of (filename, section,
1173 # find me all of the contents for a given suite
1174 contents_q = """SELECT (p.path||'/'||n.file) AS fn,
1178 FROM content_associations c join content_file_paths p ON (c.filepath=p.id)
1179 JOIN content_file_names n ON (c.filename=n.id)
1180 JOIN binaries b ON (b.id=c.binary_pkg)
1181 JOIN override o ON (o.package=b.package)
1182 JOIN section s ON (s.id=o.section)
1183 WHERE o.suite = :suiteid AND o.type = :overridetypeid
1184 AND b.type=:overridetypename"""
1186 vals = {'suiteid': suite.suite_id,
1187 'overridetypeid': overridetype.overridetype_id,
1188 'overridetypename': overridetype.overridetype}
1190 if section is not None:
1191 contents_q += " AND s.id = :sectionid"
1192 vals['sectionid'] = section.section_id
1194 contents_q += " ORDER BY fn"
1196 return session.execute(contents_q, vals)
1198 __all__.append('get_contents')
1200 ################################################################################
# Mapped class for the content_file_paths table (directory component of
# contents entries).
1202 class ContentFilepath(object):
1203 def __init__(self, *args, **kwargs):
1207 return '<ContentFilepath %s>' % self.filepath
1209 __all__.append('ContentFilepath')
# Get-or-create on content_file_paths, mirroring
# get_or_set_contents_file_id() above; the @session_wrapper decorator
# and try lines are truncated in this excerpt.
1212 def get_or_set_contents_path_id(filepath, session=None):
1214     Returns database id for given path.
1216     If no matching file is found, a row is inserted.
1218     @type filepath: string
1219     @param filepath: The filepath
1221     @type session: SQLAlchemy
1222     @param session: Optional SQL session object (a temporary one will be
1223     generated if not supplied). If not passed, a commit will be performed at
1224     the end of the function, otherwise the caller is responsible for commiting.
1227     @return: the database id for the given path
1230 q = session.query(ContentFilepath).filter_by(filepath=filepath)
1233 ret = q.one().cafilepath_id
1234 except NoResultFound:
1235 cf = ContentFilepath()
1236 cf.filepath = filepath
1238 session.commit_or_flush()
1239 ret = cf.cafilepath_id
1243 __all__.append('get_or_set_contents_path_id')
1245 ################################################################################
# ORM class linking a binary package to one (path, filename) pair of its
# contents (content_associations table).
1247 class ContentAssociation(object):
1248 def __init__(self, *args, **kwargs):
# __repr__: identify the association by its primary key.
1252 return '<ContentAssociation %s>' % self.ca_id
1254 __all__.append('ContentAssociation')
# Bulk-insert the contents file list of one binary package into
# bin_contents via raw SQL (one INSERT per path).
1256 def insert_content_paths(binary_id, fullpaths, session=None):
1258 Make sure given path is associated with given binary id
1260 @type binary_id: int
1261 @param binary_id: the id of the binary
1262 @type fullpaths: list
1263 @param fullpaths: the list of paths of the file being associated with the binary
1264 @type session: SQLAlchemy session
1265 @param session: Optional SQLAlchemy session. If this is passed, the caller
1266 is responsible for ensuring a transaction has begun and committing the
1267 results or rolling back based on the result code. If not passed, a commit
1268 will be performed at the end of the function, otherwise the caller is
1269 responsible for committing.
1271 @return: True upon success
# privatetrans tracks whether this function opened its own session (and
# therefore owns commit/rollback responsibility).
1274 privatetrans = False
1276 session = DBConn().session()
# Generator normalising each path ('./' prefix stripped) into the bind
# parameter dict expected by the INSERT below.
1281 def generate_path_dicts():
1282 for fullpath in fullpaths:
1283 if fullpath.startswith( './' ):
1284 fullpath = fullpath[2:]
1286 yield {'filename':fullpath, 'id': binary_id }
1288 for d in generate_path_dicts():
1289 session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
# On failure, dump the traceback for the caller's logs; the elided lines
# presumably roll back the private session and return False -- TODO confirm.
1298 traceback.print_exc()
1300 # Only rollback if we set up the session ourself
1307 __all__.append('insert_content_paths')
1309 ################################################################################
# ORM class for the dsc_files table: one row per file referenced by a
# source package's .dsc.
1311 class DSCFile(object):
1312 def __init__(self, *args, **kwargs):
# __repr__: identify the row by its primary key.
1316 return '<DSCFile %s>' % self.dscfile_id
1318 __all__.append('DSCFile')
# Query dsc_files, optionally narrowed by any combination of the three id
# filters; each filter is applied only when its argument is not None.
1321 def get_dscfiles(dscfile_id=None, source_id=None, poolfile_id=None, session=None):
1323 Returns a list of DSCFiles which may be empty
1325 @type dscfile_id: int (optional)
1326 @param dscfile_id: the dscfile_id of the DSCFiles to find
1328 @type source_id: int (optional)
1329 @param source_id: the source id related to the DSCFiles to find
1331 @type poolfile_id: int (optional)
1332 @param poolfile_id: the poolfile id related to the DSCFiles to find
1335 @return: Possibly empty list of DSCFiles
1338 q = session.query(DSCFile)
1340 if dscfile_id is not None:
1341 q = q.filter_by(dscfile_id=dscfile_id)
1343 if source_id is not None:
1344 q = q.filter_by(source_id=source_id)
1346 if poolfile_id is not None:
1347 q = q.filter_by(poolfile_id=poolfile_id)
1351 __all__.append('get_dscfiles')
1353 ################################################################################
# ORM class for the files table: a file stored in the archive pool,
# identified by (location, filename) with size/checksum metadata.
1355 class PoolFile(ORMObject):
1356 def __init__(self, filename = None, location = None, filesize = -1, \
1358 self.filename = filename
1359 self.location = location
1360 self.filesize = filesize
1361 self.md5sum = md5sum
# fullpath: absolute on-disk path, i.e. location path + pool filename.
1365 return os.path.join(self.location.path, self.filename)
# is_valid: True iff both the recorded size and md5sum match the
# caller-supplied values (used by check_poolfile below).
1367 def is_valid(self, filesize = -1, md5sum = None):
1368 return self.filesize == long(filesize) and self.md5sum == md5sum
# properties/not_null_constraints feed the generic ORMObject machinery.
1370 def properties(self):
1371 return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
1372 'sha256sum', 'location', 'source', 'binary', 'last_used']
1374 def not_null_constraints(self):
1375 return ['filename', 'md5sum', 'location']
1377 __all__.append('PoolFile')
# Validate a candidate pool file against the database: fetch the PoolFile
# at (location_id, filename) and compare size/md5sum via PoolFile.is_valid.
1380 def check_poolfile(filename, filesize, md5sum, location_id, session=None):
1383 (ValidFileFound [boolean], PoolFile object or None)
1385 @type filename: string
1386 @param filename: the filename of the file to check against the DB
1389 @param filesize: the size of the file to check against the DB
1391 @type md5sum: string
1392 @param md5sum: the md5sum of the file to check against the DB
1394 @type location_id: int
1395 @param location_id: the id of the location to look in
1398 @return: Tuple of length 2.
1399 - If valid pool file found: (C{True}, C{PoolFile object})
1400 - If valid pool file not found:
1401 - (C{False}, C{None}) if no file found
1402 - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
# Navigate Location -> its files relation; first() yields None when absent.
1405 poolfile = session.query(Location).get(location_id). \
1406 files.filter_by(filename=filename).first()
1408 if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
1411 return (valid, poolfile)
1413 __all__.append('check_poolfile')
1415 # TODO: the implementation can trivially be inlined at the place where the
1416 # function is called
# Thin wrapper over Query.get: fetch a PoolFile by primary key.
1418 def get_poolfile_by_id(file_id, session=None):
1420 Returns a PoolFile objects or None for the given id
1423 @param file_id: the id of the file to look for
1425 @rtype: PoolFile or None
1426 @return: either the PoolFile object or None
1429 return session.query(PoolFile).get(file_id)
1431 __all__.append('get_poolfile_by_id')
# Find all PoolFiles whose path ends with '/<filename>' via a LIKE match.
1434 def get_poolfile_like_name(filename, session=None):
1436 Returns an array of PoolFile objects which are like the given name
1438 @type filename: string
1439 @param filename: the filename of the file to check against the DB
1442 @return: array of PoolFile objects
1445 # TODO: There must be a way of properly using bind parameters with %FOO%
# NOTE(review): filename is interpolated into the LIKE pattern; any '%' or
# '_' in it acts as a wildcard (acknowledged by the TODO above).
1446 q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
1450 __all__.append('get_poolfile_like_name')
# Insert a new PoolFile row populated from the upload's datadict (size and
# checksums) and return the (flushed, id-bearing) object.
1453 def add_poolfile(filename, datadict, location_id, session=None):
1455 Add a new file to the pool
1457 @type filename: string
1458 @param filename: filename
1460 @type datadict: dict
1461 @param datadict: dict with needed data
1463 @type location_id: int
1464 @param location_id: database id of the location
1467 @return: the PoolFile object created
# datadict must carry 'size', 'md5sum', 'sha1sum' and 'sha256sum' keys
# (KeyError otherwise).
1469 poolfile = PoolFile()
1470 poolfile.filename = filename
1471 poolfile.filesize = datadict["size"]
1472 poolfile.md5sum = datadict["md5sum"]
1473 poolfile.sha1sum = datadict["sha1sum"]
1474 poolfile.sha256sum = datadict["sha256sum"]
1475 poolfile.location_id = location_id
1477 session.add(poolfile)
1478 # Flush to get a file id (NB: This is not a commit)
1483 __all__.append('add_poolfile')
1485 ################################################################################
# ORM class for the fingerprint table: a GPG key fingerprint, optionally
# linked to a keyring and a uid.
1487 class Fingerprint(ORMObject):
1488 def __init__(self, fingerprint = None):
1489 self.fingerprint = fingerprint
# properties/not_null_constraints feed the generic ORMObject machinery.
1491 def properties(self):
1492 return ['fingerprint', 'fingerprint_id', 'keyring', 'uid', \
1495 def not_null_constraints(self):
1496 return ['fingerprint']
1498 __all__.append('Fingerprint')
# Read-only lookup of a Fingerprint row by fpr; returns None when absent
# (the except NoResultFound branch is elided in this listing).
1501 def get_fingerprint(fpr, session=None):
1503 Returns Fingerprint object for given fpr.
1506 @param fpr: The fpr to find / add
1508 @type session: SQLAlchemy
1509 @param session: Optional SQL session object (a temporary one will be
1510 generated if not supplied).
1513 @return: the Fingerprint object for the given fpr or None
1516 q = session.query(Fingerprint).filter_by(fingerprint=fpr)
1520 except NoResultFound:
1525 __all__.append('get_fingerprint')
# Get-or-create variant of get_fingerprint(): inserts a row (and flushes
# or commits, per the session contract below) when the fpr is unknown.
1528 def get_or_set_fingerprint(fpr, session=None):
1530 Returns Fingerprint object for given fpr.
1532 If no matching fpr is found, a row is inserted.
1535 @param fpr: The fpr to find / add
1537 @type session: SQLAlchemy
1538 @param session: Optional SQL session object (a temporary one will be
1539 generated if not supplied). If not passed, a commit will be performed at
1540 the end of the function, otherwise the caller is responsible for committing.
1541 A flush will be performed either way.
1544 @return: the Fingerprint object for the given fpr
1547 q = session.query(Fingerprint).filter_by(fingerprint=fpr)
1551 except NoResultFound:
1552 fingerprint = Fingerprint()
1553 fingerprint.fingerprint = fpr
1554 session.add(fingerprint)
1555 session.commit_or_flush()
1560 __all__.append('get_or_set_fingerprint')
1564 # Helper routine for Keyring class
# Build a display name from an LDAP entry by concatenating its cn/mn/sn
# attributes, skipping empty values and the placeholder "-".
1565 def get_ldap_name(entry):
1567 for k in ["cn", "mn", "sn"]:
1569 if ret and ret[0] != "" and ret[0] != "-":
1571 return " ".join(name)
1573 ################################################################################
# ORM class for the keyrings table plus helpers that parse a GPG keyring
# (--with-colons output) and synchronise its keys with uid rows in the DB.
1575 class Keyring(object):
# Command template: %s is replaced with the keyring path in load_keys().
1576 gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
1577 " --with-colons --fingerprint --fingerprint"
1582 def __init__(self, *args, **kwargs):
1586 return '<Keyring %s>' % self.keyring_name
# Undo GPG's \xNN escaping: every odd element of the split is an escape
# sequence whose two hex digits are converted back to a character.
1588 def de_escape_gpg_str(self, txt):
1589 esclist = re.split(r'(\\x..)', txt)
1590 for x in range(1,len(esclist),2):
1591 esclist[x] = "%c" % (int(esclist[x][2:],16))
1592 return "".join(esclist)
1594 def parse_address(self, uid):
1595 """parses uid and returns a tuple of real name and email address"""
1597 (name, address) = email.Utils.parseaddr(uid)
# Strip any parenthesised comment from the real-name part, then de-escape.
1598 name = re.sub(r"\s*[(].*[)]", "", name)
1599 name = self.de_escape_gpg_str(name)
1602 return (name, address)
# Parse `gpg --with-colons` output for *keyring*, populating self.keys
# (per-key name/email/fingerprints) and self.fpr_lookup (fpr -> key id).
1604 def load_keys(self, keyring):
1605 if not self.keyring_id:
1606 raise Exception('Must be initialized with database information')
# NOTE(review): os.popen handle is never explicitly closed in the visible
# lines -- confirm against full source.
1608 k = os.popen(self.gpg_invocation % keyring, "r")
1612 for line in k.xreadlines():
1613 field = line.split(":")
# "pub" record starts a new key: remember its uid-derived name/email.
1614 if field[0] == "pub":
1617 (name, addr) = self.parse_address(field[9])
1619 self.keys[key]["email"] = addr
1620 self.keys[key]["name"] = name
1621 self.keys[key]["fingerprints"] = []
# "sub" record: note whether the subkey has the signing capability ("s").
1623 elif key and field[0] == "sub" and len(field) >= 12:
1624 signingkey = ("s" in field[11])
# Additional "uid" records may supply a better (first seen) email.
1625 elif key and field[0] == "uid":
1626 (name, addr) = self.parse_address(field[9])
1627 if "email" not in self.keys[key] and "@" in addr:
1628 self.keys[key]["email"] = addr
1629 self.keys[key]["name"] = name
# "fpr" records following a signing key are recorded for fpr lookups.
1630 elif signingkey and field[0] == "fpr":
1631 self.keys[key]["fingerprints"].append(field[9])
1632 self.fpr_lookup[field[9]] = key
# Cross-reference loaded keys with LDAP (Debian developer directory) and
# return ({uid: (keyid, name)}, {keyid: (uid, name)}) mappings.
1634 def import_users_from_ldap(self, session):
1638 LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
1639 LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
# Anonymous bind; search entries that carry a key fingerprint and the
# configured valid GID.
1641 l = ldap.open(LDAPServer)
1642 l.simple_bind_s("","")
1643 Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
1644 "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
1645 ["uid", "keyfingerprint", "cn", "mn", "sn"])
1647 ldap_fin_uid_id = {}
1654 uid = entry["uid"][0]
1655 name = get_ldap_name(entry)
1656 fingerprints = entry["keyFingerPrint"]
1658 for f in fingerprints:
# Match each LDAP fingerprint against keys loaded from the keyring;
# unmatched fingerprints are skipped (continue line elided here).
1659 key = self.fpr_lookup.get(f, None)
1660 if key not in self.keys:
1662 self.keys[key]["uid"] = uid
1666 keyid = get_or_set_uid(uid, session).uid_id
1667 byuid[keyid] = (uid, name)
1668 byname[uid] = (keyid, name)
1670 return (byname, byuid)
# Derive uids directly from key emails using *format* (e.g. "%s"),
# returning the same (byname, byuid) pair as import_users_from_ldap.
1672 def generate_users_from_keyring(self, format, session):
1676 for x in self.keys.keys():
# Keys without any usable email get a placeholder "invalid-uid".
1677 if "email" not in self.keys[x]:
1679 self.keys[x]["uid"] = format % "invalid-uid"
1681 uid = format % self.keys[x]["email"]
1682 keyid = get_or_set_uid(uid, session).uid_id
1683 byuid[keyid] = (uid, self.keys[x]["name"])
1684 byname[uid] = (keyid, self.keys[x]["name"])
1685 self.keys[x]["uid"] = uid
# Fallback branch (condition elided): register the placeholder uid too.
1688 uid = format % "invalid-uid"
1689 keyid = get_or_set_uid(uid, session).uid_id
1690 byuid[keyid] = (uid, "ungeneratable user id")
1691 byname[uid] = (keyid, "ungeneratable user id")
1693 return (byname, byuid)
1695 __all__.append('Keyring')
# Read-only lookup of a Keyring row by name; returns None when absent.
1698 def get_keyring(keyring, session=None):
1700 If C{keyring} does not have an entry in the C{keyrings} table yet, return None
1701 If C{keyring} already has an entry, simply return the existing Keyring
1703 @type keyring: string
1704 @param keyring: the keyring name
1707 @return: the Keyring object for this keyring
1710 q = session.query(Keyring).filter_by(keyring_name=keyring)
1714 except NoResultFound:
1717 __all__.append('get_keyring')
# ORM class for the keyring_acl_map table (per-keyring upload ACLs).
1721 class KeyringACLMap(object):
1722 def __init__(self, *args, **kwargs):
# __repr__: identify the row by its primary key.
1726 return '<KeyringACLMap %s>' % self.keyring_acl_map_id
1728 __all__.append('KeyringACLMap')
# ORM class for the changes table: one row per uploaded .changes file.
1732 class DBChange(object):
1733 def __init__(self, *args, **kwargs):
1737 return '<DBChange %s>' % self.changesname
# Detach this changes entry from its policy queue: drop the file
# associations and clear the queue/approval references.
1739 def clean_from_queue(self):
# NOTE(review): DBConn().session().object_session(self) creates a session
# only to call the (module-level-style) object_session helper -- confirm
# intent against full source.
1740 session = DBConn().session().object_session(self)
1742 # Remove changes_pool_files entries
1745 # Remove changes_pending_files references
1748 # Clear out of queue
1749 self.in_queue = None
1750 self.approved_for_id = None
1752 __all__.append('DBChange')
# Look up the DBChange row for a .changes filename; None when absent.
1755 def get_dbchange(filename, session=None):
1757 returns DBChange object for given C{filename}.
1759 @type filename: string
1760 @param filename: the name of the file
1762 @type session: Session
1763 @param session: Optional SQLA session object (a temporary one will be
1764 generated if not supplied)
1767 @return: DBChange object for the given filename (C{None} if not present)
1770 q = session.query(DBChange).filter_by(changesname=filename)
1774 except NoResultFound:
1777 __all__.append('get_dbchange')
# ORM class for the location table: an on-disk archive location (pool
# path) tied to a component and archive.
1781 class Location(ORMObject):
1782 def __init__(self, path = None, component = None):
1784 self.component = component
1785 # the column 'type' should go away, see comment at mapper
1786 self.archive_type = 'pool'
# properties/not_null_constraints feed the generic ORMObject machinery.
1788 def properties(self):
1789 return ['path', 'location_id', 'archive_type', 'component', \
1792 def not_null_constraints(self):
1793 return ['path', 'archive_type']
1795 __all__.append('Location')
# Look up a Location by path, optionally narrowed by component and/or
# archive name; returns None when no row matches.
1798 def get_location(location, component=None, archive=None, session=None):
1800 Returns Location object for the given combination of location, component
1803 @type location: string
1804 @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
1806 @type component: string
1807 @param component: the component name (if None, no restriction applied)
1809 @type archive: string
1810 @param archive: the archive name (if None, no restriction applied)
1812 @rtype: Location / None
1813 @return: Either a Location object or None if one can't be found
1816 q = session.query(Location).filter_by(path=location)
1818 if archive is not None:
1819 q = q.join(Archive).filter_by(archive_name=archive)
1821 if component is not None:
1822 q = q.join(Component).filter_by(component_name=component)
1826 except NoResultFound:
1829 __all__.append('get_location')
# ORM class for the maintainer table: the "Name <email>" string of a
# package maintainer or changed-by person.
1833 class Maintainer(ORMObject):
1834 def __init__(self, name = None):
1837 def properties(self):
1838 return ['name', 'maintainer_id']
1840 def not_null_constraints(self):
# Split the stored name into the canonical 4-tuple used by dak; empty
# strings when no name is set (delegates to utils' fix_maintainer).
1843 def get_split_maintainer(self):
1844 if not hasattr(self, 'name') or self.name is None:
1845 return ('', '', '', '')
1847 return fix_maintainer(self.name.strip())
1849 __all__.append('Maintainer')
# Get-or-create a Maintainer row for *name*, flushing/committing per the
# session contract in the docstring.
1852 def get_or_set_maintainer(name, session=None):
1854 Returns Maintainer object for given maintainer name.
1856 If no matching maintainer name is found, a row is inserted.
1859 @param name: The maintainer name to add
1861 @type session: SQLAlchemy
1862 @param session: Optional SQL session object (a temporary one will be
1863 generated if not supplied). If not passed, a commit will be performed at
1864 the end of the function, otherwise the caller is responsible for committing.
1865 A flush will be performed either way.
1868 @return: the Maintainer object for the given maintainer
1871 q = session.query(Maintainer).filter_by(name=name)
1874 except NoResultFound:
1875 maintainer = Maintainer()
1876 maintainer.name = name
1877 session.add(maintainer)
1878 session.commit_or_flush()
1883 __all__.append('get_or_set_maintainer')
# Primary-key lookup of a Maintainer; Query.get returns None for an
# unknown id.
1886 def get_maintainer(maintainer_id, session=None):
1888 Return the name of the maintainer behind C{maintainer_id} or None if that
1889 maintainer_id is invalid.
1891 @type maintainer_id: int
1892 @param maintainer_id: the id of the maintainer
1895 @return: the Maintainer with this C{maintainer_id}
1898 return session.query(Maintainer).get(maintainer_id)
1900 __all__.append('get_maintainer')
# ORM class for the new_comments table: ftpmaster comments attached to a
# (package, version) pair in the NEW queue.
1904 class NewComment(object):
1905 def __init__(self, *args, **kwargs):
1909 return '''<NewComment for '%s %s' (%s)>''' % (self.package, self.version, self.comment_id)
1911 __all__.append('NewComment')
# Existence test: does any NEW-queue comment exist for this exact
# (package, version) pair?
1914 def has_new_comment(package, version, session=None):
1916 Returns true if the given combination of C{package}, C{version} has a comment.
1918 @type package: string
1919 @param package: name of the package
1921 @type version: string
1922 @param version: package version
1924 @type session: Session
1925 @param session: Optional SQLA session object (a temporary one will be
1926 generated if not supplied)
1932 q = session.query(NewComment)
1933 q = q.filter_by(package=package)
1934 q = q.filter_by(version=version)
# COUNT > 0 coerced to bool; the docstring's boolean contract.
1936 return bool(q.count() > 0)
1938 __all__.append('has_new_comment')
# Query new_comments, applying each of the three optional filters only
# when its argument is not None.
1941 def get_new_comments(package=None, version=None, comment_id=None, session=None):
1943 Returns (possibly empty) list of NewComment objects for the given
1946 @type package: string (optional)
1947 @param package: name of the package
1949 @type version: string (optional)
1950 @param version: package version
1952 @type comment_id: int (optional)
1953 @param comment_id: An id of a comment
1955 @type session: Session
1956 @param session: Optional SQLA session object (a temporary one will be
1957 generated if not supplied)
1960 @return: A (possibly empty) list of NewComment objects will be returned
1963 q = session.query(NewComment)
1964 if package is not None: q = q.filter_by(package=package)
1965 if version is not None: q = q.filter_by(version=version)
1966 if comment_id is not None: q = q.filter_by(comment_id=comment_id)
1970 __all__.append('get_new_comments')
# ORM class for the override table: per-suite/component placement of a
# package (section, priority, override type).
1974 class Override(ORMObject):
1975 def __init__(self, package = None, suite = None, component = None, overridetype = None, \
1976 section = None, priority = None):
1977 self.package = package
1979 self.component = component
1980 self.overridetype = overridetype
1981 self.section = section
1982 self.priority = priority
# properties/not_null_constraints feed the generic ORMObject machinery.
1984 def properties(self):
1985 return ['package', 'suite', 'component', 'overridetype', 'section', \
1988 def not_null_constraints(self):
1989 return ['package', 'suite', 'component', 'overridetype', 'section']
1991 __all__.append('Override')
# Query overrides for *package*, optionally limited to given suite(s),
# component(s) and overridetype(s); scalar arguments are promoted to
# one-element lists so a single IN() filter handles both cases.
1994 def get_override(package, suite=None, component=None, overridetype=None, session=None):
1996 Returns Override object for the given parameters
1998 @type package: string
1999 @param package: The name of the package
2001 @type suite: string, list or None
2002 @param suite: The name of the suite (or suites if a list) to limit to. If
2003 None, don't limit. Defaults to None.
2005 @type component: string, list or None
2006 @param component: The name of the component (or components if a list) to
2007 limit to. If None, don't limit. Defaults to None.
2009 @type overridetype: string, list or None
2010 @param overridetype: The name of the overridetype (or overridetypes if a list) to
2011 limit to. If None, don't limit. Defaults to None.
2013 @type session: Session
2014 @param session: Optional SQLA session object (a temporary one will be
2015 generated if not supplied)
2018 @return: A (possibly empty) list of Override objects will be returned
2021 q = session.query(Override)
2022 q = q.filter_by(package=package)
2024 if suite is not None:
2025 if not isinstance(suite, list): suite = [suite]
2026 q = q.join(Suite).filter(Suite.suite_name.in_(suite))
2028 if component is not None:
2029 if not isinstance(component, list): component = [component]
2030 q = q.join(Component).filter(Component.component_name.in_(component))
2032 if overridetype is not None:
2033 if not isinstance(overridetype, list): overridetype = [overridetype]
2034 q = q.join(OverrideType).filter(OverrideType.overridetype.in_(overridetype))
2038 __all__.append('get_override')
2041 ################################################################################
# ORM class for the override_type table (e.g. deb, udeb, dsc).
2043 class OverrideType(ORMObject):
2044 def __init__(self, overridetype = None):
2045 self.overridetype = overridetype
# properties/not_null_constraints feed the generic ORMObject machinery.
2047 def properties(self):
2048 return ['overridetype', 'overridetype_id', 'overrides_count']
2050 def not_null_constraints(self):
2051 return ['overridetype']
2053 __all__.append('OverrideType')
# Look up an OverrideType row by name; None when absent.
# NOTE(review): docstring says "database id" but the visible query returns
# an OverrideType object (via the elided q.one()) -- confirm.
2056 def get_override_type(override_type, session=None):
2058 Returns OverrideType object for given C{override type}.
2060 @type override_type: string
2061 @param override_type: The name of the override type
2063 @type session: Session
2064 @param session: Optional SQLA session object (a temporary one will be
2065 generated if not supplied)
2068 @return: the database id for the given override type
2071 q = session.query(OverrideType).filter_by(overridetype=override_type)
2075 except NoResultFound:
2078 __all__.append('get_override_type')
# ORM class for the policy_queue table (e.g. NEW, byhand queues).
2082 class PolicyQueue(object):
2083 def __init__(self, *args, **kwargs):
2087 return '<PolicyQueue %s>' % self.queue_name
2089 __all__.append('PolicyQueue')
# Look up a PolicyQueue by queue name; None when absent.
2092 def get_policy_queue(queuename, session=None):
2094 Returns PolicyQueue object for given C{queue name}
2096 @type queuename: string
2097 @param queuename: The name of the queue
2099 @type session: Session
2100 @param session: Optional SQLA session object (a temporary one will be
2101 generated if not supplied)
2104 @return: PolicyQueue object for the given queue
2107 q = session.query(PolicyQueue).filter_by(queue_name=queuename)
2111 except NoResultFound:
2114 __all__.append('get_policy_queue')
# Look up a PolicyQueue by its on-disk path; None when absent.
2117 def get_policy_queue_from_path(pathname, session=None):
2119 Returns PolicyQueue object for given C{path name}
2121 @type pathname: string
2122 @param pathname: the path of the queue
2124 @type session: Session
2125 @param session: Optional SQLA session object (a temporary one will be
2126 generated if not supplied)
2129 @return: PolicyQueue object for the given queue
2132 q = session.query(PolicyQueue).filter_by(path=pathname)
2136 except NoResultFound:
2139 __all__.append('get_policy_queue_from_path')
# ORM class for the priority table; compares equal to plain strings so
# callers can write `override.priority == 'optional'`.
2143 class Priority(ORMObject):
2144 def __init__(self, priority = None, level = None):
2145 self.priority = priority
# properties/not_null_constraints feed the generic ORMObject machinery.
2148 def properties(self):
2149 return ['priority', 'priority_id', 'level', 'overrides_count']
2151 def not_null_constraints(self):
2152 return ['priority', 'level']
# String comparison shortcut; NotImplemented defers non-string operands
# to the default comparison machinery.
2154 def __eq__(self, val):
2155 if isinstance(val, str):
2156 return (self.priority == val)
2157 # This signals to use the normal comparison operator
2158 return NotImplemented
2160 def __ne__(self, val):
2161 if isinstance(val, str):
2162 return (self.priority != val)
2163 # This signals to use the normal comparison operator
2164 return NotImplemented
2166 __all__.append('Priority')
# Look up a Priority row by name; None when absent.
2169 def get_priority(priority, session=None):
2171 Returns Priority object for given C{priority name}.
2173 @type priority: string
2174 @param priority: The name of the priority
2176 @type session: Session
2177 @param session: Optional SQLA session object (a temporary one will be
2178 generated if not supplied)
2181 @return: Priority object for the given priority
2184 q = session.query(Priority).filter_by(priority=priority)
2188 except NoResultFound:
2191 __all__.append('get_priority')
# Build a {priority name: priority_id} dict over the whole priority table.
2194 def get_priorities(session=None):
2196 Returns dictionary of priority names -> id mappings
2198 @type session: Session
2199 @param session: Optional SQL session object (a temporary one will be
2200 generated if not supplied)
2203 @return: dictionary of priority names -> id mappings
2207 q = session.query(Priority)
2209 ret[x.priority] = x.priority_id
2213 __all__.append('get_priorities')
# ORM class for the section table; like Priority, compares equal to plain
# strings (`override.section == 'utils'`).
2217 class Section(ORMObject):
2218 def __init__(self, section = None):
2219 self.section = section
# properties/not_null_constraints feed the generic ORMObject machinery.
2221 def properties(self):
2222 return ['section', 'section_id', 'overrides_count']
2224 def not_null_constraints(self):
# String comparison shortcut; NotImplemented defers non-string operands
# to the default comparison machinery.
2227 def __eq__(self, val):
2228 if isinstance(val, str):
2229 return (self.section == val)
2230 # This signals to use the normal comparison operator
2231 return NotImplemented
2233 def __ne__(self, val):
2234 if isinstance(val, str):
2235 return (self.section != val)
2236 # This signals to use the normal comparison operator
2237 return NotImplemented
2239 __all__.append('Section')
# Look up a Section row by name; None when absent.
2242 def get_section(section, session=None):
2244 Returns Section object for given C{section name}.
2246 @type section: string
2247 @param section: The name of the section
2249 @type session: Session
2250 @param session: Optional SQLA session object (a temporary one will be
2251 generated if not supplied)
2254 @return: Section object for the given section name
2257 q = session.query(Section).filter_by(section=section)
2261 except NoResultFound:
2264 __all__.append('get_section')
# Build a {section name: section_id} dict over the whole section table.
2267 def get_sections(session=None):
2269 Returns dictionary of section names -> id mappings
2271 @type session: Session
2272 @param session: Optional SQL session object (a temporary one will be
2273 generated if not supplied)
2276 @return: dictionary of section names -> id mappings
2280 q = session.query(Section)
2282 ret[x.section] = x.section_id
2286 __all__.append('get_sections')
# ORM class for the src_contents table: one (file, source package) pair
# from an unpacked source package's file list.
2290 class SrcContents(ORMObject):
2291 def __init__(self, file = None, source = None):
2293 self.source = source
2295 def properties(self):
2296 return ['file', 'source']
2298 __all__.append('SrcContents')
2302 from debian.debfile import Deb822
2304 # Temporary Deb822 subclass to fix bugs with : handling; see #597249
2305 class Dak822(Deb822):
# Re-implementation of python-debian's RFC822-style parser: stricter key
# pattern (no colon/whitespace in keys) than the upstream version.
2306 def _internal_parser(self, sequence, fields=None):
2307 # The key is non-whitespace, non-colon characters before any colon.
2308 key_part = r"^(?P<key>[^: \t\n\r\f\v]+)\s*:\s*"
# single: "Key: value" on one line; multi: "Key:" opening a folded block;
# multidata: an indented continuation line.
2309 single = re.compile(key_part + r"(?P<data>\S.*?)\s*$")
2310 multi = re.compile(key_part + r"$")
2311 multidata = re.compile(r"^\s(?P<data>.+?)\s*$")
2313 wanted_field = lambda f: fields is None or f in fields
# Accept either a whole string or an iterable of lines.
2315 if isinstance(sequence, basestring):
2316 sequence = sequence.splitlines()
2320 for line in self.gpg_stripped_paragraph(sequence):
2321 m = single.match(line)
# Flush the previously accumulated field before starting a new one
# (the surrounding guard lines are elided in this listing).
2324 self[curkey] = content
2326 if not wanted_field(m.group('key')):
2330 curkey = m.group('key')
2331 content = m.group('data')
2334 m = multi.match(line)
2337 self[curkey] = content
2339 if not wanted_field(m.group('key')):
2343 curkey = m.group('key')
2347 m = multidata.match(line)
# Continuation line appended verbatim to the current field's content.
2349 content += '\n' + line # XXX not m.group('data')?
# Final flush of the last field after the loop.
2353 self[curkey] = content
# ORM class for the source table: a source package version with links to
# maintainer, changed-by, fingerprint, its .dsc pool file and suites.
2356 class DBSource(ORMObject):
2357 def __init__(self, source = None, version = None, maintainer = None, \
2358 changedby = None, poolfile = None, install_date = None):
2359 self.source = source
2360 self.version = version
2361 self.maintainer = maintainer
2362 self.changedby = changedby
2363 self.poolfile = poolfile
2364 self.install_date = install_date
# pkid-style accessor (property name elided): the primary key.
2368 return self.source_id
2370 def properties(self):
2371 return ['source', 'source_id', 'maintainer', 'changedby', \
2372 'fingerprint', 'poolfile', 'version', 'suites_count', \
2373 'install_date', 'binaries_count', 'uploaders_count']
2375 def not_null_constraints(self):
# NOTE(review): 'install_date' is listed twice in this tuple -- harmless
# but redundant; confirm before cleaning up.
2376 return ['source', 'version', 'install_date', 'maintainer', \
2377 'changedby', 'poolfile', 'install_date']
2379 def read_control_fields(self):
2381 Reads the control information from a dsc
2384 @return: fields is the dsc information in a dictionary form
2386 fullpath = self.poolfile.fullpath
# NOTE(review): the file object passed to Dak822 is never explicitly
# closed in the visible lines (relies on GC) -- confirm/fix upstream.
2387 fields = Dak822(open(self.poolfile.fullpath, 'r'))
# metadata: dict-like proxy over the source_metadata association rows.
2390 metadata = association_proxy('key', 'value')
2392 def scan_contents(self):
2394 Returns a set of names for non directories. The path names are
2395 normalized after converting them from either utf-8 or iso8859-1
2398 fullpath = self.poolfile.fullpath
# Local import to avoid a circular dependency at module load time.
2399 from daklib.contents import UnpackedSource
2400 unpacked = UnpackedSource(fullpath)
2402 for name in unpacked.get_all_filenames():
2403 # enforce proper utf-8 encoding
2405 name.decode('utf-8')
2406 except UnicodeDecodeError:
# Fall back: treat the raw bytes as latin-1 and re-encode as utf-8.
2407 name = name.decode('iso8859-1').encode('utf-8')
2411 __all__.append('DBSource')
# Check that the source package for a binary upload exists in the archive,
# accepting either the exact version or, for binNMUs, the version with the
# +bN / .N+bN suffix stripped. Suite names are expanded through the
# configured SuiteMappings (map / silent-map entries).
2414 def source_exists(source, source_version, suites = ["any"], session=None):
2416 Ensure that source exists somewhere in the archive for the binary
2417 upload being processed.
2418 1. exact match => 1.0-3
2419 2. bin-only NMU => 1.0-3+b1 , 1.0-3.1+b1
2421 @type source: string
2422 @param source: source name
2424 @type source_version: string
2425 @param source_version: expected source version
2428 @param suites: list of suites to check in, default I{any}
2430 @type session: Session
2431 @param session: Optional SQLA session object (a temporary one will be
2432 generated if not supplied)
2435 @return: returns 1 if a source with expected version is found, otherwise 0
# Strip the binNMU suffix to get the plain source version.
2442 from daklib.regexes import re_bin_only_nmu
2443 orig_source_version = re_bin_only_nmu.sub('', source_version)
2445 for suite in suites:
2446 q = session.query(DBSource).filter_by(source=source). \
2447 filter(DBSource.version.in_([source_version, orig_source_version]))
2449 # source must exist in suite X, or in some other suite that's
2450 # mapped to X, recursively... silent-maps are counted too,
2451 # unreleased-maps aren't.
2452 maps = cnf.ValueList("SuiteMappings")[:]
2454 maps = [ m.split() for m in maps ]
2455 maps = [ (x[1], x[2]) for x in maps
2456 if x[0] == "map" or x[0] == "silent-map" ]
# Transitively expand the candidate suite set s along the mappings
# (loop/fixpoint lines elided in this listing).
2458 for (from_, to) in maps:
2459 if from_ in s and to not in s:
2462 q = q.filter(DBSource.suites.any(Suite.suite_name.in_(s)))
2467 # No source found so return not ok
2472 __all__.append('source_exists')
# All Suites that currently contain any version of the named source.
2475 def get_suites_source_in(source, session=None):
2477 Returns list of Suite objects which given C{source} name is in
2480 @param source: DBSource package name to search for
2483 @return: list of Suite objects for the given source
2486 return session.query(Suite).filter(Suite.sources.any(source=source)).all()
2488 __all__.append('get_suites_source_in')
# Query DBSource rows by name, with optional version and
# dm_upload_allowed filters (each applied only when not None).
2491 def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None):
2493 Returns list of DBSource objects for given C{source} name and other parameters
2496 @param source: DBSource package name to search for
2498 @type version: str or None
2499 @param version: DBSource version name to search for or None if not applicable
2501 @type dm_upload_allowed: bool
2502 @param dm_upload_allowed: If None, no effect. If True or False, only
2503 return packages with that dm_upload_allowed setting
2505 @type session: Session
2506 @param session: Optional SQL session object (a temporary one will be
2507 generated if not supplied)
2510 @return: list of DBSource objects for the given name (may be empty)
2513 q = session.query(DBSource).filter_by(source=source)
2515 if version is not None:
2516 q = q.filter_by(version=version)
2518 if dm_upload_allowed is not None:
2519 q = q.filter_by(dm_upload_allowed=dm_upload_allowed)
2523 __all__.append('get_sources_from_name')
2525 # FIXME: This function fails badly if it finds more than 1 source package and
2526 # its implementation is trivial enough to be inlined.
# Fetch the single DBSource for (source, suite) via Suite.get_sources;
# returns None when absent (except branch below; one() raises on >1 row,
# per the FIXME above).
2528 def get_source_in_suite(source, suite, session=None):
2530 Returns a DBSource object for a combination of C{source} and C{suite}.
2532 - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
2533 - B{suite} - a suite name, eg. I{unstable}
2535 @type source: string
2536 @param source: source package name
2539 @param suite: the suite name
2542 @return: the version for I{source} in I{suite}
2546 q = get_suite(suite, session).get_sources(source)
2549 except NoResultFound:
2552 __all__.append('get_source_in_suite')
# Copy every control field of a DBBinary/DBSource into its metadata
# association table, coercing values to byte strings (ASCII, then UTF-8,
# then iso8859-1) before storing.
2555 def import_metadata_into_db(obj, session=None):
2557 This routine works on either DBBinary or DBSource objects and imports
2558 their metadata into the database
2560 fields = obj.read_control_fields()
2561 for k in fields.keys():
# First attempt: plain str() (fails for non-ASCII unicode values).
2564 val = str(fields[k])
2565 except UnicodeEncodeError:
2566 # Fall back to UTF-8
2568 val = fields[k].encode('utf-8')
2569 except UnicodeEncodeError:
2570 # Finally try iso8859-1
2571 val = fields[k].encode('iso8859-1')
2572 # Otherwise we allow the exception to percolate up and we cause
2573 # a reject as someone is playing silly buggers
# Store under the (possibly newly created) metadata key row.
2575 obj.metadata[get_or_set_metadatakey(k, session)] = val
2577 session.commit_or_flush()
2579 __all__.append('import_metadata_into_db')
2582 ################################################################################
# Import a .dsc upload into the database: creates the DBSource row, its
# poolfile, the per-file dsc_files rows and the uploader list.
# NOTE(review): lossy excerpt -- `source = DBSource()`, session.add/flush
# calls, the inner loop that locates `dfentry`, and the DSCFile construction
# lines are not visible here; comments are limited to the visible lines.
2585 def add_dsc_to_db(u, filename, session=None):
2586 entry = u.pkg.files[filename]
2590 source.source = u.pkg.dsc["source"]
2591 source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
2592 source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
2593 source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
2594 source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
2595 source.install_date = datetime.now().date()
2597 dsc_component = entry["component"]
2598 dsc_location_id = entry["location id"]
# Only the literal string "yes" enables DM uploads.
2600 source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
2602 # Set up a new poolfile if necessary
2603 if not entry.has_key("files id") or not entry["files id"]:
2604 filename = entry["pool name"] + filename
2605 poolfile = add_poolfile(filename, entry, dsc_location_id, session)
2607 pfs.append(poolfile)
2608 entry["files id"] = poolfile.file_id
2610 source.poolfile_id = entry["files id"]
# Attach the source to every suite named in the changes file's Distribution field.
2613 suite_names = u.pkg.changes["distribution"].keys()
2614 source.suites = session.query(Suite). \
2615 filter(Suite.suite_name.in_(suite_names)).all()
2617 # Add the source files to the DB (files and dsc_files)
2619 dscfile.source_id = source.source_id
2620 dscfile.poolfile_id = entry["files id"]
2621 session.add(dscfile)
2623 for dsc_file, dentry in u.pkg.dsc_files.items():
2625 df.source_id = source.source_id
2627 # If the .orig tarball is already in the pool, it's
2628 # files id is stored in dsc_files by check_dsc().
2629 files_id = dentry.get("files id", None)
2631 # Find the entry in the files hash
2632 # TODO: Bail out here properly
2634 for f, e in u.pkg.files.items():
2639 if files_id is None:
2640 filename = dfentry["pool name"] + dsc_file
# Second chance: the file may already exist in the pool under this name.
2642 (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
2643 # FIXME: needs to check for -1/-2 and or handle exception
2644 if found and obj is not None:
2645 files_id = obj.file_id
2648 # If still not found, add it
2649 if files_id is None:
2650 # HACK: Force sha1sum etc into dentry
2651 dentry["sha1sum"] = dfentry["sha1sum"]
2652 dentry["sha256sum"] = dfentry["sha256sum"]
2653 poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
2654 pfs.append(poolfile)
2655 files_id = poolfile.file_id
# Sanity check: the files id must resolve to a real poolfile row.
2657 poolfile = get_poolfile_by_id(files_id, session)
2658 if poolfile is None:
2659 utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
2660 pfs.append(poolfile)
2662 df.poolfile_id = files_id
2665 # Add the src_uploaders to the DB
# Maintainer is always an uploader; Uploaders field entries are split on
# ">, " (rewritten to a tab first so commas inside names survive).
2666 source.uploaders = [source.maintainer]
2667 if u.pkg.dsc.has_key("uploaders"):
2668 for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
2670 source.uploaders.append(get_or_set_maintainer(up, session))
2674 return source, dsc_component, dsc_location_id, pfs
2676 __all__.append('add_dsc_to_db')
# Import a binary package upload (.deb or .udeb) into the database.
# NOTE(review): lossy excerpt -- `bin = DBBinary()`, the if/else around the
# poolfile lookup and the session.add/flush lines are not visible here.
2679 def add_deb_to_db(u, filename, session=None):
2681 Contrary to what you might expect, this routine deals with both
2682 debs and udebs. That info is in 'dbtype', whilst 'type' is
2683 'deb' for both of them
2686 entry = u.pkg.files[filename]
2689 bin.package = entry["package"]
2690 bin.version = entry["version"]
2691 bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
2692 bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
2693 bin.arch_id = get_architecture(entry["architecture"], session).arch_id
# 'dbtype' distinguishes deb from udeb (see docstring above).
2694 bin.binarytype = entry["dbtype"]
2697 filename = entry["pool name"] + filename
2698 fullpath = os.path.join(cnf["Dir::Pool"], filename)
2699 if not entry.get("location id", None):
2700 entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
# Reuse an existing poolfile when the upload already carries a files id,
# otherwise create a fresh one.
2702 if entry.get("files id", None):
2703 poolfile = get_poolfile_by_id(bin.poolfile_id)
2704 bin.poolfile_id = entry["files id"]
2706 poolfile = add_poolfile(filename, entry, entry["location id"], session)
2707 bin.poolfile_id = entry["files id"] = poolfile.file_id
# The binary must map to exactly one source package; anything else is a reject.
2710 bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
2711 if len(bin_sources) != 1:
2712 raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
2713 (bin.package, bin.version, entry["architecture"],
2714 filename, bin.binarytype, u.pkg.changes["fingerprint"])
2716 bin.source_id = bin_sources[0].source_id
# Each Built-Using reference must likewise resolve to a unique source.
2718 if entry.has_key("built-using"):
2719 for srcname, version in entry["built-using"]:
2720 exsources = get_sources_from_name(srcname, version, session=session)
2721 if len(exsources) != 1:
2722 raise NoSourceFieldError, "Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
2723 (srcname, version, bin.package, bin.version, entry["architecture"],
2724 filename, bin.binarytype, u.pkg.changes["fingerprint"])
2726 bin.extra_sources.append(exsources[0])
2728 # Add and flush object so it has an ID
2731 suite_names = u.pkg.changes["distribution"].keys()
2732 bin.suites = session.query(Suite). \
2733 filter(Suite.suite_name.in_(suite_names)).all()
2737 # Deal with contents - disabled for now
2738 #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
2740 # print "REJECT\nCould not determine contents of package %s" % bin.package
2741 # session.rollback()
2742 # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
2744 return bin, poolfile
2746 __all__.append('add_deb_to_db')
2748 ################################################################################
# ORM class for the source_acl table; a plain row holder
# (the __repr__ def line is not visible in this excerpt).
2750 class SourceACL(object):
2751 def __init__(self, *args, **kwargs):
2755 return '<SourceACL %s>' % self.source_acl_id
2757 __all__.append('SourceACL')
2759 ################################################################################
# ORM class for the src_format table (e.g. "1.0", "3.0 (quilt)").
2761 class SrcFormat(object):
2762 def __init__(self, *args, **kwargs):
2766 return '<SrcFormat %s>' % (self.format_name)
2768 __all__.append('SrcFormat')
2770 ################################################################################
# (display label, Suite attribute name) pairs used when dumping suite details
# (consumed by Suite.details(); the method header is not visible here).
2772 SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
2773 ('SuiteID', 'suite_id'),
2774 ('Version', 'version'),
2775 ('Origin', 'origin'),
2777 ('Description', 'description'),
2778 ('Untouchable', 'untouchable'),
2779 ('Announce', 'announce'),
2780 ('Codename', 'codename'),
2781 ('OverrideCodename', 'overridecodename'),
2782 ('ValidTime', 'validtime'),
2783 ('Priority', 'priority'),
2784 ('NotAutomatic', 'notautomatic'),
2785 ('CopyChanges', 'copychanges'),
2786 ('OverrideSuite', 'overridesuite')]
2788 # Why the heck don't we have any UNIQUE constraints in table suite?
2789 # TODO: Add UNIQUE constraints for appropriate columns.
# ORM class for the suite table. Supports comparison against plain suite-name
# strings via __eq__/__ne__.
# NOTE(review): lossy excerpt -- docstring delimiters and some method bodies
# (e.g. the details() header around the SUITE_FIELDS loop) are not visible.
2790 class Suite(ORMObject):
2791 def __init__(self, suite_name = None, version = None):
2792 self.suite_name = suite_name
2793 self.version = version
2795 def properties(self):
2796 return ['suite_name', 'version', 'sources_count', 'binaries_count', \
2799 def not_null_constraints(self):
2800 return ['suite_name']
# Allow `suite == "unstable"` string comparison; other types fall through to
# the default comparison via NotImplemented.
2802 def __eq__(self, val):
2803 if isinstance(val, str):
2804 return (self.suite_name == val)
2805 # This signals to use the normal comparison operator
2806 return NotImplemented
2808 def __ne__(self, val):
2809 if isinstance(val, str):
2810 return (self.suite_name != val)
2811 # This signals to use the normal comparison operator
2812 return NotImplemented
# Renders one "Label: value" line per SUITE_FIELDS entry.
2816 for disp, field in SUITE_FIELDS:
2817 val = getattr(self, field, None)
2819 ret.append("%s: %s" % (disp, val))
2821 return "\n".join(ret)
2823 def get_architectures(self, skipsrc=False, skipall=False):
2825 Returns list of Architecture objects
2827 @type skipsrc: boolean
2828 @param skipsrc: Whether to skip returning the 'source' architecture entry
2831 @type skipall: boolean
2832 @param skipall: Whether to skip returning the 'all' architecture entry
2836 @return: list of Architecture objects for the given name (may be empty)
2839 q = object_session(self).query(Architecture).with_parent(self)
2841 q = q.filter(Architecture.arch_string != 'source')
2843 q = q.filter(Architecture.arch_string != 'all')
2844 return q.order_by(Architecture.arch_string).all()
2846 def get_sources(self, source):
2848 Returns a query object representing DBSource that is part of C{suite}.
2850 - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
2852 @type source: string
2853 @param source: source package name
2855 @rtype: sqlalchemy.orm.query.Query
2856 @return: a query of DBSource
2860 session = object_session(self)
2861 return session.query(DBSource).filter_by(source = source). \
2864 __all__.append('Suite')
# NOTE(review): lossy excerpt -- the try:/return q.one() and the return in the
# except branch are not visible; the docstring says None is returned when the
# suite does not exist.
2867 def get_suite(suite, session=None):
2869 Returns Suite object for given C{suite name}.
2872 @param suite: The name of the suite
2874 @type session: Session
2875 @param session: Optional SQLA session object (a temporary one will be
2876 generated if not supplied)
2879 @return: Suite object for the requested suite name (None if not present)
2882 q = session.query(Suite).filter_by(suite_name=suite)
2886 except NoResultFound:
2889 __all__.append('get_suite')
2891 ################################################################################
2893 # TODO: should be removed because the implementation is too trivial
# Thin wrapper around Suite.get_architectures() looked up by suite name.
2895 def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
2897 Returns list of Architecture objects for given C{suite} name
2900 @param suite: Suite name to search for
2902 @type skipsrc: boolean
2903 @param skipsrc: Whether to skip returning the 'source' architecture entry
2906 @type skipall: boolean
2907 @param skipall: Whether to skip returning the 'all' architecture entry
2910 @type session: Session
2911 @param session: Optional SQL session object (a temporary one will be
2912 generated if not supplied)
2915 @return: list of Architecture objects for the given name (may be empty)
2918 return get_suite(suite, session).get_architectures(skipsrc, skipall)
2920 __all__.append('get_suite_architectures')
2922 ################################################################################
# ORM class for the suite_src_formats association table
# (which source formats a suite accepts).
2924 class SuiteSrcFormat(object):
2925 def __init__(self, *args, **kwargs):
2929 return '<SuiteSrcFormat (%s, %s)>' % (self.suite_id, self.src_format_id)
2931 __all__.append('SuiteSrcFormat')
# NOTE(review): lossy excerpt -- the final `return q.all()` is not visible.
2934 def get_suite_src_formats(suite, session=None):
2936 Returns list of allowed SrcFormat for C{suite}.
2939 @param suite: Suite name to search for
2941 @type session: Session
2942 @param session: Optional SQL session object (a temporary one will be
2943 generated if not supplied)
2946 @return: the list of allowed source formats for I{suite}
# Join src_format -> suite_src_formats -> suite, filtered by suite name.
2949 q = session.query(SrcFormat)
2950 q = q.join(SuiteSrcFormat)
2951 q = q.join(Suite).filter_by(suite_name=suite)
2952 q = q.order_by('format_name')
2956 __all__.append('get_suite_src_formats')
2958 ################################################################################
# ORM class for the uid table. Like Suite, supports comparison against a
# plain uid string via __eq__/__ne__.
2960 class Uid(ORMObject):
2961 def __init__(self, uid = None, name = None):
2965 def __eq__(self, val):
2966 if isinstance(val, str):
2967 return (self.uid == val)
2968 # This signals to use the normal comparison operator
2969 return NotImplemented
2971 def __ne__(self, val):
2972 if isinstance(val, str):
2973 return (self.uid != val)
2974 # This signals to use the normal comparison operator
2975 return NotImplemented
2977 def properties(self):
2978 return ['uid', 'name', 'fingerprint']
2980 def not_null_constraints(self):
2983 __all__.append('Uid')
# NOTE(review): lossy excerpt -- the try:/q.one(), the Uid() construction in
# the except branch, session.add and both returns are not visible here.
2986 def get_or_set_uid(uidname, session=None):
2988 Returns uid object for given uidname.
2990 If no matching uidname is found, a row is inserted.
2992 @type uidname: string
2993 @param uidname: The uid to add
2995 @type session: SQLAlchemy
2996 @param session: Optional SQL session object (a temporary one will be
2997 generated if not supplied). If not passed, a commit will be performed at
2998 the end of the function, otherwise the caller is responsible for commiting.
3001 @return: the uid object for the given uidname
3004 q = session.query(Uid).filter_by(uid=uidname)
3008 except NoResultFound:
3012 session.commit_or_flush()
3017 __all__.append('get_or_set_uid')
# Look up the Uid owning a given fingerprint string.
# NOTE(review): lossy excerpt -- the try:/return q.one() and the
# except-branch return are not visible.
3020 def get_uid_from_fingerprint(fpr, session=None):
3021 q = session.query(Uid)
3022 q = q.join(Fingerprint).filter_by(fingerprint=fpr)
3026 except NoResultFound:
3029 __all__.append('get_uid_from_fingerprint')
3031 ################################################################################
# ORM class for the upload_blocks table (blocked source uploads).
3033 class UploadBlock(object):
3034 def __init__(self, *args, **kwargs):
3038 return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
3040 __all__.append('UploadBlock')
3042 ################################################################################
# ORM class for the metadata_keys table (control-field names such as
# "Depends"); values live in BinaryMetadata / SourceMetadata.
3044 class MetadataKey(ORMObject):
3045 def __init__(self, key = None):
3048 def properties(self):
3051 def not_null_constraints(self):
3054 __all__.append('MetadataKey')
# NOTE(review): lossy excerpt -- the try:/q.one(), session.add(ret) and the
# returns are not visible. Docstring fixed below: it documented the parameter
# as `uidname` (copy-paste from get_or_set_uid) but it is named `keyname`.
3057 def get_or_set_metadatakey(keyname, session=None):
3059 Returns MetadataKey object for given keyname.
3061 If no matching keyname is found, a row is inserted.
3063 @type keyname: string
3064 @param keyname: The keyname to add
3066 @type session: SQLAlchemy
3067 @param session: Optional SQL session object (a temporary one will be
3068 generated if not supplied). If not passed, a commit will be performed at
3069 the end of the function, otherwise the caller is responsible for commiting.
3072 @return: the metadatakey object for the given keyname
3075 q = session.query(MetadataKey).filter_by(key=keyname)
3079 except NoResultFound:
3080 ret = MetadataKey(keyname)
3082 session.commit_or_flush()
3086 __all__.append('get_or_set_metadatakey')
3088 ################################################################################
# ORM class for binaries_metadata: one (binary, key) -> value row.
3090 class BinaryMetadata(ORMObject):
3091 def __init__(self, key = None, value = None, binary = None):
3094 self.binary = binary
3096 def properties(self):
3097 return ['binary', 'key', 'value']
3099 def not_null_constraints(self):
3102 __all__.append('BinaryMetadata')
3104 ################################################################################
# ORM class for source_metadata: one (source, key) -> value row.
3106 class SourceMetadata(ORMObject):
3107 def __init__(self, key = None, value = None, source = None):
3110 self.source = source
3112 def properties(self):
3113 return ['source', 'key', 'value']
3115 def not_null_constraints(self):
3118 __all__.append('SourceMetadata')
3120 ################################################################################
# ORM class for version_check rows (per-suite version constraints such as
# "MustBeNewerThan" against a reference suite).
3122 class VersionCheck(ORMObject):
3123 def __init__(self, *args, **kwargs):
3126 def properties(self):
3127 #return ['suite_id', 'check', 'reference_id']
3130 def not_null_constraints(self):
3131 return ['suite', 'check', 'reference']
3133 __all__.append('VersionCheck')
# Return the VersionCheck rows for a suite, optionally restricted to one
# check type. NOTE(review): the suite-not-found handling and the final
# return are not visible in this excerpt.
3136 def get_version_checks(suite_name, check = None, session = None):
3137 suite = get_suite(suite_name, session)
3140 q = session.query(VersionCheck).filter_by(suite=suite)
3142 q = q.filter_by(check=check)
3145 __all__.append('get_version_checks')
3147 ################################################################################
# Borg-pattern singleton wrapping the SQLAlchemy engine, autoloaded table
# metadata and all ORM mappers for the archive database.
# NOTE(review): lossy excerpt -- `__shared_state = {}`, several table names in
# the tables/views lists, the dict( openings of some mapper properties, the
# sessionmaker kwargs and the tail of session() are not visible here.
3149 class DBConn(object):
3151 database module init.
3155 def __init__(self, *args, **kwargs):
# Borg: all instances share one state dict, so setup runs only once.
3156 self.__dict__ = self.__shared_state
3158 if not getattr(self, 'initialised', False):
3159 self.initialised = True
3160 self.debug = kwargs.has_key('debug')
# Autoload every table and view from the live database schema and expose
# them as self.tbl_<name> / self.view_<name>.
3163 def __setuptables(self):
3170 'binaries_metadata',
3174 'build_queue_files',
3175 'build_queue_policy_files',
3180 'changes_pending_binaries',
3181 'changes_pending_files',
3182 'changes_pending_source',
3183 'changes_pending_files_map',
3184 'changes_pending_source_files',
3185 'changes_pool_files',
3187 'extra_src_references',
3196 # TODO: the maintainer column in table override should be removed.
3210 'suite_architectures',
3211 'suite_build_queue_copy',
3212 'suite_src_formats',
3219 'almost_obsolete_all_associations',
3220 'almost_obsolete_src_associations',
3221 'any_associations_source',
3222 'bin_associations_binaries',
3223 'binaries_suite_arch',
3224 'binfiles_suite_component_arch',
3227 'newest_all_associations',
3228 'newest_any_associations',
3230 'newest_src_association',
3231 'obsolete_all_associations',
3232 'obsolete_any_associations',
3233 'obsolete_any_by_all_associations',
3234 'obsolete_src_associations',
3236 'src_associations_bin',
3237 'src_associations_src',
3238 'suite_arch_by_name',
3241 for table_name in tables:
3242 table = Table(table_name, self.db_meta, \
3243 autoload=True, useexisting=True)
3244 setattr(self, 'tbl_%s' % table_name, table)
3246 for view_name in views:
3247 view = Table(view_name, self.db_meta, autoload=True)
3248 setattr(self, 'view_%s' % view_name, view)
# Bind every ORM class defined above to its autoloaded table. The
# `extension = validator` mappers get not-null/property validation from
# ORMObject's MapperExtension.
3250 def __setupmappers(self):
3251 mapper(Architecture, self.tbl_architecture,
3252 properties = dict(arch_id = self.tbl_architecture.c.id,
3253 suites = relation(Suite, secondary=self.tbl_suite_architectures,
3254 order_by='suite_name',
3255 backref=backref('architectures', order_by='arch_string'))),
3256 extension = validator)
3258 mapper(Archive, self.tbl_archive,
3259 properties = dict(archive_id = self.tbl_archive.c.id,
3260 archive_name = self.tbl_archive.c.name))
3262 mapper(BuildQueue, self.tbl_build_queue,
3263 properties = dict(queue_id = self.tbl_build_queue.c.id))
3265 mapper(BuildQueueFile, self.tbl_build_queue_files,
3266 properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
3267 poolfile = relation(PoolFile, backref='buildqueueinstances')))
3269 mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
3271 build_queue = relation(BuildQueue, backref='policy_queue_files'),
3272 file = relation(ChangePendingFile, lazy='joined')))
3274 mapper(DBBinary, self.tbl_binaries,
3275 properties = dict(binary_id = self.tbl_binaries.c.id,
3276 package = self.tbl_binaries.c.package,
3277 version = self.tbl_binaries.c.version,
3278 maintainer_id = self.tbl_binaries.c.maintainer,
3279 maintainer = relation(Maintainer),
3280 source_id = self.tbl_binaries.c.source,
3281 source = relation(DBSource, backref='binaries'),
3282 arch_id = self.tbl_binaries.c.architecture,
3283 architecture = relation(Architecture),
3284 poolfile_id = self.tbl_binaries.c.file,
3285 poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
3286 binarytype = self.tbl_binaries.c.type,
3287 fingerprint_id = self.tbl_binaries.c.sig_fpr,
3288 fingerprint = relation(Fingerprint),
3289 install_date = self.tbl_binaries.c.install_date,
3290 suites = relation(Suite, secondary=self.tbl_bin_associations,
3291 backref=backref('binaries', lazy='dynamic')),
3292 extra_sources = relation(DBSource, secondary=self.tbl_extra_src_references,
3293 backref=backref('extra_binary_references', lazy='dynamic')),
3294 key = relation(BinaryMetadata, cascade='all',
3295 collection_class=attribute_mapped_collection('key'))),
3296 extension = validator)
3298 mapper(BinaryACL, self.tbl_binary_acl,
3299 properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
3301 mapper(BinaryACLMap, self.tbl_binary_acl_map,
3302 properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
3303 fingerprint = relation(Fingerprint, backref="binary_acl_map"),
3304 architecture = relation(Architecture)))
3306 mapper(Component, self.tbl_component,
3307 properties = dict(component_id = self.tbl_component.c.id,
3308 component_name = self.tbl_component.c.name),
3309 extension = validator)
3311 mapper(DBConfig, self.tbl_config,
3312 properties = dict(config_id = self.tbl_config.c.id))
3314 mapper(DSCFile, self.tbl_dsc_files,
3315 properties = dict(dscfile_id = self.tbl_dsc_files.c.id,
3316 source_id = self.tbl_dsc_files.c.source,
3317 source = relation(DBSource),
3318 poolfile_id = self.tbl_dsc_files.c.file,
3319 poolfile = relation(PoolFile)))
3321 mapper(PoolFile, self.tbl_files,
3322 properties = dict(file_id = self.tbl_files.c.id,
3323 filesize = self.tbl_files.c.size,
3324 location_id = self.tbl_files.c.location,
3325 location = relation(Location,
3326 # using lazy='dynamic' in the back
3327 # reference because we have A LOT of
3328 # files in one location
3329 backref=backref('files', lazy='dynamic'))),
3330 extension = validator)
3332 mapper(Fingerprint, self.tbl_fingerprint,
3333 properties = dict(fingerprint_id = self.tbl_fingerprint.c.id,
3334 uid_id = self.tbl_fingerprint.c.uid,
3335 uid = relation(Uid),
3336 keyring_id = self.tbl_fingerprint.c.keyring,
3337 keyring = relation(Keyring),
3338 source_acl = relation(SourceACL),
3339 binary_acl = relation(BinaryACL)),
3340 extension = validator)
3342 mapper(Keyring, self.tbl_keyrings,
3343 properties = dict(keyring_name = self.tbl_keyrings.c.name,
3344 keyring_id = self.tbl_keyrings.c.id))
3346 mapper(DBChange, self.tbl_changes,
3347 properties = dict(change_id = self.tbl_changes.c.id,
3348 poolfiles = relation(PoolFile,
3349 secondary=self.tbl_changes_pool_files,
3350 backref="changeslinks"),
3351 seen = self.tbl_changes.c.seen,
3352 source = self.tbl_changes.c.source,
3353 binaries = self.tbl_changes.c.binaries,
3354 architecture = self.tbl_changes.c.architecture,
3355 distribution = self.tbl_changes.c.distribution,
3356 urgency = self.tbl_changes.c.urgency,
3357 maintainer = self.tbl_changes.c.maintainer,
3358 changedby = self.tbl_changes.c.changedby,
3359 date = self.tbl_changes.c.date,
3360 version = self.tbl_changes.c.version,
3361 files = relation(ChangePendingFile,
3362 secondary=self.tbl_changes_pending_files_map,
3363 backref="changesfile"),
3364 in_queue_id = self.tbl_changes.c.in_queue,
3365 in_queue = relation(PolicyQueue,
3366 primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
3367 approved_for_id = self.tbl_changes.c.approved_for))
3369 mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
3370 properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
3372 mapper(ChangePendingFile, self.tbl_changes_pending_files,
3373 properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
3374 filename = self.tbl_changes_pending_files.c.filename,
3375 size = self.tbl_changes_pending_files.c.size,
3376 md5sum = self.tbl_changes_pending_files.c.md5sum,
3377 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
3378 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
3380 mapper(ChangePendingSource, self.tbl_changes_pending_source,
3381 properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
3382 change = relation(DBChange),
# Two relations to Maintainer from one table need explicit primaryjoins.
3383 maintainer = relation(Maintainer,
3384 primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
3385 changedby = relation(Maintainer,
3386 primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
3387 fingerprint = relation(Fingerprint),
3388 source_files = relation(ChangePendingFile,
3389 secondary=self.tbl_changes_pending_source_files,
3390 backref="pending_sources")))
3393 mapper(KeyringACLMap, self.tbl_keyring_acl_map,
3394 properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
3395 keyring = relation(Keyring, backref="keyring_acl_map"),
3396 architecture = relation(Architecture)))
3398 mapper(Location, self.tbl_location,
3399 properties = dict(location_id = self.tbl_location.c.id,
3400 component_id = self.tbl_location.c.component,
3401 component = relation(Component, backref='location'),
3402 archive_id = self.tbl_location.c.archive,
3403 archive = relation(Archive),
3404 # FIXME: the 'type' column is old cruft and
3405 # should be removed in the future.
3406 archive_type = self.tbl_location.c.type),
3407 extension = validator)
3409 mapper(Maintainer, self.tbl_maintainer,
3410 properties = dict(maintainer_id = self.tbl_maintainer.c.id,
3411 maintains_sources = relation(DBSource, backref='maintainer',
3412 primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.maintainer)),
3413 changed_sources = relation(DBSource, backref='changedby',
3414 primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.changedby))),
3415 extension = validator)
3417 mapper(NewComment, self.tbl_new_comments,
3418 properties = dict(comment_id = self.tbl_new_comments.c.id))
3420 mapper(Override, self.tbl_override,
3421 properties = dict(suite_id = self.tbl_override.c.suite,
3422 suite = relation(Suite, \
3423 backref=backref('overrides', lazy='dynamic')),
3424 package = self.tbl_override.c.package,
3425 component_id = self.tbl_override.c.component,
3426 component = relation(Component, \
3427 backref=backref('overrides', lazy='dynamic')),
3428 priority_id = self.tbl_override.c.priority,
3429 priority = relation(Priority, \
3430 backref=backref('overrides', lazy='dynamic')),
3431 section_id = self.tbl_override.c.section,
3432 section = relation(Section, \
3433 backref=backref('overrides', lazy='dynamic')),
3434 overridetype_id = self.tbl_override.c.type,
3435 overridetype = relation(OverrideType, \
3436 backref=backref('overrides', lazy='dynamic'))))
3438 mapper(OverrideType, self.tbl_override_type,
3439 properties = dict(overridetype = self.tbl_override_type.c.type,
3440 overridetype_id = self.tbl_override_type.c.id))
3442 mapper(PolicyQueue, self.tbl_policy_queue,
3443 properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
3445 mapper(Priority, self.tbl_priority,
3446 properties = dict(priority_id = self.tbl_priority.c.id))
3448 mapper(Section, self.tbl_section,
3449 properties = dict(section_id = self.tbl_section.c.id,
3450 section=self.tbl_section.c.section))
3452 mapper(DBSource, self.tbl_source,
3453 properties = dict(source_id = self.tbl_source.c.id,
3454 version = self.tbl_source.c.version,
3455 maintainer_id = self.tbl_source.c.maintainer,
3456 poolfile_id = self.tbl_source.c.file,
3457 poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
3458 fingerprint_id = self.tbl_source.c.sig_fpr,
3459 fingerprint = relation(Fingerprint),
3460 changedby_id = self.tbl_source.c.changedby,
3461 srcfiles = relation(DSCFile,
3462 primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
3463 suites = relation(Suite, secondary=self.tbl_src_associations,
3464 backref=backref('sources', lazy='dynamic')),
3465 uploaders = relation(Maintainer,
3466 secondary=self.tbl_src_uploaders),
3467 key = relation(SourceMetadata, cascade='all',
3468 collection_class=attribute_mapped_collection('key'))),
3469 extension = validator)
3471 mapper(SourceACL, self.tbl_source_acl,
3472 properties = dict(source_acl_id = self.tbl_source_acl.c.id))
3474 mapper(SrcFormat, self.tbl_src_format,
3475 properties = dict(src_format_id = self.tbl_src_format.c.id,
3476 format_name = self.tbl_src_format.c.format_name))
3478 mapper(Suite, self.tbl_suite,
3479 properties = dict(suite_id = self.tbl_suite.c.id,
3480 policy_queue = relation(PolicyQueue),
3481 copy_queues = relation(BuildQueue,
3482 secondary=self.tbl_suite_build_queue_copy)),
3483 extension = validator)
3485 mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
3486 properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
3487 suite = relation(Suite, backref='suitesrcformats'),
3488 src_format_id = self.tbl_suite_src_formats.c.src_format,
3489 src_format = relation(SrcFormat)))
3491 mapper(Uid, self.tbl_uid,
3492 properties = dict(uid_id = self.tbl_uid.c.id,
3493 fingerprint = relation(Fingerprint)),
3494 extension = validator)
3496 mapper(UploadBlock, self.tbl_upload_blocks,
3497 properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
3498 fingerprint = relation(Fingerprint, backref="uploadblocks"),
3499 uid = relation(Uid, backref="uploadblocks")))
3501 mapper(BinContents, self.tbl_bin_contents,
3503 binary = relation(DBBinary,
3504 backref=backref('contents', lazy='dynamic', cascade='all')),
3505 file = self.tbl_bin_contents.c.file))
3507 mapper(SrcContents, self.tbl_src_contents,
3509 source = relation(DBSource,
3510 backref=backref('contents', lazy='dynamic', cascade='all')),
3511 file = self.tbl_src_contents.c.file))
3513 mapper(MetadataKey, self.tbl_metadata_keys,
3515 key_id = self.tbl_metadata_keys.c.key_id,
3516 key = self.tbl_metadata_keys.c.key))
3518 mapper(BinaryMetadata, self.tbl_binaries_metadata,
3520 binary_id = self.tbl_binaries_metadata.c.bin_id,
3521 binary = relation(DBBinary),
3522 key_id = self.tbl_binaries_metadata.c.key_id,
3523 key = relation(MetadataKey),
3524 value = self.tbl_binaries_metadata.c.value))
3526 mapper(SourceMetadata, self.tbl_source_metadata,
3528 source_id = self.tbl_source_metadata.c.src_id,
3529 source = relation(DBSource),
3530 key_id = self.tbl_source_metadata.c.key_id,
3531 key = relation(MetadataKey),
3532 value = self.tbl_source_metadata.c.value))
3534 mapper(VersionCheck, self.tbl_version_check,
3536 suite_id = self.tbl_version_check.c.suite,
# Suite is referenced twice (suite + reference); explicit primaryjoins
# disambiguate the two foreign keys.
3537 suite = relation(Suite, primaryjoin=self.tbl_version_check.c.suite==self.tbl_suite.c.id),
3538 reference_id = self.tbl_version_check.c.reference,
3539 reference = relation(Suite, primaryjoin=self.tbl_version_check.c.reference==self.tbl_suite.c.id, lazy='joined')))
3541 ## Connection functions
# Build the connection string from dak configuration (service= syntax,
# host/port, or a local socket), create the engine and session factory,
# then autoload tables and set up mappers.
3542 def __createconn(self):
3543 from config import Config
3545 if cnf.has_key("DB::Service"):
3546 connstr = "postgresql://service=%s" % cnf["DB::Service"]
3547 elif cnf.has_key("DB::Host"):
3548 # TCP/IP
3549 connstr = "postgresql://%s" % cnf["DB::Host"]
3550 if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
3551 connstr += ":%s" % cnf["DB::Port"]
3552 connstr += "/%s" % cnf["DB::Name"]
3555 connstr = "postgresql:///%s" % cnf["DB::Name"]
3556 if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
3557 connstr += "?port=%s" % cnf["DB::Port"]
3559 engine_args = { 'echo': self.debug }
3560 if cnf.has_key('DB::PoolSize'):
3561 engine_args['pool_size'] = int(cnf['DB::PoolSize'])
3562 if cnf.has_key('DB::MaxOverflow'):
3563 engine_args['max_overflow'] = int(cnf['DB::MaxOverflow'])
3564 if sa_major_version == '0.6' and cnf.has_key('DB::Unicode') and \
3565 cnf['DB::Unicode'] == 'false':
3566 engine_args['use_native_unicode'] = False
3568 # Monkey patch a new dialect in in order to support service= syntax
3569 import sqlalchemy.dialects.postgresql
3570 from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
3571 class PGDialect_psycopg2_dak(PGDialect_psycopg2):
3572 def create_connect_args(self, url):
3573 if str(url).startswith('postgresql://service='):
# Strip the fixed 21-char prefix 'postgresql://service='.
3575 servicename = str(url)[21:]
3576 return (['service=%s' % servicename], {})
3578 return PGDialect_psycopg2.create_connect_args(self, url)
3580 sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak
3582 self.db_pg = create_engine(connstr, **engine_args)
3583 self.db_meta = MetaData()
3584 self.db_meta.bind = self.db_pg
3585 self.db_smaker = sessionmaker(bind=self.db_pg,
3589 self.__setuptables()
3590 self.__setupmappers()
# Remember creating pid so forked children can reconnect (see session()).
3591 self.pid = os.getpid()
3593 def session(self, work_mem = 0):
3595 Returns a new session object. If a work_mem parameter is provided a new
3596 transaction is started and the work_mem parameter is set for this
3597 transaction. The work_mem parameter is measured in MB. A default value
3598 will be used if the parameter is not set.
3600 # reinitialize DBConn in new processes
3601 if self.pid != os.getpid():
3604 session = self.db_smaker()
3606 session.execute("SET LOCAL work_mem TO '%d MB'" % work_mem)