5 @contact: Debian FTPMaster <ftpmaster@debian.org>
6 @copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
7 @copyright: 2008-2009 Mark Hymers <mhy@debian.org>
8 @copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
9 @copyright: 2009 Mike O'Connor <stew@debian.org>
10 @license: GNU General Public License version 2 or later
13 # This program is free software; you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation; either version 2 of the License, or
16 # (at your option) any later version.
18 # This program is distributed in the hope that it will be useful,
19 # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 # GNU General Public License for more details.
23 # You should have received a copy of the GNU General Public License
24 # along with this program; if not, write to the Free Software
25 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 ################################################################################
29 # < mhy> I need a funny comment
30 # < sgran> two peanuts were walking down a dark street
31 # < sgran> one was a-salted
32 # * mhy looks up the definition of "funny"
34 ################################################################################
38 from os.path import normpath
50 import simplejson as json
52 from datetime import datetime, timedelta
53 from errno import ENOENT
54 from tempfile import mkstemp, mkdtemp
55 from subprocess import Popen, PIPE
56 from tarfile import TarFile
58 from inspect import getargspec
61 from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
63 from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
64 backref, MapperExtension, EXT_CONTINUE, object_mapper, clear_mappers
65 from sqlalchemy import types as sqltypes
66 from sqlalchemy.orm.collections import attribute_mapped_collection
67 from sqlalchemy.ext.associationproxy import association_proxy
69 # Don't remove this, we re-export the exceptions to scripts which import us
70 from sqlalchemy.exc import *
71 from sqlalchemy.orm.exc import NoResultFound
73 # Only import Config until Queue stuff is changed to store its config
75 from config import Config
76 from textutils import fix_maintainer
77 from dak_exceptions import DBUpdateError, NoSourceFieldError, FileExistsError
80 # suppress some deprecation warnings in squeeze related to sqlalchemy
82 warnings.filterwarnings('ignore', \
83 "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
85 warnings.filterwarnings('ignore', \
86 "Predicate of partial index .* ignored during reflection", \
90 ################################################################################
92 # Patch in support for the debversion field type so that it works during
96 # that is for sqlalchemy 0.6
97 UserDefinedType = sqltypes.UserDefinedType
99 # this one for sqlalchemy 0.5
100 UserDefinedType = sqltypes.TypeEngine
102 class DebVersion(UserDefinedType):
103 def get_col_spec(self):
106 def bind_processor(self, dialect):
109 # ' = None' is needed for sqlalchemy 0.5:
110 def result_processor(self, dialect, coltype = None):
113 sa_major_version = sqlalchemy.__version__[0:3]
114 if sa_major_version in ["0.5", "0.6", "0.7"]:
115 from sqlalchemy.databases import postgres
116 postgres.ischema_names['debversion'] = DebVersion
118 raise Exception("dak only ported to SQLA versions 0.5 to 0.7. See daklib/dbconn.py")
120 ################################################################################
122 __all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion']
124 ################################################################################
126 def session_wrapper(fn):
128 Wrapper around common ".., session=None):" handling. If the wrapped
129 function is called without passing 'session', we create a local one
130 and destroy it when the function ends.
132 Also attaches a commit_or_flush method to the session; if we created a
133 local session, this is a synonym for session.commit(), otherwise it is a
134 synonym for session.flush().
137 def wrapped(*args, **kwargs):
138 private_transaction = False
140 # Find the session object
141 session = kwargs.get('session')
144 if len(args) <= len(getargspec(fn)[0]) - 1:
145 # No session specified as last argument or in kwargs
146 private_transaction = True
147 session = kwargs['session'] = DBConn().session()
149 # Session is last argument in args
153 session = args[-1] = DBConn().session()
154 private_transaction = True
156 if private_transaction:
157 session.commit_or_flush = session.commit
159 session.commit_or_flush = session.flush
162 return fn(*args, **kwargs)
164 if private_transaction:
165 # We created a session; close it.
168 wrapped.__doc__ = fn.__doc__
169 wrapped.func_name = fn.func_name
173 __all__.append('session_wrapper')
175 ################################################################################
177 class ORMObject(object):
179 ORMObject is a base class for all ORM classes mapped by SQLalchemy. All
180 derived classes must implement the properties() method.
183 def properties(self):
185 This method should be implemented by all derived classes and returns a
186 list of the important properties. The properties 'created' and
187 'modified' will be added automatically. A suffix '_count' should be
188 added to properties that are lists or query objects. The most important
189 property name should be returned as the first element in the list
190 because it is used by repr().
196 Returns a JSON representation of the object based on the properties
197 returned from the properties() method.
200 # add created and modified
201 all_properties = self.properties() + ['created', 'modified']
202 for property in all_properties:
203 # check for list or query
204 if property[-6:] == '_count':
205 real_property = property[:-6]
206 if not hasattr(self, real_property):
208 value = getattr(self, real_property)
209 if hasattr(value, '__len__'):
212 elif hasattr(value, 'count'):
213 # query (but not during validation)
214 if self.in_validation:
216 value = value.count()
218 raise KeyError('Do not understand property %s.' % property)
220 if not hasattr(self, property):
223 value = getattr(self, property)
227 elif isinstance(value, ORMObject):
228 # use repr() for ORMObject types
231 # we want a string for all other types because json cannot
234 data[property] = value
235 return json.dumps(data)
239 Returns the name of the class.
241 return type(self).__name__
245 Returns a short string representation of the object using the first
246 element from the properties() method.
248 primary_property = self.properties()[0]
249 value = getattr(self, primary_property)
250 return '<%s %s>' % (self.classname(), str(value))
254 Returns a human readable form of the object using the properties()
257 return '<%s %s>' % (self.classname(), self.json())
259 def not_null_constraints(self):
261 Returns a list of properties that must be not NULL. Derived classes
262 should override this method if needed.
266 validation_message = \
267 "Validation failed because property '%s' must not be empty in object\n%s"
269 in_validation = False
273 This function validates the not NULL constraints as returned by
274 not_null_constraints(). It raises the DBUpdateError exception if
277 for property in self.not_null_constraints():
278 # TODO: It is a bit awkward that the mapper configuration allow
279 # directly setting the numeric _id columns. We should get rid of it
281 if hasattr(self, property + '_id') and \
282 getattr(self, property + '_id') is not None:
284 if not hasattr(self, property) or getattr(self, property) is None:
285 # str() might lead to races due to a 2nd flush
286 self.in_validation = True
287 message = self.validation_message % (property, str(self))
288 self.in_validation = False
289 raise DBUpdateError(message)
293 def get(cls, primary_key, session = None):
295 This is a support function that allows getting an object by its primary
298 Architecture.get(3[, session])
300 instead of the more verbose
302 session.query(Architecture).get(3)
304 return session.query(cls).get(primary_key)
306 def session(self, replace = False):
308 Returns the current session that is associated with the object. May
309 return None if object is in detached state.
312 return object_session(self)
314 def clone(self, session = None):
316 Clones the current object in a new session and returns the new clone. A
317 fresh session is created if the optional session parameter is not
318 provided. The function will fail if a session is provided and has
321 RATIONALE: SQLAlchemy's session is not thread safe. This method clones
322 an existing object to allow several threads to work with their own
323 instances of an ORMObject.
325 WARNING: Only persistent (committed) objects can be cloned. Changes
326 made to the original object that are not committed yet will get lost.
327 The session of the new object will always be rolled back to avoid
331 if self.session() is None:
332 raise RuntimeError( \
333 'Method clone() failed for detached object:\n%s' % self)
334 self.session().flush()
335 mapper = object_mapper(self)
336 primary_key = mapper.primary_key_from_instance(self)
337 object_class = self.__class__
339 session = DBConn().session()
340 elif len(session.new) + len(session.dirty) + len(session.deleted) > 0:
341 raise RuntimeError( \
342 'Method clone() failed due to unflushed changes in session.')
343 new_object = session.query(object_class).get(primary_key)
345 if new_object is None:
346 raise RuntimeError( \
347 'Method clone() failed for non-persistent object:\n%s' % self)
350 __all__.append('ORMObject')
352 ################################################################################
354 class Validator(MapperExtension):
356 This class calls the validate() method for each instance for the
357 'before_update' and 'before_insert' events. A global object validator is
358 used for configuring the individual mappers.
361 def before_update(self, mapper, connection, instance):
365 def before_insert(self, mapper, connection, instance):
369 validator = Validator()
371 ################################################################################
class Architecture(ORMObject):
    """ORM class for the 'architecture' table.

    Instances compare equal/unequal directly against plain architecture
    name strings in addition to the normal object comparison.
    """

    def __init__(self, arch_string = None, description = None):
        self.arch_string = arch_string
        self.description = description

    def __eq__(self, val):
        if not isinstance(val, str):
            # Signal Python to fall back to the normal comparison operator.
            return NotImplemented
        return self.arch_string == val

    def __ne__(self, val):
        if not isinstance(val, str):
            # Signal Python to fall back to the normal comparison operator.
            return NotImplemented
        return self.arch_string != val

    def properties(self):
        # 'arch_string' first: it is used by ORMObject.__repr__().
        return ['arch_string', 'arch_id', 'suites_count']

    def not_null_constraints(self):
        return ['arch_string']
396 __all__.append('Architecture')
399 def get_architecture(architecture, session=None):
401 Returns database id for given C{architecture}.
403 @type architecture: string
404 @param architecture: The name of the architecture
406 @type session: Session
407 @param session: Optional SQLA session object (a temporary one will be
408 generated if not supplied)
411 @return: Architecture object for the given arch (None if not present)
414 q = session.query(Architecture).filter_by(arch_string=architecture)
418 except NoResultFound:
421 __all__.append('get_architecture')
423 # TODO: should be removed because the implementation is too trivial
425 def get_architecture_suites(architecture, session=None):
427 Returns list of Suite objects for given C{architecture} name
429 @type architecture: str
430 @param architecture: Architecture name to search for
432 @type session: Session
433 @param session: Optional SQL session object (a temporary one will be
434 generated if not supplied)
437 @return: list of Suite objects for the given name (may be empty)
440 return get_architecture(architecture, session).suites
442 __all__.append('get_architecture_suites')
444 ################################################################################
446 class Archive(object):
447 def __init__(self, *args, **kwargs):
451 return '<Archive %s>' % self.archive_name
453 __all__.append('Archive')
456 def get_archive(archive, session=None):
458 returns database id for given C{archive}.
460 @type archive: string
461 @param archive: the name of the archive
463 @type session: Session
464 @param session: Optional SQLA session object (a temporary one will be
465 generated if not supplied)
468 @return: Archive object for the given name (None if not present)
471 archive = archive.lower()
473 q = session.query(Archive).filter_by(archive_name=archive)
477 except NoResultFound:
480 __all__.append('get_archive')
482 ################################################################################
484 class BinContents(ORMObject):
485 def __init__(self, file = None, binary = None):
489 def properties(self):
490 return ['file', 'binary']
492 __all__.append('BinContents')
494 ################################################################################
def subprocess_setup():
    """Reset SIGPIPE to its default disposition.

    Python installs a SIGPIPE handler at interpreter startup; non-Python
    subprocesses usually expect the OS default instead, so this is meant
    to be passed as the preexec_fn of a Popen call.
    """
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
501 class DBBinary(ORMObject):
502 def __init__(self, package = None, source = None, version = None, \
503 maintainer = None, architecture = None, poolfile = None, \
505 self.package = package
507 self.version = version
508 self.maintainer = maintainer
509 self.architecture = architecture
510 self.poolfile = poolfile
511 self.binarytype = binarytype
515 return self.binary_id
517 def properties(self):
518 return ['package', 'version', 'maintainer', 'source', 'architecture', \
519 'poolfile', 'binarytype', 'fingerprint', 'install_date', \
520 'suites_count', 'binary_id', 'contents_count', 'extra_sources']
522 def not_null_constraints(self):
523 return ['package', 'version', 'maintainer', 'source', 'poolfile', \
526 metadata = association_proxy('key', 'value')
528 def get_component_name(self):
529 return self.poolfile.location.component.component_name
531 def scan_contents(self):
533 Yields the contents of the package. Only regular files are yielded and
534 the path names are normalized after converting them from either utf-8
535 or iso8859-1 encoding. It yields the string ' <EMPTY PACKAGE>' if the
536 package does not contain any regular file.
538 fullpath = self.poolfile.fullpath
539 dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE,
540 preexec_fn = subprocess_setup)
541 tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
542 for member in tar.getmembers():
543 if not member.isdir():
544 name = normpath(member.name)
545 # enforce proper utf-8 encoding
548 except UnicodeDecodeError:
549 name = name.decode('iso8859-1').encode('utf-8')
555 def read_control(self):
557 Reads the control information from a binary.
560 @return: stanza text of the control section.
563 fullpath = self.poolfile.fullpath
564 deb_file = open(fullpath, 'r')
565 stanza = utils.deb_extract_control(deb_file)
570 def read_control_fields(self):
572 Reads the control information from a binary and return
576 @return: fields of the control section as a dictionary.
579 stanza = self.read_control()
580 return apt_pkg.TagSection(stanza)
582 __all__.append('DBBinary')
585 def get_suites_binary_in(package, session=None):
587 Returns list of Suite objects which given C{package} name is in
590 @param package: DBBinary package name to search for
593 @return: list of Suite objects for the given package
596 return session.query(Suite).filter(Suite.binaries.any(DBBinary.package == package)).all()
598 __all__.append('get_suites_binary_in')
601 def get_component_by_package_suite(package, suite_list, arch_list=[], session=None):
603 Returns the component name of the newest binary package in suite_list or
604 None if no package is found. The result can be optionally filtered by a list
605 of architecture names.
608 @param package: DBBinary package name to search for
610 @type suite_list: list of str
611 @param suite_list: list of suite_name items
613 @type arch_list: list of str
614 @param arch_list: optional list of arch_string items that defaults to []
616 @rtype: str or NoneType
617 @return: name of component or None
620 q = session.query(DBBinary).filter_by(package = package). \
621 join(DBBinary.suites).filter(Suite.suite_name.in_(suite_list))
622 if len(arch_list) > 0:
623 q = q.join(DBBinary.architecture). \
624 filter(Architecture.arch_string.in_(arch_list))
625 binary = q.order_by(desc(DBBinary.version)).first()
629 return binary.get_component_name()
631 __all__.append('get_component_by_package_suite')
633 ################################################################################
635 class BinaryACL(object):
636 def __init__(self, *args, **kwargs):
640 return '<BinaryACL %s>' % self.binary_acl_id
642 __all__.append('BinaryACL')
644 ################################################################################
646 class BinaryACLMap(object):
647 def __init__(self, *args, **kwargs):
651 return '<BinaryACLMap %s>' % self.binary_acl_map_id
653 __all__.append('BinaryACLMap')
655 ################################################################################
660 ArchiveDir "%(archivepath)s";
661 OverrideDir "%(overridedir)s";
662 CacheDir "%(cachedir)s";
667 Packages::Compress ". bzip2 gzip";
668 Sources::Compress ". bzip2 gzip";
673 bindirectory "incoming"
678 BinOverride "override.sid.all3";
679 BinCacheDB "packages-accepted.db";
681 FileList "%(filelist)s";
684 Packages::Extensions ".deb .udeb";
687 bindirectory "incoming/"
690 BinOverride "override.sid.all3";
691 SrcOverride "override.sid.all3.src";
692 FileList "%(filelist)s";
696 class BuildQueue(object):
697 def __init__(self, *args, **kwargs):
701 return '<BuildQueue %s>' % self.queue_name
703 def write_metadata(self, starttime, force=False):
704 # Do we write out metafiles?
705 if not (force or self.generate_metadata):
708 session = DBConn().session().object_session(self)
710 fl_fd = fl_name = ac_fd = ac_name = None
712 arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
713 startdir = os.getcwd()
716 # Grab files we want to include
717 newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
718 newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
719 # Write file list with newer files
720 (fl_fd, fl_name) = mkstemp()
722 os.write(fl_fd, '%s\n' % n.fullpath)
727 # Write minimal apt.conf
728 # TODO: Remove hardcoding from template
729 (ac_fd, ac_name) = mkstemp()
730 os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
732 'cachedir': cnf["Dir::Cache"],
733 'overridedir': cnf["Dir::Override"],
737 # Run apt-ftparchive generate
738 os.chdir(os.path.dirname(ac_name))
739 os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
741 # Run apt-ftparchive release
742 # TODO: Eww - fix this
743 bname = os.path.basename(self.path)
747 # We have to remove the Release file otherwise it'll be included in the
750 os.unlink(os.path.join(bname, 'Release'))
754 os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
756 # Crude hack with open and append, but this whole section is and should be redone.
757 if self.notautomatic:
758 release=open("Release", "a")
759 release.write("NotAutomatic: yes\n")
764 keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
765 if cnf.has_key("Dinstall::SigningPubKeyring"):
766 keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
768 os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
770 # Move the files if we got this far
771 os.rename('Release', os.path.join(bname, 'Release'))
773 os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
775 # Clean up any left behind files
802 def clean_and_update(self, starttime, Logger, dryrun=False):
803 """WARNING: This routine commits for you"""
804 session = DBConn().session().object_session(self)
806 if self.generate_metadata and not dryrun:
807 self.write_metadata(starttime)
809 # Grab files older than our execution time
810 older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
811 older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
817 Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
819 Logger.log(["I: Removing %s from the queue" % o.fullpath])
820 os.unlink(o.fullpath)
823 # If it wasn't there, don't worry
824 if e.errno == ENOENT:
827 # TODO: Replace with proper logging call
828 Logger.log(["E: Could not remove %s" % o.fullpath])
835 for f in os.listdir(self.path):
836 if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
839 if not self.contains_filename(f):
840 fp = os.path.join(self.path, f)
842 Logger.log(["I: Would remove unused link %s" % fp])
844 Logger.log(["I: Removing unused link %s" % fp])
848 Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
850 def contains_filename(self, filename):
853 @returns True if filename is supposed to be in the queue; False otherwise
855 session = DBConn().session().object_session(self)
856 if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
858 elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
862 def add_file_from_pool(self, poolfile):
863 """Copies a file into the pool. Assumes that the PoolFile object is
864 attached to the same SQLAlchemy session as the Queue object is.
866 The caller is responsible for committing after calling this function."""
867 poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
869 # Check if we have a file of this name or this ID already
870 for f in self.queuefiles:
871 if (f.fileid is not None and f.fileid == poolfile.file_id) or \
872 (f.poolfile is not None and f.poolfile.filename == poolfile_basename):
873 # In this case, update the BuildQueueFile entry so we
874 # don't remove it too early
875 f.lastused = datetime.now()
876 DBConn().session().object_session(poolfile).add(f)
879 # Prepare BuildQueueFile object
880 qf = BuildQueueFile()
881 qf.build_queue_id = self.queue_id
882 qf.filename = poolfile_basename
884 targetpath = poolfile.fullpath
885 queuepath = os.path.join(self.path, poolfile_basename)
889 # We need to copy instead of symlink
891 utils.copy(targetpath, queuepath)
892 # NULL in the fileid field implies a copy
895 os.symlink(targetpath, queuepath)
896 qf.fileid = poolfile.file_id
897 except FileExistsError:
898 if not poolfile.identical_to(queuepath):
903 # Get the same session as the PoolFile is using and add the qf to it
904 DBConn().session().object_session(poolfile).add(qf)
908 def add_changes_from_policy_queue(self, policyqueue, changes):
910 Copies a changes from a policy queue together with its poolfiles.
912 @type policyqueue: PolicyQueue
913 @param policyqueue: policy queue to copy the changes from
915 @type changes: DBChange
916 @param changes: changes to copy to this build queue
918 for policyqueuefile in changes.files:
919 self.add_file_from_policy_queue(policyqueue, policyqueuefile)
920 for poolfile in changes.poolfiles:
921 self.add_file_from_pool(poolfile)
923 def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
925 Copies a file from a policy queue.
926 Assumes that the policyqueuefile is attached to the same SQLAlchemy
927 session as the Queue object is. The caller is responsible for
928 committing after calling this function.
930 @type policyqueue: PolicyQueue
931 @param policyqueue: policy queue to copy the file from
933 @type policyqueuefile: ChangePendingFile
934 @param policyqueuefile: file to be added to the build queue
936 session = DBConn().session().object_session(policyqueuefile)
938 # Is the file already there?
940 f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
941 f.lastused = datetime.now()
943 except NoResultFound:
944 pass # continue below
946 # We have to add the file.
947 f = BuildQueuePolicyFile()
949 f.file = policyqueuefile
950 f.filename = policyqueuefile.filename
952 source = os.path.join(policyqueue.path, policyqueuefile.filename)
955 # Always copy files from policy queues as they might move around.
957 utils.copy(source, target)
958 except FileExistsError:
959 if not policyqueuefile.identical_to(target):
967 __all__.append('BuildQueue')
970 def get_build_queue(queuename, session=None):
972 Returns BuildQueue object for given C{queue name}, creating it if it does not
975 @type queuename: string
976 @param queuename: The name of the queue
978 @type session: Session
979 @param session: Optional SQLA session object (a temporary one will be
980 generated if not supplied)
983 @return: BuildQueue object for the given queue
986 q = session.query(BuildQueue).filter_by(queue_name=queuename)
990 except NoResultFound:
993 __all__.append('get_build_queue')
995 ################################################################################
997 class BuildQueueFile(object):
999 BuildQueueFile represents a file in a build queue coming from a pool.
1002 def __init__(self, *args, **kwargs):
1006 return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
1010 return os.path.join(self.buildqueue.path, self.filename)
1013 __all__.append('BuildQueueFile')
1015 ################################################################################
1017 class BuildQueuePolicyFile(object):
1019 BuildQueuePolicyFile represents a file in a build queue that comes from a
1020 policy queue (and not a pool).
1023 def __init__(self, *args, **kwargs):
1027 #def filename(self):
1028 # return self.file.filename
1032 return os.path.join(self.build_queue.path, self.filename)
1034 __all__.append('BuildQueuePolicyFile')
1036 ################################################################################
1038 class ChangePendingBinary(object):
1039 def __init__(self, *args, **kwargs):
1043 return '<ChangePendingBinary %s>' % self.change_pending_binary_id
1045 __all__.append('ChangePendingBinary')
1047 ################################################################################
1049 class ChangePendingFile(object):
1050 def __init__(self, *args, **kwargs):
1054 return '<ChangePendingFile %s>' % self.change_pending_file_id
1056 def identical_to(self, filename):
1058 compare size and hash with the given file
1061 @return: true if the given file has the same size and hash as this object; false otherwise
1063 st = os.stat(filename)
1064 if self.size != st.st_size:
1067 f = open(filename, "r")
1068 sha256sum = apt_pkg.sha256sum(f)
1069 if sha256sum != self.sha256sum:
1074 __all__.append('ChangePendingFile')
1076 ################################################################################
1078 class ChangePendingSource(object):
1079 def __init__(self, *args, **kwargs):
1083 return '<ChangePendingSource %s>' % self.change_pending_source_id
1085 __all__.append('ChangePendingSource')
1087 ################################################################################
class Component(ORMObject):
    """ORM class for the 'component' table.

    Instances compare equal/unequal directly against plain component
    name strings in addition to the normal object comparison.
    """

    def __init__(self, component_name = None):
        self.component_name = component_name

    def __eq__(self, val):
        if not isinstance(val, str):
            # Signal Python to fall back to the normal comparison operator.
            return NotImplemented
        return self.component_name == val

    def __ne__(self, val):
        if not isinstance(val, str):
            # Signal Python to fall back to the normal comparison operator.
            return NotImplemented
        return self.component_name != val

    def properties(self):
        # 'component_name' first: it is used by ORMObject.__repr__().
        return ['component_name', 'component_id', 'description', \
            'location_count', 'meets_dfsg', 'overrides_count']

    def not_null_constraints(self):
        return ['component_name']
1113 __all__.append('Component')
1116 def get_component(component, session=None):
1118 Returns database id for given C{component}.
1120 @type component: string
1121 @param component: The name of the override type
1124 @return: the database id for the given component
1127 component = component.lower()
1129 q = session.query(Component).filter_by(component_name=component)
1133 except NoResultFound:
1136 __all__.append('get_component')
1139 def get_component_names(session=None):
1141 Returns list of strings of component names.
1144 @return: list of strings of component names
1147 return [ x.component_name for x in session.query(Component).all() ]
1149 __all__.append('get_component_names')
1151 ################################################################################
1153 class DBConfig(object):
1154 def __init__(self, *args, **kwargs):
1158 return '<DBConfig %s>' % self.name
1160 __all__.append('DBConfig')
1162 ################################################################################
1165 def get_or_set_contents_file_id(filename, session=None):
1167 Returns database id for given filename.
1169 If no matching file is found, a row is inserted.
1171 @type filename: string
1172 @param filename: The filename
1173 @type session: SQLAlchemy
1174 @param session: Optional SQL session object (a temporary one will be
1175 generated if not supplied). If not passed, a commit will be performed at
1176 the end of the function, otherwise the caller is responsible for committing.
1179 @return: the database id for the given component
1182 q = session.query(ContentFilename).filter_by(filename=filename)
1185 ret = q.one().cafilename_id
1186 except NoResultFound:
1187 cf = ContentFilename()
1188 cf.filename = filename
1190 session.commit_or_flush()
1191 ret = cf.cafilename_id
1195 __all__.append('get_or_set_contents_file_id')
1198 def get_contents(suite, overridetype, section=None, session=None):
# NOTE(review): this listing has dropped lines (original numbering jumps over
# 1199, 1202-1203, 1215, 1218-1220 and 1223-1225): the docstring delimiters
# and part of the SELECT column list are absent here.  Recover the full text
# from VCS before editing this function.
1200 Returns contents for a suite / overridetype combination, limiting
1201 to a section if not None.
1204 @param suite: Suite object
1206 @type overridetype: OverrideType
1207 @param overridetype: OverrideType object
1209 @type section: Section
1210 @param section: Optional section object to limit results to
1212 @type session: SQLAlchemy
1213 @param session: Optional SQL session object (a temporary one will be
1214 generated if not supplied)
1216 @rtype: ResultsProxy
1217 @return: ResultsProxy object set up to return tuples of (filename, section,
1221 # find me all of the contents for a given suite
1222 contents_q = """SELECT (p.path||'/'||n.file) AS fn,
1226 FROM content_associations c join content_file_paths p ON (c.filepath=p.id)
1227 JOIN content_file_names n ON (c.filename=n.id)
1228 JOIN binaries b ON (b.id=c.binary_pkg)
1229 JOIN override o ON (o.package=b.package)
1230 JOIN section s ON (s.id=o.section)
1231 WHERE o.suite = :suiteid AND o.type = :overridetypeid
1232 AND b.type=:overridetypename"""
# bind parameters for the raw SQL above
1234 vals = {'suiteid': suite.suite_id,
1235 'overridetypeid': overridetype.overridetype_id,
1236 'overridetypename': overridetype.overridetype}
# optionally narrow the result set to a single section id
1238 if section is not None:
1239 contents_q += " AND s.id = :sectionid"
1240 vals['sectionid'] = section.section_id
1242 contents_q += " ORDER BY fn"
# hand back the raw SQLAlchemy result proxy; the caller iterates the rows
1244 return session.execute(contents_q, vals)
1246 __all__.append('get_contents')
1248 ################################################################################
class ContentFilepath(object):
    """
    ORM class for a row of the content_file_paths table: a directory path
    that occurs in package contents.  Attributes (filepath, cafilepath_id)
    are provided by the SQLAlchemy mapper.
    """
    # Restored: the listing had lost the empty __init__ body and the
    # __repr__ definition line.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<ContentFilepath %s>' % self.filepath
1257 __all__.append('ContentFilepath')
def get_or_set_contents_path_id(filepath, session=None):
    """
    Returns database id for given path.

    If no matching file is found, a row is inserted.

    @type filepath: string
    @param filepath: The filepath

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied).  If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for committing.

    @rtype: int
    @return: the database id for the given path
    """
    # session is supplied by the @session_wrapper decorator (decorator line
    # not visible in this listing) when the caller passes None.
    q = session.query(ContentFilepath).filter_by(filepath=filepath)

    try:
        ret = q.one().cafilepath_id
    except NoResultFound:
        # Not known yet: insert a new row and flush/commit so the id exists.
        cf = ContentFilepath()
        cf.filepath = filepath
        session.add(cf)
        session.commit_or_flush()
        ret = cf.cafilepath_id

    return ret
1291 __all__.append('get_or_set_contents_path_id')
1293 ################################################################################
class ContentAssociation(object):
    """
    ORM class linking a binary package to one (path, filename) pair of its
    contents (row of the content_associations table).  Attributes come from
    the SQLAlchemy mapper.
    """
    # Restored: the listing had lost the empty __init__ body and the
    # __repr__ definition line.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<ContentAssociation %s>' % self.ca_id
1302 __all__.append('ContentAssociation')
1304 def insert_content_paths(binary_id, fullpaths, session=None):
# NOTE(review): lines are missing from this listing (original numbering jumps
# over 1305, 1307, 1318, 1320-1321, 1323, 1325-1328, 1333, 1335, 1338-1345,
# 1347 and 1349-1354): the docstring delimiters, the private-session setup,
# the try/except bracketing and the commit/rollback/return paths are not
# visible.  Recover the full text from VCS before changing control flow.
# NOTE(review): line 1346 suggests a broad exception handler that prints a
# traceback — presumably a deliberate best-effort; confirm before narrowing.
1306 Make sure given path is associated with given binary id
1308 @type binary_id: int
1309 @param binary_id: the id of the binary
1310 @type fullpaths: list
1311 @param fullpaths: the list of paths of the file being associated with the binary
1312 @type session: SQLAlchemy session
1313 @param session: Optional SQLAlchemy session. If this is passed, the caller
1314 is responsible for ensuring a transaction has begun and committing the
1315 results or rolling back based on the result code. If not passed, a commit
1316 will be performed at the end of the function, otherwise the caller is
1317 responsible for commiting.
1319 @return: True upon success
# privatetrans tracks whether this function created the session itself (and
# so must commit/close or rollback it itself).
1322 privatetrans = False
1324 session = DBConn().session()
# generator producing one bind-parameter dict per path; a leading './' is
# stripped so paths are stored relative.
1329 def generate_path_dicts():
1330 for fullpath in fullpaths:
1331 if fullpath.startswith( './' ):
1332 fullpath = fullpath[2:]
1334 yield {'filename':fullpath, 'id': binary_id }
1336 for d in generate_path_dicts():
1337 session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
# error path: log the traceback ...
1346 traceback.print_exc()
1348 # Only rollback if we set up the session ourself
1355 __all__.append('insert_content_paths')
1357 ################################################################################
class DSCFile(object):
    """
    ORM class for a row of the dsc_files table: one file referenced by a
    source package's .dsc.  Attributes (dscfile_id, source_id, poolfile_id)
    are provided by the SQLAlchemy mapper.
    """
    # Restored: the listing had lost the empty __init__ body and the
    # __repr__ definition line.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<DSCFile %s>' % self.dscfile_id
1366 __all__.append('DSCFile')
def get_dscfiles(dscfile_id=None, source_id=None, poolfile_id=None, session=None):
    """
    Returns a list of DSCFiles which may be empty

    @type dscfile_id: int (optional)
    @param dscfile_id: the dscfile_id of the DSCFiles to find

    @type source_id: int (optional)
    @param source_id: the source id related to the DSCFiles to find

    @type poolfile_id: int (optional)
    @param poolfile_id: the poolfile id related to the DSCFiles to find

    @rtype: list
    @return: Possibly empty list of DSCFiles
    """
    # session is supplied by the @session_wrapper decorator when None.
    q = session.query(DSCFile)

    # each filter is applied only when the corresponding id was given
    if dscfile_id is not None:
        q = q.filter_by(dscfile_id=dscfile_id)

    if source_id is not None:
        q = q.filter_by(source_id=source_id)

    if poolfile_id is not None:
        q = q.filter_by(poolfile_id=poolfile_id)

    # Restored: the final 'return q.all()' was missing from the listing.
    return q.all()
1399 __all__.append('get_dscfiles')
1401 ################################################################################
class ExternalOverride(ORMObject):
    """
    ORM class for an external override: a free-form (package, key, value)
    triple maintained outside the regular override tables.
    """
    # Restored: the listing had lost the empty __init__ body and the
    # __repr__ definition line.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<ExternalOverride %s = %s: %s>' % (self.package, self.key, self.value)
1410 __all__.append('ExternalOverride')
1412 ################################################################################
class PoolFile(ORMObject):
    """
    ORM class for a file stored in the archive pool (row of the files table).
    """
    def __init__(self, filename = None, location = None, filesize = -1, \
        md5sum = None):
        self.filename = filename
        self.location = location
        self.filesize = filesize
        self.md5sum = md5sum

    @property
    def fullpath(self):
        # absolute on-disk path: location path + pool-relative filename
        return os.path.join(self.location.path, self.filename)

    def is_valid(self, filesize = -1, md5sum = None):
        # both size and md5sum must match the stored values
        return self.filesize == long(filesize) and self.md5sum == md5sum

    def properties(self):
        return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
            'sha256sum', 'location', 'source', 'binary', 'last_used']

    def not_null_constraints(self):
        return ['filename', 'md5sum', 'location']

    def identical_to(self, filename):
        """
        compare size and hash with the given file

        @rtype: bool
        @return: true if the given file has the same size and hash as this object; false otherwise
        """
        st = os.stat(filename)
        if self.filesize != st.st_size:
            return False

        # Fix: open in binary mode and always close the handle — the
        # original opened in text mode and leaked the file object.
        f = open(filename, "rb")
        try:
            sha256sum = apt_pkg.sha256sum(f)
        finally:
            f.close()
        if sha256sum != self.sha256sum:
            return False

        return True
1454 __all__.append('PoolFile')
def check_poolfile(filename, filesize, md5sum, location_id, session=None):
    """
    Returns a tuple:
    (ValidFileFound [boolean], PoolFile object or None)

    @type filename: string
    @param filename: the filename of the file to check against the DB

    @type filesize: int
    @param filesize: the size of the file to check against the DB

    @type md5sum: string
    @param md5sum: the md5sum of the file to check against the DB

    @type location_id: int
    @param location_id: the id of the location to look in

    @rtype: tuple
    @return: Tuple of length 2.
                 - If valid pool file found: (C{True}, C{PoolFile object})
                 - If valid pool file not found:
                     - (C{False}, C{None}) if no file found
                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
    """
    # look the file up via its location's files collection
    poolfile = session.query(Location).get(location_id). \
        files.filter_by(filename=filename).first()
    # Restored: 'valid' initialisation/assignment were missing from the listing.
    valid = False
    if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
        valid = True

    return (valid, poolfile)
1490 __all__.append('check_poolfile')
1492 # TODO: the implementation can trivially be inlined at the place where the
1493 # function is called
def get_poolfile_by_id(file_id, session=None):
    """
    Returns a PoolFile objects or None for the given id

    @type file_id: int
    @param file_id: the id of the file to look for

    @rtype: PoolFile or None
    @return: either the PoolFile object or None
    """
    # simple primary-key lookup; session supplied by @session_wrapper if None
    return session.query(PoolFile).get(file_id)
1508 __all__.append('get_poolfile_by_id')
def get_poolfile_like_name(filename, session=None):
    """
    Returns an array of PoolFile objects which are like the given name

    @type filename: string
    @param filename: the filename of the file to check against the DB

    @rtype: array
    @return: array of PoolFile objects
    """
    # TODO: There must be a way of properly using bind parameters with %FOO%
    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))

    # Restored: the final 'return q.all()' was missing from the listing.
    return q.all()
1527 __all__.append('get_poolfile_like_name')
def add_poolfile(filename, datadict, location_id, session=None):
    """
    Add a new file to the pool

    @type filename: string
    @param filename: filename

    @type datadict: dict
    @param datadict: dict with needed data

    @type location_id: int
    @param location_id: database id of the location

    @rtype: PoolFile
    @return: the PoolFile object created
    """
    # assumes datadict carries "size", "md5sum", "sha1sum", "sha256sum"
    poolfile = PoolFile()
    poolfile.filename = filename
    poolfile.filesize = datadict["size"]
    poolfile.md5sum = datadict["md5sum"]
    poolfile.sha1sum = datadict["sha1sum"]
    poolfile.sha256sum = datadict["sha256sum"]
    poolfile.location_id = location_id

    session.add(poolfile)
    # Flush to get a file id (NB: This is not a commit)
    # Restored: the flush and return were missing from the listing.
    session.flush()

    return poolfile
1560 __all__.append('add_poolfile')
1562 ################################################################################
1564 class Fingerprint(ORMObject):
# ORM class for an OpenPGP key fingerprint row.
1565 def __init__(self, fingerprint = None):
1566 self.fingerprint = fingerprint
1568 def properties(self):
# NOTE(review): the continuation line (original 1570) of this list is missing
# from the listing — confirm the remaining property names against VCS.
1569 return ['fingerprint', 'fingerprint_id', 'keyring', 'uid', \
1572 def not_null_constraints(self):
1573 return ['fingerprint']
1575 __all__.append('Fingerprint')
def get_fingerprint(fpr, session=None):
    """
    Returns Fingerprint object for given fpr.

    @type fpr: string
    @param fpr: The fpr to find / add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied).

    @rtype: Fingerprint
    @return: the Fingerprint object for the given fpr or None
    """
    q = session.query(Fingerprint).filter_by(fingerprint=fpr)

    # Restored: the try/one()/None scaffolding was missing from the listing.
    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret
1602 __all__.append('get_fingerprint')
def get_or_set_fingerprint(fpr, session=None):
    """
    Returns Fingerprint object for given fpr.

    If no matching fpr is found, a row is inserted.

    @type fpr: string
    @param fpr: The fpr to find / add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied).  If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for committing.
    A flush will be performed either way.

    @rtype: Fingerprint
    @return: the Fingerprint object for the given fpr
    """
    q = session.query(Fingerprint).filter_by(fingerprint=fpr)

    try:
        ret = q.one()
    except NoResultFound:
        # unknown fingerprint: insert it, flush/commit so the row exists
        fingerprint = Fingerprint()
        fingerprint.fingerprint = fpr
        session.add(fingerprint)
        session.commit_or_flush()
        # Restored: 'ret = fingerprint' and the return were missing from
        # the listing.
        ret = fingerprint

    return ret
1637 __all__.append('get_or_set_fingerprint')
1639 ################################################################################
1641 # Helper routine for Keyring class
def get_ldap_name(entry):
    """
    Returns the user name (cn, mn, sn) from an LDAP result entry.

    @type entry: dict
    @param entry: LDAP attribute dict mapping attribute name -> list of values

    @rtype: string
    @return: name components joined by single spaces (may be empty)
    """
    # Restored: the accumulator, the entry lookup and the append were
    # missing from the listing.
    name = []
    for k in ["cn", "mn", "sn"]:
        ret = entry.get(k)
        # skip absent attributes and the "-"/empty placeholders
        if ret and ret[0] != "" and ret[0] != "-":
            name.append(ret[0])
    return " ".join(name)
1650 ################################################################################
1652 class Keyring(object):
# Represents one gpg keyring known to the database and caches the keys and
# uids parsed out of it (self.keys, self.fpr_lookup).
# NOTE(review): lines are missing throughout this listing (original numbering
# jumps over e.g. 1655-1658 class attributes, 1660-1662 __init__/__repr__
# bodies, 1673, 1677-1678, 1684-1689 load_keys setup, 1692-1695 'pub'
# handling, 1712-1714, 1723, 1725-1730, 1734, 1738, 1740-1742, 1746,
# 1750-1752, 1755, 1757, 1763-1764, 1769).  Recover the full class from VCS
# before making behavioural changes.
1653 gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
1654 " --with-colons --fingerprint --fingerprint"
1659 def __init__(self, *args, **kwargs):
1663 return '<Keyring %s>' % self.keyring_name
# Undo gpg's \xNN escaping in user-id strings: every odd element of the
# re.split result is a '\xNN' token that is turned back into its character.
1665 def de_escape_gpg_str(self, txt):
1666 esclist = re.split(r'(\\x..)', txt)
1667 for x in range(1,len(esclist),2):
1668 esclist[x] = "%c" % (int(esclist[x][2:],16))
1669 return "".join(esclist)
1671 def parse_address(self, uid):
1672 """parses uid and returns a tuple of real name and email address"""
1674 (name, address) = email.Utils.parseaddr(uid)
# drop any parenthesised comment from the real-name part
1675 name = re.sub(r"\s*[(].*[)]", "", name)
1676 name = self.de_escape_gpg_str(name)
1679 return (name, address)
# Parse 'gpg --with-colons' output for the given keyring file and fill
# self.keys / self.fpr_lookup.
1681 def load_keys(self, keyring):
1682 if not self.keyring_id:
1683 raise Exception('Must be initialized with database information')
1685 k = os.popen(self.gpg_invocation % keyring, "r")
1690 field = line.split(":")
1691 if field[0] == "pub":
1694 (name, addr) = self.parse_address(field[9])
1696 self.keys[key]["email"] = addr
1697 self.keys[key]["name"] = name
1698 self.keys[key]["fingerprints"] = []
# subkey records: field 11 holds capability flags; 's' = signing-capable
1700 elif key and field[0] == "sub" and len(field) >= 12:
1701 signingkey = ("s" in field[11])
1702 elif key and field[0] == "uid":
1703 (name, addr) = self.parse_address(field[9])
# the first uid carrying a real email address wins
1704 if "email" not in self.keys[key] and "@" in addr:
1705 self.keys[key]["email"] = addr
1706 self.keys[key]["name"] = name
# fingerprints are recorded only for signing-capable keys
1707 elif signingkey and field[0] == "fpr":
1708 self.keys[key]["fingerprints"].append(field[9])
1709 self.fpr_lookup[field[9]] = key
# Query the LDAP directory configured in dak's Config and map key
# fingerprints to LDAP uids; returns (byname, byuid) dictionaries.
1711 def import_users_from_ldap(self, session):
1715 LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
1716 LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
# anonymous bind, then search for entries with a key fingerprint in the
# configured valid GID
1718 l = ldap.open(LDAPServer)
1719 l.simple_bind_s("","")
1720 Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
1721 "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
1722 ["uid", "keyfingerprint", "cn", "mn", "sn"])
1724 ldap_fin_uid_id = {}
1731 uid = entry["uid"][0]
1732 name = get_ldap_name(entry)
1733 fingerprints = entry["keyFingerPrint"]
1735 for f in fingerprints:
# only fingerprints we loaded from the keyring are considered
1736 key = self.fpr_lookup.get(f, None)
1737 if key not in self.keys:
1739 self.keys[key]["uid"] = uid
1743 keyid = get_or_set_uid(uid, session).uid_id
1744 byuid[keyid] = (uid, name)
1745 byname[uid] = (keyid, name)
1747 return (byname, byuid)
# Generate uid entries from the keyring itself; 'format' is a %-template
# applied to each key's email (e.g. "%s"); returns (byname, byuid).
1749 def generate_users_from_keyring(self, format, session):
1753 for x in self.keys.keys():
# keys without a usable email get a placeholder uid
1754 if "email" not in self.keys[x]:
1756 self.keys[x]["uid"] = format % "invalid-uid"
1758 uid = format % self.keys[x]["email"]
1759 keyid = get_or_set_uid(uid, session).uid_id
1760 byuid[keyid] = (uid, self.keys[x]["name"])
1761 byname[uid] = (keyid, self.keys[x]["name"])
1762 self.keys[x]["uid"] = uid
# a single shared placeholder entry covers all email-less keys
1765 uid = format % "invalid-uid"
1766 keyid = get_or_set_uid(uid, session).uid_id
1767 byuid[keyid] = (uid, "ungeneratable user id")
1768 byname[uid] = (keyid, "ungeneratable user id")
1770 return (byname, byuid)
1772 __all__.append('Keyring')
def get_keyring(keyring, session=None):
    """
    If C{keyring} does not have an entry in the C{keyrings} table yet, return None
    If C{keyring} already has an entry, simply return the existing Keyring

    @type keyring: string
    @param keyring: the keyring name

    @rtype: Keyring
    @return: the Keyring object for this keyring
    """
    q = session.query(Keyring).filter_by(keyring_name=keyring)

    # Restored: the try/one()/None scaffolding was missing from the listing.
    try:
        return q.one()
    except NoResultFound:
        return None
1794 __all__.append('get_keyring')
def get_active_keyring_paths(session=None):
    """
    @rtype: list
    @return: list of active keyring paths
    """
    # active keyrings only, highest priority first
    return [ x.keyring_name for x in session.query(Keyring).filter(Keyring.active == True).order_by(desc(Keyring.priority)).all() ]
1804 __all__.append('get_active_keyring_paths')
def get_primary_keyring_path(session=None):
    """
    Get the full path to the highest priority active keyring

    @rtype: str or None
    @return: path to the active keyring with the highest priority or None if no
             keyring is configured
    """
    keyrings = get_active_keyring_paths()

    # Restored: both return statements were missing from the listing.
    if len(keyrings) > 0:
        return keyrings[0]
    else:
        return None
1822 __all__.append('get_primary_keyring_path')
1824 ################################################################################
class KeyringACLMap(object):
    """
    ORM class mapping a keyring to an ACL (row of keyring_acl_map).
    Attributes are provided by the SQLAlchemy mapper.
    """
    # Restored: the listing had lost the empty __init__ body and the
    # __repr__ definition line.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<KeyringACLMap %s>' % self.keyring_acl_map_id
1833 __all__.append('KeyringACLMap')
1835 ################################################################################
1837 class DBChange(object):
# ORM class for an uploaded .changes record.
1838 def __init__(self, *args, **kwargs):
1842 return '<DBChange %s>' % self.changesname
# Detach this changes record from its policy queue.  The two DELETE
# statements removing changes_pool_files / changes_pending_files rows
# (original lines 1848-1852) are missing from this listing — recover them
# from VCS before editing; only the queue-linkage reset is visible below.
1844 def clean_from_queue(self):
1845 session = DBConn().session().object_session(self)
1847 # Remove changes_pool_files entries
1850 # Remove changes_pending_files references
1853 # Clear out of queue
1854 self.in_queue = None
1855 self.approved_for_id = None
1857 __all__.append('DBChange')
def get_dbchange(filename, session=None):
    """
    returns DBChange object for given C{filename}.

    @type filename: string
    @param filename: the name of the file

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: DBChange
    @return: DBChange object for the given filename (C{None} if not present)
    """
    # Also fixes the garbled docstring reference ("C(unknown)") above.
    q = session.query(DBChange).filter_by(changesname=filename)

    # Restored: the try/one()/None scaffolding was missing from the listing.
    try:
        return q.one()
    except NoResultFound:
        return None
1882 __all__.append('get_dbchange')
1884 ################################################################################
class Location(ORMObject):
    """
    ORM class for an archive location (row of the location table).
    """
    def __init__(self, path = None, component = None):
        # Restored: 'self.path = path' was missing from the listing.
        self.path = path
        self.component = component
        # the column 'type' should go away, see comment at mapper
        self.archive_type = 'pool'

    def properties(self):
        # NOTE(review): the continuation line of this list was missing from
        # the listing; 'files_count' restored from upstream — confirm in VCS.
        return ['path', 'location_id', 'archive_type', 'component', \
            'files_count']

    def not_null_constraints(self):
        return ['path', 'archive_type']
1900 __all__.append('Location')
def get_location(location, component=None, archive=None, session=None):
    """
    Returns Location object for the given combination of location, component
    and archive

    @type location: string
    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}

    @type component: string
    @param component: the component name (if None, no restriction applied)

    @type archive: string
    @param archive: the archive name (if None, no restriction applied)

    @rtype: Location / None
    @return: Either a Location object or None if one can't be found
    """
    q = session.query(Location).filter_by(path=location)

    if archive is not None:
        q = q.join(Archive).filter_by(archive_name=archive)

    if component is not None:
        q = q.join(Component).filter_by(component_name=component)

    # Restored: the try/one()/None scaffolding was missing from the listing.
    try:
        return q.one()
    except NoResultFound:
        return None
1934 __all__.append('get_location')
1936 ################################################################################
class Maintainer(ORMObject):
    """
    ORM class for a package maintainer (row of the maintainer table).
    """
    def __init__(self, name = None):
        # Restored: 'self.name = name' was missing from the listing.
        self.name = name

    def properties(self):
        return ['name', 'maintainer_id']

    def not_null_constraints(self):
        # Restored: the return value was missing from the listing.
        return ['name']

    def get_split_maintainer(self):
        # empty tuple-of-strings when no name is set; otherwise delegate to
        # the shared fix_maintainer() helper
        if not hasattr(self, 'name') or self.name is None:
            return ('', '', '', '')

        return fix_maintainer(self.name.strip())
1954 __all__.append('Maintainer')
def get_or_set_maintainer(name, session=None):
    """
    Returns Maintainer object for given maintainer name.

    If no matching maintainer name is found, a row is inserted.

    @type name: string
    @param name: The maintainer name to add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied).  If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for committing.
    A flush will be performed either way.

    @rtype: Maintainer
    @return: the Maintainer object for the given maintainer
    """
    q = session.query(Maintainer).filter_by(name=name)
    try:
        ret = q.one()
    except NoResultFound:
        # unknown maintainer: insert and flush/commit so the row exists
        maintainer = Maintainer()
        maintainer.name = name
        session.add(maintainer)
        session.commit_or_flush()
        # Restored: 'ret = maintainer' and the return were missing from
        # the listing.
        ret = maintainer

    return ret
1988 __all__.append('get_or_set_maintainer')
def get_maintainer(maintainer_id, session=None):
    """
    Return the name of the maintainer behind C{maintainer_id} or None if that
    maintainer_id is invalid.

    @type maintainer_id: int
    @param maintainer_id: the id of the maintainer

    @rtype: Maintainer
    @return: the Maintainer with this C{maintainer_id}
    """
    # simple primary-key lookup
    return session.query(Maintainer).get(maintainer_id)
2005 __all__.append('get_maintainer')
2007 ################################################################################
class NewComment(object):
    """
    ORM class for a reviewer comment on a package sitting in the NEW queue.
    Attributes (package, version, comment_id, ...) come from the mapper.
    """
    # Restored: the listing had lost the empty __init__ body and the
    # __repr__ definition line.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '''<NewComment for '%s %s' (%s)>''' % (self.package, self.version, self.comment_id)
2016 __all__.append('NewComment')
def has_new_comment(package, version, session=None):
    """
    Returns true if the given combination of C{package}, C{version} has a comment.

    @type package: string
    @param package: name of the package

    @type version: string
    @param version: package version

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: boolean
    @return: true/false
    """
    q = session.query(NewComment)
    q = q.filter_by(package=package)
    q = q.filter_by(version=version)

    return bool(q.count() > 0)
2043 __all__.append('has_new_comment')
def get_new_comments(package=None, version=None, comment_id=None, session=None):
    """
    Returns (possibly empty) list of NewComment objects for the given
    parameters

    @type package: string (optional)
    @param package: name of the package

    @type version: string (optional)
    @param version: package version

    @type comment_id: int (optional)
    @param comment_id: An id of a comment

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: A (possibly empty) list of NewComment objects will be returned
    """
    # each filter applies only when the corresponding parameter was given
    q = session.query(NewComment)
    if package is not None: q = q.filter_by(package=package)
    if version is not None: q = q.filter_by(version=version)
    if comment_id is not None: q = q.filter_by(comment_id=comment_id)

    # Restored: the final 'return q.all()' was missing from the listing.
    return q.all()
2075 __all__.append('get_new_comments')
2077 ################################################################################
class Override(ORMObject):
    """
    ORM class for an override entry (section/priority assignment of a
    package in a suite/component).
    """
    def __init__(self, package = None, suite = None, component = None, overridetype = None, \
        section = None, priority = None):
        self.package = package
        # Restored: 'self.suite = suite' was missing from the listing.
        self.suite = suite
        self.component = component
        self.overridetype = overridetype
        self.section = section
        self.priority = priority

    def properties(self):
        # Restored continuation: 'priority' completes the visible list.
        return ['package', 'suite', 'component', 'overridetype', 'section', \
            'priority']

    def not_null_constraints(self):
        return ['package', 'suite', 'component', 'overridetype', 'section']
2096 __all__.append('Override')
def get_override(package, suite=None, component=None, overridetype=None, session=None):
    """
    Returns Override object for the given parameters

    @type package: string
    @param package: The name of the package

    @type suite: string, list or None
    @param suite: The name of the suite (or suites if a list) to limit to.  If
                  None, don't limit.  Defaults to None.

    @type component: string, list or None
    @param component: The name of the component (or components if a list) to
                      limit to.  If None, don't limit.  Defaults to None.

    @type overridetype: string, list or None
    @param overridetype: The name of the overridetype (or overridetypes if a list) to
                         limit to.  If None, don't limit.  Defaults to None.

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: A (possibly empty) list of Override objects will be returned
    """
    q = session.query(Override)
    q = q.filter_by(package=package)

    # each optional restriction accepts a single name or a list of names
    if suite is not None:
        if not isinstance(suite, list): suite = [suite]
        q = q.join(Suite).filter(Suite.suite_name.in_(suite))

    if component is not None:
        if not isinstance(component, list): component = [component]
        q = q.join(Component).filter(Component.component_name.in_(component))

    if overridetype is not None:
        if not isinstance(overridetype, list): overridetype = [overridetype]
        q = q.join(OverrideType).filter(OverrideType.overridetype.in_(overridetype))

    # Restored: the final 'return q.all()' was missing from the listing.
    return q.all()
2143 __all__.append('get_override')
2146 ################################################################################
class OverrideType(ORMObject):
    """
    ORM class for an override type name (row of the override_type table).
    """

    def __init__(self, overridetype = None):
        self.overridetype = overridetype

    def properties(self):
        # attribute names exposed through ORMObject's generic interface
        return ['overridetype', 'overridetype_id', 'overrides_count']

    def not_null_constraints(self):
        # columns that must never be NULL
        return ['overridetype']
2158 __all__.append('OverrideType')
def get_override_type(override_type, session=None):
    """
    Returns OverrideType object for given C{override type}.

    @type override_type: string
    @param override_type: The name of the override type

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: OverrideType
    @return: the database id for the given override type
    """
    q = session.query(OverrideType).filter_by(overridetype=override_type)

    # Restored: the try/one()/None scaffolding was missing from the listing.
    try:
        return q.one()
    except NoResultFound:
        return None
2183 __all__.append('get_override_type')
2185 ################################################################################
class PolicyQueue(object):
    """
    ORM class for a policy queue (e.g. NEW); attributes such as queue_name
    and path are provided by the SQLAlchemy mapper.
    """
    # Restored: the listing had lost the empty __init__ body and the
    # __repr__ definition line.
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<PolicyQueue %s>' % self.queue_name
2194 __all__.append('PolicyQueue')
def get_policy_queue(queuename, session=None):
    """
    Returns PolicyQueue object for given C{queue name}

    @type queuename: string
    @param queuename: The name of the queue

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: PolicyQueue
    @return: PolicyQueue object for the given queue
    """
    q = session.query(PolicyQueue).filter_by(queue_name=queuename)

    # Restored: the try/one()/None scaffolding was missing from the listing.
    try:
        return q.one()
    except NoResultFound:
        return None
2219 __all__.append('get_policy_queue')
def get_policy_queue_from_path(pathname, session=None):
    """
    Returns PolicyQueue object for given C{path name}

    @type pathname: string
    @param pathname: The path

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: PolicyQueue
    @return: PolicyQueue object for the given queue
    """
    # Doc fix: the epydoc fields said '@type queuename' but the parameter
    # is 'pathname'.
    q = session.query(PolicyQueue).filter_by(path=pathname)

    # Restored: the try/one()/None scaffolding was missing from the listing.
    try:
        return q.one()
    except NoResultFound:
        return None
2244 __all__.append('get_policy_queue_from_path')
2246 ################################################################################
class Priority(ORMObject):
    """
    ORM class for a package priority (row of the priority table); compares
    equal to plain strings naming the priority.
    """
    def __init__(self, priority = None, level = None):
        self.priority = priority
        # Restored: 'self.level = level' was missing from the listing.
        self.level = level

    def properties(self):
        return ['priority', 'priority_id', 'level', 'overrides_count']

    def not_null_constraints(self):
        return ['priority', 'level']

    def __eq__(self, val):
        if isinstance(val, str):
            return (self.priority == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.priority != val)
        # This signals to use the normal comparison operator
        return NotImplemented
2271 __all__.append('Priority')
def get_priority(priority, session=None):
    """
    Returns Priority object for given C{priority name}.

    @type priority: string
    @param priority: The name of the priority

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Priority
    @return: Priority object for the given priority
    """
    q = session.query(Priority).filter_by(priority=priority)

    # Restored: the try/one()/None scaffolding was missing from the listing.
    try:
        return q.one()
    except NoResultFound:
        return None
2296 __all__.append('get_priority')
def get_priorities(session=None):
    """
    Returns dictionary of priority names -> id mappings

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: dictionary
    @return: dictionary of priority names -> id mappings
    """
    # Restored: accumulator init, loop header and return were missing from
    # the listing.
    ret = {}
    q = session.query(Priority)
    for x in q.all():
        ret[x.priority] = x.priority_id

    return ret
2318 __all__.append('get_priorities')
2320 ################################################################################
class Section(ORMObject):
    """
    ORM class for a package section (row of the section table); compares
    equal to plain strings naming the section.
    """
    def __init__(self, section = None):
        self.section = section

    def properties(self):
        return ['section', 'section_id', 'overrides_count']

    def not_null_constraints(self):
        # Restored: the return value was missing from the listing.
        return ['section']

    def __eq__(self, val):
        if isinstance(val, str):
            return (self.section == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.section != val)
        # This signals to use the normal comparison operator
        return NotImplemented
2344 __all__.append('Section')
def get_section(section, session=None):
    """
    Returns Section object for given C{section name}.

    @type section: string
    @param section: The name of the section

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Section
    @return: Section object for the given section name
    """
    q = session.query(Section).filter_by(section=section)

    # Restored: the try/one()/None scaffolding was missing from the listing.
    try:
        return q.one()
    except NoResultFound:
        return None
2369 __all__.append('get_section')
def get_sections(session=None):
    """
    Returns dictionary of section names -> id mappings

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: dictionary
    @return: dictionary of section names -> id mappings
    """
    # Restored: accumulator init, loop header and return were missing from
    # the listing.
    ret = {}
    q = session.query(Section)
    for x in q.all():
        ret[x.section] = x.section_id

    return ret
2391 __all__.append('get_sections')
2393 ################################################################################
class SrcContents(ORMObject):
    """
    ORM class associating one contained file name with a source package.
    """
    def __init__(self, file = None, source = None):
        # Restored: 'self.file = file' was missing from the listing.
        self.file = file
        self.source = source

    def properties(self):
        return ['file', 'source']
2403 __all__.append('SrcContents')
2405 ################################################################################
2407 from debian.debfile import Deb822
2409 # Temporary Deb822 subclass to fix bugs with : handling; see #597249
2410 class Dak822(Deb822):
# NOTE(review): this listing is missing many lines of the parser loop
# (original numbering jumps over 2417, 2419, 2422-2424, 2427-2428, 2430,
# 2432-2434, 2437-2438, 2440-2441, 2443, 2445-2447, 2449-2451, 2453 and
# 2455-2457): the 'if m:' guards, 'continue' statements, curkey/content
# initialisation and the final flush of the last field are not visible.
# Recover the full text from VCS before editing.
2411 def _internal_parser(self, sequence, fields=None):
2412 # The key is non-whitespace, non-colon characters before any colon.
2413 key_part = r"^(?P<key>[^: \t\n\r\f\v]+)\s*:\s*"
# three line shapes: 'Key: value', bare 'Key:' and indented continuation
2414 single = re.compile(key_part + r"(?P<data>\S.*?)\s*$")
2415 multi = re.compile(key_part + r"$")
2416 multidata = re.compile(r"^\s(?P<data>.+?)\s*$")
2418 wanted_field = lambda f: fields is None or f in fields
# accept either a whole string or an iterable of lines
2420 if isinstance(sequence, basestring):
2421 sequence = sequence.splitlines()
2425 for line in self.gpg_stripped_paragraph(sequence):
2426 m = single.match(line)
# a new 'Key: value' line flushes the previously collected field
2429 self[curkey] = content
2431 if not wanted_field(m.group('key')):
2435 curkey = m.group('key')
2436 content = m.group('data')
2439 m = multi.match(line)
2442 self[curkey] = content
2444 if not wanted_field(m.group('key')):
2448 curkey = m.group('key')
# continuation lines are appended verbatim (see the XXX below)
2452 m = multidata.match(line)
2454 content += '\n' + line # XXX not m.group('data')?
2458 self[curkey] = content
2461 class DBSource(ORMObject):
# ORM class for a source package (row of the 'source' table).
# NOTE(review): several lines are missing from this listing (original
# numbering jumps over 2470-2472, 2474, 2479, 2483, 2485, 2487-2488, 2490,
# 2493-2494, 2496, 2499, 2501, 2504-2505, 2509, 2512 and 2516-2518):
# property/def headers, docstring delimiters, the try: in scan_contents,
# the fileset accumulation and the return statements are not visible.
# Recover the full class from VCS before editing.
2462 def __init__(self, source = None, version = None, maintainer = None, \
2463 changedby = None, poolfile = None, install_date = None):
2464 self.source = source
2465 self.version = version
2466 self.maintainer = maintainer
2467 self.changedby = changedby
2468 self.poolfile = poolfile
2469 self.install_date = install_date
2473 return self.source_id
2475 def properties(self):
2476 return ['source', 'source_id', 'maintainer', 'changedby', \
2477 'fingerprint', 'poolfile', 'version', 'suites_count', \
2478 'install_date', 'binaries_count', 'uploaders_count']
2480 def not_null_constraints(self):
# NOTE(review): 'install_date' appears twice in this list — looks like a
# harmless duplicate worth cleaning up.
2481 return ['source', 'version', 'install_date', 'maintainer', \
2482 'changedby', 'poolfile', 'install_date']
2484 def read_control_fields(self):
2486 Reads the control information from a dsc
2489 @return: fields is the dsc information in a dictionary form
2491 fullpath = self.poolfile.fullpath
2492 fields = Dak822(open(self.poolfile.fullpath, 'r'))
# key/value metadata exposed as a dict-like proxy
2495 metadata = association_proxy('key', 'value')
2497 def get_component_name(self):
2498 return self.poolfile.location.component.component_name
2500 def scan_contents(self):
2502 Returns a set of names for non directories. The path names are
2503 normalized after converting them from either utf-8 or iso8859-1
2506 fullpath = self.poolfile.fullpath
# local import avoids a circular dependency with daklib.contents
2507 from daklib.contents import UnpackedSource
2508 unpacked = UnpackedSource(fullpath)
2510 for name in unpacked.get_all_filenames():
2511 # enforce proper utf-8 encoding
2513 name.decode('utf-8')
2514 except UnicodeDecodeError:
# fall back: treat as latin-1 and re-encode to utf-8
2515 name = name.decode('iso8859-1').encode('utf-8')
2519 __all__.append('DBSource')
2522 def source_exists(source, source_version, suites = ["any"], session=None):
# NOTE(review): lines missing from this listing (original numbering jumps
# over 2523, 2528, 2531, 2534-2535, 2537, 2541-2549, 2552, 2556, 2559,
# 2563, 2565-2568 and 2570-2573): the docstring delimiters, the "any"-suite
# handling, the per-suite count check and the final return are not visible.
# Recover from VCS before editing.
# NOTE(review): suites=["any"] is a mutable default argument — safe only if
# never mutated inside the function; verify before refactoring.
2524 Ensure that source exists somewhere in the archive for the binary
2525 upload being processed.
2526 1. exact match => 1.0-3
2527 2. bin-only NMU => 1.0-3+b1 , 1.0-3.1+b1
2529 @type source: string
2530 @param source: source name
2532 @type source_version: string
2533 @param source_version: expected source version
2536 @param suites: list of suites to check in, default I{any}
2538 @type session: Session
2539 @param session: Optional SQLA session object (a temporary one will be
2540 generated if not supplied)
2543 @return: returns 1 if a source with expected version is found, otherwise 0
# strip a +bN binary-NMU suffix so the matching plain source version counts
2550 from daklib.regexes import re_bin_only_nmu
2551 orig_source_version = re_bin_only_nmu.sub('', source_version)
2553 for suite in suites:
2554 q = session.query(DBSource).filter_by(source=source). \
2555 filter(DBSource.version.in_([source_version, orig_source_version]))
2557 # source must exist in 'suite' or a suite that is enhanced by 'suite'
2558 s = get_suite(suite, session)
2560 enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
2561 considered_suites = [ vc.reference for vc in enhances_vcs ]
2562 considered_suites.append(s)
2564 q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))
2569 # No source found so return not ok
2574 __all__.append('source_exists')
# Return every Suite that currently contains a source package of the given
# name.  The docstring quotes and @type/@rtype lines are missing from this
# paste; `session` is presumably supplied by a session wrapper decorator.
2577 def get_suites_source_in(source, session=None):
2579 Returns list of Suite objects which given C{source} name is in
2582 @param source: DBSource package name to search for
2585 @return: list of Suite objects for the given source
2588 return session.query(Suite).filter(Suite.sources.any(source=source)).all()
2590 __all__.append('get_suites_source_in')
# Look up DBSource rows by name, optionally narrowed by exact version and by
# the dm_upload_allowed flag.  The final `return q.all()` style line is
# missing from this paste.
2593 def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None):
2595 Returns list of DBSource objects for given C{source} name and other parameters
2598 @param source: DBSource package name to search for
2600 @type version: str or None
2601 @param version: DBSource version name to search for or None if not applicable
2603 @type dm_upload_allowed: bool
2604 @param dm_upload_allowed: If None, no effect. If True or False, only
2605 return packages with that dm_upload_allowed setting
2607 @type session: Session
2608 @param session: Optional SQL session object (a temporary one will be
2609 generated if not supplied)
2612 @return: list of DBSource objects for the given name (may be empty)
2615 q = session.query(DBSource).filter_by(source=source)
# Each optional filter only applies when the caller passed a value.
2617 if version is not None:
2618 q = q.filter_by(version=version)
2620 if dm_upload_allowed is not None:
2621 q = q.filter_by(dm_upload_allowed=dm_upload_allowed)
2625 __all__.append('get_sources_from_name')
2627 # FIXME: This function fails badly if it finds more than 1 source package and
2628 # its implementation is trivial enough to be inlined.
# Fetch the single DBSource for (source, suite); the try/except wrapper and
# return lines around the query are missing from this paste.
2630 def get_source_in_suite(source, suite, session=None):
2632 Returns a DBSource object for a combination of C{source} and C{suite}.
2634 - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
2635 - B{suite} - a suite name, eg. I{unstable}
2637 @type source: string
2638 @param source: source package name
2641 @param suite: the suite name
2644 @return: the version for I{source} in I{suite}
# Delegates to Suite.get_sources(); .one() presumably raises for >1 match,
# which is what the FIXME above refers to.
2648 q = get_suite(suite, session).get_sources(source)
2651 except NoResultFound:
2654 __all__.append('get_source_in_suite')
# Copy the control fields of a DBBinary/DBSource into its metadata mapping
# and flush.  The nested try: lines are missing from this paste.
2657 def import_metadata_into_db(obj, session=None):
2659 This routine works on either DBBinary or DBSource objects and imports
2660 their metadata into the database
2662 fields = obj.read_control_fields()
2663 for k in fields.keys():
# First attempt: plain str() of the value.
2666 val = str(fields[k])
2667 except UnicodeEncodeError:
2668 # Fall back to UTF-8
2670 val = fields[k].encode('utf-8')
2671 except UnicodeEncodeError:
2672 # Finally try iso8859-1
2673 val = fields[k].encode('iso8859-1')
2674 # Otherwise we allow the exception to percolate up and we cause
2675 # a reject as someone is playing silly buggers
# Keys are interned via get_or_set_metadatakey; values overwrite in place.
2677 obj.metadata[get_or_set_metadatakey(k, session)] = val
2679 session.commit_or_flush()
2681 __all__.append('import_metadata_into_db')
2684 ################################################################################
def split_uploaders(uploaders_list):
    """
    Yield each individual uploader from an Uploaders field value.

    Splitting happens only on commas that directly follow a closing
    angle bracket (optionally separated by spaces), because the name
    part of an address may itself contain commas.
    """
    # Turn every ">  ," separator into a tab, then split on tabs so that
    # commas inside display names survive.
    normalized = re.sub(">[ ]*,", ">\t", uploaders_list)
    for entry in normalized.split("\t"):
        yield entry.strip()
# Record an uploaded .dsc in the database: create the DBSource row, attach
# its pool file(s), suites, dsc_files and uploaders.  Many lines are missing
# from this paste (DBSource/DSCFile construction, session.add/flush calls,
# the pfs list initialisation, loop break/continue lines).
2696 def add_dsc_to_db(u, filename, session=None):
2697 entry = u.pkg.files[filename]
2701 source.source = u.pkg.dsc["source"]
2702 source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
2703 source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
2704 # If Changed-By isn't available, fall back to maintainer
2705 if u.pkg.changes.has_key("changed-by"):
2706 source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
2708 source.changedby_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
2709 source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
2710 source.install_date = datetime.now().date()
2712 dsc_component = entry["component"]
2713 dsc_location_id = entry["location id"]
# DM-Upload-Allowed is only honoured when it is exactly "yes".
2715 source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
2717 # Set up a new poolfile if necessary
2718 if not entry.has_key("files id") or not entry["files id"]:
# NOTE(review): `filename` is rebound here to the pool-relative path.
2719 filename = entry["pool name"] + filename
2720 poolfile = add_poolfile(filename, entry, dsc_location_id, session)
2722 pfs.append(poolfile)
2723 entry["files id"] = poolfile.file_id
2725 source.poolfile_id = entry["files id"]
2728 suite_names = u.pkg.changes["distribution"].keys()
2729 source.suites = session.query(Suite). \
2730 filter(Suite.suite_name.in_(suite_names)).all()
2732 # Add the source files to the DB (files and dsc_files)
2734 dscfile.source_id = source.source_id
2735 dscfile.poolfile_id = entry["files id"]
2736 session.add(dscfile)
# One DSCFile row per file referenced by the .dsc.
2738 for dsc_file, dentry in u.pkg.dsc_files.items():
2740 df.source_id = source.source_id
2742 # If the .orig tarball is already in the pool, it's
2743 # files id is stored in dsc_files by check_dsc().
2744 files_id = dentry.get("files id", None)
2746 # Find the entry in the files hash
2747 # TODO: Bail out here properly
2749 for f, e in u.pkg.files.items():
2754 if files_id is None:
2755 filename = dfentry["pool name"] + dsc_file
# Try to match an existing pool file by name/size/md5sum first.
2757 (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
2758 # FIXME: needs to check for -1/-2 and or handle exception
2759 if found and obj is not None:
2760 files_id = obj.file_id
2763 # If still not found, add it
2764 if files_id is None:
2765 # HACK: Force sha1sum etc into dentry
2766 dentry["sha1sum"] = dfentry["sha1sum"]
2767 dentry["sha256sum"] = dfentry["sha256sum"]
2768 poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
2769 pfs.append(poolfile)
2770 files_id = poolfile.file_id
2772 poolfile = get_poolfile_by_id(files_id, session)
2773 if poolfile is None:
2774 utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
2775 pfs.append(poolfile)
2777 df.poolfile_id = files_id
2780 # Add the src_uploaders to the DB
2782 session.refresh(source)
# Maintainer is always an uploader; Uploaders entries are appended on top.
2783 source.uploaders = [source.maintainer]
2784 if u.pkg.dsc.has_key("uploaders"):
2785 for up in split_uploaders(u.pkg.dsc["uploaders"]):
2786 source.uploaders.append(get_or_set_maintainer(up, session))
2790 return source, dsc_component, dsc_location_id, pfs
2792 __all__.append('add_dsc_to_db')
# Record an uploaded .deb/.udeb in the database and link it to its source.
# Lines are missing from this paste (DBBinary construction, else: branches,
# session.add/flush, Config() setup).
2795 def add_deb_to_db(u, filename, session=None):
2797 Contrary to what you might expect, this routine deals with both
2798 debs and udebs. That info is in 'dbtype', whilst 'type' is
2799 'deb' for both of them
2802 entry = u.pkg.files[filename]
2805 bin.package = entry["package"]
2806 bin.version = entry["version"]
2807 bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
2808 bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
2809 bin.arch_id = get_architecture(entry["architecture"], session).arch_id
2810 bin.binarytype = entry["dbtype"]
# `filename` is rebound to the pool-relative path from here on.
2813 filename = entry["pool name"] + filename
2814 fullpath = os.path.join(cnf["Dir::Pool"], filename)
2815 if not entry.get("location id", None):
2816 entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
2818 if entry.get("files id", None):
# NOTE(review): bin.poolfile_id is read here before it is assigned on the
# next line — looks like the lookup should use entry["files id"]; confirm
# against the full file before changing.
2819 poolfile = get_poolfile_by_id(bin.poolfile_id)
2820 bin.poolfile_id = entry["files id"]
2822 poolfile = add_poolfile(filename, entry, entry["location id"], session)
2823 bin.poolfile_id = entry["files id"] = poolfile.file_id
2826 bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
2828 # If we couldn't find anything and the upload contains Arch: source,
2829 # fall back to trying the source package, source version uploaded
2830 # This maintains backwards compatibility with previous dak behaviour
2831 # and deals with slightly broken binary debs which don't properly
2832 # declare their source package name
2833 if len(bin_sources) == 0:
2834 if u.pkg.changes["architecture"].has_key("source") \
2835 and u.pkg.dsc.has_key("source") and u.pkg.dsc.has_key("version"):
2836 bin_sources = get_sources_from_name(u.pkg.dsc["source"], u.pkg.dsc["version"], session=session)
2838 # If we couldn't find a source here, we reject
2839 # TODO: Fix this so that it doesn't kill process-upload and instead just
2840 # performs a reject. To be honest, we should probably spot this
2841 # *much* earlier than here
2842 if len(bin_sources) != 1:
2843 raise NoSourceFieldError("Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
2844 (bin.package, bin.version, entry["architecture"],
2845 filename, bin.binarytype, u.pkg.changes["fingerprint"]))
2847 bin.source_id = bin_sources[0].source_id
# Built-Using references must each resolve to exactly one known source.
2849 if entry.has_key("built-using"):
2850 for srcname, version in entry["built-using"]:
2851 exsources = get_sources_from_name(srcname, version, session=session)
2852 if len(exsources) != 1:
2853 raise NoSourceFieldError("Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
2854 (srcname, version, bin.package, bin.version, entry["architecture"],
2855 filename, bin.binarytype, u.pkg.changes["fingerprint"]))
2857 bin.extra_sources.append(exsources[0])
2859 # Add and flush object so it has an ID
2862 suite_names = u.pkg.changes["distribution"].keys()
2863 bin.suites = session.query(Suite). \
2864 filter(Suite.suite_name.in_(suite_names)).all()
2868 # Deal with contents - disabled for now
2869 #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
2871 # print "REJECT\nCould not determine contents of package %s" % bin.package
2872 # session.rollback()
2873 # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
2875 return bin, poolfile
2877 __all__.append('add_deb_to_db')
2879 ################################################################################
# Plain ORM holder for a source ACL row; the __repr__ def line is missing
# from this paste.
2881 class SourceACL(object):
2882 def __init__(self, *args, **kwargs):
2886 return '<SourceACL %s>' % self.source_acl_id
2888 __all__.append('SourceACL')
2890 ################################################################################
# Plain ORM holder for a source format row (e.g. "3.0 (quilt)"); the
# __repr__ def line is missing from this paste.
2892 class SrcFormat(object):
2893 def __init__(self, *args, **kwargs):
2897 return '<SrcFormat %s>' % (self.format_name)
2899 __all__.append('SrcFormat')
2901 ################################################################################
# (display label, Suite attribute) pairs used when printing suite details;
# at least one entry (gutter line 2907) is missing from this paste.
2903 SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
2904 ('SuiteID', 'suite_id'),
2905 ('Version', 'version'),
2906 ('Origin', 'origin'),
2908 ('Description', 'description'),
2909 ('Untouchable', 'untouchable'),
2910 ('Announce', 'announce'),
2911 ('Codename', 'codename'),
2912 ('OverrideCodename', 'overridecodename'),
2913 ('ValidTime', 'validtime'),
2914 ('Priority', 'priority'),
2915 ('NotAutomatic', 'notautomatic'),
2916 ('CopyChanges', 'copychanges'),
2917 ('OverrideSuite', 'overridesuite')]
2919 # Why the heck don't we have any UNIQUE constraints in table suite?
2920 # TODO: Add UNIQUE constraints for appropriate columns.
# ORM class for a suite (unstable, testing, ...).  Several lines are missing
# from this paste: the details() def line, docstring quotes, if/else and
# return lines in get_architectures/get_sources/get_overridesuite.
2921 class Suite(ORMObject):
2922 def __init__(self, suite_name = None, version = None):
2923 self.suite_name = suite_name
2924 self.version = version
2926 def properties(self):
2927 return ['suite_name', 'version', 'sources_count', 'binaries_count', \
2930 def not_null_constraints(self):
2931 return ['suite_name']
# Suites compare equal to their name string as well as to other Suites.
2933 def __eq__(self, val):
2934 if isinstance(val, str):
2935 return (self.suite_name == val)
2936 # This signals to use the normal comparison operator
2937 return NotImplemented
2939 def __ne__(self, val):
2940 if isinstance(val, str):
2941 return (self.suite_name != val)
2942 # This signals to use the normal comparison operator
2943 return NotImplemented
# Body of a details-formatting method (its def line is missing here):
# one "Label: value" line per SUITE_FIELDS entry that is set.
2947 for disp, field in SUITE_FIELDS:
2948 val = getattr(self, field, None)
2950 ret.append("%s: %s" % (disp, val))
2952 return "\n".join(ret)
2954 def get_architectures(self, skipsrc=False, skipall=False):
2956 Returns list of Architecture objects
2958 @type skipsrc: boolean
2959 @param skipsrc: Whether to skip returning the 'source' architecture entry
2962 @type skipall: boolean
2963 @param skipall: Whether to skip returning the 'all' architecture entry
2967 @return: list of Architecture objects for the given name (may be empty)
2970 q = object_session(self).query(Architecture).with_parent(self)
2972 q = q.filter(Architecture.arch_string != 'source')
2974 q = q.filter(Architecture.arch_string != 'all')
2975 return q.order_by(Architecture.arch_string).all()
2977 def get_sources(self, source):
2979 Returns a query object representing DBSource that is part of C{suite}.
2981 - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
2983 @type source: string
2984 @param source: source package name
2986 @rtype: sqlalchemy.orm.query.Query
2987 @return: a query of DBSource
2991 session = object_session(self)
2992 return session.query(DBSource).filter_by(source = source). \
# Resolve the override suite: self when unset, otherwise look it up by name.
2995 def get_overridesuite(self):
2996 if self.overridesuite is None:
2999 return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()
3001 __all__.append('Suite')
# Look a Suite up by name; the try/return/one() lines are missing from this
# paste — NoResultFound is presumably mapped to a None return, per the
# docstring below.
3004 def get_suite(suite, session=None):
3006 Returns Suite object for given C{suite name}.
3009 @param suite: The name of the suite
3011 @type session: Session
3012 @param session: Optional SQLA session object (a temporary one will be
3013 generated if not supplied)
3016 @return: Suite object for the requested suite name (None if not present)
3019 q = session.query(Suite).filter_by(suite_name=suite)
3023 except NoResultFound:
3026 __all__.append('get_suite')
3028 ################################################################################
# Convenience wrapper around Suite.get_architectures(); the AttributeError
# handler (hit when get_suite returns None) presumably yields the empty
# list promised by the docstring — its body line is missing from this paste.
3031 def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
3033 Returns list of Architecture objects for given C{suite} name. The list is
3034 empty if suite does not exist.
3037 @param suite: Suite name to search for
3039 @type skipsrc: boolean
3040 @param skipsrc: Whether to skip returning the 'source' architecture entry
3043 @type skipall: boolean
3044 @param skipall: Whether to skip returning the 'all' architecture entry
3047 @type session: Session
3048 @param session: Optional SQL session object (a temporary one will be
3049 generated if not supplied)
3052 @return: list of Architecture objects for the given name (may be empty)
3056 return get_suite(suite, session).get_architectures(skipsrc, skipall)
3057 except AttributeError:
3060 __all__.append('get_suite_architectures')
3062 ################################################################################
# ORM class for a GPG uid.  The __init__ attribute assignments and the
# not_null_constraints return line are missing from this paste.
3064 class Uid(ORMObject):
3065 def __init__(self, uid = None, name = None):
# Uids compare equal to their uid string as well as to other Uids.
3069 def __eq__(self, val):
3070 if isinstance(val, str):
3071 return (self.uid == val)
3072 # This signals to use the normal comparison operator
3073 return NotImplemented
3075 def __ne__(self, val):
3076 if isinstance(val, str):
3077 return (self.uid != val)
3078 # This signals to use the normal comparison operator
3079 return NotImplemented
3081 def properties(self):
3082 return ['uid', 'name', 'fingerprint']
3084 def not_null_constraints(self):
3087 __all__.append('Uid')
# Get-or-create for Uid rows; the try/one()/insert lines are missing from
# this paste.
3090 def get_or_set_uid(uidname, session=None):
3092 Returns uid object for given uidname.
3094 If no matching uidname is found, a row is inserted.
3096 @type uidname: string
3097 @param uidname: The uid to add
3099 @type session: SQLAlchemy
3100 @param session: Optional SQL session object (a temporary one will be
3101 generated if not supplied). If not passed, a commit will be performed at
3102 the end of the function, otherwise the caller is responsible for committing.
3105 @return: the uid object for the given uidname
3108 q = session.query(Uid).filter_by(uid=uidname)
3112 except NoResultFound:
# Flush (or commit, for an internally-created session) so the new row
# gets its id before we return it.
3116 session.commit_or_flush()
3121 __all__.append('get_or_set_uid')
# Resolve the Uid owning a given fingerprint string; the try/one()/return
# lines are missing from this paste.
3124 def get_uid_from_fingerprint(fpr, session=None):
3125 q = session.query(Uid)
3126 q = q.join(Fingerprint).filter_by(fingerprint=fpr)
3130 except NoResultFound:
3133 __all__.append('get_uid_from_fingerprint')
3135 ################################################################################
# Plain ORM holder for an upload block entry; the __repr__ def line is
# missing from this paste.
3137 class UploadBlock(object):
3138 def __init__(self, *args, **kwargs):
3142 return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
3144 __all__.append('UploadBlock')
3146 ################################################################################
# ORM class for a metadata key name; the __init__ assignment and the
# properties/not_null_constraints return lines are missing from this paste.
3148 class MetadataKey(ORMObject):
3149 def __init__(self, key = None):
3152 def properties(self):
3155 def not_null_constraints(self):
3158 __all__.append('MetadataKey')
# Get-or-create for MetadataKey rows; the try/one()/add/return lines are
# missing from this paste.  Docstring fixed to name the actual parameter
# (`keyname`, not `uidname` — copy-paste from get_or_set_uid).
3161 def get_or_set_metadatakey(keyname, session=None):
3163 Returns MetadataKey object for given keyname.
3165 If no matching keyname is found, a row is inserted.
3167 @type keyname: string
3168 @param keyname: The keyname to add
3170 @type session: SQLAlchemy
3171 @param session: Optional SQL session object (a temporary one will be
3172 generated if not supplied). If not passed, a commit will be performed at
3173 the end of the function, otherwise the caller is responsible for committing.
3176 @return: the metadatakey object for the given keyname
3179 q = session.query(MetadataKey).filter_by(key=keyname)
3183 except NoResultFound:
3184 ret = MetadataKey(keyname)
3186 session.commit_or_flush()
3190 __all__.append('get_or_set_metadatakey')
3192 ################################################################################
# ORM class for one (binary, key) -> value metadata row; the key/value
# assignments and the not_null_constraints return line are missing from
# this paste.
3194 class BinaryMetadata(ORMObject):
3195 def __init__(self, key = None, value = None, binary = None):
3198 self.binary = binary
3200 def properties(self):
3201 return ['binary', 'key', 'value']
3203 def not_null_constraints(self):
3206 __all__.append('BinaryMetadata')
3208 ################################################################################
# ORM class for one (source, key) -> value metadata row; mirrors
# BinaryMetadata.  Some assignment/return lines are missing from this paste.
3210 class SourceMetadata(ORMObject):
3211 def __init__(self, key = None, value = None, source = None):
3214 self.source = source
3216 def properties(self):
3217 return ['source', 'key', 'value']
3219 def not_null_constraints(self):
3222 __all__.append('SourceMetadata')
3224 ################################################################################
# ORM class for a version-check rule between two suites (e.g. 'Enhances',
# 'MustBeNewerThan'); the properties return line is missing from this paste.
3226 class VersionCheck(ORMObject):
3227 def __init__(self, *args, **kwargs):
3230 def properties(self):
3231 #return ['suite_id', 'check', 'reference_id']
3234 def not_null_constraints(self):
3235 return ['suite', 'check', 'reference']
3237 __all__.append('VersionCheck')
# Return the VersionCheck rows for a suite, optionally limited to one check
# type; the suite-is-None guard and the final .all() return line are
# missing from this paste.
3240 def get_version_checks(suite_name, check = None, session = None):
3241 suite = get_suite(suite_name, session)
3243 # Make sure that what we return is iterable so that list comprehensions
3244 # involving this don't cause a traceback
3246 q = session.query(VersionCheck).filter_by(suite=suite)
3248 q = q.filter_by(check=check)
3251 __all__.append('get_version_checks')
3253 ################################################################################
3255 class DBConn(object):
3257 database module init.
3261 def __init__(self, *args, **kwargs):
3262 self.__dict__ = self.__shared_state
3264 if not getattr(self, 'initialised', False):
3265 self.initialised = True
3266 self.debug = kwargs.has_key('debug')
3269 def __setuptables(self):
3276 'binaries_metadata',
3280 'build_queue_files',
3281 'build_queue_policy_files',
3286 'changes_pending_binaries',
3287 'changes_pending_files',
3288 'changes_pending_source',
3289 'changes_pending_files_map',
3290 'changes_pending_source_files',
3291 'changes_pool_files',
3293 'external_overrides',
3294 'extra_src_references',
3303 # TODO: the maintainer column in table override should be removed.
3317 'suite_architectures',
3318 'suite_build_queue_copy',
3319 'suite_src_formats',
3326 'almost_obsolete_all_associations',
3327 'almost_obsolete_src_associations',
3328 'any_associations_source',
3329 'bin_associations_binaries',
3330 'binaries_suite_arch',
3331 'binfiles_suite_component_arch',
3334 'newest_all_associations',
3335 'newest_any_associations',
3337 'newest_src_association',
3338 'obsolete_all_associations',
3339 'obsolete_any_associations',
3340 'obsolete_any_by_all_associations',
3341 'obsolete_src_associations',
3343 'src_associations_bin',
3344 'src_associations_src',
3345 'suite_arch_by_name',
3348 for table_name in tables:
3349 table = Table(table_name, self.db_meta, \
3350 autoload=True, useexisting=True)
3351 setattr(self, 'tbl_%s' % table_name, table)
3353 for view_name in views:
3354 view = Table(view_name, self.db_meta, autoload=True)
3355 setattr(self, 'view_%s' % view_name, view)
3357 def __setupmappers(self):
3358 mapper(Architecture, self.tbl_architecture,
3359 properties = dict(arch_id = self.tbl_architecture.c.id,
3360 suites = relation(Suite, secondary=self.tbl_suite_architectures,
3361 order_by=self.tbl_suite.c.suite_name,
3362 backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
3363 extension = validator)
3365 mapper(Archive, self.tbl_archive,
3366 properties = dict(archive_id = self.tbl_archive.c.id,
3367 archive_name = self.tbl_archive.c.name))
3369 mapper(BuildQueue, self.tbl_build_queue,
3370 properties = dict(queue_id = self.tbl_build_queue.c.id))
3372 mapper(BuildQueueFile, self.tbl_build_queue_files,
3373 properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
3374 poolfile = relation(PoolFile, backref='buildqueueinstances')))
3376 mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
3378 build_queue = relation(BuildQueue, backref='policy_queue_files'),
3379 file = relation(ChangePendingFile, lazy='joined')))
3381 mapper(DBBinary, self.tbl_binaries,
3382 properties = dict(binary_id = self.tbl_binaries.c.id,
3383 package = self.tbl_binaries.c.package,
3384 version = self.tbl_binaries.c.version,
3385 maintainer_id = self.tbl_binaries.c.maintainer,
3386 maintainer = relation(Maintainer),
3387 source_id = self.tbl_binaries.c.source,
3388 source = relation(DBSource, backref='binaries'),
3389 arch_id = self.tbl_binaries.c.architecture,
3390 architecture = relation(Architecture),
3391 poolfile_id = self.tbl_binaries.c.file,
3392 poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
3393 binarytype = self.tbl_binaries.c.type,
3394 fingerprint_id = self.tbl_binaries.c.sig_fpr,
3395 fingerprint = relation(Fingerprint),
3396 install_date = self.tbl_binaries.c.install_date,
3397 suites = relation(Suite, secondary=self.tbl_bin_associations,
3398 backref=backref('binaries', lazy='dynamic')),
3399 extra_sources = relation(DBSource, secondary=self.tbl_extra_src_references,
3400 backref=backref('extra_binary_references', lazy='dynamic')),
3401 key = relation(BinaryMetadata, cascade='all',
3402 collection_class=attribute_mapped_collection('key'))),
3403 extension = validator)
3405 mapper(BinaryACL, self.tbl_binary_acl,
3406 properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
3408 mapper(BinaryACLMap, self.tbl_binary_acl_map,
3409 properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
3410 fingerprint = relation(Fingerprint, backref="binary_acl_map"),
3411 architecture = relation(Architecture)))
3413 mapper(Component, self.tbl_component,
3414 properties = dict(component_id = self.tbl_component.c.id,
3415 component_name = self.tbl_component.c.name),
3416 extension = validator)
3418 mapper(DBConfig, self.tbl_config,
3419 properties = dict(config_id = self.tbl_config.c.id))
3421 mapper(DSCFile, self.tbl_dsc_files,
3422 properties = dict(dscfile_id = self.tbl_dsc_files.c.id,
3423 source_id = self.tbl_dsc_files.c.source,
3424 source = relation(DBSource),
3425 poolfile_id = self.tbl_dsc_files.c.file,
3426 poolfile = relation(PoolFile)))
3428 mapper(ExternalOverride, self.tbl_external_overrides,
3430 suite_id = self.tbl_external_overrides.c.suite,
3431 suite = relation(Suite),
3432 component_id = self.tbl_external_overrides.c.component,
3433 component = relation(Component)))
3435 mapper(PoolFile, self.tbl_files,
3436 properties = dict(file_id = self.tbl_files.c.id,
3437 filesize = self.tbl_files.c.size,
3438 location_id = self.tbl_files.c.location,
3439 location = relation(Location,
3440 # using lazy='dynamic' in the back
3441 # reference because we have A LOT of
3442 # files in one location
3443 backref=backref('files', lazy='dynamic'))),
3444 extension = validator)
3446 mapper(Fingerprint, self.tbl_fingerprint,
3447 properties = dict(fingerprint_id = self.tbl_fingerprint.c.id,
3448 uid_id = self.tbl_fingerprint.c.uid,
3449 uid = relation(Uid),
3450 keyring_id = self.tbl_fingerprint.c.keyring,
3451 keyring = relation(Keyring),
3452 source_acl = relation(SourceACL),
3453 binary_acl = relation(BinaryACL)),
3454 extension = validator)
3456 mapper(Keyring, self.tbl_keyrings,
3457 properties = dict(keyring_name = self.tbl_keyrings.c.name,
3458 keyring_id = self.tbl_keyrings.c.id))
3460 mapper(DBChange, self.tbl_changes,
3461 properties = dict(change_id = self.tbl_changes.c.id,
3462 poolfiles = relation(PoolFile,
3463 secondary=self.tbl_changes_pool_files,
3464 backref="changeslinks"),
3465 seen = self.tbl_changes.c.seen,
3466 source = self.tbl_changes.c.source,
3467 binaries = self.tbl_changes.c.binaries,
3468 architecture = self.tbl_changes.c.architecture,
3469 distribution = self.tbl_changes.c.distribution,
3470 urgency = self.tbl_changes.c.urgency,
3471 maintainer = self.tbl_changes.c.maintainer,
3472 changedby = self.tbl_changes.c.changedby,
3473 date = self.tbl_changes.c.date,
3474 version = self.tbl_changes.c.version,
3475 files = relation(ChangePendingFile,
3476 secondary=self.tbl_changes_pending_files_map,
3477 backref="changesfile"),
3478 in_queue_id = self.tbl_changes.c.in_queue,
3479 in_queue = relation(PolicyQueue,
3480 primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
3481 approved_for_id = self.tbl_changes.c.approved_for))
3483 mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
3484 properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
3486 mapper(ChangePendingFile, self.tbl_changes_pending_files,
3487 properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
3488 filename = self.tbl_changes_pending_files.c.filename,
3489 size = self.tbl_changes_pending_files.c.size,
3490 md5sum = self.tbl_changes_pending_files.c.md5sum,
3491 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
3492 sha256sum = self.tbl_changes_pending_files.c.sha256sum))
3494 mapper(ChangePendingSource, self.tbl_changes_pending_source,
3495 properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
3496 change = relation(DBChange),
3497 maintainer = relation(Maintainer,
3498 primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
3499 changedby = relation(Maintainer,
3500 primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
3501 fingerprint = relation(Fingerprint),
3502 source_files = relation(ChangePendingFile,
3503 secondary=self.tbl_changes_pending_source_files,
3504 backref="pending_sources")))
3507 mapper(KeyringACLMap, self.tbl_keyring_acl_map,
3508 properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
3509 keyring = relation(Keyring, backref="keyring_acl_map"),
3510 architecture = relation(Architecture)))
3512 mapper(Location, self.tbl_location,
3513 properties = dict(location_id = self.tbl_location.c.id,
3514 component_id = self.tbl_location.c.component,
3515 component = relation(Component, backref='location'),
3516 archive_id = self.tbl_location.c.archive,
3517 archive = relation(Archive),
3518 # FIXME: the 'type' column is old cruft and
3519 # should be removed in the future.
3520 archive_type = self.tbl_location.c.type),
3521 extension = validator)
3523 mapper(Maintainer, self.tbl_maintainer,
3524 properties = dict(maintainer_id = self.tbl_maintainer.c.id,
3525 maintains_sources = relation(DBSource, backref='maintainer',
3526 primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.maintainer)),
3527 changed_sources = relation(DBSource, backref='changedby',
3528 primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.changedby))),
3529 extension = validator)
3531 mapper(NewComment, self.tbl_new_comments,
3532 properties = dict(comment_id = self.tbl_new_comments.c.id))
3534 mapper(Override, self.tbl_override,
3535 properties = dict(suite_id = self.tbl_override.c.suite,
3536 suite = relation(Suite, \
3537 backref=backref('overrides', lazy='dynamic')),
3538 package = self.tbl_override.c.package,
3539 component_id = self.tbl_override.c.component,
3540 component = relation(Component, \
3541 backref=backref('overrides', lazy='dynamic')),
3542 priority_id = self.tbl_override.c.priority,
3543 priority = relation(Priority, \
3544 backref=backref('overrides', lazy='dynamic')),
3545 section_id = self.tbl_override.c.section,
3546 section = relation(Section, \
3547 backref=backref('overrides', lazy='dynamic')),
3548 overridetype_id = self.tbl_override.c.type,
3549 overridetype = relation(OverrideType, \
3550 backref=backref('overrides', lazy='dynamic'))))
3552 mapper(OverrideType, self.tbl_override_type,
3553 properties = dict(overridetype = self.tbl_override_type.c.type,
3554 overridetype_id = self.tbl_override_type.c.id))
3556 mapper(PolicyQueue, self.tbl_policy_queue,
3557 properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
3559 mapper(Priority, self.tbl_priority,
3560 properties = dict(priority_id = self.tbl_priority.c.id))
3562 mapper(Section, self.tbl_section,
3563 properties = dict(section_id = self.tbl_section.c.id,
3564 section=self.tbl_section.c.section))
3566 mapper(DBSource, self.tbl_source,
3567 properties = dict(source_id = self.tbl_source.c.id,
3568 version = self.tbl_source.c.version,
3569 maintainer_id = self.tbl_source.c.maintainer,
3570 poolfile_id = self.tbl_source.c.file,
3571 poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
3572 fingerprint_id = self.tbl_source.c.sig_fpr,
3573 fingerprint = relation(Fingerprint),
3574 changedby_id = self.tbl_source.c.changedby,
3575 srcfiles = relation(DSCFile,
3576 primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
3577 suites = relation(Suite, secondary=self.tbl_src_associations,
3578 backref=backref('sources', lazy='dynamic')),
3579 uploaders = relation(Maintainer,
3580 secondary=self.tbl_src_uploaders),
3581 key = relation(SourceMetadata, cascade='all',
3582 collection_class=attribute_mapped_collection('key'))),
3583 extension = validator)
3585 mapper(SourceACL, self.tbl_source_acl,
3586 properties = dict(source_acl_id = self.tbl_source_acl.c.id))
3588 mapper(SrcFormat, self.tbl_src_format,
3589 properties = dict(src_format_id = self.tbl_src_format.c.id,
3590 format_name = self.tbl_src_format.c.format_name))
3592 mapper(Suite, self.tbl_suite,
3593 properties = dict(suite_id = self.tbl_suite.c.id,
3594 policy_queue = relation(PolicyQueue),
3595 copy_queues = relation(BuildQueue,
3596 secondary=self.tbl_suite_build_queue_copy),
3597 srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
3598 backref=backref('suites', lazy='dynamic'))),
3599 extension = validator)
3601 mapper(Uid, self.tbl_uid,
3602 properties = dict(uid_id = self.tbl_uid.c.id,
3603 fingerprint = relation(Fingerprint)),
3604 extension = validator)
3606 mapper(UploadBlock, self.tbl_upload_blocks,
3607 properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
3608 fingerprint = relation(Fingerprint, backref="uploadblocks"),
3609 uid = relation(Uid, backref="uploadblocks")))
3611 mapper(BinContents, self.tbl_bin_contents,
3613 binary = relation(DBBinary,
3614 backref=backref('contents', lazy='dynamic', cascade='all')),
3615 file = self.tbl_bin_contents.c.file))
3617 mapper(SrcContents, self.tbl_src_contents,
3619 source = relation(DBSource,
3620 backref=backref('contents', lazy='dynamic', cascade='all')),
3621 file = self.tbl_src_contents.c.file))
3623 mapper(MetadataKey, self.tbl_metadata_keys,
3625 key_id = self.tbl_metadata_keys.c.key_id,
3626 key = self.tbl_metadata_keys.c.key))
3628 mapper(BinaryMetadata, self.tbl_binaries_metadata,
3630 binary_id = self.tbl_binaries_metadata.c.bin_id,
3631 binary = relation(DBBinary),
3632 key_id = self.tbl_binaries_metadata.c.key_id,
3633 key = relation(MetadataKey),
3634 value = self.tbl_binaries_metadata.c.value))
3636 mapper(SourceMetadata, self.tbl_source_metadata,
3638 source_id = self.tbl_source_metadata.c.src_id,
3639 source = relation(DBSource),
3640 key_id = self.tbl_source_metadata.c.key_id,
3641 key = relation(MetadataKey),
3642 value = self.tbl_source_metadata.c.value))
3644 mapper(VersionCheck, self.tbl_version_check,
3646 suite_id = self.tbl_version_check.c.suite,
3647 suite = relation(Suite, primaryjoin=self.tbl_version_check.c.suite==self.tbl_suite.c.id),
3648 reference_id = self.tbl_version_check.c.reference,
3649 reference = relation(Suite, primaryjoin=self.tbl_version_check.c.reference==self.tbl_suite.c.id, lazy='joined')))
3651 ## Connection functions
3652 def __createconn(self):
3653 from config import Config
3655 if cnf.has_key("DB::Service"):
3656 connstr = "postgresql://service=%s" % cnf["DB::Service"]
3657 elif cnf.has_key("DB::Host"):
3659 connstr = "postgresql://%s" % cnf["DB::Host"]
3660 if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
3661 connstr += ":%s" % cnf["DB::Port"]
3662 connstr += "/%s" % cnf["DB::Name"]
3665 connstr = "postgresql:///%s" % cnf["DB::Name"]
3666 if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
3667 connstr += "?port=%s" % cnf["DB::Port"]
3669 engine_args = { 'echo': self.debug }
3670 if cnf.has_key('DB::PoolSize'):
3671 engine_args['pool_size'] = int(cnf['DB::PoolSize'])
3672 if cnf.has_key('DB::MaxOverflow'):
3673 engine_args['max_overflow'] = int(cnf['DB::MaxOverflow'])
3674 if sa_major_version == '0.6' and cnf.has_key('DB::Unicode') and \
3675 cnf['DB::Unicode'] == 'false':
3676 engine_args['use_native_unicode'] = False
3678 # Monkey patch a new dialect in in order to support service= syntax
3679 import sqlalchemy.dialects.postgresql
3680 from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
3681 class PGDialect_psycopg2_dak(PGDialect_psycopg2):
3682 def create_connect_args(self, url):
3683 if str(url).startswith('postgresql://service='):
3685 servicename = str(url)[21:]
3686 return (['service=%s' % servicename], {})
3688 return PGDialect_psycopg2.create_connect_args(self, url)
3690 sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak
3693 self.db_pg = create_engine(connstr, **engine_args)
3694 self.db_meta = MetaData()
3695 self.db_meta.bind = self.db_pg
3696 self.db_smaker = sessionmaker(bind=self.db_pg,
3700 self.__setuptables()
3701 self.__setupmappers()
3703 except OperationalError as e:
3705 utils.fubar("Cannot connect to database (%s)" % str(e))
3707 self.pid = os.getpid()
3709 def session(self, work_mem = 0):
3711 Returns a new session object. If a work_mem parameter is provided a new
3712 transaction is started and the work_mem parameter is set for this
3713 transaction. The work_mem parameter is measured in MB. A default value
3714 will be used if the parameter is not set.
3716 # reinitialize DBConn in new processes
3717 if self.pid != os.getpid():
3720 session = self.db_smaker()
3722 session.execute("SET LOCAL work_mem TO '%d MB'" % work_mem)
# Publish DBConn as part of this module's public interface.
__all__ += ['DBConn']