5 @contact: Debian FTPMaster <ftpmaster@debian.org>
6 @copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
7 @copyright: 2008-2009 Mark Hymers <mhy@debian.org>
8 @copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
9 @copyright: 2009 Mike O'Connor <stew@debian.org>
10 @license: GNU General Public License version 2 or later
13 # This program is free software; you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation; either version 2 of the License, or
16 # (at your option) any later version.
18 # This program is distributed in the hope that it will be useful,
19 # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 # GNU General Public License for more details.
23 # You should have received a copy of the GNU General Public License
24 # along with this program; if not, write to the Free Software
25 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 ################################################################################
29 # < mhy> I need a funny comment
30 # < sgran> two peanuts were walking down a dark street
31 # < sgran> one was a-salted
32 # * mhy looks up the definition of "funny"
34 ################################################################################
38 from os.path import normpath
50 import simplejson as json
52 from datetime import datetime, timedelta
53 from errno import ENOENT
54 from tempfile import mkstemp, mkdtemp
55 from subprocess import Popen, PIPE
56 from tarfile import TarFile
58 from inspect import getargspec
61 from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
63 from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
64 backref, MapperExtension, EXT_CONTINUE, object_mapper, clear_mappers
65 from sqlalchemy import types as sqltypes
66 from sqlalchemy.orm.collections import attribute_mapped_collection
67 from sqlalchemy.ext.associationproxy import association_proxy
69 # Don't remove this, we re-export the exceptions to scripts which import us
70 from sqlalchemy.exc import *
71 from sqlalchemy.orm.exc import NoResultFound
73 # Only import Config until Queue stuff is changed to store its config
75 from config import Config
76 from textutils import fix_maintainer
77 from dak_exceptions import DBUpdateError, NoSourceFieldError, FileExistsError
# suppress some deprecation warnings in squeeze related to sqlalchemy
# NOTE(review): the final argument of each filterwarnings() call (the
# warning category, presumably SADeprecationWarning) is not visible in
# this chunk — confirm against the full file.
warnings.filterwarnings('ignore', \
    "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
warnings.filterwarnings('ignore', \
    "Predicate of partial index .* ignored during reflection", \
89 ################################################################################
# Patch in support for the debversion field type so that it works during
# NOTE(review): the two UserDefinedType assignments below are normally
# selected by a try/except (0.6 spelling first, 0.5 fallback) that is not
# visible in this chunk; as shown the second assignment would always win.
# that is for sqlalchemy 0.6
UserDefinedType = sqltypes.UserDefinedType
# this one for sqlalchemy 0.5
UserDefinedType = sqltypes.TypeEngine
class DebVersion(UserDefinedType):
    # Custom column type mapping PostgreSQL's 'debversion' to SQLAlchemy.
    # Method bodies are not visible in this chunk.
    def get_col_spec(self):
    def bind_processor(self, dialect):
    # ' = None' is needed for sqlalchemy 0.5:
    def result_processor(self, dialect, coltype = None):
# Register 'debversion' with the reflection machinery for supported SQLA
# versions only; anything newer/older is an explicit hard failure.
sa_major_version = sqlalchemy.__version__[0:3]
if sa_major_version in ["0.5", "0.6", "0.7"]:
    from sqlalchemy.databases import postgres
    postgres.ischema_names['debversion'] = DebVersion
# NOTE(review): an 'else:' presumably precedes this raise in the full file.
raise Exception("dak only ported to SQLA versions 0.5 to 0.7. See daklib/dbconn.py")
119 ################################################################################
# Public re-export list; extended via __all__.append() after each definition.
__all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion']
123 ################################################################################
def session_wrapper(fn):
    Wrapper around common ".., session=None):" handling. If the wrapped
    function is called without passing 'session', we create a local one
    and destroy it when the function ends.
    Also attaches a commit_or_flush method to the session; if we created a
    local session, this is a synonym for session.commit(), otherwise it is a
    synonym for session.flush().
    def wrapped(*args, **kwargs):
        private_transaction = False
        # Find the session object
        session = kwargs.get('session')
        # getargspec() tells us how many positional parameters fn declares;
        # if fewer args were passed, 'session' was omitted entirely.
        if len(args) <= len(getargspec(fn)[0]) - 1:
            # No session specified as last argument or in kwargs
            private_transaction = True
            session = kwargs['session'] = DBConn().session()
        # Session is last argument in args
            session = args[-1] = DBConn().session()
            private_transaction = True
        # commit_or_flush: commit when we own the session, flush otherwise
        # so the caller keeps control of its own transaction.
        if private_transaction:
            session.commit_or_flush = session.commit
            session.commit_or_flush = session.flush
        return fn(*args, **kwargs)
        if private_transaction:
            # We created a session; close it.
    # Preserve the wrapped function's metadata for introspection.
    wrapped.__doc__ = fn.__doc__
    wrapped.func_name = fn.func_name
__all__.append('session_wrapper')
174 ################################################################################
class ORMObject(object):
    ORMObject is a base class for all ORM classes mapped by SQLalchemy. All
    derived classes must implement the properties() method.
    def properties(self):
        This method should be implemented by all derived classes and returns a
        list of the important properties. The properties 'created' and
        'modified' will be added automatically. A suffix '_count' should be
        added to properties that are lists or query objects. The most important
        property name should be returned as the first element in the list
        because it is used by repr().
        Returns a JSON representation of the object based on the properties
        returned from the properties() method.
        # add created and modified
        all_properties = self.properties() + ['created', 'modified']
        for property in all_properties:
            # check for list or query
            if property[-6:] == '_count':
                real_property = property[:-6]
                if not hasattr(self, real_property):
                value = getattr(self, real_property)
                if hasattr(value, '__len__'):
                elif hasattr(value, 'count'):
                    # query (but not during validation)
                    if self.in_validation:
                    value = value.count()
                raise KeyError('Do not understand property %s.' % property)
            if not hasattr(self, property):
            value = getattr(self, property)
            elif isinstance(value, ORMObject):
                # use repr() for ORMObject types
            # we want a string for all other types because json cannot
            data[property] = value
        return json.dumps(data)
        Returns the name of the class.
        return type(self).__name__
        Returns a short string representation of the object using the first
        element from the properties() method.
        primary_property = self.properties()[0]
        value = getattr(self, primary_property)
        return '<%s %s>' % (self.classname(), str(value))
        Returns a human readable form of the object using the properties()
        return '<%s %s>' % (self.classname(), self.json())
    def not_null_constraints(self):
        Returns a list of properties that must be not NULL. Derived classes
        should override this method if needed.
    # %s slots: offending property name, str(self) of the offending object.
    validation_message = \
        "Validation failed because property '%s' must not be empty in object\n%s"
    # Class-level flag: set while str()/json() runs inside validate() so the
    # '_count' branch above skips issuing a query (avoids a recursive flush).
    in_validation = False
        This function validates the not NULL constraints as returned by
        not_null_constraints(). It raises the DBUpdateError exception if
        for property in self.not_null_constraints():
            # TODO: It is a bit awkward that the mapper configuration allow
            # directly setting the numeric _id columns. We should get rid of it
            if hasattr(self, property + '_id') and \
                getattr(self, property + '_id') is not None:
            if not hasattr(self, property) or getattr(self, property) is None:
                # str() might lead to races due to a 2nd flush
                self.in_validation = True
                message = self.validation_message % (property, str(self))
                self.in_validation = False
                raise DBUpdateError(message)
    def get(cls, primary_key, session = None):
        This is a support function that allows getting an object by its primary
        Architecture.get(3[, session])
        instead of the more verbose
        session.query(Architecture).get(3)
        return session.query(cls).get(primary_key)
    def session(self, replace = False):
        Returns the current session that is associated with the object. May
        return None is object is in detached state.
        return object_session(self)
    def clone(self, session = None):
        Clones the current object in a new session and returns the new clone. A
        fresh session is created if the optional session parameter is not
        provided. The function will fail if a session is provided and has
        RATIONALE: SQLAlchemy's session is not thread safe. This method clones
        an existing object to allow several threads to work with their own
        instances of an ORMObject.
        WARNING: Only persistent (committed) objects can be cloned. Changes
        made to the original object that are not committed yet will get lost.
        The session of the new object will always be rolled back to avoid
        if self.session() is None:
            raise RuntimeError( \
                'Method clone() failed for detached object:\n%s' % self)
        self.session().flush()
        # Fetch the same row again through a different session by primary key.
        mapper = object_mapper(self)
        primary_key = mapper.primary_key_from_instance(self)
        object_class = self.__class__
            session = DBConn().session()
        elif len(session.new) + len(session.dirty) + len(session.deleted) > 0:
            raise RuntimeError( \
                'Method clone() failed due to unflushed changes in session.')
        new_object = session.query(object_class).get(primary_key)
        if new_object is None:
            raise RuntimeError( \
                'Method clone() failed for non-persistent object:\n%s' % self)
__all__.append('ORMObject')
351 ################################################################################
class Validator(MapperExtension):
    This class calls the validate() method for each instance for the
    'before_update' and 'before_insert' events. A global object validator is
    used for configuring the individual mappers.
    # Hook bodies not visible in this chunk; presumably each calls
    # instance.validate() and returns EXT_CONTINUE — confirm in full file.
    def before_update(self, mapper, connection, instance):
    def before_insert(self, mapper, connection, instance):
# Single shared instance, passed as extension= when mappers are configured.
validator = Validator()
370 ################################################################################
class Architecture(ORMObject):
    """ORM class for a single architecture row (e.g. 'amd64')."""

    def __init__(self, arch_string = None, description = None):
        self.arch_string = arch_string
        self.description = description

    def __eq__(self, val):
        # Support direct comparison against a plain architecture name.
        if not isinstance(val, str):
            # Anything else: fall back to the default comparison machinery.
            return NotImplemented
        return self.arch_string == val

    def __ne__(self, val):
        # Mirror of __eq__ for the inequality operator.
        if not isinstance(val, str):
            return NotImplemented
        return self.arch_string != val

    def properties(self):
        # 'arch_string' comes first: ORMObject.__repr__ uses element 0.
        return ['arch_string', 'arch_id', 'suites_count']

    def not_null_constraints(self):
        return ['arch_string']

__all__.append('Architecture')
def get_architecture(architecture, session=None):
    Returns database id for given C{architecture}.
    @type architecture: string
    @param architecture: The name of the architecture
    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)
    @return: Architecture object for the given arch (None if not present)
    # Exact (case-sensitive) lookup by architecture name.
    q = session.query(Architecture).filter_by(arch_string=architecture)
    # NOTE(review): the try/return around q.one() is not visible here.
    except NoResultFound:
__all__.append('get_architecture')
# TODO: should be removed because the implementation is too trivial
def get_architecture_suites(architecture, session=None):
    Returns list of Suite objects for given C{architecture} name
    @type architecture: str
    @param architecture: Architecture name to search for
    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)
    @return: list of Suite objects for the given name (may be empty)
    # Delegates to get_architecture() and returns its mapped 'suites' relation.
    return get_architecture(architecture, session).suites
__all__.append('get_architecture_suites')
443 ################################################################################
class Archive(object):
    # Plain ORM row holder for the 'archive' table.
    def __init__(self, *args, **kwargs):
        # NOTE(review): enclosing __repr__ def line is not visible in this chunk.
        return '<Archive %s>' % self.archive_name
__all__.append('Archive')
def get_archive(archive, session=None):
    returns database id for given C{archive}.
    @type archive: string
    @param archive: the name of the arhive
    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)
    @return: Archive object for the given name (None if not present)
    # Archive names are matched case-insensitively: normalise first.
    archive = archive.lower()
    q = session.query(Archive).filter_by(archive_name=archive)
    # NOTE(review): the try/return around q.one() is not visible here.
    except NoResultFound:
__all__.append('get_archive')
481 ################################################################################
class BinContents(ORMObject):
    # Associates one contents file path with the binary package shipping it.
    def __init__(self, file = None, binary = None):
    def properties(self):
        return ['file', 'binary']
__all__.append('BinContents')
493 ################################################################################
def subprocess_setup():
    # Restore the default SIGPIPE disposition for a child process: the
    # Python interpreter installs its own SIGPIPE handler at startup,
    # which is usually not what non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
class DBBinary(ORMObject):
    def __init__(self, package = None, source = None, version = None, \
        maintainer = None, architecture = None, poolfile = None, \
        binarytype = 'deb', fingerprint=None):
        self.package = package
        self.version = version
        self.maintainer = maintainer
        self.architecture = architecture
        self.poolfile = poolfile
        self.binarytype = binarytype
        self.fingerprint = fingerprint
        # NOTE(review): the enclosing @property def (pkid) is not visible here.
        return self.binary_id
    def properties(self):
        return ['package', 'version', 'maintainer', 'source', 'architecture', \
            'poolfile', 'binarytype', 'fingerprint', 'install_date', \
            'suites_count', 'binary_id', 'contents_count', 'extra_sources']
    def not_null_constraints(self):
        return ['package', 'version', 'maintainer', 'source', 'poolfile', \
    # Proxies the key/value metadata association as a plain dict-like view.
    metadata = association_proxy('key', 'value')
    def get_component_name(self):
        # Component is derived from the pool file's location.
        return self.poolfile.location.component.component_name
    def scan_contents(self):
        Yields the contents of the package. Only regular files are yielded and
        the path names are normalized after converting them from either utf-8
        or iso8859-1 encoding. It yields the string ' <EMPTY PACKAGE>' if the
        package does not contain any regular file.
        fullpath = self.poolfile.fullpath
        # Stream the .deb's data member through dpkg-deb into tarfile;
        # subprocess_setup restores SIGPIPE handling in the child.
        dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE,
            preexec_fn = subprocess_setup)
        tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
        for member in tar.getmembers():
            if not member.isdir():
                name = normpath(member.name)
                # enforce proper utf-8 encoding
                except UnicodeDecodeError:
                    name = name.decode('iso8859-1').encode('utf-8')
    def read_control(self):
        Reads the control information from a binary.
        @return: stanza text of the control section.
        fullpath = self.poolfile.fullpath
        deb_file = open(fullpath, 'r')
        stanza = utils.deb_extract_control(deb_file)
    def read_control_fields(self):
        Reads the control information from a binary and return
        @return: fields of the control section as a dictionary.
        stanza = self.read_control()
        return apt_pkg.TagSection(stanza)
__all__.append('DBBinary')
def get_suites_binary_in(package, session=None):
    Returns list of Suite objects which given C{package} name is in
    @param package: DBBinary package name to search for
    @return: list of Suite objects for the given package
    # .any() pushes the package-name filter into an EXISTS subquery.
    return session.query(Suite).filter(Suite.binaries.any(DBBinary.package == package)).all()
__all__.append('get_suites_binary_in')
def get_component_by_package_suite(package, suite_list, arch_list=[], session=None):
    Returns the component name of the newest binary package in suite_list or
    None if no package is found. The result can be optionally filtered by a list
    of architecture names.
    @param package: DBBinary package name to search for
    @type suite_list: list of str
    @param suite_list: list of suite_name items
    @type arch_list: list of str
    @param arch_list: optional list of arch_string items that defaults to []
    @rtype: str or NoneType
    @return: name of component or None
    q = session.query(DBBinary).filter_by(package = package). \
        join(DBBinary.suites).filter(Suite.suite_name.in_(suite_list))
    if len(arch_list) > 0:
        q = q.join(DBBinary.architecture). \
            filter(Architecture.arch_string.in_(arch_list))
    # Newest version wins; first() returns None when nothing matched.
    binary = q.order_by(desc(DBBinary.version)).first()
    # NOTE(review): a 'binary is None' guard presumably precedes this return.
    return binary.get_component_name()
__all__.append('get_component_by_package_suite')
633 ################################################################################
class BinaryACL(object):
    # Plain ORM row holder for the binary ACL table.
    def __init__(self, *args, **kwargs):
        return '<BinaryACL %s>' % self.binary_acl_id
__all__.append('BinaryACL')
644 ################################################################################
class BinaryACLMap(object):
    # Plain ORM row holder for the binary ACL mapping table.
    def __init__(self, *args, **kwargs):
        return '<BinaryACLMap %s>' % self.binary_acl_map_id
__all__.append('BinaryACLMap')
655 ################################################################################
660 ArchiveDir "%(archivepath)s";
661 OverrideDir "%(overridedir)s";
662 CacheDir "%(cachedir)s";
667 Packages::Compress ". bzip2 gzip";
668 Sources::Compress ". bzip2 gzip";
673 bindirectory "incoming"
678 BinOverride "override.sid.all3";
679 BinCacheDB "packages-accepted.db";
681 FileList "%(filelist)s";
684 Packages::Extensions ".deb .udeb";
687 bindirectory "incoming/"
690 BinOverride "override.sid.all3";
691 SrcOverride "override.sid.all3.src";
692 FileList "%(filelist)s";
class BuildQueue(object):
    def __init__(self, *args, **kwargs):
        return '<BuildQueue %s>' % self.queue_name
    def write_metadata(self, starttime, force=False):
        # Do we write out metafiles?
        if not (force or self.generate_metadata):
        session = DBConn().session().object_session(self)
        fl_fd = fl_name = ac_fd = ac_name = None
        # All known architectures except the pseudo-arch 'source'.
        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
        startdir = os.getcwd()
        # Grab files we want to include
        newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
        newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
        # Write file list with newer files
        (fl_fd, fl_name) = mkstemp()
        os.write(fl_fd, '%s\n' % n.fullpath)
        # Write minimal apt.conf
        # TODO: Remove hardcoding from template
        (ac_fd, ac_name) = mkstemp()
        os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
            'cachedir': cnf["Dir::Cache"],
            'overridedir': cnf["Dir::Override"],
        # Run apt-ftparchive generate
        os.chdir(os.path.dirname(ac_name))
        os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
        # Run apt-ftparchive release
        # TODO: Eww - fix this
        bname = os.path.basename(self.path)
        # We have to remove the Release file otherwise it'll be included in the
        os.unlink(os.path.join(bname, 'Release'))
        os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
        # Crude hack with open and append, but this whole section is and should be redone.
        if self.notautomatic:
            release=open("Release", "a")
            release.write("NotAutomatic: yes\n")
        keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
        if cnf.has_key("Dinstall::SigningPubKeyring"):
            keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
        os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
        # Move the files if we got this far
        os.rename('Release', os.path.join(bname, 'Release'))
        os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
        # Clean up any left behind files
    def clean_and_update(self, starttime, Logger, dryrun=False):
        """WARNING: This routine commits for you"""
        session = DBConn().session().object_session(self)
        if self.generate_metadata and not dryrun:
            self.write_metadata(starttime)
        # Grab files older than our execution time
        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
        older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
        Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
        Logger.log(["I: Removing %s from the queue" % o.fullpath])
        os.unlink(o.fullpath)
        # If it wasn't there, don't worry
        if e.errno == ENOENT:
        # TODO: Replace with proper logging call
        Logger.log(["E: Could not remove %s" % o.fullpath])
        # Remove apt-ftparchive output and any file no longer referenced.
        for f in os.listdir(self.path):
            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
            if not self.contains_filename(f):
                fp = os.path.join(self.path, f)
                Logger.log(["I: Would remove unused link %s" % fp])
                Logger.log(["I: Removing unused link %s" % fp])
        Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
    def contains_filename(self, filename):
        @returns True if filename is supposed to be in the queue; False otherwise
        session = DBConn().session().object_session(self)
        # A filename belongs to the queue if either the pool-backed or the
        # policy-queue-backed table references it.
        if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
        elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
    def add_file_from_pool(self, poolfile):
        """Copies a file into the pool. Assumes that the PoolFile object is
        attached to the same SQLAlchemy session as the Queue object is.
        The caller is responsible for committing after calling this function."""
        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
        # Check if we have a file of this name or this ID already
        for f in self.queuefiles:
            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
                (f.poolfile is not None and f.poolfile.filename == poolfile_basename):
                # In this case, update the BuildQueueFile entry so we
                # don't remove it too early
                f.lastused = datetime.now()
                DBConn().session().object_session(poolfile).add(f)
        # Prepare BuildQueueFile object
        qf = BuildQueueFile()
        qf.build_queue_id = self.queue_id
        qf.filename = poolfile_basename
        targetpath = poolfile.fullpath
        queuepath = os.path.join(self.path, poolfile_basename)
        # We need to copy instead of symlink
        utils.copy(targetpath, queuepath)
        # NULL in the fileid field implies a copy
        os.symlink(targetpath, queuepath)
        qf.fileid = poolfile.file_id
        except FileExistsError:
        if not poolfile.identical_to(queuepath):
        # Get the same session as the PoolFile is using and add the qf to it
        DBConn().session().object_session(poolfile).add(qf)
    def add_changes_from_policy_queue(self, policyqueue, changes):
        Copies a changes from a policy queue together with its poolfiles.
        @type policyqueue: PolicyQueue
        @param policyqueue: policy queue to copy the changes from
        @type changes: DBChange
        @param changes: changes to copy to this build queue
        for policyqueuefile in changes.files:
            self.add_file_from_policy_queue(policyqueue, policyqueuefile)
        for poolfile in changes.poolfiles:
            self.add_file_from_pool(poolfile)
    def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
        Copies a file from a policy queue.
        Assumes that the policyqueuefile is attached to the same SQLAlchemy
        session as the Queue object is. The caller is responsible for
        committing after calling this function.
        @type policyqueue: PolicyQueue
        @param policyqueue: policy queue to copy the file from
        @type policyqueuefile: ChangePendingFile
        @param policyqueuefile: file to be added to the build queue
        session = DBConn().session().object_session(policyqueuefile)
        # Is the file already there?
        f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
        f.lastused = datetime.now()
        except NoResultFound:
            pass # continue below
        # We have to add the file.
        f = BuildQueuePolicyFile()
        f.file = policyqueuefile
        f.filename = policyqueuefile.filename
        source = os.path.join(policyqueue.path, policyqueuefile.filename)
        # Always copy files from policy queues as they might move around.
        utils.copy(source, target)
        except FileExistsError:
        if not policyqueuefile.identical_to(target):
__all__.append('BuildQueue')
def get_build_queue(queuename, session=None):
    Returns BuildQueue object for given C{queue name}, creating it if it does not
    @type queuename: string
    @param queuename: The name of the queue
    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)
    @return: BuildQueue object for the given queue
    q = session.query(BuildQueue).filter_by(queue_name=queuename)
    # NOTE(review): the try/return around q.one() is not visible here.
    except NoResultFound:
__all__.append('get_build_queue')
995 ################################################################################
class BuildQueueFile(object):
    BuildQueueFile represents a file in a build queue coming from a pool.
    def __init__(self, *args, **kwargs):
        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
        # Absolute on-disk location inside the owning build queue directory.
        return os.path.join(self.buildqueue.path, self.filename)
__all__.append('BuildQueueFile')
1015 ################################################################################
class BuildQueuePolicyFile(object):
    BuildQueuePolicyFile represents a file in a build queue that comes from a
    policy queue (and not a pool).
    def __init__(self, *args, **kwargs):
    #def filename(self):
    #    return self.file.filename
        # Absolute on-disk location inside the owning build queue directory.
        return os.path.join(self.build_queue.path, self.filename)
__all__.append('BuildQueuePolicyFile')
1036 ################################################################################
class ChangePendingBinary(object):
    # Plain ORM row holder for a pending binary of a queued change.
    def __init__(self, *args, **kwargs):
        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
__all__.append('ChangePendingBinary')
1047 ################################################################################
class ChangePendingFile(object):
    def __init__(self, *args, **kwargs):
        return '<ChangePendingFile %s>' % self.change_pending_file_id
    def identical_to(self, filename):
        compare size and hash with the given file
        @return: true if the given file has the same size and hash as this object; false otherwise
        # Cheap size check first; only hash when sizes agree.
        st = os.stat(filename)
        if self.size != st.st_size:
        # NOTE(review): f is never closed in the visible lines.
        f = open(filename, "r")
        sha256sum = apt_pkg.sha256sum(f)
        if sha256sum != self.sha256sum:
__all__.append('ChangePendingFile')
1076 ################################################################################
class ChangePendingSource(object):
    # Plain ORM row holder for a pending source of a queued change.
    def __init__(self, *args, **kwargs):
        return '<ChangePendingSource %s>' % self.change_pending_source_id
__all__.append('ChangePendingSource')
1087 ################################################################################
class Component(ORMObject):
    """ORM class for a single archive component row (e.g. 'main')."""

    def __init__(self, component_name = None):
        self.component_name = component_name

    def __eq__(self, val):
        # Support direct comparison against a plain component name.
        if not isinstance(val, str):
            # Anything else: fall back to the default comparison machinery.
            return NotImplemented
        return self.component_name == val

    def __ne__(self, val):
        # Mirror of __eq__ for the inequality operator.
        if not isinstance(val, str):
            return NotImplemented
        return self.component_name != val

    def properties(self):
        # 'component_name' comes first: ORMObject.__repr__ uses element 0.
        return ['component_name', 'component_id', 'description',
                'location_count', 'meets_dfsg', 'overrides_count']

    def not_null_constraints(self):
        return ['component_name']

__all__.append('Component')
def get_component(component, session=None):
    Returns database id for given C{component}.
    @type component: string
    @param component: The name of the override type
    @return: the database id for the given component
    # Component names are matched case-insensitively: normalise first.
    component = component.lower()
    q = session.query(Component).filter_by(component_name=component)
    # NOTE(review): the try/return around q.one() is not visible here.
    except NoResultFound:
__all__.append('get_component')
def get_component_names(session=None):
    Returns list of strings of component names.
    @return: list of strings of component names
    # Plain projection of every component row onto its name.
    return [ x.component_name for x in session.query(Component).all() ]
__all__.append('get_component_names')
1151 ################################################################################
class DBConfig(object):
    # Plain ORM row holder for the 'config' table (name/value pairs).
    def __init__(self, *args, **kwargs):
        return '<DBConfig %s>' % self.name
__all__.append('DBConfig')
1162 ################################################################################
def get_or_set_contents_file_id(filename, session=None):
    Returns database id for given filename.
    If no matching file is found, a row is inserted.
    @type filename: string
    @param filename: The filename
    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.
    @return: the database id for the given component
    q = session.query(ContentFilename).filter_by(filename=filename)
    # Look up first; insert only when no row exists yet.
    ret = q.one().cafilename_id
    except NoResultFound:
        cf = ContentFilename()
        cf.filename = filename
        # commit_or_flush is attached by @session_wrapper: commit for a
        # private session, flush otherwise.
        session.commit_or_flush()
        ret = cf.cafilename_id
__all__.append('get_or_set_contents_file_id')
def get_contents(suite, overridetype, section=None, session=None):
    Returns contents for a suite / overridetype combination, limiting
    to a section if not None.
    @param suite: Suite object
    @type overridetype: OverrideType
    @param overridetype: OverrideType object
    @type section: Section
    @param section: Optional section object to limit results to
    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)
    @rtype: ResultsProxy
    @return: ResultsProxy object set up to return tuples of (filename, section,
    # find me all of the contents for a given suite
    contents_q = """SELECT (p.path||'/'||n.file) AS fn,
        FROM content_associations c join content_file_paths p ON (c.filepath=p.id)
        JOIN content_file_names n ON (c.filename=n.id)
        JOIN binaries b ON (b.id=c.binary_pkg)
        JOIN override o ON (o.package=b.package)
        JOIN section s ON (s.id=o.section)
        WHERE o.suite = :suiteid AND o.type = :overridetypeid
        AND b.type=:overridetypename"""
    # Bind parameters for the raw SQL above.
    vals = {'suiteid': suite.suite_id,
        'overridetypeid': overridetype.overridetype_id,
        'overridetypename': overridetype.overridetype}
    # Optionally narrow to a single section.
    if section is not None:
        contents_q += " AND s.id = :sectionid"
        vals['sectionid'] = section.section_id
    contents_q += " ORDER BY fn"
    return session.execute(contents_q, vals)
__all__.append('get_contents')
1248 ################################################################################
# ORM row for the content_file_paths table (directory component of a path).
1250 class ContentFilepath(object):
1251     def __init__(self, *args, **kwargs):
1255         return '<ContentFilepath %s>' % self.filepath
1257 __all__.append('ContentFilepath')
# Get-or-create helper: looks up a path id, inserting the row on NoResultFound.
1260 def get_or_set_contents_path_id(filepath, session=None):
1262     Returns database id for given path.
1264     If no matching file is found, a row is inserted.
1266     @type filepath: string
1267     @param filepath: The filepath
1269     @type session: SQLAlchemy
1270     @param session: Optional SQL session object (a temporary one will be
1271     generated if not supplied). If not passed, a commit will be performed at
1272     the end of the function, otherwise the caller is responsible for commiting.
1275     @return: the database id for the given path
1278     q = session.query(ContentFilepath).filter_by(filepath=filepath)
1281         ret = q.one().cafilepath_id
1282     except NoResultFound:
1283         cf = ContentFilepath()
1284         cf.filepath = filepath
# commit_or_flush: commits only when this function owns the session.
1286         session.commit_or_flush()
1287         ret = cf.cafilepath_id
1291 __all__.append('get_or_set_contents_path_id')
1293 ################################################################################
# ORM row linking a binary package to a (path, filename) contents entry.
1295 class ContentAssociation(object):
1296     def __init__(self, *args, **kwargs):
1300         return '<ContentAssociation %s>' % self.ca_id
1302 __all__.append('ContentAssociation')
1304 def insert_content_paths(binary_id, fullpaths, session=None):
1306     Make sure given path is associated with given binary id
1308     @type binary_id: int
1309     @param binary_id: the id of the binary
1310     @type fullpaths: list
1311     @param fullpaths: the list of paths of the file being associated with the binary
1312     @type session: SQLAlchemy session
1313     @param session: Optional SQLAlchemy session.  If this is passed, the caller
1314     is responsible for ensuring a transaction has begun and committing the
1315     results or rolling back based on the result code.  If not passed, a commit
1316     will be performed at the end of the function, otherwise the caller is
1317     responsible for commiting.
1319     @return: True upon success
# privatetrans tracks whether we created the session (and so must commit/close it).
1322     privatetrans = False
1324         session = DBConn().session()
1329         def generate_path_dicts():
1330             for fullpath in fullpaths:
# Normalize dpkg-style './' prefixes so stored paths are relative without it.
1331                 if fullpath.startswith( './' ):
1332                     fullpath = fullpath[2:]
1334                 yield {'filename':fullpath, 'id': binary_id }
1336         for d in generate_path_dicts():
1337             session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
1346         traceback.print_exc()
1348         # Only rollback if we set up the session ourself
1355 __all__.append('insert_content_paths')
1357 ################################################################################
# ORM row associating a source package with one file listed in its .dsc.
1359 class DSCFile(object):
1360     def __init__(self, *args, **kwargs):
1364         return '<DSCFile %s>' % self.dscfile_id
1366 __all__.append('DSCFile')
1369 def get_dscfiles(dscfile_id=None, source_id=None, poolfile_id=None, session=None):
1371     Returns a list of DSCFiles which may be empty
1373     @type dscfile_id: int (optional)
1374     @param dscfile_id: the dscfile_id of the DSCFiles to find
1376     @type source_id: int (optional)
1377     @param source_id: the source id related to the DSCFiles to find
1379     @type poolfile_id: int (optional)
1380     @param poolfile_id: the poolfile id related to the DSCFiles to find
1383     @return: Possibly empty list of DSCFiles
1386     q = session.query(DSCFile)
# Each filter is optional and applied independently; None means "no restriction".
1388     if dscfile_id is not None:
1389         q = q.filter_by(dscfile_id=dscfile_id)
1391     if source_id is not None:
1392         q = q.filter_by(source_id=source_id)
1394     if poolfile_id is not None:
1395         q = q.filter_by(poolfile_id=poolfile_id)
1399 __all__.append('get_dscfiles')
1401 ################################################################################
# ORM row for an external override: a (package, key) -> value mapping.
1403 class ExternalOverride(ORMObject):
1404     def __init__(self, *args, **kwargs):
1408         return '<ExternalOverride %s = %s: %s>' % (self.package, self.key, self.value)
1410 __all__.append('ExternalOverride')
1412 ################################################################################
# ORM row for a file stored in the archive pool, with size/checksum metadata.
1414 class PoolFile(ORMObject):
1415     def __init__(self, filename = None, location = None, filesize = -1, \
1417         self.filename = filename
1418         self.location = location
1419         self.filesize = filesize
1420         self.md5sum = md5sum
# fullpath: absolute path = location path + pool-relative filename.
1424         return os.path.join(self.location.path, self.filename)
1428         return os.path.basename(self.filename)
1430     def is_valid(self, filesize = -1, md5sum = None):
1431         return self.filesize == long(filesize) and self.md5sum == md5sum
1433     def properties(self):
1434         return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
1435             'sha256sum', 'location', 'source', 'binary', 'last_used']
1437     def not_null_constraints(self):
1438         return ['filename', 'md5sum', 'location']
1440     def identical_to(self, filename):
1442         compare size and hash with the given file
1445         @return: true if the given file has the same size and hash as this object; false otherwise
# Size is checked first as a cheap reject before hashing the whole file.
1447         st = os.stat(filename)
1448         if self.filesize != st.st_size:
1451         f = open(filename, "r")
1452         sha256sum = apt_pkg.sha256sum(f)
1453         if sha256sum != self.sha256sum:
1458 __all__.append('PoolFile')
1461 def check_poolfile(filename, filesize, md5sum, location_id, session=None):
1464     (ValidFileFound [boolean], PoolFile object or None)
1466     @type filename: string
1467     @param filename: the filename of the file to check against the DB
1470     @param filesize: the size of the file to check against the DB
1472     @type md5sum: string
1473     @param md5sum: the md5sum of the file to check against the DB
1475     @type location_id: int
1476     @param location_id: the id of the location to look in
1479     @return: Tuple of length 2.
1480                  - If valid pool file found: (C{True}, C{PoolFile object})
1481                  - If valid pool file not found:
1482                      - (C{False}, C{None}) if no file found
1483                      - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
# Look up the file via the Location's dynamic 'files' relation.
1486     poolfile = session.query(Location).get(location_id). \
1487         files.filter_by(filename=filename).first()
1489     if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
1492     return (valid, poolfile)
1494 __all__.append('check_poolfile')
1496 # TODO: the implementation can trivially be inlined at the place where the
1497 # function is called
1499 def get_poolfile_by_id(file_id, session=None):
1501     Returns a PoolFile objects or None for the given id
1504     @param file_id: the id of the file to look for
1506     @rtype: PoolFile or None
1507     @return: either the PoolFile object or None
# Query.get() returns None when no row has this primary key.
1510     return session.query(PoolFile).get(file_id)
1512 __all__.append('get_poolfile_by_id')
1515 def get_poolfile_like_name(filename, session=None):
1517     Returns an array of PoolFile objects which are like the given name
1519     @type filename: string
1520     @param filename: the filename of the file to check against the DB
1523     @return: array of PoolFile objects
# Matches any pool path ending in '/<filename>' via a LIKE pattern.
1526     # TODO: There must be a way of properly using bind parameters with %FOO%
1527     q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
1531 __all__.append('get_poolfile_like_name')
1534 def add_poolfile(filename, datadict, location_id, session=None):
1536     Add a new file to the pool
1538     @type filename: string
1539     @param filename: filename
1541     @type datadict: dict
1542     @param datadict: dict with needed data
1544     @type location_id: int
1545     @param location_id: database id of the location
1548     @return: the PoolFile object created
# datadict must carry 'size', 'md5sum', 'sha1sum' and 'sha256sum' keys.
1550     poolfile = PoolFile()
1551     poolfile.filename = filename
1552     poolfile.filesize = datadict["size"]
1553     poolfile.md5sum = datadict["md5sum"]
1554     poolfile.sha1sum = datadict["sha1sum"]
1555     poolfile.sha256sum = datadict["sha256sum"]
1556     poolfile.location_id = location_id
1558     session.add(poolfile)
1559     # Flush to get a file id (NB: This is not a commit)
1564 __all__.append('add_poolfile')
1566 ################################################################################
# ORM row for a GPG key fingerprint, linked to a keyring and uid.
1568 class Fingerprint(ORMObject):
1569     def __init__(self, fingerprint = None):
1570         self.fingerprint = fingerprint
1572     def properties(self):
1573         return ['fingerprint', 'fingerprint_id', 'keyring', 'uid', \
1576     def not_null_constraints(self):
1577         return ['fingerprint']
1579 __all__.append('Fingerprint')
# Read-only lookup; returns None when the fingerprint is unknown.
1582 def get_fingerprint(fpr, session=None):
1584     Returns Fingerprint object for given fpr.
1587     @param fpr: The fpr to find / add
1589     @type session: SQLAlchemy
1590     @param session: Optional SQL session object (a temporary one will be
1591     generated if not supplied).
1594     @return: the Fingerprint object for the given fpr or None
1597     q = session.query(Fingerprint).filter_by(fingerprint=fpr)
1601     except NoResultFound:
1606 __all__.append('get_fingerprint')
# Get-or-create variant of get_fingerprint: inserts the row when missing.
1609 def get_or_set_fingerprint(fpr, session=None):
1611     Returns Fingerprint object for given fpr.
1613     If no matching fpr is found, a row is inserted.
1616     @param fpr: The fpr to find / add
1618     @type session: SQLAlchemy
1619     @param session: Optional SQL session object (a temporary one will be
1620     generated if not supplied).  If not passed, a commit will be performed at
1621     the end of the function, otherwise the caller is responsible for commiting.
1622     A flush will be performed either way.
1625     @return: the Fingerprint object for the given fpr
1628     q = session.query(Fingerprint).filter_by(fingerprint=fpr)
1632     except NoResultFound:
1633         fingerprint = Fingerprint()
1634         fingerprint.fingerprint = fpr
1635         session.add(fingerprint)
1636         session.commit_or_flush()
1641 __all__.append('get_or_set_fingerprint')
1643 ################################################################################
1645 # Helper routine for Keyring class
# Builds a display name from the LDAP cn/mn/sn attributes, skipping empty
# values and the "-" placeholder.
1646 def get_ldap_name(entry):
1648     for k in ["cn", "mn", "sn"]:
1650         if ret and ret[0] != "" and ret[0] != "-":
1652     return " ".join(name)
1654 ################################################################################
# ORM class for a GPG keyring plus helpers to parse `gpg --with-colons`
# output and to map keys to uids (from LDAP or from the keyring itself).
1656 class Keyring(object):
1657     gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
1658                      " --with-colons --fingerprint --fingerprint"
1663     def __init__(self, *args, **kwargs):
1667         return '<Keyring %s>' % self.keyring_name
# Reverses gpg's \xNN escaping in uid strings.
1669     def de_escape_gpg_str(self, txt):
1670         esclist = re.split(r'(\\x..)', txt)
1671         for x in range(1,len(esclist),2):
1672             esclist[x] = "%c" % (int(esclist[x][2:],16))
1673         return "".join(esclist)
1675     def parse_address(self, uid):
1676         """parses uid and returns a tuple of real name and email address"""
1678         (name, address) = email.Utils.parseaddr(uid)
1679         name = re.sub(r"\s*[(].*[)]", "", name)
1680         name = self.de_escape_gpg_str(name)
1683         return (name, address)
1685     def load_keys(self, keyring):
1686         if not self.keyring_id:
1687             raise Exception('Must be initialized with database information')
# Parse colon-delimited gpg output line by line; 'pub' starts a new key,
# 'uid' may refine name/email, and 'fpr' records fingerprints of signing keys.
1689         k = os.popen(self.gpg_invocation % keyring, "r")
1694             field = line.split(":")
1695             if field[0] == "pub":
1698                 (name, addr) = self.parse_address(field[9])
1700                     self.keys[key]["email"] = addr
1701                     self.keys[key]["name"] = name
1702                 self.keys[key]["fingerprints"] = []
1704             elif key and field[0] == "sub" and len(field) >= 12:
1705                 signingkey = ("s" in field[11])
1706             elif key and field[0] == "uid":
1707                 (name, addr) = self.parse_address(field[9])
1708                 if "email" not in self.keys[key] and "@" in addr:
1709                     self.keys[key]["email"] = addr
1710                     self.keys[key]["name"] = name
1711             elif signingkey and field[0] == "fpr":
1712                 self.keys[key]["fingerprints"].append(field[9])
1713                 self.fpr_lookup[field[9]] = key
1715     def import_users_from_ldap(self, session):
# Anonymous bind; the server and base DN come from the dak configuration.
1719         LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
1720         LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
1722         l = ldap.open(LDAPServer)
1723         l.simple_bind_s("","")
1724         Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
1725                "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
1726                ["uid", "keyfingerprint", "cn", "mn", "sn"])
1728         ldap_fin_uid_id = {}
1735             uid = entry["uid"][0]
1736             name = get_ldap_name(entry)
1737             fingerprints = entry["keyFingerPrint"]
1739             for f in fingerprints:
1740                 key = self.fpr_lookup.get(f, None)
1741                 if key not in self.keys:
1743                 self.keys[key]["uid"] = uid
1747                 keyid = get_or_set_uid(uid, session).uid_id
1748                 byuid[keyid] = (uid, name)
1749                 byname[uid] = (keyid, name)
1751         return (byname, byuid)
1753     def generate_users_from_keyring(self, format, session):
# 'format' is a %-template (e.g. "%s@debian.org") applied to each key's email.
1757         for x in self.keys.keys():
1758             if "email" not in self.keys[x]:
1760                 self.keys[x]["uid"] = format % "invalid-uid"
1762                 uid = format % self.keys[x]["email"]
1763                 keyid = get_or_set_uid(uid, session).uid_id
1764                 byuid[keyid] = (uid, self.keys[x]["name"])
1765                 byname[uid] = (keyid, self.keys[x]["name"])
1766                 self.keys[x]["uid"] = uid
1769             uid = format % "invalid-uid"
1770             keyid = get_or_set_uid(uid, session).uid_id
1771             byuid[keyid] = (uid, "ungeneratable user id")
1772             byname[uid] = (keyid, "ungeneratable user id")
1774         return (byname, byuid)
1776 __all__.append('Keyring')
1779 def get_keyring(keyring, session=None):
1781     If C{keyring} does not have an entry in the C{keyrings} table yet, return None
1782     If C{keyring} already has an entry, simply return the existing Keyring
1784     @type keyring: string
1785     @param keyring: the keyring name
1788     @return: the Keyring object for this keyring
# Plain lookup by name; NoResultFound is mapped to a None-style result.
1791     q = session.query(Keyring).filter_by(keyring_name=keyring)
1795     except NoResultFound:
1798 __all__.append('get_keyring')
# Active keyrings ordered highest-priority first (descending priority).
1801 def get_active_keyring_paths(session=None):
1804     @return: list of active keyring paths
1806     return [ x.keyring_name for x in session.query(Keyring).filter(Keyring.active == True).order_by(desc(Keyring.priority)).all() ]
1808 __all__.append('get_active_keyring_paths')
1811 def get_primary_keyring_path(session=None):
1813     Get the full path to the highest priority active keyring
1816     @return: path to the active keyring with the highest priority or None if no
1817              keyring is configured
# get_active_keyring_paths() already sorts by priority, so element 0 wins.
1819     keyrings = get_active_keyring_paths()
1821     if len(keyrings) > 0:
1826 __all__.append('get_primary_keyring_path')
1828 ################################################################################
# ORM row mapping a keyring to ACL restrictions.
1830 class KeyringACLMap(object):
1831     def __init__(self, *args, **kwargs):
1835         return '<KeyringACLMap %s>' % self.keyring_acl_map_id
1837 __all__.append('KeyringACLMap')
1839 ################################################################################
# ORM row for an uploaded .changes file tracked in the database.
1841 class DBChange(object):
1842     def __init__(self, *args, **kwargs):
1846         return '<DBChange %s>' % self.changesname
# Detach this changes entry from its policy queue and drop its file links.
1848     def clean_from_queue(self):
1849         session = DBConn().session().object_session(self)
1851         # Remove changes_pool_files entries
1854         # Remove changes_pending_files references
1857         # Clear out of queue
1858         self.in_queue = None
1859         self.approved_for_id = None
1861 __all__.append('DBChange')
1864 def get_dbchange(filename, session=None):
1866     returns DBChange object for given C{filename}.
1868     @type filename: string
1869     @param filename: the name of the file
1871     @type session: Session
1872     @param session: Optional SQLA session object (a temporary one will be
1873     generated if not supplied)
1876     @return: DBChange object for the given filename (C{None} if not present)
1879     q = session.query(DBChange).filter_by(changesname=filename)
1883     except NoResultFound:
1886 __all__.append('get_dbchange')
1888 ################################################################################
# ORM row for an archive location (pool directory + component).
1890 class Location(ORMObject):
1891     def __init__(self, path = None, component = None):
1893         self.component = component
1894         # the column 'type' should go away, see comment at mapper
1895         self.archive_type = 'pool'
1897     def properties(self):
1898         return ['path', 'location_id', 'archive_type', 'component', \
1901     def not_null_constraints(self):
1902         return ['path', 'archive_type']
1904 __all__.append('Location')
1907 def get_location(location, component=None, archive=None, session=None):
1909     Returns Location object for the given combination of location, component
1912     @type location: string
1913     @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
1915     @type component: string
1916     @param component: the component name (if None, no restriction applied)
1918     @type archive: string
1919     @param archive: the archive name (if None, no restriction applied)
1921     @rtype: Location / None
1922     @return: Either a Location object or None if one can't be found
1925     q = session.query(Location).filter_by(path=location)
# archive/component filters are optional joins narrowing the path match.
1927     if archive is not None:
1928         q = q.join(Archive).filter_by(archive_name=archive)
1930     if component is not None:
1931         q = q.join(Component).filter_by(component_name=component)
1935     except NoResultFound:
1938 __all__.append('get_location')
1940 ################################################################################
# ORM row for a package maintainer ("Name <email>" string).
1942 class Maintainer(ORMObject):
1943     def __init__(self, name = None):
1946     def properties(self):
1947         return ['name', 'maintainer_id']
1949     def not_null_constraints(self):
1952     def get_split_maintainer(self):
# Guard against partially-initialized rows with no name set.
1953         if not hasattr(self, 'name') or self.name is None:
1954             return ('', '', '', '')
1956         return fix_maintainer(self.name.strip())
1958 __all__.append('Maintainer')
# Get-or-create helper for maintainer rows, keyed on the full name string.
1961 def get_or_set_maintainer(name, session=None):
1963     Returns Maintainer object for given maintainer name.
1965     If no matching maintainer name is found, a row is inserted.
1968     @param name: The maintainer name to add
1970     @type session: SQLAlchemy
1971     @param session: Optional SQL session object (a temporary one will be
1972     generated if not supplied).  If not passed, a commit will be performed at
1973     the end of the function, otherwise the caller is responsible for commiting.
1974     A flush will be performed either way.
1977     @return: the Maintainer object for the given maintainer
1980     q = session.query(Maintainer).filter_by(name=name)
1983     except NoResultFound:
1984         maintainer = Maintainer()
1985         maintainer.name = name
1986         session.add(maintainer)
1987         session.commit_or_flush()
1992 __all__.append('get_or_set_maintainer')
1995 def get_maintainer(maintainer_id, session=None):
1997     Return the name of the maintainer behind C{maintainer_id} or None if that
1998     maintainer_id is invalid.
2000     @type maintainer_id: int
2001     @param maintainer_id: the id of the maintainer
2004     @return: the Maintainer with this C{maintainer_id}
# Primary-key lookup; Query.get() yields None for unknown ids.
2007     return session.query(Maintainer).get(maintainer_id)
2009 __all__.append('get_maintainer')
2011 ################################################################################
# ORM row for a reviewer comment on a package/version in the NEW queue.
2013 class NewComment(object):
2014     def __init__(self, *args, **kwargs):
2018         return '''<NewComment for '%s %s' (%s)>''' % (self.package, self.version, self.comment_id)
2020 __all__.append('NewComment')
2023 def has_new_comment(package, version, session=None):
2025     Returns true if the given combination of C{package}, C{version} has a comment.
2027     @type package: string
2028     @param package: name of the package
2030     @type version: string
2031     @param version: package version
2033     @type session: Session
2034     @param session: Optional SQLA session object (a temporary one will be
2035     generated if not supplied)
2041     q = session.query(NewComment)
2042     q = q.filter_by(package=package)
2043     q = q.filter_by(version=version)
# Existence test via COUNT; returns a plain bool.
2045     return bool(q.count() > 0)
2047 __all__.append('has_new_comment')
2050 def get_new_comments(package=None, version=None, comment_id=None, session=None):
2052     Returns (possibly empty) list of NewComment objects for the given
2055     @type package: string (optional)
2056     @param package: name of the package
2058     @type version: string (optional)
2059     @param version: package version
2061     @type comment_id: int (optional)
2062     @param comment_id: An id of a comment
2064     @type session: Session
2065     @param session: Optional SQLA session object (a temporary one will be
2066     generated if not supplied)
2069     @return: A (possibly empty) list of NewComment objects will be returned
# All three filters are optional and combined with AND when given.
2072     q = session.query(NewComment)
2073     if package is not None: q = q.filter_by(package=package)
2074     if version is not None: q = q.filter_by(version=version)
2075     if comment_id is not None: q = q.filter_by(comment_id=comment_id)
2079 __all__.append('get_new_comments')
2081 ################################################################################
# ORM row for a package override (suite/component/type -> section, priority).
2083 class Override(ORMObject):
2084     def __init__(self, package = None, suite = None, component = None, overridetype = None, \
2085         section = None, priority = None):
2086         self.package = package
2088         self.component = component
2089         self.overridetype = overridetype
2090         self.section = section
2091         self.priority = priority
2093     def properties(self):
2094         return ['package', 'suite', 'component', 'overridetype', 'section', \
2097     def not_null_constraints(self):
2098         return ['package', 'suite', 'component', 'overridetype', 'section']
2100 __all__.append('Override')
2103 def get_override(package, suite=None, component=None, overridetype=None, session=None):
2105     Returns Override object for the given parameters
2107     @type package: string
2108     @param package: The name of the package
2110     @type suite: string, list or None
2111     @param suite: The name of the suite (or suites if a list) to limit to.  If
2112                   None, don't limit.  Defaults to None.
2114     @type component: string, list or None
2115     @param component: The name of the component (or components if a list) to
2116                       limit to.  If None, don't limit.  Defaults to None.
2118     @type overridetype: string, list or None
2119     @param overridetype: The name of the overridetype (or overridetypes if a list) to
2120                          limit to.  If None, don't limit.  Defaults to None.
2122     @type session: Session
2123     @param session: Optional SQLA session object (a temporary one will be
2124     generated if not supplied)
2127     @return: A (possibly empty) list of Override objects will be returned
2130     q = session.query(Override)
2131     q = q.filter_by(package=package)
# Scalars are normalized to one-element lists so each filter can use IN (...).
2133     if suite is not None:
2134         if not isinstance(suite, list): suite = [suite]
2135         q = q.join(Suite).filter(Suite.suite_name.in_(suite))
2137     if component is not None:
2138         if not isinstance(component, list): component = [component]
2139         q = q.join(Component).filter(Component.component_name.in_(component))
2141     if overridetype is not None:
2142         if not isinstance(overridetype, list): overridetype = [overridetype]
2143         q = q.join(OverrideType).filter(OverrideType.overridetype.in_(overridetype))
2147 __all__.append('get_override')
2150 ################################################################################
# ORM row for an override type (e.g. deb, udeb, dsc).
2152 class OverrideType(ORMObject):
2153     def __init__(self, overridetype = None):
2154         self.overridetype = overridetype
2156     def properties(self):
2157         return ['overridetype', 'overridetype_id', 'overrides_count']
2159     def not_null_constraints(self):
2160         return ['overridetype']
2162 __all__.append('OverrideType')
2165 def get_override_type(override_type, session=None):
2167     Returns OverrideType object for given C{override type}.
2169     @type override_type: string
2170     @param override_type: The name of the override type
2172     @type session: Session
2173     @param session: Optional SQLA session object (a temporary one will be
2174     generated if not supplied)
2177     @return: the database id for the given override type
# Lookup by name; NoResultFound maps to a None-style result.
2180     q = session.query(OverrideType).filter_by(overridetype=override_type)
2184     except NoResultFound:
2187 __all__.append('get_override_type')
2189 ################################################################################
# ORM row for a policy queue (e.g. NEW, byhand).
2191 class PolicyQueue(object):
2192     def __init__(self, *args, **kwargs):
2196         return '<PolicyQueue %s>' % self.queue_name
2198 __all__.append('PolicyQueue')
2201 def get_policy_queue(queuename, session=None):
2203     Returns PolicyQueue object for given C{queue name}
2205     @type queuename: string
2206     @param queuename: The name of the queue
2208     @type session: Session
2209     @param session: Optional SQLA session object (a temporary one will be
2210     generated if not supplied)
2213     @return: PolicyQueue object for the given queue
# Lookup by queue name; missing rows are handled via NoResultFound.
2216     q = session.query(PolicyQueue).filter_by(queue_name=queuename)
2220     except NoResultFound:
2223 __all__.append('get_policy_queue')
2226 def get_policy_queue_from_path(pathname, session=None):
2228     Returns PolicyQueue object for given C{path name}
2230     @type pathname: string
2231     @param pathname: The path
2233     @type session: Session
2234     @param session: Optional SQLA session object (a temporary one will be
2235     generated if not supplied)
2238     @return: PolicyQueue object for the given queue
# Reverse lookup: find the queue whose on-disk path matches.
2241     q = session.query(PolicyQueue).filter_by(path=pathname)
2245     except NoResultFound:
2248 __all__.append('get_policy_queue_from_path')
2250 ################################################################################
# ORM row for a package priority; compares equal to its name string.
2252 class Priority(ORMObject):
2253     def __init__(self, priority = None, level = None):
2254         self.priority = priority
2257     def properties(self):
2258         return ['priority', 'priority_id', 'level', 'overrides_count']
2260     def not_null_constraints(self):
2261         return ['priority', 'level']
# String comparison convenience: Priority == 'optional' works.
2263     def __eq__(self, val):
2264         if isinstance(val, str):
2265             return (self.priority == val)
2266         # This signals to use the normal comparison operator
2267         return NotImplemented
2269     def __ne__(self, val):
2270         if isinstance(val, str):
2271             return (self.priority != val)
2272         # This signals to use the normal comparison operator
2273         return NotImplemented
2275 __all__.append('Priority')
2278 def get_priority(priority, session=None):
2280     Returns Priority object for given C{priority name}.
2282     @type priority: string
2283     @param priority: The name of the priority
2285     @type session: Session
2286     @param session: Optional SQLA session object (a temporary one will be
2287     generated if not supplied)
2290     @return: Priority object for the given priority
# Lookup by priority name; NoResultFound maps to a None-style result.
2293     q = session.query(Priority).filter_by(priority=priority)
2297     except NoResultFound:
2300 __all__.append('get_priority')
2303 def get_priorities(session=None):
2305     Returns dictionary of priority names -> id mappings
2307     @type session: Session
2308     @param session: Optional SQL session object (a temporary one will be
2309     generated if not supplied)
2312     @return: dictionary of priority names -> id mappings
# Materialize the whole table into a name -> id dict.
2316     q = session.query(Priority)
2318         ret[x.priority] = x.priority_id
2322 __all__.append('get_priorities')
2324 ################################################################################
# ORM row for an archive section; compares equal to its name string.
2326 class Section(ORMObject):
2327     def __init__(self, section = None):
2328         self.section = section
2330     def properties(self):
2331         return ['section', 'section_id', 'overrides_count']
2333     def not_null_constraints(self):
# String comparison convenience: Section == 'utils' works.
2336     def __eq__(self, val):
2337         if isinstance(val, str):
2338             return (self.section == val)
2339         # This signals to use the normal comparison operator
2340         return NotImplemented
2342     def __ne__(self, val):
2343         if isinstance(val, str):
2344             return (self.section != val)
2345         # This signals to use the normal comparison operator
2346         return NotImplemented
2348 __all__.append('Section')
2351 def get_section(section, session=None):
2353     Returns Section object for given C{section name}.
2355     @type section: string
2356     @param section: The name of the section
2358     @type session: Session
2359     @param session: Optional SQLA session object (a temporary one will be
2360     generated if not supplied)
2363     @return: Section object for the given section name
# Lookup by section name; NoResultFound maps to a None-style result.
2366     q = session.query(Section).filter_by(section=section)
2370     except NoResultFound:
2373 __all__.append('get_section')
2376 def get_sections(session=None):
2378     Returns dictionary of section names -> id mappings
2380     @type session: Session
2381     @param session: Optional SQL session object (a temporary one will be
2382     generated if not supplied)
2385     @return: dictionary of section names -> id mappings
# Materialize the whole table into a name -> id dict.
2389     q = session.query(Section)
2391         ret[x.section] = x.section_id
2395 __all__.append('get_sections')
2397 ################################################################################
# ORM row linking a source package to a file it ships.
2399 class SrcContents(ORMObject):
2400     def __init__(self, file = None, source = None):
2402         self.source = source
2404     def properties(self):
2405         return ['file', 'source']
2407 __all__.append('SrcContents')
2409 ################################################################################
2411 from debian.debfile import Deb822
2413 # Temporary Deb822 subclass to fix bugs with : handling; see #597249
2414 class Dak822(Deb822):
# Reimplements Deb822's RFC822-style parser with stricter key matching
# (no colon in keys) to work around Debian bug #597249.
2415     def _internal_parser(self, sequence, fields=None):
2416         # The key is non-whitespace, non-colon characters before any colon.
2417         key_part = r"^(?P<key>[^: \t\n\r\f\v]+)\s*:\s*"
2418         single = re.compile(key_part + r"(?P<data>\S.*?)\s*$")
2419         multi = re.compile(key_part + r"$")
2420         multidata = re.compile(r"^\s(?P<data>.+?)\s*$")
2422         wanted_field = lambda f: fields is None or f in fields
2424         if isinstance(sequence, basestring):
2425             sequence = sequence.splitlines()
# State machine over stripped lines: 'single' = key with inline value,
# 'multi' = key opening a multi-line value, 'multidata' = continuation line.
2429         for line in self.gpg_stripped_paragraph(sequence):
2430             m = single.match(line)
2433                     self[curkey] = content
2435                 if not wanted_field(m.group('key')):
2439                 curkey = m.group('key')
2440                 content = m.group('data')
2443             m = multi.match(line)
2446                     self[curkey] = content
2448                 if not wanted_field(m.group('key')):
2452                 curkey = m.group('key')
2456             m = multidata.match(line)
2458                 content += '\n' + line # XXX not m.group('data')?
2462             self[curkey] = content
# ORM row for a source package version stored in the archive.
2465 class DBSource(ORMObject):
2466     def __init__(self, source = None, version = None, maintainer = None, \
2467         changedby = None, poolfile = None, install_date = None, fingerprint = None):
2468         self.source = source
2469         self.version = version
2470         self.maintainer = maintainer
2471         self.changedby = changedby
2472         self.poolfile = poolfile
2473         self.install_date = install_date
2474         self.fingerprint = fingerprint
2478         return self.source_id
2480     def properties(self):
2481         return ['source', 'source_id', 'maintainer', 'changedby', \
2482             'fingerprint', 'poolfile', 'version', 'suites_count', \
2483             'install_date', 'binaries_count', 'uploaders_count']
2485     def not_null_constraints(self):
2486         return ['source', 'version', 'install_date', 'maintainer', \
2487             'changedby', 'poolfile']
2489     def read_control_fields(self):
2491         Reads the control information from a dsc
2494         @return: fields is the dsc information in a dictionary form
# Parse the .dsc from the pool with the bug-fixed Dak822 parser.
2496         fullpath = self.poolfile.fullpath
2497         fields = Dak822(open(self.poolfile.fullpath, 'r'))
2500     metadata = association_proxy('key', 'value')
2502     def get_component_name(self):
2503         return self.poolfile.location.component.component_name
2505     def scan_contents(self):
2507         Returns a set of names for non directories. The path names are
2508         normalized after converting them from either utf-8 or iso8859-1
# Unpack the source package and collect its file names as utf-8.
2511         fullpath = self.poolfile.fullpath
2512         from daklib.contents import UnpackedSource
2513         unpacked = UnpackedSource(fullpath)
2515         for name in unpacked.get_all_filenames():
2516             # enforce proper utf-8 encoding
2518                 name.decode('utf-8')
2519             except UnicodeDecodeError:
2520                 name = name.decode('iso8859-1').encode('utf-8')
2524 __all__.append('DBSource')
2527 def source_exists(source, source_version, suites = ["any"], session=None):
2529     Ensure that source exists somewhere in the archive for the binary
2530     upload being processed.
2531       1. exact match     => 1.0-3
2532       2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1
2534     @type source: string
2535     @param source: source name
2537     @type source_version: string
2538     @param source_version: expected source version
2541     @param suites: list of suites to check in, default I{any}
2543     @type session: Session
2544     @param session: Optional SQLA session object (a temporary one will be
2545     generated if not supplied)
2548     @return: returns 1 if a source with expected version is found, otherwise 0
# Strip a bin-NMU suffix (+bN) so 1.0-3+b1 matches source 1.0-3.
2555     from daklib.regexes import re_bin_only_nmu
2556     orig_source_version = re_bin_only_nmu.sub('', source_version)
2558     for suite in suites:
2559         q = session.query(DBSource).filter_by(source=source). \
2560             filter(DBSource.version.in_([source_version, orig_source_version]))
2562         # source must exist in 'suite' or a suite that is enhanced by 'suite'
2563         s = get_suite(suite, session)
2565             enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
2566             considered_suites = [ vc.reference for vc in enhances_vcs ]
2567             considered_suites.append(s)
2569             q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))
2574     # No source found so return not ok
2579 __all__.append('source_exists')
# NOTE(review): excerpt has gaps (decorator / docstring delimiters not shown).
2582 def get_suites_source_in(source, session=None):
2584     Returns list of Suite objects which given C{source} name is in
2587     @param source: DBSource package name to search for
2590     @return: list of Suite objects for the given source
# Any suite containing at least one DBSource row with this package name.
2593     return session.query(Suite).filter(Suite.sources.any(source=source)).all()
2595 __all__.append('get_suites_source_in')
# Query DBSource rows by name, optionally narrowed by version and by the
# dm_upload_allowed flag.
# NOTE(review): excerpt has gaps; the final `return q.all()` is not visible.
2598 def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None):
2600     Returns list of DBSource objects for given C{source} name and other parameters
2603     @param source: DBSource package name to search for
2605     @type version: str or None
2606     @param version: DBSource version name to search for or None if not applicable
2608     @type dm_upload_allowed: bool
2609     @param dm_upload_allowed: If None, no effect. If True or False, only
2610     return packages with that dm_upload_allowed setting
2612     @type session: Session
2613     @param session: Optional SQL session object (a temporary one will be
2614     generated if not supplied)
2617     @return: list of DBSource objects for the given name (may be empty)
2620     q = session.query(DBSource).filter_by(source=source)
# Each optional filter is applied only when the caller supplied it.
2622     if version is not None:
2623         q = q.filter_by(version=version)
2625     if dm_upload_allowed is not None:
2626         q = q.filter_by(dm_upload_allowed=dm_upload_allowed)
2630 __all__.append('get_sources_from_name')
2632 # FIXME: This function fails badly if it finds more than 1 source package and
2633 # its implementation is trivial enough to be inlined.
# Look up the single DBSource for (source, suite); per the surrounding
# NoResultFound handler it returns None when the source is not in the suite.
# NOTE(review): excerpt has gaps (decorator, `try:` and return lines missing).
2635 def get_source_in_suite(source, suite, session=None):
2637     Returns a DBSource object for a combination of C{source} and C{suite}.
2639       - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
2640       - B{suite} - a suite name, eg. I{unstable}
2642     @type source: string
2643     @param source: source package name
2646     @param suite: the suite name
2649     @return: the version for I{source} in I{suite}
# Delegates to Suite.get_sources(), which returns a query filtered by suite.
2653     q = get_suite(suite, session).get_sources(source)
2656     except NoResultFound:
2659 __all__.append('get_source_in_suite')
# Copy the control-file fields of a DBBinary or DBSource into the *_metadata
# tables, coercing each value to a str with UTF-8 then latin-1 fallbacks.
# NOTE(review): excerpt has gaps; the `try:` lines for the nested encode
# attempts are not visible here.
2662 def import_metadata_into_db(obj, session=None):
2664     This routine works on either DBBinary or DBSource objects and imports
2665     their metadata into the database
2667     fields = obj.read_control_fields()
2668     for k in fields.keys():
# First attempt: plain str() conversion of the field value.
2671             val = str(fields[k])
2672         except UnicodeEncodeError:
2673             # Fall back to UTF-8
2675                 val = fields[k].encode('utf-8')
2676             except UnicodeEncodeError:
2677                 # Finally try iso8859-1
2678                 val = fields[k].encode('iso8859-1')
2679                 # Otherwise we allow the exception to percolate up and we cause
2680                 # a reject as someone is playing silly buggers
# obj.metadata is the association proxy: keyed by MetadataKey row, valued by str.
2682         obj.metadata[get_or_set_metadatakey(k, session)] = val
2684     session.commit_or_flush()
2686 __all__.append('import_metadata_into_db')
2689 ################################################################################
2691 def split_uploaders(uploaders_list):
2693 Split the Uploaders field into the individual uploaders and yield each of
2694 them. Beware: email addresses might contain commas.
2697 for uploader in re.sub(">[ ]*,", ">\t", uploaders_list).split("\t"):
2698 yield uploader.strip()
# Register an uploaded .dsc (and its constituent files) in the database:
# creates the DBSource row, the pool files, the dsc_files links and the
# uploader list; returns the new objects so the caller can act on them.
# NOTE(review): excerpt has gaps — e.g. the session decorator, the
# `source = DBSource()` / `pfs = []` initialisations and several `else:` /
# `session.add(...)` lines are not visible here.
2701 def add_dsc_to_db(u, filename, session=None):
2702     entry = u.pkg.files[filename]
2706     source.source = u.pkg.dsc["source"]
2707     source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
2708     source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
2709     # If Changed-By isn't available, fall back to maintainer
2710     if u.pkg.changes.has_key("changed-by"):
2711         source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
2713         source.changedby_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
2714     source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
2715     source.install_date = datetime.now().date()
2717     dsc_component = entry["component"]
2718     dsc_location_id = entry["location id"]
# Dm-Upload-Allowed is only true on the literal value "yes".
2720     source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
2722     # Set up a new poolfile if necessary
2723     if not entry.has_key("files id") or not entry["files id"]:
2724         filename = entry["pool name"] + filename
2725         poolfile = add_poolfile(filename, entry, dsc_location_id, session)
2727         pfs.append(poolfile)
2728         entry["files id"] = poolfile.file_id
2730     source.poolfile_id = entry["files id"]
# Attach the source to every suite named in the .changes Distribution field.
2733     suite_names = u.pkg.changes["distribution"].keys()
2734     source.suites = session.query(Suite). \
2735         filter(Suite.suite_name.in_(suite_names)).all()
2737     # Add the source files to the DB (files and dsc_files)
2739     dscfile.source_id = source.source_id
2740     dscfile.poolfile_id = entry["files id"]
2741     session.add(dscfile)
2743     for dsc_file, dentry in u.pkg.dsc_files.items():
2745         df.source_id = source.source_id
2747         # If the .orig tarball is already in the pool, it's
2748         # files id is stored in dsc_files by check_dsc().
2749         files_id = dentry.get("files id", None)
2751         # Find the entry in the files hash
2752         # TODO: Bail out here properly
2754         for f, e in u.pkg.files.items():
2759         if files_id is None:
2760             filename = dfentry["pool name"] + dsc_file
# Try to locate an existing pool file with matching size/md5sum first.
2762             (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
2763             # FIXME: needs to check for -1/-2 and or handle exception
2764             if found and obj is not None:
2765                 files_id = obj.file_id
2768             # If still not found, add it
2769             if files_id is None:
2770                 # HACK: Force sha1sum etc into dentry
2771                 dentry["sha1sum"] = dfentry["sha1sum"]
2772                 dentry["sha256sum"] = dfentry["sha256sum"]
2773                 poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
2774                 pfs.append(poolfile)
2775                 files_id = poolfile.file_id
2777             poolfile = get_poolfile_by_id(files_id, session)
2778             if poolfile is None:
2779                 utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
2780             pfs.append(poolfile)
2782         df.poolfile_id = files_id
2785     # Add the src_uploaders to the DB
2787     session.refresh(source)
# The maintainer is always an uploader; Uploaders entries are appended after.
2788     source.uploaders = [source.maintainer]
2789     if u.pkg.dsc.has_key("uploaders"):
2790         for up in split_uploaders(u.pkg.dsc["uploaders"]):
2791             source.uploaders.append(get_or_set_maintainer(up, session))
2795     return source, dsc_component, dsc_location_id, pfs
2797 __all__.append('add_dsc_to_db')
# Register an uploaded .deb/.udeb in the database: creates the DBBinary row,
# its pool file, links it to its source package and to the target suites.
# NOTE(review): excerpt has gaps — e.g. the `bin = DBBinary()` initialisation,
# the `else:` branches and the `session.add(bin)`/flush lines are not visible.
2800 def add_deb_to_db(u, filename, session=None):
2802     Contrary to what you might expect, this routine deals with both
2803     debs and udebs. That info is in 'dbtype', whilst 'type' is
2804     'deb' for both of them
2807     entry = u.pkg.files[filename]
2810     bin.package = entry["package"]
2811     bin.version = entry["version"]
2812     bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
2813     bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
2814     bin.arch_id = get_architecture(entry["architecture"], session).arch_id
# 'dbtype' distinguishes deb from udeb (see the docstring above).
2815     bin.binarytype = entry["dbtype"]
2818     filename = entry["pool name"] + filename
2819     fullpath = os.path.join(cnf["Dir::Pool"], filename)
2820     if not entry.get("location id", None):
2821         entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
2823     if entry.get("files id", None):
# NOTE(review): bin.poolfile_id is read here but is only assigned on the next
# line — at this point it looks unset. Should this be entry["files id"]?
# TODO confirm against the full file / upstream history.
2824         poolfile = get_poolfile_by_id(bin.poolfile_id)
2825         bin.poolfile_id = entry["files id"]
2827         poolfile = add_poolfile(filename, entry, entry["location id"], session)
2828         bin.poolfile_id = entry["files id"] = poolfile.file_id
# Find the source package this binary claims to belong to.
2831     bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
2833     # If we couldn't find anything and the upload contains Arch: source,
2834     # fall back to trying the source package, source version uploaded
2835     # This maintains backwards compatibility with previous dak behaviour
2836     # and deals with slightly broken binary debs which don't properly
2837     # declare their source package name
2838     if len(bin_sources) == 0:
2839         if u.pkg.changes["architecture"].has_key("source") \
2840            and u.pkg.dsc.has_key("source") and u.pkg.dsc.has_key("version"):
2841             bin_sources = get_sources_from_name(u.pkg.dsc["source"], u.pkg.dsc["version"], session=session)
2843     # If we couldn't find a source here, we reject
2844     # TODO: Fix this so that it doesn't kill process-upload and instead just
2845     #       performs a reject. To be honest, we should probably spot this
2846     #       *much* earlier than here
2847     if len(bin_sources) != 1:
2848         raise NoSourceFieldError("Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
2849                                   (bin.package, bin.version, entry["architecture"],
2850                                    filename, bin.binarytype, u.pkg.changes["fingerprint"]))
2852     bin.source_id = bin_sources[0].source_id
# Built-Using references must each resolve to exactly one known source.
2854     if entry.has_key("built-using"):
2855         for srcname, version in entry["built-using"]:
2856             exsources = get_sources_from_name(srcname, version, session=session)
2857             if len(exsources) != 1:
2858                 raise NoSourceFieldError("Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
2859                                           (srcname, version, bin.package, bin.version, entry["architecture"],
2860                                            filename, bin.binarytype, u.pkg.changes["fingerprint"]))
2862             bin.extra_sources.append(exsources[0])
2864     # Add and flush object so it has an ID
2867     suite_names = u.pkg.changes["distribution"].keys()
2868     bin.suites = session.query(Suite). \
2869         filter(Suite.suite_name.in_(suite_names)).all()
2873     # Deal with contents - disabled for now
2874     #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
2876     #    print "REJECT\nCould not determine contents of package %s" % bin.package
2877     #    session.rollback()
2878     #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
2880     return bin, poolfile
2882 __all__.append('add_deb_to_db')
2884 ################################################################################
# Plain mapped class for the source_acl table (wired up in DBConn.__setupmappers).
# NOTE(review): excerpt gap — the __repr__ def line is not visible above its body.
2886 class SourceACL(object):
2887     def __init__(self, *args, **kwargs):
2891         return '<SourceACL %s>' % self.source_acl_id
2893 __all__.append('SourceACL')
2895 ################################################################################
# Plain mapped class for the src_format table (e.g. "1.0", "3.0 (quilt)").
# NOTE(review): excerpt gap — the __repr__ def line is not visible above its body.
2897 class SrcFormat(object):
2898     def __init__(self, *args, **kwargs):
2902         return '<SrcFormat %s>' % (self.format_name)
2904 __all__.append('SrcFormat')
2906 ################################################################################
# (display label, Suite attribute) pairs used by Suite.details() below to
# print a suite's configuration. NOTE(review): one entry (original line 2912)
# is missing from this excerpt.
2908 SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
2909                  ('SuiteID', 'suite_id'),
2910                  ('Version', 'version'),
2911                  ('Origin', 'origin'),
2913                  ('Description', 'description'),
2914                  ('Untouchable', 'untouchable'),
2915                  ('Announce', 'announce'),
2916                  ('Codename', 'codename'),
2917                  ('OverrideCodename', 'overridecodename'),
2918                  ('ValidTime', 'validtime'),
2919                  ('Priority', 'priority'),
2920                  ('NotAutomatic', 'notautomatic'),
2921                  ('CopyChanges', 'copychanges'),
2922                  ('OverrideSuite', 'overridesuite')]
2924 # Why the heck don't we have any UNIQUE constraints in table suite?
2925 # TODO: Add UNIQUE constraints for appropriate columns.
# ORM class for the suite table. Equality against plain strings compares the
# suite name, so `suite == "unstable"` works in queries and code.
# NOTE(review): excerpt has gaps — e.g. the details() def line and the
# distsdir/overridesuite method headers are not visible here.
2926 class Suite(ORMObject):
2927     def __init__(self, suite_name = None, version = None):
2928         self.suite_name = suite_name
2929         self.version = version
2931     def properties(self):
2932         return ['suite_name', 'version', 'sources_count', 'binaries_count', \
2935     def not_null_constraints(self):
2936         return ['suite_name']
# String comparison convenience: Suite == "name" compares against suite_name;
# anything else falls back to the default comparison via NotImplemented.
2938     def __eq__(self, val):
2939         if isinstance(val, str):
2940             return (self.suite_name == val)
2941         # This signals to use the normal comparison operator
2942         return NotImplemented
2944     def __ne__(self, val):
2945         if isinstance(val, str):
2946             return (self.suite_name != val)
2947         # This signals to use the normal comparison operator
2948         return NotImplemented
# Render one "Label: value" line per SUITE_FIELDS entry that is set.
2952         for disp, field in SUITE_FIELDS:
2953             val = getattr(self, field, None)
2955                 ret.append("%s: %s" % (disp, val))
2957         return "\n".join(ret)
2959     def get_architectures(self, skipsrc=False, skipall=False):
2961         Returns list of Architecture objects
2963         @type skipsrc: boolean
2964         @param skipsrc: Whether to skip returning the 'source' architecture entry
2967         @type skipall: boolean
2968         @param skipall: Whether to skip returning the 'all' architecture entry
2972         @return: list of Architecture objects for the given name (may be empty)
2975         q = object_session(self).query(Architecture).with_parent(self)
2977             q = q.filter(Architecture.arch_string != 'source')
2979             q = q.filter(Architecture.arch_string != 'all')
2980         return q.order_by(Architecture.arch_string).all()
2982     def get_sources(self, source):
2984         Returns a query object representing DBSource that is part of C{suite}.
2986           - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
2988         @type source: string
2989         @param source: source package name
2991         @rtype: sqlalchemy.orm.query.Query
2992         @return: a query of DBSource
2996         session = object_session(self)
2997         return session.query(DBSource).filter_by(source = source). \
# Resolve the suite whose overrides apply: self unless overridesuite is set.
3000     def get_overridesuite(self):
3001         if self.overridesuite is None:
3004             return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()
3008         return os.path.join(self.archive.path, 'dists', self.suite_name)
3010 __all__.append('Suite')
# NOTE(review): excerpt has gaps (decorator, `try:` / return lines not shown);
# per the NoResultFound handler below, missing suites yield None.
3013 def get_suite(suite, session=None):
3015     Returns Suite object for given C{suite name}.
3018     @param suite: The name of the suite
3020     @type session: Session
3021     @param session: Optional SQLA session object (a temporary one will be
3022     generated if not supplied)
3025     @return: Suite object for the requested suite name (None if not present)
3028     q = session.query(Suite).filter_by(suite_name=suite)
3032     except NoResultFound:
3035 __all__.append('get_suite')
3037 ################################################################################
# Thin wrapper: resolve the suite then delegate to Suite.get_architectures();
# the AttributeError handler covers get_suite() returning None.
3040 def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
3042     Returns list of Architecture objects for given C{suite} name. The list is
3043     empty if suite does not exist.
3046     @param suite: Suite name to search for
3048     @type skipsrc: boolean
3049     @param skipsrc: Whether to skip returning the 'source' architecture entry
3052     @type skipall: boolean
3053     @param skipall: Whether to skip returning the 'all' architecture entry
3056     @type session: Session
3057     @param session: Optional SQL session object (a temporary one will be
3058     generated if not supplied)
3061     @return: list of Architecture objects for the given name (may be empty)
3065         return get_suite(suite, session).get_architectures(skipsrc, skipall)
3066     except AttributeError:
3069 __all__.append('get_suite_architectures')
3071 ################################################################################
# ORM class for the uid table; like Suite, comparing against a plain string
# compares the uid value.
# NOTE(review): excerpt has gaps (__init__ body and not_null_constraints
# return line are not fully visible).
3073 class Uid(ORMObject):
3074     def __init__(self, uid = None, name = None):
3078     def __eq__(self, val):
3079         if isinstance(val, str):
3080             return (self.uid == val)
3081         # This signals to use the normal comparison operator
3082         return NotImplemented
3084     def __ne__(self, val):
3085         if isinstance(val, str):
3086             return (self.uid != val)
3087         # This signals to use the normal comparison operator
3088         return NotImplemented
3090     def properties(self):
3091         return ['uid', 'name', 'fingerprint']
3093     def not_null_constraints(self):
3096 __all__.append('Uid')
# Get-or-create helper: fetch the Uid row for uidname, inserting it first if
# absent (commit/flush semantics depend on whether a session was passed in).
# NOTE(review): excerpt has gaps (`try:`, insert and return lines not shown).
3099 def get_or_set_uid(uidname, session=None):
3101     Returns uid object for given uidname.
3103     If no matching uidname is found, a row is inserted.
3105     @type uidname: string
3106     @param uidname: The uid to add
3108     @type session: SQLAlchemy
3109     @param session: Optional SQL session object (a temporary one will be
3110     generated if not supplied).  If not passed, a commit will be performed at
3111     the end of the function, otherwise the caller is responsible for commiting.
3114     @return: the uid object for the given uidname
3117     q = session.query(Uid).filter_by(uid=uidname)
3121     except NoResultFound:
3125         session.commit_or_flush()
3130 __all__.append('get_or_set_uid')
# Resolve the Uid owning a given fingerprint string by joining through the
# fingerprint table. NOTE(review): excerpt gap — `try:` / return lines missing.
3133 def get_uid_from_fingerprint(fpr, session=None):
3134     q = session.query(Uid)
3135     q = q.join(Fingerprint).filter_by(fingerprint=fpr)
3139     except NoResultFound:
3142 __all__.append('get_uid_from_fingerprint')
3144 ################################################################################
# Plain mapped class for the upload_blocks table (wired up in __setupmappers).
# NOTE(review): excerpt gap — the __repr__ def line is not visible above its body.
3146 class UploadBlock(object):
3147     def __init__(self, *args, **kwargs):
3151         return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
3153 __all__.append('UploadBlock')
3155 ################################################################################
# ORM class for metadata_keys: the key half of the binary/source metadata maps.
# NOTE(review): excerpt gaps — method bodies/returns not visible here.
3157 class MetadataKey(ORMObject):
3158     def __init__(self, key = None):
3161     def properties(self):
3164     def not_null_constraints(self):
3167 __all__.append('MetadataKey')
# Get-or-create helper for MetadataKey rows, mirroring get_or_set_uid().
# NOTE(review): excerpt has gaps (`try:`, session.add and return lines not
# shown). Docstring below fixed: it said "uidname" (copy/paste from
# get_or_set_uid) where it means the key name.
3170 def get_or_set_metadatakey(keyname, session=None):
3172     Returns MetadataKey object for given keyname.
3174     If no matching keyname is found, a row is inserted.
3176     @type keyname: string
3177     @param keyname: The keyname to add
3179     @type session: SQLAlchemy
3180     @param session: Optional SQL session object (a temporary one will be
3181     generated if not supplied).  If not passed, a commit will be performed at
3182     the end of the function, otherwise the caller is responsible for commiting.
3185     @return: the metadatakey object for the given keyname
3188     q = session.query(MetadataKey).filter_by(key=keyname)
3192     except NoResultFound:
3193         ret = MetadataKey(keyname)
3195         session.commit_or_flush()
3199 __all__.append('get_or_set_metadatakey')
3201 ################################################################################
# ORM class for binaries_metadata: one (binary, key) -> value row.
# NOTE(review): excerpt gaps — key/value assignments in __init__ and the
# not_null_constraints return line are not visible.
3203 class BinaryMetadata(ORMObject):
3204     def __init__(self, key = None, value = None, binary = None):
3207         self.binary = binary
3209     def properties(self):
3210         return ['binary', 'key', 'value']
3212     def not_null_constraints(self):
3215 __all__.append('BinaryMetadata')
3217 ################################################################################
# ORM class for source_metadata: one (source, key) -> value row; the exact
# counterpart of BinaryMetadata above.
3219 class SourceMetadata(ORMObject):
3220     def __init__(self, key = None, value = None, source = None):
3223         self.source = source
3225     def properties(self):
3226         return ['source', 'key', 'value']
3228     def not_null_constraints(self):
3231 __all__.append('SourceMetadata')
3233 ################################################################################
# ORM class for version_check rows: (suite, check, reference suite), e.g.
# 'Enhances' relations consumed by source_exists() above.
3235 class VersionCheck(ORMObject):
3236     def __init__(self, *args, **kwargs):
3239     def properties(self):
3240         #return ['suite_id', 'check', 'reference_id']
3243     def not_null_constraints(self):
3244         return ['suite', 'check', 'reference']
3246 __all__.append('VersionCheck')
# Fetch VersionCheck rows for a suite, optionally narrowed to one check type.
# NOTE(review): excerpt gaps — the unknown-suite early-return and the final
# return of the query are not visible here.
3249 def get_version_checks(suite_name, check = None, session = None):
3250     suite = get_suite(suite_name, session)
3252         # Make sure that what we return is iterable so that list comprehensions
3253         # involving this don't cause a traceback
3255     q = session.query(VersionCheck).filter_by(suite=suite)
3257         q = q.filter_by(check=check)
3260 __all__.append('get_version_checks')
3262 ################################################################################
# Database connection holder. Uses the Borg (shared-state) pattern: every
# instance rebinds its __dict__ to the class-level __shared_state, so all
# DBConn() instances share one connection/configuration and only the first
# construction performs the setup guarded by 'initialised'.
# NOTE(review): excerpt gaps — __shared_state and the setup calls made inside
# the `if not initialised` branch are not visible here.
3264 class DBConn(object):
3266     database module init.
3270     def __init__(self, *args, **kwargs):
3271         self.__dict__ = self.__shared_state
3273         if not getattr(self, 'initialised', False):
3274             self.initialised = True
# Debug flag: enabled by mere presence of a 'debug' keyword argument.
3275             self.debug = kwargs.has_key('debug')
# Reflect all database tables and views into SQLAlchemy Table objects,
# exposing them as self.tbl_<name> and self.view_<name> attributes.
# NOTE(review): excerpt has gaps — the `tables = (` / `views = (` openers and
# many table-name entries are missing; the names below are partial lists.
3278     def __setuptables(self):
3285             'binaries_metadata',
3289             'build_queue_files',
3290             'build_queue_policy_files',
3295             'changes_pending_binaries',
3296             'changes_pending_files',
3297             'changes_pending_source',
3298             'changes_pending_files_map',
3299             'changes_pending_source_files',
3300             'changes_pool_files',
3302             'external_overrides',
3303             'extra_src_references',
3312             # TODO: the maintainer column in table override should be removed.
3326             'suite_architectures',
3327             'suite_build_queue_copy',
3328             'suite_src_formats',
3335             'almost_obsolete_all_associations',
3336             'almost_obsolete_src_associations',
3337             'any_associations_source',
3338             'bin_associations_binaries',
3339             'binaries_suite_arch',
3340             'binfiles_suite_component_arch',
3343             'newest_all_associations',
3344             'newest_any_associations',
3346             'newest_src_association',
3347             'obsolete_all_associations',
3348             'obsolete_any_associations',
3349             'obsolete_any_by_all_associations',
3350             'obsolete_src_associations',
3352             'src_associations_bin',
3353             'src_associations_src',
3354             'suite_arch_by_name',
# autoload=True reflects the schema from the live database; useexisting
# tolerates re-initialisation of the shared (Borg) state.
3357         for table_name in tables:
3358             table = Table(table_name, self.db_meta, \
3359                 autoload=True, useexisting=True)
3360             setattr(self, 'tbl_%s' % table_name, table)
3362         for view_name in views:
3363             view = Table(view_name, self.db_meta, autoload=True)
3364             setattr(self, 'view_%s' % view_name, view)
# Classical SQLAlchemy mappings: wire every ORM class defined in this module
# to its reflected table, renaming columns (e.g. id -> <name>_id) and
# declaring the inter-table relations and backrefs.
# NOTE(review): excerpt has gaps — several `properties = dict(` opener lines
# are missing before their keyword entries; order of mapper() calls matters
# only in that referenced classes must exist, so treat this as read-only.
3366     def __setupmappers(self):
3367         mapper(Architecture, self.tbl_architecture,
3368             properties = dict(arch_id = self.tbl_architecture.c.id,
3369                suites = relation(Suite, secondary=self.tbl_suite_architectures,
3370                    order_by=self.tbl_suite.c.suite_name,
3371                    backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
3372             extension = validator)
3374         mapper(Archive, self.tbl_archive,
3375             properties = dict(archive_id = self.tbl_archive.c.id,
3376                               archive_name = self.tbl_archive.c.name))
3378         mapper(BuildQueue, self.tbl_build_queue,
3379             properties = dict(queue_id = self.tbl_build_queue.c.id))
3381         mapper(BuildQueueFile, self.tbl_build_queue_files,
3382             properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
3383                               poolfile = relation(PoolFile, backref='buildqueueinstances')))
3385         mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
3387                 build_queue = relation(BuildQueue, backref='policy_queue_files'),
3388                 file = relation(ChangePendingFile, lazy='joined')))
3390         mapper(DBBinary, self.tbl_binaries,
3391             properties = dict(binary_id = self.tbl_binaries.c.id,
3392                               package = self.tbl_binaries.c.package,
3393                               version = self.tbl_binaries.c.version,
3394                               maintainer_id = self.tbl_binaries.c.maintainer,
3395                               maintainer = relation(Maintainer),
3396                               source_id = self.tbl_binaries.c.source,
3397                               source = relation(DBSource, backref='binaries'),
3398                               arch_id = self.tbl_binaries.c.architecture,
3399                               architecture = relation(Architecture),
3400                               poolfile_id = self.tbl_binaries.c.file,
3401                               poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
3402                               binarytype = self.tbl_binaries.c.type,
3403                               fingerprint_id = self.tbl_binaries.c.sig_fpr,
3404                               fingerprint = relation(Fingerprint),
3405                               install_date = self.tbl_binaries.c.install_date,
3406                               suites = relation(Suite, secondary=self.tbl_bin_associations,
3407                                   backref=backref('binaries', lazy='dynamic')),
3408                               extra_sources = relation(DBSource, secondary=self.tbl_extra_src_references,
3409                                   backref=backref('extra_binary_references', lazy='dynamic')),
# 'key' feeds the metadata association_proxy: a dict keyed by MetadataKey.
3410                               key = relation(BinaryMetadata, cascade='all',
3411                                   collection_class=attribute_mapped_collection('key'))),
3412                 extension = validator)
3414         mapper(BinaryACL, self.tbl_binary_acl,
3415             properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
3417         mapper(BinaryACLMap, self.tbl_binary_acl_map,
3418             properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
3419                               fingerprint = relation(Fingerprint, backref="binary_acl_map"),
3420                               architecture = relation(Architecture)))
3422         mapper(Component, self.tbl_component,
3423                properties = dict(component_id = self.tbl_component.c.id,
3424                                  component_name = self.tbl_component.c.name),
3425                extension = validator)
3427         mapper(DBConfig, self.tbl_config,
3428                properties = dict(config_id = self.tbl_config.c.id))
3430         mapper(DSCFile, self.tbl_dsc_files,
3431                properties = dict(dscfile_id = self.tbl_dsc_files.c.id,
3432                                  source_id = self.tbl_dsc_files.c.source,
3433                                  source = relation(DBSource),
3434                                  poolfile_id = self.tbl_dsc_files.c.file,
3435                                  poolfile = relation(PoolFile)))
3437         mapper(ExternalOverride, self.tbl_external_overrides,
3439                     suite_id = self.tbl_external_overrides.c.suite,
3440                     suite = relation(Suite),
3441                     component_id = self.tbl_external_overrides.c.component,
3442                     component = relation(Component)))
3444         mapper(PoolFile, self.tbl_files,
3445                properties = dict(file_id = self.tbl_files.c.id,
3446                                  filesize = self.tbl_files.c.size,
3447                                  location_id = self.tbl_files.c.location,
3448                                  location = relation(Location,
3449                                      # using lazy='dynamic' in the back
3450                                      # reference because we have A LOT of
3451                                      # files in one location
3452                                      backref=backref('files', lazy='dynamic'))),
3453                extension = validator)
3455         mapper(Fingerprint, self.tbl_fingerprint,
3456                properties = dict(fingerprint_id = self.tbl_fingerprint.c.id,
3457                                  uid_id = self.tbl_fingerprint.c.uid,
3458                                  uid = relation(Uid),
3459                                  keyring_id = self.tbl_fingerprint.c.keyring,
3460                                  keyring = relation(Keyring),
3461                                  source_acl = relation(SourceACL),
3462                                  binary_acl = relation(BinaryACL)),
3463                extension = validator)
3465         mapper(Keyring, self.tbl_keyrings,
3466                properties = dict(keyring_name = self.tbl_keyrings.c.name,
3467                                  keyring_id = self.tbl_keyrings.c.id))
3469         mapper(DBChange, self.tbl_changes,
3470                properties = dict(change_id = self.tbl_changes.c.id,
3471                                  poolfiles = relation(PoolFile,
3472                                                       secondary=self.tbl_changes_pool_files,
3473                                                       backref="changeslinks"),
3474                                  seen = self.tbl_changes.c.seen,
3475                                  source = self.tbl_changes.c.source,
3476                                  binaries = self.tbl_changes.c.binaries,
3477                                  architecture = self.tbl_changes.c.architecture,
3478                                  distribution = self.tbl_changes.c.distribution,
3479                                  urgency = self.tbl_changes.c.urgency,
3480                                  maintainer = self.tbl_changes.c.maintainer,
3481                                  changedby = self.tbl_changes.c.changedby,
3482                                  date = self.tbl_changes.c.date,
3483                                  version = self.tbl_changes.c.version,
3484                                  files = relation(ChangePendingFile,
3485                                                   secondary=self.tbl_changes_pending_files_map,
3486                                                   backref="changesfile"),
3487                                  in_queue_id = self.tbl_changes.c.in_queue,
3488                                  in_queue = relation(PolicyQueue,
3489                                                      primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
3490                                  approved_for_id = self.tbl_changes.c.approved_for))
3492         mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
3493                properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
3495         mapper(ChangePendingFile, self.tbl_changes_pending_files,
3496                properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
3497                                  filename = self.tbl_changes_pending_files.c.filename,
3498                                  size = self.tbl_changes_pending_files.c.size,
3499                                  md5sum = self.tbl_changes_pending_files.c.md5sum,
3500                                  sha1sum = self.tbl_changes_pending_files.c.sha1sum,
3501                                  sha256sum = self.tbl_changes_pending_files.c.sha256sum))
3503         mapper(ChangePendingSource, self.tbl_changes_pending_source,
3504                properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
3505                                  change = relation(DBChange),
# Two relations to Maintainer from one table need explicit primaryjoins.
3506                                  maintainer = relation(Maintainer,
3507                                                        primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
3508                                  changedby = relation(Maintainer,
3509                                                       primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
3510                                  fingerprint = relation(Fingerprint),
3511                                  source_files = relation(ChangePendingFile,
3512                                                          secondary=self.tbl_changes_pending_source_files,
3513                                                          backref="pending_sources")))
3516         mapper(KeyringACLMap, self.tbl_keyring_acl_map,
3517                properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
3518                                  keyring = relation(Keyring, backref="keyring_acl_map"),
3519                                  architecture = relation(Architecture)))
3521         mapper(Location, self.tbl_location,
3522                properties = dict(location_id = self.tbl_location.c.id,
3523                                  component_id = self.tbl_location.c.component,
3524                                  component = relation(Component, backref='location'),
3525                                  archive_id = self.tbl_location.c.archive,
3526                                  archive = relation(Archive),
3527                                  # FIXME: the 'type' column is old cruft and
3528                                  # should be removed in the future.
3529                                  archive_type = self.tbl_location.c.type),
3530                extension = validator)
3532         mapper(Maintainer, self.tbl_maintainer,
3533                properties = dict(maintainer_id = self.tbl_maintainer.c.id,
3534                    maintains_sources = relation(DBSource, backref='maintainer',
3535                        primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.maintainer)),
3536                    changed_sources = relation(DBSource, backref='changedby',
3537                        primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.changedby))),
3538                extension = validator)
3540         mapper(NewComment, self.tbl_new_comments,
3541                properties = dict(comment_id = self.tbl_new_comments.c.id))
3543         mapper(Override, self.tbl_override,
3544                properties = dict(suite_id = self.tbl_override.c.suite,
3545                                  suite = relation(Suite, \
3546                                     backref=backref('overrides', lazy='dynamic')),
3547                                  package = self.tbl_override.c.package,
3548                                  component_id = self.tbl_override.c.component,
3549                                  component = relation(Component, \
3550                                     backref=backref('overrides', lazy='dynamic')),
3551                                  priority_id = self.tbl_override.c.priority,
3552                                  priority = relation(Priority, \
3553                                     backref=backref('overrides', lazy='dynamic')),
3554                                  section_id = self.tbl_override.c.section,
3555                                  section = relation(Section, \
3556                                     backref=backref('overrides', lazy='dynamic')),
3557                                  overridetype_id = self.tbl_override.c.type,
3558                                  overridetype = relation(OverrideType, \
3559                                     backref=backref('overrides', lazy='dynamic'))))
3561         mapper(OverrideType, self.tbl_override_type,
3562                properties = dict(overridetype = self.tbl_override_type.c.type,
3563                                  overridetype_id = self.tbl_override_type.c.id))
3565         mapper(PolicyQueue, self.tbl_policy_queue,
3566                properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
3568         mapper(Priority, self.tbl_priority,
3569                properties = dict(priority_id = self.tbl_priority.c.id))
3571         mapper(Section, self.tbl_section,
3572                properties = dict(section_id = self.tbl_section.c.id,
3573                                  section=self.tbl_section.c.section))
3575         mapper(DBSource, self.tbl_source,
3576                properties = dict(source_id = self.tbl_source.c.id,
3577                                  version = self.tbl_source.c.version,
3578                                  maintainer_id = self.tbl_source.c.maintainer,
3579                                  poolfile_id = self.tbl_source.c.file,
3580                                  poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
3581                                  fingerprint_id = self.tbl_source.c.sig_fpr,
3582                                  fingerprint = relation(Fingerprint),
3583                                  changedby_id = self.tbl_source.c.changedby,
3584                                  srcfiles = relation(DSCFile,
3585                                                      primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
3586                                  suites = relation(Suite, secondary=self.tbl_src_associations,
3587                                      backref=backref('sources', lazy='dynamic')),
3588                                  uploaders = relation(Maintainer,
3589                                      secondary=self.tbl_src_uploaders),
# As with DBBinary: 'key' backs the metadata association_proxy dict.
3590                                  key = relation(SourceMetadata, cascade='all',
3591                                      collection_class=attribute_mapped_collection('key'))),
3592                extension = validator)
3594         mapper(SourceACL, self.tbl_source_acl,
3595                properties = dict(source_acl_id = self.tbl_source_acl.c.id))
3597         mapper(SrcFormat, self.tbl_src_format,
3598                properties = dict(src_format_id = self.tbl_src_format.c.id,
3599                                  format_name = self.tbl_src_format.c.format_name))
3601         mapper(Suite, self.tbl_suite,
3602                properties = dict(suite_id = self.tbl_suite.c.id,
3603                                  policy_queue = relation(PolicyQueue),
3604                                  copy_queues = relation(BuildQueue,
3605                                      secondary=self.tbl_suite_build_queue_copy),
3606                                  srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
3607                                      backref=backref('suites', lazy='dynamic')),
3608                                  archive = relation(Archive, backref='suites')),
3609                extension = validator)
3611         mapper(Uid, self.tbl_uid,
3612                properties = dict(uid_id = self.tbl_uid.c.id,
3613                                  fingerprint = relation(Fingerprint)),
3614                extension = validator)
3616         mapper(UploadBlock, self.tbl_upload_blocks,
3617                properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
3618                                  fingerprint = relation(Fingerprint, backref="uploadblocks"),
3619                                  uid = relation(Uid, backref="uploadblocks")))
3621         mapper(BinContents, self.tbl_bin_contents,
3623                 binary = relation(DBBinary,
3624                     backref=backref('contents', lazy='dynamic', cascade='all')),
3625                 file = self.tbl_bin_contents.c.file))
3627         mapper(SrcContents, self.tbl_src_contents,
3629                 source = relation(DBSource,
3630                     backref=backref('contents', lazy='dynamic', cascade='all')),
3631                 file = self.tbl_src_contents.c.file))
3633         mapper(MetadataKey, self.tbl_metadata_keys,
3635                 key_id = self.tbl_metadata_keys.c.key_id,
3636                 key = self.tbl_metadata_keys.c.key))
3638         mapper(BinaryMetadata, self.tbl_binaries_metadata,
3640                 binary_id = self.tbl_binaries_metadata.c.bin_id,
3641                 binary = relation(DBBinary),
3642                 key_id = self.tbl_binaries_metadata.c.key_id,
3643                 key = relation(MetadataKey),
3644                 value = self.tbl_binaries_metadata.c.value))
3646         mapper(SourceMetadata, self.tbl_source_metadata,
3648                 source_id = self.tbl_source_metadata.c.src_id,
3649                 source = relation(DBSource),
3650                 key_id = self.tbl_source_metadata.c.key_id,
3651                 key = relation(MetadataKey),
3652                 value = self.tbl_source_metadata.c.value))
# version_check references the suite table twice (suite and reference), hence
# the explicit primaryjoins; 'reference' is eagerly loaded (lazy='joined').
3654         mapper(VersionCheck, self.tbl_version_check,
3656                 suite_id = self.tbl_version_check.c.suite,
3657                 suite = relation(Suite, primaryjoin=self.tbl_version_check.c.suite==self.tbl_suite.c.id),
3658                 reference_id = self.tbl_version_check.c.reference,
3659                 reference = relation(Suite, primaryjoin=self.tbl_version_check.c.reference==self.tbl_suite.c.id, lazy='joined')))
3661 ## Connection functions
3662 def __createconn(self):
3663 from config import Config
3665 if cnf.has_key("DB::Service"):
3666 connstr = "postgresql://service=%s" % cnf["DB::Service"]
3667 elif cnf.has_key("DB::Host"):
3669 connstr = "postgresql://%s" % cnf["DB::Host"]
3670 if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
3671 connstr += ":%s" % cnf["DB::Port"]
3672 connstr += "/%s" % cnf["DB::Name"]
3675 connstr = "postgresql:///%s" % cnf["DB::Name"]
3676 if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
3677 connstr += "?port=%s" % cnf["DB::Port"]
3679 engine_args = { 'echo': self.debug }
3680 if cnf.has_key('DB::PoolSize'):
3681 engine_args['pool_size'] = int(cnf['DB::PoolSize'])
3682 if cnf.has_key('DB::MaxOverflow'):
3683 engine_args['max_overflow'] = int(cnf['DB::MaxOverflow'])
3684 if sa_major_version == '0.6' and cnf.has_key('DB::Unicode') and \
3685 cnf['DB::Unicode'] == 'false':
3686 engine_args['use_native_unicode'] = False
3688 # Monkey patch a new dialect in in order to support service= syntax
3689 import sqlalchemy.dialects.postgresql
3690 from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
3691 class PGDialect_psycopg2_dak(PGDialect_psycopg2):
3692 def create_connect_args(self, url):
3693 if str(url).startswith('postgresql://service='):
3695 servicename = str(url)[21:]
3696 return (['service=%s' % servicename], {})
3698 return PGDialect_psycopg2.create_connect_args(self, url)
3700 sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak
3703 self.db_pg = create_engine(connstr, **engine_args)
3704 self.db_meta = MetaData()
3705 self.db_meta.bind = self.db_pg
3706 self.db_smaker = sessionmaker(bind=self.db_pg,
3710 self.__setuptables()
3711 self.__setupmappers()
3713 except OperationalError as e:
3715 utils.fubar("Cannot connect to database (%s)" % str(e))
3717 self.pid = os.getpid()
3719 def session(self, work_mem = 0):
3721 Returns a new session object. If a work_mem parameter is provided a new
3722 transaction is started and the work_mem parameter is set for this
3723 transaction. The work_mem parameter is measured in MB. A default value
3724 will be used if the parameter is not set.
3726 # reinitialize DBConn in new processes
3727 if self.pid != os.getpid():
3730 session = self.db_smaker()
3732 session.execute("SET LOCAL work_mem TO '%d MB'" % work_mem)
# Export the connection singleton as part of the module's public API.
__all__.append('DBConn')