5 @contact: Debian FTPMaster <ftpmaster@debian.org>
6 @copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
7 @copyright: 2008-2009 Mark Hymers <mhy@debian.org>
8 @copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
9 @copyright: 2009 Mike O'Connor <stew@debian.org>
10 @license: GNU General Public License version 2 or later
13 # This program is free software; you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation; either version 2 of the License, or
16 # (at your option) any later version.
18 # This program is distributed in the hope that it will be useful,
19 # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 # GNU General Public License for more details.
23 # You should have received a copy of the GNU General Public License
24 # along with this program; if not, write to the Free Software
25 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 ################################################################################
29 # < mhy> I need a funny comment
30 # < sgran> two peanuts were walking down a dark street
31 # < sgran> one was a-salted
32 # * mhy looks up the definition of "funny"
34 ################################################################################
38 from os.path import normpath
50 import simplejson as json
52 from datetime import datetime, timedelta
53 from errno import ENOENT
54 from tempfile import mkstemp, mkdtemp
55 from subprocess import Popen, PIPE
56 from tarfile import TarFile
58 from inspect import getargspec
61 from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
63 from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
64 backref, MapperExtension, EXT_CONTINUE, object_mapper, clear_mappers
65 from sqlalchemy import types as sqltypes
66 from sqlalchemy.orm.collections import attribute_mapped_collection
67 from sqlalchemy.ext.associationproxy import association_proxy
69 # Don't remove this, we re-export the exceptions to scripts which import us
70 from sqlalchemy.exc import *
71 from sqlalchemy.orm.exc import NoResultFound
73 # Only import Config until Queue stuff is changed to store its config
75 from config import Config
76 from textutils import fix_maintainer
77 from dak_exceptions import DBUpdateError, NoSourceFieldError, FileExistsError
79 # suppress some deprecation warnings in squeeze related to sqlalchemy
81 warnings.filterwarnings('ignore', \
82 "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
84 warnings.filterwarnings('ignore', \
85 "Predicate of partial index .* ignored during reflection", \
89 ################################################################################
91 # Patch in support for the debversion field type so that it works during
95 # that is for sqlalchemy 0.6
96 UserDefinedType = sqltypes.UserDefinedType
98 # this one for sqlalchemy 0.5
99 UserDefinedType = sqltypes.TypeEngine
# Custom column type mapping PostgreSQL's 'debversion' type. The method
# bodies are elided from this listing -- presumably get_col_spec() returns
# 'debversion' and both processors pass values through unchanged. TODO confirm.
101 class DebVersion(UserDefinedType):
102 def get_col_spec(self):
105 def bind_processor(self, dialect):
108 # ' = None' is needed for sqlalchemy 0.5:
109 def result_processor(self, dialect, coltype = None):
# Register DebVersion for table reflection on the supported SQLAlchemy
# versions only. NOTE(review): the [0:3] slice would misparse a hypothetical
# version string like '0.10'.
112 sa_major_version = sqlalchemy.__version__[0:3]
113 if sa_major_version in ["0.5", "0.6"]:
114 from sqlalchemy.databases import postgres
115 postgres.ischema_names['debversion'] = DebVersion
117 raise Exception("dak only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
119 ################################################################################
121 __all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion']
123 ################################################################################
# Decorator implementing the common optional-'session' parameter convention:
# if the caller supplies no session, a private one is created and committed;
# otherwise the caller's session is only flushed (via commit_or_flush).
125 def session_wrapper(fn):
127 Wrapper around common ".., session=None):" handling. If the wrapped
128 function is called without passing 'session', we create a local one
129 and destroy it when the function ends.
131 Also attaches a commit_or_flush method to the session; if we created a
132 local session, this is a synonym for session.commit(), otherwise it is a
133 synonym for session.flush().
136 def wrapped(*args, **kwargs):
137 private_transaction = False
139 # Find the session object
140 session = kwargs.get('session')
# Compare the positional-argument count against fn's declared parameter
# list to decide whether a session was passed positionally.
143 if len(args) <= len(getargspec(fn)[0]) - 1:
144 # No session specified as last argument or in kwargs
145 private_transaction = True
146 session = kwargs['session'] = DBConn().session()
148 # Session is last argument in args
# NOTE(review): assigning args[-1] requires args to be a list here --
# presumably a tuple-to-list conversion happens on an elided line. TODO confirm.
152 session = args[-1] = DBConn().session()
153 private_transaction = True
# A private session commits on the caller's behalf; a borrowed one only flushes.
155 if private_transaction:
156 session.commit_or_flush = session.commit
158 session.commit_or_flush = session.flush
161 return fn(*args, **kwargs)
163 if private_transaction:
164 # We created a session; close it.
# Preserve the wrapped function's metadata for introspection (Python 2 style).
167 wrapped.__doc__ = fn.__doc__
168 wrapped.func_name = fn.func_name
172 __all__.append('session_wrapper')
174 ################################################################################
# Base class shared by all mapped ORM classes: JSON/repr helpers, not-NULL
# validation and convenience accessors for the owning SQLAlchemy session.
176 class ORMObject(object):
178 ORMObject is a base class for all ORM classes mapped by SQLalchemy. All
179 derived classes must implement the properties() method.
182 def properties(self):
184 This method should be implemented by all derived classes and returns a
185 list of the important properties. The properties 'created' and
186 'modified' will be added automatically. A suffix '_count' should be
187 added to properties that are lists or query objects. The most important
188 property name should be returned as the first element in the list
189 because it is used by repr().
# json(): serialize the properties() values to a JSON string.
195 Returns a JSON representation of the object based on the properties
196 returned from the properties() method.
199 # add created and modified
200 all_properties = self.properties() + ['created', 'modified']
201 for property in all_properties:
202 # check for list or query
# '_count' suffix: report len()/count() of the underlying collection
# instead of serializing its members.
203 if property[-6:] == '_count':
204 real_property = property[:-6]
205 if not hasattr(self, real_property):
207 value = getattr(self, real_property)
208 if hasattr(value, '__len__'):
211 elif hasattr(value, 'count'):
212 # query (but not during validation)
213 if self.in_validation:
215 value = value.count()
217 raise KeyError('Do not understand property %s.' % property)
219 if not hasattr(self, property):
222 value = getattr(self, property)
226 elif isinstance(value, ORMObject):
227 # use repr() for ORMObject types
230 # we want a string for all other types because json cannot
233 data[property] = value
234 return json.dumps(data)
# classname(): helper used by the repr/str style methods below.
238 Returns the name of the class.
240 return type(self).__name__
244 Returns a short string representation of the object using the first
245 element from the properties() method.
247 primary_property = self.properties()[0]
248 value = getattr(self, primary_property)
249 return '<%s %s>' % (self.classname(), str(value))
253 Returns a human readable form of the object using the properties()
256 return '<%s %s>' % (self.classname(), self.json())
258 def not_null_constraints(self):
260 Returns a list of properties that must be not NULL. Derived classes
261 should override this method if needed.
# Template for the DBUpdateError message raised by validation below.
265 validation_message = \
266 "Validation failed because property '%s' must not be empty in object\n%s"
# Guard flag: while True, json() skips Query.count() so that building
# str(self) during validation cannot trigger a second flush.
268 in_validation = False
272 This function validates the not NULL constraints as returned by
273 not_null_constraints(). It raises the DBUpdateError exception if
# A populated '<property>_id' foreign-key column also satisfies the constraint.
276 for property in self.not_null_constraints():
277 # TODO: It is a bit awkward that the mapper configuration allow
278 # directly setting the numeric _id columns. We should get rid of it
280 if hasattr(self, property + '_id') and \
281 getattr(self, property + '_id') is not None:
283 if not hasattr(self, property) or getattr(self, property) is None:
284 # str() might lead to races due to a 2nd flush
285 self.in_validation = True
286 message = self.validation_message % (property, str(self))
287 self.in_validation = False
288 raise DBUpdateError(message)
# get(): classmethod-style primary-key lookup shortcut.
292 def get(cls, primary_key, session = None):
294 This is a support function that allows getting an object by its primary
297 Architecture.get(3[, session])
299 instead of the more verbose
301 session.query(Architecture).get(3)
303 return session.query(cls).get(primary_key)
305 def session(self, replace = False):
307 Returns the current session that is associated with the object. May
308 return None is object is in detached state.
311 return object_session(self)
313 def clone(self, session = None):
315 Clones the current object in a new session and returns the new clone. A
316 fresh session is created if the optional session parameter is not
317 provided. The function will fail if a session is provided and has
320 RATIONALE: SQLAlchemy's session is not thread safe. This method clones
321 an existing object to allow several threads to work with their own
322 instances of an ORMObject.
324 WARNING: Only persistent (committed) objects can be cloned. Changes
325 made to the original object that are not committed yet will get lost.
326 The session of the new object will always be rolled back to avoid
# clone() body: flush, then re-fetch the same row by primary key in the
# target session.
330 if self.session() is None:
331 raise RuntimeError( \
332 'Method clone() failed for detached object:\n%s' % self)
333 self.session().flush()
334 mapper = object_mapper(self)
335 primary_key = mapper.primary_key_from_instance(self)
336 object_class = self.__class__
338 session = DBConn().session()
# Refuse caller-provided sessions that carry unflushed changes.
339 elif len(session.new) + len(session.dirty) + len(session.deleted) > 0:
340 raise RuntimeError( \
341 'Method clone() failed due to unflushed changes in session.')
342 new_object = session.query(object_class).get(primary_key)
344 if new_object is None:
345 raise RuntimeError( \
346 'Method clone() failed for non-persistent object:\n%s' % self)
349 __all__.append('ORMObject')
351 ################################################################################
# Mapper extension hooking ORMObject.validate() into insert/update events.
# The method bodies are elided here -- presumably each calls
# instance.validate() and returns EXT_CONTINUE. TODO confirm.
353 class Validator(MapperExtension):
355 This class calls the validate() method for each instance for the
356 'before_update' and 'before_insert' events. A global object validator is
357 used for configuring the individual mappers.
360 def before_update(self, mapper, connection, instance):
364 def before_insert(self, mapper, connection, instance):
# Shared singleton passed to the individual mapper() configurations.
368 validator = Validator()
370 ################################################################################
class Architecture(ORMObject):
    """ORM class for an architecture (e.g. 'amd64'); compares equal to its
    plain name string."""

    def __init__(self, arch_string = None, description = None):
        self.arch_string = arch_string
        self.description = description

    def __eq__(self, val):
        # Anything but a plain string falls back to the default comparison.
        if not isinstance(val, str):
            return NotImplemented
        return self.arch_string == val

    def __ne__(self, val):
        if not isinstance(val, str):
            return NotImplemented
        return self.arch_string != val

    def properties(self):
        # First entry is the primary property used by ORMObject's repr().
        return ['arch_string', 'arch_id', 'suites_count']

    def not_null_constraints(self):
        return ['arch_string']
395 __all__.append('Architecture')
# Look up a single Architecture row by name; the try/return lines around the
# query are elided in this listing (presumably 'return q.one()' / 'return None').
398 def get_architecture(architecture, session=None):
400 Returns database id for given C{architecture}.
402 @type architecture: string
403 @param architecture: The name of the architecture
405 @type session: Session
406 @param session: Optional SQLA session object (a temporary one will be
407 generated if not supplied)
410 @return: Architecture object for the given arch (None if not present)
413 q = session.query(Architecture).filter_by(arch_string=architecture)
417 except NoResultFound:
420 __all__.append('get_architecture')
422 # TODO: should be removed because the implementation is too trivial
424 def get_architecture_suites(architecture, session=None):
426 Returns list of Suite objects for given C{architecture} name
428 @type architecture: str
429 @param architecture: Architecture name to search for
431 @type session: Session
432 @param session: Optional SQL session object (a temporary one will be
433 generated if not supplied)
436 @return: list of Suite objects for the given name (may be empty)
# NOTE(review): get_architecture() documents returning None for an unknown
# arch, so '.suites' would raise AttributeError in that case. TODO confirm.
439 return get_architecture(architecture, session).suites
441 __all__.append('get_architecture_suites')
443 ################################################################################
# Trivial mapped class for the 'archive' table; attributes are filled in by
# the SQLAlchemy mapper.
445 class Archive(object):
446 def __init__(self, *args, **kwargs):
# Presumably inside an elided 'def __repr__(self):' -- TODO confirm.
450 return '<Archive %s>' % self.archive_name
452 __all__.append('Archive')
# Look up an Archive row by (lower-cased) name; the try/return lines are
# elided (presumably 'return q.one()' / 'return None').
455 def get_archive(archive, session=None):
457 returns database id for given C{archive}.
459 @type archive: string
460 @param archive: the name of the arhive
462 @type session: Session
463 @param session: Optional SQLA session object (a temporary one will be
464 generated if not supplied)
467 @return: Archive object for the given name (None if not present)
# Archive names are matched case-insensitively by lower-casing the input.
470 archive = archive.lower()
472 q = session.query(Archive).filter_by(archive_name=archive)
476 except NoResultFound:
479 __all__.append('get_archive')
481 ################################################################################
# Association of one content pathname with one binary package; the __init__
# assignments are elided from this listing.
483 class BinContents(ORMObject):
484 def __init__(self, file = None, binary = None):
488 def properties(self):
489 return ['file', 'binary']
491 __all__.append('BinContents')
493 ################################################################################
def subprocess_setup():
    """Restore the default SIGPIPE action in a child process.

    Python installs its own SIGPIPE handling on startup, which is usually
    not what non-Python subprocesses expect; this is meant to be used as a
    Popen preexec_fn so children get SIG_DFL before exec.
    """
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# A binary package row ('binaries' table) plus helpers that inspect the
# actual .deb file in the pool.
500 class DBBinary(ORMObject):
501 def __init__(self, package = None, source = None, version = None, \
502 maintainer = None, architecture = None, poolfile = None, \
504 self.package = package
506 self.version = version
507 self.maintainer = maintainer
508 self.architecture = architecture
509 self.poolfile = poolfile
510 self.binarytype = binarytype
514 return self.binary_id
516 def properties(self):
517 return ['package', 'version', 'maintainer', 'source', 'architecture', \
518 'poolfile', 'binarytype', 'fingerprint', 'install_date', \
519 'suites_count', 'binary_id', 'contents_count', 'extra_sources']
521 def not_null_constraints(self):
522 return ['package', 'version', 'maintainer', 'source', 'poolfile', \
525 metadata = association_proxy('key', 'value')
527 def get_component_name(self):
528 return self.poolfile.location.component.component_name
# scan_contents(): list regular files shipped in the .deb by streaming
# 'dpkg-deb --fsys-tarfile' output through the tarfile module.
530 def scan_contents(self):
532 Yields the contents of the package. Only regular files are yielded and
533 the path names are normalized after converting them from either utf-8
534 or iso8859-1 encoding. It yields the string ' <EMPTY PACKAGE>' if the
535 package does not contain any regular file.
537 fullpath = self.poolfile.fullpath
538 dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE,
539 preexec_fn = subprocess_setup)
540 tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
541 for member in tar.getmembers():
542 if not member.isdir():
543 name = normpath(member.name)
544 # enforce proper utf-8 encoding
547 except UnicodeDecodeError:
548 name = name.decode('iso8859-1').encode('utf-8')
# read_control(): return the raw control stanza extracted from the .deb.
# NOTE(review): the file is opened in text mode ('r') and no close() is
# visible in this listing; 'rb' plus a context manager would be safer.
554 def read_control(self):
556 Reads the control information from a binary.
559 @return: stanza text of the control section.
562 fullpath = self.poolfile.fullpath
563 deb_file = open(fullpath, 'r')
564 stanza = apt_inst.debExtractControl(deb_file)
569 def read_control_fields(self):
571 Reads the control information from a binary and return
575 @return: fields of the control section as a dictionary.
578 stanza = self.read_control()
579 return apt_pkg.TagSection(stanza)
581 __all__.append('DBBinary')
# Return every Suite containing a binary with the given package name.
584 def get_suites_binary_in(package, session=None):
586 Returns list of Suite objects which given C{package} name is in
589 @param package: DBBinary package name to search for
592 @return: list of Suite objects for the given package
595 return session.query(Suite).filter(Suite.binaries.any(DBBinary.package == package)).all()
597 __all__.append('get_suites_binary_in')
# Find the component of the newest matching binary across a set of suites.
# NOTE(review): arch_list uses a mutable default argument ([]); harmless
# here since it is only read, but a None default would be the usual idiom.
600 def get_component_by_package_suite(package, suite_list, arch_list=[], session=None):
602 Returns the component name of the newest binary package in suite_list or
603 None if no package is found. The result can be optionally filtered by a list
604 of architecture names.
607 @param package: DBBinary package name to search for
609 @type suite_list: list of str
610 @param suite_list: list of suite_name items
612 @type arch_list: list of str
613 @param arch_list: optional list of arch_string items that defaults to []
615 @rtype: str or NoneType
616 @return: name of component or None
619 q = session.query(DBBinary).filter_by(package = package). \
620 join(DBBinary.suites).filter(Suite.suite_name.in_(suite_list))
621 if len(arch_list) > 0:
622 q = q.join(DBBinary.architecture). \
623 filter(Architecture.arch_string.in_(arch_list))
# Order newest-first by the version column and take the first hit.
624 binary = q.order_by(desc(DBBinary.version)).first()
628 return binary.get_component_name()
630 __all__.append('get_component_by_package_suite')
632 ################################################################################
# Trivial mapped class for the 'binary_acl' table.
634 class BinaryACL(object):
635 def __init__(self, *args, **kwargs):
639 return '<BinaryACL %s>' % self.binary_acl_id
641 __all__.append('BinaryACL')
643 ################################################################################
# Trivial mapped class for the 'binary_acl_map' table.
645 class BinaryACLMap(object):
646 def __init__(self, *args, **kwargs):
650 return '<BinaryACLMap %s>' % self.binary_acl_map_id
652 __all__.append('BinaryACLMap')
654 ################################################################################
659 ArchiveDir "%(archivepath)s";
660 OverrideDir "%(overridedir)s";
661 CacheDir "%(cachedir)s";
666 Packages::Compress ". bzip2 gzip";
667 Sources::Compress ". bzip2 gzip";
672 bindirectory "incoming"
677 BinOverride "override.sid.all3";
678 BinCacheDB "packages-accepted.db";
680 FileList "%(filelist)s";
683 Packages::Extensions ".deb .udeb";
686 bindirectory "incoming/"
689 BinOverride "override.sid.all3";
690 SrcOverride "override.sid.all3.src";
691 FileList "%(filelist)s";
# A build queue (e.g. buildd incoming): regenerates apt metadata, expires
# old files and copies files in from the pool or from a policy queue.
695 class BuildQueue(object):
696 def __init__(self, *args, **kwargs):
700 return '<BuildQueue %s>' % self.queue_name
# write_metadata(): rebuild Packages/Sources/Release for this queue by
# shelling out to apt-ftparchive and gpg.
702 def write_metadata(self, starttime, force=False):
703 # Do we write out metafiles?
704 if not (force or self.generate_metadata):
707 session = DBConn().session().object_session(self)
709 fl_fd = fl_name = ac_fd = ac_name = None
711 arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
712 startdir = os.getcwd()
715 # Grab files we want to include
716 newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
717 newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
718 # Write file list with newer files
719 (fl_fd, fl_name) = mkstemp()
721 os.write(fl_fd, '%s\n' % n.fullpath)
726 # Write minimal apt.conf
727 # TODO: Remove hardcoding from template
728 (ac_fd, ac_name) = mkstemp()
729 os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
731 'cachedir': cnf["Dir::Cache"],
732 'overridedir': cnf["Dir::Override"],
736 # Run apt-ftparchive generate
737 os.chdir(os.path.dirname(ac_name))
# NOTE(review): os.system() interpolates config/db values into a shell
# command line throughout this method; fine for trusted config, but
# subprocess with an argument list would be safer.
738 os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
740 # Run apt-ftparchive release
741 # TODO: Eww - fix this
742 bname = os.path.basename(self.path)
746 # We have to remove the Release file otherwise it'll be included in the
749 os.unlink(os.path.join(bname, 'Release'))
753 os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
755 # Crude hack with open and append, but this whole section is and should be redone.
756 if self.notautomatic:
757 release=open("Release", "a")
758 release.write("NotAutomatic: yes\n")
# Sign the Release file with the configured key.
763 keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
764 if cnf.has_key("Dinstall::SigningPubKeyring"):
765 keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
# NOTE(review): the trailing quotes in the literal below ('Release"""')
# parse as "...Release" plus an adjacent empty string -- harmless but odd.
767 os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
769 # Move the files if we got this far
770 os.rename('Release', os.path.join(bname, 'Release'))
772 os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
774 # Clean up any left behind files
# clean_and_update(): expire files past stay_of_execution and remove
# metadata files / links no longer referenced by the queue.
801 def clean_and_update(self, starttime, Logger, dryrun=False):
802 """WARNING: This routine commits for you"""
803 session = DBConn().session().object_session(self)
805 if self.generate_metadata and not dryrun:
806 self.write_metadata(starttime)
808 # Grab files older than our execution time
809 older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
810 older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
816 Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
818 Logger.log(["I: Removing %s from the queue" % o.fullpath])
819 os.unlink(o.fullpath)
822 # If it wasn't there, don't worry
823 if e.errno == ENOENT:
826 # TODO: Replace with proper logging call
827 Logger.log(["E: Could not remove %s" % o.fullpath])
# Remove generated metadata files and stale links that no queue entry claims.
834 for f in os.listdir(self.path):
835 if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
838 if not self.contains_filename(f):
839 fp = os.path.join(self.path, f)
841 Logger.log(["I: Would remove unused link %s" % fp])
843 Logger.log(["I: Removing unused link %s" % fp])
847 Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
849 def contains_filename(self, filename):
852 @returns True if filename is supposed to be in the queue; False otherwise
854 session = DBConn().session().object_session(self)
855 if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
857 elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
# add_file_from_pool(): symlink (or copy) a pool file into this queue and
# record it as a BuildQueueFile.
861 def add_file_from_pool(self, poolfile):
862 """Copies a file into the pool. Assumes that the PoolFile object is
863 attached to the same SQLAlchemy session as the Queue object is.
865 The caller is responsible for committing after calling this function."""
866 poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
868 # Check if we have a file of this name or this ID already
869 for f in self.queuefiles:
870 if (f.fileid is not None and f.fileid == poolfile.file_id) or \
871 (f.poolfile is not None and f.poolfile.filename == poolfile_basename):
872 # In this case, update the BuildQueueFile entry so we
873 # don't remove it too early
874 f.lastused = datetime.now()
875 DBConn().session().object_session(poolfile).add(f)
878 # Prepare BuildQueueFile object
879 qf = BuildQueueFile()
880 qf.build_queue_id = self.queue_id
881 qf.filename = poolfile_basename
883 targetpath = poolfile.fullpath
884 queuepath = os.path.join(self.path, poolfile_basename)
888 # We need to copy instead of symlink
890 utils.copy(targetpath, queuepath)
891 # NULL in the fileid field implies a copy
894 os.symlink(targetpath, queuepath)
895 qf.fileid = poolfile.file_id
896 except FileExistsError:
897 if not poolfile.identical_to(queuepath):
902 # Get the same session as the PoolFile is using and add the qf to it
903 DBConn().session().object_session(poolfile).add(qf)
# add_changes_from_policy_queue(): copy a .changes plus all referenced files.
907 def add_changes_from_policy_queue(self, policyqueue, changes):
909 Copies a changes from a policy queue together with its poolfiles.
911 @type policyqueue: PolicyQueue
912 @param policyqueue: policy queue to copy the changes from
914 @type changes: DBChange
915 @param changes: changes to copy to this build queue
917 for policyqueuefile in changes.files:
918 self.add_file_from_policy_queue(policyqueue, policyqueuefile)
919 for poolfile in changes.poolfiles:
920 self.add_file_from_pool(poolfile)
922 def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
924 Copies a file from a policy queue.
925 Assumes that the policyqueuefile is attached to the same SQLAlchemy
926 session as the Queue object is. The caller is responsible for
927 committing after calling this function.
929 @type policyqueue: PolicyQueue
930 @param policyqueue: policy queue to copy the file from
932 @type policyqueuefile: ChangePendingFile
933 @param policyqueuefile: file to be added to the build queue
935 session = DBConn().session().object_session(policyqueuefile)
937 # Is the file already there?
939 f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
940 f.lastused = datetime.now()
942 except NoResultFound:
943 pass # continue below
945 # We have to add the file.
946 f = BuildQueuePolicyFile()
948 f.file = policyqueuefile
949 f.filename = policyqueuefile.filename
951 source = os.path.join(policyqueue.path, policyqueuefile.filename)
954 # Always copy files from policy queues as they might move around.
956 utils.copy(source, target)
957 except FileExistsError:
958 if not policyqueuefile.identical_to(target):
966 __all__.append('BuildQueue')
# Look up a BuildQueue row by name; the try/return lines are elided
# (presumably 'return q.one()' / 'return None'). The docstring mentions
# creation, but only a lookup is visible here.
969 def get_build_queue(queuename, session=None):
971 Returns BuildQueue object for given C{queue name}, creating it if it does not
974 @type queuename: string
975 @param queuename: The name of the queue
977 @type session: Session
978 @param session: Optional SQLA session object (a temporary one will be
979 generated if not supplied)
982 @return: BuildQueue object for the given queue
985 q = session.query(BuildQueue).filter_by(queue_name=queuename)
989 except NoResultFound:
992 __all__.append('get_build_queue')
994 ################################################################################
996 class BuildQueueFile(object):
998 BuildQueueFile represents a file in a build queue coming from a pool.
1001 def __init__(self, *args, **kwargs):
1005 return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
# Absolute on-disk path: queue directory plus the stored file name.
1009 return os.path.join(self.buildqueue.path, self.filename)
1012 __all__.append('BuildQueueFile')
1014 ################################################################################
1016 class BuildQueuePolicyFile(object):
1018 BuildQueuePolicyFile represents a file in a build queue that comes from a
1019 policy queue (and not a pool).
1022 def __init__(self, *args, **kwargs):
1026 #def filename(self):
1027 # return self.file.filename
# Absolute on-disk path: queue directory plus the stored file name.
1031 return os.path.join(self.build_queue.path, self.filename)
1033 __all__.append('BuildQueuePolicyFile')
1035 ################################################################################
# Trivial mapped class for a binary attached to a pending .changes.
1037 class ChangePendingBinary(object):
1038 def __init__(self, *args, **kwargs):
1042 return '<ChangePendingBinary %s>' % self.change_pending_binary_id
1044 __all__.append('ChangePendingBinary')
1046 ################################################################################
# A file attached to a pending .changes in a policy queue.
1048 class ChangePendingFile(object):
1049 def __init__(self, *args, **kwargs):
1053 return '<ChangePendingFile %s>' % self.change_pending_file_id
# identical_to(): cheap size comparison first, then a sha256 comparison.
1055 def identical_to(self, filename):
1057 compare size and hash with the given file
1060 @return: true if the given file has the same size and hash as this object; false otherwise
1062 st = os.stat(filename)
1063 if self.size != st.st_size:
# NOTE(review): the file is opened in text mode ('r') and no close() is
# visible in this listing; 'rb' plus a context manager would be safer.
1066 f = open(filename, "r")
1067 sha256sum = apt_pkg.sha256sum(f)
1068 if sha256sum != self.sha256sum:
1073 __all__.append('ChangePendingFile')
1075 ################################################################################
# Trivial mapped class for a source attached to a pending .changes.
1077 class ChangePendingSource(object):
1078 def __init__(self, *args, **kwargs):
1082 return '<ChangePendingSource %s>' % self.change_pending_source_id
1084 __all__.append('ChangePendingSource')
1086 ################################################################################
class Component(ORMObject):
    """An archive component (e.g. 'main'); compares equal to its plain
    name string."""

    def __init__(self, component_name = None):
        self.component_name = component_name

    def __eq__(self, val):
        # Anything but a plain string falls back to the default comparison.
        if not isinstance(val, str):
            return NotImplemented
        return self.component_name == val

    def __ne__(self, val):
        if not isinstance(val, str):
            return NotImplemented
        return self.component_name != val

    def properties(self):
        # First entry is the primary property used by ORMObject's repr().
        return ['component_name', 'component_id', 'description', \
            'location_count', 'meets_dfsg', 'overrides_count']

    def not_null_constraints(self):
        return ['component_name']
1112 __all__.append('Component')
# Look up a Component row by (lower-cased) name; the try/return lines are
# elided (presumably 'return q.one()' / 'return None').
1115 def get_component(component, session=None):
1117 Returns database id for given C{component}.
1119 @type component: string
1120 @param component: The name of the override type
1123 @return: the database id for the given component
# Component names are stored lower-case.
1126 component = component.lower()
1128 q = session.query(Component).filter_by(component_name=component)
1132 except NoResultFound:
1135 __all__.append('get_component')
def get_component_names(session=None):
    """
    Returns list of strings of component names.

    @rtype: list
    @return: list of strings of component names
    """
    names = []
    for component in session.query(Component).all():
        names.append(component.component_name)
    return names
1148 __all__.append('get_component_names')
1150 ################################################################################
# Trivial mapped class for the 'config' table (name/value pairs).
1152 class DBConfig(object):
1153 def __init__(self, *args, **kwargs):
1157 return '<DBConfig %s>' % self.name
1159 __all__.append('DBConfig')
1161 ################################################################################
# Get-or-create for the content_file_names table; returns the row id.
1164 def get_or_set_contents_file_id(filename, session=None):
1166 Returns database id for given filename.
1168 If no matching file is found, a row is inserted.
1170 @type filename: string
1171 @param filename: The filename
1172 @type session: SQLAlchemy
1173 @param session: Optional SQL session object (a temporary one will be
1174 generated if not supplied). If not passed, a commit will be performed at
1175 the end of the function, otherwise the caller is responsible for commiting.
1178 @return: the database id for the given component
1181 q = session.query(ContentFilename).filter_by(filename=filename)
1184 ret = q.one().cafilename_id
1185 except NoResultFound:
# Not present yet: insert, then flush/commit so the generated id is populated.
1186 cf = ContentFilename()
1187 cf.filename = filename
1189 session.commit_or_flush()
1190 ret = cf.cafilename_id
1194 __all__.append('get_or_set_contents_file_id')
# Raw-SQL query joining the contents, binaries, override and section tables.
1197 def get_contents(suite, overridetype, section=None, session=None):
1199 Returns contents for a suite / overridetype combination, limiting
1200 to a section if not None.
1203 @param suite: Suite object
1205 @type overridetype: OverrideType
1206 @param overridetype: OverrideType object
1208 @type section: Section
1209 @param section: Optional section object to limit results to
1211 @type session: SQLAlchemy
1212 @param session: Optional SQL session object (a temporary one will be
1213 generated if not supplied)
1215 @rtype: ResultsProxy
1216 @return: ResultsProxy object set up to return tuples of (filename, section,
1220 # find me all of the contents for a given suite
1221 contents_q = """SELECT (p.path||'/'||n.file) AS fn,
1225 FROM content_associations c join content_file_paths p ON (c.filepath=p.id)
1226 JOIN content_file_names n ON (c.filename=n.id)
1227 JOIN binaries b ON (b.id=c.binary_pkg)
1228 JOIN override o ON (o.package=b.package)
1229 JOIN section s ON (s.id=o.section)
1230 WHERE o.suite = :suiteid AND o.type = :overridetypeid
1231 AND b.type=:overridetypename"""
# All caller-supplied values go through bound parameters, never string
# interpolation -- no SQL injection risk here.
1233 vals = {'suiteid': suite.suite_id,
1234 'overridetypeid': overridetype.overridetype_id,
1235 'overridetypename': overridetype.overridetype}
1237 if section is not None:
1238 contents_q += " AND s.id = :sectionid"
1239 vals['sectionid'] = section.section_id
1241 contents_q += " ORDER BY fn"
1243 return session.execute(contents_q, vals)
1245 __all__.append('get_contents')
1247 ################################################################################
1249 class ContentFilepath(object):
1250 def __init__(self, *args, **kwargs):
1254 return '<ContentFilepath %s>' % self.filepath
1256 __all__.append('ContentFilepath')
@session_wrapper
def get_or_set_contents_path_id(filepath, session=None):
    """
    Returns database id for given path.

    If no matching file is found, a row is inserted.

    @type filepath: string
    @param filepath: The filepath

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.

    @rtype: int
    @return: the database id for the given path
    """

    q = session.query(ContentFilepath).filter_by(filepath=filepath)

    try:
        ret = q.one().cafilepath_id
    except NoResultFound:
        # Not there yet: insert and pick up the generated id.
        cf = ContentFilepath()
        cf.filepath = filepath
        session.add(cf)
        session.commit_or_flush()
        ret = cf.cafilepath_id

    return ret

__all__.append('get_or_set_contents_path_id')
1292 ################################################################################
1294 class ContentAssociation(object):
1295 def __init__(self, *args, **kwargs):
1299 return '<ContentAssociation %s>' % self.ca_id
1301 __all__.append('ContentAssociation')
1303 def insert_content_paths(binary_id, fullpaths, session=None):
1305 Make sure given path is associated with given binary id
1307 @type binary_id: int
1308 @param binary_id: the id of the binary
1309 @type fullpaths: list
1310 @param fullpaths: the list of paths of the file being associated with the binary
1311 @type session: SQLAlchemy session
1312 @param session: Optional SQLAlchemy session. If this is passed, the caller
1313 is responsible for ensuring a transaction has begun and committing the
1314 results or rolling back based on the result code. If not passed, a commit
1315 will be performed at the end of the function, otherwise the caller is
1316 responsible for commiting.
1318 @return: True upon success
1321 privatetrans = False
1323 session = DBConn().session()
1328 def generate_path_dicts():
1329 for fullpath in fullpaths:
1330 if fullpath.startswith( './' ):
1331 fullpath = fullpath[2:]
1333 yield {'filename':fullpath, 'id': binary_id }
1335 for d in generate_path_dicts():
1336 session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
1345 traceback.print_exc()
1347 # Only rollback if we set up the session ourself
1354 __all__.append('insert_content_paths')
1356 ################################################################################
1358 class DSCFile(object):
1359 def __init__(self, *args, **kwargs):
1363 return '<DSCFile %s>' % self.dscfile_id
1365 __all__.append('DSCFile')
@session_wrapper
def get_dscfiles(dscfile_id=None, source_id=None, poolfile_id=None, session=None):
    """
    Returns a list of DSCFiles which may be empty

    @type dscfile_id: int (optional)
    @param dscfile_id: the dscfile_id of the DSCFiles to find

    @type source_id: int (optional)
    @param source_id: the source id related to the DSCFiles to find

    @type poolfile_id: int (optional)
    @param poolfile_id: the poolfile id related to the DSCFiles to find

    @rtype: list
    @return: Possibly empty list of DSCFiles
    """

    q = session.query(DSCFile)

    if dscfile_id is not None:
        q = q.filter_by(dscfile_id=dscfile_id)

    if source_id is not None:
        q = q.filter_by(source_id=source_id)

    if poolfile_id is not None:
        q = q.filter_by(poolfile_id=poolfile_id)

    return q.all()

__all__.append('get_dscfiles')
1400 ################################################################################
class ExternalOverride(ORMObject):
    """ORM row for the external_overrides table (key/value per package)."""
    def __init__(self, *args, **kwargs):
        # Attributes are filled in by the SQLAlchemy mapper.
        pass

    def __repr__(self):
        return '<ExternalOverride %s = %s: %s>' % (self.package, self.key, self.value)

__all__.append('ExternalOverride')
1411 ################################################################################
class PoolFile(ORMObject):
    """A file in the archive pool, with its location and checksums."""
    def __init__(self, filename = None, location = None, filesize = -1, \
        md5sum = None):
        self.filename = filename
        self.location = location
        self.filesize = filesize
        self.md5sum = md5sum

    @property
    def fullpath(self):
        # Absolute path: location path + pool-relative filename.
        return os.path.join(self.location.path, self.filename)

    def is_valid(self, filesize = -1, md5sum = None):
        """Return True if size and md5sum both match this file's record."""
        return self.filesize == long(filesize) and self.md5sum == md5sum

    def properties(self):
        return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
            'sha256sum', 'location', 'source', 'binary', 'last_used']

    def not_null_constraints(self):
        return ['filename', 'md5sum', 'location']

    def identical_to(self, filename):
        """
        compare size and hash with the given file

        @rtype: bool
        @return: true if the given file has the same size and hash as this object; false otherwise
        """
        st = os.stat(filename)
        if self.filesize != st.st_size:
            return False

        # Size matches; fall back to the (expensive) sha256 comparison.
        f = open(filename, "r")
        sha256sum = apt_pkg.sha256sum(f)
        if sha256sum != self.sha256sum:
            return False

        return True

__all__.append('PoolFile')
@session_wrapper
def check_poolfile(filename, filesize, md5sum, location_id, session=None):
    """
    Returns a tuple:
    (ValidFileFound [boolean], PoolFile object or None)

    @type filename: string
    @param filename: the filename of the file to check against the DB

    @type filesize: int
    @param filesize: the size of the file to check against the DB

    @type md5sum: string
    @param md5sum: the md5sum of the file to check against the DB

    @type location_id: int
    @param location_id: the id of the location to look in

    @rtype: tuple
    @return: Tuple of length 2.
                 - If valid pool file found: (C{True}, C{PoolFile object})
                 - If valid pool file not found:
                     - (C{False}, C{None}) if no file found
                     - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
    """

    poolfile = session.query(Location).get(location_id). \
        files.filter_by(filename=filename).first()
    valid = False
    if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
        valid = True

    return (valid, poolfile)

__all__.append('check_poolfile')
# TODO: the implementation can trivially be inlined at the place where the
# function is called
@session_wrapper
def get_poolfile_by_id(file_id, session=None):
    """
    Returns a PoolFile objects or None for the given id

    @type file_id: int
    @param file_id: the id of the file to look for

    @rtype: PoolFile or None
    @return: either the PoolFile object or None
    """

    return session.query(PoolFile).get(file_id)

__all__.append('get_poolfile_by_id')
@session_wrapper
def get_poolfile_like_name(filename, session=None):
    """
    Returns an array of PoolFile objects which are like the given name

    @type filename: string
    @param filename: the filename of the file to check against the DB

    @rtype: array
    @return: array of PoolFile objects
    """

    # TODO: There must be a way of properly using bind parameters with %FOO%
    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))

    return q.all()

__all__.append('get_poolfile_like_name')
@session_wrapper
def add_poolfile(filename, datadict, location_id, session=None):
    """
    Add a new file to the pool

    @type filename: string
    @param filename: filename

    @type datadict: dict
    @param datadict: dict with needed data

    @type location_id: int
    @param location_id: database id of the location

    @rtype: PoolFile
    @return: the PoolFile object created
    """
    poolfile = PoolFile()
    poolfile.filename = filename
    poolfile.filesize = datadict["size"]
    poolfile.md5sum = datadict["md5sum"]
    poolfile.sha1sum = datadict["sha1sum"]
    poolfile.sha256sum = datadict["sha256sum"]
    poolfile.location_id = location_id

    session.add(poolfile)
    # Flush to get a file id (NB: This is not a commit)
    session.flush()

    return poolfile

__all__.append('add_poolfile')
1561 ################################################################################
class Fingerprint(ORMObject):
    """ORM row for the fingerprint table (an OpenPGP key fingerprint)."""
    def __init__(self, fingerprint = None):
        self.fingerprint = fingerprint

    def properties(self):
        return ['fingerprint', 'fingerprint_id', 'keyring', 'uid', \
            'binary_acl_map_count', 'source_acl', 'binary_acl']

    def not_null_constraints(self):
        return ['fingerprint']

__all__.append('Fingerprint')
@session_wrapper
def get_fingerprint(fpr, session=None):
    """
    Returns Fingerprint object for given fpr.

    @type fpr: string
    @param fpr: The fpr to find / add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied).

    @rtype: Fingerprint
    @return: the Fingerprint object for the given fpr or None
    """

    q = session.query(Fingerprint).filter_by(fingerprint=fpr)

    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret

__all__.append('get_fingerprint')
@session_wrapper
def get_or_set_fingerprint(fpr, session=None):
    """
    Returns Fingerprint object for given fpr.

    If no matching fpr is found, a row is inserted.

    @type fpr: string
    @param fpr: The fpr to find / add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.
    A flush will be performed either way.

    @rtype: Fingerprint
    @return: the Fingerprint object for the given fpr
    """

    q = session.query(Fingerprint).filter_by(fingerprint=fpr)

    try:
        ret = q.one()
    except NoResultFound:
        # Unknown fingerprint: insert a bare row for it.
        fingerprint = Fingerprint()
        fingerprint.fingerprint = fpr
        session.add(fingerprint)
        session.commit_or_flush()
        ret = fingerprint

    return ret

__all__.append('get_or_set_fingerprint')
1638 ################################################################################
# Helper routine for Keyring class
def get_ldap_name(entry):
    """
    Build a display name from an LDAP entry by joining its cn, mn and sn
    attributes, skipping empty values and the placeholder "-".

    @type entry: dict
    @param entry: LDAP attribute dict (values are lists of strings)

    @rtype: string
    @return: space-joined name components
    """
    name = []
    for k in ["cn", "mn", "sn"]:
        ret = entry.get(k)
        # LDAP uses "-" as an explicit "no value" marker.
        if ret and ret[0] != "" and ret[0] != "-":
            name.append(ret[0])
    return " ".join(name)
1649 ################################################################################
1651 class Keyring(object):
1652 gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
1653 " --with-colons --fingerprint --fingerprint"
1658 def __init__(self, *args, **kwargs):
1662 return '<Keyring %s>' % self.keyring_name
1664 def de_escape_gpg_str(self, txt):
1665 esclist = re.split(r'(\\x..)', txt)
1666 for x in range(1,len(esclist),2):
1667 esclist[x] = "%c" % (int(esclist[x][2:],16))
1668 return "".join(esclist)
1670 def parse_address(self, uid):
1671 """parses uid and returns a tuple of real name and email address"""
1673 (name, address) = email.Utils.parseaddr(uid)
1674 name = re.sub(r"\s*[(].*[)]", "", name)
1675 name = self.de_escape_gpg_str(name)
1678 return (name, address)
1680 def load_keys(self, keyring):
1681 if not self.keyring_id:
1682 raise Exception('Must be initialized with database information')
1684 k = os.popen(self.gpg_invocation % keyring, "r")
1689 field = line.split(":")
1690 if field[0] == "pub":
1693 (name, addr) = self.parse_address(field[9])
1695 self.keys[key]["email"] = addr
1696 self.keys[key]["name"] = name
1697 self.keys[key]["fingerprints"] = []
1699 elif key and field[0] == "sub" and len(field) >= 12:
1700 signingkey = ("s" in field[11])
1701 elif key and field[0] == "uid":
1702 (name, addr) = self.parse_address(field[9])
1703 if "email" not in self.keys[key] and "@" in addr:
1704 self.keys[key]["email"] = addr
1705 self.keys[key]["name"] = name
1706 elif signingkey and field[0] == "fpr":
1707 self.keys[key]["fingerprints"].append(field[9])
1708 self.fpr_lookup[field[9]] = key
1710 def import_users_from_ldap(self, session):
1714 LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
1715 LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]
1717 l = ldap.open(LDAPServer)
1718 l.simple_bind_s("","")
1719 Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
1720 "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
1721 ["uid", "keyfingerprint", "cn", "mn", "sn"])
1723 ldap_fin_uid_id = {}
1730 uid = entry["uid"][0]
1731 name = get_ldap_name(entry)
1732 fingerprints = entry["keyFingerPrint"]
1734 for f in fingerprints:
1735 key = self.fpr_lookup.get(f, None)
1736 if key not in self.keys:
1738 self.keys[key]["uid"] = uid
1742 keyid = get_or_set_uid(uid, session).uid_id
1743 byuid[keyid] = (uid, name)
1744 byname[uid] = (keyid, name)
1746 return (byname, byuid)
1748 def generate_users_from_keyring(self, format, session):
1752 for x in self.keys.keys():
1753 if "email" not in self.keys[x]:
1755 self.keys[x]["uid"] = format % "invalid-uid"
1757 uid = format % self.keys[x]["email"]
1758 keyid = get_or_set_uid(uid, session).uid_id
1759 byuid[keyid] = (uid, self.keys[x]["name"])
1760 byname[uid] = (keyid, self.keys[x]["name"])
1761 self.keys[x]["uid"] = uid
1764 uid = format % "invalid-uid"
1765 keyid = get_or_set_uid(uid, session).uid_id
1766 byuid[keyid] = (uid, "ungeneratable user id")
1767 byname[uid] = (keyid, "ungeneratable user id")
1769 return (byname, byuid)
1771 __all__.append('Keyring')
@session_wrapper
def get_keyring(keyring, session=None):
    """
    If C{keyring} does not have an entry in the C{keyrings} table yet, return None
    If C{keyring} already has an entry, simply return the existing Keyring

    @type keyring: string
    @param keyring: the keyring name

    @rtype: Keyring
    @return: the Keyring object for this keyring
    """

    q = session.query(Keyring).filter_by(keyring_name=keyring)

    try:
        return q.one()
    except NoResultFound:
        return None

__all__.append('get_keyring')
@session_wrapper
def get_active_keyring_paths(session=None):
    """
    @rtype: list of strings
    @return: list of active keyring paths, highest priority first
    """
    return [ x.keyring_name for x in session.query(Keyring).filter(Keyring.active == True).order_by(desc(Keyring.priority)).all() ]

__all__.append('get_active_keyring_paths')
@session_wrapper
def get_primary_keyring_path(session=None):
    """
    Get the full path to the highest priority active keyring

    @rtype: str or None
    @return: path to the active keyring with the highest priority or None if no
             keyring is configured
    """
    keyrings = get_active_keyring_paths()

    if len(keyrings) > 0:
        return keyrings[0]
    else:
        return None

__all__.append('get_primary_keyring_path')
1823 ################################################################################
1825 class KeyringACLMap(object):
1826 def __init__(self, *args, **kwargs):
1830 return '<KeyringACLMap %s>' % self.keyring_acl_map_id
1832 __all__.append('KeyringACLMap')
1834 ################################################################################
1836 class DBChange(object):
1837 def __init__(self, *args, **kwargs):
1841 return '<DBChange %s>' % self.changesname
1843 def clean_from_queue(self):
1844 session = DBConn().session().object_session(self)
1846 # Remove changes_pool_files entries
1849 # Remove changes_pending_files references
1852 # Clear out of queue
1853 self.in_queue = None
1854 self.approved_for_id = None
1856 __all__.append('DBChange')
@session_wrapper
def get_dbchange(filename, session=None):
    """
    returns DBChange object for given C{filename}.

    @type filename: string
    @param filename: the name of the file

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: DBChange
    @return: DBChange object for the given filename (C{None} if not present)
    """
    q = session.query(DBChange).filter_by(changesname=filename)

    try:
        return q.one()
    except NoResultFound:
        return None

__all__.append('get_dbchange')
1883 ################################################################################
class Location(ORMObject):
    """ORM row for the location table (a directory holding pool files)."""
    def __init__(self, path = None, component = None):
        self.path = path
        self.component = component
        # the column 'type' should go away, see comment at mapper
        self.archive_type = 'pool'

    def properties(self):
        return ['path', 'location_id', 'archive_type', 'component', \
            'files_count']

    def not_null_constraints(self):
        return ['path', 'archive_type']

__all__.append('Location')
@session_wrapper
def get_location(location, component=None, archive=None, session=None):
    """
    Returns Location object for the given combination of location, component
    and archive

    @type location: string
    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}

    @type component: string
    @param component: the component name (if None, no restriction applied)

    @type archive: string
    @param archive: the archive name (if None, no restriction applied)

    @rtype: Location / None
    @return: Either a Location object or None if one can't be found
    """

    q = session.query(Location).filter_by(path=location)

    if archive is not None:
        q = q.join(Archive).filter_by(archive_name=archive)

    if component is not None:
        q = q.join(Component).filter_by(component_name=component)

    try:
        return q.one()
    except NoResultFound:
        return None

__all__.append('get_location')
1935 ################################################################################
class Maintainer(ORMObject):
    """ORM row for the maintainer table (an RFC822 maintainer string)."""
    def __init__(self, name = None):
        self.name = name

    def properties(self):
        return ['name', 'maintainer_id']

    def not_null_constraints(self):
        return ['name']

    def get_split_maintainer(self):
        """Return (name, email, rfc822 form, rfc2047 form) via fix_maintainer."""
        if not hasattr(self, 'name') or self.name is None:
            return ('', '', '', '')

        return fix_maintainer(self.name.strip())

__all__.append('Maintainer')
@session_wrapper
def get_or_set_maintainer(name, session=None):
    """
    Returns Maintainer object for given maintainer name.

    If no matching maintainer name is found, a row is inserted.

    @type name: string
    @param name: The maintainer name to add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.
    A flush will be performed either way.

    @rtype: Maintainer
    @return: the Maintainer object for the given maintainer
    """

    q = session.query(Maintainer).filter_by(name=name)
    try:
        ret = q.one()
    except NoResultFound:
        # Unknown maintainer: insert a row for it.
        maintainer = Maintainer()
        maintainer.name = name
        session.add(maintainer)
        session.commit_or_flush()
        ret = maintainer

    return ret

__all__.append('get_or_set_maintainer')
@session_wrapper
def get_maintainer(maintainer_id, session=None):
    """
    Return the name of the maintainer behind C{maintainer_id} or None if that
    maintainer_id is invalid.

    @type maintainer_id: int
    @param maintainer_id: the id of the maintainer

    @rtype: Maintainer
    @return: the Maintainer with this C{maintainer_id}
    """

    return session.query(Maintainer).get(maintainer_id)

__all__.append('get_maintainer')
2006 ################################################################################
2008 class NewComment(object):
2009 def __init__(self, *args, **kwargs):
2013 return '''<NewComment for '%s %s' (%s)>''' % (self.package, self.version, self.comment_id)
2015 __all__.append('NewComment')
@session_wrapper
def has_new_comment(package, version, session=None):
    """
    Returns true if the given combination of C{package}, C{version} has a comment.

    @type package: string
    @param package: name of the package

    @type version: string
    @param version: package version

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: boolean
    @return: true/false
    """

    q = session.query(NewComment)
    q = q.filter_by(package=package)
    q = q.filter_by(version=version)

    return bool(q.count() > 0)

__all__.append('has_new_comment')
@session_wrapper
def get_new_comments(package=None, version=None, comment_id=None, session=None):
    """
    Returns (possibly empty) list of NewComment objects for the given
    parameters

    @type package: string (optional)
    @param package: name of the package

    @type version: string (optional)
    @param version: package version

    @type comment_id: int (optional)
    @param comment_id: An id of a comment

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: A (possibly empty) list of NewComment objects will be returned
    """

    q = session.query(NewComment)
    if package is not None: q = q.filter_by(package=package)
    if version is not None: q = q.filter_by(version=version)
    if comment_id is not None: q = q.filter_by(comment_id=comment_id)

    return q.all()

__all__.append('get_new_comments')
2076 ################################################################################
class Override(ORMObject):
    """ORM row for the override table (per-suite package metadata)."""
    def __init__(self, package = None, suite = None, component = None, overridetype = None, \
        section = None, priority = None):
        self.package = package
        self.suite = suite
        self.component = component
        self.overridetype = overridetype
        self.section = section
        self.priority = priority

    def properties(self):
        return ['package', 'suite', 'component', 'overridetype', 'section', \
            'priority']

    def not_null_constraints(self):
        return ['package', 'suite', 'component', 'overridetype', 'section']

__all__.append('Override')
@session_wrapper
def get_override(package, suite=None, component=None, overridetype=None, session=None):
    """
    Returns Override object for the given parameters

    @type package: string
    @param package: The name of the package

    @type suite: string, list or None
    @param suite: The name of the suite (or suites if a list) to limit to.  If
                  None, don't limit.  Defaults to None.

    @type component: string, list or None
    @param component: The name of the component (or components if a list) to
                      limit to.  If None, don't limit.  Defaults to None.

    @type overridetype: string, list or None
    @param overridetype: The name of the overridetype (or overridetypes if a list) to
                         limit to.  If None, don't limit.  Defaults to None.

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: A (possibly empty) list of Override objects will be returned
    """

    q = session.query(Override)
    q = q.filter_by(package=package)

    # Each optional filter accepts a scalar or a list; normalize to a list.
    if suite is not None:
        if not isinstance(suite, list): suite = [suite]
        q = q.join(Suite).filter(Suite.suite_name.in_(suite))

    if component is not None:
        if not isinstance(component, list): component = [component]
        q = q.join(Component).filter(Component.component_name.in_(component))

    if overridetype is not None:
        if not isinstance(overridetype, list): overridetype = [overridetype]
        q = q.join(OverrideType).filter(OverrideType.overridetype.in_(overridetype))

    return q.all()

__all__.append('get_override')
2145 ################################################################################
class OverrideType(ORMObject):
    """ORM row for the override_type table (deb / udeb / dsc)."""
    def __init__(self, overridetype = None):
        self.overridetype = overridetype

    def properties(self):
        return ['overridetype', 'overridetype_id', 'overrides_count']

    def not_null_constraints(self):
        return ['overridetype']

__all__.append('OverrideType')
@session_wrapper
def get_override_type(override_type, session=None):
    """
    Returns OverrideType object for given C{override type}.

    @type override_type: string
    @param override_type: The name of the override type

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: OverrideType
    @return: the database id for the given override type
    """

    q = session.query(OverrideType).filter_by(overridetype=override_type)

    try:
        return q.one()
    except NoResultFound:
        return None

__all__.append('get_override_type')
2184 ################################################################################
2186 class PolicyQueue(object):
2187 def __init__(self, *args, **kwargs):
2191 return '<PolicyQueue %s>' % self.queue_name
2193 __all__.append('PolicyQueue')
@session_wrapper
def get_policy_queue(queuename, session=None):
    """
    Returns PolicyQueue object for given C{queue name}

    @type queuename: string
    @param queuename: The name of the queue

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: PolicyQueue
    @return: PolicyQueue object for the given queue
    """

    q = session.query(PolicyQueue).filter_by(queue_name=queuename)

    try:
        return q.one()
    except NoResultFound:
        return None

__all__.append('get_policy_queue')
@session_wrapper
def get_policy_queue_from_path(pathname, session=None):
    """
    Returns PolicyQueue object for given C{path name}

    @type pathname: string
    @param pathname: The path

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: PolicyQueue
    @return: PolicyQueue object for the given queue
    """

    q = session.query(PolicyQueue).filter_by(path=pathname)

    try:
        return q.one()
    except NoResultFound:
        return None

__all__.append('get_policy_queue_from_path')
2245 ################################################################################
class Priority(ORMObject):
    """ORM row for the priority table (required, important, ... extra)."""
    def __init__(self, priority = None, level = None):
        self.priority = priority
        self.level = level

    def properties(self):
        return ['priority', 'priority_id', 'level', 'overrides_count']

    def not_null_constraints(self):
        return ['priority', 'level']

    def __eq__(self, val):
        # Allow comparison directly against the priority name string.
        if isinstance(val, str):
            return (self.priority == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.priority != val)
        # This signals to use the normal comparison operator
        return NotImplemented

__all__.append('Priority')
@session_wrapper
def get_priority(priority, session=None):
    """
    Returns Priority object for given C{priority name}.

    @type priority: string
    @param priority: The name of the priority

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Priority
    @return: Priority object for the given priority
    """

    q = session.query(Priority).filter_by(priority=priority)

    try:
        return q.one()
    except NoResultFound:
        return None

__all__.append('get_priority')
@session_wrapper
def get_priorities(session=None):
    """
    Returns dictionary of priority names -> id mappings

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: dictionary
    @return: dictionary of priority names -> id mappings
    """

    ret = {}
    q = session.query(Priority)
    for x in q.all():
        ret[x.priority] = x.priority_id

    return ret

__all__.append('get_priorities')
2319 ################################################################################
class Section(ORMObject):
    """ORM row for the section table (admin, devel, libs, ...)."""
    def __init__(self, section = None):
        self.section = section

    def properties(self):
        return ['section', 'section_id', 'overrides_count']

    def not_null_constraints(self):
        return ['section']

    def __eq__(self, val):
        # Allow comparison directly against the section name string.
        if isinstance(val, str):
            return (self.section == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.section != val)
        # This signals to use the normal comparison operator
        return NotImplemented

__all__.append('Section')
@session_wrapper
def get_section(section, session=None):
    """
    Returns Section object for given C{section name}.

    @type section: string
    @param section: The name of the section

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Section
    @return: Section object for the given section name
    """

    q = session.query(Section).filter_by(section=section)

    try:
        return q.one()
    except NoResultFound:
        return None

__all__.append('get_section')
@session_wrapper
def get_sections(session=None):
    """
    Returns dictionary of section names -> id mappings

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: dictionary
    @return: dictionary of section names -> id mappings
    """

    ret = {}
    q = session.query(Section)
    for x in q.all():
        ret[x.section] = x.section_id

    return ret

__all__.append('get_sections')
2392 ################################################################################
class SrcContents(ORMObject):
    """ORM row for the src_contents table (a file shipped by a source pkg)."""
    def __init__(self, file = None, source = None):
        self.file = file
        self.source = source

    def properties(self):
        return ['file', 'source']

__all__.append('SrcContents')
2404 ################################################################################
from debian.debfile import Deb822

# Temporary Deb822 subclass to fix bugs with : handling; see #597249
class Dak822(Deb822):
    def _internal_parser(self, sequence, fields=None):
        """Parse an RFC822-ish paragraph into self, fixing colon handling."""
        # The key is non-whitespace, non-colon characters before any colon.
        key_part = r"^(?P<key>[^: \t\n\r\f\v]+)\s*:\s*"
        single = re.compile(key_part + r"(?P<data>\S.*?)\s*$")
        multi = re.compile(key_part + r"$")
        multidata = re.compile(r"^\s(?P<data>.+?)\s*$")

        wanted_field = lambda f: fields is None or f in fields

        if isinstance(sequence, basestring):
            sequence = sequence.splitlines()

        curkey = None
        content = ""

        for line in self.gpg_stripped_paragraph(sequence):
            m = single.match(line)
            if m:
                # Finish off the previous field before starting a new one.
                if curkey:
                    self[curkey] = content

                if not wanted_field(m.group('key')):
                    curkey = None
                    continue

                curkey = m.group('key')
                content = m.group('data')
                continue

            m = multi.match(line)
            if m:
                if curkey:
                    self[curkey] = content

                if not wanted_field(m.group('key')):
                    curkey = None
                    continue

                curkey = m.group('key')
                content = ""
                continue

            m = multidata.match(line)
            if m:
                content += '\n' + line # XXX not m.group('data')?
                continue

        # Flush the final field.
        if curkey:
            self[curkey] = content
class DBSource(ORMObject):
    """ORM row for the source table (a source package version)."""
    def __init__(self, source = None, version = None, maintainer = None, \
        changedby = None, poolfile = None, install_date = None):
        self.source = source
        self.version = version
        self.maintainer = maintainer
        self.changedby = changedby
        self.poolfile = poolfile
        self.install_date = install_date

    @property
    def pkid(self):
        return self.source_id

    def properties(self):
        return ['source', 'source_id', 'maintainer', 'changedby', \
            'fingerprint', 'poolfile', 'version', 'suites_count', \
            'install_date', 'binaries_count', 'uploaders_count']

    def not_null_constraints(self):
        return ['source', 'version', 'install_date', 'maintainer', \
            'changedby', 'poolfile', 'install_date']

    def read_control_fields(self):
        """
        Reads the control information from a dsc

        @rtype: tuple
        @return: fields is the dsc information in a dictionary form
        """
        fullpath = self.poolfile.fullpath
        fields = Dak822(open(self.poolfile.fullpath, 'r'))
        return fields

    metadata = association_proxy('key', 'value')

    def get_component_name(self):
        return self.poolfile.location.component.component_name

    def scan_contents(self):
        """
        Returns a set of names for non directories. The path names are
        normalized after converting them from either utf-8 or iso8859-1
        encoding.
        """
        fullpath = self.poolfile.fullpath
        from daklib.contents import UnpackedSource
        unpacked = UnpackedSource(fullpath)
        fileset = set()
        for name in unpacked.get_all_filenames():
            # enforce proper utf-8 encoding
            try:
                name.decode('utf-8')
            except UnicodeDecodeError:
                name = name.decode('iso8859-1').encode('utf-8')
            fileset.add(name)
        return fileset

__all__.append('DBSource')
@session_wrapper
def source_exists(source, source_version, suites = ["any"], session=None):
    """
    Ensure that source exists somewhere in the archive for the binary
    upload being processed.
      1. exact match     => 1.0-3
      2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1

    @type source: string
    @param source: source name

    @type source_version: string
    @param source_version: expected source version

    @type suites: list
    @param suites: list of suites to check in, default I{any}

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: int
    @return: returns 1 if a source with expected version is found, otherwise 0
    """

    ret = True

    from daklib.regexes import re_bin_only_nmu
    # Strip a binNMU suffix so 1.0-3+b1 matches source 1.0-3.
    orig_source_version = re_bin_only_nmu.sub('', source_version)

    for suite in suites:
        q = session.query(DBSource).filter_by(source=source). \
            filter(DBSource.version.in_([source_version, orig_source_version]))
        if suite != "any":
            # source must exist in 'suite' or a suite that is enhanced by 'suite'
            s = get_suite(suite, session)
            if s:
                enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
                considered_suites = [ vc.reference for vc in enhances_vcs ]
                considered_suites.append(s)

                q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))

        if q.count() > 0:
            continue

        # No source found so return not ok
        ret = False

    return ret

__all__.append('source_exists')
def get_suites_source_in(source, session=None):
    Returns list of Suite objects which given C{source} name is in
    @param source: DBSource package name to search for
    @return: list of Suite objects for the given source
    # Any suite containing at least one DBSource row with this name matches.
    return session.query(Suite).filter(Suite.sources.any(source=source)).all()

__all__.append('get_suites_source_in')
def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None):
    Returns list of DBSource objects for given C{source} name and other parameters
    @param source: DBSource package name to search for
    @type version: str or None
    @param version: DBSource version name to search for or None if not applicable
    @type dm_upload_allowed: bool
    @param dm_upload_allowed: If None, no effect. If True or False, only
    return packages with that dm_upload_allowed setting
    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)
    @return: list of DBSource objects for the given name (may be empty)
    q = session.query(DBSource).filter_by(source=source)

    # Only narrow the query for filters the caller actually supplied.
    if version is not None:
        q = q.filter_by(version=version)

    if dm_upload_allowed is not None:
        q = q.filter_by(dm_upload_allowed=dm_upload_allowed)

__all__.append('get_sources_from_name')
# FIXME: This function fails badly if it finds more than 1 source package and
# its implementation is trivial enough to be inlined.
def get_source_in_suite(source, suite, session=None):
    Returns a DBSource object for a combination of C{source} and C{suite}.
    - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
    - B{suite} - a suite name, eg. I{unstable}
    @type source: string
    @param source: source package name
    @param suite: the suite name
    @return: the version for I{source} in I{suite}
    # Delegate to Suite.get_sources(); a unique row is expected.
    q = get_suite(suite, session).get_sources(source)
    except NoResultFound:

__all__.append('get_source_in_suite')
def import_metadata_into_db(obj, session=None):
    This routine works on either DBBinary or DBSource objects and imports
    their metadata into the database
    fields = obj.read_control_fields()
    for k in fields.keys():
        # First try rendering the field value as a plain string.
        val = str(fields[k])
    except UnicodeEncodeError:
        # Fall back to UTF-8
        val = fields[k].encode('utf-8')
    except UnicodeEncodeError:
        # Finally try iso8859-1
        val = fields[k].encode('iso8859-1')
        # Otherwise we allow the exception to percolate up and we cause
        # a reject as someone is playing silly buggers

        # Store through the metadata association proxy; the key row is
        # created on demand by get_or_set_metadatakey().
        obj.metadata[get_or_set_metadatakey(k, session)] = val

    session.commit_or_flush()

__all__.append('import_metadata_into_db')
2683 ################################################################################
def split_uploaders(uploaders_list):
    """
    Split the Uploaders field into the individual uploaders and yield each of
    them. Beware: email addresses might contain commas.
    """
    # A comma only separates uploaders when it follows a closing '>'.
    # Replace those separator commas with a tab, then split on tabs.
    marked = re.sub(">[ ]*,", ">\t", uploaders_list)
    for chunk in marked.split("\t"):
        yield chunk.strip()
# Create the DBSource row (plus files/dsc_files rows) for an uploaded .dsc
# and return (source, dsc_component, dsc_location_id, pfs).
def add_dsc_to_db(u, filename, session=None):
    entry = u.pkg.files[filename]
    source.source = u.pkg.dsc["source"]
    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
    # If Changed-By isn't available, fall back to maintainer
    if u.pkg.changes.has_key("changed-by"):
        source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
        source.changedby_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
    source.install_date = datetime.now().date()

    dsc_component = entry["component"]
    dsc_location_id = entry["location id"]

    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")

    # Set up a new poolfile if necessary
    if not entry.has_key("files id") or not entry["files id"]:
        filename = entry["pool name"] + filename
        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
        pfs.append(poolfile)
        entry["files id"] = poolfile.file_id

    source.poolfile_id = entry["files id"]

    suite_names = u.pkg.changes["distribution"].keys()
    source.suites = session.query(Suite). \
        filter(Suite.suite_name.in_(suite_names)).all()

    # Add the source files to the DB (files and dsc_files)
    dscfile.source_id = source.source_id
    dscfile.poolfile_id = entry["files id"]
    session.add(dscfile)

    for dsc_file, dentry in u.pkg.dsc_files.items():
        df.source_id = source.source_id

        # If the .orig tarball is already in the pool, its
        # files id is stored in dsc_files by check_dsc().
        files_id = dentry.get("files id", None)

        # Find the entry in the files hash
        # TODO: Bail out here properly
        for f, e in u.pkg.files.items():

        if files_id is None:
            filename = dfentry["pool name"] + dsc_file

            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
            # FIXME: needs to check for -1/-2 and or handle exception
            if found and obj is not None:
                files_id = obj.file_id

        # If still not found, add it
        if files_id is None:
            # HACK: Force sha1sum etc into dentry
            dentry["sha1sum"] = dfentry["sha1sum"]
            dentry["sha256sum"] = dfentry["sha256sum"]
            poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
            pfs.append(poolfile)
            files_id = poolfile.file_id

        poolfile = get_poolfile_by_id(files_id, session)
        if poolfile is None:
            utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
        pfs.append(poolfile)

        df.poolfile_id = files_id

    # Add the src_uploaders to the DB
    # Refresh so the maintainer relation added above is populated.
    session.refresh(source)
    source.uploaders = [source.maintainer]
    if u.pkg.dsc.has_key("uploaders"):
        for up in split_uploaders(u.pkg.dsc["uploaders"]):
            source.uploaders.append(get_or_set_maintainer(up, session))

    return source, dsc_component, dsc_location_id, pfs

__all__.append('add_dsc_to_db')
def add_deb_to_db(u, filename, session=None):
    Contrary to what you might expect, this routine deals with both
    debs and udebs. That info is in 'dbtype', whilst 'type' is
    'deb' for both of them
    entry = u.pkg.files[filename]
    bin.package = entry["package"]
    bin.version = entry["version"]
    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
    bin.binarytype = entry["dbtype"]

    filename = entry["pool name"] + filename
    fullpath = os.path.join(cnf["Dir::Pool"], filename)
    if not entry.get("location id", None):
        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id

    if entry.get("files id", None):
        # NOTE(review): bin.poolfile_id is read here but only assigned on the
        # next line — presumably this should look up entry["files id"] (and
        # pass the session); confirm against callers.
        poolfile = get_poolfile_by_id(bin.poolfile_id)
        bin.poolfile_id = entry["files id"]
        poolfile = add_poolfile(filename, entry, entry["location id"], session)
        bin.poolfile_id = entry["files id"] = poolfile.file_id

    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)

    # If we couldn't find anything and the upload contains Arch: source,
    # fall back to trying the source package, source version uploaded
    # This maintains backwards compatibility with previous dak behaviour
    # and deals with slightly broken binary debs which don't properly
    # declare their source package name
    if len(bin_sources) == 0:
        if u.pkg.changes["architecture"].has_key("source") \
           and u.pkg.dsc.has_key("source") and u.pkg.dsc.has_key("version"):
            bin_sources = get_sources_from_name(u.pkg.dsc["source"], u.pkg.dsc["version"], session=session)

    # If we couldn't find a source here, we reject
    # TODO: Fix this so that it doesn't kill process-upload and instead just
    # performs a reject. To be honest, we should probably spot this
    # *much* earlier than here
    if len(bin_sources) != 1:
        raise NoSourceFieldError("Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
                                 (bin.package, bin.version, entry["architecture"],
                                  filename, bin.binarytype, u.pkg.changes["fingerprint"]))

    bin.source_id = bin_sources[0].source_id

    if entry.has_key("built-using"):
        # Each Built-Using reference must resolve to exactly one source.
        for srcname, version in entry["built-using"]:
            exsources = get_sources_from_name(srcname, version, session=session)
            if len(exsources) != 1:
                raise NoSourceFieldError("Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
                                         (srcname, version, bin.package, bin.version, entry["architecture"],
                                          filename, bin.binarytype, u.pkg.changes["fingerprint"]))

            bin.extra_sources.append(exsources[0])

    # Add and flush object so it has an ID
    suite_names = u.pkg.changes["distribution"].keys()
    bin.suites = session.query(Suite). \
        filter(Suite.suite_name.in_(suite_names)).all()

    # Deal with contents - disabled for now
    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
    #    print "REJECT\nCould not determine contents of package %s" % bin.package
    #    session.rollback()
    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)

    return bin, poolfile

__all__.append('add_deb_to_db')
2878 ################################################################################
# ORM class for the source_acl table (source upload permission levels).
class SourceACL(object):
    def __init__(self, *args, **kwargs):

        return '<SourceACL %s>' % self.source_acl_id

__all__.append('SourceACL')
2889 ################################################################################
# ORM class for the src_format table (known source package formats).
class SrcFormat(object):
    def __init__(self, *args, **kwargs):

        return '<SrcFormat %s>' % (self.format_name)

__all__.append('SrcFormat')
2900 ################################################################################
# (display name, Suite attribute) pairs used when rendering a suite's
# details as text (see the SUITE_FIELDS loop in Suite below).
SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                 ('SuiteID', 'suite_id'),
                 ('Version', 'version'),
                 ('Origin', 'origin'),
                 ('Description', 'description'),
                 ('Untouchable', 'untouchable'),
                 ('Announce', 'announce'),
                 ('Codename', 'codename'),
                 ('OverrideCodename', 'overridecodename'),
                 ('ValidTime', 'validtime'),
                 ('Priority', 'priority'),
                 ('NotAutomatic', 'notautomatic'),
                 ('CopyChanges', 'copychanges'),
                 ('OverrideSuite', 'overridesuite')]
# Why the heck don't we have any UNIQUE constraints in table suite?
# TODO: Add UNIQUE constraints for appropriate columns.
class Suite(ORMObject):
    def __init__(self, suite_name = None, version = None):
        self.suite_name = suite_name
        self.version = version

    def properties(self):
        # Attributes exposed by ORMObject's generic representation machinery.
        return ['suite_name', 'version', 'sources_count', 'binaries_count', \

    def not_null_constraints(self):
        return ['suite_name']

    def __eq__(self, val):
        # Allow comparing a Suite directly against a suite-name string.
        if isinstance(val, str):
            return (self.suite_name == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.suite_name != val)
        # This signals to use the normal comparison operator
        return NotImplemented

        # Render each SUITE_FIELDS entry as "DisplayName: value".
        for disp, field in SUITE_FIELDS:
            val = getattr(self, field, None)
                ret.append("%s: %s" % (disp, val))

        return "\n".join(ret)

    def get_architectures(self, skipsrc=False, skipall=False):
        Returns list of Architecture objects
        @type skipsrc: boolean
        @param skipsrc: Whether to skip returning the 'source' architecture entry
        @type skipall: boolean
        @param skipall: Whether to skip returning the 'all' architecture entry
        @return: list of Architecture objects for the given name (may be empty)
        q = object_session(self).query(Architecture).with_parent(self)
            q = q.filter(Architecture.arch_string != 'source')
            q = q.filter(Architecture.arch_string != 'all')
        return q.order_by(Architecture.arch_string).all()

    def get_sources(self, source):
        Returns a query object representing DBSource that is part of C{suite}.
        - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
        @type source: string
        @param source: source package name
        @rtype: sqlalchemy.orm.query.Query
        @return: a query of DBSource
        session = object_session(self)
        return session.query(DBSource).filter_by(source = source). \

    def get_overridesuite(self):
        # With no override suite configured, the suite overrides itself.
        if self.overridesuite is None:
        return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()

__all__.append('Suite')
def get_suite(suite, session=None):
    Returns Suite object for given C{suite name}.
    @param suite: The name of the suite
    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)
    @return: Suite object for the requested suite name (None if not present)
    # Exactly one row is expected per suite name; a miss yields None.
    q = session.query(Suite).filter_by(suite_name=suite)
    except NoResultFound:

__all__.append('get_suite')
3027 ################################################################################
def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
    Returns list of Architecture objects for given C{suite} name. The list is
    empty if suite does not exist.
    @param suite: Suite name to search for
    @type skipsrc: boolean
    @param skipsrc: Whether to skip returning the 'source' architecture entry
    @type skipall: boolean
    @param skipall: Whether to skip returning the 'all' architecture entry
    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)
    @return: list of Architecture objects for the given name (may be empty)
        # get_suite() returns None for an unknown suite; the resulting
        # AttributeError is what the handler below catches.
        return get_suite(suite, session).get_architectures(skipsrc, skipall)
    except AttributeError:

__all__.append('get_suite_architectures')
3061 ################################################################################
# ORM class for the uid table (GPG key uids).
class Uid(ORMObject):
    def __init__(self, uid = None, name = None):

    def __eq__(self, val):
        # Allow comparing a Uid directly against a uid string.
        if isinstance(val, str):
            return (self.uid == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.uid != val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def properties(self):
        return ['uid', 'name', 'fingerprint']

    def not_null_constraints(self):

__all__.append('Uid')
def get_or_set_uid(uidname, session=None):
    Returns uid object for given uidname.
    If no matching uidname is found, a row is inserted.
    @type uidname: string
    @param uidname: The uid to add
    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.
    @return: the uid object for the given uidname
    q = session.query(Uid).filter_by(uid=uidname)
    except NoResultFound:
        # Not present yet: insert a new row and make it visible to the caller.
        session.commit_or_flush()

__all__.append('get_or_set_uid')
# Return the Uid attached to the given fingerprint string (None on no match).
def get_uid_from_fingerprint(fpr, session=None):
    q = session.query(Uid)
    q = q.join(Fingerprint).filter_by(fingerprint=fpr)
    except NoResultFound:

__all__.append('get_uid_from_fingerprint')
3134 ################################################################################
# ORM class for the upload_blocks table (per-source upload blocks).
class UploadBlock(object):
    def __init__(self, *args, **kwargs):

        return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)

__all__.append('UploadBlock')
3145 ################################################################################
# ORM class for the metadata_keys table: key names shared by the
# BinaryMetadata and SourceMetadata key/value pairs below.
class MetadataKey(ORMObject):
    def __init__(self, key = None):

    def properties(self):

    def not_null_constraints(self):

__all__.append('MetadataKey')
def get_or_set_metadatakey(keyname, session=None):
    Returns MetadataKey object for given keyname.
    If no matching keyname is found, a row is inserted.
    @type keyname: string
    @param keyname: The keyname to add
    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.
    @return: the metadatakey object for the given keyname
    q = session.query(MetadataKey).filter_by(key=keyname)
    except NoResultFound:
        # Insert a new key row on first use and make it visible.
        ret = MetadataKey(keyname)
        session.commit_or_flush()

__all__.append('get_or_set_metadatakey')
3191 ################################################################################
# ORM class for binaries_metadata: one key/value pair of a binary's
# control metadata (see import_metadata_into_db above).
class BinaryMetadata(ORMObject):
    def __init__(self, key = None, value = None, binary = None):
        self.binary = binary

    def properties(self):
        return ['binary', 'key', 'value']

    def not_null_constraints(self):

__all__.append('BinaryMetadata')
3207 ################################################################################
# ORM class for source_metadata: one key/value pair of a source package's
# control metadata (see import_metadata_into_db above).
class SourceMetadata(ORMObject):
    def __init__(self, key = None, value = None, source = None):
        self.source = source

    def properties(self):
        return ['source', 'key', 'value']

    def not_null_constraints(self):

__all__.append('SourceMetadata')
3223 ################################################################################
# ORM class for version_check: relates a suite to a reference suite via a
# check name (e.g. 'Enhances', as used in source_exists above).
class VersionCheck(ORMObject):
    def __init__(self, *args, **kwargs):

    def properties(self):
        #return ['suite_id', 'check', 'reference_id']

    def not_null_constraints(self):
        return ['suite', 'check', 'reference']

__all__.append('VersionCheck')
# Return the VersionCheck rows for a suite, optionally narrowed to one
# check name; always yields something iterable.
def get_version_checks(suite_name, check = None, session = None):
    suite = get_suite(suite_name, session)

    # Make sure that what we return is iterable so that list comprehensions
    # involving this don't cause a traceback
    q = session.query(VersionCheck).filter_by(suite=suite)
        q = q.filter_by(check=check)

__all__.append('get_version_checks')
3252 ################################################################################
3254 class DBConn(object):
3256 database module init.
    def __init__(self, *args, **kwargs):
        # Share state between all instances via the class-level
        # __shared_state dict, so there is effectively one connection.
        self.__dict__ = self.__shared_state

        # Only the first instantiation performs the (expensive) setup.
        if not getattr(self, 'initialised', False):
            self.initialised = True
            # SQL echo is enabled when a 'debug' kwarg is passed.
            self.debug = kwargs.has_key('debug')
    def __setuptables(self):
        # Names of database tables to reflect; each becomes an attribute
        # self.tbl_<name> on this object.
            'binaries_metadata',
            'build_queue_files',
            'build_queue_policy_files',
            'changes_pending_binaries',
            'changes_pending_files',
            'changes_pending_source',
            'changes_pending_files_map',
            'changes_pending_source_files',
            'changes_pool_files',
            'external_overrides',
            'extra_src_references',
        # TODO: the maintainer column in table override should be removed.
            'suite_architectures',
            'suite_build_queue_copy',
            'suite_src_formats',
        # Names below are database views (reflected as self.view_<name>);
        # NOTE(review): the 'views' sequence itself is defined in a part of
        # the file elided here — confirm where the tables tuple ends.
            'almost_obsolete_all_associations',
            'almost_obsolete_src_associations',
            'any_associations_source',
            'bin_associations_binaries',
            'binaries_suite_arch',
            'binfiles_suite_component_arch',
            'newest_all_associations',
            'newest_any_associations',
            'newest_src_association',
            'obsolete_all_associations',
            'obsolete_any_associations',
            'obsolete_any_by_all_associations',
            'obsolete_src_associations',
            'src_associations_bin',
            'src_associations_src',
            'suite_arch_by_name',

        # Reflect every table from the live database schema.
        for table_name in tables:
            table = Table(table_name, self.db_meta, \
                autoload=True, useexisting=True)
            setattr(self, 'tbl_%s' % table_name, table)

        for view_name in views:
            view = Table(view_name, self.db_meta, autoload=True)
            setattr(self, 'view_%s' % view_name, view)
    def __setupmappers(self):
        # Classical SQLAlchemy mappings: tie each ORM class in this module to
        # its reflected table, renaming raw columns (e.g. 'id') to friendlier
        # attribute names; 'validator' enforces not_null_constraints().
        mapper(Architecture, self.tbl_architecture,
           properties = dict(arch_id = self.tbl_architecture.c.id,
               suites = relation(Suite, secondary=self.tbl_suite_architectures,
                   order_by='suite_name',
                   backref=backref('architectures', order_by='arch_string'))),
           extension = validator)

        mapper(Archive, self.tbl_archive,
               properties = dict(archive_id = self.tbl_archive.c.id,
                                 archive_name = self.tbl_archive.c.name))

        mapper(BuildQueue, self.tbl_build_queue,
               properties = dict(queue_id = self.tbl_build_queue.c.id))

        mapper(BuildQueueFile, self.tbl_build_queue_files,
               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))

        mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
               build_queue = relation(BuildQueue, backref='policy_queue_files'),
               file = relation(ChangePendingFile, lazy='joined')))

        mapper(DBBinary, self.tbl_binaries,
               properties = dict(binary_id = self.tbl_binaries.c.id,
                                 package = self.tbl_binaries.c.package,
                                 version = self.tbl_binaries.c.version,
                                 maintainer_id = self.tbl_binaries.c.maintainer,
                                 maintainer = relation(Maintainer),
                                 source_id = self.tbl_binaries.c.source,
                                 source = relation(DBSource, backref='binaries'),
                                 arch_id = self.tbl_binaries.c.architecture,
                                 architecture = relation(Architecture),
                                 poolfile_id = self.tbl_binaries.c.file,
                                 poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
                                 binarytype = self.tbl_binaries.c.type,
                                 fingerprint_id = self.tbl_binaries.c.sig_fpr,
                                 fingerprint = relation(Fingerprint),
                                 install_date = self.tbl_binaries.c.install_date,
                                 suites = relation(Suite, secondary=self.tbl_bin_associations,
                                     backref=backref('binaries', lazy='dynamic')),
                                 extra_sources = relation(DBSource, secondary=self.tbl_extra_src_references,
                                     backref=backref('extra_binary_references', lazy='dynamic')),
                                 # 'key' backs the metadata association proxy
                                 # (dict keyed by MetadataKey).
                                 key = relation(BinaryMetadata, cascade='all',
                                     collection_class=attribute_mapped_collection('key'))),
               extension = validator)

        mapper(BinaryACL, self.tbl_binary_acl,
               properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))

        mapper(BinaryACLMap, self.tbl_binary_acl_map,
               properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
                                 fingerprint = relation(Fingerprint, backref="binary_acl_map"),
                                 architecture = relation(Architecture)))

        mapper(Component, self.tbl_component,
               properties = dict(component_id = self.tbl_component.c.id,
                                 component_name = self.tbl_component.c.name),
               extension = validator)

        mapper(DBConfig, self.tbl_config,
               properties = dict(config_id = self.tbl_config.c.id))

        mapper(DSCFile, self.tbl_dsc_files,
               properties = dict(dscfile_id = self.tbl_dsc_files.c.id,
                                 source_id = self.tbl_dsc_files.c.source,
                                 source = relation(DBSource),
                                 poolfile_id = self.tbl_dsc_files.c.file,
                                 poolfile = relation(PoolFile)))

        mapper(ExternalOverride, self.tbl_external_overrides,
               suite_id = self.tbl_external_overrides.c.suite,
               suite = relation(Suite),
               component_id = self.tbl_external_overrides.c.component,
               component = relation(Component)))

        mapper(PoolFile, self.tbl_files,
               properties = dict(file_id = self.tbl_files.c.id,
                                 filesize = self.tbl_files.c.size,
                                 location_id = self.tbl_files.c.location,
                                 location = relation(Location,
                                     # using lazy='dynamic' in the back
                                     # reference because we have A LOT of
                                     # files in one location
                                     backref=backref('files', lazy='dynamic'))),
               extension = validator)

        mapper(Fingerprint, self.tbl_fingerprint,
               properties = dict(fingerprint_id = self.tbl_fingerprint.c.id,
                                 uid_id = self.tbl_fingerprint.c.uid,
                                 uid = relation(Uid),
                                 keyring_id = self.tbl_fingerprint.c.keyring,
                                 keyring = relation(Keyring),
                                 source_acl = relation(SourceACL),
                                 binary_acl = relation(BinaryACL)),
               extension = validator)

        mapper(Keyring, self.tbl_keyrings,
               properties = dict(keyring_name = self.tbl_keyrings.c.name,
                                 keyring_id = self.tbl_keyrings.c.id))

        mapper(DBChange, self.tbl_changes,
               properties = dict(change_id = self.tbl_changes.c.id,
                                 poolfiles = relation(PoolFile,
                                     secondary=self.tbl_changes_pool_files,
                                     backref="changeslinks"),
                                 seen = self.tbl_changes.c.seen,
                                 source = self.tbl_changes.c.source,
                                 binaries = self.tbl_changes.c.binaries,
                                 architecture = self.tbl_changes.c.architecture,
                                 distribution = self.tbl_changes.c.distribution,
                                 urgency = self.tbl_changes.c.urgency,
                                 maintainer = self.tbl_changes.c.maintainer,
                                 changedby = self.tbl_changes.c.changedby,
                                 date = self.tbl_changes.c.date,
                                 version = self.tbl_changes.c.version,
                                 files = relation(ChangePendingFile,
                                     secondary=self.tbl_changes_pending_files_map,
                                     backref="changesfile"),
                                 in_queue_id = self.tbl_changes.c.in_queue,
                                 in_queue = relation(PolicyQueue,
                                     primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
                                 approved_for_id = self.tbl_changes.c.approved_for))

        mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))

        mapper(ChangePendingFile, self.tbl_changes_pending_files,
               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
                                 filename = self.tbl_changes_pending_files.c.filename,
                                 size = self.tbl_changes_pending_files.c.size,
                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))

        mapper(ChangePendingSource, self.tbl_changes_pending_source,
               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
                                 change = relation(DBChange),
                                 # maintainer/changedby both reference the
                                 # maintainer table, hence explicit joins.
                                 maintainer = relation(Maintainer,
                                     primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
                                 changedby = relation(Maintainer,
                                     primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
                                 fingerprint = relation(Fingerprint),
                                 source_files = relation(ChangePendingFile,
                                     secondary=self.tbl_changes_pending_source_files,
                                     backref="pending_sources")))

        mapper(KeyringACLMap, self.tbl_keyring_acl_map,
               properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
                                 keyring = relation(Keyring, backref="keyring_acl_map"),
                                 architecture = relation(Architecture)))

        mapper(Location, self.tbl_location,
               properties = dict(location_id = self.tbl_location.c.id,
                                 component_id = self.tbl_location.c.component,
                                 component = relation(Component, backref='location'),
                                 archive_id = self.tbl_location.c.archive,
                                 archive = relation(Archive),
                                 # FIXME: the 'type' column is old cruft and
                                 # should be removed in the future.
                                 archive_type = self.tbl_location.c.type),
               extension = validator)

        mapper(Maintainer, self.tbl_maintainer,
               properties = dict(maintainer_id = self.tbl_maintainer.c.id,
                                 maintains_sources = relation(DBSource, backref='maintainer',
                                     primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.maintainer)),
                                 changed_sources = relation(DBSource, backref='changedby',
                                     primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.changedby))),
               extension = validator)

        mapper(NewComment, self.tbl_new_comments,
               properties = dict(comment_id = self.tbl_new_comments.c.id))

        mapper(Override, self.tbl_override,
               properties = dict(suite_id = self.tbl_override.c.suite,
                                 suite = relation(Suite, \
                                     backref=backref('overrides', lazy='dynamic')),
                                 package = self.tbl_override.c.package,
                                 component_id = self.tbl_override.c.component,
                                 component = relation(Component, \
                                     backref=backref('overrides', lazy='dynamic')),
                                 priority_id = self.tbl_override.c.priority,
                                 priority = relation(Priority, \
                                     backref=backref('overrides', lazy='dynamic')),
                                 section_id = self.tbl_override.c.section,
                                 section = relation(Section, \
                                     backref=backref('overrides', lazy='dynamic')),
                                 overridetype_id = self.tbl_override.c.type,
                                 overridetype = relation(OverrideType, \
                                     backref=backref('overrides', lazy='dynamic'))))

        mapper(OverrideType, self.tbl_override_type,
               properties = dict(overridetype = self.tbl_override_type.c.type,
                                 overridetype_id = self.tbl_override_type.c.id))

        mapper(PolicyQueue, self.tbl_policy_queue,
               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))

        mapper(Priority, self.tbl_priority,
               properties = dict(priority_id = self.tbl_priority.c.id))

        mapper(Section, self.tbl_section,
               properties = dict(section_id = self.tbl_section.c.id,
                                 section=self.tbl_section.c.section))

        mapper(DBSource, self.tbl_source,
               properties = dict(source_id = self.tbl_source.c.id,
                                 version = self.tbl_source.c.version,
                                 maintainer_id = self.tbl_source.c.maintainer,
                                 poolfile_id = self.tbl_source.c.file,
                                 poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
                                 fingerprint_id = self.tbl_source.c.sig_fpr,
                                 fingerprint = relation(Fingerprint),
                                 changedby_id = self.tbl_source.c.changedby,
                                 srcfiles = relation(DSCFile,
                                     primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
                                 suites = relation(Suite, secondary=self.tbl_src_associations,
                                     backref=backref('sources', lazy='dynamic')),
                                 uploaders = relation(Maintainer,
                                     secondary=self.tbl_src_uploaders),
                                 # 'key' backs the metadata association proxy
                                 # on DBSource (dict keyed by MetadataKey).
                                 key = relation(SourceMetadata, cascade='all',
                                     collection_class=attribute_mapped_collection('key'))),
               extension = validator)

        mapper(SourceACL, self.tbl_source_acl,
               properties = dict(source_acl_id = self.tbl_source_acl.c.id))

        mapper(SrcFormat, self.tbl_src_format,
               properties = dict(src_format_id = self.tbl_src_format.c.id,
                                 format_name = self.tbl_src_format.c.format_name))

        mapper(Suite, self.tbl_suite,
               properties = dict(suite_id = self.tbl_suite.c.id,
                                 policy_queue = relation(PolicyQueue),
                                 copy_queues = relation(BuildQueue,
                                     secondary=self.tbl_suite_build_queue_copy),
                                 srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
                                     backref=backref('suites', lazy='dynamic'))),
               extension = validator)

        mapper(Uid, self.tbl_uid,
               properties = dict(uid_id = self.tbl_uid.c.id,
                                 fingerprint = relation(Fingerprint)),
               extension = validator)

        mapper(UploadBlock, self.tbl_upload_blocks,
               properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
                                 fingerprint = relation(Fingerprint, backref="uploadblocks"),
                                 uid = relation(Uid, backref="uploadblocks")))

        mapper(BinContents, self.tbl_bin_contents,
            binary = relation(DBBinary,
                backref=backref('contents', lazy='dynamic', cascade='all')),
            file = self.tbl_bin_contents.c.file))

        mapper(SrcContents, self.tbl_src_contents,
            source = relation(DBSource,
                backref=backref('contents', lazy='dynamic', cascade='all')),
            file = self.tbl_src_contents.c.file))

        mapper(MetadataKey, self.tbl_metadata_keys,
            key_id = self.tbl_metadata_keys.c.key_id,
            key = self.tbl_metadata_keys.c.key))

        mapper(BinaryMetadata, self.tbl_binaries_metadata,
            binary_id = self.tbl_binaries_metadata.c.bin_id,
            binary = relation(DBBinary),
            key_id = self.tbl_binaries_metadata.c.key_id,
            key = relation(MetadataKey),
            value = self.tbl_binaries_metadata.c.value))

        mapper(SourceMetadata, self.tbl_source_metadata,
            source_id = self.tbl_source_metadata.c.src_id,
            source = relation(DBSource),
            key_id = self.tbl_source_metadata.c.key_id,
            key = relation(MetadataKey),
            value = self.tbl_source_metadata.c.value))

        mapper(VersionCheck, self.tbl_version_check,
            suite_id = self.tbl_version_check.c.suite,
            # 'suite' and 'reference' both join the suite table, hence the
            # explicit primaryjoins; 'reference' is eagerly loaded.
            suite = relation(Suite, primaryjoin=self.tbl_version_check.c.suite==self.tbl_suite.c.id),
            reference_id = self.tbl_version_check.c.reference,
            reference = relation(Suite, primaryjoin=self.tbl_version_check.c.reference==self.tbl_suite.c.id, lazy='joined')))
3650 ## Connection functions
3651 def __createconn(self):
3652 from config import Config
3654 if cnf.has_key("DB::Service"):
3655 connstr = "postgresql://service=%s" % cnf["DB::Service"]
3656 elif cnf.has_key("DB::Host"):
3658 connstr = "postgresql://%s" % cnf["DB::Host"]
3659 if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
3660 connstr += ":%s" % cnf["DB::Port"]
3661 connstr += "/%s" % cnf["DB::Name"]
3664 connstr = "postgresql:///%s" % cnf["DB::Name"]
3665 if cnf.has_key("DB::Port") and cnf["DB::Port"] != "-1":
3666 connstr += "?port=%s" % cnf["DB::Port"]
3668 engine_args = { 'echo': self.debug }
3669 if cnf.has_key('DB::PoolSize'):
3670 engine_args['pool_size'] = int(cnf['DB::PoolSize'])
3671 if cnf.has_key('DB::MaxOverflow'):
3672 engine_args['max_overflow'] = int(cnf['DB::MaxOverflow'])
3673 if sa_major_version == '0.6' and cnf.has_key('DB::Unicode') and \
3674 cnf['DB::Unicode'] == 'false':
3675 engine_args['use_native_unicode'] = False
3677 # Monkey patch a new dialect in in order to support service= syntax
3678 import sqlalchemy.dialects.postgresql
3679 from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
3680 class PGDialect_psycopg2_dak(PGDialect_psycopg2):
3681 def create_connect_args(self, url):
3682 if str(url).startswith('postgresql://service='):
3684 servicename = str(url)[21:]
3685 return (['service=%s' % servicename], {})
3687 return PGDialect_psycopg2.create_connect_args(self, url)
3689 sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak
3692 self.db_pg = create_engine(connstr, **engine_args)
3693 self.db_meta = MetaData()
3694 self.db_meta.bind = self.db_pg
3695 self.db_smaker = sessionmaker(bind=self.db_pg,
3699 self.__setuptables()
3700 self.__setupmappers()
3702 except OperationalError as e:
3704 utils.fubar("Cannot connect to database (%s)" % str(e))
3706 self.pid = os.getpid()
3708 def session(self, work_mem = 0):
3710 Returns a new session object. If a work_mem parameter is provided a new
3711 transaction is started and the work_mem parameter is set for this
3712 transaction. The work_mem parameter is measured in MB. A default value
3713 will be used if the parameter is not set.
3715 # reinitialize DBConn in new processes
3716 if self.pid != os.getpid():
3719 session = self.db_smaker()
3721 session.execute("SET LOCAL work_mem TO '%d MB'" % work_mem)
# Export the connection singleton class as part of this module's public API.
__all__.append('DBConn')