5 @contact: Debian FTPMaster <ftpmaster@debian.org>
6 @copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
7 @copyright: 2008-2009 Mark Hymers <mhy@debian.org>
8 @copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
9 @copyright: 2009 Mike O'Connor <stew@debian.org>
10 @license: GNU General Public License version 2 or later
13 # This program is free software; you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation; either version 2 of the License, or
16 # (at your option) any later version.
18 # This program is distributed in the hope that it will be useful,
19 # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 # GNU General Public License for more details.
23 # You should have received a copy of the GNU General Public License
24 # along with this program; if not, write to the Free Software
25 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 ################################################################################
29 # < mhy> I need a funny comment
30 # < sgran> two peanuts were walking down a dark street
31 # < sgran> one was a-salted
32 # * mhy looks up the definition of "funny"
34 ################################################################################
47 import simplejson as json
49 from datetime import datetime, timedelta
50 from errno import ENOENT
51 from tempfile import mkstemp, mkdtemp
53 from inspect import getargspec
56 from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
58 from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
59 backref, MapperExtension, EXT_CONTINUE, object_mapper
60 from sqlalchemy import types as sqltypes
62 # Don't remove this, we re-export the exceptions to scripts which import us
63 from sqlalchemy.exc import *
64 from sqlalchemy.orm.exc import NoResultFound
66 # Only import Config until Queue stuff is changed to store its config
68 from config import Config
69 from textutils import fix_maintainer
70 from dak_exceptions import DBUpdateError, NoSourceFieldError
72 # suppress some deprecation warnings in squeeze related to sqlalchemy
74 warnings.filterwarnings('ignore', \
75 "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
77 # TODO: sqlalchemy needs some extra configuration to correctly reflect
78 # the ind_deb_contents_* indexes - we ignore the warnings at the moment
79 warnings.filterwarnings("ignore", 'Predicate of partial index', SAWarning)
82 ################################################################################
84 # Patch in support for the debversion field type so that it works during
88 # that is for sqlalchemy 0.6
89 UserDefinedType = sqltypes.UserDefinedType
91 # this one for sqlalchemy 0.5
92 UserDefinedType = sqltypes.TypeEngine
class DebVersion(UserDefinedType):
    # Custom SQLAlchemy column type for the PostgreSQL 'debversion' type
    # (Debian version strings).
    # NOTE(review): the method bodies are elided in this view of the file;
    # presumably they are pass-throughs (text column spec, no conversion).
    def get_col_spec(self):
    def bind_processor(self, dialect):
    # ' = None' is needed for sqlalchemy 0.5:
    def result_processor(self, dialect, coltype = None):
# Register the DebVersion type with SQLAlchemy's PostgreSQL dialect so that
# table reflection recognises 'debversion' columns.  Only the SQLAlchemy
# versions dak has been ported to are accepted; anything else is a hard error.
sa_major_version = sqlalchemy.__version__[0:3]
if sa_major_version in ["0.5", "0.6"]:
    from sqlalchemy.databases import postgres
    postgres.ischema_names['debversion'] = DebVersion
else:
    # Guard restored: the raise must only fire for unsupported versions,
    # not unconditionally.
    raise Exception("dak only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
112 ################################################################################
114 __all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion']
116 ################################################################################
def session_wrapper(fn):
    """
    Wrapper around common ".., session=None):" handling. If the wrapped
    function is called without passing 'session', we create a local one
    and destroy it when the function ends.

    Also attaches a commit_or_flush method to the session; if we created a
    local session, this is a synonym for session.commit(), otherwise it is a
    synonym for session.flush().
    """
    # NOTE(review): several lines (branch headers, try/finally, the final
    # return of 'wrapped') are elided in this view of the file.

    def wrapped(*args, **kwargs):
        private_transaction = False

        # Find the session object
        session = kwargs.get('session')

        # Compare the positional-argument count against fn's declared
        # signature to decide whether a session was passed positionally.
        if len(args) <= len(getargspec(fn)[0]) - 1:
            # No session specified as last argument or in kwargs
            private_transaction = True
            session = kwargs['session'] = DBConn().session()
        # Session is last argument in args
            session = args[-1] = DBConn().session()
            private_transaction = True

        if private_transaction:
            session.commit_or_flush = session.commit
            session.commit_or_flush = session.flush
        return fn(*args, **kwargs)
        if private_transaction:
            # We created a session; close it.

    # Preserve the wrapped function's metadata for introspection/docs.
    wrapped.__doc__ = fn.__doc__
    wrapped.func_name = fn.func_name
165 __all__.append('session_wrapper')
167 ################################################################################
class ORMObject(object):
    """
    ORMObject is a base class for all ORM classes mapped by SQLalchemy. All
    derived classes must implement the properties() method.
    """
    # NOTE(review): many lines of this class (several 'def' headers, branch
    # bodies, decorators) are elided in this view of the file; comments below
    # flag where visible code belongs to a method whose header is missing.

    def properties(self):
        """
        This method should be implemented by all derived classes and returns a
        list of the important properties. The properties 'created' and
        'modified' will be added automatically. A suffix '_count' should be
        added to properties that are lists or query objects. The most important
        property name should be returned as the first element in the list
        because it is used by repr().
        """

        # --- body of json(); its 'def' line is elided in this view ---
        Returns a JSON representation of the object based on the properties
        returned from the properties() method.

        # add created and modified
        all_properties = self.properties() + ['created', 'modified']
        for property in all_properties:
            # check for list or query
            if property[-6:] == '_count':
                real_property = property[:-6]
                if not hasattr(self, real_property):
                value = getattr(self, real_property)
                if hasattr(value, '__len__'):
                elif hasattr(value, 'count'):
                    # query objects expose a count() method
                    value = value.count()
                raise KeyError('Do not understand property %s.' % property)
            if not hasattr(self, property):
            value = getattr(self, property)
            elif isinstance(value, ORMObject):
                # use repr() for ORMObject types
            # we want a string for all other types because json cannot
            data[property] = value
        return json.dumps(data)

        # --- body of classname(); used by repr()/str() below ---
        Returns the name of the class.
        return type(self).__name__

        # --- body of __repr__; short form using the primary property ---
        Returns a short string representation of the object using the first
        element from the properties() method.
        primary_property = self.properties()[0]
        value = getattr(self, primary_property)
        return '<%s %s>' % (self.classname(), str(value))

        # --- body of __str__; verbose form including all properties ---
        Returns a human readable form of the object using the properties()
        return '<%s %s>' % (self.classname(), self.json())

    def not_null_constraints(self):
        """
        Returns a list of properties that must be not NULL. Derived classes
        should override this method if needed.
        """

    # Template used by validate() when a constraint is violated.
    validation_message = \
        "Validation failed because property '%s' must not be empty in object\n%s"

        # --- body of validate() ---
        This function validates the not NULL constraints as returned by
        not_null_constraints(). It raises the DBUpdateError exception if
        for property in self.not_null_constraints():
            # TODO: It is a bit awkward that the mapper configuration allow
            # directly setting the numeric _id columns. We should get rid of it
            if hasattr(self, property + '_id') and \
                getattr(self, property + '_id') is not None:
            if not hasattr(self, property) or getattr(self, property) is None:
                raise DBUpdateError(self.validation_message % \
                    (property, str(self)))

    # NOTE(review): a decorator line above get() is elided here; upstream this
    # is presumably a classmethod — confirm before relying on it.
    def get(cls, primary_key, session = None):
        """
        This is a support function that allows getting an object by its primary
        key, e.g.

        Architecture.get(3[, session])

        instead of the more verbose

        session.query(Architecture).get(3)
        """
        return session.query(cls).get(primary_key)

    def session(self, replace = False):
        """
        Returns the current session that is associated with the object. May
        return None is object is in detached state.
        """
        return object_session(self)

    def clone(self, session = None):
        """
        Clones the current object in a new session and returns the new clone. A
        fresh session is created if the optional session parameter is not
        provided. The function will fail if a session is provided and has
        unflushed changes.

        RATIONALE: SQLAlchemy's session is not thread safe. This method clones
        an existing object to allow several threads to work with their own
        instances of an ORMObject.

        WARNING: Only persistent (committed) objects can be cloned. Changes
        made to the original object that are not committed yet will get lost.
        The session of the new object will always be rolled back to avoid
        resource leaks.
        """
        if self.session() is None:
            raise RuntimeError( \
                'Method clone() failed for detached object:\n%s' % self)
        # Flush so the primary key is guaranteed to be populated.
        self.session().flush()
        mapper = object_mapper(self)
        primary_key = mapper.primary_key_from_instance(self)
        object_class = self.__class__
        # (guard elided: executed only when no session was passed in)
        session = DBConn().session()
        elif len(session.new) + len(session.dirty) + len(session.deleted) > 0:
            raise RuntimeError( \
                'Method clone() failed due to unflushed changes in session.')
        # Re-fetch the same row in the target session.
        new_object = session.query(object_class).get(primary_key)

        if new_object is None:
            raise RuntimeError( \
                'Method clone() failed for non-persistent object:\n%s' % self)
335 __all__.append('ORMObject')
337 ################################################################################
class Validator(MapperExtension):
    """
    This class calls the validate() method for each instance for the
    'before_update' and 'before_insert' events. A global object validator is
    used for configuring the individual mappers.
    """
    # NOTE(review): the method bodies are elided in this view; presumably
    # each calls instance.validate() before returning EXT_CONTINUE.

    def before_update(self, mapper, connection, instance):

    def before_insert(self, mapper, connection, instance):
354 validator = Validator()
356 ################################################################################
class Architecture(ORMObject):
    """ORM class for an architecture row; instances compare equal (or
    unequal) to plain strings matching their arch_string."""

    def __init__(self, arch_string = None, description = None):
        self.arch_string = arch_string
        self.description = description

    def __eq__(self, val):
        # Support direct comparison against a plain architecture name.
        if not isinstance(val, str):
            # This signals to use the normal comparison operator
            return NotImplemented
        return self.arch_string == val

    def __ne__(self, val):
        if not isinstance(val, str):
            # This signals to use the normal comparison operator
            return NotImplemented
        return self.arch_string != val

    def properties(self):
        # arch_string first: it is the primary property used by repr().
        return ['arch_string', 'arch_id', 'suites_count']

    def not_null_constraints(self):
        return ['arch_string']
381 __all__.append('Architecture')
def get_architecture(architecture, session=None):
    """
    Returns database id for given C{architecture}.

    @type architecture: string
    @param architecture: The name of the architecture

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @return: Architecture object for the given arch (None if not present)
    """
    q = session.query(Architecture).filter_by(arch_string=architecture)
    # NOTE(review): the try/return lines are elided in this view; the single
    # query result is presumably returned, with None on NoResultFound.
    except NoResultFound:
406 __all__.append('get_architecture')
408 # TODO: should be removed because the implementation is too trivial
def get_architecture_suites(architecture, session=None):
    """
    Returns list of Suite objects for given C{architecture} name

    @type architecture: str
    @param architecture: Architecture name to search for

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @return: list of Suite objects for the given name (may be empty)
    """
    # Delegate the lookup and return the architecture's suites collection.
    return get_architecture(architecture, session).suites
427 __all__.append('get_architecture_suites')
429 ################################################################################
class Archive(object):
    """ORM-mapped class for an archive entry, identified by archive_name."""
    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<Archive %s>' % self.archive_name
438 __all__.append('Archive')
def get_archive(archive, session=None):
    """
    returns database id for given C{archive}.

    @type archive: string
    @param archive: the name of the archive

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @return: Archive object for the given name (None if not present)
    """
    # Archive names are matched case-insensitively; normalise first.
    archive = archive.lower()

    q = session.query(Archive).filter_by(archive_name=archive)

    # NOTE(review): the try/return lines are elided in this view.
    except NoResultFound:
465 __all__.append('get_archive')
467 ################################################################################
class BinContents(ORMObject):
    """Association of a contents-file entry with a binary package."""

    def properties(self):
        # Fixed misspelled 'silf' parameter to the conventional 'self'
        # (it worked only because the argument is positional).
        # 'file' first: it is the primary property used by repr().
        return ['file', 'binary']
473 __all__.append('BinContents')
475 ################################################################################
class DBBinary(ORMObject):
    # NOTE(review): the final line of __init__'s signature (presumably the
    # binarytype parameter and its default) and the 'self.source' assignment
    # are elided in this view of the file.
    def __init__(self, package = None, source = None, version = None, \
        maintainer = None, architecture = None, poolfile = None, \
        self.package = package
        self.version = version
        self.maintainer = maintainer
        self.architecture = architecture
        self.poolfile = poolfile
        self.binarytype = binarytype

    def properties(self):
        # 'package' first: it is the primary property used by repr().
        return ['package', 'version', 'maintainer', 'source', 'architecture', \
            'poolfile', 'binarytype', 'fingerprint', 'install_date', \
            'suites_count', 'binary_id', 'contents_count']

    def not_null_constraints(self):
        # NOTE(review): the continuation of this list is elided in this view.
        return ['package', 'version', 'maintainer', 'source', 'poolfile', \

    def get_component_name(self):
        # The component is derived through the pool file's location.
        return self.poolfile.location.component.component_name
501 __all__.append('DBBinary')
def get_suites_binary_in(package, session=None):
    """
    Returns list of Suite objects which given C{package} name is in

    @type package: str
    @param package: DBBinary package name to search for

    @return: list of Suite objects for the given package
    """
    return session.query(Suite).filter(Suite.binaries.any(DBBinary.package == package)).all()
517 __all__.append('get_suites_binary_in')
def get_component_by_package_suite(package, suite_list, arch_list=[], session=None):
    """
    Returns the component name of the newest binary package in suite_list or
    None if no package is found. The result can be optionally filtered by a list
    of architecture names.

    @type package: str
    @param package: DBBinary package name to search for

    @type suite_list: list of str
    @param suite_list: list of suite_name items

    @type arch_list: list of str
    @param arch_list: optional list of arch_string items that defaults to []

    @rtype: str or NoneType
    @return: name of component or None
    """
    # NOTE(review): arch_list is a mutable default argument; it is only read
    # here, but a None default would be the safer idiom.
    q = session.query(DBBinary).filter_by(package = package). \
        join(DBBinary.suites).filter(Suite.suite_name.in_(suite_list))
    if len(arch_list) > 0:
        q = q.join(DBBinary.architecture). \
            filter(Architecture.arch_string.in_(arch_list))
    # Newest version wins.
    binary = q.order_by(desc(DBBinary.version)).first()
    # NOTE(review): the None-check branch is elided in this view.
    return binary.get_component_name()
550 __all__.append('get_component_by_package_suite')
552 ################################################################################
class BinaryACL(object):
    """ORM-mapped class for a binary ACL row."""
    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<BinaryACL %s>' % self.binary_acl_id
561 __all__.append('BinaryACL')
563 ################################################################################
class BinaryACLMap(object):
    """ORM-mapped class for a binary ACL mapping row."""
    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<BinaryACLMap %s>' % self.binary_acl_map_id
572 __all__.append('BinaryACLMap')
574 ################################################################################
579 ArchiveDir "%(archivepath)s";
580 OverrideDir "%(overridedir)s";
581 CacheDir "%(cachedir)s";
586 Packages::Compress ". bzip2 gzip";
587 Sources::Compress ". bzip2 gzip";
592 bindirectory "incoming"
597 BinOverride "override.sid.all3";
598 BinCacheDB "packages-accepted.db";
600 FileList "%(filelist)s";
603 Packages::Extensions ".deb .udeb";
606 bindirectory "incoming/"
609 BinOverride "override.sid.all3";
610 SrcOverride "override.sid.all3.src";
611 FileList "%(filelist)s";
class BuildQueue(object):
    """ORM-mapped build queue: a directory of recently-accepted packages with
    apt-ftparchive-generated metadata so buildds can fetch from it."""
    # NOTE(review): many lines of this class (guard branches, try/except
    # blocks, cleanup code) are elided in this view of the file.

    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<BuildQueue %s>' % self.queue_name

    def write_metadata(self, starttime, force=False):
        # Regenerate Packages/Sources/Release for this queue directory
        # using apt-ftparchive, then sign the Release file.
        # Do we write out metafiles?
        if not (force or self.generate_metadata):
        session = DBConn().session().object_session(self)

        fl_fd = fl_name = ac_fd = ac_name = None
        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
        startdir = os.getcwd()

        # Grab files we want to include
        newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
        # Write file list with newer files
        (fl_fd, fl_name) = mkstemp()
        os.write(fl_fd, '%s\n' % n.fullpath)

        # Write minimal apt.conf
        # TODO: Remove hardcoding from template
        (ac_fd, ac_name) = mkstemp()
        os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
                                            'cachedir': cnf["Dir::Cache"],
                                            'overridedir': cnf["Dir::Override"],

        # Run apt-ftparchive generate
        os.chdir(os.path.dirname(ac_name))
        os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))

        # Run apt-ftparchive release
        # TODO: Eww - fix this
        bname = os.path.basename(self.path)

        # We have to remove the Release file otherwise it'll be included in the
        os.unlink(os.path.join(bname, 'Release'))

        os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))

        # Crude hack with open and append, but this whole section is and should be redone.
        if self.notautomatic:
            release=open("Release", "a")
            release.write("NotAutomatic: yes")

        # Sign the Release file with the configured key(s).
        keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
        if cnf.has_key("Dinstall::SigningPubKeyring"):
            keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]

        os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))

        # Move the files if we got this far
        os.rename('Release', os.path.join(bname, 'Release'))
        os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))

        # Clean up any left behind files

    def clean_and_update(self, starttime, Logger, dryrun=False):
        """WARNING: This routine commits for you"""
        session = DBConn().session().object_session(self)

        if self.generate_metadata and not dryrun:
            self.write_metadata(starttime)

        # Grab files older than our execution time
        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()

            # (dry-run branch; guard elided in this view)
            Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
            Logger.log(["I: Removing %s from the queue" % o.fullpath])
            os.unlink(o.fullpath)
                # If it wasn't there, don't worry
                if e.errno == ENOENT:
            # TODO: Replace with proper logging call
            Logger.log(["E: Could not remove %s" % o.fullpath])

        # Remove stale metadata/advisory links that no BuildQueueFile row
        # references any more.
        for f in os.listdir(self.path):
            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
            except NoResultFound:
                fp = os.path.join(self.path, f)
                Logger.log(["I: Would remove unused link %s" % fp])
                Logger.log(["I: Removing unused link %s" % fp])
                Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])

    def add_file_from_pool(self, poolfile):
        """Copies a file into the pool. Assumes that the PoolFile object is
        attached to the same SQLAlchemy session as the Queue object is.

        The caller is responsible for committing after calling this function."""
        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]

        # Check if we have a file of this name or this ID already
        for f in self.queuefiles:
            if f.fileid is not None and f.fileid == poolfile.file_id or \
               f.poolfile.filename == poolfile_basename:
                # In this case, update the BuildQueueFile entry so we
                # don't remove it too early
                f.lastused = datetime.now()
                DBConn().session().object_session(poolfile).add(f)

        # Prepare BuildQueueFile object
        qf = BuildQueueFile()
        qf.build_queue_id = self.queue_id
        qf.lastused = datetime.now()
        qf.filename = poolfile_basename

        targetpath = poolfile.fullpath
        queuepath = os.path.join(self.path, poolfile_basename)

            # (copy-vs-symlink decision; guard elided in this view)
            # We need to copy instead of symlink
            utils.copy(targetpath, queuepath)
            # NULL in the fileid field implies a copy
            os.symlink(targetpath, queuepath)
            qf.fileid = poolfile.file_id

        # Get the same session as the PoolFile is using and add the qf to it
        DBConn().session().object_session(poolfile).add(qf)
814 __all__.append('BuildQueue')
def get_build_queue(queuename, session=None):
    """
    Returns BuildQueue object for given C{queue name}, creating it if it does not
    exist.

    @type queuename: string
    @param queuename: The name of the queue

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @return: BuildQueue object for the given queue
    """
    q = session.query(BuildQueue).filter_by(queue_name=queuename)

    # NOTE(review): the try/return lines are elided in this view.
    except NoResultFound:
840 __all__.append('get_build_queue')
842 ################################################################################
class BuildQueueFile(object):
    """ORM-mapped class for a file belonging to a build queue."""
    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)

        # NOTE(review): body of the 'fullpath' accessor; its def/decorator
        # lines are elided in this view.
        return os.path.join(self.buildqueue.path, self.filename)
856 __all__.append('BuildQueueFile')
858 ################################################################################
class ChangePendingBinary(object):
    """ORM-mapped class for a pending binary of an uploaded change."""
    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<ChangePendingBinary %s>' % self.change_pending_binary_id
867 __all__.append('ChangePendingBinary')
869 ################################################################################
class ChangePendingFile(object):
    """ORM-mapped class for a pending file of an uploaded change."""
    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<ChangePendingFile %s>' % self.change_pending_file_id
878 __all__.append('ChangePendingFile')
880 ################################################################################
class ChangePendingSource(object):
    """ORM-mapped class for a pending source of an uploaded change."""
    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<ChangePendingSource %s>' % self.change_pending_source_id
889 __all__.append('ChangePendingSource')
891 ################################################################################
class Component(ORMObject):
    """ORM class for a component (e.g. main, contrib); instances compare
    equal/unequal to plain strings matching their component_name."""

    def __init__(self, component_name = None):
        self.component_name = component_name

    def __eq__(self, val):
        if isinstance(val, str):
            return (self.component_name == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.component_name != val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def properties(self):
        # NOTE(review): the continuation of this list is elided in this view.
        return ['component_name', 'component_id', 'description', 'location', \

    def not_null_constraints(self):
        return ['component_name']
917 __all__.append('Component')
def get_component(component, session=None):
    """
    Returns database id for given C{component}.

    @type component: string
    @param component: The name of the override type

    @return: the database id for the given component
    """
    # Component names are matched case-insensitively; normalise first.
    component = component.lower()

    q = session.query(Component).filter_by(component_name=component)

    # NOTE(review): the try/return lines are elided in this view.
    except NoResultFound:
940 __all__.append('get_component')
942 ################################################################################
class DBConfig(object):
    """ORM-mapped class for a configuration entry, identified by name."""
    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<DBConfig %s>' % self.name
951 __all__.append('DBConfig')
953 ################################################################################
def get_or_set_contents_file_id(filename, session=None):
    """
    Returns database id for given filename.

    If no matching file is found, a row is inserted.

    @type filename: string
    @param filename: The filename
    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.

    @return: the database id for the given component
    """
    q = session.query(ContentFilename).filter_by(filename=filename)

        # (try header elided in this view)
        ret = q.one().cafilename_id
    except NoResultFound:
        # Not present yet: insert a row, then flush/commit so the new id
        # is populated before we read it back.
        cf = ContentFilename()
        cf.filename = filename
        session.commit_or_flush()
        ret = cf.cafilename_id
986 __all__.append('get_or_set_contents_file_id')
def get_contents(suite, overridetype, section=None, session=None):
    """
    Returns contents for a suite / overridetype combination, limiting
    to a section if not None.

    @type suite: Suite
    @param suite: Suite object

    @type overridetype: OverrideType
    @param overridetype: OverrideType object

    @type section: Section
    @param section: Optional section object to limit results to

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: ResultsProxy
    @return: ResultsProxy object set up to return tuples of (filename, section,
    """

    # find me all of the contents for a given suite
    # NOTE(review): part of the SELECT list of this query is elided in this
    # view of the file.
    contents_q = """SELECT (p.path||'/'||n.file) AS fn,
FROM content_associations c join content_file_paths p ON (c.filepath=p.id)
JOIN content_file_names n ON (c.filename=n.id)
JOIN binaries b ON (b.id=c.binary_pkg)
JOIN override o ON (o.package=b.package)
JOIN section s ON (s.id=o.section)
WHERE o.suite = :suiteid AND o.type = :overridetypeid
AND b.type=:overridetypename"""

    # Bind parameters keep the query injection-safe.
    vals = {'suiteid': suite.suite_id,
            'overridetypeid': overridetype.overridetype_id,
            'overridetypename': overridetype.overridetype}

    if section is not None:
        contents_q += " AND s.id = :sectionid"
        vals['sectionid'] = section.section_id

    contents_q += " ORDER BY fn"

    return session.execute(contents_q, vals)
1037 __all__.append('get_contents')
1039 ################################################################################
class ContentFilepath(object):
    """ORM-mapped class for a contents file path row."""
    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<ContentFilepath %s>' % self.filepath
1048 __all__.append('ContentFilepath')
def get_or_set_contents_path_id(filepath, session=None):
    """
    Returns database id for given path.

    If no matching file is found, a row is inserted.

    @type filepath: string
    @param filepath: The filepath

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.

    @return: the database id for the given path
    """
    q = session.query(ContentFilepath).filter_by(filepath=filepath)

        # (try header elided in this view)
        ret = q.one().cafilepath_id
    except NoResultFound:
        # Not present yet: insert a row, then flush/commit so the new id
        # is populated before we read it back.
        cf = ContentFilepath()
        cf.filepath = filepath
        session.commit_or_flush()
        ret = cf.cafilepath_id
1082 __all__.append('get_or_set_contents_path_id')
1084 ################################################################################
class ContentAssociation(object):
    """ORM-mapped class for a content association row."""
    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<ContentAssociation %s>' % self.ca_id
1093 __all__.append('ContentAssociation')
def insert_content_paths(binary_id, fullpaths, session=None):
    """
    Make sure given path is associated with given binary id

    @type binary_id: int
    @param binary_id: the id of the binary
    @type fullpaths: list
    @param fullpaths: the list of paths of the file being associated with the binary
    @type session: SQLAlchemy session
    @param session: Optional SQLAlchemy session. If this is passed, the caller
    is responsible for ensuring a transaction has begun and committing the
    results or rolling back based on the result code. If not passed, a commit
    will be performed at the end of the function, otherwise the caller is
    responsible for commiting.

    @return: True upon success
    """
    # NOTE(review): several lines (session guard, try/except/commit) are
    # elided in this view of the file.
    privatetrans = False
        session = DBConn().session()

    def generate_path_dicts():
        # Normalise a leading './' so stored paths are root-relative.
        for fullpath in fullpaths:
            if fullpath.startswith( './' ):
                fullpath = fullpath[2:]
            yield {'filename':fullpath, 'id': binary_id }

    for d in generate_path_dicts():
        session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
        traceback.print_exc()
        # Only rollback if we set up the session ourself
1146 __all__.append('insert_content_paths')
1148 ################################################################################
class DSCFile(object):
    """ORM-mapped class for a file referenced by a .dsc."""
    def __init__(self, *args, **kwargs):

        # NOTE(review): __repr__'s 'def' line is elided in this view.
        return '<DSCFile %s>' % self.dscfile_id
1157 __all__.append('DSCFile')
def get_dscfiles(dscfile_id=None, source_id=None, poolfile_id=None, session=None):
    """
    Returns a list of DSCFiles which may be empty

    @type dscfile_id: int (optional)
    @param dscfile_id: the dscfile_id of the DSCFiles to find

    @type source_id: int (optional)
    @param source_id: the source id related to the DSCFiles to find

    @type poolfile_id: int (optional)
    @param poolfile_id: the poolfile id related to the DSCFiles to find

    @return: Possibly empty list of DSCFiles
    """
    # Build the query incrementally from whichever filters were supplied.
    q = session.query(DSCFile)

    if dscfile_id is not None:
        q = q.filter_by(dscfile_id=dscfile_id)

    if source_id is not None:
        q = q.filter_by(source_id=source_id)

    if poolfile_id is not None:
        q = q.filter_by(poolfile_id=poolfile_id)

    # NOTE(review): the final return (presumably q.all()) is elided in this view.
1190 __all__.append('get_dscfiles')
1192 ################################################################################
class PoolFile(ORMObject):
    # NOTE(review): the tail of __init__'s signature (presumably the md5sum
    # parameter) and the fullpath accessor's def/decorator lines are elided
    # in this view of the file.
    def __init__(self, filename = None, location = None, filesize = -1, \
        self.filename = filename
        self.location = location
        self.filesize = filesize
        self.md5sum = md5sum

        # body of the fullpath accessor (header elided):
        return os.path.join(self.location.path, self.filename)

    def is_valid(self, filesize = -1, md5sum = None):
        # A pool file is valid when both size and md5sum match.
        return self.filesize == long(filesize) and self.md5sum == md5sum

    def properties(self):
        # 'filename' first: it is the primary property used by repr().
        return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
            'sha256sum', 'location', 'source', 'binary', 'last_used']

    def not_null_constraints(self):
        return ['filename', 'md5sum', 'location']
1216 __all__.append('PoolFile')
def check_poolfile(filename, filesize, md5sum, location_id, session=None):
    """
    Checks a file against the database and returns a tuple:
    (ValidFileFound [boolean], PoolFile object or None)

    @type filename: string
    @param filename: the filename of the file to check against the DB

    @param filesize: the size of the file to check against the DB

    @type md5sum: string
    @param md5sum: the md5sum of the file to check against the DB

    @type location_id: int
    @param location_id: the id of the location to look in

    @return: Tuple of length 2.
        - If valid pool file found: (C{True}, C{PoolFile object})
        - If valid pool file not found:
            - (C{False}, C{None}) if no file found
            - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
    """
    # Look the file up relative to the given location.
    poolfile = session.query(Location).get(location_id). \
        files.filter_by(filename=filename).first()
    # NOTE(review): the assignments of 'valid' are elided in this view.
    if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
    return (valid, poolfile)
1252 __all__.append('check_poolfile')
# TODO: the implementation can trivially be inlined at the place where the
# function is called
def get_poolfile_by_id(file_id, session=None):
    """
    Returns a PoolFile objects or None for the given id

    @type file_id: int
    @param file_id: the id of the file to look for

    @rtype: PoolFile or None
    @return: either the PoolFile object or None
    """

    # Primary-key lookup; returns None when the id is unknown.
    return session.query(PoolFile).get(file_id)

__all__.append('get_poolfile_by_id')
def get_poolfile_like_name(filename, session=None):
    """
    Returns an array of PoolFile objects which are like the given name

    @type filename: string
    @param filename: the filename of the file to check against the DB

    @rtype: array
    @return: array of PoolFile objects
    """

    # TODO: There must be a way of properly using bind parameters with %FOO%
    # Matches any pool path ending in "/<filename>".
    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))

    return q.all()

__all__.append('get_poolfile_like_name')
def add_poolfile(filename, datadict, location_id, session=None):
    """
    Add a new file to the pool

    @type filename: string
    @param filename: filename

    @type datadict: dict
    @param datadict: dict with needed data (size, md5sum, sha1sum, sha256sum)

    @type location_id: int
    @param location_id: database id of the location

    @rtype: PoolFile
    @return: the PoolFile object created
    """
    poolfile = PoolFile()
    poolfile.filename = filename
    poolfile.filesize = datadict["size"]
    poolfile.md5sum = datadict["md5sum"]
    poolfile.sha1sum = datadict["sha1sum"]
    poolfile.sha256sum = datadict["sha256sum"]
    poolfile.location_id = location_id

    session.add(poolfile)
    # Flush to get a file id (NB: This is not a commit)
    session.flush()

    return poolfile

__all__.append('add_poolfile')
1324 ################################################################################
class Fingerprint(ORMObject):
    """A GPG key fingerprint known to the archive (C{fingerprint} table)."""

    def __init__(self, fingerprint = None):
        self.fingerprint = fingerprint

    def properties(self):
        # NOTE(review): the tail of this list was elided in this excerpt;
        # 'binary' reconstructed — confirm against the full file.
        return ['fingerprint', 'fingerprint_id', 'keyring', 'uid', \
            'binary']

    def not_null_constraints(self):
        return ['fingerprint']

__all__.append('Fingerprint')
def get_fingerprint(fpr, session=None):
    """
    Returns Fingerprint object for given fpr.

    @type fpr: string
    @param fpr: The fpr to find / add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied).

    @rtype: Fingerprint
    @return: the Fingerprint object for the given fpr or None
    """

    q = session.query(Fingerprint).filter_by(fingerprint=fpr)

    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret

__all__.append('get_fingerprint')
def get_or_set_fingerprint(fpr, session=None):
    """
    Returns Fingerprint object for given fpr.

    If no matching fpr is found, a row is inserted.

    @type fpr: string
    @param fpr: The fpr to find / add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.
    A flush will be performed either way.

    @rtype: Fingerprint
    @return: the Fingerprint object for the given fpr
    """

    q = session.query(Fingerprint).filter_by(fingerprint=fpr)

    try:
        ret = q.one()
    except NoResultFound:
        # Not known yet: insert a new row and make its id available.
        fingerprint = Fingerprint()
        fingerprint.fingerprint = fpr
        session.add(fingerprint)
        session.commit_or_flush()
        ret = fingerprint

    return ret

__all__.append('get_or_set_fingerprint')
1401 ################################################################################
# Helper routine for Keyring class
def get_ldap_name(entry):
    """Build a display name from an LDAP entry dict using the cn/mn/sn
    attributes, skipping empty values and the '-' placeholder."""
    name = []
    for k in ["cn", "mn", "sn"]:
        ret = entry.get(k)
        if ret and ret[0] != "" and ret[0] != "-":
            name.append(ret[0])
    return " ".join(name)
1412 ################################################################################
class Keyring(object):
    """One keyring known to the archive; provides helpers to parse its keys
    via gpg and to map them to user ids (from LDAP or generated)."""

    # gpg command line used by load_keys(); '%s' is the keyring path.
    gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
                     " --with-colons --fingerprint --fingerprint"

    # NOTE(review): key data caches (self.keys / self.fpr_lookup dicts) are
    # initialised in lines elided from this excerpt — confirm placement.
    keys = {}
    fpr_lookup = {}

    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<Keyring %s>' % self.keyring_name
1427 def de_escape_gpg_str(self, txt):
1428 esclist = re.split(r'(\\x..)', txt)
1429 for x in range(1,len(esclist),2):
1430 esclist[x] = "%c" % (int(esclist[x][2:],16))
1431 return "".join(esclist)
    def parse_address(self, uid):
        """parses uid and returns a tuple of real name and email address"""
        (name, address) = email.Utils.parseaddr(uid)
        # Strip any parenthesised comment from the name part.
        name = re.sub(r"\s*[(].*[)]", "", name)
        # Undo gpg's \xNN escaping in the name.
        name = self.de_escape_gpg_str(name)
        # NOTE(review): a fallback for an empty parsed name appears to be
        # elided from this excerpt — confirm against the full file.
        if name == "":
            name = uid
        return (name, address)
    def load_keys(self, keyring):
        """Run gpg over C{keyring} (see gpg_invocation) and populate
        self.keys (per-key name/email/fingerprints) and self.fpr_lookup
        (fingerprint -> key id)."""
        if not self.keyring_id:
            raise Exception('Must be initialized with database information')

        k = os.popen(self.gpg_invocation % keyring, "r")
        key = None
        signingkey = False

        # Parse gpg's --with-colons output line by line.
        for line in k.xreadlines():
            field = line.split(":")
            if field[0] == "pub":
                # NOTE(review): the key-id extraction and dict setup for the
                # 'pub' branch were partially elided from this excerpt;
                # reconstructed minimally — confirm against the full file.
                key = field[4]
                self.keys[key] = {}
                (name, addr) = self.parse_address(field[9])
                if "@" in addr:
                    self.keys[key]["email"] = addr
                    self.keys[key]["name"] = name
                self.keys[key]["fingerprints"] = []
                signingkey = True
            elif key and field[0] == "sub" and len(field) >= 12:
                # Subkey capabilities: 's' means usable for signing.
                signingkey = ("s" in field[11])
            elif key and field[0] == "uid":
                (name, addr) = self.parse_address(field[9])
                # Only take the first plausible email for the key.
                if "email" not in self.keys[key] and "@" in addr:
                    self.keys[key]["email"] = addr
                    self.keys[key]["name"] = name
            elif signingkey and field[0] == "fpr":
                self.keys[key]["fingerprints"].append(field[9])
                self.fpr_lookup[field[9]] = key
    def import_users_from_ldap(self, session):
        """Query the configured LDAP server and attach the owning LDAP uid
        to each key in self.keys.  Returns (byname, byuid) lookup maps."""
        # NOTE(review): the 'import ldap' / Config() setup lines are elided
        # from this excerpt — reconstructed; confirm against the full file.
        import ldap
        cnf = Config()

        LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
        LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]

        # Anonymous bind; the directory attributes used here are public.
        l = ldap.open(LDAPServer)
        l.simple_bind_s("","")
        Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
               "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
               ["uid", "keyfingerprint", "cn", "mn", "sn"])

        ldap_fin_uid_id = {}

        byuid = {}
        byname = {}

        for i in Attrs:
            entry = i[1]
            uid = entry["uid"][0]
            name = get_ldap_name(entry)
            fingerprints = entry["keyFingerPrint"]
            keyid = None
            for f in fingerprints:
                key = self.fpr_lookup.get(f, None)
                if key not in self.keys:
                    continue
                self.keys[key]["uid"] = uid

                # Only record the database uid id once per LDAP entry.
                if keyid is not None:
                    continue
                keyid = get_or_set_uid(uid, session).uid_id
                byuid[keyid] = (uid, name)
                byname[uid] = (keyid, name)

        return (byname, byuid)
    def generate_users_from_keyring(self, format, session):
        """Generate uid entries for the loaded keys using C{format} applied
        to each key's email address; keys without an email get the shared
        'invalid-uid' pseudo user.  Returns (byname, byuid) lookup maps."""
        byuid = {}
        byname = {}
        any_invalid = False
        for x in self.keys.keys():
            if "email" not in self.keys[x]:
                # No email parsed for this key: mark and fall back.
                any_invalid = True
                self.keys[x]["uid"] = format % "invalid-uid"
            else:
                uid = format % self.keys[x]["email"]
                keyid = get_or_set_uid(uid, session).uid_id
                byuid[keyid] = (uid, self.keys[x]["name"])
                byname[uid] = (keyid, self.keys[x]["name"])
                self.keys[x]["uid"] = uid

        # Ensure the shared 'invalid-uid' user exists if any key needed it.
        if any_invalid:
            uid = format % "invalid-uid"
            keyid = get_or_set_uid(uid, session).uid_id
            byuid[keyid] = (uid, "ungeneratable user id")
            byname[uid] = (keyid, "ungeneratable user id")

        return (byname, byuid)

__all__.append('Keyring')
def get_keyring(keyring, session=None):
    """
    If C{keyring} does not have an entry in the C{keyrings} table yet, return None
    If C{keyring} already has an entry, simply return the existing Keyring

    @type keyring: string
    @param keyring: the keyring name

    @rtype: Keyring
    @return: the Keyring object for this keyring
    """

    q = session.query(Keyring).filter_by(keyring_name=keyring)

    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret

__all__.append('get_keyring')
1558 ################################################################################
class KeyringACLMap(object):
    """Mapping between a keyring and an ACL (C{keyring_acl_map} table);
    columns are filled in by the SQLAlchemy mapper."""
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<KeyringACLMap %s>' % self.keyring_acl_map_id

__all__.append('KeyringACLMap')
1569 ################################################################################
class DBChange(object):
    """A .changes upload recorded in the database (C{changes} table)."""
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<DBChange %s>' % self.changesname

    def clean_from_queue(self):
        """Detach this upload from its policy queue: drop its file
        references and clear the queue/approval columns."""
        session = DBConn().session().object_session(self)

        # Remove changes_pool_files entries
        self.poolfiles = []

        # Remove changes_pending_files references
        self.files = []

        # Clear out of queue
        self.in_queue = None
        self.approved_for_id = None

__all__.append('DBChange')
def get_dbchange(filename, session=None):
    """
    returns DBChange object for given C{filename}.

    @type filename: string
    @param filename: the name of the file

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: DBChange
    @return: DBChange object for the given filename (C{None} if not present)
    """
    q = session.query(DBChange).filter_by(changesname=filename)

    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret

__all__.append('get_dbchange')
1618 ################################################################################
class Location(ORMObject):
    """An on-disk archive location (C{location} table), e.g. a pool root."""

    def __init__(self, path = None, component = None):
        self.path = path
        self.component = component
        # the column 'type' should go away, see comment at mapper
        self.archive_type = 'pool'

    def properties(self):
        # NOTE(review): tail of this list elided in this excerpt;
        # 'files_count' reconstructed — confirm against the full file.
        return ['path', 'location_id', 'archive_type', 'component', \
            'files_count']

    def not_null_constraints(self):
        return ['path', 'archive_type']

__all__.append('Location')
def get_location(location, component=None, archive=None, session=None):
    """
    Returns Location object for the given combination of location, component
    and archive.

    @type location: string
    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}

    @type component: string
    @param component: the component name (if None, no restriction applied)

    @type archive: string
    @param archive: the archive name (if None, no restriction applied)

    @rtype: Location / None
    @return: Either a Location object or None if one can't be found
    """

    q = session.query(Location).filter_by(path=location)

    if archive is not None:
        q = q.join(Archive).filter_by(archive_name=archive)

    if component is not None:
        q = q.join(Component).filter_by(component_name=component)

    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret

__all__.append('get_location')
1670 ################################################################################
class Maintainer(ORMObject):
    """A maintainer / changed-by entry (C{maintainer} table)."""

    def __init__(self, name = None):
        self.name = name

    def properties(self):
        return ['name', 'maintainer_id']

    def not_null_constraints(self):
        return ['name']

    def get_split_maintainer(self):
        # Split "Name <email>" via fix_maintainer; all-empty tuple when no
        # name is recorded on this row.
        if not hasattr(self, 'name') or self.name is None:
            return ('', '', '', '')

        return fix_maintainer(self.name.strip())

__all__.append('Maintainer')
def get_or_set_maintainer(name, session=None):
    """
    Returns Maintainer object for given maintainer name.

    If no matching maintainer name is found, a row is inserted.

    @type name: string
    @param name: The maintainer name to add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.
    A flush will be performed either way.

    @rtype: Maintainer
    @return: the Maintainer object for the given maintainer
    """

    q = session.query(Maintainer).filter_by(name=name)

    try:
        ret = q.one()
    except NoResultFound:
        # Not known yet: insert a new row and make its id available.
        maintainer = Maintainer()
        maintainer.name = name
        session.add(maintainer)
        session.commit_or_flush()
        ret = maintainer

    return ret

__all__.append('get_or_set_maintainer')
def get_maintainer(maintainer_id, session=None):
    """
    Return the name of the maintainer behind C{maintainer_id} or None if that
    maintainer_id is invalid.

    @type maintainer_id: int
    @param maintainer_id: the id of the maintainer

    @rtype: Maintainer
    @return: the Maintainer with this C{maintainer_id}
    """

    # Primary-key lookup; returns None for an unknown id.
    return session.query(Maintainer).get(maintainer_id)

__all__.append('get_maintainer')
1741 ################################################################################
class NewComment(object):
    """A ftpmaster comment on a package sitting in the NEW queue
    (C{new_comments} table); columns filled in by the SQLAlchemy mapper."""
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '''<NewComment for '%s %s' (%s)>''' % (self.package, self.version, self.comment_id)

__all__.append('NewComment')
def has_new_comment(package, version, session=None):
    """
    Returns true if the given combination of C{package}, C{version} has a comment.

    @type package: string
    @param package: name of the package

    @type version: string
    @param version: package version

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: boolean
    @return: true/false
    """

    q = session.query(NewComment)
    q = q.filter_by(package=package)
    q = q.filter_by(version=version)

    # The comparison already yields a bool; the previous bool(...) wrapper
    # was redundant.
    return q.count() > 0
1777 __all__.append('has_new_comment')
def get_new_comments(package=None, version=None, comment_id=None, session=None):
    """
    Returns (possibly empty) list of NewComment objects for the given
    parameters.

    @type package: string (optional)
    @param package: name of the package

    @type version: string (optional)
    @param version: package version

    @type comment_id: int (optional)
    @param comment_id: An id of a comment

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: A (possibly empty) list of NewComment objects will be returned
    """

    # Each filter is applied only when the corresponding argument is given.
    q = session.query(NewComment)
    if package is not None: q = q.filter_by(package=package)
    if version is not None: q = q.filter_by(version=version)
    if comment_id is not None: q = q.filter_by(comment_id=comment_id)

    return q.all()

__all__.append('get_new_comments')
1811 ################################################################################
class Override(object):
    """An override entry for a package in a suite (C{override} table);
    columns are filled in by the SQLAlchemy mapper."""
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<Override %s (%s)>' % (self.package, self.suite_id)

__all__.append('Override')
def get_override(package, suite=None, component=None, overridetype=None, session=None):
    """
    Returns Override object for the given parameters

    @type package: string
    @param package: The name of the package

    @type suite: string, list or None
    @param suite: The name of the suite (or suites if a list) to limit to.  If
                  None, don't limit.  Defaults to None.

    @type component: string, list or None
    @param component: The name of the component (or components if a list) to
                      limit to.  If None, don't limit.  Defaults to None.

    @type overridetype: string, list or None
    @param overridetype: The name of the overridetype (or overridetypes if a list) to
                         limit to.  If None, don't limit.  Defaults to None.

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: A (possibly empty) list of Override objects will be returned
    """

    q = session.query(Override)
    q = q.filter_by(package=package)

    # Scalar arguments are normalised to one-element lists before the IN().
    if suite is not None:
        if not isinstance(suite, list): suite = [suite]
        q = q.join(Suite).filter(Suite.suite_name.in_(suite))

    if component is not None:
        if not isinstance(component, list): component = [component]
        q = q.join(Component).filter(Component.component_name.in_(component))

    if overridetype is not None:
        if not isinstance(overridetype, list): overridetype = [overridetype]
        q = q.join(OverrideType).filter(OverrideType.overridetype.in_(overridetype))

    return q.all()

__all__.append('get_override')
1870 ################################################################################
class OverrideType(object):
    """A type of override, e.g. deb / udeb / dsc (C{override_type} table)."""
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<OverrideType %s>' % self.overridetype

__all__.append('OverrideType')
def get_override_type(override_type, session=None):
    """
    Returns OverrideType object for given C{override type}.

    @type override_type: string
    @param override_type: The name of the override type

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: OverrideType
    @return: the OverrideType object for the given override type
        (C{None} if not found)
    """
    q = session.query(OverrideType).filter_by(overridetype=override_type)

    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret

__all__.append('get_override_type')
1906 ################################################################################
class DebContents(object):
    """A path shipped by a .deb package (deb contents table); columns are
    filled in by the SQLAlchemy mapper."""
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        # Fixed typo in the repr text (was 'DebConetnts').
        return '<DebContents %s: %s>' % (self.package.package, self.file)
1915 __all__.append('DebContents')
class UdebContents(object):
    """A path shipped by a .udeb package (udeb contents table); columns are
    filled in by the SQLAlchemy mapper."""
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        # Fixed typo in the repr text (was 'UdebConetnts').
        return '<UdebContents %s: %s>' % (self.package.package, self.file)
1925 __all__.append('UdebContents')
class PendingBinContents(object):
    """Temporarily recorded contents of a not-yet-accepted binary package;
    columns are filled in by the SQLAlchemy mapper."""
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<PendingBinContents %s>' % self.contents_id

__all__.append('PendingBinContents')
def insert_pending_content_paths(package,
                                 is_udeb,
                                 fullpaths,
                                 session=None):
    """
    Make sure given paths are temporarily associated with given
    package

    @type package: dict
    @param package: the package to associate with should have been read in from the binary control file
    @type fullpaths: list
    @param fullpaths: the list of paths of the file being associated with the binary
    @type session: SQLAlchemy session
    @param session: Optional SQLAlchemy session.  If this is passed, the caller
    is responsible for ensuring a transaction has begun and committing the
    results or rolling back based on the result code.  If not passed, a commit
    will be performed at the end of the function

    @return: True upon success, False if there is a problem
    """

    # NOTE(review): several lines of this function are elided in this excerpt
    # (session setup, q.delete(), pca.file assignment, the commit/flush and
    # rollback branches); reconstructed minimally — confirm against the full
    # file.
    privatetrans = False
    if session is None:
        session = DBConn().session()
        privatetrans = True

    try:
        arch = get_architecture(package['Architecture'], session)
        arch_id = arch.arch_id

        # Remove any already existing recorded files for this package
        q = session.query(PendingBinContents)
        q = q.filter_by(package=package['Package'])
        q = q.filter_by(version=package['Version'])
        q = q.filter_by(architecture=arch_id)
        q.delete()

        for fullpath in fullpaths:

            if fullpath.startswith( "./" ):
                fullpath = fullpath[2:]

            pca = PendingBinContents()
            pca.package = package['Package']
            pca.version = package['Version']
            pca.file = fullpath
            pca.architecture = arch_id

            if is_udeb:
                pca.type = 8 # gross
            else:
                pca.type = 7 # also gross
            session.add(pca)

        # Only commit if we set up the session ourself
        if privatetrans:
            session.commit()
            session.close()
        else:
            session.flush()

        return True
    except Exception, e:
        traceback.print_exc()

        # Only rollback if we set up the session ourself
        if privatetrans:
            session.rollback()
            session.close()

        return False

__all__.append('insert_pending_content_paths')
2011 ################################################################################
class PolicyQueue(object):
    """A policy queue (e.g. NEW, byhand) in the C{policy_queue} table;
    columns are filled in by the SQLAlchemy mapper."""
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<PolicyQueue %s>' % self.queue_name

__all__.append('PolicyQueue')
def get_policy_queue(queuename, session=None):
    """
    Returns PolicyQueue object for given C{queue name}

    @type queuename: string
    @param queuename: The name of the queue

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: PolicyQueue
    @return: PolicyQueue object for the given queue (C{None} if not found)
    """
    q = session.query(PolicyQueue).filter_by(queue_name=queuename)

    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret

__all__.append('get_policy_queue')
def get_policy_queue_from_path(pathname, session=None):
    """
    Returns PolicyQueue object for given C{path name}

    @type pathname: string
    @param pathname: The path

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: PolicyQueue
    @return: PolicyQueue object for the given queue (C{None} if not found)
    """
    q = session.query(PolicyQueue).filter_by(path=pathname)

    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret

__all__.append('get_policy_queue_from_path')
2072 ################################################################################
class Priority(object):
    """A package priority (C{priority} table); columns are filled in by the
    SQLAlchemy mapper."""
    def __init__(self, *args, **kwargs):
        pass

    def __eq__(self, val):
        # Allow comparing a Priority directly against its name string.
        if isinstance(val, str):
            return (self.priority == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.priority != val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __repr__(self):
        return '<Priority %s (%s)>' % (self.priority, self.priority_id)

__all__.append('Priority')
def get_priority(priority, session=None):
    """
    Returns Priority object for given C{priority name}.

    @type priority: string
    @param priority: The name of the priority

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Priority
    @return: Priority object for the given priority (C{None} if not found)
    """

    q = session.query(Priority).filter_by(priority=priority)

    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret

__all__.append('get_priority')
def get_priorities(session=None):
    """
    Returns dictionary of priority names -> id mappings

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: dictionary
    @return: dictionary of priority names -> id mappings
    """

    ret = {}
    q = session.query(Priority)
    for x in q.all():
        ret[x.priority] = x.priority_id

    return ret

__all__.append('get_priorities')
2142 ################################################################################
class Section(object):
    """An archive section (C{section} table); columns are filled in by the
    SQLAlchemy mapper."""
    def __init__(self, *args, **kwargs):
        pass

    def __eq__(self, val):
        # Allow comparing a Section directly against its name string.
        if isinstance(val, str):
            return (self.section == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.section != val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __repr__(self):
        return '<Section %s>' % self.section

__all__.append('Section')
def get_section(section, session=None):
    """
    Returns Section object for given C{section name}.

    @type section: string
    @param section: The name of the section

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Section
    @return: Section object for the given section name (C{None} if not found)
    """

    q = session.query(Section).filter_by(section=section)

    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret

__all__.append('get_section')
def get_sections(session=None):
    """
    Returns dictionary of section names -> id mappings

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: dictionary
    @return: dictionary of section names -> id mappings
    """

    ret = {}
    q = session.query(Section)
    for x in q.all():
        ret[x.section] = x.section_id

    return ret

__all__.append('get_sections')
2212 ################################################################################
class DBSource(ORMObject):
    """A source package version in the archive (C{source} table)."""

    def __init__(self, source = None, version = None, maintainer = None, \
        changedby = None, poolfile = None, install_date = None):
        self.source = source
        self.version = version
        self.maintainer = maintainer
        self.changedby = changedby
        self.poolfile = poolfile
        self.install_date = install_date

    def properties(self):
        # Attribute names exposed via the generic ORMObject machinery.
        return ['source', 'source_id', 'maintainer', 'changedby', \
            'fingerprint', 'poolfile', 'version', 'suites_count', \
            'install_date', 'binaries_count']

    def not_null_constraints(self):
        # Columns that must be populated before insertion.
        # Fix: 'install_date' was listed twice; duplicate removed.
        return ['source', 'version', 'install_date', 'maintainer', \
            'changedby', 'poolfile']

__all__.append('DBSource')
def source_exists(source, source_version, suites = ["any"], session=None):
    """
    Ensure that source exists somewhere in the archive for the binary
    upload being processed.
      1. exact match     => 1.0-3
      2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1

    @type source: string
    @param source: source name

    @type source_version: string
    @param source_version: expected source version

    @type suites: list
    @param suites: list of suites to check in, default I{any}

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: int
    @return: returns 1 if a source with expected version is found, otherwise 0
    """

    # NOTE(review): setup lines (Config(), the per-suite guard and the
    # final count/return) are elided in this excerpt; reconstructed
    # minimally — confirm against the full file.
    cnf = Config()
    ret = True

    # Strip a bin-only-NMU suffix (+bN) to also accept the original version.
    from daklib.regexes import re_bin_only_nmu
    orig_source_version = re_bin_only_nmu.sub('', source_version)

    for suite in suites:
        q = session.query(DBSource).filter_by(source=source). \
            filter(DBSource.version.in_([source_version, orig_source_version]))
        if suite != "any":
            # source must exist in suite X, or in some other suite that's
            # mapped to X, recursively... silent-maps are counted too,
            # unreleased-maps aren't.
            maps = cnf.ValueList("SuiteMappings")[:]
            maps.reverse()
            maps = [ m.split() for m in maps ]
            maps = [ (x[1], x[2]) for x in maps
                            if x[0] == "map" or x[0] == "silent-map" ]
            s = [suite]
            for x in maps:
                if x[1] in s and x[0] not in s:
                    s.append(x[0])

            q = q.filter(DBSource.suites.any(Suite.suite_name.in_(s)))

        if q.count() > 0:
            continue

        # No source found so return not ok
        ret = False

    return ret

__all__.append('source_exists')
def get_suites_source_in(source, session=None):
    """
    Returns list of Suite objects which given C{source} name is in

    @type source: str
    @param source: DBSource package name to search for

    @rtype: list
    @return: list of Suite objects for the given source
    """

    return session.query(Suite).filter(Suite.sources.any(source=source)).all()

__all__.append('get_suites_source_in')
def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None):
    """
    Returns list of DBSource objects for given C{source} name and other parameters

    @type source: str
    @param source: DBSource package name to search for

    @type version: str or None
    @param version: DBSource version name to search for or None if not applicable

    @type dm_upload_allowed: bool
    @param dm_upload_allowed: If None, no effect.  If True or False, only
    return packages with that dm_upload_allowed setting

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: list of DBSource objects for the given name (may be empty)
    """

    q = session.query(DBSource).filter_by(source=source)

    if version is not None:
        q = q.filter_by(version=version)

    if dm_upload_allowed is not None:
        q = q.filter_by(dm_upload_allowed=dm_upload_allowed)

    return q.all()

__all__.append('get_sources_from_name')
# FIXME: This function fails badly if it finds more than 1 source package and
# its implementation is trivial enough to be inlined.
def get_source_in_suite(source, suite, session=None):
    """
    Returns a DBSource object for a combination of C{source} and C{suite}.

      - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
      - B{suite} - a suite name, eg. I{unstable}

    @type source: string
    @param source: source package name

    @type suite: string
    @param suite: the suite name

    @rtype: string
    @return: the version for I{source} in I{suite}
    """

    q = get_suite(suite, session).get_sources(source)
    try:
        return q.one()
    except NoResultFound:
        return None

__all__.append('get_source_in_suite')
2376 ################################################################################
def add_dsc_to_db(u, filename, session=None):
    """Record an accepted source upload: create the DBSource row, its pool
    files, dsc_files entries and src_uploaders rows.

    NOTE(review): several glue lines of this function are elided in this
    excerpt (object construction, session.add/flush calls, the inner files
    lookup); reconstructed minimally — confirm against the full file.

    @param u: upload object carrying pkg.dsc / pkg.changes / pkg.files
    @param filename: key of the .dsc entry in u.pkg.files
    @return: (source, dsc_component, dsc_location_id, pfs) where pfs is the
             list of PoolFile objects touched by this upload
    """
    entry = u.pkg.files[filename]
    source = DBSource()
    pfs = []

    source.source = u.pkg.dsc["source"]
    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
    source.install_date = datetime.now().date()

    dsc_component = entry["component"]
    dsc_location_id = entry["location id"]

    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")

    # Set up a new poolfile if necessary
    if not entry.has_key("files id") or not entry["files id"]:
        filename = entry["pool name"] + filename
        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
        session.flush()
        pfs.append(poolfile)
        entry["files id"] = poolfile.file_id

    source.poolfile_id = entry["files id"]
    session.add(source)

    suite_names = u.pkg.changes["distribution"].keys()
    source.suites = session.query(Suite). \
        filter(Suite.suite_name.in_(suite_names)).all()

    # Add the source files to the DB (files and dsc_files)
    dscfile = DSCFile()
    dscfile.source_id = source.source_id
    dscfile.poolfile_id = entry["files id"]
    session.add(dscfile)

    for dsc_file, dentry in u.pkg.dsc_files.items():
        df = DSCFile()
        df.source_id = source.source_id

        # If the .orig tarball is already in the pool, it's
        # files id is stored in dsc_files by check_dsc().
        files_id = dentry.get("files id", None)

        # Find the entry in the files hash
        # TODO: Bail out here properly
        dfentry = None
        for f, e in u.pkg.files.items():
            if f == dsc_file:
                dfentry = e
                break

        if files_id is None:
            filename = dfentry["pool name"] + dsc_file

            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
            # FIXME: needs to check for -1/-2 and or handle exception
            if found and obj is not None:
                files_id = obj.file_id
                pfs.append(obj)

            # If still not found, add it
            if files_id is None:
                # HACK: Force sha1sum etc into dentry
                dentry["sha1sum"] = dfentry["sha1sum"]
                dentry["sha256sum"] = dfentry["sha256sum"]
                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
                pfs.append(poolfile)
                files_id = poolfile.file_id
        else:
            poolfile = get_poolfile_by_id(files_id, session)
            if poolfile is None:
                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
            pfs.append(poolfile)

        df.poolfile_id = files_id
        session.add(df)

    # Add the src_uploaders to the DB
    uploader_ids = [source.maintainer_id]
    if u.pkg.dsc.has_key("uploaders"):
        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
            up = up.strip()
            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)

    added_ids = {}
    for up_id in uploader_ids:
        if added_ids.has_key(up_id):
            # Don't insert the same uploader twice for one source.
            utils.warn("Already saw uploader %s for source %s" % (up_id, source.source))
            continue

        added_ids[up_id] = 1

        su = SrcUploader()
        su.maintainer_id = up_id
        su.source_id = source.source_id
        session.add(su)

    session.flush()

    return source, dsc_component, dsc_location_id, pfs

__all__.append('add_dsc_to_db')
2487 def add_deb_to_db(u, filename, session=None):
2489 Contrary to what you might expect, this routine deals with both
2490 debs and udebs. That info is in 'dbtype', whilst 'type' is
2491 'deb' for both of them
2494 entry = u.pkg.files[filename]
2497 bin.package = entry["package"]
2498 bin.version = entry["version"]
2499 bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
2500 bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
2501 bin.arch_id = get_architecture(entry["architecture"], session).arch_id
2502 bin.binarytype = entry["dbtype"]
2505 filename = entry["pool name"] + filename
2506 fullpath = os.path.join(cnf["Dir::Pool"], filename)
2507 if not entry.get("location id", None):
2508 entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
2510 if entry.get("files id", None):
2511 poolfile = get_poolfile_by_id(bin.poolfile_id)
2512 bin.poolfile_id = entry["files id"]
2514 poolfile = add_poolfile(filename, entry, entry["location id"], session)
2515 bin.poolfile_id = entry["files id"] = poolfile.file_id
2518 bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
2519 if len(bin_sources) != 1:
2520 raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
2521 (bin.package, bin.version, entry["architecture"],
2522 filename, bin.binarytype, u.pkg.changes["fingerprint"])
2524 bin.source_id = bin_sources[0].source_id
2526 # Add and flush object so it has an ID
2529 suite_names = u.pkg.changes["distribution"].keys()
2530 bin.suites = session.query(Suite). \
2531 filter(Suite.suite_name.in_(suite_names)).all()
2535 # Deal with contents - disabled for now
2536 #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
2538 # print "REJECT\nCould not determine contents of package %s" % bin.package
2539 # session.rollback()
2540 # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
__all__.append('add_deb_to_db')   # export: part of daklib.dbconn's public API
2546 ################################################################################
class SourceACL(object):
    """Per-fingerprint source upload ACL row (maps table source_acl).

    Attributes (e.g. source_acl_id) are populated by the SQLAlchemy mapper
    set up in DBConn.__setupmappers().
    """

    def __init__(self, *args, **kwargs):
        # Mapper-populated object: nothing to initialise here.
        pass

    def __repr__(self):
        return '<SourceACL %s>' % self.source_acl_id
__all__.append('SourceACL')   # export: part of daklib.dbconn's public API
2557 ################################################################################
class SrcFormat(object):
    """Source package format row (maps table src_format).

    format_name is e.g. '1.0' or '3.0 (quilt)'; attributes are populated
    by the SQLAlchemy mapper.
    """

    def __init__(self, *args, **kwargs):
        # Mapper-populated object: nothing to initialise here.
        pass

    def __repr__(self):
        return '<SrcFormat %s>' % (self.format_name)
__all__.append('SrcFormat')   # export: part of daklib.dbconn's public API
2568 ################################################################################
class SrcUploader(object):
    """Uploader entry for a source package (maps table src_uploaders).

    Attributes are populated by the SQLAlchemy mapper.
    """

    def __init__(self, *args, **kwargs):
        # Mapper-populated object: nothing to initialise here.
        pass

    def __repr__(self):
        return '<SrcUploader %s>' % self.uploader_id
__all__.append('SrcUploader')   # export: part of daklib.dbconn's public API
2579 ################################################################################
# (display name, Suite attribute) pairs used by Suite.details() to render a
# human-readable "Name: value" summary of a suite's configuration.
# NOTE(review): this excerpt elides one entry between 'Origin' and
# 'Description' (upstream carries ('Label', 'label')) -- confirm against VCS.
SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                 ('SuiteID', 'suite_id'),
                 ('Version', 'version'),
                 ('Origin', 'origin'),
                 ('Description', 'description'),
                 ('Untouchable', 'untouchable'),
                 ('Announce', 'announce'),
                 ('Codename', 'codename'),
                 ('OverrideCodename', 'overridecodename'),
                 ('ValidTime', 'validtime'),
                 ('Priority', 'priority'),
                 ('NotAutomatic', 'notautomatic'),
                 ('CopyChanges', 'copychanges'),
                 ('OverrideSuite', 'overridesuite')]
# Why the heck don't we have any UNIQUE constraints in table suite?
# TODO: Add UNIQUE constraints for appropriate columns.
class Suite(ORMObject):
    """A release suite (e.g. unstable, testing); maps table suite."""

    def __init__(self, suite_name = None, version = None):
        self.suite_name = suite_name
        self.version = version

    def properties(self):
        # Attributes used by ORMObject's generic repr/json machinery.
        return ['suite_name', 'version', 'sources_count', 'binaries_count']

    def not_null_constraints(self):
        return ['suite_name', 'version']

    def __eq__(self, val):
        # Allow comparing a Suite directly against its name string.
        if isinstance(val, str):
            return (self.suite_name == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.suite_name != val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def details(self):
        """
        Return a printable summary of this suite: one "Name: value" line
        per SUITE_FIELDS entry that is set (non-None) on this object.
        """
        ret = []
        for disp, field in SUITE_FIELDS:
            val = getattr(self, field, None)
            if val is not None:
                ret.append("%s: %s" % (disp, val))

        return "\n".join(ret)

    def get_architectures(self, skipsrc=False, skipall=False):
        """
        Returns list of Architecture objects

        @type skipsrc: boolean
        @param skipsrc: Whether to skip returning the 'source' architecture entry

        @type skipall: boolean
        @param skipall: Whether to skip returning the 'all' architecture entry

        @rtype: list
        @return: list of Architecture objects for the given name (may be empty)
        """
        q = object_session(self).query(Architecture).with_parent(self)
        # Apply the filters only when requested; previously both filters
        # were applied unconditionally, ignoring the keyword arguments.
        if skipsrc:
            q = q.filter(Architecture.arch_string != 'source')
        if skipall:
            q = q.filter(Architecture.arch_string != 'all')
        return q.order_by(Architecture.arch_string).all()

    def get_sources(self, source):
        """
        Returns a query object representing DBSource that is part of C{suite}.

          - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}

        @type source: string
        @param source: source package name

        @rtype: sqlalchemy.orm.query.Query
        @return: a query of DBSource
        """
        session = object_session(self)
        return session.query(DBSource).filter_by(source = source). \
            with_parent(self)

__all__.append('Suite')
def get_suite(suite, session=None):
    """
    Returns Suite object for given C{suite name}.

    @type suite: string
    @param suite: The name of the suite

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Suite
    @return: Suite object for the requested suite name (None if not present)
    """
    # NOTE(review): the temporary-session behaviour documented above relies
    # on a decorator (upstream: @session_wrapper) not visible in this
    # excerpt -- confirm it is present in the full file.
    q = session.query(Suite).filter_by(suite_name=suite)

    try:
        return q.one()
    except NoResultFound:
        # No such suite: signal with None rather than raising.
        return None

__all__.append('get_suite')
2699 ################################################################################
# TODO: should be removed because the implementation is too trivial
def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
    """
    Returns list of Architecture objects for given C{suite} name

    @type suite: str
    @param suite: Suite name to search for

    @type skipsrc: boolean
    @param skipsrc: Whether to skip returning the 'source' architecture entry

    @type skipall: boolean
    @param skipall: Whether to skip returning the 'all' architecture entry

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: list of Architecture objects for the given name (may be empty)
    """
    # Thin delegation to Suite.get_architectures (hence the TODO above).
    return get_suite(suite, session).get_architectures(skipsrc, skipall)

__all__.append('get_suite_architectures')
2730 ################################################################################
class SuiteSrcFormat(object):
    """Association row: which source formats are allowed in which suite
    (maps table suite_src_formats).  Attributes populated by the mapper.
    """

    def __init__(self, *args, **kwargs):
        # Mapper-populated object: nothing to initialise here.
        pass

    def __repr__(self):
        return '<SuiteSrcFormat (%s, %s)>' % (self.suite_id, self.src_format_id)
__all__.append('SuiteSrcFormat')   # export: part of daklib.dbconn's public API
def get_suite_src_formats(suite, session=None):
    """
    Returns list of allowed SrcFormat for C{suite}.

    @type suite: str
    @param suite: Suite name to search for

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: the list of allowed source formats for I{suite}
    """
    q = session.query(SrcFormat)
    q = q.join(SuiteSrcFormat)
    q = q.join(Suite).filter_by(suite_name=suite)
    q = q.order_by('format_name')

    # Fixed: the assembled query was never executed/returned, so the
    # function always fell through returning None.
    return q.all()

__all__.append('get_suite_src_formats')
2766 ################################################################################
class Uid(ORMObject):
    """An OpenPGP key user id (maps table uid)."""

    def __init__(self, uid = None, name = None):
        # Fixed: the constructor previously discarded its arguments.
        self.uid = uid
        self.name = name

    def __eq__(self, val):
        # Allow comparing a Uid directly against its uid string.
        if isinstance(val, str):
            return (self.uid == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.uid != val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def properties(self):
        # Attributes used by ORMObject's generic repr/json machinery.
        return ['uid', 'name', 'fingerprint']

    def not_null_constraints(self):
        # Fixed: previously returned None, breaking ORMObject validation.
        return ['uid']

__all__.append('Uid')
def get_or_set_uid(uidname, session=None):
    """
    Returns uid object for given uidname.

    If no matching uidname is found, a row is inserted.

    @type uidname: string
    @param uidname: The uid to add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.

    @rtype: Uid
    @return: the uid object for the given uidname
    """
    # NOTE(review): the temporary-session/commit behaviour documented above
    # relies on a decorator (upstream: @session_wrapper) not visible in this
    # excerpt -- confirm it is present in the full file.
    q = session.query(Uid).filter_by(uid=uidname)

    try:
        ret = q.one()
    except NoResultFound:
        # Not present yet: insert a new row and hand it back.
        uid = Uid()
        uid.uid = uidname
        session.add(uid)
        session.commit_or_flush()
        ret = uid

    return ret

__all__.append('get_or_set_uid')
def get_uid_from_fingerprint(fpr, session=None):
    """
    Return the Uid attached to the given fingerprint string, or None if the
    fingerprint is unknown or has no uid.

    @type fpr: string
    @param fpr: full fingerprint to look up

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)
    """
    q = session.query(Uid)
    q = q.join(Fingerprint).filter_by(fingerprint=fpr)

    try:
        return q.one()
    except NoResultFound:
        return None

__all__.append('get_uid_from_fingerprint')
2839 ################################################################################
class UploadBlock(object):
    """Upload block row (maps table upload_blocks): blocks uploads of a
    source package, keyed by fingerprint/uid.  Attributes populated by the
    mapper.
    """

    def __init__(self, *args, **kwargs):
        # Mapper-populated object: nothing to initialise here.
        pass

    def __repr__(self):
        return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
__all__.append('UploadBlock')   # export: part of daklib.dbconn's public API
2850 ################################################################################
class DBConn(object):
    """
    database module init.

    Borg-style shared-state singleton holding the SQLAlchemy engine,
    reflected table metadata (tbl_* / view_* attributes) and the ORM
    mappers for the dak database.
    """
    # NOTE(review): the class-level __shared_state = {} dict that backs the
    # Borg pattern is not visible in this excerpt -- confirm it exists.

    def __init__(self, *args, **kwargs):
        # All instances share one state dict, so reflection/mapping below
        # happens only once per process.
        self.__dict__ = self.__shared_state

        if not getattr(self, 'initialised', False):
            self.initialised = True
            self.debug = kwargs.has_key('debug')
            # NOTE(review): excerpt elides the call that builds the engine
            # and session factory here (presumably self.__createconn()).

    def __setuptables(self):
        # Tables whose SERIAL 'id' primary key must be declared explicitly
        # for sqlalchemy 0.5 (see the workaround comment below).
        tables_with_primary = (
            'changes_pending_binaries',
            'changes_pending_files',
            'changes_pending_source',
            'pending_bin_contents',
            # The following tables have primary keys but sqlalchemy
            # version 0.5 fails to reflect them correctly with database
            # versions before upgrade #41.
            #'build_queue_files',
        # NOTE(review): excerpt elides further table names and this
        # tuple's closing parenthesis.

        # Tables reflected as-is (no explicit primary key declaration).
        tables_no_primary = (
            'changes_pending_files_map',
            'changes_pending_source_files',
            'changes_pool_files',
            'suite_architectures',
            'suite_src_formats',
            'suite_build_queue_copy',
            # see the comment above
            'build_queue_files',
        # NOTE(review): excerpt elides this tuple's closing parenthesis and
        # the 'views = (' opener for the view-name list that follows.

            'almost_obsolete_all_associations',
            'almost_obsolete_src_associations',
            'any_associations_source',
            'bin_assoc_by_arch',
            'bin_associations_binaries',
            'binaries_suite_arch',
            'binfiles_suite_component_arch',
            'newest_all_associations',
            'newest_any_associations',
            'newest_src_association',
            'obsolete_all_associations',
            'obsolete_any_associations',
            'obsolete_any_by_all_associations',
            'obsolete_src_associations',
            'src_associations_bin',
            'src_associations_src',
            'suite_arch_by_name',

        # Sqlalchemy version 0.5 fails to reflect the SERIAL type
        # correctly and that is why we have to use a workaround. It can
        # be removed as soon as we switch to version 0.6.
        for table_name in tables_with_primary:
            table = Table(table_name, self.db_meta, \
                Column('id', Integer, primary_key = True), \
                autoload=True, useexisting=True)
            setattr(self, 'tbl_%s' % table_name, table)

        for table_name in tables_no_primary:
            table = Table(table_name, self.db_meta, autoload=True)
            setattr(self, 'tbl_%s' % table_name, table)

        # bin_contents needs special attention until update #41 has been
        # (comment truncated in this excerpt -- presumably "applied")
        self.tbl_bin_contents = Table('bin_contents', self.db_meta, \
            Column('file', Text, primary_key = True),
            Column('binary_id', Integer, ForeignKey('binaries.id'), \
                primary_key = True),
            autoload=True, useexisting=True)

        # Views are reflected read-only and exposed as view_* attributes.
        for view_name in views:
            view = Table(view_name, self.db_meta, autoload=True)
            setattr(self, 'view_%s' % view_name, view)

    def __setupmappers(self):
        # Classical SQLAlchemy mapping: attach each ORM class defined in
        # this module to its reflected table.  'validator' (defined
        # elsewhere in this file) hooks ORMObject validation in.
        mapper(Architecture, self.tbl_architecture,
            properties = dict(arch_id = self.tbl_architecture.c.id,
                suites = relation(Suite, secondary=self.tbl_suite_architectures,
                    order_by='suite_name',
                    backref=backref('architectures', order_by='arch_string'))),
            extension = validator)

        mapper(Archive, self.tbl_archive,
            properties = dict(archive_id = self.tbl_archive.c.id,
                archive_name = self.tbl_archive.c.name))

        mapper(PendingBinContents, self.tbl_pending_bin_contents,
            properties = dict(contents_id =self.tbl_pending_bin_contents.c.id,
                filename = self.tbl_pending_bin_contents.c.filename,
                package = self.tbl_pending_bin_contents.c.package,
                version = self.tbl_pending_bin_contents.c.version,
                arch = self.tbl_pending_bin_contents.c.arch,
                otype = self.tbl_pending_bin_contents.c.type))

        mapper(DebContents, self.tbl_deb_contents,
            properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
                package=self.tbl_deb_contents.c.package,
                suite=self.tbl_deb_contents.c.suite,
                arch=self.tbl_deb_contents.c.arch,
                section=self.tbl_deb_contents.c.section,
                filename=self.tbl_deb_contents.c.filename))

        mapper(UdebContents, self.tbl_udeb_contents,
            properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
                package=self.tbl_udeb_contents.c.package,
                suite=self.tbl_udeb_contents.c.suite,
                arch=self.tbl_udeb_contents.c.arch,
                section=self.tbl_udeb_contents.c.section,
                filename=self.tbl_udeb_contents.c.filename))

        mapper(BuildQueue, self.tbl_build_queue,
            properties = dict(queue_id = self.tbl_build_queue.c.id))

        mapper(BuildQueueFile, self.tbl_build_queue_files,
            properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
                poolfile = relation(PoolFile, backref='buildqueueinstances')))

        mapper(DBBinary, self.tbl_binaries,
            properties = dict(binary_id = self.tbl_binaries.c.id,
                package = self.tbl_binaries.c.package,
                version = self.tbl_binaries.c.version,
                maintainer_id = self.tbl_binaries.c.maintainer,
                maintainer = relation(Maintainer),
                source_id = self.tbl_binaries.c.source,
                source = relation(DBSource, backref='binaries'),
                arch_id = self.tbl_binaries.c.architecture,
                architecture = relation(Architecture),
                poolfile_id = self.tbl_binaries.c.file,
                poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
                binarytype = self.tbl_binaries.c.type,
                fingerprint_id = self.tbl_binaries.c.sig_fpr,
                fingerprint = relation(Fingerprint),
                install_date = self.tbl_binaries.c.install_date,
                suites = relation(Suite, secondary=self.tbl_bin_associations,
                    backref=backref('binaries', lazy='dynamic'))),
            extension = validator)

        mapper(BinaryACL, self.tbl_binary_acl,
            properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))

        mapper(BinaryACLMap, self.tbl_binary_acl_map,
            properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
                fingerprint = relation(Fingerprint, backref="binary_acl_map"),
                architecture = relation(Architecture)))

        mapper(Component, self.tbl_component,
            properties = dict(component_id = self.tbl_component.c.id,
                component_name = self.tbl_component.c.name),
            extension = validator)

        mapper(DBConfig, self.tbl_config,
            properties = dict(config_id = self.tbl_config.c.id))

        mapper(DSCFile, self.tbl_dsc_files,
            properties = dict(dscfile_id = self.tbl_dsc_files.c.id,
                source_id = self.tbl_dsc_files.c.source,
                source = relation(DBSource),
                poolfile_id = self.tbl_dsc_files.c.file,
                poolfile = relation(PoolFile)))

        mapper(PoolFile, self.tbl_files,
            properties = dict(file_id = self.tbl_files.c.id,
                filesize = self.tbl_files.c.size,
                location_id = self.tbl_files.c.location,
                location = relation(Location,
                    # using lazy='dynamic' in the back
                    # reference because we have A LOT of
                    # files in one location
                    backref=backref('files', lazy='dynamic'))),
            extension = validator)

        mapper(Fingerprint, self.tbl_fingerprint,
            properties = dict(fingerprint_id = self.tbl_fingerprint.c.id,
                uid_id = self.tbl_fingerprint.c.uid,
                uid = relation(Uid),
                keyring_id = self.tbl_fingerprint.c.keyring,
                keyring = relation(Keyring),
                source_acl = relation(SourceACL),
                binary_acl = relation(BinaryACL)),
            extension = validator)

        mapper(Keyring, self.tbl_keyrings,
            properties = dict(keyring_name = self.tbl_keyrings.c.name,
                keyring_id = self.tbl_keyrings.c.id))

        mapper(DBChange, self.tbl_changes,
            properties = dict(change_id = self.tbl_changes.c.id,
                poolfiles = relation(PoolFile,
                    secondary=self.tbl_changes_pool_files,
                    backref="changeslinks"),
                seen = self.tbl_changes.c.seen,
                source = self.tbl_changes.c.source,
                binaries = self.tbl_changes.c.binaries,
                architecture = self.tbl_changes.c.architecture,
                distribution = self.tbl_changes.c.distribution,
                urgency = self.tbl_changes.c.urgency,
                maintainer = self.tbl_changes.c.maintainer,
                changedby = self.tbl_changes.c.changedby,
                date = self.tbl_changes.c.date,
                version = self.tbl_changes.c.version,
                files = relation(ChangePendingFile,
                    secondary=self.tbl_changes_pending_files_map,
                    backref="changesfile"),
                in_queue_id = self.tbl_changes.c.in_queue,
                in_queue = relation(PolicyQueue,
                    primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
                approved_for_id = self.tbl_changes.c.approved_for))

        mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
            properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))

        mapper(ChangePendingFile, self.tbl_changes_pending_files,
            properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
                filename = self.tbl_changes_pending_files.c.filename,
                size = self.tbl_changes_pending_files.c.size,
                md5sum = self.tbl_changes_pending_files.c.md5sum,
                sha1sum = self.tbl_changes_pending_files.c.sha1sum,
                sha256sum = self.tbl_changes_pending_files.c.sha256sum))

        mapper(ChangePendingSource, self.tbl_changes_pending_source,
            properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
                change = relation(DBChange),
                maintainer = relation(Maintainer,
                    primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
                changedby = relation(Maintainer,
                    primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
                fingerprint = relation(Fingerprint),
                source_files = relation(ChangePendingFile,
                    secondary=self.tbl_changes_pending_source_files,
                    backref="pending_sources")))

        mapper(KeyringACLMap, self.tbl_keyring_acl_map,
            properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
                keyring = relation(Keyring, backref="keyring_acl_map"),
                architecture = relation(Architecture)))

        mapper(Location, self.tbl_location,
            properties = dict(location_id = self.tbl_location.c.id,
                component_id = self.tbl_location.c.component,
                component = relation(Component, \
                    backref=backref('location', uselist = False)),
                archive_id = self.tbl_location.c.archive,
                archive = relation(Archive),
                # FIXME: the 'type' column is old cruft and
                # should be removed in the future.
                archive_type = self.tbl_location.c.type),
            extension = validator)

        mapper(Maintainer, self.tbl_maintainer,
            properties = dict(maintainer_id = self.tbl_maintainer.c.id,
                maintains_sources = relation(DBSource, backref='maintainer',
                    primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.maintainer)),
                changed_sources = relation(DBSource, backref='changedby',
                    primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.changedby))),
            extension = validator)

        mapper(NewComment, self.tbl_new_comments,
            properties = dict(comment_id = self.tbl_new_comments.c.id))

        mapper(Override, self.tbl_override,
            properties = dict(suite_id = self.tbl_override.c.suite,
                suite = relation(Suite),
                package = self.tbl_override.c.package,
                component_id = self.tbl_override.c.component,
                component = relation(Component),
                priority_id = self.tbl_override.c.priority,
                priority = relation(Priority),
                section_id = self.tbl_override.c.section,
                section = relation(Section),
                overridetype_id = self.tbl_override.c.type,
                overridetype = relation(OverrideType)))

        mapper(OverrideType, self.tbl_override_type,
            properties = dict(overridetype = self.tbl_override_type.c.type,
                overridetype_id = self.tbl_override_type.c.id))

        mapper(PolicyQueue, self.tbl_policy_queue,
            properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))

        mapper(Priority, self.tbl_priority,
            properties = dict(priority_id = self.tbl_priority.c.id))

        mapper(Section, self.tbl_section,
            properties = dict(section_id = self.tbl_section.c.id,
                section=self.tbl_section.c.section))

        mapper(DBSource, self.tbl_source,
            properties = dict(source_id = self.tbl_source.c.id,
                version = self.tbl_source.c.version,
                maintainer_id = self.tbl_source.c.maintainer,
                poolfile_id = self.tbl_source.c.file,
                poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
                fingerprint_id = self.tbl_source.c.sig_fpr,
                fingerprint = relation(Fingerprint),
                changedby_id = self.tbl_source.c.changedby,
                srcfiles = relation(DSCFile,
                    primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
                suites = relation(Suite, secondary=self.tbl_src_associations,
                    backref=backref('sources', lazy='dynamic')),
                srcuploaders = relation(SrcUploader)),
            extension = validator)

        mapper(SourceACL, self.tbl_source_acl,
            properties = dict(source_acl_id = self.tbl_source_acl.c.id))

        mapper(SrcFormat, self.tbl_src_format,
            properties = dict(src_format_id = self.tbl_src_format.c.id,
                format_name = self.tbl_src_format.c.format_name))

        mapper(SrcUploader, self.tbl_src_uploaders,
            properties = dict(uploader_id = self.tbl_src_uploaders.c.id,
                source_id = self.tbl_src_uploaders.c.source,
                source = relation(DBSource,
                    primaryjoin=(self.tbl_src_uploaders.c.source==self.tbl_source.c.id)),
                maintainer_id = self.tbl_src_uploaders.c.maintainer,
                maintainer = relation(Maintainer,
                    primaryjoin=(self.tbl_src_uploaders.c.maintainer==self.tbl_maintainer.c.id))))

        mapper(Suite, self.tbl_suite,
            properties = dict(suite_id = self.tbl_suite.c.id,
                policy_queue = relation(PolicyQueue),
                copy_queues = relation(BuildQueue,
                    secondary=self.tbl_suite_build_queue_copy)),
            extension = validator)

        mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
            properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
                suite = relation(Suite, backref='suitesrcformats'),
                src_format_id = self.tbl_suite_src_formats.c.src_format,
                src_format = relation(SrcFormat)))

        mapper(Uid, self.tbl_uid,
            properties = dict(uid_id = self.tbl_uid.c.id,
                fingerprint = relation(Fingerprint)),
            extension = validator)

        mapper(UploadBlock, self.tbl_upload_blocks,
            properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
                fingerprint = relation(Fingerprint, backref="uploadblocks"),
                uid = relation(Uid, backref="uploadblocks")))

        mapper(BinContents, self.tbl_bin_contents,
        # NOTE(review): excerpt elides the 'properties = dict(' opener here.
            binary = relation(DBBinary,
                backref=backref('contents', lazy='dynamic')),
            file = self.tbl_bin_contents.c.file))

    ## Connection functions
    def __createconn(self):
        from config import Config
        # NOTE(review): excerpt elides 'cnf = Config()' and the
        # 'if cnf["DB::Host"]:' branch header here (TCP connection case).
        connstr = "postgres://%s" % cnf["DB::Host"]
        if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
            connstr += ":%s" % cnf["DB::Port"]
        connstr += "/%s" % cnf["DB::Name"]
        # NOTE(review): excerpt elides the 'else:' header here
        # (unix-domain-socket connection case).
        connstr = "postgres:///%s" % cnf["DB::Name"]
        if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
            connstr += "?port=%s" % cnf["DB::Port"]

        self.db_pg = create_engine(connstr, echo=self.debug)
        self.db_meta = MetaData()
        self.db_meta.bind = self.db_pg
        self.db_smaker = sessionmaker(bind=self.db_pg,
        # NOTE(review): excerpt elides the remaining sessionmaker keyword
        # arguments and closing parenthesis here.

        self.__setuptables()
        self.__setupmappers()

    # NOTE(review): excerpt elides a 'def session(self):' header here; the
    # return below is that method's body (hands out a new ORM session).
        return self.db_smaker()

__all__.append('DBConn')