5 @contact: Debian FTPMaster <ftpmaster@debian.org>
6 @copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
7 @copyright: 2008-2009 Mark Hymers <mhy@debian.org>
8 @copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
9 @copyright: 2009 Mike O'Connor <stew@debian.org>
10 @license: GNU General Public License version 2 or later
13 # This program is free software; you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation; either version 2 of the License, or
16 # (at your option) any later version.
18 # This program is distributed in the hope that it will be useful,
19 # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 # GNU General Public License for more details.
23 # You should have received a copy of the GNU General Public License
24 # along with this program; if not, write to the Free Software
25 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 ################################################################################
29 # < mhy> I need a funny comment
30 # < sgran> two peanuts were walking down a dark street
31 # < sgran> one was a-salted
32 # * mhy looks up the definition of "funny"
34 ################################################################################
47 import simplejson as json
49 from datetime import datetime, timedelta
50 from errno import ENOENT
51 from tempfile import mkstemp, mkdtemp
53 from inspect import getargspec
56 from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
58 from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
59 backref, MapperExtension, EXT_CONTINUE, object_mapper
60 from sqlalchemy import types as sqltypes
62 # Don't remove this, we re-export the exceptions to scripts which import us
63 from sqlalchemy.exc import *
64 from sqlalchemy.orm.exc import NoResultFound
66 # Only import Config until Queue stuff is changed to store its config
68 from config import Config
69 from textutils import fix_maintainer
70 from dak_exceptions import DBUpdateError, NoSourceFieldError
72 # suppress some deprecation warnings in squeeze related to sqlalchemy
74 warnings.filterwarnings('ignore', \
75 "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
77 # TODO: sqlalchemy needs some extra configuration to correctly reflect
78 # the ind_deb_contents_* indexes - we ignore the warnings at the moment
79 warnings.filterwarnings("ignore", 'Predicate of partial index', SAWarning)
82 ################################################################################
84 # Patch in support for the debversion field type so that it works during
88 # that is for sqlalchemy 0.6
# Pick whichever base type this SQLAlchemy provides for custom column types.
# NOTE(review): the upstream try/except guarding these two assignments is
# missing from this extract; as written the second assignment always wins.
UserDefinedType = sqltypes.UserDefinedType
# this one for sqlalchemy 0.5
UserDefinedType = sqltypes.TypeEngine
class DebVersion(UserDefinedType):
    """Custom SQLAlchemy column type for PostgreSQL's 'debversion' so
    reflection of debversion columns works.

    NOTE(review): the three method bodies are missing from this extract.
    """
    def get_col_spec(self):

    def bind_processor(self, dialect):

    # ' = None' is needed for sqlalchemy 0.5:
    def result_processor(self, dialect, coltype = None):
# Register the DebVersion type only on SQLAlchemy versions dak supports.
sa_major_version = sqlalchemy.__version__[0:3]
if sa_major_version in ["0.5", "0.6"]:
    from sqlalchemy.databases import postgres
    postgres.ischema_names['debversion'] = DebVersion
    # NOTE(review): the 'else:' line belonging to the raise below is missing
    # from this extract.
    raise Exception("dak only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
112 ################################################################################
# Public export list; extended via __all__.append() after each definition below.
__all__ = ['IntegrityError', 'SQLAlchemyError', 'DebVersion']
116 ################################################################################
def session_wrapper(fn):
    """
    Wrapper around common ".., session=None):" handling. If the wrapped
    function is called without passing 'session', we create a local one
    and destroy it when the function ends.

    Also attaches a commit_or_flush method to the session; if we created a
    local session, this is a synonym for session.commit(), otherwise it is a
    synonym for session.flush().

    NOTE(review): several upstream lines ('if session is None:' guard, the
    try/finally around the call and session.close()) are missing from this
    extract.
    """

    def wrapped(*args, **kwargs):
        private_transaction = False

        # Find the session object
        session = kwargs.get('session')

        if len(args) <= len(getargspec(fn)[0]) - 1:
            # No session specified as last argument or in kwargs
            private_transaction = True
            session = kwargs['session'] = DBConn().session()

        # Session is last argument in args
            session = args[-1] = DBConn().session()
            private_transaction = True

        if private_transaction:
            # We own the session, so commit_or_flush really commits.
            session.commit_or_flush = session.commit

            session.commit_or_flush = session.flush

        return fn(*args, **kwargs)

        if private_transaction:
            # We created a session; close it.

    # Preserve the wrapped function's metadata for introspection (py2 style).
    wrapped.__doc__ = fn.__doc__
    wrapped.func_name = fn.func_name

__all__.append('session_wrapper')
167 ################################################################################
class ORMObject(object):
    """
    ORMObject is a base class for all ORM classes mapped by SQLalchemy. All
    derived classes must implement the properties() method.

    NOTE(review): this extract omits many upstream lines ('def' lines,
    else/continue branches and returns); annotations cover visible code only.
    """

    def properties(self):
        """
        This method should be implemented by all derived classes and returns a
        list of the important properties. The properties 'created' and
        'modified' will be added automatically. A suffix '_count' should be
        added to properties that are lists or query objects. The most important
        property name should be returned as the first element in the list
        because it is used by repr().
        """

    # NOTE(review): the 'def json(self):' line and 'data = {}' are missing here.
        """
        Returns a JSON representation of the object based on the properties
        returned from the properties() method.
        """
        # add created and modified
        all_properties = self.properties() + ['created', 'modified']
        for property in all_properties:
            # check for list or query
            if property[-6:] == '_count':
                real_property = property[:-6]
                if not hasattr(self, real_property):
                value = getattr(self, real_property)
                if hasattr(value, '__len__'):
                elif hasattr(value, 'count'):
                    # query objects expose count() rather than __len__
                    value = value.count()
                    raise KeyError('Do not understand property %s.' % property)
                if not hasattr(self, property):
                value = getattr(self, property)
            elif isinstance(value, ORMObject):
                # use repr() for ORMObject types
                # we want a string for all other types because json cannot
            data[property] = value
        return json.dumps(data)

    # NOTE(review): the 'def classname(self):' line is missing here.
        """
        Returns the name of the class.
        """
        return type(self).__name__

    # NOTE(review): the 'def __repr__(self):' line is missing here.
        """
        Returns a short string representation of the object using the first
        element from the properties() method.
        """
        primary_property = self.properties()[0]
        value = getattr(self, primary_property)
        return '<%s %s>' % (self.classname(), str(value))

    # NOTE(review): a 'def __str__(self):' line is presumably missing here
    # -- TODO confirm against upstream.
        """
        Returns a human readable form of the object using the properties()
        """
        return '<%s %s>' % (self.classname(), self.json())

    def not_null_constraints(self):
        """
        Returns a list of properties that must be not NULL. Derived classes
        should override this method if needed.
        """

    # Template used by validate() when a NOT NULL constraint is violated.
    validation_message = \
        "Validation failed because property '%s' must not be empty in object\n%s"

    # NOTE(review): the 'def validate(self):' line is missing here.
        """
        This function validates the not NULL constraints as returned by
        not_null_constraints(). It raises the DBUpdateError exception if
        """
        for property in self.not_null_constraints():
            # TODO: It is a bit awkward that the mapper configuration allow
            # directly setting the numeric _id columns. We should get rid of it
            if hasattr(self, property + '_id') and \
                getattr(self, property + '_id') is not None:
            if not hasattr(self, property) or getattr(self, property) is None:
                raise DBUpdateError(self.validation_message % \
                    (property, str(self)))

    # NOTE(review): @classmethod/@session_wrapper decorator lines are missing
    # here -- 'cls' as first parameter indicates a classmethod upstream.
    def get(cls, primary_key, session = None):
        """
        This is a support function that allows getting an object by its primary
        key using:

        Architecture.get(3[, session])

        instead of the more verbose

        session.query(Architecture).get(3)
        """
        return session.query(cls).get(primary_key)

    def session(self, replace = False):
        """
        Returns the current session that is associated with the object. May
        return None is object is in detached state.
        """
        return object_session(self)

    def clone(self, session = None):
        """
        Clones the current object in a new session and returns the new clone. A
        fresh session is created if the optional session parameter is not
        provided. The function will fail if a session is provided and has

        RATIONALE: SQLAlchemy's session is not thread safe. This method clones
        an existing object to allow several threads to work with their own
        instances of an ORMObject.

        WARNING: Only persistent (committed) objects can be cloned. Changes
        made to the original object that are not committed yet will get lost.
        The session of the new object will always be rolled back to avoid
        """
        if self.session() is None:
            raise RuntimeError( \
                'Method clone() failed for detached object:\n%s' % self)
        self.session().flush()
        # Use the mapper's primary key so clone() works for any mapped subclass.
        mapper = object_mapper(self)
        primary_key = mapper.primary_key_from_instance(self)
        object_class = self.__class__
        # NOTE(review): the 'if session is None:' guard line is missing here.
            session = DBConn().session()
        elif len(session.new) + len(session.dirty) + len(session.deleted) > 0:
            raise RuntimeError( \
                'Method clone() failed due to unflushed changes in session.')
        new_object = session.query(object_class).get(primary_key)

        if new_object is None:
            raise RuntimeError( \
                'Method clone() failed for non-persistent object:\n%s' % self)
        # NOTE(review): the 'return new_object' line is missing here.

__all__.append('ORMObject')
337 ################################################################################
class Validator(MapperExtension):
    """
    This class calls the validate() method for each instance for the
    'before_update' and 'before_insert' events. A global object validator is
    used for configuring the individual mappers.

    NOTE(review): both method bodies are missing from this extract.
    """

    def before_update(self, mapper, connection, instance):

    def before_insert(self, mapper, connection, instance):

# Shared extension instance wired into the mapper configurations.
validator = Validator()
356 ################################################################################
class Architecture(ORMObject):
    """ORM class for a Debian architecture (e.g. 'amd64', 'source').

    Instances compare equal/unequal against plain strings via their
    arch_string; any other operand type defers to the default comparison
    by returning NotImplemented.
    """

    def __init__(self, arch_string = None, description = None):
        self.arch_string = arch_string
        self.description = description

    def __eq__(self, val):
        if not isinstance(val, str):
            # This signals to use the normal comparison operator
            return NotImplemented
        return self.arch_string == val

    def __ne__(self, val):
        if not isinstance(val, str):
            # This signals to use the normal comparison operator
            return NotImplemented
        return self.arch_string != val

    def properties(self):
        # arch_string first: ORMObject.repr() uses the first property.
        return ['arch_string', 'arch_id', 'suites_count']

    def not_null_constraints(self):
        return ['arch_string']

__all__.append('Architecture')
def get_architecture(architecture, session=None):
    """
    Returns database id for given C{architecture}.

    @type architecture: string
    @param architecture: The name of the architecture

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @return: Architecture object for the given arch (None if not present)
    """
    # NOTE(review): the @session_wrapper decorator and the try/return lines
    # around q.one() are missing from this extract.
    q = session.query(Architecture).filter_by(arch_string=architecture)

    except NoResultFound:

__all__.append('get_architecture')
# TODO: should be removed because the implementation is too trivial
def get_architecture_suites(architecture, session=None):
    """
    Returns list of Suite objects for given C{architecture} name

    @type architecture: str
    @param architecture: Architecture name to search for

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @return: list of Suite objects for the given name (may be empty)
    """
    # NOTE(review): the @session_wrapper decorator line is missing from this
    # extract; without it 'session' would arrive as None here.
    return get_architecture(architecture, session).suites

__all__.append('get_architecture_suites')
429 ################################################################################
class Archive(object):
    """ORM class for the archive table."""
    def __init__(self, *args, **kwargs):

    # NOTE(review): the 'pass' body and the '__repr__' def line are missing
    # from this extract.
        return '<Archive %s>' % self.archive_name

__all__.append('Archive')
def get_archive(archive, session=None):
    """
    returns database id for given C{archive}.

    @type archive: string
    @param archive: the name of the archive

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @return: Archive object for the given name (None if not present)
    """
    # Archive names are stored lowercase.
    archive = archive.lower()

    # NOTE(review): @session_wrapper decorator and the try/return lines
    # around q.one() are missing from this extract.
    q = session.query(Archive).filter_by(archive_name=archive)

    except NoResultFound:

__all__.append('get_archive')
467 ################################################################################
class BinContents(ORMObject):
    """ORM class mapping a contents file entry to a binary package."""
    def __init__(self, file = None, binary = None):
        # NOTE(review): the attribute assignments are missing from this extract.

    def properties(self):
        return ['file', 'binary']

__all__.append('BinContents')
479 ################################################################################
class DBBinary(ORMObject):
    """ORM class for a binary package (binaries table)."""
    def __init__(self, package = None, source = None, version = None, \
        maintainer = None, architecture = None, poolfile = None, \
        # NOTE(review): the final parameter line (binarytype) and the
        # 'self.source = source' assignment are missing from this extract.
        self.package = package
        self.version = version
        self.maintainer = maintainer
        self.architecture = architecture
        self.poolfile = poolfile
        self.binarytype = binarytype

    def properties(self):
        # _count suffixes mark list/query-valued properties for ORMObject.json().
        return ['package', 'version', 'maintainer', 'source', 'architecture', \
            'poolfile', 'binarytype', 'fingerprint', 'install_date', \
            'suites_count', 'binary_id', 'contents_count']

    def not_null_constraints(self):
        return ['package', 'version', 'maintainer', 'source', 'poolfile', \

    def get_component_name(self):
        # The component is reached via the pool file's location.
        return self.poolfile.location.component.component_name

__all__.append('DBBinary')
def get_suites_binary_in(package, session=None):
    """
    Returns list of Suite objects which given C{package} name is in

    @param package: DBBinary package name to search for

    @return: list of Suite objects for the given package
    """
    # NOTE(review): the @session_wrapper decorator line is missing from this
    # extract.
    return session.query(Suite).filter(Suite.binaries.any(DBBinary.package == package)).all()

__all__.append('get_suites_binary_in')
def get_component_by_package_suite(package, suite_list, arch_list=[], session=None):
    """
    Returns the component name of the newest binary package in suite_list or
    None if no package is found. The result can be optionally filtered by a list
    of architecture names.

    @param package: DBBinary package name to search for

    @type suite_list: list of str
    @param suite_list: list of suite_name items

    @type arch_list: list of str
    @param arch_list: optional list of arch_string items that defaults to []

    @rtype: str or NoneType
    @return: name of component or None
    """
    # NOTE(review): the @session_wrapper decorator and the
    # 'if binary is None: return None' lines are missing from this extract.
    # Also note the mutable default arch_list=[]; safe only while unmutated.
    q = session.query(DBBinary).filter_by(package = package). \
        join(DBBinary.suites).filter(Suite.suite_name.in_(suite_list))
    if len(arch_list) > 0:
        q = q.join(DBBinary.architecture). \
            filter(Architecture.arch_string.in_(arch_list))
    # Newest version wins.
    binary = q.order_by(desc(DBBinary.version)).first()
    return binary.get_component_name()

__all__.append('get_component_by_package_suite')
556 ################################################################################
class BinaryACL(object):
    """ORM class for the binary_acl table."""
    def __init__(self, *args, **kwargs):

    # NOTE(review): the 'pass' body and '__repr__' def line are missing here.
        return '<BinaryACL %s>' % self.binary_acl_id

__all__.append('BinaryACL')
567 ################################################################################
class BinaryACLMap(object):
    """ORM class for the binary_acl_map table."""
    def __init__(self, *args, **kwargs):

    # NOTE(review): the 'pass' body and '__repr__' def line are missing here.
        return '<BinaryACLMap %s>' % self.binary_acl_map_id

__all__.append('BinaryACLMap')
578 ################################################################################
583 ArchiveDir "%(archivepath)s";
584 OverrideDir "%(overridedir)s";
585 CacheDir "%(cachedir)s";
590 Packages::Compress ". bzip2 gzip";
591 Sources::Compress ". bzip2 gzip";
596 bindirectory "incoming"
601 BinOverride "override.sid.all3";
602 BinCacheDB "packages-accepted.db";
604 FileList "%(filelist)s";
607 Packages::Extensions ".deb .udeb";
610 bindirectory "incoming/"
613 BinOverride "override.sid.all3";
614 SrcOverride "override.sid.all3.src";
615 FileList "%(filelist)s";
class BuildQueue(object):
    """ORM class for a build queue; also maintains the on-disk
    Packages/Sources/Release metadata for the queue directory.

    NOTE(review): this extract omits many upstream lines (loop headers,
    try/except blocks, returns); annotations cover visible code only.
    """
    def __init__(self, *args, **kwargs):

    # NOTE(review): the '__repr__' def line is missing here.
        return '<BuildQueue %s>' % self.queue_name

    def write_metadata(self, starttime, force=False):
        # Do we write out metafiles?
        if not (force or self.generate_metadata):

        session = DBConn().session().object_session(self)

        fl_fd = fl_name = ac_fd = ac_name = None

        # All architectures except 'source', for the Release file.
        arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
        startdir = os.getcwd()

        # Grab files we want to include
        newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
        # Write file list with newer files
        (fl_fd, fl_name) = mkstemp()
            os.write(fl_fd, '%s\n' % n.fullpath)

        # Write minimal apt.conf
        # TODO: Remove hardcoding from template
        (ac_fd, ac_name) = mkstemp()
        os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
                                            'cachedir': cnf["Dir::Cache"],
                                            'overridedir': cnf["Dir::Override"],

        # Run apt-ftparchive generate
        os.chdir(os.path.dirname(ac_name))
        os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))

        # Run apt-ftparchive release
        # TODO: Eww - fix this
        bname = os.path.basename(self.path)

        # We have to remove the Release file otherwise it'll be included in the
        os.unlink(os.path.join(bname, 'Release'))

        os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))

        # Crude hack with open and append, but this whole section is and should be redone.
        if self.notautomatic:
            release=open("Release", "a")
            release.write("NotAutomatic: yes")

        keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
        if cnf.has_key("Dinstall::SigningPubKeyring"):
            keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]

        os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))

        # Move the files if we got this far
        os.rename('Release', os.path.join(bname, 'Release'))

        os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))

        # Clean up any left behind files

    def clean_and_update(self, starttime, Logger, dryrun=False):
        """WARNING: This routine commits for you"""
        session = DBConn().session().object_session(self)

        if self.generate_metadata and not dryrun:
            self.write_metadata(starttime)

        # Grab files older than our execution time
        older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()

                Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
                Logger.log(["I: Removing %s from the queue" % o.fullpath])
                os.unlink(o.fullpath)

                # If it wasn't there, don't worry
                if e.errno == ENOENT:

                # TODO: Replace with proper logging call
                Logger.log(["E: Could not remove %s" % o.fullpath])

        for f in os.listdir(self.path):
            if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):

                r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
            except NoResultFound:
                fp = os.path.join(self.path, f)
                    Logger.log(["I: Would remove unused link %s" % fp])
                    Logger.log(["I: Removing unused link %s" % fp])

                        Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])

    def add_file_from_pool(self, poolfile):
        """Copies a file into the pool. Assumes that the PoolFile object is
        attached to the same SQLAlchemy session as the Queue object is.

        The caller is responsible for committing after calling this function."""
        poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]

        # Check if we have a file of this name or this ID already
        for f in self.queuefiles:
            if f.fileid is not None and f.fileid == poolfile.file_id or \
               f.poolfile.filename == poolfile_basename:
                # In this case, update the BuildQueueFile entry so we
                # don't remove it too early
                f.lastused = datetime.now()
                DBConn().session().object_session(poolfile).add(f)

        # Prepare BuildQueueFile object
        qf = BuildQueueFile()
        qf.build_queue_id = self.queue_id
        qf.lastused = datetime.now()
        qf.filename = poolfile_basename

        targetpath = poolfile.fullpath
        queuepath = os.path.join(self.path, poolfile_basename)

            # We need to copy instead of symlink
                utils.copy(targetpath, queuepath)
                # NULL in the fileid field implies a copy

                os.symlink(targetpath, queuepath)
                qf.fileid = poolfile.file_id

        # Get the same session as the PoolFile is using and add the qf to it
        DBConn().session().object_session(poolfile).add(qf)

__all__.append('BuildQueue')
def get_build_queue(queuename, session=None):
    """
    Returns BuildQueue object for given C{queue name}, creating it if it does not

    @type queuename: string
    @param queuename: The name of the queue

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @return: BuildQueue object for the given queue
    """
    # NOTE(review): the @session_wrapper decorator and the try/return lines
    # around q.one() are missing from this extract.
    q = session.query(BuildQueue).filter_by(queue_name=queuename)

    except NoResultFound:

__all__.append('get_build_queue')
846 ################################################################################
class BuildQueueFile(object):
    """ORM class linking a pool file into a build queue directory."""
    def __init__(self, *args, **kwargs):

    # NOTE(review): the '__repr__' def line is missing here.
        return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)

    # NOTE(review): the 'fullpath' property def line is missing here.
        return os.path.join(self.buildqueue.path, self.filename)

__all__.append('BuildQueueFile')
862 ################################################################################
class ChangePendingBinary(object):
    """ORM class for a pending binary of an uploaded changes file."""
    def __init__(self, *args, **kwargs):

    # NOTE(review): the 'pass' body and '__repr__' def line are missing here.
        return '<ChangePendingBinary %s>' % self.change_pending_binary_id

__all__.append('ChangePendingBinary')
873 ################################################################################
class ChangePendingFile(object):
    """ORM class for a pending file of an uploaded changes file."""
    def __init__(self, *args, **kwargs):

    # NOTE(review): the 'pass' body and '__repr__' def line are missing here.
        return '<ChangePendingFile %s>' % self.change_pending_file_id

__all__.append('ChangePendingFile')
884 ################################################################################
class ChangePendingSource(object):
    """ORM class for a pending source of an uploaded changes file."""
    def __init__(self, *args, **kwargs):

    # NOTE(review): the 'pass' body and '__repr__' def line are missing here.
        return '<ChangePendingSource %s>' % self.change_pending_source_id

__all__.append('ChangePendingSource')
895 ################################################################################
class Component(ORMObject):
    """ORM class for an archive component (e.g. 'main', 'contrib').

    Instances compare equal/unequal against plain strings via their
    component_name; any other operand type defers to the default
    comparison by returning NotImplemented.
    """

    def __init__(self, component_name = None):
        self.component_name = component_name

    def __eq__(self, val):
        if not isinstance(val, str):
            # This signals to use the normal comparison operator
            return NotImplemented
        return self.component_name == val

    def __ne__(self, val):
        if not isinstance(val, str):
            # This signals to use the normal comparison operator
            return NotImplemented
        return self.component_name != val

    def properties(self):
        # component_name first: ORMObject.repr() uses the first property.
        return ['component_name', 'component_id', 'description', \
            'location_count', 'meets_dfsg', 'overrides_count']

    def not_null_constraints(self):
        return ['component_name']

__all__.append('Component')
def get_component(component, session=None):
    """
    Returns database id for given C{component}.

    @type component: string
    @param component: The name of the override type

    @return: the database id for the given component
    """
    # Component names are stored lowercase.
    component = component.lower()

    # NOTE(review): the @session_wrapper decorator and the try/return lines
    # around q.one() are missing from this extract.
    q = session.query(Component).filter_by(component_name=component)

    except NoResultFound:

__all__.append('get_component')
946 ################################################################################
class DBConfig(object):
    """ORM class for the config table."""
    def __init__(self, *args, **kwargs):

    # NOTE(review): the 'pass' body and '__repr__' def line are missing here.
        return '<DBConfig %s>' % self.name

__all__.append('DBConfig')
957 ################################################################################
def get_or_set_contents_file_id(filename, session=None):
    """
    Returns database id for given filename.

    If no matching file is found, a row is inserted.

    @type filename: string
    @param filename: The filename
    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for committing.

    @return: the database id for the given component
    """
    q = session.query(ContentFilename).filter_by(filename=filename)

    # NOTE(review): the 'try:', 'session.add(cf)' and 'return ret' lines are
    # missing from this extract, as is the @session_wrapper decorator.
        ret = q.one().cafilename_id
    except NoResultFound:
        cf = ContentFilename()
        cf.filename = filename
        # commit_or_flush comes from session_wrapper: commits a private
        # session, flushes a caller-supplied one.
        session.commit_or_flush()
        ret = cf.cafilename_id

__all__.append('get_or_set_contents_file_id')
def get_contents(suite, overridetype, section=None, session=None):
    """
    Returns contents for a suite / overridetype combination, limiting
    to a section if not None.

    @param suite: Suite object

    @type overridetype: OverrideType
    @param overridetype: OverrideType object

    @type section: Section
    @param section: Optional section object to limit results to

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: ResultsProxy
    @return: ResultsProxy object set up to return tuples of (filename, section,
    """

    # find me all of the contents for a given suite
    # NOTE(review): some select-list lines of this SQL are missing from this
    # extract, as is the @session_wrapper decorator.
    contents_q = """SELECT (p.path||'/'||n.file) AS fn,
                   FROM content_associations c join content_file_paths p ON (c.filepath=p.id)
                   JOIN content_file_names n ON (c.filename=n.id)
                   JOIN binaries b ON (b.id=c.binary_pkg)
                   JOIN override o ON (o.package=b.package)
                   JOIN section s ON (s.id=o.section)
                   WHERE o.suite = :suiteid AND o.type = :overridetypeid
                   AND b.type=:overridetypename"""

    # Bind parameters keep the query safe from SQL injection.
    vals = {'suiteid': suite.suite_id,
            'overridetypeid': overridetype.overridetype_id,
            'overridetypename': overridetype.overridetype}

    if section is not None:
        contents_q += " AND s.id = :sectionid"
        vals['sectionid'] = section.section_id

    contents_q += " ORDER BY fn"

    return session.execute(contents_q, vals)

__all__.append('get_contents')
1043 ################################################################################
class ContentFilepath(object):
    """ORM class for the content_file_paths table."""
    def __init__(self, *args, **kwargs):

    # NOTE(review): the 'pass' body and '__repr__' def line are missing here.
        return '<ContentFilepath %s>' % self.filepath

__all__.append('ContentFilepath')
def get_or_set_contents_path_id(filepath, session=None):
    """
    Returns database id for given path.

    If no matching file is found, a row is inserted.

    @type filepath: string
    @param filepath: The filepath

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for committing.

    @return: the database id for the given path
    """
    q = session.query(ContentFilepath).filter_by(filepath=filepath)

    # NOTE(review): the 'try:', 'session.add(cf)' and 'return ret' lines are
    # missing from this extract, as is the @session_wrapper decorator.
        ret = q.one().cafilepath_id
    except NoResultFound:
        cf = ContentFilepath()
        cf.filepath = filepath
        # commit_or_flush comes from session_wrapper: commits a private
        # session, flushes a caller-supplied one.
        session.commit_or_flush()
        ret = cf.cafilepath_id

__all__.append('get_or_set_contents_path_id')
1088 ################################################################################
class ContentAssociation(object):
    """ORM class for the content_associations table."""
    def __init__(self, *args, **kwargs):

    # NOTE(review): the 'pass' body and '__repr__' def line are missing here.
        return '<ContentAssociation %s>' % self.ca_id

__all__.append('ContentAssociation')
def insert_content_paths(binary_id, fullpaths, session=None):
    """
    Make sure given path is associated with given binary id

    @type binary_id: int
    @param binary_id: the id of the binary
    @type fullpaths: list
    @param fullpaths: the list of paths of the file being associated with the binary
    @type session: SQLAlchemy session
    @param session: Optional SQLAlchemy session. If this is passed, the caller
    is responsible for ensuring a transaction has begun and committing the
    results or rolling back based on the result code. If not passed, a commit
    will be performed at the end of the function, otherwise the caller is
    responsible for committing.

    @return: True upon success
    """
    privatetrans = False
    # NOTE(review): the 'if session is None:' guard and the try/except/
    # rollback bookkeeping lines are missing from this extract.
        session = DBConn().session()

    def generate_path_dicts():
        # Normalise './'-prefixed paths before handing them to the INSERT.
        for fullpath in fullpaths:
            if fullpath.startswith( './' ):
                fullpath = fullpath[2:]

            yield {'filename':fullpath, 'id': binary_id }

    for d in generate_path_dicts():
        session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",

    traceback.print_exc()

    # Only rollback if we set up the session ourself

__all__.append('insert_content_paths')
1152 ################################################################################
class DSCFile(object):
    """ORM class for the dsc_files table."""
    def __init__(self, *args, **kwargs):

    # NOTE(review): the 'pass' body and '__repr__' def line are missing here.
        return '<DSCFile %s>' % self.dscfile_id

__all__.append('DSCFile')
def get_dscfiles(dscfile_id=None, source_id=None, poolfile_id=None, session=None):
    """
    Returns a list of DSCFiles which may be empty

    @type dscfile_id: int (optional)
    @param dscfile_id: the dscfile_id of the DSCFiles to find

    @type source_id: int (optional)
    @param source_id: the source id related to the DSCFiles to find

    @type poolfile_id: int (optional)
    @param poolfile_id: the poolfile id related to the DSCFiles to find

    @return: Possibly empty list of DSCFiles
    """
    q = session.query(DSCFile)

    # Each filter is applied only when the corresponding id was supplied.
    if dscfile_id is not None:
        q = q.filter_by(dscfile_id=dscfile_id)

    if source_id is not None:
        q = q.filter_by(source_id=source_id)

    if poolfile_id is not None:
        q = q.filter_by(poolfile_id=poolfile_id)

    # NOTE(review): the final 'return q.all()' line and the @session_wrapper
    # decorator are missing from this extract.

__all__.append('get_dscfiles')
1196 ################################################################################
class PoolFile(ORMObject):
    """ORM class for a file stored in the archive pool (files table)."""
    def __init__(self, filename = None, location = None, filesize = -1, \
        # NOTE(review): the md5sum parameter line is missing from this extract.
        self.filename = filename
        self.location = location
        self.filesize = filesize
        self.md5sum = md5sum

    # NOTE(review): the 'fullpath' property def line is missing here.
        return os.path.join(self.location.path, self.filename)

    def is_valid(self, filesize = -1, md5sum = None):
        # A pool file is valid when both the size and md5sum match the DB row.
        return self.filesize == long(filesize) and self.md5sum == md5sum

    def properties(self):
        return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
            'sha256sum', 'location', 'source', 'binary', 'last_used']

    def not_null_constraints(self):
        return ['filename', 'md5sum', 'location']

__all__.append('PoolFile')
def check_poolfile(filename, filesize, md5sum, location_id, session=None):
    """
    Returns a tuple:
    (ValidFileFound [boolean], PoolFile object or None)

    @type filename: string
    @param filename: the filename of the file to check against the DB

    @param filesize: the size of the file to check against the DB

    @type md5sum: string
    @param md5sum: the md5sum of the file to check against the DB

    @type location_id: int
    @param location_id: the id of the location to look in

    @return: Tuple of length 2.
        - If valid pool file found: (C{True}, C{PoolFile object})
        - If valid pool file not found:
            - (C{False}, C{None}) if no file found
            - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
    """
    poolfile = session.query(Location).get(location_id). \
        files.filter_by(filename=filename).first()
    # NOTE(review): the 'valid = False' / 'valid = True' assignment lines and
    # the @session_wrapper decorator are missing from this extract.
    if poolfile and poolfile.is_valid(filesize = filesize, md5sum = md5sum):
    return (valid, poolfile)

__all__.append('check_poolfile')
# TODO: the implementation can trivially be inlined at the place where the
# function is called
def get_poolfile_by_id(file_id, session=None):
    """
    Returns a PoolFile objects or None for the given id

    @type file_id: int
    @param file_id: the id of the file to look for

    @rtype: PoolFile or None
    @return: either the PoolFile object or None
    """

    return session.query(PoolFile).get(file_id)
1274 __all__.append('get_poolfile_by_id')
def get_poolfile_like_name(filename, session=None):
    """
    Returns an array of PoolFile objects which are like the given name

    @type filename: string
    @param filename: the filename of the file to check against the DB

    @rtype: array
    @return: array of PoolFile objects
    """

    # TODO: There must be a way of properly using bind parameters with %FOO%
    q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))

    # NOTE(review): the final return was lost in this copy; restored to
    # match the documented list contract — confirm against VCS.
    return q.all()
1293 __all__.append('get_poolfile_like_name')
def add_poolfile(filename, datadict, location_id, session=None):
    """
    Add a new file to the pool

    @type filename: string
    @param filename: filename

    @type datadict: dict
    @param datadict: dict with needed data

    @type location_id: int
    @param location_id: database id of the location

    @rtype: PoolFile
    @return: the PoolFile object created
    """
    poolfile = PoolFile()
    poolfile.filename = filename
    poolfile.filesize = datadict["size"]
    poolfile.md5sum = datadict["md5sum"]
    poolfile.sha1sum = datadict["sha1sum"]
    poolfile.sha256sum = datadict["sha256sum"]
    poolfile.location_id = location_id

    session.add(poolfile)
    # Flush to get a file id (NB: This is not a commit)
    # NOTE(review): the flush call and the return were lost in this copy;
    # restored from the comment above and the documented @return.
    session.flush()

    return poolfile
1326 __all__.append('add_poolfile')
1328 ################################################################################
class Fingerprint(ORMObject):
    """A GPG key fingerprint known to the archive."""

    def __init__(self, fingerprint = None):
        self.fingerprint = fingerprint

    def properties(self):
        # NOTE(review): the continuation of this list was lost in this copy
        # (the original line ended in a backslash); only the visible entries
        # are kept — confirm the full list against VCS.
        return ['fingerprint', 'fingerprint_id', 'keyring', 'uid']

    def not_null_constraints(self):
        return ['fingerprint']
1341 __all__.append('Fingerprint')
def get_fingerprint(fpr, session=None):
    """
    Returns Fingerprint object for given fpr.

    @type fpr: string
    @param fpr: The fpr to find / add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied).

    @rtype: Fingerprint
    @return: the Fingerprint object for the given fpr or None
    """

    q = session.query(Fingerprint).filter_by(fingerprint=fpr)

    # NOTE(review): the try/success path was lost in this copy (only the
    # orphan except survived); restored to match the sibling get_* helpers —
    # confirm against VCS.
    try:
        ret = q.one()
    except NoResultFound:
        ret = None

    return ret
1368 __all__.append('get_fingerprint')
def get_or_set_fingerprint(fpr, session=None):
    """
    Returns Fingerprint object for given fpr.

    If no matching fpr is found, a row is inserted.

    @type fpr: string
    @param fpr: The fpr to find / add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.
    A flush will be performed either way.

    @rtype: Fingerprint
    @return: the Fingerprint object for the given fpr
    """

    q = session.query(Fingerprint).filter_by(fingerprint=fpr)

    # NOTE(review): the try/success/return lines were lost in this copy;
    # restored around the surviving insert branch — confirm against VCS.
    try:
        ret = q.one()
    except NoResultFound:
        # Not present yet: insert the new row and make it visible to the
        # caller (commit_or_flush commits for internally-created sessions).
        fingerprint = Fingerprint()
        fingerprint.fingerprint = fpr
        session.add(fingerprint)
        session.commit_or_flush()
        ret = fingerprint

    return ret
1403 __all__.append('get_or_set_fingerprint')
1405 ################################################################################
# Helper routine for Keyring class
def get_ldap_name(entry):
    """Build a display name from an LDAP entry.

    Joins the first value of each of the "cn", "mn" and "sn" attributes,
    skipping attributes that are absent, empty or the placeholder "-".

    @type entry: dict
    @param entry: LDAP entry mapping attribute names to lists of values

    @rtype: string
    @return: space-joined name components (may be empty)
    """
    # NOTE(review): the accumulator and attribute-lookup lines were lost in
    # this copy; restored to match the surviving test and join — confirm
    # against VCS.
    name = []
    for k in ["cn", "mn", "sn"]:
        ret = entry.get(k)
        if ret and ret[0] != "" and ret[0] != "-":
            name.append(ret[0])
    return " ".join(name)
1416 ################################################################################
class Keyring(object):
    """A GPG keyring on disk together with its database record.

    NOTE(review): this copy of the class has lost a number of lines
    (constructor body, the __repr__ header, parts of load_keys and of the
    user-generation methods); surviving lines are kept exactly as found and
    the gaps are flagged below. Confirm against VCS before relying on it.
    """

    gpg_invocation = "gpg --no-default-keyring --keyring %s" +\
                     " --with-colons --fingerprint --fingerprint"

    def __init__(self, *args, **kwargs):
    # NOTE(review): the __init__ body and the `def __repr__(self):` header
    # are missing here in this copy.
        return '<Keyring %s>' % self.keyring_name

    def de_escape_gpg_str(self, txt):
        # Undo gpg's \xNN escaping: after the capturing split, every odd
        # element is an escape sequence; replace it with its character.
        esclist = re.split(r'(\\x..)', txt)
        for x in range(1,len(esclist),2):
            esclist[x] = "%c" % (int(esclist[x][2:],16))
        return "".join(esclist)

    def parse_address(self, uid):
        """parses uid and returns a tuple of real name and email address"""
        (name, address) = email.Utils.parseaddr(uid)
        name = re.sub(r"\s*[(].*[)]", "", name)
        name = self.de_escape_gpg_str(name)
        # NOTE(review): a line normalising `name` appears to be missing here
        # in this copy.
        return (name, address)

    def load_keys(self, keyring):
        if not self.keyring_id:
            raise Exception('Must be initialized with database information')

        k = os.popen(self.gpg_invocation % keyring, "r")
        # NOTE(review): initialisation of `key` and `signingkey` appears to
        # be missing here in this copy.

        for line in k.xreadlines():
            field = line.split(":")
            if field[0] == "pub":
                # NOTE(review): the assignment of `key` and the creation of
                # self.keys[key] are missing here in this copy.
                (name, addr) = self.parse_address(field[9])
                self.keys[key]["email"] = addr
                self.keys[key]["name"] = name
                self.keys[key]["fingerprints"] = []
            elif key and field[0] == "sub" and len(field) >= 12:
                signingkey = ("s" in field[11])
            elif key and field[0] == "uid":
                (name, addr) = self.parse_address(field[9])
                if "email" not in self.keys[key] and "@" in addr:
                    self.keys[key]["email"] = addr
                    self.keys[key]["name"] = name
            elif signingkey and field[0] == "fpr":
                self.keys[key]["fingerprints"].append(field[9])
                self.fpr_lookup[field[9]] = key

    def import_users_from_ldap(self, session):
        # NOTE(review): the `import ldap` / configuration lookup lines
        # appear to be missing at the top of this method in this copy.
        LDAPDn = cnf["Import-LDAP-Fingerprints::LDAPDn"]
        LDAPServer = cnf["Import-LDAP-Fingerprints::LDAPServer"]

        l = ldap.open(LDAPServer)
        l.simple_bind_s("","")
        Attrs = l.search_s(LDAPDn, ldap.SCOPE_ONELEVEL,
               "(&(keyfingerprint=*)(gidnumber=%s))" % (cnf["Import-Users-From-Passwd::ValidGID"]),
               ["uid", "keyfingerprint", "cn", "mn", "sn"])

        ldap_fin_uid_id = {}

        # NOTE(review): initialisation of byuid/byname and the loop header
        # over the LDAP results are missing here in this copy.
            uid = entry["uid"][0]
            name = get_ldap_name(entry)
            fingerprints = entry["keyFingerPrint"]

            for f in fingerprints:
                key = self.fpr_lookup.get(f, None)
                if key not in self.keys:
                    # NOTE(review): a `continue` appears to be missing here.
                self.keys[key]["uid"] = uid

                # NOTE(review): a guard around the uid lookup appears to be
                # missing here in this copy.
                keyid = get_or_set_uid(uid, session).uid_id
                byuid[keyid] = (uid, name)
                byname[uid] = (keyid, name)

        return (byname, byuid)

    def generate_users_from_keyring(self, format, session):
        # NOTE(review): initialisation of byuid/byname and an invalid-uid
        # flag appear to be missing here in this copy.
        for x in self.keys.keys():
            if "email" not in self.keys[x]:
                self.keys[x]["uid"] = format % "invalid-uid"
            # NOTE(review): the else-branch header for the following block
            # appears to be missing here in this copy.
            uid = format % self.keys[x]["email"]
            keyid = get_or_set_uid(uid, session).uid_id
            byuid[keyid] = (uid, self.keys[x]["name"])
            byname[uid] = (keyid, self.keys[x]["name"])
            self.keys[x]["uid"] = uid

        # NOTE(review): a guard (only run when invalid uids were seen)
        # appears to be missing before this fallback block in this copy.
        uid = format % "invalid-uid"
        keyid = get_or_set_uid(uid, session).uid_id
        byuid[keyid] = (uid, "ungeneratable user id")
        byname[uid] = (keyid, "ungeneratable user id")

        return (byname, byuid)
1538 __all__.append('Keyring')
def get_keyring(keyring, session=None):
    """
    If C{keyring} does not have an entry in the C{keyrings} table yet, return None
    If C{keyring} already has an entry, simply return the existing Keyring

    @type keyring: string
    @param keyring: the keyring name

    @rtype: Keyring
    @return: the Keyring object for this keyring
    """

    q = session.query(Keyring).filter_by(keyring_name=keyring)

    # NOTE(review): the try/success path was lost in this copy; restored to
    # match the sibling get_* helpers — confirm against VCS.
    try:
        return q.one()
    except NoResultFound:
        return None
1560 __all__.append('get_keyring')
1562 ################################################################################
class KeyringACLMap(object):
    """Maps a keyring to the ACL applied to uploads signed by it."""

    def __init__(self, *args, **kwargs):
        pass

    # NOTE(review): the `def __repr__` header was lost in this copy;
    # restored to match the sibling mapper classes.
    def __repr__(self):
        return '<KeyringACLMap %s>' % self.keyring_acl_map_id
1571 __all__.append('KeyringACLMap')
1573 ################################################################################
class DBChange(object):
    """A .changes file known to the database."""

    def __init__(self, *args, **kwargs):
        pass

    # NOTE(review): the `def __repr__` header was lost in this copy;
    # restored to match the sibling mapper classes.
    def __repr__(self):
        return '<DBChange %s>' % self.changesname

    def clean_from_queue(self):
        session = DBConn().session().object_session(self)

        # Remove changes_pool_files entries
        # NOTE(review): the deletion statements these comments describe were
        # lost in this copy — only the queue-clearing assignments survive.
        # Confirm against VCS before relying on this method.

        # Remove changes_pending_files references

        # Clear out of queue
        self.in_queue = None
        self.approved_for_id = None
1595 __all__.append('DBChange')
def get_dbchange(filename, session=None):
    """
    returns DBChange object for given C{filename}.

    @type filename: string
    @param filename: the name of the file

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: DBChange
    @return: DBChange object for the given filename (C{None} if not present)
    """
    q = session.query(DBChange).filter_by(changesname=filename)

    # NOTE(review): the try/success path was lost in this copy; restored to
    # match the sibling get_* helpers — confirm against VCS.
    try:
        return q.one()
    except NoResultFound:
        return None
1620 __all__.append('get_dbchange')
1622 ################################################################################
class Location(ORMObject):
    """A pool location on disk, tied to a component and archive type."""

    def __init__(self, path = None, component = None):
        # NOTE(review): `self.path = path` was lost in this copy; restored —
        # the constructor takes it and not_null_constraints() requires it.
        self.path = path
        self.component = component
        # the column 'type' should go away, see comment at mapper
        self.archive_type = 'pool'

    def properties(self):
        # NOTE(review): the continuation of this list was lost in this copy;
        # only the visible entries are kept — confirm against VCS.
        return ['path', 'location_id', 'archive_type', 'component']

    def not_null_constraints(self):
        return ['path', 'archive_type']
1638 __all__.append('Location')
def get_location(location, component=None, archive=None, session=None):
    """
    Returns Location object for the given combination of location, component
    and archive

    @type location: string
    @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}

    @type component: string
    @param component: the component name (if None, no restriction applied)

    @type archive: string
    @param archive: the archive name (if None, no restriction applied)

    @rtype: Location / None
    @return: Either a Location object or None if one can't be found
    """

    q = session.query(Location).filter_by(path=location)

    if archive is not None:
        q = q.join(Archive).filter_by(archive_name=archive)

    if component is not None:
        q = q.join(Component).filter_by(component_name=component)

    # NOTE(review): the try/success path was lost in this copy; restored to
    # match the sibling get_* helpers — confirm against VCS.
    try:
        return q.one()
    except NoResultFound:
        return None
1672 __all__.append('get_location')
1674 ################################################################################
class Maintainer(ORMObject):
    """A maintainer name/email as stored in the database."""

    def __init__(self, name = None):
        # NOTE(review): this assignment was lost in this copy; restored —
        # the attribute is read by get_split_maintainer() and listed in
        # properties().
        self.name = name

    def properties(self):
        return ['name', 'maintainer_id']

    def not_null_constraints(self):
        # NOTE(review): the return of this method was lost in this copy;
        # restored with the table's single data column — confirm against VCS.
        return ['name']

    def get_split_maintainer(self):
        # Empty tuple-of-four when no name is set, matching fix_maintainer's
        # return shape.
        if not hasattr(self, 'name') or self.name is None:
            return ('', '', '', '')

        return fix_maintainer(self.name.strip())
1692 __all__.append('Maintainer')
def get_or_set_maintainer(name, session=None):
    """
    Returns Maintainer object for given maintainer name.

    If no matching maintainer name is found, a row is inserted.

    @type name: string
    @param name: The maintainer name to add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.
    A flush will be performed either way.

    @rtype: Maintainer
    @return: the Maintainer object for the given maintainer
    """

    q = session.query(Maintainer).filter_by(name=name)
    # NOTE(review): the try/success/return lines were lost in this copy;
    # restored around the surviving insert branch — confirm against VCS.
    try:
        ret = q.one()
    except NoResultFound:
        maintainer = Maintainer()
        maintainer.name = name
        session.add(maintainer)
        session.commit_or_flush()
        ret = maintainer

    return ret
1726 __all__.append('get_or_set_maintainer')
def get_maintainer(maintainer_id, session=None):
    """
    Return the name of the maintainer behind C{maintainer_id} or None if that
    maintainer_id is invalid.

    @type maintainer_id: int
    @param maintainer_id: the id of the maintainer

    @rtype: Maintainer
    @return: the Maintainer with this C{maintainer_id}
    """

    return session.query(Maintainer).get(maintainer_id)
1743 __all__.append('get_maintainer')
1745 ################################################################################
class NewComment(object):
    """A NEW-queue comment attached to a package/version."""

    def __init__(self, *args, **kwargs):
        pass

    # NOTE(review): the `def __repr__` header was lost in this copy;
    # restored to match the sibling mapper classes.
    def __repr__(self):
        return '''<NewComment for '%s %s' (%s)>''' % (self.package, self.version, self.comment_id)
1754 __all__.append('NewComment')
1757 def has_new_comment(package, version, session=None):
1759 Returns true if the given combination of C{package}, C{version} has a comment.
1761 @type package: string
1762 @param package: name of the package
1764 @type version: string
1765 @param version: package version
1767 @type session: Session
1768 @param session: Optional SQLA session object (a temporary one will be
1769 generated if not supplied)
1775 q = session.query(NewComment)
1776 q = q.filter_by(package=package)
1777 q = q.filter_by(version=version)
1779 return bool(q.count() > 0)
1781 __all__.append('has_new_comment')
def get_new_comments(package=None, version=None, comment_id=None, session=None):
    """
    Returns (possibly empty) list of NewComment objects for the given
    parameters

    @type package: string (optional)
    @param package: name of the package

    @type version: string (optional)
    @param version: package version

    @type comment_id: int (optional)
    @param comment_id: An id of a comment

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: A (possibly empty) list of NewComment objects will be returned
    """
    q = session.query(NewComment)
    if package is not None: q = q.filter_by(package=package)
    if version is not None: q = q.filter_by(version=version)
    if comment_id is not None: q = q.filter_by(comment_id=comment_id)

    # NOTE(review): the final return was lost in this copy; restored to
    # match the documented list contract — confirm against VCS.
    return q.all()
1813 __all__.append('get_new_comments')
1815 ################################################################################
class Override(ORMObject):
    """An override entry: the section and priority assigned to a package in
    a given suite/component/override type."""

    def __init__(self, package = None, suite = None, component = None, overridetype = None, \
        section = None, priority = None):
        self.package = package
        # NOTE(review): `self.suite = suite` was lost in this copy; restored
        # — the constructor takes it and not_null_constraints() requires it.
        self.suite = suite
        self.component = component
        self.overridetype = overridetype
        self.section = section
        self.priority = priority

    def properties(self):
        # NOTE(review): the continuation of this list was lost in this copy;
        # only the visible entries are kept — confirm against VCS.
        return ['package', 'suite', 'component', 'overridetype', 'section']

    def not_null_constraints(self):
        return ['package', 'suite', 'component', 'overridetype', 'section']
1834 __all__.append('Override')
def get_override(package, suite=None, component=None, overridetype=None, session=None):
    """
    Returns Override object for the given parameters

    @type package: string
    @param package: The name of the package

    @type suite: string, list or None
    @param suite: The name of the suite (or suites if a list) to limit to.  If
                  None, don't limit.  Defaults to None.

    @type component: string, list or None
    @param component: The name of the component (or components if a list) to
                      limit to.  If None, don't limit.  Defaults to None.

    @type overridetype: string, list or None
    @param overridetype: The name of the overridetype (or overridetypes if a list) to
                         limit to.  If None, don't limit.  Defaults to None.

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: A (possibly empty) list of Override objects will be returned
    """
    q = session.query(Override)
    q = q.filter_by(package=package)

    # Each optional filter accepts either a scalar or a list of names.
    if suite is not None:
        if not isinstance(suite, list): suite = [suite]
        q = q.join(Suite).filter(Suite.suite_name.in_(suite))

    if component is not None:
        if not isinstance(component, list): component = [component]
        q = q.join(Component).filter(Component.component_name.in_(component))

    if overridetype is not None:
        if not isinstance(overridetype, list): overridetype = [overridetype]
        q = q.join(OverrideType).filter(OverrideType.overridetype.in_(overridetype))

    # NOTE(review): the final return was lost in this copy; restored to
    # match the documented list contract — confirm against VCS.
    return q.all()
1881 __all__.append('get_override')
1884 ################################################################################
class OverrideType(ORMObject):
    """The type of an override entry (for example deb, udeb or dsc)."""

    def __init__(self, overridetype = None):
        self.overridetype = overridetype

    def not_null_constraints(self):
        # Columns which the schema requires to be populated.
        return ['overridetype']

    def properties(self):
        # Attributes exposed through the generic ORMObject machinery.
        return ['overridetype', 'overridetype_id', 'overrides_count']
1896 __all__.append('OverrideType')
def get_override_type(override_type, session=None):
    """
    Returns OverrideType object for given C{override type}.

    @type override_type: string
    @param override_type: The name of the override type

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: OverrideType
    @return: the OverrideType object for the given override type (the
    original docstring said "database id", but the code returns the object)
    """

    q = session.query(OverrideType).filter_by(overridetype=override_type)

    # NOTE(review): the try/success path was lost in this copy; restored to
    # match the sibling get_* helpers — confirm against VCS.
    try:
        return q.one()
    except NoResultFound:
        return None
1921 __all__.append('get_override_type')
1923 ################################################################################
class DebContents(object):
    """A path shipped by a binary (deb) package."""

    def __init__(self, *args, **kwargs):
        pass

    # NOTE(review): the `def __repr__` header was lost in this copy;
    # restored. Also fixed the typo in the repr string ('DebConetnts').
    def __repr__(self):
        return '<DebContents %s: %s>' % (self.package.package,self.file)
1932 __all__.append('DebContents')
class UdebContents(object):
    """A path shipped by a udeb (installer) package."""

    def __init__(self, *args, **kwargs):
        pass

    # NOTE(review): the `def __repr__` header was lost in this copy;
    # restored. Also fixed the typo in the repr string ('UdebConetnts').
    def __repr__(self):
        return '<UdebContents %s: %s>' % (self.package.package,self.file)
1942 __all__.append('UdebContents')
class PendingBinContents(object):
    """Contents recorded for a binary that is still in a policy queue."""

    def __init__(self, *args, **kwargs):
        pass

    # NOTE(review): the `def __repr__` header was lost in this copy;
    # restored to match the sibling mapper classes.
    def __repr__(self):
        return '<PendingBinContents %s>' % self.contents_id
1951 __all__.append('PendingBinContents')
def insert_pending_content_paths(package,
    # NOTE(review): the remainder of this parameter list was lost in this
    # copy (the docstring below documents fullpaths and session; an is_udeb
    # flag is implied by the type assignments further down).

    """
    Make sure given paths are temporarily associated with given
    package

    @type package: dict
    @param package: the package to associate with should have been read in from the binary control file
    @type fullpaths: list
    @param fullpaths: the list of paths of the file being associated with the binary
    @type session: SQLAlchemy session
    @param session: Optional SQLAlchemy session.  If this is passed, the caller
    is responsible for ensuring a transaction has begun and committing the
    results or rolling back based on the result code.  If not passed, a commit
    will be performed at the end of the function

    @return: True upon success, False if there is a problem
    """

    privatetrans = False
    # NOTE(review): the `if session is None:` guard appears to be missing
    # above this line in this copy.
    session = DBConn().session()

    # NOTE(review): a `try:` presumably opened here (see the except below).
    arch = get_architecture(package['Architecture'], session)
    arch_id = arch.arch_id

    # Remove any already existing recorded files for this package
    q = session.query(PendingBinContents)
    q = q.filter_by(package=package['Package'])
    q = q.filter_by(version=package['Version'])
    q = q.filter_by(architecture=arch_id)
    # NOTE(review): the delete call on `q` appears to be missing here.

    for fullpath in fullpaths:

        if fullpath.startswith( "./" ):
            fullpath = fullpath[2:]

        pca = PendingBinContents()
        pca.package = package['Package']
        pca.version = package['Version']
        # NOTE(review): the assignment of pca.file appears to be missing.
        pca.architecture = arch_id

        # NOTE(review): the is_udeb branching around these two type
        # assignments appears to be missing in this copy.
        pca.type = 8 # gross
        pca.type = 7 # also gross
        # NOTE(review): session.add(pca) appears to be missing here.

    # Only commit if we set up the session ourself
    # NOTE(review): the commit/flush and `return True` appear to be missing
    # here in this copy.

    except Exception, e:
        traceback.print_exc()

        # Only rollback if we set up the session ourself
        # NOTE(review): the rollback/close and `return False` appear to be
        # missing here in this copy.
2026 __all__.append('insert_pending_content_paths')
2028 ################################################################################
class PolicyQueue(object):
    """A policy queue (e.g. NEW or proposed-updates)."""

    def __init__(self, *args, **kwargs):
        pass

    # NOTE(review): the `def __repr__` header was lost in this copy;
    # restored to match the sibling mapper classes.
    def __repr__(self):
        return '<PolicyQueue %s>' % self.queue_name
2037 __all__.append('PolicyQueue')
def get_policy_queue(queuename, session=None):
    """
    Returns PolicyQueue object for given C{queue name}

    @type queuename: string
    @param queuename: The name of the queue

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: PolicyQueue
    @return: PolicyQueue object for the given queue
    """
    q = session.query(PolicyQueue).filter_by(queue_name=queuename)

    # NOTE(review): the try/success path was lost in this copy; restored to
    # match the sibling get_* helpers — confirm against VCS.
    try:
        return q.one()
    except NoResultFound:
        return None
2062 __all__.append('get_policy_queue')
def get_policy_queue_from_path(pathname, session=None):
    """
    Returns PolicyQueue object for given C{path name}

    @type pathname: string
    @param pathname: The path (the docstring previously said `queuename`,
    which does not match the signature)

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: PolicyQueue
    @return: PolicyQueue object for the given queue
    """
    q = session.query(PolicyQueue).filter_by(path=pathname)

    # NOTE(review): the try/success path was lost in this copy; restored to
    # match the sibling get_* helpers — confirm against VCS.
    try:
        return q.one()
    except NoResultFound:
        return None
2087 __all__.append('get_policy_queue_from_path')
2089 ################################################################################
class Priority(ORMObject):
    """A package priority (e.g. required, optional, extra)."""

    def __init__(self, priority = None, level = None):
        self.priority = priority
        # NOTE(review): `self.level = level` was lost in this copy; restored
        # — the constructor takes it and not_null_constraints() requires it.
        self.level = level

    def properties(self):
        return ['priority', 'priority_id', 'level', 'overrides_count']

    def not_null_constraints(self):
        return ['priority', 'level']

    def __eq__(self, val):
        # Allow comparing directly against a plain priority-name string.
        if isinstance(val, str):
            return (self.priority == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.priority != val)
        # This signals to use the normal comparison operator
        return NotImplemented
2114 __all__.append('Priority')
def get_priority(priority, session=None):
    """
    Returns Priority object for given C{priority name}.

    @type priority: string
    @param priority: The name of the priority

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Priority
    @return: Priority object for the given priority
    """
    q = session.query(Priority).filter_by(priority=priority)

    # NOTE(review): the try/success path was lost in this copy; restored to
    # match the sibling get_* helpers — confirm against VCS.
    try:
        return q.one()
    except NoResultFound:
        return None
2139 __all__.append('get_priority')
def get_priorities(session=None):
    """
    Returns dictionary of priority names -> id mappings

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: dictionary
    @return: dictionary of priority names -> id mappings
    """
    # NOTE(review): the dict initialisation, loop header and return were
    # lost in this copy; restored to match the documented contract — confirm
    # against VCS.
    ret = {}
    q = session.query(Priority)
    for x in q.all():
        ret[x.priority] = x.priority_id

    return ret
2161 __all__.append('get_priorities')
2163 ################################################################################
class Section(ORMObject):
    """An archive section (e.g. admin, net, python)."""

    def __init__(self, section = None):
        self.section = section

    def properties(self):
        return ['section', 'section_id', 'overrides_count']

    def not_null_constraints(self):
        # NOTE(review): the return of this method was lost in this copy;
        # restored with the table's single data column — confirm against VCS.
        return ['section']

    def __eq__(self, val):
        # Allow comparing directly against a plain section-name string.
        if isinstance(val, str):
            return (self.section == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.section != val)
        # This signals to use the normal comparison operator
        return NotImplemented
2187 __all__.append('Section')
def get_section(section, session=None):
    """
    Returns Section object for given C{section name}.

    @type section: string
    @param section: The name of the section

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Section
    @return: Section object for the given section name
    """
    q = session.query(Section).filter_by(section=section)

    # NOTE(review): the try/success path was lost in this copy; restored to
    # match the sibling get_* helpers — confirm against VCS.
    try:
        return q.one()
    except NoResultFound:
        return None
2212 __all__.append('get_section')
def get_sections(session=None):
    """
    Returns dictionary of section names -> id mappings

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: dictionary
    @return: dictionary of section names -> id mappings
    """
    # NOTE(review): the dict initialisation, loop header and return were
    # lost in this copy; restored to match the documented contract — confirm
    # against VCS.
    ret = {}
    q = session.query(Section)
    for x in q.all():
        ret[x.section] = x.section_id

    return ret
2234 __all__.append('get_sections')
2236 ################################################################################
class DBSource(ORMObject):
    """A source package as stored in the C{source} table."""

    def __init__(self, source = None, version = None, maintainer = None, \
        changedby = None, poolfile = None, install_date = None):
        self.source = source
        self.version = version
        self.maintainer = maintainer
        self.changedby = changedby
        self.poolfile = poolfile
        self.install_date = install_date

    def properties(self):
        # Attributes exposed through the generic ORMObject machinery.
        return ['source', 'source_id', 'maintainer', 'changedby', \
            'fingerprint', 'poolfile', 'version', 'suites_count', \
            'install_date', 'binaries_count']

    def not_null_constraints(self):
        # Columns which must be populated before the row may be written.
        # Fixed: 'install_date' was listed twice in this list.
        return ['source', 'version', 'install_date', 'maintainer', \
            'changedby', 'poolfile']
2257 __all__.append('DBSource')
def source_exists(source, source_version, suites = ["any"], session=None):
    """
    Ensure that source exists somewhere in the archive for the binary
    upload being processed.
      1. exact match     => 1.0-3
      2. bin-only NMU    => 1.0-3+b1 , 1.0-3.1+b1

    @type source: string
    @param source: source name

    @type source_version: string
    @param source_version: expected source version

    @type suites: list
    @param suites: list of suites to check in, default I{any}

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: int
    @return: returns 1 if a source with expected version is found, otherwise 0
    """
    # NOTE(review): several lines of this function were lost in this copy
    # (initialisation of the result, the definition of `s`, the per-suite
    # count check and the final return); the surviving lines are kept as
    # found. Also note the mutable default argument `suites = ["any"]` —
    # it appears to be only read here, but confirm before mutating it.

    from daklib.regexes import re_bin_only_nmu
    orig_source_version = re_bin_only_nmu.sub('', source_version)

    for suite in suites:
        q = session.query(DBSource).filter_by(source=source). \
            filter(DBSource.version.in_([source_version, orig_source_version]))

        # source must exist in suite X, or in some other suite that's
        # mapped to X, recursively... silent-maps are counted too,
        # unreleased-maps aren't.
        maps = cnf.ValueList("SuiteMappings")[:]
        maps = [ m.split() for m in maps ]
        maps = [ (x[1], x[2]) for x in maps
                 if x[0] == "map" or x[0] == "silent-map" ]

        for (from_, to) in maps:
            if from_ in s and to not in s:
                # NOTE(review): the append extending `s` is missing here.

        q = q.filter(DBSource.suites.any(Suite.suite_name.in_(s)))

    # No source found so return not ok
2318 __all__.append('source_exists')
def get_suites_source_in(source, session=None):
    """
    Returns list of Suite objects which given C{source} name is in

    @type source: str
    @param source: DBSource package name to search for

    @rtype: list
    @return: list of Suite objects for the given source
    """

    return session.query(Suite).filter(Suite.sources.any(source=source)).all()
2334 __all__.append('get_suites_source_in')
def get_sources_from_name(source, version=None, dm_upload_allowed=None, session=None):
    """
    Returns list of DBSource objects for given C{source} name and other parameters

    @type source: str
    @param source: DBSource package name to search for

    @type version: str or None
    @param version: DBSource version name to search for or None if not applicable

    @type dm_upload_allowed: bool
    @param dm_upload_allowed: If None, no effect.  If True or False, only
    return packages with that dm_upload_allowed setting

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: list of DBSource objects for the given name (may be empty)
    """
    q = session.query(DBSource).filter_by(source=source)

    if version is not None:
        q = q.filter_by(version=version)

    if dm_upload_allowed is not None:
        q = q.filter_by(dm_upload_allowed=dm_upload_allowed)

    # NOTE(review): the final return was lost in this copy; restored to
    # match the documented list contract — confirm against VCS.
    return q.all()
2369 __all__.append('get_sources_from_name')
# FIXME: This function fails badly if it finds more than 1 source package and
# its implementation is trivial enough to be inlined.
def get_source_in_suite(source, suite, session=None):
    """
    Returns a DBSource object for a combination of C{source} and C{suite}.

      - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}
      - B{suite} - a suite name, eg. I{unstable}

    @type source: string
    @param source: source package name

    @type suite: string
    @param suite: the suite name

    @rtype: string
    @return: the version for I{source} in I{suite}
    """

    q = get_suite(suite, session).get_sources(source)
    # NOTE(review): the try/success path was lost in this copy (only the
    # orphan except survived); restored to match the sibling get_* helpers —
    # confirm against VCS.
    try:
        return q.one()
    except NoResultFound:
        return None
2398 __all__.append('get_source_in_suite')
2400 ################################################################################
def add_dsc_to_db(u, filename, session=None):
    """Record a .dsc upload (source package) in the database.

    NOTE(review): this copy of the function has lost several lines
    (creation of the DBSource/DSCFile objects, `pfs`/`added_ids`
    initialisation and the session.add/flush calls); surviving lines are
    kept exactly as found and the gaps are flagged below. Confirm against
    VCS before relying on this function.
    """
    entry = u.pkg.files[filename]
    # NOTE(review): `source = DBSource()` and `pfs = []` are missing here.
    source.source = u.pkg.dsc["source"]
    source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
    source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
    source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
    source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
    source.install_date = datetime.now().date()

    dsc_component = entry["component"]
    dsc_location_id = entry["location id"]

    source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")

    # Set up a new poolfile if necessary
    if not entry.has_key("files id") or not entry["files id"]:
        filename = entry["pool name"] + filename
        poolfile = add_poolfile(filename, entry, dsc_location_id, session)
        pfs.append(poolfile)
        entry["files id"] = poolfile.file_id

    source.poolfile_id = entry["files id"]
    # NOTE(review): session.add(source) and a flush appear missing here.

    suite_names = u.pkg.changes["distribution"].keys()
    source.suites = session.query(Suite). \
        filter(Suite.suite_name.in_(suite_names)).all()

    # Add the source files to the DB (files and dsc_files)
    # NOTE(review): the creation of the DSCFile row for the .dsc itself
    # (`dscfile = DSCFile()`) is missing here in this copy.
    dscfile.source_id = source.source_id
    dscfile.poolfile_id = entry["files id"]
    session.add(dscfile)

    for dsc_file, dentry in u.pkg.dsc_files.items():
        # NOTE(review): `df = DSCFile()` is missing here in this copy.
        df.source_id = source.source_id

        # If the .orig tarball is already in the pool, it's
        # files id is stored in dsc_files by check_dsc().
        files_id = dentry.get("files id", None)

        # Find the entry in the files hash
        # TODO: Bail out here properly
        # NOTE(review): the body locating `dfentry` is missing here.
        for f, e in u.pkg.files.items():

        if files_id is None:
            filename = dfentry["pool name"] + dsc_file

            (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
            # FIXME: needs to check for -1/-2 and or handle exception
            if found and obj is not None:
                files_id = obj.file_id
                # NOTE(review): a pfs.append(obj) presumably followed here.

            # If still not found, add it
            if files_id is None:
                # HACK: Force sha1sum etc into dentry
                dentry["sha1sum"] = dfentry["sha1sum"]
                dentry["sha256sum"] = dfentry["sha256sum"]
                poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
                pfs.append(poolfile)
                files_id = poolfile.file_id
        # NOTE(review): an `else:` header is missing before this block.
            poolfile = get_poolfile_by_id(files_id, session)
            if poolfile is None:
                utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
            pfs.append(poolfile)

        df.poolfile_id = files_id
        # NOTE(review): session.add(df) appears to be missing here.

    # Add the src_uploaders to the DB
    uploader_ids = [source.maintainer_id]
    if u.pkg.dsc.has_key("uploaders"):
        for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
            # NOTE(review): an intermediate line (e.g. stripping `up`) may
            # be missing here in this copy.
            uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)

    # NOTE(review): `added_ids = {}` appears to be missing here.
    for up_id in uploader_ids:
        if added_ids.has_key(up_id):
            utils.warn("Already saw uploader %s for source %s" % (up_id, source.source))
            # NOTE(review): a `continue`, the `su = SrcUploader()` creation
            # and session.add(su)/added_ids bookkeeping are missing around
            # here in this copy.
            su.maintainer_id = up_id
            su.source_id = source.source_id

    return source, dsc_component, dsc_location_id, pfs
2508 __all__.append('add_dsc_to_db')
def add_deb_to_db(u, filename, session=None):
    """
    Contrary to what you might expect, this routine deals with both
    debs and udebs.  That info is in 'dbtype', whilst 'type' is
    'deb' for both of them
    """
    entry = u.pkg.files[filename]

    # NOTE(review): the creation of the binary object (`bin = DBBinary()`)
    # is missing here in this copy; the assignments below reference it.
    bin.package = entry["package"]
    bin.version = entry["version"]
    bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
    bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
    bin.arch_id = get_architecture(entry["architecture"], session).arch_id
    bin.binarytype = entry["dbtype"]

    # Find poolfile id
    filename = entry["pool name"] + filename
    fullpath = os.path.join(cnf["Dir::Pool"], filename)
    if not entry.get("location id", None):
        entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id

    if entry.get("files id", None):
        poolfile = get_poolfile_by_id(bin.poolfile_id)
        bin.poolfile_id = entry["files id"]
    # NOTE(review): the `else:` header for this alternative is missing in
    # this copy.
        poolfile = add_poolfile(filename, entry, entry["location id"], session)
        bin.poolfile_id = entry["files id"] = poolfile.file_id

    # Find source id
    bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
    if len(bin_sources) != 1:
        raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
            (bin.package, bin.version, entry["architecture"],
            filename, bin.binarytype, u.pkg.changes["fingerprint"])

    bin.source_id = bin_sources[0].source_id

    # Add and flush object so it has an ID
    # NOTE(review): session.add(bin) and the flush are missing here in this
    # copy.

    suite_names = u.pkg.changes["distribution"].keys()
    bin.suites = session.query(Suite). \
        filter(Suite.suite_name.in_(suite_names)).all()

    # Deal with contents - disabled for now
    #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
    #    print "REJECT\nCould not determine contents of package %s" % bin.package
    #    session.rollback()
    #    raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
2568 __all__.append('add_deb_to_db')
2570 ################################################################################
class SourceACL(object):
    """
    Placeholder ORM class for the source_acl table (mapped in
    DBConn.__setupmappers); attributes such as source_acl_id are supplied
    by the mapper.
    """
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<SourceACL %s>' % self.source_acl_id
__all__.append('SourceACL')  # publish in the module's export list
2581 ################################################################################
class SrcFormat(object):
    """
    Placeholder ORM class for the src_format table (mapped in
    DBConn.__setupmappers); format_name is supplied by the mapper.
    """
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<SrcFormat %s>' % (self.format_name)
__all__.append('SrcFormat')  # publish in the module's export list
2592 ################################################################################
class SrcUploader(object):
    """
    Placeholder ORM class for the src_uploaders table (mapped in
    DBConn.__setupmappers); uploader_id etc. are supplied by the mapper.
    """
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<SrcUploader %s>' % self.uploader_id
__all__.append('SrcUploader')  # publish in the module's export list
2603 ################################################################################
# (display label, Suite attribute name) pairs used when rendering a Suite as
# "Label: value" lines (see the SUITE_FIELDS loop inside the Suite class).
SUITE_FIELDS = [ ('SuiteName', 'suite_name'),
                 ('SuiteID', 'suite_id'),
                 ('Version', 'version'),
                 ('Origin', 'origin'),
                 ('Description', 'description'),
                 ('Untouchable', 'untouchable'),
                 ('Announce', 'announce'),
                 ('Codename', 'codename'),
                 ('OverrideCodename', 'overridecodename'),
                 ('ValidTime', 'validtime'),
                 ('Priority', 'priority'),
                 ('NotAutomatic', 'notautomatic'),
                 ('CopyChanges', 'copychanges'),
                 ('OverrideSuite', 'overridesuite')]
# Why the heck don't we have any UNIQUE constraints in table suite?
# TODO: Add UNIQUE constraints for appropriate columns.
class Suite(ORMObject):
    """
    ORM class for one row of table suite.

    NOTE(review): several method bodies in this view are truncated; review
    notes below mark the gaps.
    """
    def __init__(self, suite_name = None, version = None):
        self.suite_name = suite_name
        self.version = version

    def properties(self):
        # Attribute names exposed by ORMObject introspection.
        # NOTE(review): the continuation line of this list is missing from
        # this view; the statement is incomplete as shown.
        return ['suite_name', 'version', 'sources_count', 'binaries_count', \

    def not_null_constraints(self):
        # Columns that must never be NULL (checked by ORMObject validation).
        return ['suite_name', 'version']

    def __eq__(self, val):
        # Allow comparing a Suite directly against its suite_name string.
        if isinstance(val, str):
            return (self.suite_name == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.suite_name != val)
        # This signals to use the normal comparison operator
        return NotImplemented

    # NOTE(review): the header of the following method (presumably __str__ or
    # a details() helper, plus the initialisation of 'ret') is missing from
    # this view.  It renders the suite as "Label: value" lines using
    # SUITE_FIELDS.
        for disp, field in SUITE_FIELDS:
            val = getattr(self, field, None)
            ret.append("%s: %s" % (disp, val))
        return "\n".join(ret)

    def get_architectures(self, skipsrc=False, skipall=False):
        """
        Returns list of Architecture objects

        @type skipsrc: boolean
        @param skipsrc: Whether to skip returning the 'source' architecture entry

        @type skipall: boolean
        @param skipall: Whether to skip returning the 'all' architecture entry

        @rtype: list
        @return: list of Architecture objects for the given name (may be empty)
        """
        q = object_session(self).query(Architecture).with_parent(self)
        # NOTE(review): the 'if skipsrc:' / 'if skipall:' guards around the
        # two filters below are not visible in this view.
        q = q.filter(Architecture.arch_string != 'source')
        q = q.filter(Architecture.arch_string != 'all')
        return q.order_by(Architecture.arch_string).all()

    def get_sources(self, source):
        """
        Returns a query object representing DBSource that is part of C{suite}.

        - B{source} - source package name, eg. I{mailfilter}, I{bbdb}, I{glibc}

        @type source: string
        @param source: source package name

        @rtype: sqlalchemy.orm.query.Query
        @return: a query of DBSource
        """
        session = object_session(self)
        # NOTE(review): the continuation of this return statement is missing
        # from this view.
        return session.query(DBSource).filter_by(source = source). \

__all__.append('Suite')
def get_suite(suite, session=None):
    """
    Returns Suite object for given C{suite name}.

    @type suite: string
    @param suite: The name of the suite

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Suite
    @return: Suite object for the requested suite name (None if not present)
    """
    q = session.query(Suite).filter_by(suite_name=suite)
    # NOTE(review): the 'try:' / 'return q.one()' lines preceding this
    # handler, and the 'return None' inside it, are missing from this view.
    except NoResultFound:

__all__.append('get_suite')
2724 ################################################################################
# TODO: should be removed because the implementation is too trivial
def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
    """
    Returns list of Architecture objects for given C{suite} name

    @type suite: str
    @param suite: Suite name to search for

    @type skipsrc: boolean
    @param skipsrc: Whether to skip returning the 'source' architecture entry

    @type skipall: boolean
    @param skipall: Whether to skip returning the 'all' architecture entry

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: list of Architecture objects for the given name (may be empty)
    """
    # Thin delegation to Suite.get_architectures on the looked-up suite.
    return get_suite(suite, session).get_architectures(skipsrc, skipall)

__all__.append('get_suite_architectures')
2755 ################################################################################
class SuiteSrcFormat(object):
    """
    Placeholder ORM class for the suite_src_formats association table
    (mapped in DBConn.__setupmappers); suite_id and src_format_id are
    supplied by the mapper.
    """
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<SuiteSrcFormat (%s, %s)>' % (self.suite_id, self.src_format_id)
__all__.append('SuiteSrcFormat')  # publish in the module's export list
def get_suite_src_formats(suite, session=None):
    """
    Returns list of allowed SrcFormat for C{suite}.

    @type suite: str
    @param suite: Suite name to search for

    @type session: Session
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied)

    @rtype: list
    @return: the list of allowed source formats for I{suite}
    """
    # Join src_format -> suite_src_formats -> suite, restricted to the
    # requested suite name and ordered by format name.
    q = session.query(SrcFormat)
    q = q.join(SuiteSrcFormat)
    q = q.join(Suite).filter_by(suite_name=suite)
    q = q.order_by('format_name')
    # NOTE(review): the final 'return q.all()' is not visible in this view.

__all__.append('get_suite_src_formats')
2791 ################################################################################
class Uid(ORMObject):
    """
    ORM class for one row of table uid (an OpenPGP uid).
    """
    def __init__(self, uid = None, name = None):
        # NOTE(review): the attribute assignments of this constructor are
        # missing from this view.

    def __eq__(self, val):
        # Allow comparing a Uid directly against its uid string.
        if isinstance(val, str):
            return (self.uid == val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def __ne__(self, val):
        if isinstance(val, str):
            return (self.uid != val)
        # This signals to use the normal comparison operator
        return NotImplemented

    def properties(self):
        # Attribute names exposed by ORMObject introspection.
        return ['uid', 'name', 'fingerprint']

    def not_null_constraints(self):
        # NOTE(review): the return statement of this method is missing from
        # this view.

__all__.append('Uid')
def get_or_set_uid(uidname, session=None):
    """
    Returns uid object for given uidname.

    If no matching uidname is found, a row is inserted.

    @type uidname: string
    @param uidname: The uid to add

    @type session: SQLAlchemy
    @param session: Optional SQL session object (a temporary one will be
    generated if not supplied). If not passed, a commit will be performed at
    the end of the function, otherwise the caller is responsible for commiting.

    @rtype: Uid
    @return: the uid object for the given uidname
    """
    q = session.query(Uid).filter_by(uid=uidname)
    # NOTE(review): the 'try:' / success-path lines before this handler and
    # the Uid construction / session.add inside it are missing from this
    # view; only the commit_or_flush survives.
    except NoResultFound:
        session.commit_or_flush()

__all__.append('get_or_set_uid')
def get_uid_from_fingerprint(fpr, session=None):
    """Return the Uid that owns fingerprint C{fpr} (joined via table
    fingerprint); behaviour on no match is in the elided handler below."""
    q = session.query(Uid)
    q = q.join(Fingerprint).filter_by(fingerprint=fpr)
    # NOTE(review): the 'try: return q.one()' lines and the handler body are
    # missing from this view.
    except NoResultFound:

__all__.append('get_uid_from_fingerprint')
2864 ################################################################################
class UploadBlock(object):
    """
    Placeholder ORM class for the upload_blocks table (mapped in
    DBConn.__setupmappers); source, upload_block_id, fingerprint and uid
    are supplied by the mapper.
    """
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<UploadBlock %s (%s)>' % (self.source, self.upload_block_id)
__all__.append('UploadBlock')  # publish in the module's export list
2875 ################################################################################
class DBConn(object):
    """
    database module init.

    Borg-style shared-state singleton: every instance shares its __dict__,
    so only the first construction actually sets up the connection.
    NOTE(review): this view of the class is truncated in many places
    (missing attribute definitions, branch headers, tuple delimiters and
    method headers); review notes below mark the gaps.
    """
    def __init__(self, *args, **kwargs):
        # Share state between all instances (Borg pattern); __shared_state
        # itself is defined outside this view -- TODO confirm.
        self.__dict__ = self.__shared_state

        if not getattr(self, 'initialised', False):
            self.initialised = True
            # Echo SQL statements when a 'debug' kwarg is present.
            self.debug = kwargs.has_key('debug')
            # NOTE(review): the call that actually opens the connection
            # (presumably self.__createconn()) is not visible in this view.

    def __setuptables(self):
        # Tables whose 'id' SERIAL primary key must be declared explicitly;
        # see the sqlalchemy 0.5 workaround note below.
        tables_with_primary = (
            'build_queue_files',
            'changes_pending_binaries',
            'changes_pending_files',
            'changes_pending_source',
            'pending_bin_contents',
        # NOTE(review): further table names and the closing parenthesis of
        # this tuple are missing from this view.

        # Association / map tables reflected without an explicit primary key.
        tables_no_primary = (
            'changes_pending_files_map',
            'changes_pending_source_files',
            'changes_pool_files',
            # TODO: the maintainer column in table override should be removed.
            'suite_architectures',
            'suite_src_formats',
            'suite_build_queue_copy',
        # NOTE(review): the end of this tuple and the header of the 'views'
        # tuple are missing from this view; the names below are database
        # views, reflected further down via 'for view_name in views'.
            'almost_obsolete_all_associations',
            'almost_obsolete_src_associations',
            'any_associations_source',
            'bin_assoc_by_arch',
            'bin_associations_binaries',
            'binaries_suite_arch',
            'binfiles_suite_component_arch',
            'newest_all_associations',
            'newest_any_associations',
            'newest_src_association',
            'obsolete_all_associations',
            'obsolete_any_associations',
            'obsolete_any_by_all_associations',
            'obsolete_src_associations',
            'src_associations_bin',
            'src_associations_src',
            'suite_arch_by_name',

        # Sqlalchemy version 0.5 fails to reflect the SERIAL type
        # correctly and that is why we have to use a workaround. It can
        # be removed as soon as we switch to version 0.6.
        for table_name in tables_with_primary:
            table = Table(table_name, self.db_meta, \
                Column('id', Integer, primary_key = True), \
                autoload=True, useexisting=True)
            setattr(self, 'tbl_%s' % table_name, table)

        for table_name in tables_no_primary:
            table = Table(table_name, self.db_meta, autoload=True)
            setattr(self, 'tbl_%s' % table_name, table)

        # bin_contents needs special attention until update #41 has been
        # applied: its composite (file, binary_id) primary key is declared
        # explicitly instead of being reflected.
        self.tbl_bin_contents = Table('bin_contents', self.db_meta, \
            Column('file', Text, primary_key = True),
            Column('binary_id', Integer, ForeignKey('binaries.id'), \
                primary_key = True),
            autoload=True, useexisting=True)

        for view_name in views:
            view = Table(view_name, self.db_meta, autoload=True)
            setattr(self, 'view_%s' % view_name, view)

    def __setupmappers(self):
        # Wire every ORM class in this module onto its reflected table;
        # 'extension = validator' hooks ORMObject validation where used.
        mapper(Architecture, self.tbl_architecture,
            properties = dict(arch_id = self.tbl_architecture.c.id,
               suites = relation(Suite, secondary=self.tbl_suite_architectures,
                   order_by='suite_name',
                   backref=backref('architectures', order_by='arch_string'))),
            extension = validator)

        mapper(Archive, self.tbl_archive,
               properties = dict(archive_id = self.tbl_archive.c.id,
                                 archive_name = self.tbl_archive.c.name))

        mapper(PendingBinContents, self.tbl_pending_bin_contents,
               properties = dict(contents_id =self.tbl_pending_bin_contents.c.id,
                                 filename = self.tbl_pending_bin_contents.c.filename,
                                 package = self.tbl_pending_bin_contents.c.package,
                                 version = self.tbl_pending_bin_contents.c.version,
                                 arch = self.tbl_pending_bin_contents.c.arch,
                                 otype = self.tbl_pending_bin_contents.c.type))

        mapper(DebContents, self.tbl_deb_contents,
               properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
                                 package=self.tbl_deb_contents.c.package,
                                 suite=self.tbl_deb_contents.c.suite,
                                 arch=self.tbl_deb_contents.c.arch,
                                 section=self.tbl_deb_contents.c.section,
                                 filename=self.tbl_deb_contents.c.filename))

        mapper(UdebContents, self.tbl_udeb_contents,
               properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
                                 package=self.tbl_udeb_contents.c.package,
                                 suite=self.tbl_udeb_contents.c.suite,
                                 arch=self.tbl_udeb_contents.c.arch,
                                 section=self.tbl_udeb_contents.c.section,
                                 filename=self.tbl_udeb_contents.c.filename))

        mapper(BuildQueue, self.tbl_build_queue,
               properties = dict(queue_id = self.tbl_build_queue.c.id))

        mapper(BuildQueueFile, self.tbl_build_queue_files,
               properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
                                 poolfile = relation(PoolFile, backref='buildqueueinstances')))

        mapper(DBBinary, self.tbl_binaries,
               properties = dict(binary_id = self.tbl_binaries.c.id,
                                 package = self.tbl_binaries.c.package,
                                 version = self.tbl_binaries.c.version,
                                 maintainer_id = self.tbl_binaries.c.maintainer,
                                 maintainer = relation(Maintainer),
                                 source_id = self.tbl_binaries.c.source,
                                 source = relation(DBSource, backref='binaries'),
                                 arch_id = self.tbl_binaries.c.architecture,
                                 architecture = relation(Architecture),
                                 poolfile_id = self.tbl_binaries.c.file,
                                 poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
                                 binarytype = self.tbl_binaries.c.type,
                                 fingerprint_id = self.tbl_binaries.c.sig_fpr,
                                 fingerprint = relation(Fingerprint),
                                 install_date = self.tbl_binaries.c.install_date,
                                 suites = relation(Suite, secondary=self.tbl_bin_associations,
                                     backref=backref('binaries', lazy='dynamic'))),
                extension = validator)

        mapper(BinaryACL, self.tbl_binary_acl,
               properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))

        mapper(BinaryACLMap, self.tbl_binary_acl_map,
               properties = dict(binary_acl_map_id = self.tbl_binary_acl_map.c.id,
                                 fingerprint = relation(Fingerprint, backref="binary_acl_map"),
                                 architecture = relation(Architecture)))

        mapper(Component, self.tbl_component,
               properties = dict(component_id = self.tbl_component.c.id,
                                 component_name = self.tbl_component.c.name),
               extension = validator)

        mapper(DBConfig, self.tbl_config,
               properties = dict(config_id = self.tbl_config.c.id))

        mapper(DSCFile, self.tbl_dsc_files,
               properties = dict(dscfile_id = self.tbl_dsc_files.c.id,
                                 source_id = self.tbl_dsc_files.c.source,
                                 source = relation(DBSource),
                                 poolfile_id = self.tbl_dsc_files.c.file,
                                 poolfile = relation(PoolFile)))

        mapper(PoolFile, self.tbl_files,
               properties = dict(file_id = self.tbl_files.c.id,
                                 filesize = self.tbl_files.c.size,
                                 location_id = self.tbl_files.c.location,
                                 location = relation(Location,
                                     # using lazy='dynamic' in the back
                                     # reference because we have A LOT of
                                     # files in one location
                                     backref=backref('files', lazy='dynamic'))),
                extension = validator)

        mapper(Fingerprint, self.tbl_fingerprint,
               properties = dict(fingerprint_id = self.tbl_fingerprint.c.id,
                                 uid_id = self.tbl_fingerprint.c.uid,
                                 uid = relation(Uid),
                                 keyring_id = self.tbl_fingerprint.c.keyring,
                                 keyring = relation(Keyring),
                                 source_acl = relation(SourceACL),
                                 binary_acl = relation(BinaryACL)),
               extension = validator)

        mapper(Keyring, self.tbl_keyrings,
               properties = dict(keyring_name = self.tbl_keyrings.c.name,
                                 keyring_id = self.tbl_keyrings.c.id))

        mapper(DBChange, self.tbl_changes,
               properties = dict(change_id = self.tbl_changes.c.id,
                                 poolfiles = relation(PoolFile,
                                                      secondary=self.tbl_changes_pool_files,
                                                      backref="changeslinks"),
                                 seen = self.tbl_changes.c.seen,
                                 source = self.tbl_changes.c.source,
                                 binaries = self.tbl_changes.c.binaries,
                                 architecture = self.tbl_changes.c.architecture,
                                 distribution = self.tbl_changes.c.distribution,
                                 urgency = self.tbl_changes.c.urgency,
                                 maintainer = self.tbl_changes.c.maintainer,
                                 changedby = self.tbl_changes.c.changedby,
                                 date = self.tbl_changes.c.date,
                                 version = self.tbl_changes.c.version,
                                 files = relation(ChangePendingFile,
                                                  secondary=self.tbl_changes_pending_files_map,
                                                  backref="changesfile"),
                                 in_queue_id = self.tbl_changes.c.in_queue,
                                 in_queue = relation(PolicyQueue,
                                                     primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
                                 approved_for_id = self.tbl_changes.c.approved_for))

        mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
               properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))

        mapper(ChangePendingFile, self.tbl_changes_pending_files,
               properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
                                 filename = self.tbl_changes_pending_files.c.filename,
                                 size = self.tbl_changes_pending_files.c.size,
                                 md5sum = self.tbl_changes_pending_files.c.md5sum,
                                 sha1sum = self.tbl_changes_pending_files.c.sha1sum,
                                 sha256sum = self.tbl_changes_pending_files.c.sha256sum))

        mapper(ChangePendingSource, self.tbl_changes_pending_source,
               properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
                                 change = relation(DBChange),
                                 maintainer = relation(Maintainer,
                                                       primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
                                 changedby = relation(Maintainer,
                                                      primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
                                 fingerprint = relation(Fingerprint),
                                 source_files = relation(ChangePendingFile,
                                                         secondary=self.tbl_changes_pending_source_files,
                                                         backref="pending_sources")))

        mapper(KeyringACLMap, self.tbl_keyring_acl_map,
               properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
                                 keyring = relation(Keyring, backref="keyring_acl_map"),
                                 architecture = relation(Architecture)))

        mapper(Location, self.tbl_location,
               properties = dict(location_id = self.tbl_location.c.id,
                                 component_id = self.tbl_location.c.component,
                                 component = relation(Component, backref='location'),
                                 archive_id = self.tbl_location.c.archive,
                                 archive = relation(Archive),
                                 # FIXME: the 'type' column is old cruft and
                                 # should be removed in the future.
                                 archive_type = self.tbl_location.c.type),
               extension = validator)

        mapper(Maintainer, self.tbl_maintainer,
               properties = dict(maintainer_id = self.tbl_maintainer.c.id,
                   maintains_sources = relation(DBSource, backref='maintainer',
                       primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.maintainer)),
                   changed_sources = relation(DBSource, backref='changedby',
                       primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.changedby))),
                extension = validator)

        mapper(NewComment, self.tbl_new_comments,
               properties = dict(comment_id = self.tbl_new_comments.c.id))

        mapper(Override, self.tbl_override,
               properties = dict(suite_id = self.tbl_override.c.suite,
                                 suite = relation(Suite, \
                                    backref=backref('overrides', lazy='dynamic')),
                                 package = self.tbl_override.c.package,
                                 component_id = self.tbl_override.c.component,
                                 component = relation(Component, \
                                    backref=backref('overrides', lazy='dynamic')),
                                 priority_id = self.tbl_override.c.priority,
                                 priority = relation(Priority, \
                                    backref=backref('overrides', lazy='dynamic')),
                                 section_id = self.tbl_override.c.section,
                                 section = relation(Section, \
                                    backref=backref('overrides', lazy='dynamic')),
                                 overridetype_id = self.tbl_override.c.type,
                                 overridetype = relation(OverrideType, \
                                    backref=backref('overrides', lazy='dynamic'))))

        mapper(OverrideType, self.tbl_override_type,
               properties = dict(overridetype = self.tbl_override_type.c.type,
                                 overridetype_id = self.tbl_override_type.c.id))

        mapper(PolicyQueue, self.tbl_policy_queue,
               properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))

        mapper(Priority, self.tbl_priority,
               properties = dict(priority_id = self.tbl_priority.c.id))

        mapper(Section, self.tbl_section,
               properties = dict(section_id = self.tbl_section.c.id,
                                 section=self.tbl_section.c.section))

        mapper(DBSource, self.tbl_source,
               properties = dict(source_id = self.tbl_source.c.id,
                                 version = self.tbl_source.c.version,
                                 maintainer_id = self.tbl_source.c.maintainer,
                                 poolfile_id = self.tbl_source.c.file,
                                 poolfile = relation(PoolFile, backref=backref('source', uselist = False)),
                                 fingerprint_id = self.tbl_source.c.sig_fpr,
                                 fingerprint = relation(Fingerprint),
                                 changedby_id = self.tbl_source.c.changedby,
                                 srcfiles = relation(DSCFile,
                                                     primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
                                 suites = relation(Suite, secondary=self.tbl_src_associations,
                                     backref=backref('sources', lazy='dynamic')),
                                 srcuploaders = relation(SrcUploader)),
               extension = validator)

        mapper(SourceACL, self.tbl_source_acl,
               properties = dict(source_acl_id = self.tbl_source_acl.c.id))

        mapper(SrcFormat, self.tbl_src_format,
               properties = dict(src_format_id = self.tbl_src_format.c.id,
                                 format_name = self.tbl_src_format.c.format_name))

        mapper(SrcUploader, self.tbl_src_uploaders,
               properties = dict(uploader_id = self.tbl_src_uploaders.c.id,
                                 source_id = self.tbl_src_uploaders.c.source,
                                 source = relation(DBSource,
                                                   primaryjoin=(self.tbl_src_uploaders.c.source==self.tbl_source.c.id)),
                                 maintainer_id = self.tbl_src_uploaders.c.maintainer,
                                 maintainer = relation(Maintainer,
                                                       primaryjoin=(self.tbl_src_uploaders.c.maintainer==self.tbl_maintainer.c.id))))

        mapper(Suite, self.tbl_suite,
               properties = dict(suite_id = self.tbl_suite.c.id,
                                 policy_queue = relation(PolicyQueue),
                                 copy_queues = relation(BuildQueue,
                                     secondary=self.tbl_suite_build_queue_copy)),
                extension = validator)

        mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
               properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
                                 suite = relation(Suite, backref='suitesrcformats'),
                                 src_format_id = self.tbl_suite_src_formats.c.src_format,
                                 src_format = relation(SrcFormat)))

        mapper(Uid, self.tbl_uid,
               properties = dict(uid_id = self.tbl_uid.c.id,
                                 fingerprint = relation(Fingerprint)),
               extension = validator)

        mapper(UploadBlock, self.tbl_upload_blocks,
               properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
                                 fingerprint = relation(Fingerprint, backref="uploadblocks"),
                                 uid = relation(Uid, backref="uploadblocks")))

        # NOTE(review): the 'properties = dict(' line of this mapper call is
        # missing from this view.
        mapper(BinContents, self.tbl_bin_contents,
                binary = relation(DBBinary,
                    backref=backref('contents', lazy='dynamic')),
                file = self.tbl_bin_contents.c.file))

    ## Connection functions
    def __createconn(self):
        from config import Config
        # NOTE(review): the Config() lookup that assigns 'cnf' and the
        # 'if cnf["DB::Host"]:' / 'else:' headers around the two
        # connection-string builders below are missing from this view.
        # TCP connection to an explicitly named host:
        connstr = "postgres://%s" % cnf["DB::Host"]
        if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
            connstr += ":%s" % cnf["DB::Port"]
        connstr += "/%s" % cnf["DB::Name"]

        # Unix-socket connection to a local database:
        connstr = "postgres:///%s" % cnf["DB::Name"]
        if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
            connstr += "?port=%s" % cnf["DB::Port"]

        self.db_pg = create_engine(connstr, echo=self.debug)
        self.db_meta = MetaData()
        self.db_meta.bind = self.db_pg
        # NOTE(review): the continuation of this sessionmaker() call is
        # missing from this view.
        self.db_smaker = sessionmaker(bind=self.db_pg,

        self.__setuptables()
        self.__setupmappers()

    # NOTE(review): the 'def session(self):' header for the following return
    # appears to be missing from this view.
        return self.db_smaker()

__all__.append('DBConn')