@contact: Debian FTPMaster <ftpmaster@debian.org>
@copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup <james@nocrew.org>
@copyright: 2008-2009 Mark Hymers <mhy@debian.org>
-@copyright: 2009 Joerg Jaspert <joerg@debian.org>
+@copyright: 2009, 2010 Joerg Jaspert <joerg@debian.org>
@copyright: 2009 Mike O'Connor <stew@debian.org>
@license: GNU General Public License version 2 or later
"""
import re
import psycopg2
import traceback
-import datetime
+import commands
+from datetime import datetime, timedelta
+from errno import ENOENT
+from tempfile import mkstemp, mkdtemp
from inspect import getargspec
# Only import Config until Queue stuff is changed to store its config
# in the database
from config import Config
-from singleton import Singleton
from textutils import fix_maintainer
################################################################################
# reflection
class DebVersion(sqltypes.Text):
+ """
+ Support the debversion type
+ """
+
def get_col_spec(self):
return "DEBVERSION"
return wrapped
+__all__.append('session_wrapper')
+
################################################################################
class Architecture(object):
"""
Returns list of Suite objects for given C{architecture} name
- @type source: str
- @param source: Architecture name to search for
+ @type architecture: str
+ @param architecture: Architecture name to search for
@type session: Session
@param session: Optional SQL session object (a temporary one will be
"""
Returns list of Suite objects which the given C{package} name is in
- @type source: str
- @param source: DBBinary package name to search for
+ @type package: str
+ @param package: DBBinary package name to search for
@rtype: list
@return: list of Suite objects for the given package
@type version: str or None
@param version: Version to search for (or None)
- @type package: str, list or None
- @param package: Architectures to limit to (or None if no limit)
+ @type architecture: str, list or None
+ @param architecture: Architectures to limit to (or None if no limit)
@type session: Session
@param session: Optional SQL session object (a temporary one will be
sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name
FROM binaries b, files fi, location l, component c, bin_associations ba, suite su
- WHERE b.package=:package
+ WHERE b.package='%(package)s'
AND b.file = fi.id
AND fi.location = l.id
AND l.component = c.id
AND ba.bin=b.id
AND ba.suite = su.id
- AND su.suite_name=:suitename
+ AND su.suite_name %(suitename)s
ORDER BY b.version DESC"""
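+    # NB: values are interpolated, not bound: the template quotes
+    # C{package} itself, while C{suitename} must carry its own comparison
+    # operator (e.g. "= 'unstable'"), so callers must pass trusted values.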
- return session.execute(sql, {'package': package, 'suitename': suitename})
+ return session.execute(sql % {'package': package, 'suitename': suitename})
__all__.append('get_binary_from_name_suite')
################################################################################
+MINIMAL_APT_CONF="""
+Dir
+{
+ ArchiveDir "%(archivepath)s";
+ OverrideDir "/srv/ftp-master.debian.org/scripts/override/";
+ CacheDir "/srv/ftp-master.debian.org/database/";
+};
+
+Default
+{
+ Packages::Compress ". bzip2 gzip";
+ Sources::Compress ". bzip2 gzip";
+ DeLinkLimit 0;
+ FileMode 0664;
+}
+
+bindirectory "incoming"
+{
+ Packages "Packages";
+ Contents " ";
+
+ BinOverride "override.sid.all3";
+ BinCacheDB "packages-accepted.db";
+
+ FileList "%(filelist)s";
+
+ PathPrefix "";
+ Packages::Extensions ".deb .udeb";
+};
+
+bindirectory "incoming/"
+{
+ Sources "Sources";
+ BinOverride "override.sid.all3";
+ SrcOverride "override.sid.all3.src";
+ FileList "%(filelist)s";
+};
+"""
+
+class BuildQueue(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<BuildQueue %s>' % self.queue_name
+
+ def write_metadata(self, starttime, force=False):
+ # Do we write out metafiles?
+ if not (force or self.generate_metadata):
+ return
+
+ session = DBConn().session().object_session(self)
+
+ fl_fd = fl_name = ac_fd = ac_name = None
+ tempdir = None
+ arches = " ".join([ a.arch_string for a in session.query(Architecture).all() if a.arch_string != 'source' ])
+ startdir = os.getcwd()
+
+ try:
+ # Grab files we want to include
+ newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+ # Write file list with newer files
+ (fl_fd, fl_name) = mkstemp()
+ for n in newer:
+ os.write(fl_fd, '%s\n' % n.fullpath)
+ os.close(fl_fd)
+
+ # Write minimal apt.conf
+ # TODO: Remove hardcoding from template
+ (ac_fd, ac_name) = mkstemp()
+ os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
+ 'filelist': fl_name})
+ os.close(ac_fd)
+
+ # Run apt-ftparchive generate
+ os.chdir(os.path.dirname(ac_name))
+ os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
+
+ # Run apt-ftparchive release
+ # TODO: Eww - fix this
+ bname = os.path.basename(self.path)
+ os.chdir(self.path)
+ os.chdir('..')
+
+ # We have to remove the Release file otherwise it'll be included in the
+ # new one
+ try:
+ os.unlink(os.path.join(bname, 'Release'))
+ except OSError:
+ pass
+
+ os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
+
+ # Crude hack with open and append, but this whole section is and should be redone.
+ if self.notautomatic:
+                release = open("Release", "a")
+                release.write("NotAutomatic: yes\n")
+                release.close()
+
+ # Sign if necessary
+ if self.signingkey:
+ cnf = Config()
+ keyring = "--secret-keyring \"%s\"" % cnf["Dinstall::SigningKeyring"]
+ if cnf.has_key("Dinstall::SigningPubKeyring"):
+ keyring += " --keyring \"%s\"" % cnf["Dinstall::SigningPubKeyring"]
+
+ os.system("gpg %s --no-options --batch --no-tty --armour --default-key %s --detach-sign -o Release.gpg Release""" % (keyring, self.signingkey))
+
+ # Move the files if we got this far
+ os.rename('Release', os.path.join(bname, 'Release'))
+ if self.signingkey:
+ os.rename('Release.gpg', os.path.join(bname, 'Release.gpg'))
+
+ # Clean up any left behind files
+ finally:
+ os.chdir(startdir)
+ if fl_fd:
+ try:
+ os.close(fl_fd)
+ except OSError:
+ pass
+
+ if fl_name:
+ try:
+ os.unlink(fl_name)
+ except OSError:
+ pass
+
+ if ac_fd:
+ try:
+ os.close(ac_fd)
+ except OSError:
+ pass
+
+ if ac_name:
+ try:
+ os.unlink(ac_name)
+ except OSError:
+ pass
+
+ def clean_and_update(self, starttime, Logger, dryrun=False):
+ """WARNING: This routine commits for you"""
+ session = DBConn().session().object_session(self)
+
+ if self.generate_metadata and not dryrun:
+ self.write_metadata(starttime)
+
+ # Grab files older than our execution time
+ older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+
+ for o in older:
+ killdb = False
+ try:
+ if dryrun:
+ Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
+ else:
+ Logger.log(["I: Removing %s from the queue" % o.fullpath])
+ os.unlink(o.fullpath)
+ killdb = True
+ except OSError, e:
+ # If it wasn't there, don't worry
+ if e.errno == ENOENT:
+ killdb = True
+ else:
+ # TODO: Replace with proper logging call
+ Logger.log(["E: Could not remove %s" % o.fullpath])
+
+ if killdb:
+ session.delete(o)
+
+ session.commit()
+
+ for f in os.listdir(self.path):
+ if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release'):
+ continue
+
+ try:
+ r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
+ except NoResultFound:
+ fp = os.path.join(self.path, f)
+ if dryrun:
+ Logger.log(["I: Would remove unused link %s" % fp])
+ else:
+ Logger.log(["I: Removing unused link %s" % fp])
+ try:
+ os.unlink(fp)
+ except OSError:
+ Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
+
+ def add_file_from_pool(self, poolfile):
+ """Copies a file into the pool. Assumes that the PoolFile object is
+ attached to the same SQLAlchemy session as the Queue object is.
+
+ The caller is responsible for committing after calling this function."""
+ poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
+
+ # Check if we have a file of this name or this ID already
+ for f in self.queuefiles:
+            if (f.fileid is not None and f.fileid == poolfile.file_id) or \
+               f.poolfile.filename == poolfile_basename:
+ # In this case, update the BuildQueueFile entry so we
+ # don't remove it too early
+ f.lastused = datetime.now()
+ DBConn().session().object_session(poolfile).add(f)
+ return f
+
+ # Prepare BuildQueueFile object
+ qf = BuildQueueFile()
+ qf.build_queue_id = self.queue_id
+ qf.lastused = datetime.now()
+ qf.filename = poolfile_basename
+
+ targetpath = poolfile.fullpath
+ queuepath = os.path.join(self.path, poolfile_basename)
+
+ try:
+ if self.copy_files:
+ # We need to copy instead of symlink
+ import utils
+ utils.copy(targetpath, queuepath)
+ # NULL in the fileid field implies a copy
+ qf.fileid = None
+ else:
+ os.symlink(targetpath, queuepath)
+ qf.fileid = poolfile.file_id
+ except OSError:
+ return None
+
+ # Get the same session as the PoolFile is using and add the qf to it
+ DBConn().session().object_session(poolfile).add(qf)
+
+ return qf
+
+
+__all__.append('BuildQueue')
+
+@session_wrapper
+def get_build_queue(queuename, session=None):
+ """
+    Returns BuildQueue object for given C{queue name}.
+
+ @type queuename: string
+ @param queuename: The name of the queue
+
+ @type session: Session
+ @param session: Optional SQLA session object (a temporary one will be
+ generated if not supplied)
+
+    @rtype: BuildQueue
+    @return: BuildQueue object for the given queue (C{None} if not present)
+ """
+
+ q = session.query(BuildQueue).filter_by(queue_name=queuename)
+
+ try:
+ return q.one()
+ except NoResultFound:
+ return None
+
+__all__.append('get_build_queue')
+
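+# A minimal usage sketch (the queue name is illustrative; commit handling
+# follows the add_file_from_pool() docstring above):
+#   queue = get_build_queue('buildd-unstable')
+#   if queue is not None:
+#       qf = queue.add_file_from_pool(poolfile)  # symlink or copy into queue.path
+#       if qf is not None:
+#           DBConn().session().object_session(poolfile).commit()
+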
+################################################################################
+
+class BuildQueueFile(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<BuildQueueFile %s (%s)>' % (self.filename, self.build_queue_id)
+
+ @property
+ def fullpath(self):
+ return os.path.join(self.buildqueue.path, self.filename)
+
+
+__all__.append('BuildQueueFile')
+
+################################################################################
+
+class ChangePendingBinary(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<ChangePendingBinary %s>' % self.change_pending_binary_id
+
+__all__.append('ChangePendingBinary')
+
+################################################################################
+
+class ChangePendingFile(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<ChangePendingFile %s>' % self.change_pending_file_id
+
+__all__.append('ChangePendingFile')
+
+################################################################################
+
+class ChangePendingSource(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<ChangePendingSource %s>' % self.change_pending_source_id
+
+__all__.append('ChangePendingSource')
+
+################################################################################
+
class Component(object):
def __init__(self, *args, **kwargs):
pass
If no matching file is found, a row is inserted.
- @type filename: string
- @param filename: The filepath
+ @type filepath: string
+ @param filepath: The filepath
+
@type session: SQLAlchemy
@param session: Optional SQL session object (a temporary one will be
generated if not supplied). If not passed, a commit will be performed at
try:
# Insert paths
- pathcache = {}
-
def generate_path_dicts():
for fullpath in fullpaths:
if fullpath.startswith( './' ):
fullpath = fullpath[2:]
- yield {'fulename':fullpath, 'id': binary_id }
+ yield {'filename':fullpath, 'id': binary_id }
- session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
- generate_path_dicts() )
+ for d in generate_path_dicts():
+ session.execute( "INSERT INTO bin_contents ( file, binary_id ) VALUES ( :filename, :id )",
+ d )
session.commit()
if privatetrans:
def check_poolfile(filename, filesize, md5sum, location_id, session=None):
"""
Returns a tuple:
- (ValidFileFound [boolean or None], PoolFile object or None)
+ (ValidFileFound [boolean or None], PoolFile object or None)
@type filename: string
@param filename: the filename of the file to check against the DB
@rtype: tuple
@return: Tuple of length 2.
- If more than one file found with that name:
- (None, None)
- If valid pool file found: (True, PoolFile object)
- If valid pool file not found:
- (False, None) if no file found
- (False, PoolFile object) if file found with size/md5sum mismatch
+ - If more than one file found with that name: (C{None}, C{None})
+ - If valid pool file found: (C{True}, C{PoolFile object})
+ - If valid pool file not found:
+ - (C{False}, C{None}) if no file found
+ - (C{False}, C{PoolFile object}) if file found with size/md5sum mismatch
"""
q = session.query(PoolFile).filter_by(filename=filename)
"""
# TODO: There must be a way of properly using bind parameters with %FOO%
- q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename))
+ q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
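+    # The anchored pattern matches paths ending in /<filename>, i.e. a
+    # basename match rather than the old arbitrary-substring match.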
return q.all()
__all__.append('get_poolfile_like_name')
+@session_wrapper
+def add_poolfile(filename, datadict, location_id, session=None):
+ """
+ Add a new file to the pool
+
+ @type filename: string
+ @param filename: filename
+
+ @type datadict: dict
+ @param datadict: dict with needed data
+
+ @type location_id: int
+ @param location_id: database id of the location
+
+ @rtype: PoolFile
+ @return: the PoolFile object created
+ """
+ poolfile = PoolFile()
+ poolfile.filename = filename
+ poolfile.filesize = datadict["size"]
+ poolfile.md5sum = datadict["md5sum"]
+ poolfile.sha1sum = datadict["sha1sum"]
+ poolfile.sha256sum = datadict["sha256sum"]
+ poolfile.location_id = location_id
+
+ session.add(poolfile)
+ # Flush to get a file id (NB: This is not a commit)
+ session.flush()
+
+ return poolfile
+
+__all__.append('add_poolfile')
+
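+# The datadict keys consumed above are 'size', 'md5sum', 'sha1sum' and
+# 'sha256sum'; a sketch with placeholder values:
+#   add_poolfile('pool/main/f/foo/foo_1.0.orig.tar.gz',
+#                {'size': 1234, 'md5sum': '...', 'sha1sum': '...',
+#                 'sha256sum': '...'},
+#                location_id)
+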
################################################################################
class Fingerprint(object):
################################################################################
-class KnownChange(object):
+class DBChange(object):
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
- return '<KnownChange %s>' % self.changesname
+ return '<DBChange %s>' % self.changesname
+
+ def clean_from_queue(self):
+ session = DBConn().session().object_session(self)
-__all__.append('KnownChange')
+ # Remove changes_pool_files entries
+ self.poolfiles = []
+
+ # Remove changes_pending_files references
+ self.files = []
+
+ # Clear out of queue
+ self.in_queue = None
+ self.approved_for_id = None
+
+__all__.append('DBChange')
@session_wrapper
-def get_knownchange(filename, session=None):
+def get_dbchange(filename, session=None):
"""
- returns knownchange object for given C{filename}.
+    Returns DBChange object for given C{filename}.
- @type archive: string
- @param archive: the name of the arhive
+ @type filename: string
+ @param filename: the name of the file
@type session: Session
@param session: Optional SQLA session object (a temporary one will be
generated if not supplied)
- @rtype: Archive
- @return: Archive object for the given name (None if not present)
+ @rtype: DBChange
+ @return: DBChange object for the given filename (C{None} if not present)
"""
- q = session.query(KnownChange).filter_by(changesname=filename)
+ q = session.query(DBChange).filter_by(changesname=filename)
try:
return q.one()
except NoResultFound:
return None
-__all__.append('get_knownchange')
-
-################################################################################
-
-class KnownChangePendingFile(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<KnownChangePendingFile %s>' % self.known_change_pending_file_id
-
-__all__.append('KnownChangePendingFile')
+__all__.append('get_dbchange')
################################################################################
and archive
@type location: string
- @param location: the path of the location, e.g. I{/srv/ftp.debian.org/ftp/pool/}
+ @param location: the path of the location, e.g. I{/srv/ftp-master.debian.org/ftp/pool/}
@type component: string
@param component: the component name (if None, no restriction applied)
@type archive: string
- @param archive_id: the archive name (if None, no restriction applied)
+ @param archive: the archive name (if None, no restriction applied)
@rtype: Location / None
@return: Either a Location object or None if one can't be found
################################################################################
+class PolicyQueue(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def __repr__(self):
+ return '<PolicyQueue %s>' % self.queue_name
+
+__all__.append('PolicyQueue')
+
+@session_wrapper
+def get_policy_queue(queuename, session=None):
+ """
+ Returns PolicyQueue object for given C{queue name}
+
+ @type queuename: string
+ @param queuename: The name of the queue
+
+ @type session: Session
+ @param session: Optional SQLA session object (a temporary one will be
+ generated if not supplied)
+
+    @rtype: PolicyQueue
+    @return: PolicyQueue object for the given queue (C{None} if not present)
+ """
+
+ q = session.query(PolicyQueue).filter_by(queue_name=queuename)
+
+ try:
+ return q.one()
+ except NoResultFound:
+ return None
+
+__all__.append('get_policy_queue')
+
+################################################################################
+
class Priority(object):
def __init__(self, *args, **kwargs):
pass
################################################################################
-class Queue(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<Queue %s>' % self.queue_name
-
- def add_file_from_pool(self, poolfile):
- """Copies a file into the pool. Assumes that the PoolFile object is
- attached to the same SQLAlchemy session as the Queue object is.
-
- The caller is responsible for committing after calling this function."""
- poolfile_basename = poolfile.filename[poolfile.filename.rindex(os.sep)+1:]
-
- # Check if we have a file of this name or this ID already
- for f in self.queuefiles:
- if f.fileid is not None and f.fileid == poolfile.file_id or \
- f.poolfile.filename == poolfile_basename:
- # In this case, update the QueueFile entry so we
- # don't remove it too early
- f.lastused = datetime.now()
- DBConn().session().object_session(pf).add(f)
- return f
-
- # Prepare QueueFile object
- qf = QueueFile()
- qf.queue_id = self.queue_id
- qf.lastused = datetime.now()
- qf.filename = dest
-
- targetpath = qf.fullpath
- queuepath = os.path.join(self.path, poolfile_basename)
-
- try:
- if self.copy_pool_files:
- # We need to copy instead of symlink
- import utils
- utils.copy(targetfile, queuepath)
- # NULL in the fileid field implies a copy
- qf.fileid = None
- else:
- os.symlink(targetfile, queuepath)
- qf.fileid = poolfile.file_id
- except OSError:
- return None
-
- # Get the same session as the PoolFile is using and add the qf to it
- DBConn().session().object_session(poolfile).add(qf)
-
- return qf
-
-
-__all__.append('Queue')
-
-@session_wrapper
-def get_queue(queuename, session=None):
- """
- Returns Queue object for given C{queue name}, creating it if it does not
- exist.
-
- @type queuename: string
- @param queuename: The name of the queue
-
- @type session: Session
- @param session: Optional SQLA session object (a temporary one will be
- generated if not supplied)
-
- @rtype: Queue
- @return: Queue object for the given queue
- """
-
- q = session.query(Queue).filter_by(queue_name=queuename)
-
- try:
- return q.one()
- except NoResultFound:
- return None
-
-__all__.append('get_queue')
-
-################################################################################
-
-class QueueFile(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<QueueFile %s (%s)>' % (self.filename, self.queue_id)
-
-__all__.append('QueueFile')
-
-################################################################################
-
class Section(object):
def __init__(self, *args, **kwargs):
pass
1. exact match => 1.0-3
2. bin-only NMU => 1.0-3+b1 , 1.0-3.1+b1
- @type package: string
- @param package: package source name
+ @type source: string
+ @param source: source name
@type source_version: string
@param source_version: expected source version
@type source: str
@param source: DBSource package name to search for
- @type source: str or None
- @param source: DBSource version name to search for or None if not applicable
+ @type version: str or None
+ @param version: DBSource version name to search for or None if not applicable
@type dm_upload_allowed: bool
@param dm_upload_allowed: If None, no effect. If True or False, only
################################################################################
+@session_wrapper
+def add_dsc_to_db(u, filename, session=None):
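+    """
+    Add a source package (described by its .dsc) from an upload to the DB.
+
+    C{u} is assumed to be a daklib upload object carrying C{pkg.files},
+    C{pkg.dsc}, C{pkg.changes} and C{pkg.dsc_files} (inferred from the
+    attribute access below).
+
+    @return: tuple of (source, dsc_component, dsc_location_id, pfs), where
+    C{pfs} is the list of PoolFile objects created or touched
+    """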
+ entry = u.pkg.files[filename]
+ source = DBSource()
+ pfs = []
+
+ source.source = u.pkg.dsc["source"]
+ source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
+ source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
+ source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+ source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+ source.install_date = datetime.now().date()
+
+ dsc_component = entry["component"]
+ dsc_location_id = entry["location id"]
+
+ source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
+
+ # Set up a new poolfile if necessary
+ if not entry.has_key("files id") or not entry["files id"]:
+ filename = entry["pool name"] + filename
+ poolfile = add_poolfile(filename, entry, dsc_location_id, session)
+ session.flush()
+ pfs.append(poolfile)
+ entry["files id"] = poolfile.file_id
+
+ source.poolfile_id = entry["files id"]
+ session.add(source)
+ session.flush()
+
+ for suite_name in u.pkg.changes["distribution"].keys():
+ sa = SrcAssociation()
+ sa.source_id = source.source_id
+ sa.suite_id = get_suite(suite_name).suite_id
+ session.add(sa)
+
+ session.flush()
+
+ # Add the source files to the DB (files and dsc_files)
+ dscfile = DSCFile()
+ dscfile.source_id = source.source_id
+ dscfile.poolfile_id = entry["files id"]
+ session.add(dscfile)
+
+ for dsc_file, dentry in u.pkg.dsc_files.items():
+ df = DSCFile()
+ df.source_id = source.source_id
+
+        # If the .orig tarball is already in the pool, its
+        # files id is stored in dsc_files by check_dsc().
+ files_id = dentry.get("files id", None)
+
+ # Find the entry in the files hash
+ # TODO: Bail out here properly
+ dfentry = None
+ for f, e in u.pkg.files.items():
+ if f == dsc_file:
+ dfentry = e
+ break
+
+ if files_id is None:
+ filename = dfentry["pool name"] + dsc_file
+
+ (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
+ # FIXME: needs to check for -1/-2 and or handle exception
+ if found and obj is not None:
+ files_id = obj.file_id
+ pfs.append(obj)
+
+ # If still not found, add it
+ if files_id is None:
+ # HACK: Force sha1sum etc into dentry
+ dentry["sha1sum"] = dfentry["sha1sum"]
+ dentry["sha256sum"] = dfentry["sha256sum"]
+ poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
+ pfs.append(poolfile)
+ files_id = poolfile.file_id
+ else:
+ poolfile = get_poolfile_by_id(files_id, session)
+ if poolfile is None:
+ utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
+ pfs.append(poolfile)
+
+ df.poolfile_id = files_id
+ session.add(df)
+
+ session.flush()
+
+ # Add the src_uploaders to the DB
+ uploader_ids = [source.maintainer_id]
+ if u.pkg.dsc.has_key("uploaders"):
+ for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
+ up = up.strip()
+ uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
+
+ added_ids = {}
+ for up_id in uploader_ids:
+ if added_ids.has_key(up_id):
+ import utils
+ utils.warn("Already saw uploader %s for source %s" % (up_id, source.source))
+ continue
+
+ added_ids[up_id]=1
+
+ su = SrcUploader()
+ su.maintainer_id = up_id
+ su.source_id = source.source_id
+ session.add(su)
+
+ session.flush()
+
+ return source, dsc_component, dsc_location_id, pfs
+
+__all__.append('add_dsc_to_db')
+
+@session_wrapper
+def add_deb_to_db(u, filename, session=None):
+ """
+ Contrary to what you might expect, this routine deals with both
+ debs and udebs. That info is in 'dbtype', whilst 'type' is
+    'deb' for both of them.
+ """
+ cnf = Config()
+ entry = u.pkg.files[filename]
+
+ bin = DBBinary()
+ bin.package = entry["package"]
+ bin.version = entry["version"]
+ bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
+ bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
+ bin.arch_id = get_architecture(entry["architecture"], session).arch_id
+ bin.binarytype = entry["dbtype"]
+
+ # Find poolfile id
+ filename = entry["pool name"] + filename
+ fullpath = os.path.join(cnf["Dir::Pool"], filename)
+ if not entry.get("location id", None):
+ entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], session=session).location_id
+
+ if entry.get("files id", None):
+ poolfile = get_poolfile_by_id(bin.poolfile_id)
+ bin.poolfile_id = entry["files id"]
+ else:
+ poolfile = add_poolfile(filename, entry, entry["location id"], session)
+ bin.poolfile_id = entry["files id"] = poolfile.file_id
+
+ # Find source id
+ bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+ if len(bin_sources) != 1:
+ raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+ (bin.package, bin.version, bin.architecture.arch_string,
+ filename, bin.binarytype, u.pkg.changes["fingerprint"])
+
+ bin.source_id = bin_sources[0].source_id
+
+ # Add and flush object so it has an ID
+ session.add(bin)
+ session.flush()
+
+ # Add BinAssociations
+ for suite_name in u.pkg.changes["distribution"].keys():
+ ba = BinAssociation()
+ ba.binary_id = bin.binary_id
+ ba.suite_id = get_suite(suite_name).suite_id
+ session.add(ba)
+
+ session.flush()
+
+ # Deal with contents - disabled for now
+ #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
+ #if not contents:
+ # print "REJECT\nCould not determine contents of package %s" % bin.package
+ # session.rollback()
+ # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
+
+ return poolfile
+
+__all__.append('add_deb_to_db')
+
+################################################################################
+
class SourceACL(object):
def __init__(self, *args, **kwargs):
pass
('OverrideSuite', 'overridesuite'),
('ChangelogBase', 'changelogbase')]
-
class Suite(object):
def __init__(self, *args, **kwargs):
pass
"""
Returns list of Architecture objects for given C{suite} name
- @type source: str
- @param source: Suite name to search for
+ @type suite: str
+ @param suite: Suite name to search for
@type skipsrc: boolean
@param skipsrc: Whether to skip returning the 'source' architecture entry
################################################################################
-class DBConn(Singleton):
+class DBConn(object):
"""
database module init.
"""
+ __shared_state = {}
+
def __init__(self, *args, **kwargs):
- super(DBConn, self).__init__(*args, **kwargs)
+ self.__dict__ = self.__shared_state
- def _startup(self, *args, **kwargs):
- self.debug = False
- if kwargs.has_key('debug'):
- self.debug = True
- self.__createconn()
+ if not getattr(self, 'initialised', False):
+ self.initialised = True
+ self.debug = kwargs.has_key('debug')
+ self.__createconn()
def __setuptables(self):
- self.tbl_architecture = Table('architecture', self.db_meta, autoload=True)
- self.tbl_archive = Table('archive', self.db_meta, autoload=True)
- self.tbl_bin_contents = Table('bin_contents', self.db_meta, autoload=True)
- self.tbl_bin_associations = Table('bin_associations', self.db_meta, autoload=True)
- self.tbl_binaries = Table('binaries', self.db_meta, autoload=True)
- self.tbl_binary_acl = Table('binary_acl', self.db_meta, autoload=True)
- self.tbl_binary_acl_map = Table('binary_acl_map', self.db_meta, autoload=True)
- self.tbl_component = Table('component', self.db_meta, autoload=True)
- self.tbl_config = Table('config', self.db_meta, autoload=True)
- self.tbl_content_associations = Table('content_associations', self.db_meta, autoload=True)
- self.tbl_content_file_names = Table('content_file_names', self.db_meta, autoload=True)
- self.tbl_content_file_paths = Table('content_file_paths', self.db_meta, autoload=True)
- self.tbl_changes_pending_files = Table('changes_pending_files', self.db_meta, autoload=True)
- self.tbl_changes_pool_files = Table('changes_pool_files', self.db_meta, autoload=True)
- self.tbl_dsc_files = Table('dsc_files', self.db_meta, autoload=True)
- self.tbl_deb_contents = Table('deb_contents', self.db_meta, autoload=True)
- self.tbl_files = Table('files', self.db_meta, autoload=True)
- self.tbl_fingerprint = Table('fingerprint', self.db_meta, autoload=True)
- self.tbl_keyrings = Table('keyrings', self.db_meta, autoload=True)
- self.tbl_known_changes = Table('known_changes', self.db_meta, autoload=True)
- self.tbl_keyring_acl_map = Table('keyring_acl_map', self.db_meta, autoload=True)
- self.tbl_location = Table('location', self.db_meta, autoload=True)
- self.tbl_maintainer = Table('maintainer', self.db_meta, autoload=True)
- self.tbl_new_comments = Table('new_comments', self.db_meta, autoload=True)
- self.tbl_override = Table('override', self.db_meta, autoload=True)
- self.tbl_override_type = Table('override_type', self.db_meta, autoload=True)
- self.tbl_pending_bin_contents = Table('pending_bin_contents', self.db_meta, autoload=True)
- self.tbl_priority = Table('priority', self.db_meta, autoload=True)
- self.tbl_queue = Table('queue', self.db_meta, autoload=True)
- self.tbl_queue_files = Table('queue_files', self.db_meta, autoload=True)
- self.tbl_section = Table('section', self.db_meta, autoload=True)
- self.tbl_source = Table('source', self.db_meta, autoload=True)
- self.tbl_source_acl = Table('source_acl', self.db_meta, autoload=True)
- self.tbl_src_associations = Table('src_associations', self.db_meta, autoload=True)
- self.tbl_src_format = Table('src_format', self.db_meta, autoload=True)
- self.tbl_src_uploaders = Table('src_uploaders', self.db_meta, autoload=True)
- self.tbl_suite = Table('suite', self.db_meta, autoload=True)
- self.tbl_suite_architectures = Table('suite_architectures', self.db_meta, autoload=True)
- self.tbl_suite_src_formats = Table('suite_src_formats', self.db_meta, autoload=True)
- self.tbl_suite_queue_copy = Table('suite_queue_copy', self.db_meta, autoload=True)
- self.tbl_udeb_contents = Table('udeb_contents', self.db_meta, autoload=True)
- self.tbl_uid = Table('uid', self.db_meta, autoload=True)
- self.tbl_upload_blocks = Table('upload_blocks', self.db_meta, autoload=True)
+ tables = (
+ 'architecture',
+ 'archive',
+ 'bin_associations',
+ 'binaries',
+ 'binary_acl',
+ 'binary_acl_map',
+ 'bin_contents',
+ 'build_queue',
+ 'build_queue_files',
+ 'component',
+ 'config',
+ 'changes_pending_binaries',
+ 'changes_pending_files',
+ 'changes_pending_files_map',
+ 'changes_pending_source',
+ 'changes_pending_source_files',
+ 'changes_pool_files',
+ 'deb_contents',
+ 'dsc_files',
+ 'files',
+ 'fingerprint',
+ 'keyrings',
+ 'changes',
+ 'keyring_acl_map',
+ 'location',
+ 'maintainer',
+ 'new_comments',
+ 'override',
+ 'override_type',
+ 'pending_bin_contents',
+ 'policy_queue',
+ 'priority',
+ 'section',
+ 'source',
+ 'source_acl',
+ 'src_associations',
+ 'src_format',
+ 'src_uploaders',
+ 'suite',
+ 'suite_architectures',
+ 'suite_src_formats',
+ 'suite_build_queue_copy',
+ 'udeb_contents',
+ 'uid',
+ 'upload_blocks',
+ )
+
+ for table_name in tables:
+ table = Table(table_name, self.db_meta, autoload=True)
+ setattr(self, 'tbl_%s' % table_name, table)
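+        # Each reflected table is now exposed as self.tbl_<name> (e.g.
+        # self.tbl_architecture), preserving the attribute names the
+        # mappers below rely on.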
def __setupmappers(self):
mapper(Architecture, self.tbl_architecture,
mapper(DebContents, self.tbl_deb_contents,
properties = dict(binary_id=self.tbl_deb_contents.c.binary_id,
package=self.tbl_deb_contents.c.package,
- component=self.tbl_deb_contents.c.component,
+ suite=self.tbl_deb_contents.c.suite,
arch=self.tbl_deb_contents.c.arch,
section=self.tbl_deb_contents.c.section,
filename=self.tbl_deb_contents.c.filename))
mapper(UdebContents, self.tbl_udeb_contents,
properties = dict(binary_id=self.tbl_udeb_contents.c.binary_id,
package=self.tbl_udeb_contents.c.package,
- component=self.tbl_udeb_contents.c.component,
+ suite=self.tbl_udeb_contents.c.suite,
arch=self.tbl_udeb_contents.c.arch,
section=self.tbl_udeb_contents.c.section,
filename=self.tbl_udeb_contents.c.filename))
+ mapper(BuildQueue, self.tbl_build_queue,
+ properties = dict(queue_id = self.tbl_build_queue.c.id))
+
+ mapper(BuildQueueFile, self.tbl_build_queue_files,
+ properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
+ poolfile = relation(PoolFile, backref='buildqueueinstances')))
+
mapper(DBBinary, self.tbl_binaries,
properties = dict(binary_id = self.tbl_binaries.c.id,
package = self.tbl_binaries.c.package,
properties = dict(keyring_name = self.tbl_keyrings.c.name,
keyring_id = self.tbl_keyrings.c.id))
- mapper(KnownChange, self.tbl_known_changes,
- properties = dict(known_change_id = self.tbl_known_changes.c.id,
+ mapper(DBChange, self.tbl_changes,
+ properties = dict(change_id = self.tbl_changes.c.id,
poolfiles = relation(PoolFile,
secondary=self.tbl_changes_pool_files,
backref="changeslinks"),
- files = relation(KnownChangePendingFile, backref="changesfile")))
+ seen = self.tbl_changes.c.seen,
+ source = self.tbl_changes.c.source,
+ binaries = self.tbl_changes.c.binaries,
+ architecture = self.tbl_changes.c.architecture,
+ distribution = self.tbl_changes.c.distribution,
+ urgency = self.tbl_changes.c.urgency,
+ maintainer = self.tbl_changes.c.maintainer,
+ changedby = self.tbl_changes.c.changedby,
+ date = self.tbl_changes.c.date,
+ version = self.tbl_changes.c.version,
+ files = relation(ChangePendingFile,
+ secondary=self.tbl_changes_pending_files_map,
+ backref="changesfile"),
+ in_queue_id = self.tbl_changes.c.in_queue,
+ in_queue = relation(PolicyQueue,
+ primaryjoin=(self.tbl_changes.c.in_queue==self.tbl_policy_queue.c.id)),
+ approved_for_id = self.tbl_changes.c.approved_for))
+
+ mapper(ChangePendingBinary, self.tbl_changes_pending_binaries,
+ properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
+
+ mapper(ChangePendingFile, self.tbl_changes_pending_files,
+ properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
+ filename = self.tbl_changes_pending_files.c.filename,
+ size = self.tbl_changes_pending_files.c.size,
+ md5sum = self.tbl_changes_pending_files.c.md5sum,
+ sha1sum = self.tbl_changes_pending_files.c.sha1sum,
+ sha256sum = self.tbl_changes_pending_files.c.sha256sum))
+
+ mapper(ChangePendingSource, self.tbl_changes_pending_source,
+ properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,
+ change = relation(DBChange),
+ maintainer = relation(Maintainer,
+ primaryjoin=(self.tbl_changes_pending_source.c.maintainer_id==self.tbl_maintainer.c.id)),
+ changedby = relation(Maintainer,
+ primaryjoin=(self.tbl_changes_pending_source.c.changedby_id==self.tbl_maintainer.c.id)),
+ fingerprint = relation(Fingerprint),
+ source_files = relation(ChangePendingFile,
+ secondary=self.tbl_changes_pending_source_files,
+ backref="pending_sources")))
- mapper(KnownChangePendingFile, self.tbl_changes_pending_files,
- properties = dict(known_change_pending_file_id = self.tbl_changes_pending_files.c.id))
mapper(KeyringACLMap, self.tbl_keyring_acl_map,
properties = dict(keyring_acl_map_id = self.tbl_keyring_acl_map.c.id,
properties = dict(overridetype = self.tbl_override_type.c.type,
overridetype_id = self.tbl_override_type.c.id))
+ mapper(PolicyQueue, self.tbl_policy_queue,
+ properties = dict(policy_queue_id = self.tbl_policy_queue.c.id))
+
mapper(Priority, self.tbl_priority,
properties = dict(priority_id = self.tbl_priority.c.id))
- mapper(Queue, self.tbl_queue,
- properties = dict(queue_id = self.tbl_queue.c.id))
-
- mapper(QueueFile, self.tbl_queue_files,
- properties = dict(queue = relation(Queue, backref='queuefiles'),
- poolfile = relation(PoolFile, backref='queueinstances')))
-
mapper(Section, self.tbl_section,
properties = dict(section_id = self.tbl_section.c.id,
section=self.tbl_section.c.section))
mapper(Suite, self.tbl_suite,
properties = dict(suite_id = self.tbl_suite.c.id,
- policy_queue = relation(Queue),
- copy_queues = relation(Queue, secondary=self.tbl_suite_queue_copy)))
+ policy_queue = relation(PolicyQueue),
+ copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy)))
mapper(SuiteArchitecture, self.tbl_suite_architectures,
properties = dict(suite_id = self.tbl_suite_architectures.c.suite,