################################################################################
+import apt_pkg
import os
from os.path import normpath
import re
import psycopg2
import traceback
import commands
+import signal
try:
# python >= 2.6
# in the database
from config import Config
from textutils import fix_maintainer
-from dak_exceptions import DBUpdateError, NoSourceFieldError
+from dak_exceptions import DBUpdateError, NoSourceFieldError, FileExistsError
# suppress some deprecation warnings in squeeze related to sqlalchemy
import warnings
warnings.filterwarnings('ignore', \
"The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'.*", \
SADeprecationWarning)
+warnings.filterwarnings('ignore', \
+ "Predicate of partial index .* ignored during reflection", \
+ SAWarning)
################################################################################
return None
sa_major_version = sqlalchemy.__version__[0:3]
-if sa_major_version in ["0.5", "0.6"]:
+if sa_major_version in ["0.5", "0.6", "0.7"]:
from sqlalchemy.databases import postgres
postgres.ischema_names['debversion'] = DebVersion
else:
- raise Exception("dak only ported to SQLA versions 0.5 and 0.6. See daklib/dbconn.py")
+ raise Exception("dak only ported to SQLA versions 0.5 to 0.7. See daklib/dbconn.py")
################################################################################
################################################################################
def subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect. Restoring SIG_DFL in the child (via
    # Popen's preexec_fn, see DBBinary.scan_contents) lets tools such as
    # dpkg-deb die quietly on a broken pipe instead of seeing EPIPE errors.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
class DBBinary(ORMObject):
def __init__(self, package = None, source = None, version = None, \
maintainer = None, architecture = None, poolfile = None, \
package does not contain any regular file.
'''
fullpath = self.poolfile.fullpath
- dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE)
+ dpkg = Popen(['dpkg-deb', '--fsys-tarfile', fullpath], stdout = PIPE,
+ preexec_fn = subprocess_setup)
tar = TarFile.open(fileobj = dpkg.stdout, mode = 'r|')
for member in tar.getmembers():
if not member.isdir():
@rtype: text
@return: stanza text of the control section.
'''
- import apt_inst
+ import utils
fullpath = self.poolfile.fullpath
deb_file = open(fullpath, 'r')
- stanza = apt_inst.debExtractControl(deb_file)
+ stanza = utils.deb_extract_control(deb_file)
deb_file.close()
return stanza
try:
# Grab files we want to include
newer = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
+ newer += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) > starttime).all()
# Write file list with newer files
(fl_fd, fl_name) = mkstemp()
for n in newer:
# Crude hack with open and append, but this whole section is and should be redone.
if self.notautomatic:
release=open("Release", "a")
- release.write("NotAutomatic: yes")
+ release.write("NotAutomatic: yes\n")
release.close()
# Sign if necessary
# Grab files older than our execution time
older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+ older += session.query(BuildQueuePolicyFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueuePolicyFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
for o in older:
killdb = False
Logger.log(["I: Removing %s from the queue" % o.fullpath])
os.unlink(o.fullpath)
killdb = True
- except OSError, e:
+ except OSError as e:
# If it wasn't there, don't worry
if e.errno == ENOENT:
killdb = True
if f.startswith('Packages') or f.startswith('Source') or f.startswith('Release') or f.startswith('advisory'):
continue
- try:
- r = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter_by(filename = f).one()
- except NoResultFound:
+ if not self.contains_filename(f):
fp = os.path.join(self.path, f)
if dryrun:
Logger.log(["I: Would remove unused link %s" % fp])
except OSError:
Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
+ def contains_filename(self, filename):
+ """
+ @rtype Boolean
+ @returns True if filename is supposed to be in the queue; False otherwise
+ """
+ session = DBConn().session().object_session(self)
+ if session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id, filename = filename).count() > 0:
+ return True
+ elif session.query(BuildQueuePolicyFile).filter_by(build_queue = self, filename = filename).count() > 0:
+ return True
+ return False
+
def add_file_from_pool(self, poolfile):
"""Copies a file into the pool. Assumes that the PoolFile object is
attached to the same SQLAlchemy session as the Queue object is.
# Check if we have a file of this name or this ID already
for f in self.queuefiles:
- if f.fileid is not None and f.fileid == poolfile.file_id or \
- f.poolfile.filename == poolfile_basename:
+ if (f.fileid is not None and f.fileid == poolfile.file_id) or \
+ (f.poolfile is not None and f.poolfile.filename == poolfile_basename):
# In this case, update the BuildQueueFile entry so we
# don't remove it too early
f.lastused = datetime.now()
# Prepare BuildQueueFile object
qf = BuildQueueFile()
qf.build_queue_id = self.queue_id
- qf.lastused = datetime.now()
qf.filename = poolfile_basename
targetpath = poolfile.fullpath
else:
os.symlink(targetpath, queuepath)
qf.fileid = poolfile.file_id
+ except FileExistsError:
+ if not poolfile.identical_to(queuepath):
+ raise
except OSError:
return None
return qf
+ def add_changes_from_policy_queue(self, policyqueue, changes):
+ """
+ Copies a changes from a policy queue together with its poolfiles.
+
+ @type policyqueue: PolicyQueue
+ @param policyqueue: policy queue to copy the changes from
+
+ @type changes: DBChange
+ @param changes: changes to copy to this build queue
+ """
+ for policyqueuefile in changes.files:
+ self.add_file_from_policy_queue(policyqueue, policyqueuefile)
+ for poolfile in changes.poolfiles:
+ self.add_file_from_pool(poolfile)
+
+ def add_file_from_policy_queue(self, policyqueue, policyqueuefile):
+ """
+ Copies a file from a policy queue.
+ Assumes that the policyqueuefile is attached to the same SQLAlchemy
+ session as the Queue object is. The caller is responsible for
+ committing after calling this function.
+
+ @type policyqueue: PolicyQueue
+ @param policyqueue: policy queue to copy the file from
+
+ @type policyqueuefile: ChangePendingFile
+ @param policyqueuefile: file to be added to the build queue
+ """
+ session = DBConn().session().object_session(policyqueuefile)
+
+ # Is the file already there?
+ try:
+ f = session.query(BuildQueuePolicyFile).filter_by(build_queue=self, file=policyqueuefile).one()
+ f.lastused = datetime.now()
+ return f
+ except NoResultFound:
+ pass # continue below
+
+ # We have to add the file.
+ f = BuildQueuePolicyFile()
+ f.build_queue = self
+ f.file = policyqueuefile
+ f.filename = policyqueuefile.filename
+
+ source = os.path.join(policyqueue.path, policyqueuefile.filename)
+ target = f.fullpath
+ try:
+ # Always copy files from policy queues as they might move around.
+ import utils
+ utils.copy(source, target)
+ except FileExistsError:
+ if not policyqueuefile.identical_to(target):
+ raise
+ except OSError:
+ return None
+
+ session.add(f)
+ return f
__all__.append('BuildQueue')
################################################################################
class BuildQueueFile(object):
+ """
+ BuildQueueFile represents a file in a build queue coming from a pool.
+ """
+
def __init__(self, *args, **kwargs):
pass
################################################################################
class BuildQueuePolicyFile(object):
    """
    BuildQueuePolicyFile represents a file in a build queue that comes from a
    policy queue (and not a pool).

    Attributes are populated by the SQLAlchemy mapper:
    build_queue is the owning BuildQueue, file the ChangePendingFile this
    entry was copied from, and filename the base name inside the queue.
    """

    def __init__(self, *args, **kwargs):
        pass

    @property
    def fullpath(self):
        # Absolute path of the file inside its build queue directory.
        return os.path.join(self.build_queue.path, self.filename)
+
+__all__.append('BuildQueuePolicyFile')
+
+################################################################################
+
class ChangePendingBinary(object):
def __init__(self, *args, **kwargs):
pass
def __repr__(self):
return '<ChangePendingFile %s>' % self.change_pending_file_id
+ def identical_to(self, filename):
+ """
+ compare size and hash with the given file
+
+ @rtype: bool
+ @return: true if the given file has the same size and hash as this object; false otherwise
+ """
+ st = os.stat(filename)
+ if self.size != st.st_size:
+ return False
+
+ f = open(filename, "r")
+ sha256sum = apt_pkg.sha256sum(f)
+ if sha256sum != self.sha256sum:
+ return False
+
+ return True
+
__all__.append('ChangePendingFile')
################################################################################
__all__.append('get_component')
@session_wrapper
def get_component_names(session=None):
    """
    Returns list of strings of component names.

    @rtype: list
    @return: list of strings of component names
    """

    components = session.query(Component).all()
    return [component.component_name for component in components]
+
+__all__.append('get_component_names')
+
################################################################################
class DBConfig(object):
################################################################################
class ExternalOverride(ORMObject):
    """
    Represents a row of the external_overrides table: a key/value pair
    attached to a package (with suite and component relations set up by the
    mapper).
    """
    def __init__(self, *args, **kwargs):
        pass

    def __repr__(self):
        return '<ExternalOverride %s = %s: %s>' % (self.package, self.key, self.value)
+
+__all__.append('ExternalOverride')
+
+################################################################################
+
class PoolFile(ORMObject):
def __init__(self, filename = None, location = None, filesize = -1, \
md5sum = None):
def not_null_constraints(self):
return ['filename', 'md5sum', 'location']
+ def identical_to(self, filename):
+ """
+ compare size and hash with the given file
+
+ @rtype: bool
+ @return: true if the given file has the same size and hash as this object; false otherwise
+ """
+ st = os.stat(filename)
+ if self.filesize != st.st_size:
+ return False
+
+ f = open(filename, "r")
+ sha256sum = apt_pkg.sha256sum(f)
+ if sha256sum != self.sha256sum:
+ return False
+
+ return True
+
__all__.append('PoolFile')
@session_wrapper
key = None
signingkey = False
- for line in k.xreadlines():
+ for line in k:
field = line.split(":")
if field[0] == "pub":
key = field[4]
__all__.append('get_keyring')
@session_wrapper
def get_active_keyring_paths(session=None):
    """
    Returns the paths of all active keyrings, highest priority first.

    @rtype: list
    @return: list of active keyring paths
    """
    query = session.query(Keyring).filter(Keyring.active == True).order_by(desc(Keyring.priority))
    return [keyring.keyring_name for keyring in query.all()]
+
+__all__.append('get_active_keyring_paths')
+
@session_wrapper
def get_primary_keyring_path(session=None):
    """
    Get the full path to the highest priority active keyring

    @rtype: str or None
    @return: path to the active keyring with the highest priority or None if no
             keyring is configured
    """
    keyrings = get_active_keyring_paths()
    # get_active_keyring_paths() returns the list ordered by descending
    # priority, so the first entry (if any) is the primary keyring.
    return keyrings[0] if keyrings else None
+
+__all__.append('get_primary_keyring_path')
+
################################################################################
class KeyringACLMap(object):
def properties(self):
return ['source', 'source_id', 'maintainer', 'changedby', \
'fingerprint', 'poolfile', 'version', 'suites_count', \
- 'install_date', 'binaries_count']
+ 'install_date', 'binaries_count', 'uploaders_count']
def not_null_constraints(self):
return ['source', 'version', 'install_date', 'maintainer', \
metadata = association_proxy('key', 'value')
+ def get_component_name(self):
+ return self.poolfile.location.component.component_name
+
def scan_contents(self):
'''
Returns a set of names for non directories. The path names are
q = session.query(DBSource).filter_by(source=source). \
filter(DBSource.version.in_([source_version, orig_source_version]))
if suite != "any":
- # source must exist in suite X, or in some other suite that's
- # mapped to X, recursively... silent-maps are counted too,
- # unreleased-maps aren't.
- maps = cnf.ValueList("SuiteMappings")[:]
- maps.reverse()
- maps = [ m.split() for m in maps ]
- maps = [ (x[1], x[2]) for x in maps
- if x[0] == "map" or x[0] == "silent-map" ]
- s = [suite]
- for (from_, to) in maps:
- if from_ in s and to not in s:
- s.append(to)
-
- q = q.filter(DBSource.suites.any(Suite.suite_name.in_(s)))
+ # source must exist in 'suite' or a suite that is enhanced by 'suite'
+ s = get_suite(suite, session)
+ if s:
+ enhances_vcs = session.query(VersionCheck).filter(VersionCheck.suite==s).filter_by(check='Enhances')
+ considered_suites = [ vc.reference for vc in enhances_vcs ]
+ considered_suites.append(s)
+
+ q = q.filter(DBSource.suites.any(Suite.suite_id.in_([s.suite_id for s in considered_suites])))
if q.count() > 0:
continue
################################################################################
def split_uploaders(uploaders_list):
    '''
    Split the Uploaders field into the individual uploaders and yield each of
    them. Beware: email addresses might contain commas.
    '''
    # A "," only separates uploaders when it directly follows ">" (the end of
    # an email address); commas inside quoted names ("Doe, John <...>") must
    # be preserved.  're' is already imported at module level, so no local
    # import is needed.
    for uploader in re.sub(">[ ]*,", ">\t", uploaders_list).split("\t"):
        yield uploader.strip()
+
@session_wrapper
def add_dsc_to_db(u, filename, session=None):
entry = u.pkg.files[filename]
source.source = u.pkg.dsc["source"]
source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
- source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+ # If Changed-By isn't available, fall back to maintainer
+ if u.pkg.changes.has_key("changed-by"):
+ source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
+ else:
+ source.changedby_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
source.install_date = datetime.now().date()
session.add(df)
# Add the src_uploaders to the DB
+ session.flush()
+ session.refresh(source)
source.uploaders = [source.maintainer]
if u.pkg.dsc.has_key("uploaders"):
- for up in u.pkg.dsc["uploaders"].replace(">, ", ">\t").split("\t"):
- up = up.strip()
+ for up in split_uploaders(u.pkg.dsc["uploaders"]):
source.uploaders.append(get_or_set_maintainer(up, session))
session.flush()
# Find source id
bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
+
+ # If we couldn't find anything and the upload contains Arch: source,
+ # fall back to trying the source package, source version uploaded
+ # This maintains backwards compatibility with previous dak behaviour
+ # and deals with slightly broken binary debs which don't properly
+ # declare their source package name
+ if len(bin_sources) == 0:
+ if u.pkg.changes["architecture"].has_key("source") \
+ and u.pkg.dsc.has_key("source") and u.pkg.dsc.has_key("version"):
+ bin_sources = get_sources_from_name(u.pkg.dsc["source"], u.pkg.dsc["version"], session=session)
+
+ # If we couldn't find a source here, we reject
+ # TODO: Fix this so that it doesn't kill process-upload and instead just
+ # performs a reject. To be honest, we should probably spot this
+ # *much* earlier than here
if len(bin_sources) != 1:
- raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
+ raise NoSourceFieldError("Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
(bin.package, bin.version, entry["architecture"],
- filename, bin.binarytype, u.pkg.changes["fingerprint"])
+ filename, bin.binarytype, u.pkg.changes["fingerprint"]))
bin.source_id = bin_sources[0].source_id
for srcname, version in entry["built-using"]:
exsources = get_sources_from_name(srcname, version, session=session)
if len(exsources) != 1:
- raise NoSourceFieldError, "Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
+ raise NoSourceFieldError("Unable to find source package (%s = %s) in Built-Using for %s (%s), %s, file %s, type %s, signed by %s" % \
(srcname, version, bin.package, bin.version, entry["architecture"],
- filename, bin.binarytype, u.pkg.changes["fingerprint"])
+ filename, bin.binarytype, u.pkg.changes["fingerprint"]))
bin.extra_sources.append(exsources[0])
return session.query(DBSource).filter_by(source = source). \
with_parent(self)
+ def get_overridesuite(self):
+ if self.overridesuite is None:
+ return self
+ else:
+ return object_session(self).query(Suite).filter_by(suite_name=self.overridesuite).one()
+
__all__.append('Suite')
@session_wrapper
################################################################################
-# TODO: should be removed because the implementation is too trivial
@session_wrapper
def get_suite_architectures(suite, skipsrc=False, skipall=False, session=None):
"""
- Returns list of Architecture objects for given C{suite} name
+ Returns list of Architecture objects for given C{suite} name. The list is
+ empty if suite does not exist.
@type suite: str
@param suite: Suite name to search for
@return: list of Architecture objects for the given name (may be empty)
"""
- return get_suite(suite, session).get_architectures(skipsrc, skipall)
+ try:
+ return get_suite(suite, session).get_architectures(skipsrc, skipall)
+ except AttributeError:
+ return []
__all__.append('get_suite_architectures')
################################################################################
-class SuiteSrcFormat(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<SuiteSrcFormat (%s, %s)>' % (self.suite_id, self.src_format_id)
-
-__all__.append('SuiteSrcFormat')
-
-@session_wrapper
-def get_suite_src_formats(suite, session=None):
- """
- Returns list of allowed SrcFormat for C{suite}.
-
- @type suite: str
- @param suite: Suite name to search for
-
- @type session: Session
- @param session: Optional SQL session object (a temporary one will be
- generated if not supplied)
-
- @rtype: list
- @return: the list of allowed source formats for I{suite}
- """
-
- q = session.query(SrcFormat)
- q = q.join(SuiteSrcFormat)
- q = q.join(Suite).filter_by(suite_name=suite)
- q = q.order_by('format_name')
-
- return q.all()
-
-__all__.append('get_suite_src_formats')
-
-################################################################################
-
class Uid(ORMObject):
def __init__(self, uid = None, name = None):
self.uid = uid
################################################################################
class VersionCheck(ORMObject):
    """
    A version constraint between two suites, e.g. an 'Enhances' relation
    from one suite to a reference suite (see get_version_checks()).
    """
    def __init__(self, *args, **kwargs):
        pass

    def properties(self):
        # Only 'check' is listed here; suite and reference are left out.
        # NOTE(review): presumably to keep the string representation cheap
        # (they would require extra queries) -- confirm.
        return ['check']

    def not_null_constraints(self):
        return ['suite', 'check', 'reference']
+
+__all__.append('VersionCheck')
+
@session_wrapper
def get_version_checks(suite_name, check = None, session = None):
    """
    Fetch the VersionCheck entries for a suite.

    @type suite_name: str
    @param suite_name: name of the suite

    @type check: str
    @param check: if given, restrict the result to this check type

    @rtype: list
    @return: list of VersionCheck objects; empty if the suite is unknown
    """
    suite = get_suite(suite_name, session)
    if not suite:
        # Make sure that what we return is iterable so that list comprehensions
        # involving this don't cause a traceback
        return []

    query = session.query(VersionCheck).filter_by(suite=suite)
    if check:
        query = query.filter_by(check=check)
    return query.all()
+
+__all__.append('get_version_checks')
+
+################################################################################
+
class DBConn(object):
"""
database module init.
'binary_acl_map',
'build_queue',
'build_queue_files',
+ 'build_queue_policy_files',
'changelogs_text',
'changes',
'component',
'changes_pending_source_files',
'changes_pool_files',
'dsc_files',
+ 'external_overrides',
'extra_src_references',
'files',
'fingerprint',
'suite_src_formats',
'uid',
'upload_blocks',
+ 'version_check',
)
views = (
'almost_obsolete_all_associations',
'almost_obsolete_src_associations',
'any_associations_source',
- 'bin_assoc_by_arch',
'bin_associations_binaries',
'binaries_suite_arch',
'binfiles_suite_component_arch',
mapper(Architecture, self.tbl_architecture,
properties = dict(arch_id = self.tbl_architecture.c.id,
suites = relation(Suite, secondary=self.tbl_suite_architectures,
- order_by='suite_name',
- backref=backref('architectures', order_by='arch_string'))),
+ order_by=self.tbl_suite.c.suite_name,
+ backref=backref('architectures', order_by=self.tbl_architecture.c.arch_string))),
extension = validator)
mapper(Archive, self.tbl_archive,
properties = dict(buildqueue = relation(BuildQueue, backref='queuefiles'),
poolfile = relation(PoolFile, backref='buildqueueinstances')))
+ mapper(BuildQueuePolicyFile, self.tbl_build_queue_policy_files,
+ properties = dict(
+ build_queue = relation(BuildQueue, backref='policy_queue_files'),
+ file = relation(ChangePendingFile, lazy='joined')))
+
mapper(DBBinary, self.tbl_binaries,
properties = dict(binary_id = self.tbl_binaries.c.id,
package = self.tbl_binaries.c.package,
poolfile_id = self.tbl_dsc_files.c.file,
poolfile = relation(PoolFile)))
+ mapper(ExternalOverride, self.tbl_external_overrides,
+ properties = dict(
+ suite_id = self.tbl_external_overrides.c.suite,
+ suite = relation(Suite),
+ component_id = self.tbl_external_overrides.c.component,
+ component = relation(Component)))
+
mapper(PoolFile, self.tbl_files,
properties = dict(file_id = self.tbl_files.c.id,
filesize = self.tbl_files.c.size,
properties = dict(suite_id = self.tbl_suite.c.id,
policy_queue = relation(PolicyQueue),
copy_queues = relation(BuildQueue,
- secondary=self.tbl_suite_build_queue_copy)),
+ secondary=self.tbl_suite_build_queue_copy),
+ srcformats = relation(SrcFormat, secondary=self.tbl_suite_src_formats,
+ backref=backref('suites', lazy='dynamic'))),
extension = validator)
- mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
- properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
- suite = relation(Suite, backref='suitesrcformats'),
- src_format_id = self.tbl_suite_src_formats.c.src_format,
- src_format = relation(SrcFormat)))
-
mapper(Uid, self.tbl_uid,
properties = dict(uid_id = self.tbl_uid.c.id,
fingerprint = relation(Fingerprint)),
key = relation(MetadataKey),
value = self.tbl_source_metadata.c.value))
+ mapper(VersionCheck, self.tbl_version_check,
+ properties = dict(
+ suite_id = self.tbl_version_check.c.suite,
+ suite = relation(Suite, primaryjoin=self.tbl_version_check.c.suite==self.tbl_suite.c.id),
+ reference_id = self.tbl_version_check.c.reference,
+ reference = relation(Suite, primaryjoin=self.tbl_version_check.c.reference==self.tbl_suite.c.id, lazy='joined')))
+
## Connection functions
def __createconn(self):
from config import Config
sqlalchemy.dialects.postgresql.base.dialect = PGDialect_psycopg2_dak
- self.db_pg = create_engine(connstr, **engine_args)
- self.db_meta = MetaData()
- self.db_meta.bind = self.db_pg
- self.db_smaker = sessionmaker(bind=self.db_pg,
- autoflush=True,
- autocommit=False)
+ try:
+ self.db_pg = create_engine(connstr, **engine_args)
+ self.db_meta = MetaData()
+ self.db_meta.bind = self.db_pg
+ self.db_smaker = sessionmaker(bind=self.db_pg,
+ autoflush=True,
+ autocommit=False)
+
+ self.__setuptables()
+ self.__setupmappers()
+
+ except OperationalError as e:
+ import utils
+ utils.fubar("Cannot connect to database (%s)" % str(e))
- self.__setuptables()
- self.__setupmappers()
self.pid = os.getpid()
    def session(self, work_mem = 0):
        '''
        Returns a new session object. If a work_mem parameter is provided a new
        transaction is started and the work_mem parameter is set for this
        transaction. The work_mem parameter is measured in MB. A default value
        will be used if the parameter is not set.
        '''
        # reinitialize DBConn in new processes: engine and mappers must not
        # be shared across a fork, so rebuild them when the PID changed
        if self.pid != os.getpid():
            clear_mappers()
            self.__createconn()
        session = self.db_smaker()
        if work_mem > 0:
            # SET LOCAL only lasts until the end of the current transaction;
            # it is reset at the next COMMIT/ROLLBACK.  The %d conversion
            # coerces work_mem to an integer, so no SQL injection is possible.
            session.execute("SET LOCAL work_mem TO '%d MB'" % work_mem)
        return session
__all__.append('DBConn')