git.decadent.org.uk Git - dak.git/commitdiff
Merge commit 'ftpmaster/master' into psycopg2
author    Mark Hymers <mhy@debian.org>
          Sat, 24 Jan 2009 19:35:26 +0000 (19:35 +0000)
committer Mark Hymers <mhy@debian.org>
          Sat, 24 Jan 2009 19:35:26 +0000 (19:35 +0000)
dak/init_db.py
daklib/Config.py [new file with mode: 0644]
daklib/DBConn.py [new file with mode: 0644]
daklib/Singleton.py [new file with mode: 0644]

diff --git a/dak/init_db.py b/dak/init_db.py
index d40ad0c0aeb49cd6a95e1ad10d0d70181f056514..20102cdf72a9a160836f7a748450e0a6977c8dd4 100755 (executable)
 
 ################################################################################
 
-import pg, sys
+import psycopg2, sys
 import apt_pkg
-from daklib import database
-from daklib import utils
-
-################################################################################
 
-Cnf = None
-projectB = None
+from daklib import utils
+from daklib.DBConn import DBConn
+from daklib.Config import Config
 
 ################################################################################
 
@@ -43,159 +40,187 @@ Initalizes some tables in the projectB database based on the config file.
 ################################################################################
 
 def sql_get (config, key):
-    """Return the value of config[key] in quotes or NULL if it doesn't exist."""
+    """Return the value of config[key] or None if it doesn't exist."""
 
-    if config.has_key(key):
-        return "'%s'" % (config[key])
-    else:
-        return "NULL"
+    try:
+        return config[key]
+    except KeyError:
+        return None
 
 ################################################################################
 
-def do_archive():
-    """Initalize the archive table."""
-
-    projectB.query("BEGIN WORK")
-    projectB.query("DELETE FROM archive")
-    for name in Cnf.SubTree("Archive").List():
-        archive_config = Cnf.SubTree("Archive::%s" % (name))
-        origin_server = sql_get(archive_config, "OriginServer")
-        description = sql_get(archive_config, "Description")
-        projectB.query("INSERT INTO archive (name, origin_server, description) "
-                       "VALUES ('%s', %s, %s)"
-                       % (name, origin_server, description))
-    projectB.query("COMMIT WORK")
-
-def do_architecture():
-    """Initalize the architecture table."""
-
-    projectB.query("BEGIN WORK")
-    projectB.query("DELETE FROM architecture")
-    for arch in Cnf.SubTree("Architectures").List():
-        description = Cnf["Architectures::%s" % (arch)]
-        projectB.query("INSERT INTO architecture (arch_string, description) "
-                       "VALUES ('%s', '%s')" % (arch, description))
-    projectB.query("COMMIT WORK")
-
-def do_component():
-    """Initalize the component table."""
-
-    projectB.query("BEGIN WORK")
-    projectB.query("DELETE FROM component")
-    for name in Cnf.SubTree("Component").List():
-        component_config = Cnf.SubTree("Component::%s" % (name))
-        description = sql_get(component_config, "Description")
-        if component_config.get("MeetsDFSG").lower() == "true":
-            meets_dfsg = "true"
-        else:
-            meets_dfsg = "false"
-        projectB.query("INSERT INTO component (name, description, meets_dfsg) "
-                       "VALUES ('%s', %s, %s)"
-                       % (name, description, meets_dfsg))
-    projectB.query("COMMIT WORK")
-
-def do_location():
-    """Initalize the location table."""
-
-    projectB.query("BEGIN WORK")
-    projectB.query("DELETE FROM location")
-    for location in Cnf.SubTree("Location").List():
-        location_config = Cnf.SubTree("Location::%s" % (location))
-        archive_id = database.get_archive_id(location_config["Archive"])
-        if archive_id == -1:
-            utils.fubar("Archive '%s' for location '%s' not found."
-                               % (location_config["Archive"], location))
-        location_type = location_config.get("type")
-        if location_type == "legacy-mixed":
-            projectB.query("INSERT INTO location (path, archive, type) VALUES "
-                           "('%s', %d, '%s')"
-                           % (location, archive_id, location_config["type"]))
-        elif location_type == "legacy" or location_type == "pool":
-            for component in Cnf.SubTree("Component").List():
-                component_id = database.get_component_id(component)
-                projectB.query("INSERT INTO location (path, component, "
-                               "archive, type) VALUES ('%s', %d, %d, '%s')"
-                               % (location, component_id, archive_id,
-                                  location_type))
-        else:
-            utils.fubar("E: type '%s' not recognised in location %s."
-                               % (location_type, location))
-    projectB.query("COMMIT WORK")
-
-def do_suite():
-    """Initalize the suite table."""
-
-    projectB.query("BEGIN WORK")
-    projectB.query("DELETE FROM suite")
-    for suite in Cnf.SubTree("Suite").List():
-        suite_config = Cnf.SubTree("Suite::%s" %(suite))
-        version = sql_get(suite_config, "Version")
-        origin = sql_get(suite_config, "Origin")
-        description = sql_get(suite_config, "Description")
-        projectB.query("INSERT INTO suite (suite_name, version, origin, "
-                       "description) VALUES ('%s', %s, %s, %s)"
-                       % (suite.lower(), version, origin, description))
-        for architecture in Cnf.ValueList("Suite::%s::Architectures" % (suite)):
-            architecture_id = database.get_architecture_id (architecture)
-            if architecture_id < 0:
-                utils.fubar("architecture '%s' not found in architecture"
-                                   " table for suite %s."
+class InitDB(object):
+    def __init__(self, Cnf, projectB):
+        self.Cnf = Cnf
+        self.projectB = projectB
+
+    def do_archive(self):
+        """Initialize the archive table."""
+
+        c = self.projectB.cursor()
+        c.execute("DELETE FROM archive")
+        archive_add = "INSERT INTO archive (name, origin_server, description) VALUES (%s, %s, %s)"
+        for name in self.Cnf.SubTree("Archive").List():
+            archive_config = self.Cnf.SubTree("Archive::%s" % (name))
+            origin_server = sql_get(archive_config, "OriginServer")
+            description = sql_get(archive_config, "Description")
+            c.execute(archive_add, [name, origin_server, description])
+        self.projectB.commit()
+
+    def do_architecture(self):
+        """Initialize the architecture table."""
+
+        c = self.projectB.cursor()
+        c.execute("DELETE FROM architecture")
+        arch_add = "INSERT INTO architecture (arch_string, description) VALUES (%s, %s)"
+        for arch in self.Cnf.SubTree("Architectures").List():
+            description = self.Cnf["Architectures::%s" % (arch)]
+            c.execute(arch_add, [arch, description])
+        self.projectB.commit()
+
+    def do_component(self):
+        """Initialize the component table."""
+
+        c = self.projectB.cursor()
+        c.execute("DELETE FROM component")
+
+        comp_add = "INSERT INTO component (name, description, meets_dfsg) " + \
+                   "VALUES (%s, %s, %s)"
+
+        for name in self.Cnf.SubTree("Component").List():
+            component_config = self.Cnf.SubTree("Component::%s" % (name))
+            description = sql_get(component_config, "Description")
+            meets_dfsg = (component_config.get("MeetsDFSG").lower() == "true")
+            c.execute(comp_add, [name, description, meets_dfsg])
+
+        self.projectB.commit()
+
+    def do_location(self):
+        """Initialize the location table."""
+
+        c = self.projectB.cursor()
+        c.execute("DELETE FROM location")
+
+        loc_add_mixed = "INSERT INTO location (path, archive, type) " + \
+                        "VALUES (%s, %s, %s)"
+
+        loc_add = "INSERT INTO location (path, component, archive, type) " + \
+                  "VALUES (%s, %s, %s, %s)"
+
+        for location in self.Cnf.SubTree("Location").List():
+            location_config = self.Cnf.SubTree("Location::%s" % (location))
+            archive_id = self.projectB.get_archive_id(location_config["Archive"])
+            if archive_id is None:
+                utils.fubar("Archive '%s' for location '%s' not found."
+                                   % (location_config["Archive"], location))
+            location_type = location_config.get("type")
+            if location_type == "legacy-mixed":
+                c.execute(loc_add_mixed, [location, archive_id, location_config["type"]])
+            elif location_type == "legacy" or location_type == "pool":
+                for component in self.Cnf.SubTree("Component").List():
+                    component_id = self.projectB.get_component_id(component)
+                    c.execute(loc_add, [location, component_id, archive_id, location_type])
+            else:
+                utils.fubar("E: type '%s' not recognised in location %s."
+                                   % (location_type, location))
+
+        self.projectB.commit()
+
+    def do_suite(self):
+        """Initialize the suite table."""
+
+        c = self.projectB.cursor()
+        c.execute("DELETE FROM suite")
+
+        suite_add = "INSERT INTO suite (suite_name, version, origin, description) " + \
+                    "VALUES (%s, %s, %s, %s)"
+
+        sa_add = "INSERT INTO suite_architectures (suite, architecture) " + \
+                 "VALUES (currval('suite_id_seq'), %s)"
+
+        for suite in self.Cnf.SubTree("Suite").List():
+            suite_config = self.Cnf.SubTree("Suite::%s" %(suite))
+            version = sql_get(suite_config, "Version")
+            origin = sql_get(suite_config, "Origin")
+            description = sql_get(suite_config, "Description")
+            c.execute(suite_add, [suite.lower(), version, origin, description])
+            for architecture in self.Cnf.ValueList("Suite::%s::Architectures" % (suite)):
+                architecture_id = self.projectB.get_architecture_id (architecture)
+                if architecture_id is None:
+                    utils.fubar("architecture '%s' not found in architecture"
+                                       " table for suite %s."
                                    % (architecture, suite))
-            projectB.query("INSERT INTO suite_architectures (suite, "
-                           "architecture) VALUES (currval('suite_id_seq'), %d)"
-                           % (architecture_id))
-    projectB.query("COMMIT WORK")
-
-def do_override_type():
-    """Initalize the override_type table."""
-
-    projectB.query("BEGIN WORK")
-    projectB.query("DELETE FROM override_type")
-    for override_type in Cnf.ValueList("OverrideType"):
-        projectB.query("INSERT INTO override_type (type) VALUES ('%s')"
-                       % (override_type))
-    projectB.query("COMMIT WORK")
-
-def do_priority():
-    """Initialize the priority table."""
-
-    projectB.query("BEGIN WORK")
-    projectB.query("DELETE FROM priority")
-    for priority in Cnf.SubTree("Priority").List():
-        projectB.query("INSERT INTO priority (priority, level) VALUES "
-                       "('%s', %s)"
-                       % (priority, Cnf["Priority::%s" % (priority)]))
-    projectB.query("COMMIT WORK")
-
-def do_section():
-    """Initalize the section table."""
-    projectB.query("BEGIN WORK")
-    projectB.query("DELETE FROM section")
-    for component in Cnf.SubTree("Component").List():
-        if Cnf["Control-Overrides::ComponentPosition"] == "prefix":
-            suffix = ""
-            if component != "main":
-                prefix = component + '/'
+                c.execute(sa_add, [architecture_id])
+
+        self.projectB.commit()
+
+    def do_override_type(self):
+        """Initialize the override_type table."""
+
+        c = self.projectB.cursor()
+        c.execute("DELETE FROM override_type")
+
+        over_add = "INSERT INTO override_type (type) VALUES (%s)"
+
+        for override_type in self.Cnf.ValueList("OverrideType"):
+            c.execute(over_add, [override_type])
+
+        self.projectB.commit()
+
+    def do_priority(self):
+        """Initialize the priority table."""
+
+        c = self.projectB.cursor()
+        c.execute("DELETE FROM priority")
+
+        prio_add = "INSERT INTO priority (priority, level) VALUES (%s, %s)"
+
+        for priority in self.Cnf.SubTree("Priority").List():
+            c.execute(prio_add, [priority, self.Cnf["Priority::%s" % (priority)]])
+
+        self.projectB.commit()
+
+    def do_section(self):
+        """Initialize the section table."""
+
+        c = self.projectB.cursor()
+        c.execute("DELETE FROM section")
+
+        sect_add = "INSERT INTO section (section) VALUES (%s)"
+
+        for component in self.Cnf.SubTree("Component").List():
+            if self.Cnf["Control-Overrides::ComponentPosition"] == "prefix":
+                suffix = ""
+                if component != "main":
+                    prefix = component + '/'
+                else:
+                    prefix = ""
             else:
                 prefix = ""
-        else:
-            prefix = ""
-            if component != "main":
-                suffix = '/' + component
-            else:
-                suffix = ""
-        for section in Cnf.ValueList("Section"):
-            projectB.query("INSERT INTO section (section) VALUES "
-                           "('%s%s%s')" % (prefix, section, suffix))
-    projectB.query("COMMIT WORK")
+                if component != "main":
+                    suffix = '/' + component
+                else:
+                    suffix = ""
+            for section in self.Cnf.ValueList("Section"):
+                c.execute(sect_add, [prefix + section + suffix])
+
+        self.projectB.commit()
+
+    def do_all(self):
+        self.do_archive()
+        self.do_architecture()
+        self.do_component()
+        self.do_location()
+        self.do_suite()
+        self.do_override_type()
+        self.do_priority()
+        self.do_section()
 
 ################################################################################
 
 def main ():
     """Sync the dak.conf configuration file and the SQL database"""
 
-    global Cnf, projectB
-
     Cnf = utils.get_conf()
     arguments = [('h', "help", "Init-DB::Options::Help")]
     for i in [ "help" ]:
@@ -211,18 +236,11 @@ def main ():
         utils.warn("dak init-db takes no arguments.")
         usage(exit_code=1)
 
-    projectB = pg.connect(Cnf["DB::Name"], Cnf["DB::Host"],
-                          int(Cnf["DB::Port"]))
-    database.init(Cnf, projectB)
-
-    do_archive()
-    do_architecture()
-    do_component()
-    do_location()
-    do_suite()
-    do_override_type()
-    do_priority()
-    do_section()
+    # Just let connection failures be reported to the user
+    projectB = DBConn()
+    Cnf = Config()
+
+    InitDB(Cnf, projectB).do_all()
 
 ################################################################################
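For readers comparing the two styles: the old code interpolated already-quoted strings into the SQL (which is why sql_get() used to return "'value'" or "NULL"), whereas the psycopg2 port hands the values to execute() and lets the driver do the quoting, with Python None mapping to SQL NULL. A minimal sketch of the new pattern, assuming psycopg2 is installed, a PostgreSQL database named projectb is reachable and already carries the dak schema; the row inserted here is purely illustrative:

    import psycopg2

    # Connect and take a cursor, much as DBConn does internally.
    conn = psycopg2.connect("dbname=projectb")
    c = conn.cursor()

    # Placeholders are always %s regardless of column type; psycopg2 escapes
    # each value and turns None into SQL NULL, which is why sql_get() now
    # returns None instead of the string "NULL".
    archive_add = "INSERT INTO archive (name, origin_server, description) VALUES (%s, %s, %s)"
    c.execute(archive_add, ["example-archive", None, "illustrative row only"])

    # psycopg2 starts a transaction implicitly; nothing becomes visible to
    # other sessions until commit(), mirroring the old BEGIN WORK/COMMIT WORK.
    conn.commit()
    conn.close()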
 
diff --git a/daklib/Config.py b/daklib/Config.py
new file mode 100644 (file)
index 0000000..518d4de
--- /dev/null
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+# Config access class
+# Copyright (C) 2008  Mark Hymers <mhy@debian.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+# <NCommander> mhy, how about "Now with 20% more monty python references"
+
+################################################################################
+
+import apt_pkg
+import socket
+
+from Singleton import Singleton
+
+################################################################################
+
+default_config = "/etc/dak/dak.conf"
+
+def which_conf_file(Cnf):
+    res = socket.gethostbyaddr(socket.gethostname())
+    if Cnf.get("Config::" + res[0] + "::DakConfig"):
+        return Cnf["Config::" + res[0] + "::DakConfig"]
+    else:
+        return default_config
+
+class Config(Singleton):
+    """
+    A Config object is a singleton containing
+    information about the DAK configuration
+    """
+    def __init__(self, *args, **kwargs):
+        super(Config, self).__init__(*args, **kwargs)
+
+    def _readconf(self):
+        apt_pkg.init()
+
+        self.Cnf = apt_pkg.newConfiguration()
+
+        apt_pkg.ReadConfigFileISC(self.Cnf, default_config)
+
+        # Check whether our dak.conf was the real one or
+        # just a pointer to our main one
+        res = socket.gethostbyaddr(socket.gethostname())
+        conffile = self.Cnf.get("Config::" + res[0] + "::DakConfig")
+        if conffile:
+            apt_pkg.ReadConfigFileISC(self.Cnf, conffile)
+
+        # Rebind some functions
+        # TODO: Clean this up
+        self.get = self.Cnf.get
+        self.SubTree = self.Cnf.SubTree
+        self.ValueList = self.Cnf.ValueList
+
+    def _startup(self, *args, **kwargs):
+        self._readconf()
+
+    def __getitem__(self, name):
+        return self.Cnf[name]
+
+    def GetDBConnString(self):
+        s = "dbname=%s" % self.Cnf["DB::Name"]
+        if self.Cnf["DB::Host"]:
+            s += " host=%s" % self.Cnf["DB::Host"]
+        if self.Cnf["DB::Port"] and self.Cnf["DB::Port"] != "-1":
+            s += " port=%s" % self.Cnf["DB::Port"]
+
+        return s
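GetDBConnString() assembles a libpq keyword/value string from the DB::* settings, appending host and port only when they are actually configured (port -1 means "unset"), so local Unix-socket connections keep working. A standalone sketch of the same assembly logic, using a plain dict in place of the apt_pkg configuration tree; the settings shown are made-up examples:

    def build_conn_string(cnf):
        # Mirrors Config.GetDBConnString(): dbname is mandatory, host and
        # port are only added when set.
        s = "dbname=%s" % cnf["DB::Name"]
        if cnf["DB::Host"]:
            s += " host=%s" % cnf["DB::Host"]
        if cnf["DB::Port"] and cnf["DB::Port"] != "-1":
            s += " port=%s" % cnf["DB::Port"]
        return s

    # Local socket connection: host empty, port -1.
    print(build_conn_string({"DB::Name": "projectb", "DB::Host": "", "DB::Port": "-1"}))
    # -> dbname=projectb

    # TCP connection: host and port both end up in the string.
    print(build_conn_string({"DB::Name": "projectb", "DB::Host": "db.example.org", "DB::Port": "5433"}))
    # -> dbname=projectb host=db.example.org port=5433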
diff --git a/daklib/DBConn.py b/daklib/DBConn.py
new file mode 100644 (file)
index 0000000..05bf32d
--- /dev/null
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+
+# DB access class
+# Copyright (C) 2008  Mark Hymers <mhy@debian.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+# < mhy> I need a funny comment
+# < sgran> two peanuts were walking down a dark street
+# < sgran> one was a-salted
+#  * mhy looks up the definition of "funny"
+
+################################################################################
+
+import psycopg2
+from psycopg2.extras import DictCursor
+
+from Singleton import Singleton
+from Config import Config
+
+################################################################################
+
+class Cache(object):
+    def __init__(self, hashfunc=None):
+        if hashfunc:
+            self.hashfunc = hashfunc
+        else:
+            self.hashfunc = lambda x: x['value']
+
+        self.data = {}
+
+    def SetValue(self, keys, value):
+        self.data[self.hashfunc(keys)] = value
+
+    def GetValue(self, keys):
+        return self.data.get(self.hashfunc(keys))
+
+################################################################################
+
+class DBConn(Singleton):
+    """
+    A DBConn object is a singleton containing
+    information about the connection to the SQL Database
+    """
+    def __init__(self, *args, **kwargs):
+        super(DBConn, self).__init__(*args, **kwargs)
+
+    def _startup(self, *args, **kwargs):
+        self.__createconn()
+        self.__init_caches()
+
+    ## Connection functions
+    def __createconn(self):
+        connstr = Config().GetDBConnString()
+        self.db_con = psycopg2.connect(connstr)
+
+    def reconnect(self):
+        try:
+            self.db_con.close()
+        except psycopg2.InterfaceError:
+            pass
+
+        self.db_con = None
+        self.__createconn()
+
+    ## Cache functions
+    def __init_caches(self):
+        self.caches = {'suite':         Cache(), 
+                       'section':       Cache(),
+                       'priority':      Cache(),
+                       'override_type': Cache(),
+                       'architecture':  Cache(),
+                       'archive':       Cache(),
+                       'component':     Cache(),
+                       'location':      Cache(lambda x: '%s_%s_%s' % (x['location'], x['component'], x['archive'])),
+                       'maintainer':    {}, # TODO
+                       'keyring':       {}, # TODO
+                       'source':        Cache(lambda x: '%s_%s_' % (x['source'], x['version'])),
+                       'files':         {}, # TODO
+                       'maintainer':    {}, # TODO
+                       'fingerprint':   {}, # TODO
+                       'queue':         {}, # TODO
+                       'uid':           {}, # TODO
+                       'suite_version': Cache(lambda x: '%s_%s' % (x['source'], x['suite'])),
+                      }
+
+    def clear_caches(self):
+        self.__init_caches()
+
+    ## Functions to pass through to the database connector
+    def cursor(self):
+        return self.db_con.cursor()
+
+    def commit(self):
+        return self.db_con.commit()
+
+    ## Get functions
+    def __get_single_id(self, query, values, cachename=None):
+        # This is a bit of a hack but it's an internal function only
+        if cachename is not None:
+            res = self.caches[cachename].GetValue(values)
+            if res:
+                return res
+
+        c = self.db_con.cursor()
+        c.execute(query, values)
+
+        if c.rowcount != 1:
+            return None
+
+        res = c.fetchone()[0]
+
+        if cachename is not None:
+            self.caches[cachename].SetValue(values, res)
+            
+        return res
+   
+    def __get_id(self, retfield, table, qfield, value):
+        query = "SELECT %s FROM %s WHERE %s = %%(value)s" % (retfield, table, qfield)
+        return self.__get_single_id(query, {'value': value}, cachename=table)
+
+    def get_suite_id(self, suite):
+        return self.__get_id('id', 'suite', 'suite_name', suite)
+
+    def get_section_id(self, section):
+        return self.__get_id('id', 'section', 'section', section)
+
+    def get_priority_id(self, priority):
+        return self.__get_id('id', 'priority', 'priority', priority)
+
+    def get_override_type_id(self, override_type):
+        return self.__get_id('id', 'override_type', 'override_type', override_type)
+
+    def get_architecture_id(self, architecture):
+        return self.__get_id('id', 'architecture', 'arch_string', architecture)
+
+    def get_archive_id(self, archive):
+        return self.__get_id('id', 'archive', 'lower(name)', archive)
+
+    def get_component_id(self, component):
+        return self.__get_id('id', 'component', 'lower(name)', component)
+
+    def get_location_id(self, location, component, archive):
+        archive_id = self.get_archive_id(archive)
+
+        if not archive_id:
+            return None
+
+        res = None
+
+        if component:
+            component_id = self.get_component_id(component)
+            if component_id:
+                res = self.__get_single_id("SELECT id FROM location WHERE path=%(location)s AND component=%(component)s AND archive=%(archive)s",
+                        {'location': location, 'archive': archive_id, 'component': component_id}, cachename='location')
+        else:
+            res = self.__get_single_id("SELECT id FROM location WHERE path=%(location)s AND archive=%(archive)s",
+                    {'location': location, 'archive': archive_id, 'component': ''}, cachename='location')
+
+        return res
+
+    def get_source_id(self, source, version):
+        return self.__get_single_id("SELECT id FROM source s WHERE s.source=%(source)s AND s.version=%(version)s",
+                                 {'source': source, 'version': version}, cachename='source')
+
+    def get_suite_version(self, source, suite):
+        return self.__get_single_id("""
+        SELECT s.version FROM source s, suite su, src_associations sa
+        WHERE sa.source=s.id
+          AND sa.suite=su.id
+          AND su.suite_name=%(suite)s
+          AND s.source=%(source)s""", {'suite': suite, 'source': source}, cachename='suite_version')
+
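All of the get_*_id() helpers funnel through __get_single_id(), which runs a parameterised SELECT and memoises the answer in the per-table Cache objects, so repeated lookups during a run do not hit the database again. A usage sketch, assuming the dak tree is on the Python path and that psycopg2, python-apt, an /etc/dak/dak.conf and a populated projectb database are available; the suite name is just an example value:

    from daklib.DBConn import DBConn

    db = DBConn()                        # singleton: later DBConn() calls return this object
    sid = db.get_suite_id("unstable")    # parameterised SELECT against the suite table
    sid2 = db.get_suite_id("unstable")   # served from the 'suite' cache if the first lookup succeeded
    print(sid)                           # the suite's id, or None if no such suite exists

    db.clear_caches()                    # drop the memoised ids, e.g. after inserting new rows
    db.commit()                          # commit() and cursor() pass straight through to psycopg2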
diff --git a/daklib/Singleton.py b/daklib/Singleton.py
new file mode 100644 (file)
index 0000000..456d2cc
--- /dev/null
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# vim:set et ts=4 sw=4:
+
+# Singleton pattern code
+# Copyright (C) 2008  Mark Hymers <mhy@debian.org>
+
+# Inspiration for this very simple ABC was taken from various documents /
+# tutorials / mailing lists.  This may not be thread safe but given that
+# (as I write) large chunks of dak aren't even type-safe, I'll live with
+# it for now
+
+################################################################################
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+################################################################################
+
+# < sgran> NCommander: in SQL, it's better to join than to repeat information
+# < tomv_w> that makes SQL the opposite to Debian mailing lists!
+
+################################################################################
+
+"""
+This class set implements objects that may need to be instantiated multiple
+times, but we don't want the overhead of actually creating and init'ing
+them more than once.  It also saves us using globals all over the place
+"""
+
+class Singleton(object):
+    """This is the ABC for other dak Singleton classes"""
+    __single = None
+    def __new__(cls, *args, **kwargs):
+        # Check to see if a __single exists already for this class
+        # Compare class types instead of just looking for None so
+        # that subclasses will create their own __single objects
+        if cls != type(cls.__single):
+            cls.__single = object.__new__(cls, *args, **kwargs)
+            cls.__single._startup(*args, **kwargs)
+        return cls.__single
+
+    def __init__(self, *args, **kwargs):
+        if type(self) is Singleton:
+            raise NotImplementedError("Singleton is an ABC")
+
+    def _startup(self):
+        """
+        _startup is a private method used instead of __init__ due to the way
+        we instantiate this object
+        """
+        raise NotImplementedError("Singleton is an ABC")
+
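Config and DBConn both derive from this base class: __new__ hands back the one shared instance per subclass and runs _startup() only on first construction. A minimal sketch with a hypothetical subclass, assuming daklib is importable:

    from daklib.Singleton import Singleton

    class Counter(Singleton):
        # Hypothetical subclass, used only to demonstrate the behaviour.
        def _startup(self, *args, **kwargs):
            # Runs exactly once, on the first Counter() call.
            self.calls = 0

    a = Counter()
    b = Counter()            # the same object comes back; _startup() is not re-run
    a.calls += 1
    print(a is b)            # -> True  (one shared instance per subclass)
    print(b.calls)           # -> 1     (_startup() ran exactly once)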