import os
import stat
+
+import apt_pkg
+import datetime
from cPickle import Unpickler, Pickler
from errno import EPERM
from apt_pkg import ParseSection
from utils import open_file, fubar, poolify
+from config import *
+from dbconn import *
###############################################################################
return summary
-
- def load_dot_dak(self, changesfile):
- """
- Update ourself by reading a previously created cPickle .dak dumpfile.
- """
-
- self.changes_file = changesfile
- dump_filename = self.changes_file[:-8]+".dak"
- dump_file = open_file(dump_filename)
-
- p = Unpickler(dump_file)
-
- self.changes.update(p.load())
- self.dsc.update(p.load())
- self.files.update(p.load())
- self.dsc_files.update(p.load())
-
- next_obj = p.load()
- if type(next_obj) is DictType:
- self.pkg.orig_files.update(next_obj)
+ @session_wrapper
+ def remove_known_changes(self, session=None):
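+        """Remove the known_changes (DBChange) row for this changes file."""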
+ session.delete(get_dbchange(self.changes_file, session))
+
+    def mark_missing_fields(self):
+        """Fill in "missing" for fields the known_changes table requires but the .changes lacks."""
+        for key in ['urgency', 'maintainer', 'fingerprint', 'changed-by']:
+            if (not self.changes.has_key(key)) or (not self.changes[key]):
+                self.changes[key] = 'missing'
+
+ def __get_file_from_pool(self, filename, entry, session):
+ cnf = Config()
+
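+        # Derive the file's expected pool directory (e.g. pool/main/h/hello/)
+        # from its source package and component.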
+ poolname = poolify(entry["source"], entry["component"])
+        l = get_location(cnf["Dir::Pool"], entry["component"], session=session)
+        if l is None:
+            # No location row for this component; we cannot verify a pool file.
+            return None
+
+ found, poolfile = check_poolfile(os.path.join(poolname, filename),
+ entry['size'],
+ entry["md5sum"],
+ l.location_id,
+ session=session)
+
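+        # check_poolfile returns (None, _) when several pool rows match,
+        # (False, None) when the file is unknown, (False, file) on a
+        # size/md5sum mismatch and (True, file) when everything matches.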
+        if found is None:
+            Logger.log(["E: Found multiple files in pool for %s (component %s)" % (filename, entry["component"])])
+            return None
+        elif found is False and poolfile is not None:
+            Logger.log(["E: md5sum/size mismatch for %s in pool" % (filename,)])
+            return None
else:
- # Auto-convert old dak files to new format supporting
- # multiple tarballs
- orig_tar_gz = None
- for dsc_file in self.dsc_files.keys():
- if dsc_file.endswith(".orig.tar.gz"):
- orig_tar_gz = dsc_file
- self.orig_files[orig_tar_gz] = {}
- if next_obj != None:
- self.orig_files[orig_tar_gz]["id"] = next_obj
- next_obj = p.load()
- if next_obj != None and next_obj != "":
- self.orig_files[orig_tar_gz]["location"] = next_obj
- if len(self.orig_files[orig_tar_gz]) == 0:
- del self.orig_files[orig_tar_gz]
-
- dump_file.close()
-
- def sanitised_files(self):
- ret = {}
- for name, entry in self.files.items():
- ret[name] = {}
- for i in CHANGESFIELDS_FILES:
- if entry.has_key(i):
- ret[name][i] = entry[i]
-
- return ret
-
- def sanitised_changes(self):
- ret = {}
- # Mandatory changes fields
- for i in CHANGESFIELDS_MANDATORY:
- ret[i] = self.changes[i]
-
- # Optional changes fields
- for i in CHANGESFIELDS_OPTIONAL:
- if self.changes.has_key(i):
- ret[i] = self.changes[i]
-
- return ret
-
- def sanitised_dsc(self):
- ret = {}
- for i in CHANGESFIELDS_DSC:
- if self.dsc.has_key(i):
- ret[i] = self.dsc[i]
-
- return ret
-
- def sanitised_dsc_files(self):
- ret = {}
- for name, entry in self.dsc_files.items():
- ret[name] = {}
- # Mandatory dsc_files fields
- for i in CHANGESFIELDS_DSCFILES_MANDATORY:
- ret[name][i] = entry[i]
-
- # Optional dsc_files fields
- for i in CHANGESFIELDS_DSCFILES_OPTIONAL:
- if entry.has_key(i):
- ret[name][i] = entry[i]
-
- return ret
-
- def sanitised_orig_files(self):
- ret = {}
- for name, entry in self.orig_files.items():
- ret[name] = {}
- # Optional orig_files fields
- for i in CHANGESFIELDS_ORIGFILES:
- if entry.has_key(i):
- ret[name][i] = entry[i]
-
- return ret
-
- def write_dot_dak(self, dest_dir):
- """
- Dump ourself into a cPickle file.
+ if poolfile is None:
+ Logger.log(["E: Could not find %s in pool" % (chg_fn)])
+ return None
+ else:
+ return poolfile
- @type dest_dir: string
- @param dest_dir: Path where the dumpfile should be stored
+ @session_wrapper
+ def add_known_changes(self, dirpath, in_queue=None, session=None):
+ """add "missing" in fields which we will require for the known_changes table"""
+ cnf = Config()
- @note: This could just dump the dictionaries as is, but I'd like to avoid this so
- there's some idea of what process-accepted & process-new use from
- process-unchecked. (JT)
+ changesfile = os.path.join(dirpath, self.changes_file)
+ filetime = datetime.datetime.fromtimestamp(os.path.getctime(changesfile))
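+        # The .changes file's ctime serves as the "seen" timestamp below.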
- """
+ self.mark_missing_fields()
- dump_filename = os.path.join(dest_dir, self.changes_file[:-8] + ".dak")
- dump_file = open_file(dump_filename, 'w')
-
- try:
- os.chmod(dump_filename, 0664)
- except OSError, e:
- # chmod may fail when the dumpfile is not owned by the user
- # invoking dak (like e.g. when NEW is processed by a member
- # of ftpteam)
- if e.errno == EPERM:
- perms = stat.S_IMODE(os.stat(dump_filename)[stat.ST_MODE])
- # security precaution, should never happen unless a weird
- # umask is set anywhere
- if perms & stat.S_IWOTH:
- fubar("%s is world writable and chmod failed." % \
- (dump_filename,))
- # ignore the failed chmod otherwise as the file should
- # already have the right privileges and is just, at worst,
- # unreadable for world
+ multivalues = {}
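+        # Distribution, architecture and binary may have been parsed into
+        # dicts; flatten those to space-separated strings for the DB columns.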
+ for key in ("distribution", "architecture", "binary"):
+ if isinstance(self.changes[key], dict):
+ multivalues[key] = " ".join(self.changes[key].keys())
else:
- raise
+ multivalues[key] = self.changes[key]
+
+ chg = DBChange()
+ chg.changesname = self.changes_file
+ chg.seen = filetime
+ chg.in_queue_id = in_queue
+ chg.source = self.changes["source"]
+ chg.binaries = multivalues["binary"]
+ chg.architecture = multivalues["architecture"]
+ chg.version = self.changes["version"]
+ chg.distribution = multivalues["distribution"]
+ chg.urgency = self.changes["urgency"]
+ chg.maintainer = self.changes["maintainer"]
+ chg.fingerprint = self.changes["fingerprint"]
+ chg.changedby = self.changes["changed-by"]
+ chg.date = self.changes["date"]
+
+ session.add(chg)
+
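+        # Record every file listed in the .changes as a pending file; any
+        # file already moved out of the queue is linked from the pool instead.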
+ files = []
+ for chg_fn, entry in self.files.items():
+ try:
+ f = open(os.path.join(dirpath, chg_fn))
+ cpf = ChangePendingFile()
+ cpf.filename = chg_fn
+ cpf.size = entry['size']
+ cpf.md5sum = entry['md5sum']
+
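+                # Prefer checksums supplied by the .changes entry and hash
+                # the file ourselves only when they are missing.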
+ if entry.has_key('sha1sum'):
+ cpf.sha1sum = entry['sha1sum']
+ else:
+ f.seek(0)
+ cpf.sha1sum = apt_pkg.sha1sum(f)
+
+ if entry.has_key('sha256sum'):
+ cpf.sha256sum = entry['sha256sum']
+ else:
+ f.seek(0)
+ cpf.sha256sum = apt_pkg.sha256sum(f)
+
+ session.add(cpf)
+ files.append(cpf)
+ f.close()
+
+ except IOError:
+ # Can't find the file, try to look it up in the pool
+ poolfile = self.__get_file_from_pool(chg_fn, entry, session)
+ if poolfile:
+ chg.poolfiles.append(poolfile)
+
+ chg.files = files
+
+ # Add files referenced in .dsc, but not included in .changes
+ for name, entry in self.dsc_files.items():
+ if self.files.has_key(name):
+ continue
- p = Pickler(dump_file, 1)
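+            # .dsc entries carry no source field of their own; borrow it from
+            # the .changes so __get_file_from_pool can derive the pool path.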
+ entry['source'] = self.changes['source']
+ poolfile = self.__get_file_from_pool(name, entry, session)
+ if poolfile:
+ chg.poolfiles.append(poolfile)
- p.dump(self.sanitised_changes())
- p.dump(self.sanitised_dsc())
- p.dump(self.sanitised_files())
- p.dump(self.sanitised_dsc_files())
- p.dump(self.sanitised_orig_files())
+ session.commit()
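+        # Re-fetch the row after the commit so the caller gets the DBChange
+        # as stored in the database.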
+        chg = session.query(DBChange).filter_by(changesname=self.changes_file).one()
- dump_file.close()
+ return chg
def unknown_files_fields(self, name):
return sorted(list( set(self.files[name].keys()) -