2008-08-15 Mark Hymers <mhy@debian.org>
+ * dak/process_accepted.py, dak/process_unchecked.py,
+ daklib/database.py: Don't change get_files_id to use sha1sum and
+ sha256sum.
+
* setup/init_pool.sql, dak/check_archive.py, dak/decode_dot_dak.py,
dak/process_accepted.py, dak/process_unchecked.py, daklib/database.py,
daklib/queue.py, daklib/utils.py: Attempt to add sha1sum and
sha256sum support.
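
The hunks below show the revert in dak/process_accepted.py, dak/process_unchecked.py
and daklib/database.py: get_files_id() goes back to matching on filename, size and
md5sum only, while set_files_id() still records the new sha1sum and sha256sum
columns. For orientation, here is a minimal, hypothetical sketch of how the three
digests themselves can be computed; dak's real checksum code lives in
daklib/utils.py and is not shown in these hunks:

import hashlib

def compute_checksums(path, blocksize=65536):
    # Hypothetical helper: stream the file once, feeding all three
    # digests dak stores for pool files (md5sum, sha1sum, sha256sum).
    md5, sha1, sha256 = hashlib.md5(), hashlib.sha1(), hashlib.sha256()
    f = open(path, 'rb')
    try:
        chunk = f.read(blocksize)
        while chunk:
            md5.update(chunk)
            sha1.update(chunk)
            sha256.update(chunk)
            chunk = f.read(blocksize)
    finally:
        f.close()
    return md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest()
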
# If the .orig.tar.gz is already in the pool, its
# files id is stored in dsc_files by check_dsc().
files_id = dsc_files[dsc_file].get("files id", None)
if files_id == None:
- files_id = database.get_files_id(filename, dsc_files[dsc_file]["size"], dsc_files[dsc_file]["md5sum"], files[file]["sha1sum"], files[file]["sha256sum"], dsc_location_id)
+ files_id = database.get_files_id(filename, dsc_files[dsc_file]["size"], dsc_files[dsc_file]["md5sum"], dsc_location_id)
# FIXME: needs to check for -1/-2 and/or handle exception
if files_id == None:
files_id = database.set_files_id (filename, dsc_files[dsc_file]["size"], dsc_files[dsc_file]["md5sum"], files[file]["sha1sum"], files[file]["sha256sum"], dsc_location_id)
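
The FIXME notes that this caller ignores get_files_id()'s error returns. The
contract, visible in the next hunk and in daklib/database.py further down, is:
None when no row exists, -1 when the query matches more than one row, and -2
when size or md5sum disagree with the stored row. A self-contained, hypothetical
mock of that lookup-then-insert flow (a dict-backed store cannot reproduce the
-1 case):

_files = {}  # (filename, location_id) -> (files_id, size, md5sum)

def mock_get_files_id(filename, size, md5sum, location_id):
    # None: unknown file; -2: stored size/md5sum disagree with the caller's.
    row = _files.get((filename, location_id))
    if row is None:
        return None
    files_id, orig_size, orig_md5sum = row
    if orig_size != int(size) or orig_md5sum != md5sum:
        return -2
    return files_id

def mock_set_files_id(filename, size, md5sum, sha1sum, sha256sum, location_id):
    # Insert, then resolve the id by re-querying, as set_files_id() does.
    _files[(filename, location_id)] = (len(_files) + 1, int(size), md5sum)
    return mock_get_files_id(filename, size, md5sum, location_id)

files_id = mock_get_files_id("h/hello/hello_1.0.dsc", 1234, "fakemd5", 1)
if files_id is None:
    files_id = mock_set_files_id("h/hello/hello_1.0.dsc", 1234,
                                 "fakemd5", "fakesha1", "fakesha256", 1)
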
# Check the md5sum & size against existing files (if any)
files[f]["pool name"] = utils.poolify (changes["source"], files[f]["component"])
- files_id = database.get_files_id(files[f]["pool name"] + f, files[f]["size"], files[f]["md5sum"], files[f]["sha1sum"], files[f]["sha256sum"], files[f]["location id"])
+ files_id = database.get_files_id(files[f]["pool name"] + f, files[f]["size"], files[f]["md5sum"], files[f]["location id"])
if files_id == -1:
reject("INTERNAL ERROR, get_files_id() returned multiple matches for %s." % (f))
elif files_id == -2:
- reject("md5sum, sha1sum, sha256sum and/or size mismatch on existing copy of %s." % (f))
+ reject("md5sum and/or size mismatch on existing copy of %s." % (f))
files[f]["files id"] = files_id
# Check for packages that have moved from one component to another
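
utils.poolify() above builds the package's directory under the pool tree. A
hypothetical re-sketch of the Debian pool layout convention it encodes (lib*
source packages get a four-character prefix directory, everything else the first
letter of the source name); the remaining hunks below are from get_files_id()
and set_files_id() in daklib/database.py:

def poolify_sketch(source, component):
    # Debian pool layout: "main/h/hello/" for hello,
    # "main/libf/libfoo/" for libfoo.
    if source.startswith("lib"):
        prefix = source[:4]
    else:
        prefix = source[:1]
    return "%s/%s/%s/" % (component, prefix, source)

assert poolify_sketch("hello", "main") == "main/h/hello/"
assert poolify_sketch("libfoo", "main") == "main/libf/libfoo/"
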
################################################################################
-def get_files_id (filename, size, md5sum, sha1sum, sha256sum, location_id):
+def get_files_id (filename, size, md5sum, location_id):
global files_id_cache
    cache_key = "%s_%d" % (filename, location_id)
    if files_id_cache.has_key(cache_key):
        return files_id_cache[cache_key]
size = int(size)
- q = projectB.query("SELECT id, size, md5sum, sha1sum, sha256sum FROM files WHERE filename = '%s' AND location = %d" % (filename, location_id))
+ q = projectB.query("SELECT id, size, md5sum FROM files WHERE filename = '%s' AND location = %d" % (filename, location_id))
ql = q.getresult()
if ql:
        if len(ql) != 1:
            return -1
        ql = ql[0]
orig_size = int(ql[1])
orig_md5sum = ql[2]
- orig_sha1sum = ql[3]
- orig_sha256sum = ql[4]
- if orig_size != size or orig_md5sum != md5sum or orig_sha1sum != sha1sum or orig_sha256sum != sha256sum:
+ if orig_size != size or orig_md5sum != md5sum:
return -2
files_id_cache[cache_key] = ql[0]
return files_id_cache[cache_key]
projectB.query("INSERT INTO files (filename, size, md5sum, sha1sum, sha256sum, location) VALUES ('%s', %d, '%s', %d)" % (filename, long(size), md5sum, sha1sum, sha256sum, location_id))
- return get_files_id (filename, size, md5sum, sha1sum, sha256sum, location_id)
+ return get_files_id (filename, size, md5sum, location_id)
### currval has issues with postgresql 7.1.3 when the table is big
### it was taking ~3 seconds to return on auric which is very Not
### Cool(tm).
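
The trailing comment explains why set_files_id() resolves the new row's id by
re-querying through get_files_id() rather than via currval('files_id_seq'),
which was slow on the PostgreSQL of the day. A hypothetical alternative on
PostgreSQL 8.2 or later is INSERT ... RETURNING, which hands back the id in the
same round trip; this sketch assumes the same pygresql projectB handle as the
surrounding code and is not part of this commit:

def set_files_id_returning(filename, size, md5sum, sha1sum, sha256sum, location_id):
    # Hypothetical variant, not in dak: assumes PostgreSQL >= 8.2
    # and that projectB.query() returns the RETURNING result set.
    q = projectB.query("INSERT INTO files (filename, size, md5sum, sha1sum, sha256sum, location) VALUES ('%s', %d, '%s', '%s', '%s', %d) RETURNING id" % (filename, long(size), md5sum, sha1sum, sha256sum, location_id))
    return q.getresult()[0][0]

As in the surrounding code, values are interpolated straight into the SQL
string; a modern rewrite would use parameterized queries instead.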