-    # Build up a list of potentially new things
-    for file in files.keys():
-        f = files[file]
-        # Skip byhand elements
-        if f["type"] == "byhand":
-            continue
-        pkg = f["package"]
-        priority = f["priority"]
-        section = f["section"]
-        type = get_type(f)
-        component = f["component"]
-
-        if type == "dsc":
-            priority = "source"
-        if not new.has_key(pkg):
-            new[pkg] = {}
-            new[pkg]["priority"] = priority
-            new[pkg]["section"] = section
-            new[pkg]["type"] = type
-            new[pkg]["component"] = component
-            new[pkg]["files"] = []
-        else:
-            old_type = new[pkg]["type"]
-            if old_type != type:
-                # source gets trumped by deb or udeb
-                if old_type == "dsc":
-                    new[pkg]["priority"] = priority
-                    new[pkg]["section"] = section
-                    new[pkg]["type"] = type
-                    new[pkg]["component"] = component
-        new[pkg]["files"].append(file)
-        if f.has_key("othercomponents"):
-            new[pkg]["othercomponents"] = f["othercomponents"]
-
-    for suite in changes["suite"].keys():
-        suite_id = database.get_suite_id(suite)
-        for pkg in new.keys():
-            component_id = database.get_component_id(new[pkg]["component"])
-            type_id = database.get_override_type_id(new[pkg]["type"])
-            q = projectB.query("SELECT package FROM override WHERE package = '%s' AND suite = %s AND component = %s AND type = %s" % (pkg, suite_id, component_id, type_id))
-            ql = q.getresult()
-            if ql:
-                for file in new[pkg]["files"]:
-                    if files[file].has_key("new"):
-                        del files[file]["new"]
-                del new[pkg]
-
-    if warn:
-        if changes["suite"].has_key("stable"):
-            print "WARNING: overrides will be added for stable!"
-        if changes["suite"].has_key("oldstable"):
-            print "WARNING: overrides will be added for OLDstable!"
-        for pkg in new.keys():
-            if new[pkg].has_key("othercomponents"):
-                print "WARNING: %s already present in %s distribution." % (pkg, new[pkg]["othercomponents"])
-
-    return new
+    - If signing_rules == -1, no signature is required.
+    - If signing_rules == 0 (the default), a signature is required.
+    - If signing_rules == 1, it turns on the same strict format checking
+      as dpkg-source.
+
+    The rules for (signing_rules == 1)-mode are:
+
+    - The PGP header consists of "-----BEGIN PGP SIGNED MESSAGE-----"
+      followed by any PGP header data and must end with a blank line.
+
+    - The data section must end with a blank line and must be followed by
+      "-----BEGIN PGP SIGNATURE-----".
+    """
+
+    changes_in = open_file(filename)
+    content = changes_in.read()
+    changes_in.close()
+    return parse_deb822(content, signing_rules)
+
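To illustrate the signing_rules modes listed in the docstring above, here is a hedged usage sketch. It assumes the enclosing function is daklib's parse_changes(filename, signing_rules) with signing_rules defaulting to 0; the file names are invented:

    # Sketch only; file names are made up and the enclosing function is
    # assumed to be parse_changes(filename, signing_rules=0).
    signed = parse_changes("example_1.0-1_amd64.changes")              # 0: signature required
    strict = parse_changes("example_1.0-1.dsc", signing_rules=1)       # 1: dpkg-source-style checking
    plain  = parse_changes("unsigned-comments.txt", signing_rules=-1)  # -1: no signature needed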
+################################################################################
+
+def hash_key(hashname):
+    return '%ssum' % hashname
+
+################################################################################
+
+def create_hash(where, files, hashname, hashfunc):
+    """
+    create_hash extends the passed files dict with the given hash by
+    iterating over all files on disk and passing them to the hashing
+    function given.
+    """
+
+    rejmsg = []
+    for f in files.keys():
+        try:
+            file_handle = open_file(f)
+        except CantOpenError:
+            rejmsg.append("Could not open file %s for checksumming" % (f))
+            continue
+
+        files[f][hash_key(hashname)] = hashfunc(file_handle)
+
+        file_handle.close()
+    return rejmsg
+
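A note on the calling convention, sketched below with invented names: hashfunc is any callable that takes an open file object and returns the digest as a string, files maps file names to per-file dicts, and where is just a label describing where the file list came from. The hashlib wrapper is only an illustrative stand-in for whatever digest helper the caller actually uses (hashlib needs Python 2.5 or later):

    import hashlib

    def md5_from_handle(fh):
        # Hypothetical wrapper: read the whole handle, return a hex digest.
        return hashlib.md5(fh.read()).hexdigest()

    files = {"example_1.0-1.dsc": {}, "example_1.0.orig.tar.gz": {}}
    rej = create_hash(".changes", files, "md5", md5_from_handle)
    # If both files exist on disk, rej comes back [] and each per-file dict
    # gains an "md5sum" entry, since hash_key("md5") returns "md5sum".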
+################################################################################
+
+def check_hash(where, files, hashname, hashfunc):
+    """
+    check_hash checks the given hash in the files dict against the actual
+    files on disk. The hash values need to be present consistently in
+    all file entries. It does not modify its input in any way.
+    """
+
+    rejmsg = []
+    for f in files.keys():
+        file_handle = None
+        try:
+            try:
+                file_handle = open_file(f)
+
+                # Check for the hash entry, to not trigger a KeyError.
+                if not files[f].has_key(hash_key(hashname)):
+                    rejmsg.append("%s: misses %s checksum in %s" % (f, hashname,
+                        where))
+                    continue
+
+                # Actually check the hash for correctness.
+                if hashfunc(file_handle) != files[f][hash_key(hashname)]:
+                    rejmsg.append("%s: %s check failed in %s" % (f, hashname,
+                        where))
+            except CantOpenError:
+                # TODO: This happens when the file is in the pool.
+                # warn("Cannot open file %s" % f)
+                continue
+        finally:
+            if file_handle:
+                file_handle.close()
+    return rejmsg
+
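check_hash is the read-only counterpart: each per-file dict must already carry the expected digest under hash_key(hashname), and mismatches come back as rejection messages. A small sketch, reusing the hypothetical md5_from_handle wrapper from the create_hash sketch above and an invented file name:

    files = {"example_1.0-1.dsc": {"md5sum": "0123456789abcdef0123456789abcdef"}}
    for msg in check_hash(".changes", files, "md5", md5_from_handle):
        print "REJECT: %s" % msg
    # A wrong digest yields e.g. "example_1.0-1.dsc: md5 check failed in .changes".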
+################################################################################
+
+def check_size(where, files):
+    """
+    check_size checks the file sizes in the passed files dict against the
+    files on disk.
+    """
+
+    rejmsg = []
+    for f in files.keys():
+        try:
+            entry = os.stat(f)
+        except OSError, exc:
+            if exc.errno == 2:
+                # TODO: This happens when the file is in the pool.
+                continue
+            raise
+
+        actual_size = entry[stat.ST_SIZE]
+        size = int(files[f]["size"])
+        if size != actual_size:
+            rejmsg.append("%s: actual file size (%s) does not match size (%s) in %s"
+                   % (f, actual_size, size, where))
+    return rejmsg
+
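And a matching sketch for check_size, again with an invented file name; note that a file missing from disk (errno 2, ENOENT) is skipped silently, per the TODO above:

    files = {"example_1.0-1.dsc": {"size": "1234"}}
    for msg in check_size(".changes", files):
        print "REJECT: %s" % msg
    # Reported only if the size recorded above differs from what os.stat() sees.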
+################################################################################
+
+def check_hash_fields(what, manifest):
+    """
+    check_hash_fields ensures that there are no checksum fields in the
+    given dict that we do not know about.
+    """
+
+    rejmsg = []
+    hashes = map(lambda x: x[0], known_hashes)
+    for field in manifest:
+        if field.startswith("checksums-"):
+            hashname = field.split("-",1)[1]
+            if hashname not in hashes:
+                rejmsg.append("Unsupported checksum field for %s "\
+                    "in %s" % (hashname, what))
+    return rejmsg
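check_hash_fields relies on known_hashes, defined elsewhere in this module as a sequence of tuples whose first element is the hash name. A sketch with a made-up manifest, assuming for illustration that known_hashes covers only "sha1" and "sha256":

    manifest = {
        "source": "example",
        "checksums-sha1": "(per-file sha1 block)",
        "checksums-ripemd160": "(per-file block)",  # not in known_hashes, so rejected
    }
    for msg in check_hash_fields(".changes", manifest):
        print "REJECT: %s" % msg
    # Yields "Unsupported checksum field for ripemd160 in .changes".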