# Determine what parts in a .changes are NEW
-def determine_new(filename, changes, files, warn=1, session = None, dsc = None):
+def determine_new(filename, changes, files, warn=1, session = None, dsc = None, new = None):
"""
Determine what parts in a C{changes} file are NEW.
@type dsc: Upload.Pkg.dsc dict
@param dsc: (optional); Dsc dictionary
+ @type new: dict
+ @param new: dictionary of NEW packages as returned by a previous call to this function; pass it in again when override information may have changed since that call
+
@rtype: dict
@return: dictionary of NEW components.
"""
# TODO: This should all use the database instead of parsing the changes
# file again
- new = {}
+ if new is None:
+     new = {}
byhand = {}
dbchg = get_dbchange(filename, session)
# Try to get the Package-Set field from an included .dsc file (if possible).
if dsc:
- new = build_package_set(dsc, session)
+ for package, entry in build_package_set(dsc, session).items():
+ if not new.has_key(package):
+ new[package] = entry
# Build up a list of potentially new things
for name, f in files.items():
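
The merge above only adds packages that are not already present in `new`, so entries carried over from an earlier run keep their (possibly hand-edited) override data. A minimal self-contained sketch of that rule, using plain dicts rather than dak's file entries:

```python
# Sketch only, not dak code: entries already present in `new` win over
# freshly computed package-set entries.
def merge_package_set(new, package_set):
    for package, entry in package_set.items():
        if package not in new:
            new[package] = entry
    return new

previous = {"foo": {"section": "utils", "priority": "optional"}}
fresh = {"foo": {"section": "misc"}, "foo-doc": {"section": "doc"}}
print(merge_package_set(previous, fresh))
# "foo" keeps its original entry; "foo-doc" is added
```
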
# Check any one-off upload blocks
self.check_upload_blocks(fpr, session)
- # Start with DM as a special case
+ # If the source_acl is None, source is never allowed
+ if fpr.source_acl is None:
+ if self.pkg.changes["architecture"].has_key("source"):
+ rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
+ rej += '\nPlease contact ftpmaster if you think this is incorrect'
+ self.rejects.append(rej)
+ return
+ # Do DM as a special case
# DM is a special case unfortunately, so we check it before the general case
# (keys with no source access get more access than DMs in one
# way; DMs can only upload for their packages whether source
# or binary, whereas keys with no access might be able to
# upload some binaries)
- if fpr.source_acl.access_level == 'dm':
+ elif fpr.source_acl.access_level == 'dm':
self.check_dm_upload(fpr, session)
else:
- # Check source-based permissions for other types
- if self.pkg.changes["architecture"].has_key("source") and \
- fpr.source_acl.access_level is None:
- rej = 'Fingerprint %s may not upload source' % fpr.fingerprint
- rej += '\nPlease contact ftpmaster if you think this is incorrect'
- self.rejects.append(rej)
- return
# If not a DM, we allow full upload rights
uid_email = "%s@debian.org" % (fpr.uid.uid)
self.check_if_upload_is_sponsored(uid_email, fpr.uid.name)
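
For reference, the source-upload decision above collapses to a three-way branch on the key's source ACL. A hedged, self-contained summary (plain strings stand in for dak's ACL objects and reject handling):

```python
# Sketch only, not dak code: no source ACL means source uploads are rejected
# outright, 'dm' gets the per-package DM checks, anything else gets full
# upload rights plus the sponsorship check.
def source_upload_action(source_acl, uploads_source):
    # source_acl: None, or the ACL's access_level string ('dm', 'full', ...)
    if source_acl is None:
        return "reject" if uploads_source else "continue (binary-only checks)"
    if source_acl == "dm":
        return "check_dm_upload"
    return "full upload rights + sponsorship check"

print(source_upload_action(None, True))    # reject
print(source_upload_action("dm", True))    # check_dm_upload
print(source_upload_action("full", True))  # full upload rights + sponsorship check
```
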
if len(tmparches.keys()) > 0:
if fpr.binary_reject:
- rej = ".changes file contains files of architectures not permitted for fingerprint %s" % fpr.fingerprint
- rej += "\narchitectures involved are: ", ",".join(tmparches.keys())
+ rej = "changes file contains files of architectures not permitted for fingerprint %s" % fpr.fingerprint
+ if len(tmparches.keys()) == 1:
+ rej += "\n\narchitecture involved is: %s" % ",".join(tmparches.keys())
+ else:
+ rej += "\n\narchitectures involved are: %s" % ",".join(tmparches.keys())
self.rejects.append(rej)
else:
# TODO: This is where we'll implement reject vs throw away binaries later
## experimental lists the uploader in the Maintainer: or Uploaders: fields (ie,
## non-developer maintainers cannot NMU or hijack packages)
- # srcuploaders includes the maintainer
+ # the uploaders list includes the maintainer
accept = False
- for sup in r.srcuploaders:
- (rfc822, rfc2047, name, email) = sup.maintainer.get_split_maintainer()
+ for uploader in r.uploaders:
+ (rfc822, rfc2047, name, email) = uploader.get_split_maintainer()
# Eww - I hope we never have two people with the same name in Debian
if email == fpr.uid.uid or name == fpr.uid.name:
accept = True
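
The acceptance test above matches the signing key against the source package's uploaders (which include the maintainer). A self-contained sketch of that matching rule, with plain tuples instead of dak's Maintainer objects:

```python
# Sketch only, not dak code: accept when the uploader's email equals the key's
# uid, or, failing that, when the real names match.
def signer_is_uploader(fpr_uid, fpr_name, uploaders):
    # uploaders: list of (name, email) pairs, maintainer included
    return any(email == fpr_uid or name == fpr_name
               for (name, email) in uploaders)

uploaders = [("Jane Doe", "jdoe@debian.org"), ("John Smith", "jsmith@debian.org")]
print(signer_is_uploader("jdoe@debian.org", "Jane Doe", uploaders))        # True
print(signer_is_uploader("someone@example.org", "A. N. Other", uploaders)) # False
```
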
print "Installing."
self.logger.log(["installing changes", self.pkg.changes_file])
+ binaries = []
poolfiles = []
# Add the .dsc file to the DB first
# Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
for newfile, entry in self.pkg.files.items():
if entry["type"] == "deb":
- poolfiles.append(add_deb_to_db(self, newfile, session))
+ b, pf = add_deb_to_db(self, newfile, session)
+ binaries.append(b)
+ poolfiles.append(pf)
# If this is a sourceful diff only upload that is moving
# cross-component we need to copy the .orig files into the new
# Our SQL session will automatically start a new transaction after
# the last commit
+ # Now ensure that the metadata has been added
+ # This has to be done after we copy the files into the pool
+ # For source if we have it:
+ if self.pkg.changes["architecture"].has_key("source"):
+ import_metadata_into_db(source, session)
+
+ # Now for any of our binaries
+ for b in binaries:
+ import_metadata_into_db(b, session)
+
+ session.commit()
+
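
The added block makes the ordering explicit: database rows for the source and binaries are created first, the files are copied into the pool, and only then is the per-package metadata imported and the transaction committed. A rough outline under assumed helper names (only import_metadata_into_db and session.commit are taken from the change itself):

```python
# Outline only, not dak's install(); add_upload_rows() and copy_files_into_pool()
# are hypothetical stand-ins for the surrounding steps.
def install_outline(upload, session):
    source, binaries = add_upload_rows(upload, session)   # .dsc and .deb/.udeb rows
    copy_files_into_pool(upload)                           # files must be in place first
    if source is not None:
        import_metadata_into_db(source, session)           # source metadata
    for b in binaries:
        import_metadata_into_db(b, session)                # per-binary metadata
    session.commit()
```
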
# Move the .changes into the 'done' directory
+ ye, mo, da = time.gmtime()[0:3]
+ donedir = os.path.join(cnf["Dir::Queue::Done"], str(ye), "%0.2d" % mo, "%0.2d" % da)
+ if not os.path.isdir(donedir):
+ os.makedirs(donedir)
+
utils.move(self.pkg.changes_file,
- os.path.join(cnf["Dir::Queue::Done"], os.path.basename(self.pkg.changes_file)))
+ os.path.join(donedir, os.path.basename(self.pkg.changes_file)))
if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
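
With this change the .changes files land in per-day subdirectories of the 'done' queue rather than one flat directory. A self-contained sketch of the resulting layout (the base path is illustrative, not dak configuration):

```python
import os
import time

# Sketch of the path built above; "/srv/queue/done" stands in for Dir::Queue::Done.
ye, mo, da = time.gmtime()[0:3]
donedir = os.path.join("/srv/queue/done", str(ye), "%0.2d" % mo, "%0.2d" % da)
print(donedir)  # e.g. /srv/queue/done/2011/03/07
```
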
"""
Cnf = Config()
anyversion = None
- anysuite = [suite] + Cnf.ValueList("Suite::%s::VersionChecks::Enhances" % (suite))
+ anysuite = [suite] + [ vc.reference.suite_name for vc in get_version_checks(suite, "Enhances") ]
for (s, v) in sv_list:
if s in [ x.lower() for x in anysuite ]:
if not anyversion or apt_pkg.VersionCompare(anyversion, v) <= 0:
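
The suite list for the "Enhances" check now comes from database-backed version-check rows instead of Cnf.ValueList. A self-contained sketch with mocked objects (the namedtuples and the stub stand in for dak's ORM classes and get_version_checks()):

```python
from collections import namedtuple

# Mocked stand-ins, not dak's ORM.
Suite = namedtuple("Suite", "suite_name")
VersionCheck = namedtuple("VersionCheck", "reference")

def get_version_checks_stub(suite_name, check):
    # pretend the database says this suite is enhanced by testing-proposed-updates
    return [VersionCheck(Suite("testing-proposed-updates"))]

suite = "testing"
anysuite = [suite] + [vc.reference.suite_name
                      for vc in get_version_checks_stub(suite, "Enhances")]
print(anysuite)  # ['testing', 'testing-proposed-updates']
```
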
# Check versions for each target suite
for target_suite in self.pkg.changes["distribution"].keys():
- must_be_newer_than = [ i.lower() for i in cnf.ValueList("Suite::%s::VersionChecks::MustBeNewerThan" % (target_suite)) ]
- must_be_older_than = [ i.lower() for i in cnf.ValueList("Suite::%s::VersionChecks::MustBeOlderThan" % (target_suite)) ]
+ must_be_newer_than = [ vc.reference.suite_name for vc in get_version_checks(target_suite, "MustBeNewerThan") ]
+ must_be_older_than = [ vc.reference.suite_name for vc in get_version_checks(target_suite, "MustBeOlderThan") ]
# Enforce "must be newer than target suite" even if conffile omits it
if target_suite not in must_be_newer_than: