X-Git-Url: https://git.decadent.org.uk/gitweb/?a=blobdiff_plain;f=dak%2Fgenerate_packages_sources.py;h=7398f67078bf37f6e6b31d0ab647f9ff6dfbf5eb;hb=e60e50250af1b4b9eaa6c5772c28d96a554284e4;hp=6e91aedae5470474a5130c3bfe59d002816d4682;hpb=3f4a09fa2e560c42df924ebb884c8ddd04516171;p=dak.git

diff --git a/dak/generate_packages_sources.py b/dak/generate_packages_sources.py
index 6e91aeda..7398f670 100755
--- a/dak/generate_packages_sources.py
+++ b/dak/generate_packages_sources.py
@@ -116,6 +116,21 @@ tree "dists/testing"
    SrcOverride "override.squeeze.$(SECTION).src";
 };
 """
+
+    apt_trees["squeeze-volatile"]="""
+tree "dists/squeeze-volatile"
+{
+   FileList "/srv/ftp-master.debian.org/database/dists/squeeze-volatile_$(SECTION)_binary-$(ARCH).list";
+   SourceFileList "/srv/ftp-master.debian.org/database/dists/squeeze-volatile_$(SECTION)_source.list";
+   Sections "main contrib non-free";
+   Architectures "%(arch)s";
+   BinOverride "override.squeeze.$(SECTION)";
+   ExtraOverride "override.squeeze.extra.$(SECTION)";
+   SrcOverride "override.squeeze.$(SECTION).src";
+   Contents " ";
+};
+"""
+
     apt_trees["di"]["testing"]="""
 tree "dists/testing/main"
 {
@@ -274,7 +289,7 @@ tree "dists/proposed-updates/main"
     cnf = Config()
     try:
         # Write apt.conf
-        (ac_fd, ac_name) = mkstemp(dir=tmppath)
+        (ac_fd, ac_name) = mkstemp(dir=tmppath, suffix=suite, prefix=arch)
         os.write(ac_fd, DAILY_APT_CONF)
         # here we want to generate the tree entries
         os.write(ac_fd, apt_trees[suite] % {'arch': arch})
@@ -283,7 +298,7 @@ tree "dists/proposed-updates/main"
         if arch != 'source':
             if arch == 'hurd-i386' and suite == 'experimental':
                 pass
-            else:
+            elif apt_trees["di"].has_key(suite):
                 if arch == "amd64":
                     os.write(ac_fd, apt_trees["di"][suite] %
                              {'arch': arch, 'contentsline': 'Contents "$(DIST)/../Contents-udeb";'})
@@ -360,19 +375,19 @@ def main ():
     startdir = os.getcwd()
     os.chdir(cnf["Dir::TempPath"])
 
-    # Setup a multiprocessing Pool. As many workers as we have CPU cores.
-    pool = Pool()
-
     # For each given suite, each architecture, run one apt-ftparchive
     for s in suites:
+        # Setup a multiprocessing Pool. As many workers as we have CPU cores.
+        pool = Pool()
         arch_list=get_suite_architectures(s.suite_name, skipsrc=False, skipall=True, session=session)
         Logger.log(['generating output for Suite %s, Architectures %s' % (s.suite_name, map(sname, arch_list))])
         for a in arch_list:
             pool.apply_async(generate_packages_sources, (a.arch_string, s.suite_name, cnf["Dir::TempPath"]))
 
-    # No more work will be added to our pool, close it and then wait for all to finish
-    pool.close()
-    pool.join()
+        # No more work will be added to our pool, close it and then wait for all to finish
+        pool.close()
+        pool.join()
+
     os.chdir(startdir)
     # this script doesn't change the database
     session.close()
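
A minimal, self-contained sketch (not dak code) of the pattern the reworked main() follows above: one multiprocessing Pool per suite, apply_async() for each architecture, then close()/join() before moving on to the next suite. The worker do_one() and the suite/architecture lists below are placeholders standing in for generate_packages_sources() and dak's suite handling, so treat this purely as an illustration of the technique.

from multiprocessing import Pool

def do_one(arch, suite, tmppath):
    # Stand-in for generate_packages_sources(): one apt-ftparchive run
    # per (suite, architecture) pair would happen here.
    return "%s/%s" % (suite, arch)

def run_all(suites, arches, tmppath="/tmp"):
    for suite in suites:
        # A fresh Pool per suite, sized to the CPU count by default.
        pool = Pool()
        for arch in arches:
            pool.apply_async(do_one, (arch, suite, tmppath))
        # No more work for this suite: close the pool and wait for every
        # worker to finish before starting the next suite.
        pool.close()
        pool.join()

if __name__ == '__main__':
    run_all(["testing", "unstable"], ["amd64", "i386", "source"])

Because the AsyncResult objects returned by apply_async() are never checked here (as in the patched loop), exceptions raised inside workers go unnoticed; calling .get() on each result is one way to surface them.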