################################################################################
+import errno
import os
import stat
import sys
################################################################################
+def clean_byhash(now_date, session):
+    Logger.log(["Cleaning out unused by-hash files..."])
+
+    q = session.execute("""
+        DELETE FROM hashfile h
+        USING suite s, archive a
+        WHERE s.id = h.suite_id
+          AND a.id = s.archive_id
+          AND h.unreferenced + a.stayofexecution < CURRENT_TIMESTAMP
+        RETURNING a.path, s.suite_name, h.path""")
+    count = q.rowcount
+
+    if not Options["No-Action"]:
+        for base, suite, path in q:
+            filename = os.path.join(base, 'dists', suite, path)
+            try:
+                os.unlink(filename)
+            except OSError as exc:
+                if exc.errno != errno.ENOENT:
+                    raise
+                Logger.log(['database referred to non-existing file', filename])
+            else:
+                Logger.log(['delete hashfile', suite, path])
+        session.commit()
+
+    if count > 0:
+        Logger.log(["total", count])
+
+################################################################################
+
def clean_empty_directories(session):
"""
Removes empty directories from pool directories.
    clean(now_date, archives, max_delete, session)
    clean_maintainers(now_date, session)
    clean_fingerprints(now_date, session)
+    clean_byhash(now_date, session)
    clean_empty_directories(session)
    session.rollback()
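
clean_byhash() above removes hashfile rows whose grace period has expired (unreferenced plus archive.stayofexecution lies in the past) and then unlinks the corresponding files under dists/<suite>/, treating an already-missing file as harmless. A minimal standalone sketch of that unlink idiom, using a hypothetical helper name that is not part of dak:

    import errno
    import os

    def remove_if_missing_ok(path):
        # Unlink path; treat "already gone" as success, re-raise anything else.
        try:
            os.unlink(path)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise
            return False   # the file was already missing
        return True        # the file was removed
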
--- /dev/null
+"""
+Add support for by-hash with a new table and per-suite boolean
+
+@contact: Debian FTP Master <ftpmaster@debian.org>
+@copyright: 2016, Julien Cristau <jcristau@debian.org>
+@license: GNU General Public License version 2 or later
+"""
+
+import psycopg2
+from daklib.dak_exceptions import DBUpdateError
+from daklib.config import Config
+
+def do_update(self):
+    """Add a column to store whether to generate by-hash files per suite,
+    and a table to record when a by-hash file stopped being referenced
+    """
+    print __doc__
+    try:
+        c = self.db.cursor()
+
+        c.execute("ALTER TABLE suite ADD COLUMN byhash BOOLEAN DEFAULT false")
+
+        c.execute("""
+            CREATE TABLE hashfile (
+                suite_id INTEGER NOT NULL REFERENCES suite(id) ON DELETE CASCADE,
+                path TEXT NOT NULL,
+                unreferenced TIMESTAMP,
+                PRIMARY KEY (suite_id, path)
+            )
+            """)
+
+        c.execute("UPDATE config SET value = '116' WHERE name = 'db_revision'")
+
+        self.db.commit()
+
+    except psycopg2.ProgrammingError as msg:
+        self.db.rollback()
+        raise DBUpdateError('Unable to apply update 116, rollback issued. Error message: %s' % str(msg))
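
The new suite.byhash column defaults to false, so by-hash generation stays off until it is enabled per suite. A hedged sketch of flipping the flag directly with psycopg2; the database name and suite name are placeholders, and in practice the setting would normally be changed through dak's own tooling:

    import psycopg2

    # Assumed database name and suite name; adjust for the local installation.
    conn = psycopg2.connect("dbname=projectb")
    cur = conn.cursor()
    cur.execute("UPDATE suite SET byhash = true WHERE suite_name = %s", ("unstable",))
    conn.commit()
    conn.close()
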
import time
import gzip
import bz2
+import errno
import apt_pkg
import subprocess
from tempfile import mkstemp, mkdtemp
# Boolean stuff. If we find it true in database, write out "yes" into the release file
boolattrs = ( ('NotAutomatic', 'notautomatic'),
-              ('ButAutomaticUpgrades', 'butautomaticupgrades') )
+              ('ButAutomaticUpgrades', 'butautomaticupgrades'),
+              ('Acquire-By-Hash', 'byhash'),
+            )
cnf = Config()
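
With the extra boolattrs entry, a suite whose byhash flag is set gets an "Acquire-By-Hash: yes" field in its Release file, which tells clients they may fetch indexes via the by-hash paths. The loop that consumes boolattrs is not part of this hunk, so the following is only a sketch of how such a mapping is typically applied; write_boolattrs() and the suite/out objects are illustrative, not dak's actual code:

    boolattrs = ( ('NotAutomatic', 'notautomatic'),
                  ('ButAutomaticUpgrades', 'butautomaticupgrades'),
                  ('Acquire-By-Hash', 'byhash'),
                )

    def write_boolattrs(suite, out):
        # Emit "Field: yes" for every suite attribute that is true.
        for field, attr in boolattrs:
            if getattr(suite, attr, False):
                out.write("%s: yes\n" % field)
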
        out.close()
        os.rename(outfile + '.new', outfile)
+        if suite.byhash:
+            query = """
+                UPDATE hashfile SET unreferenced = CURRENT_TIMESTAMP
+                WHERE suite_id = :id AND unreferenced IS NULL"""
+            session.execute(query, {'id': suite.suite_id})
+
+            for filename in fileinfo:
+                if not os.path.exists(filename):
+                    # probably an uncompressed index we didn't generate
+                    continue
+
+                for h in hashfuncs:
+                    hashfile = os.path.join(os.path.dirname(filename), 'by-hash', h, fileinfo[filename][h])
+                    query = "SELECT 1 FROM hashfile WHERE path = :p AND suite_id = :id"
+                    q = session.execute(
+                        query,
+                        {'p': hashfile, 'id': suite.suite_id})
+                    if q.rowcount:
+                        session.execute('''
+                            UPDATE hashfile SET unreferenced = NULL
+                            WHERE path = :p AND suite_id = :id''',
+                            {'p': hashfile, 'id': suite.suite_id})
+                    else:
+                        session.execute('''
+                            INSERT INTO hashfile (path, suite_id)
+                            VALUES (:p, :id)''',
+                            {'p': hashfile, 'id': suite.suite_id})
+
+                    try:
+                        os.makedirs(os.path.dirname(hashfile))
+                    except OSError as exc:
+                        if exc.errno != errno.EEXIST:
+                            raise
+                    try:
+                        os.link(filename, hashfile)
+                    except OSError as exc:
+                        if exc.errno != errno.EEXIST:
+                            raise
+
+            session.commit()
+
        sign_release_dir(suite, os.path.dirname(outfile))
        os.chdir(oldcwd)
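
The block above hard-links each freshly written index into a by-hash/<hashfunc>/<hexdigest> directory next to it and records that path in the hashfile table, so clients can fetch indexes by content while the named files are being replaced; clean_byhash() later prunes entries once they have been unreferenced for longer than the archive's stayofexecution. A sketch of deriving one such path for a single hash function; using SHA256 here is an assumption, since dak iterates over its hashfuncs mapping:

    import hashlib
    import os

    def byhash_path(index_path):
        # Digest the on-disk index and point at by-hash/SHA256/<hexdigest>
        # alongside it, mirroring the layout created above.
        with open(index_path, 'rb') as fh:
            digest = hashlib.sha256(fh.read()).hexdigest()
        return os.path.join(os.path.dirname(index_path), 'by-hash', 'SHA256', digest)

A client with Acquire-By-Hash enabled would then request, for example, dists/<suite>/main/binary-amd64/by-hash/SHA256/<digest> instead of the plain index file, avoiding hash-sum mismatches while a mirror is being updated.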