(ac_fd, ac_name) = mkstemp()
os.write(ac_fd, MINIMAL_APT_CONF % {'archivepath': self.path,
'filelist': fl_name})
- os.close()
+ os.close(ac_fd)
# Run apt-ftparchive generate
- os.chdir(os.path.dirname(fl_name))
- os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(fl_name))
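+ # apt-ftparchive generate expects the apt config file (ac_name), which in
+ # turn points at the temporary file list (fl_name) written into it above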
+ os.chdir(os.path.dirname(ac_name))
+ os.system('apt-ftparchive -qq -o APT::FTPArchive::Contents=off generate %s' % os.path.basename(ac_name))
# Run apt-ftparchive release
# TODO: Eww - fix this
bname = os.path.basename(self.path)
os.chdir(self.path)
os.chdir('..')
- os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="${archs}" release %s > Release""", [self.origin, self.label, self.releasedescription, arches, bname])
+
+ # We have to remove the Release file otherwise it'll be included in the
+ # new one
+ try:
+ os.unlink(os.path.join(bname, 'Release'))
+ except OSError:
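+ # A missing Release file (e.g. on the first run) is fine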
+ pass
+
+ os.system("""apt-ftparchive -qq -o APT::FTPArchive::Release::Origin="%s" -o APT::FTPArchive::Release::Label="%s" -o APT::FTPArchive::Release::Description="%s" -o APT::FTPArchive::Release::Architectures="%s" release %s > Release""" % (self.origin, self.label, self.releasedescription, arches, bname))
# Sign if necessary
if self.signingkey:
except OSError:
pass
- def clean_and_update(self, starttime, dryrun=False):
+ def clean_and_update(self, starttime, Logger, dryrun=False):
"""WARNING: This routine commits for you"""
session = DBConn().session().object_session(self)
- if self.generate_metadata:
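+ # Don't regenerate the archive metadata on a dry run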
+ if self.generate_metadata and not dryrun:
self.write_metadata(starttime)
# Grab files older than our execution time
- older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
+ older = session.query(BuildQueueFile).filter_by(build_queue_id = self.queue_id).filter(BuildQueueFile.lastused + timedelta(seconds=self.stay_of_execution) <= starttime).all()
for o in older:
killdb = False
try:
if dryrun:
- print "I: Would have removed %s from the queue"
+ Logger.log(["I: Would have removed %s from the queue" % o.fullpath])
else:
- print "I: Removing %s from the queue"
+ Logger.log(["I: Removing %s from the queue" % o.fullpath])
os.unlink(o.fullpath)
killdb = True
except OSError, e:
killdb = True
else:
# TODO: Replace with proper logging call
- print "E: Could not remove %s" % o.fullpath
+ Logger.log(["E: Could not remove %s" % o.fullpath])
if killdb:
session.delete(o)
except NoResultFound:
fp = os.path.join(self.path, f)
if dryrun:
- print "I: Would remove unused link %s" % fp
+ Logger.log(["I: Would remove unused link %s" % fp])
else:
- print "I: Removing unused link %s" % fp
+ Logger.log(["I: Removing unused link %s" % fp])
try:
os.unlink(fp)
except OSError:
- print "E: Failed to unlink unreferenced file %s" % r.fullpath
+ Logger.log(["E: Failed to unlink unreferenced file %s" % r.fullpath])
def add_file_from_pool(self, poolfile):
"""Copies a file into the pool. Assumes that the PoolFile object is
"""
# TODO: There must be a way of properly using bind parameters with %FOO%
- q = session.query(PoolFile).filter(PoolFile.filename.like('%%%s%%' % filename))
+ q = session.query(PoolFile).filter(PoolFile.filename.like('%%/%s' % filename))
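+ # '%%/%s' expands to the LIKE pattern '%/<filename>', so only exact
+ # basename matches are returned rather than any substring match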
return q.all()
def __repr__(self):
return '<DBChange %s>' % self.changesname
+ def clean_from_queue(self):
+ session = DBConn().session().object_session(self)
+
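+ # (Assigning empty collections to these relations deletes the corresponding
+ # rows from the association tables when the session is flushed.)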
+ # Remove changes_pool_files entries
+ self.poolfiles = []
+
+ # Remove changes_pending_files references
+ self.files = []
+
+ # Clear out of queue
+ self.in_queue = None
+ self.approved_for_id = None
+
__all__.append('DBChange')
@session_wrapper
poolfiles = relation(PoolFile,
secondary=self.tbl_changes_pool_files,
backref="changeslinks"),
+ seen = self.tbl_changes.c.seen,
+ source = self.tbl_changes.c.source,
+ binaries = self.tbl_changes.c.binaries,
+ architecture = self.tbl_changes.c.architecture,
+ distribution = self.tbl_changes.c.distribution,
+ urgency = self.tbl_changes.c.urgency,
+ maintainer = self.tbl_changes.c.maintainer,
+ changedby = self.tbl_changes.c.changedby,
+ date = self.tbl_changes.c.date,
+ version = self.tbl_changes.c.version,
files = relation(ChangePendingFile,
secondary=self.tbl_changes_pending_files_map,
backref="changesfile"),
properties = dict(change_pending_binary_id = self.tbl_changes_pending_binaries.c.id))
mapper(ChangePendingFile, self.tbl_changes_pending_files,
- properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id))
+ properties = dict(change_pending_file_id = self.tbl_changes_pending_files.c.id,
+ filename = self.tbl_changes_pending_files.c.filename,
+ size = self.tbl_changes_pending_files.c.size,
+ md5sum = self.tbl_changes_pending_files.c.md5sum,
+ sha1sum = self.tbl_changes_pending_files.c.sha1sum,
+ sha256sum = self.tbl_changes_pending_files.c.sha256sum))
mapper(ChangePendingSource, self.tbl_changes_pending_source,
properties = dict(change_pending_source_id = self.tbl_changes_pending_source.c.id,