Ensure all of our context-managed objects have a __del__ that asserts
that the instance has been properly closed, so that we'll be more
likely to notice related oversights.
This will only work in cases where __del__ is called before interpreter
shutdown, but that should normally be the case for CPython.
Signed-off-by: Rob Browning <rlb@defaultvalue.org>
Tested-by: Rob Browning <rlb@defaultvalue.org>
class ShaBloom:
"""Wrapper which contains data from multiple index files. """
def __init__(self, filename, f=None, readwrite=False, expected=-1):
class ShaBloom:
"""Wrapper which contains data from multiple index files. """
def __init__(self, filename, f=None, readwrite=False, expected=-1):
self.name = filename
self.readwrite = readwrite
self.file = None
self.name = filename
self.readwrite = readwrite
self.file = None
return self.map and self.bits
def close(self):
return self.map and self.bits
def close(self):
try:
if self.map and self.readwrite:
debug2("bloom: closing with %d entries\n" % self.entries)
try:
if self.map and self.readwrite:
debug2("bloom: closing with %d entries\n" % self.entries)
finally: # This won't handle pending exceptions correctly in py2
self._init_failed()
finally: # This won't handle pending exceptions correctly in py2
self._init_failed()
+ def __del__(self):
+ assert self.closed
+
def __enter__(self):
return self
def __enter__(self):
return self
class Client:
def __init__(self, remote, create=False):
class Client:
def __init__(self, remote, create=False):
self._busy = self.conn = None
self.sock = self.p = self.pout = self.pin = None
is_reverse = environ.get(b'BUP_SERVER_REVERSE')
self._busy = self.conn = None
self.sock = self.p = self.pout = self.pin = None
is_reverse = environ.get(b'BUP_SERVER_REVERSE')
self.check_ok()
self.sync_indexes()
self.check_ok()
self.sync_indexes()
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, traceback):
- with pending_raise(value, rethrow=False):
- self.close()
-
if self.conn and not self._busy:
self.conn.write(b'quit\n')
if self.pin:
if self.conn and not self._busy:
self.conn.write(b'quit\n')
if self.pin:
self.conn = None
self.sock = self.p = self.pin = self.pout = None
self.conn = None
self.sock = self.p = self.pin = self.pout = None
+ def __del__(self):
+ assert self.closed
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ with pending_raise(value, rethrow=False):
+ self.close()
+
def check_ok(self):
if self.p:
rv = self.p.poll()
def check_ok(self):
if self.p:
rv = self.p.poll()
compression_level=compression_level,
max_pack_size=max_pack_size,
max_pack_objects=max_pack_objects)
compression_level=compression_level,
max_pack_size=max_pack_size,
max_pack_objects=max_pack_objects)
+ self.remote_closed = False
self.file = conn
self.filename = b'remote socket'
self.suggest_packs = suggest_packs
self.file = conn
self.filename = b'remote socket'
self.suggest_packs = suggest_packs
def close(self):
# Called by inherited __exit__
def close(self):
# Called by inherited __exit__
+ self.remote_closed = True
id = self._end()
self.file = None
return id
id = self._end()
self.file = None
return id
+ def __del__(self):
+ assert self.remote_closed
+ super(PackWriter_Remote, self).__del__()
+
def abort(self):
raise ClientError("don't know how to abort remote pack writing")
def abort(self):
raise ClientError("don't know how to abort remote pack writing")
def __exit__(self, type, value, traceback):
return None # since close() does nothing
def close(self):
def __exit__(self, type, value, traceback):
return None # since close() does nothing
def close(self):
+ assert self.closed
+ def __del__(self):
+ assert self.closed
def new_blob(self, content):
return git.calc_hash(b'blob', content)
def new_tree(self, shalist):
def new_blob(self, content):
return git.calc_hash(b'blob', content)
def new_tree(self, shalist):
"""
def __init__(self, ex, rethrow=True):
"""
def __init__(self, ex, rethrow=True):
self.ex = ex
self.rethrow = rethrow
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
self.ex = ex
self.rethrow = rethrow
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
if not exc_type and self.ex and self.rethrow:
raise self.ex
if not exc_type and self.ex and self.rethrow:
raise self.ex
+ def __del__(self):
+ assert self.closed
def items(x):
return x.items()
def items(x):
return x.items()
"""
def __init__(self, ex, rethrow=True):
"""
def __init__(self, ex, rethrow=True):
self.ex = ex
self.rethrow = rethrow
def __enter__(self):
if self.ex:
add_ex_tb(self.ex)
def __exit__(self, exc_type, exc_value, traceback):
self.ex = ex
self.rethrow = rethrow
def __enter__(self):
if self.ex:
add_ex_tb(self.ex)
def __exit__(self, exc_type, exc_value, traceback):
if exc_value:
if self.ex:
add_ex_tb(exc_value)
if exc_value:
if self.ex:
add_ex_tb(exc_value)
return
if self.rethrow and self.ex:
raise self.ex
return
if self.rethrow and self.ex:
raise self.ex
+ def __del__(self):
+ assert self.closed
def dump_traceback(ex):
stack = [ex]
def dump_traceback(ex):
stack = [ex]
+ assert not hasattr(py_mmap.mmap, '__del__')
assert not hasattr(py_mmap.mmap, '__enter__')
assert not hasattr(py_mmap.mmap, '__exit__')
class mmap(py_mmap.mmap):
assert not hasattr(py_mmap.mmap, '__enter__')
assert not hasattr(py_mmap.mmap, '__exit__')
class mmap(py_mmap.mmap):
+ def __init__(self, *args, **kwargs):
+ self._bup_closed = False
+ # Silence deprecation warnings. mmap's current parent is
+ # object, which accepts no params and as of at least 2.7
+ # warns about them.
+ if py_mmap.mmap.__init__ is not object.__init__:
+ super(mmap, self).__init__(self, *args, **kwargs)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
+ self._bup_closed = True
with pending_raise(value, rethrow=False):
self.close()
with pending_raise(value, rethrow=False):
self.close()
+ def __del__(self):
+ assert self._bup_closed
class PackIdxV1(PackIdx):
"""Object representation of a Git pack index (version 1) file."""
def __init__(self, filename, f):
class PackIdxV1(PackIdx):
"""Object representation of a Git pack index (version 1) file."""
def __init__(self, filename, f):
self.name = filename
self.idxnames = [self.name]
self.map = mmap_read(f)
self.name = filename
self.idxnames = [self.name]
self.map = mmap_read(f)
yield self.map[ofs : ofs + 20]
def close(self):
yield self.map[ofs : ofs + 20]
def close(self):
if self.map is not None:
self.shatable = None
self.map.close()
self.map = None
if self.map is not None:
self.shatable = None
self.map.close()
self.map = None
+ def __del__(self):
+ assert self.closed
+
class PackIdxV2(PackIdx):
"""Object representation of a Git pack index (version 2) file."""
def __init__(self, filename, f):
class PackIdxV2(PackIdx):
"""Object representation of a Git pack index (version 2) file."""
def __init__(self, filename, f):
self.name = filename
self.idxnames = [self.name]
self.map = mmap_read(f)
self.name = filename
self.idxnames = [self.name]
self.map = mmap_read(f)
yield self.map[ofs : ofs + 20]
def close(self):
yield self.map[ofs : ofs + 20]
def close(self):
if self.map is not None:
self.shatable = None
self.map.close()
self.map = None
if self.map is not None:
self.shatable = None
self.map.close()
self.map = None
+ def __del__(self):
+ assert self.closed
+
_mpi_count = 0
class PackIdxList:
_mpi_count = 0
class PackIdxList:
def __init__(self, objcache_maker=_make_objcache, compression_level=1,
run_midx=True, on_pack_finish=None,
max_pack_size=None, max_pack_objects=None, repo_dir=None):
def __init__(self, objcache_maker=_make_objcache, compression_level=1,
run_midx=True, on_pack_finish=None,
max_pack_size=None, max_pack_objects=None, repo_dir=None):
self.repo_dir = repo_dir or repo()
self.file = None
self.parentfd = None
self.repo_dir = repo_dir or repo()
self.file = None
self.parentfd = None
def abort(self):
"""Remove the pack file from disk."""
def abort(self):
"""Remove the pack file from disk."""
self._end(abort=True)
def breakpoint(self):
self._end(abort=True)
def breakpoint(self):
def close(self, run_midx=True):
"""Close the pack file and move it to its definitive path."""
def close(self, run_midx=True):
"""Close the pack file and move it to its definitive path."""
return self._end(run_midx=run_midx)
return self._end(run_midx=run_midx)
+ def __del__(self):
+ assert self.closed
+
class PackIdxV2Writer:
def __init__(self):
class PackIdxV2Writer:
def __init__(self):
class BaseConn:
def __init__(self, outp):
class BaseConn:
def __init__(self, outp):
+ self._base_closed = False
self.outp = outp
def close(self):
self.outp = outp
def close(self):
+ self._base_closed = True
while self._read(65536): pass
while self._read(65536): pass
+ def __del__(self):
+ assert self._base_closed
+
def _read(self, size):
raise NotImplementedError("Subclasses must implement _read")
def _read(self, size):
raise NotImplementedError("Subclasses must implement _read")
class HLinkDB:
def __init__(self, filename):
class HLinkDB:
def __init__(self, filename):
# Map a "dev:ino" node to a list of paths associated with that node.
self._node_paths = {}
# Map a path to a "dev:ino" node.
# Map a "dev:ino" node to a list of paths associated with that node.
self._node_paths = {}
# Map a path to a "dev:ino" node.
self._save_prepared = True
def commit_save(self):
self._save_prepared = True
def commit_save(self):
if not self._save_prepared:
raise Error('cannot commit save of %r; no save prepared'
% self._filename)
if not self._save_prepared:
raise Error('cannot commit save of %r; no save prepared'
% self._filename)
self._save_prepared = None
def abort_save(self):
self._save_prepared = None
def abort_save(self):
if self._tmpname:
os.unlink(self._tmpname)
self._tmpname = None
if self._tmpname:
os.unlink(self._tmpname)
self._tmpname = None
with pending_raise(value, rethrow=True):
self.abort_save()
with pending_raise(value, rethrow=True):
self.abort_save()
+ def __del__(self):
+ assert self.closed
+
def add_path(self, path, dev, ino):
# Assume path is new.
node = b'%d:%d' % (dev, ino)
def add_path(self, path, dev, ino):
# Assume path is new.
node = b'%d:%d' % (dev, ino)
class MetaStoreReader:
def __init__(self, filename):
class MetaStoreReader:
def __init__(self, filename):
self._file = None
self._file = open(filename, 'rb')
def close(self):
self._file = None
self._file = open(filename, 'rb')
def close(self):
if self._file:
self._file.close()
self._file = None
if self._file:
self._file.close()
self._file = None
+ def __del__(self):
+ assert self._closed
+
def __enter__(self):
return self
def __enter__(self):
return self
# truncation or corruption somewhat sensibly.
def __init__(self, filename):
# truncation or corruption somewhat sensibly.
def __init__(self, filename):
# Map metadata hashes to bupindex.meta offsets.
self._offsets = {}
self._filename = filename
# Map metadata hashes to bupindex.meta offsets.
self._offsets = {}
self._filename = filename
self._file = open(filename, 'ab')
def close(self):
self._file = open(filename, 'ab')
def close(self):
if self._file:
self._file.close()
self._file = None
if self._file:
self._file.close()
self._file = None
+ def __del__(self):
+ assert self._closed
+
def __enter__(self):
return self
def __enter__(self):
return self
class Reader:
def __init__(self, filename):
class Reader:
def __init__(self, filename):
self.filename = filename
self.m = b''
self.writable = False
self.filename = filename
self.m = b''
self.writable = False
self.m.flush()
def close(self):
self.m.flush()
def close(self):
self.save()
if self.writable and self.m:
self.m.close()
self.m = None
self.writable = False
self.save()
if self.writable and self.m:
self.m.close()
self.m = None
self.writable = False
+ def __del__(self):
+ assert self.closed
+
def filter(self, prefixes, wantrecurse=None):
for (rp, path) in reduce_paths(prefixes):
any_entries = False
def filter(self, prefixes, wantrecurse=None):
for (rp, path) in reduce_paths(prefixes):
any_entries = False
class Writer:
def __init__(self, filename, metastore, tmax):
class Writer:
def __init__(self, filename, metastore, tmax):
self.rootlevel = self.level = Level([], None)
self.f = None
self.count = 0
self.rootlevel = self.level = Level([], None)
self.f = None
self.count = 0
self.abort()
def abort(self):
self.abort()
def abort(self):
f = self.f
self.f = None
if f:
f = self.f
self.f = None
if f:
assert(self.level == None)
def close(self):
assert(self.level == None)
def close(self):
self.flush()
f = self.f
self.f = None
self.flush()
f = self.f
self.f = None
f.close()
os.rename(self.tmpname, self.filename)
f.close()
os.rename(self.tmpname, self.filename)
+ def __del__(self):
+ assert self.closed
+
def _add(self, ename, entry):
if self.lastfile and self.lastfile <= ename:
raise Error('%r must come before %r'
def _add(self, ename, entry):
if self.lastfile and self.lastfile <= ename:
raise Error('%r must come before %r'
amounts of files.
"""
def __init__(self, filename):
amounts of files.
"""
def __init__(self, filename):
self.name = filename
self.force_keep = False
self.map = None
self.name = filename
self.force_keep = False
self.map = None
return self.idxnames[self._get_idx_i(i)]
def close(self):
return self.idxnames[self._get_idx_i(i)]
def close(self):
if self.map is not None:
self.fanout = self.shatable = self.whichlist = self.idxnames = None
self.map.close()
self.map = None
if self.map is not None:
self.fanout = self.shatable = self.whichlist = self.idxnames = None
self.map.close()
self.map = None
+ def __del__(self):
+ assert self.closed
+
def exists(self, hash, want_source=False):
"""Return nonempty if the object exists in the index files."""
global _total_searches, _total_steps
def exists(self, hash, want_source=False):
"""Return nonempty if the object exists in the index files."""
global _total_searches, _total_steps
class LocalRepo:
def __init__(self, repo_dir=None):
class LocalRepo:
def __init__(self, repo_dir=None):
self.repo_dir = realpath(repo_dir or git.repo())
self._cp = git.cp(self.repo_dir)
self.update_ref = partial(git.update_ref, repo_dir=self.repo_dir)
self.repo_dir = realpath(repo_dir or git.repo())
self._cp = git.cp(self.repo_dir)
self.update_ref = partial(git.update_ref, repo_dir=self.repo_dir)
self._id = _repo_id(self.repo_dir)
def close(self):
self._id = _repo_id(self.repo_dir)
def close(self):
+ self.closed = True
+
+ def __del__(self):
+ assert self.closed
def __enter__(self):
return self
def __enter__(self):
return self
class RemoteRepo:
def __init__(self, address):
class RemoteRepo:
def __init__(self, address):
self.address = address
self.client = client.Client(address)
self.new_packwriter = self.client.new_packwriter
self.address = address
self.client = client.Client(address)
self.new_packwriter = self.client.new_packwriter
self._id = _repo_id(self.address)
def close(self):
self._id = _repo_id(self.address)
def close(self):
+ if not self.closed:
+ self.closed = True
self.client.close()
self.client = None
self.client.close()
self.client = None
+ def __del__(self):
+ assert self.closed
+
def __enter__(self):
return self
def __enter__(self):
return self
class _FileReader(object):
def __init__(self, repo, oid, known_size=None):
assert len(oid) == 20
class _FileReader(object):
def __init__(self, repo, oid, known_size=None):
assert len(oid) == 20
self.oid = oid
self.ofs = 0
self.reader = None
self.oid = oid
self.ofs = 0
self.reader = None
return buf
def close(self):
return buf
def close(self):
+ self.closed = True
+
+ def __del__(self):
+ assert self.closed
def __enter__(self):
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
with pending_raise(value, rethrow=False):
self.close()
def __exit__(self, type, value, traceback):
with pending_raise(value, rethrow=False):
self.close()