byte_int, bytes_from_byte, bytes_from_uint,
environ,
items,
+ pending_raise,
range,
reraise)
from bup.io import path_msg
from bup.helpers import (Sha1, add_error, chunkyreader, debug1, debug2,
exo,
fdatasync,
+ finalized,
log,
merge_dict,
merge_iter,
return - tz_off
return tz_off
+def parse_commit_gpgsig(sig):
+    """Return the original signature bytes for a commit's gpgsig header.
+
+    i.e. with the b'gpgsig ' header tag and the leading space character
+    on each continuation line removed (git stores the signature with one
+    space prepended to every continuation line; see
+    Documentation/technical/signature-format.txt).  Return None when sig
+    is empty or None.
+
+    """
+    if not sig:
+        return None
+    assert sig.startswith(b'gpgsig ')
+    sig = sig[7:]
+    return sig.replace(b'\n ', b'\n')
# FIXME: derived from http://git.rsbx.net/Documents/Git_Data_Formats.txt
# Make sure that's authoritative.
+
+# See also
+# https://github.com/git/git/blob/master/Documentation/technical/signature-format.txt
+# The continuation lines have only one leading space.
+
_start_end_char = br'[^ .,:;<>"\'\0\n]'
_content_char = br'[^\0\n<>]'
_safe_str_rx = br'(?:%s{1,2}|(?:%s%s*%s))' \
_commit_rx = re.compile(br'''tree (?P<tree>[abcdefABCDEF0123456789]{40})
(?P<parents>%s*)author (?P<author_name>%s) <(?P<author_mail>%s)> (?P<asec>\d+) (?P<atz>%s)
committer (?P<committer_name>%s) <(?P<committer_mail>%s)> (?P<csec>\d+) (?P<ctz>%s)(?P<mergetag>%s?)
-
+(?P<gpgsig>gpgsig .*\n(?: .*\n)*)?
(?P<message>(?:.|\n)*)''' % (_parent_rx,
_safe_str_rx, _safe_str_rx, _tz_rx,
_safe_str_rx, _safe_str_rx, _tz_rx,
'author_sec', 'author_offset',
'committer_name', 'committer_mail',
'committer_sec', 'committer_offset',
+ 'gpgsig',
'message'])
def parse_commit(content):
committer_mail=matches['committer_mail'],
committer_sec=int(matches['csec']),
committer_offset=parse_tz_offset(matches['ctz']),
+ gpgsig=parse_commit_gpgsig(matches['gpgsig']),
message=matches['message'])
elif name.endswith(b'.bupm'):
return (name[:-5],
BUP_CHUNKED if stat.S_ISDIR(mode) else BUP_NORMAL)
- else:
- return (name, BUP_NORMAL)
+ return (name, BUP_NORMAL)
def calc_hash(type, content):
return self
def __exit__(self, type, value, traceback):
- self.close()
+ with pending_raise(value, rethrow=False):
+ self.close()
def __len__(self):
return int(self.nsha) # int() from long for python 2
return self
def __exit__(self, type, value, traceback):
- self.close()
+ with pending_raise(value, rethrow=False):
+ self.close()
def __len__(self):
return int(self.nsha) # int() from long for python 2
self.max_pack_objects = max_pack_objects if max_pack_objects \
else max(1, self.max_pack_size // 5000)
- def __del__(self):
- self.close()
-
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
- self.close()
+ with pending_raise(value, rethrow=False):
+ self.close()
def _open(self):
if not self.file:
self.breakpoint()
return sha
- def breakpoint(self):
- """Clear byte and object counts and return the last processed id."""
- id = self._end(self.run_midx)
- self.outbytes = self.count = 0
- return id
-
def _require_objcache(self):
if self.objcache is None and self.objcache_maker:
self.objcache = self.objcache_maker()
msg)
return self.maybe_write(b'commit', content)
- def abort(self):
- """Remove the pack file from disk."""
- f = self.file
- if f:
- pfd = self.parentfd
- self.file = None
- self.parentfd = None
- self.idx = None
- try:
- try:
- os.unlink(self.filename + b'.pack')
- finally:
- f.close()
- finally:
- if pfd is not None:
- os.close(pfd)
+ def _end(self, run_midx=True, abort=False):
+ # Ignores run_midx during abort
+ if not self.file:
+ return None
+ self.file, f = None, self.file
+ self.idx, idx = None, self.idx
+ self.parentfd, pfd, = None, self.parentfd
+ self.objcache = None
- def _end(self, run_midx=True):
- f = self.file
- if not f: return None
- self.file = None
- try:
- self.objcache = None
- idx = self.idx
- self.idx = None
+ with finalized(pfd, lambda x: x is not None and os.close(x)), \
+ f:
+
+ if abort:
+ os.unlink(self.filename + b'.pack')
+ return None
# update object count
f.seek(8)
cp = struct.pack('!i', self.count)
- assert(len(cp) == 4)
+ assert len(cp) == 4
f.write(cp)
# calculate the pack sha1sum
sum.update(b)
packbin = sum.digest()
f.write(packbin)
+ f.flush()
fdatasync(f.fileno())
- finally:
f.close()
- idx.write(self.filename + b'.idx', packbin)
- nameprefix = os.path.join(self.repo_dir,
- b'objects/pack/pack-' + hexlify(packbin))
- if os.path.exists(self.filename + b'.map'):
- os.unlink(self.filename + b'.map')
- os.rename(self.filename + b'.pack', nameprefix + b'.pack')
- os.rename(self.filename + b'.idx', nameprefix + b'.idx')
- try:
- os.fsync(self.parentfd)
- finally:
- os.close(self.parentfd)
-
- if run_midx:
- auto_midx(os.path.join(self.repo_dir, b'objects/pack'))
+ idx.write(self.filename + b'.idx', packbin)
+ nameprefix = os.path.join(self.repo_dir,
+ b'objects/pack/pack-' + hexlify(packbin))
+ if os.path.exists(self.filename + b'.map'):
+ os.unlink(self.filename + b'.map')
+ os.rename(self.filename + b'.pack', nameprefix + b'.pack')
+ os.rename(self.filename + b'.idx', nameprefix + b'.idx')
+ os.fsync(pfd)
+ if run_midx:
+ auto_midx(os.path.join(self.repo_dir, b'objects/pack'))
+ if self.on_pack_finish:
+ self.on_pack_finish(nameprefix)
+ return nameprefix
- if self.on_pack_finish:
- self.on_pack_finish(nameprefix)
+ def abort(self):
+ """Remove the pack file from disk."""
+ self._end(abort=True)
- return nameprefix
+ def breakpoint(self):
+ """Clear byte and object counts and return the last processed id."""
+ id = self._end(self.run_midx)
+ self.outbytes = self.count = 0
+ return id
def close(self, run_midx=True):
"""Close the pack file and move it to its definitive path."""
if wait:
p.wait()
return p.returncode
+ return None
def restart(self):
self.close()
self.p.stdin.write(ref + b'\n')
self.p.stdin.flush()
hdr = self.p.stdout.readline()
+ if not hdr:
+ raise GitError('unexpected cat-file EOF (last request: %r, exit: %s)'
+ % (ref, self.p.poll() or 'none'))
if hdr.endswith(b' missing\n'):
self.inprogress = None
yield None, None, None