X-Git-Url: https://arthur.barton.de/cgi-bin/gitweb.cgi?p=bup.git;a=blobdiff_plain;f=lib%2Fbup%2Fgit.py;h=ff48da7113ed7e4bd5cd363f3807bd7ffa674e5c;hp=5edfe1c3185d6a143902aac79a2726520b1afc8f;hb=b2f7ccd162e988a8785771a116c83b04f9ea51ce;hpb=56dd4712e7d66a8623ec7b85753469f9500c6003

diff --git a/lib/bup/git.py b/lib/bup/git.py
index 5edfe1c..ff48da7 100644
--- a/lib/bup/git.py
+++ b/lib/bup/git.py
@@ -2,18 +2,29 @@
 bup repositories are in Git format. This library allows us to interact with
 the Git data structures.
 """
-import os, sys, zlib, time, subprocess, struct, stat, re, tempfile, glob
-from bup.helpers import *
-from bup import _helpers, path, midx, bloom, xstat
 
-max_pack_size = 1000*1000*1000  # larger packs will slow down pruning
-max_pack_objects = 200*1000  # cache memory usage is about 83 bytes per object
-SEEK_END=2  # os.SEEK_END is not defined in python 2.4
+from __future__ import absolute_import
+import errno, os, sys, zlib, time, subprocess, struct, stat, re, tempfile, glob
+from collections import namedtuple
+from itertools import islice
+from numbers import Integral
+
+from bup import _helpers, compat, hashsplit, path, midx, bloom, xstat
+from bup.compat import range
+from bup.helpers import (Sha1, add_error, chunkyreader, debug1, debug2,
+                         fdatasync,
+                         hostname, localtime, log,
+                         merge_dict,
+                         merge_iter,
+                         mmap_read, mmap_readwrite,
+                         parse_num,
+                         progress, qprogress, shstr, stat_if_exists,
+                         unlink, username, userfullname,
+                         utc_offset_str)
 
 verbose = 0
 ignore_midx = 0
-home_repodir = os.path.expanduser('~/.bup')
-repodir = None
+repodir = None  # The default repository, once initialized
 
 _typemap =  { 'blob':3, 'tree':2, 'commit':1, 'tag':4 }
 _typermap = { 3:'blob', 2:'tree', 1:'commit', 4:'tag' }
@@ -26,18 +37,126 @@ class GitError(Exception):
     pass
 
 
-def repo(sub = ''):
+def _gitenv(repo_dir=None):
+    if not repo_dir:
+        repo_dir = repo()
+    return merge_dict(os.environ, {'GIT_DIR': os.path.abspath(repo_dir)})
+
+def _git_wait(cmd, p):
+    rv = p.wait()
+    if rv != 0:
+        raise GitError('%s returned %d' % (shstr(cmd), rv))
+
+def _git_capture(argv):
+    p = subprocess.Popen(argv, stdout=subprocess.PIPE, env=_gitenv())
+    r = p.stdout.read()
+    _git_wait(repr(argv), p)
+    return r
+
+def git_config_get(option, repo_dir=None):
+    cmd = ('git', 'config', '--get', option)
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                         env=_gitenv(repo_dir=repo_dir))
+    r = p.stdout.read()
+    rc = p.wait()
+    if rc == 0:
+        return r
+    if rc != 1:
+        raise GitError('%s returned %d' % (cmd, rc))
+    return None
+
+
+def parse_tz_offset(s):
+    """UTC offset in seconds."""
+    tz_off = (int(s[1:3]) * 60 * 60) + (int(s[3:5]) * 60)
+    if s[0] == '-':
+        return - tz_off
+    return tz_off
+
+
+# FIXME: derived from http://git.rsbx.net/Documents/Git_Data_Formats.txt
+# Make sure that's authoritative.
+_start_end_char = r'[^ .,:;<>"\'\0\n]'
+_content_char = r'[^\0\n<>]'
+_safe_str_rx = '(?:%s{1,2}|(?:%s%s*%s))' \
+    % (_start_end_char,
+       _start_end_char, _content_char, _start_end_char)
+_tz_rx = r'[-+]\d\d[0-5]\d'
+_parent_rx = r'(?:parent [abcdefABCDEF0123456789]{40}\n)'
+# Assumes every following line starting with a space is part of the
+# mergetag.  Is there a formal commit blob spec?
+_mergetag_rx = r'(?:\nmergetag object [abcdefABCDEF0123456789]{40}(?:\n [^\0\n]*)*)'
+_commit_rx = re.compile(r'''tree (?P<tree>[abcdefABCDEF0123456789]{40})
+(?P<parents>%s*)author (?P<author_name>%s) <(?P<author_mail>%s)> (?P<asec>\d+) (?P<atz>%s)
+committer (?P<committer_name>%s) <(?P<committer_mail>%s)> (?P<csec>\d+) (?P<ctz>%s)(?P<mergetag>%s?)
+
+(?P<message>(?:.|\n)*)''' % (_parent_rx,
+                             _safe_str_rx, _safe_str_rx, _tz_rx,
+                             _safe_str_rx, _safe_str_rx, _tz_rx,
+                             _mergetag_rx))
+_parent_hash_rx = re.compile(r'\s*parent ([abcdefABCDEF0123456789]{40})\s*')
+
+# Note that the author_sec and committer_sec values are (UTC) epoch
+# seconds, and for now the mergetag is not included.
+CommitInfo = namedtuple('CommitInfo', ['tree', 'parents',
+                                       'author_name', 'author_mail',
+                                       'author_sec', 'author_offset',
+                                       'committer_name', 'committer_mail',
+                                       'committer_sec', 'committer_offset',
+                                       'message'])

+
+def parse_commit(content):
+    commit_match = re.match(_commit_rx, content)
+    if not commit_match:
+        raise Exception('cannot parse commit %r' % content)
+    matches = commit_match.groupdict()
+    return CommitInfo(tree=matches['tree'],
+                      parents=re.findall(_parent_hash_rx, matches['parents']),
+                      author_name=matches['author_name'],
+                      author_mail=matches['author_mail'],
+                      author_sec=int(matches['asec']),
+                      author_offset=parse_tz_offset(matches['atz']),
+                      committer_name=matches['committer_name'],
+                      committer_mail=matches['committer_mail'],
+                      committer_sec=int(matches['csec']),
+                      committer_offset=parse_tz_offset(matches['ctz']),
+                      message=matches['message'])
+
+
+def get_cat_data(cat_iterator, expected_type):
+    _, kind, _ = next(cat_iterator)
+    if kind != expected_type:
+        raise Exception('expected %r, saw %r' % (expected_type, kind))
+    return ''.join(cat_iterator)
+
+def get_commit_items(id, cp):
+    return parse_commit(get_cat_data(cp.get(id), 'commit'))
+
+def _local_git_date_str(epoch_sec):
+    return '%d %s' % (epoch_sec, utc_offset_str(epoch_sec))
+
+
+def _git_date_str(epoch_sec, tz_offset_sec):
+    offs = tz_offset_sec // 60
+    return '%d %s%02d%02d' \
+        % (epoch_sec,
+           '+' if offs >= 0 else '-',
+           abs(offs) // 60,
+           abs(offs) % 60)
+
+
+def repo(sub = '', repo_dir=None):
     """Get the path to the git repository or one of its subdirectories."""
-    global repodir
-    if not repodir:
+    repo_dir = repo_dir or repodir
+    if not repo_dir:
         raise GitError('You should call check_repo_or_die()')
 
     # If there's a .git subdirectory, then the actual repo is in there.
-    gd = os.path.join(repodir, '.git')
+    gd = os.path.join(repo_dir, '.git')
     if os.path.exists(gd):
-        repodir = gd
+        repo_dir = gd
 
-    return os.path.join(repodir, sub)
+    return os.path.join(repo_dir, sub)
 
 
 def shorten_hash(s):
@@ -67,7 +186,7 @@ def auto_midx(objdir):
     args = [path.exe(), 'midx', '--auto', '--dir', objdir]
     try:
         rv = subprocess.call(args, stdout=open('/dev/null', 'w'))
-    except OSError, e:
+    except OSError as e:
         # make sure 'args' gets printed to help with debugging
         add_error('%r: exception: %s' % (args, e))
         raise
@@ -77,7 +196,7 @@ def auto_midx(objdir):
     args = [path.exe(), 'bloom', '--dir', objdir]
     try:
         rv = subprocess.call(args, stdout=open('/dev/null', 'w'))
-    except OSError, e:
+    except OSError as e:
         # make sure 'args' gets printed to help with debugging
         add_error('%r: exception: %s' % (args, e))
         raise
@@ -89,9 +208,10 @@ def mangle_name(name, mode, gitmode):
     """Mangle a file name to present an abstract name for segmented files.
     Mangled file names will have the ".bup" extension added to them. If a
     file's name already ends with ".bup", a ".bupl" extension is added to
-    disambiguate normal files from semgmented ones.
+    disambiguate normal files from segmented ones.
""" if stat.S_ISREG(mode) and not stat.S_ISREG(gitmode): + assert(stat.S_ISDIR(gitmode)) return name + '.bup' elif name.endswith('.bup') or name[:-1].endswith('.bup'): return name + '.bupl' @@ -100,21 +220,24 @@ def mangle_name(name, mode, gitmode): (BUP_NORMAL, BUP_CHUNKED) = (0,1) -def demangle_name(name): +def demangle_name(name, mode): """Remove name mangling from a file name, if necessary. The return value is a tuple (demangled_filename,mode), where mode is one of the following: * BUP_NORMAL : files that should be read as-is from the repository - * BUP_CHUNKED : files that were chunked and need to be assembled + * BUP_CHUNKED : files that were chunked and need to be reassembled - For more information on the name mangling algorythm, see mangle_name() + For more information on the name mangling algorithm, see mangle_name() """ if name.endswith('.bupl'): return (name[:-5], BUP_NORMAL) elif name.endswith('.bup'): return (name[:-4], BUP_CHUNKED) + elif name.endswith('.bupm'): + return (name[:-5], + BUP_CHUNKED if stat.S_ISDIR(mode) else BUP_NORMAL) else: return (name, BUP_NORMAL) @@ -127,7 +250,7 @@ def calc_hash(type, content): return sum.digest() -def _shalist_sort_key(ent): +def shalist_item_sort_key(ent): (mode, name, id) = ent assert(mode+0 == mode) if stat.S_ISDIR(mode): @@ -138,7 +261,7 @@ def _shalist_sort_key(ent): def tree_encode(shalist): """Generate a git tree object from (mode,name,hash) tuples.""" - shalist = sorted(shalist, key = _shalist_sort_key) + shalist = sorted(shalist, key = shalist_item_sort_key) l = [] for (mode,name,bin) in shalist: assert(mode) @@ -155,17 +278,19 @@ def tree_decode(buf): """Generate a list of (mode,name,hash) from the git tree object in buf.""" ofs = 0 while ofs < len(buf): - z = buf[ofs:].find('\0') - assert(z > 0) - spl = buf[ofs:ofs+z].split(' ', 1) + z = buf.find('\0', ofs) + assert(z > ofs) + spl = buf[ofs:z].split(' ', 1) assert(len(spl) == 2) mode,name = spl - sha = buf[ofs+z+1:ofs+z+1+20] - ofs += z+1+20 + sha = buf[z+1:z+1+20] + ofs = z+1+20 yield (int(mode, 8), name, sha) def _encode_packobj(type, content, compression_level=1): + if compression_level not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9): + raise ValueError('invalid compression level %s' % compression_level) szout = '' sz = len(content) szbits = (sz & 0x0f) | (_typemap[type]<<4) @@ -177,10 +302,6 @@ def _encode_packobj(type, content, compression_level=1): break szbits = sz & 0x7f sz >>= 7 - if compression_level > 9: - compression_level = 9 - elif compression_level < 0: - compression_level = 0 z = zlib.compressobj(compression_level) yield szout yield z.compress(content) @@ -273,22 +394,25 @@ class PackIdxV1(PackIdx): self.name = filename self.idxnames = [self.name] self.map = mmap_read(f) - self.fanout = list(struct.unpack('!256I', - str(buffer(self.map, 0, 256*4)))) + self.fanout = list(struct.unpack('!256I', buffer(self.map, 0, 256 * 4))) self.fanout.append(0) # entry "-1" nsha = self.fanout[255] self.sha_ofs = 256*4 self.shatable = buffer(self.map, self.sha_ofs, nsha*24) def _ofs_from_idx(self, idx): - return struct.unpack('!I', str(self.shatable[idx*24 : idx*24+4]))[0] + ofs = idx * 24 + return struct.unpack('!I', self.shatable[ofs : ofs + 4])[0] def _idx_to_hash(self, idx): - return str(self.shatable[idx*24+4 : idx*24+24]) + ofs = idx * 24 + 4 + return self.shatable[ofs : ofs + 20] def __iter__(self): - for i in xrange(self.fanout[255]): - yield buffer(self.map, 256*4 + 24*i + 4, 20) + count = self.fanout[255] + start = 256 * 4 + 4 + for ofs in range(start, start + (24 * count), 24): 
+ yield self.map[ofs : ofs + 20] class PackIdxV2(PackIdx): @@ -297,9 +421,9 @@ class PackIdxV2(PackIdx): self.name = filename self.idxnames = [self.name] self.map = mmap_read(f) - assert(str(self.map[0:8]) == '\377tOc\0\0\0\2') + assert self.map[0:8] == b'\377tOc\0\0\0\2' self.fanout = list(struct.unpack('!256I', - str(buffer(self.map, 8, 256*4)))) + buffer(self.map[8 : 8 + 256 * 4]))) self.fanout.append(0) # entry "-1" nsha = self.fanout[255] self.sha_ofs = 8 + 256*4 @@ -311,19 +435,22 @@ class PackIdxV2(PackIdx): 8 + 256*4 + nsha*20 + nsha*4 + nsha*4) def _ofs_from_idx(self, idx): - ofs = struct.unpack('!I', str(buffer(self.ofstable, idx*4, 4)))[0] + i = idx * 4 + ofs = struct.unpack('!I', self.ofstable[i : i + 4])[0] if ofs & 0x80000000: idx64 = ofs & 0x7fffffff - ofs = struct.unpack('!Q', - str(buffer(self.ofs64table, idx64*8, 8)))[0] + idx64_i = idx64 * 8 + ofs = struct.unpack('!Q', self.ofs64table[idx64_i : idx64_i + 8])[0] return ofs def _idx_to_hash(self, idx): - return str(self.shatable[idx*20:(idx+1)*20]) + return self.shatable[idx * 20 : (idx + 1) * 20] def __iter__(self): - for i in xrange(self.fanout[255]): - yield buffer(self.map, 8 + 256*4 + 20*i, 20) + count = self.fanout[255] + start = 8 + 256 * 4 + for ofs in range(start, start + (20 * count), 20): + yield self.map[ofs : ofs + 20] _mpi_count = 0 @@ -408,6 +535,7 @@ class PackIdxList: ' used by %s\n') % (n, mxf)) broken = True if broken: + mx.close() del mx unlink(full) else: @@ -429,12 +557,13 @@ class PackIdxList: elif not ix.force_keep: debug1('midx: removing redundant: %s\n' % os.path.basename(ix.name)) + ix.close() unlink(ix.name) for full in glob.glob(os.path.join(self.dir,'*.idx')): if not d.get(full): try: ix = open_idx(full) - except GitError, e: + except GitError as e: add_error(e) continue d[full] = ix @@ -442,7 +571,7 @@ class PackIdxList: if self.bloom is None and os.path.exists(bfull): self.bloom = bloom.ShaBloom(bfull) self.packs = list(set(d.values())) - self.packs.sort(lambda x,y: -cmp(len(x),len(y))) + self.packs.sort(reverse=True, key=lambda x: len(x)) if self.bloom and self.bloom.valid() and len(self.bloom) >= len(self): self.do_bloom = True else: @@ -491,25 +620,64 @@ def idxmerge(idxlist, final_progress=True): def _make_objcache(): return PackIdxList(repo('objects/pack')) +# bup-gc assumes that it can disable all PackWriter activities +# (bloom/midx/cache) via the constructor and close() arguments. 
+ class PackWriter: """Writes Git objects inside a pack file.""" - def __init__(self, objcache_maker=_make_objcache, compression_level=1): + def __init__(self, objcache_maker=_make_objcache, compression_level=1, + run_midx=True, on_pack_finish=None, + max_pack_size=None, max_pack_objects=None, repo_dir=None): + self.repo_dir = repo_dir or repo() + self.file = None + self.parentfd = None self.count = 0 self.outbytes = 0 self.filename = None - self.file = None self.idx = None self.objcache_maker = objcache_maker self.objcache = None self.compression_level = compression_level + self.run_midx=run_midx + self.on_pack_finish = on_pack_finish + if not max_pack_size: + max_pack_size = git_config_get('pack.packSizeLimit', + repo_dir=self.repo_dir) + if max_pack_size is not None: + max_pack_size = parse_num(max_pack_size) + if not max_pack_size: + # larger packs slow down pruning + max_pack_size = 1000 * 1000 * 1000 + self.max_pack_size = max_pack_size + # cache memory usage is about 83 bytes per object + self.max_pack_objects = max_pack_objects if max_pack_objects \ + else max(1, self.max_pack_size // 5000) def __del__(self): self.close() + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + def _open(self): if not self.file: - (fd,name) = tempfile.mkstemp(suffix='.pack', dir=repo('objects')) - self.file = os.fdopen(fd, 'w+b') + objdir = dir = os.path.join(self.repo_dir, 'objects') + fd, name = tempfile.mkstemp(suffix='.pack', dir=objdir) + try: + self.file = os.fdopen(fd, 'w+b') + except: + os.close(fd) + raise + try: + self.parentfd = os.open(objdir, os.O_RDONLY) + except: + f = self.file + self.file = None + f.close() + raise assert(name.endswith('.pack')) self.filename = name[:-5] self.file.write('PACK\0\0\0\2\0\0\0\0') @@ -526,7 +694,7 @@ class PackWriter: oneblob = ''.join(datalist) try: f.write(oneblob) - except IOError, e: + except IOError as e: raise GitError, e, sys.exc_info()[2] nw = len(oneblob) crc = zlib.crc32(oneblob) & 0xffffffff @@ -548,13 +716,14 @@ class PackWriter: size, crc = self._raw_write(_encode_packobj(type, content, self.compression_level), sha=sha) - if self.outbytes >= max_pack_size or self.count >= max_pack_objects: + if self.outbytes >= self.max_pack_size \ + or self.count >= self.max_pack_objects: self.breakpoint() return sha def breakpoint(self): """Clear byte and object counts and return the last processed id.""" - id = self._end() + id = self._end(self.run_midx) self.outbytes = self.count = 0 return id @@ -570,13 +739,19 @@ class PackWriter: self._require_objcache() return self.objcache.exists(id, want_source=want_source) + def just_write(self, sha, type, content): + """Write an object to the pack file without checking for duplication.""" + self._write(sha, type, content) + # If nothing else, gc doesn't have/want an objcache + if self.objcache is not None: + self.objcache.add(sha) + def maybe_write(self, type, content): """Write an object to the pack file if not present and return its id.""" sha = calc_hash(type, content) if not self.exists(sha): - self._write(sha, type, content) self._require_objcache() - self.objcache.add(sha) + self.just_write(sha, type, content) return sha def new_blob(self, blob): @@ -588,66 +763,90 @@ class PackWriter: content = tree_encode(shalist) return self.maybe_write('tree', content) - def _new_commit(self, tree, parent, author, adate, committer, cdate, msg): + def new_commit(self, tree, parent, + author, adate_sec, adate_tz, + committer, cdate_sec, cdate_tz, + msg): + """Create a commit 
object in the pack. The date_sec values must be + epoch-seconds, and if a tz is None, the local timezone is assumed.""" + if adate_tz: + adate_str = _git_date_str(adate_sec, adate_tz) + else: + adate_str = _local_git_date_str(adate_sec) + if cdate_tz: + cdate_str = _git_date_str(cdate_sec, cdate_tz) + else: + cdate_str = _local_git_date_str(cdate_sec) l = [] if tree: l.append('tree %s' % tree.encode('hex')) if parent: l.append('parent %s' % parent.encode('hex')) - if author: l.append('author %s %s' % (author, _git_date(adate))) - if committer: l.append('committer %s %s' % (committer, _git_date(cdate))) + if author: l.append('author %s %s' % (author, adate_str)) + if committer: l.append('committer %s %s' % (committer, cdate_str)) l.append('') l.append(msg) return self.maybe_write('commit', '\n'.join(l)) - def new_commit(self, parent, tree, date, msg): - """Create a commit object in the pack.""" - userline = '%s <%s@%s>' % (userfullname(), username(), hostname()) - commit = self._new_commit(tree, parent, - userline, date, userline, date, - msg) - return commit - def abort(self): """Remove the pack file from disk.""" f = self.file if f: - self.idx = None + pfd = self.parentfd self.file = None - f.close() - os.unlink(self.filename + '.pack') + self.parentfd = None + self.idx = None + try: + try: + os.unlink(self.filename + '.pack') + finally: + f.close() + finally: + if pfd is not None: + os.close(pfd) def _end(self, run_midx=True): f = self.file if not f: return None self.file = None - self.objcache = None - idx = self.idx - self.idx = None + try: + self.objcache = None + idx = self.idx + self.idx = None - # update object count - f.seek(8) - cp = struct.pack('!i', self.count) - assert(len(cp) == 4) - f.write(cp) - - # calculate the pack sha1sum - f.seek(0) - sum = Sha1() - for b in chunkyreader(f): - sum.update(b) - packbin = sum.digest() - f.write(packbin) - f.close() + # update object count + f.seek(8) + cp = struct.pack('!i', self.count) + assert(len(cp) == 4) + f.write(cp) + + # calculate the pack sha1sum + f.seek(0) + sum = Sha1() + for b in chunkyreader(f): + sum.update(b) + packbin = sum.digest() + f.write(packbin) + fdatasync(f.fileno()) + finally: + f.close() obj_list_sha = self._write_pack_idx_v2(self.filename + '.idx', idx, packbin) - - nameprefix = repo('objects/pack/pack-%s' % obj_list_sha) + nameprefix = os.path.join(self.repo_dir, + 'objects/pack/pack-' + obj_list_sha) if os.path.exists(self.filename + '.map'): os.unlink(self.filename + '.map') os.rename(self.filename + '.pack', nameprefix + '.pack') os.rename(self.filename + '.idx', nameprefix + '.idx') + try: + os.fsync(self.parentfd) + finally: + os.close(self.parentfd) if run_midx: - auto_midx(repo('objects/pack')) + auto_midx(os.path.join(self.repo_dir, 'objects/pack')) + + if self.on_pack_finish: + self.on_pack_finish(nameprefix) + return nameprefix def close(self, run_midx=True): @@ -655,54 +854,70 @@ class PackWriter: return self._end(run_midx=run_midx) def _write_pack_idx_v2(self, filename, idx, packbin): + ofs64_count = 0 + for section in idx: + for entry in section: + if entry[2] >= 2**31: + ofs64_count += 1 + + # Length: header + fan-out + shas-and-crcs + overflow-offsets + index_len = 8 + (4 * 256) + (28 * self.count) + (8 * ofs64_count) + idx_map = None idx_f = open(filename, 'w+b') - idx_f.write('\377tOc\0\0\0\2') - - ofs64_ofs = 8 + 4*256 + 28*self.count - idx_f.truncate(ofs64_ofs) - idx_f.seek(0) - idx_map = mmap_readwrite(idx_f, close=False) - idx_f.seek(0, SEEK_END) - count = _helpers.write_idx(idx_f, 
idx_map, idx, self.count) - assert(count == self.count) - idx_map.close() - idx_f.write(packbin) - - idx_f.seek(0) - idx_sum = Sha1() - b = idx_f.read(8 + 4*256) - idx_sum.update(b) - - obj_list_sum = Sha1() - for b in chunkyreader(idx_f, 20*self.count): - idx_sum.update(b) - obj_list_sum.update(b) - namebase = obj_list_sum.hexdigest() - - for b in chunkyreader(idx_f): + try: + idx_f.truncate(index_len) + fdatasync(idx_f.fileno()) + idx_map = mmap_readwrite(idx_f, close=False) + try: + count = _helpers.write_idx(filename, idx_map, idx, self.count) + assert(count == self.count) + idx_map.flush() + finally: + idx_map.close() + finally: + idx_f.close() + + idx_f = open(filename, 'a+b') + try: + idx_f.write(packbin) + idx_f.seek(0) + idx_sum = Sha1() + b = idx_f.read(8 + 4*256) idx_sum.update(b) - idx_f.write(idx_sum.digest()) - idx_f.close() - - return namebase + obj_list_sum = Sha1() + for b in chunkyreader(idx_f, 20*self.count): + idx_sum.update(b) + obj_list_sum.update(b) + namebase = obj_list_sum.hexdigest() + + for b in chunkyreader(idx_f): + idx_sum.update(b) + idx_f.write(idx_sum.digest()) + fdatasync(idx_f.fileno()) + return namebase + finally: + idx_f.close() + + +def list_refs(patterns=None, repo_dir=None, + limit_to_heads=False, limit_to_tags=False): + """Yield (refname, hash) tuples for all repository refs unless + patterns are specified. In that case, only include tuples for + refs matching those patterns (cf. git-show-ref(1)). The limits + restrict the result items to refs/heads or refs/tags. If both + limits are specified, items from both sources will be included. -def _git_date(date): - return '%d %s' % (date, time.strftime('%z', time.localtime(date))) - - -def _gitenv(): - os.environ['GIT_DIR'] = os.path.abspath(repo()) - - -def list_refs(refname = None): - """Generate a list of tuples in the form (refname,hash). - If a ref name is specified, list only this particular ref. """ - argv = ['git', 'show-ref', '--'] - if refname: - argv += [refname] - p = subprocess.Popen(argv, preexec_fn = _gitenv, stdout = subprocess.PIPE) + argv = ['git', 'show-ref'] + if limit_to_heads: + argv.append('--heads') + if limit_to_tags: + argv.append('--tags') + argv.append('--') + if patterns: + argv.extend(patterns) + p = subprocess.Popen(argv, env=_gitenv(repo_dir), stdout=subprocess.PIPE) out = p.stdout.read().strip() rv = p.wait() # not fatal if rv: @@ -713,9 +928,10 @@ def list_refs(refname = None): yield (name, sha.decode('hex')) -def read_ref(refname): +def read_ref(refname, repo_dir = None): """Get the commit id of the most recent commit made on a given ref.""" - l = list(list_refs(refname)) + refs = list_refs(patterns=[refname], repo_dir=repo_dir, limit_to_heads=True) + l = tuple(islice(refs, 2)) if l: assert(len(l) == 1) return l[0][1] @@ -723,43 +939,71 @@ def read_ref(refname): return None -def rev_list(ref, count=None): - """Generate a list of reachable commits in reverse chronological order. - - This generator walks through commits, from child to parent, that are - reachable via the specified ref and yields a series of tuples of the form - (date,hash). 
+def rev_list_invocation(ref_or_refs, count=None, format=None): + if isinstance(ref_or_refs, compat.str_type): + refs = (ref_or_refs,) + else: + refs = ref_or_refs + argv = ['git', 'rev-list'] + if isinstance(count, Integral): + argv.extend(['-n', str(count)]) + elif count: + raise ValueError('unexpected count argument %r' % count) + + if format: + argv.append('--pretty=format:' + format) + for ref in refs: + assert not ref.startswith('-') + argv.append(ref) + argv.append('--') + return argv + + +def rev_list(ref_or_refs, count=None, parse=None, format=None, repo_dir=None): + """Yield information about commits as per "git rev-list". If a format + is not provided, yield one hex hash at a time. If a format is + provided, pass it to rev-list and call parse(git_stdout) for each + commit with the stream positioned just after the rev-list "commit + HASH" header line. When a format is provided yield (oidx, + parse(git_stdout)) for each commit. - If count is a non-zero integer, limit the number of commits to "count" - objects. """ - assert(not ref.startswith('-')) - opts = [] - if count: - opts += ['-n', str(atoi(count))] - argv = ['git', 'rev-list', '--pretty=format:%ct'] + opts + [ref, '--'] - p = subprocess.Popen(argv, preexec_fn = _gitenv, stdout = subprocess.PIPE) - commit = None - for row in p.stdout: - s = row.strip() - if s.startswith('commit '): - commit = s[7:].decode('hex') - else: - date = int(s) - yield (date, commit) + assert bool(parse) == bool(format) + p = subprocess.Popen(rev_list_invocation(ref_or_refs, count=count, + format=format), + env=_gitenv(repo_dir), + stdout = subprocess.PIPE) + if not format: + for line in p.stdout: + yield line.strip() + else: + line = p.stdout.readline() + while line: + s = line.strip() + if not s.startswith('commit '): + raise Exception('unexpected line ' + s) + s = s[7:] + assert len(s) == 40 + yield s, parse(p.stdout) + line = p.stdout.readline() + rv = p.wait() # not fatal if rv: raise GitError, 'git rev-list returned error %d' % rv -def rev_get_date(ref): - """Get the date of the latest commit on the specified ref.""" - for (date, commit) in rev_list(ref, count=1): - return date - raise GitError, 'no such commit %r' % ref +def get_commit_dates(refs, repo_dir=None): + """Get the dates for the specified commit refs. For now, every unique + string in refs must resolve to a different commit or this + function will fail.""" + result = [] + for ref in refs: + commit = get_commit_items(ref, cp(repo_dir)) + result.append(commit.author_sec) + return result -def rev_parse(committish): +def rev_parse(committish, repo_dir=None): """Resolve the full hash for 'committish', if it exists. Should be roughly equivalent to 'git rev-parse'. @@ -767,12 +1011,12 @@ def rev_parse(committish): Returns the hex value of the hash if it is found, None if 'committish' does not correspond to anything. 
""" - head = read_ref(committish) + head = read_ref(committish, repo_dir=repo_dir) if head: debug2("resolved from ref: commit = %s\n" % head.encode('hex')) return head - pL = PackIdxList(repo('objects/pack')) + pL = PackIdxList(repo('objects/pack', repo_dir=repo_dir)) if len(committish) == 40: try: @@ -786,14 +1030,24 @@ def rev_parse(committish): return None -def update_ref(refname, newval, oldval): - """Change the commit pointed to by a branch.""" +def update_ref(refname, newval, oldval, repo_dir=None): + """Update a repository reference.""" if not oldval: oldval = '' - assert(refname.startswith('refs/heads/')) + assert(refname.startswith('refs/heads/') \ + or refname.startswith('refs/tags/')) p = subprocess.Popen(['git', 'update-ref', refname, newval.encode('hex'), oldval.encode('hex')], - preexec_fn = _gitenv) + env=_gitenv(repo_dir)) + _git_wait('git update-ref', p) + + +def delete_ref(refname, oldvalue=None): + """Delete a repository reference (see git update-ref(1)).""" + assert(refname.startswith('refs/')) + oldvalue = [] if not oldvalue else [oldvalue] + p = subprocess.Popen(['git', 'update-ref', '-d', refname] + oldvalue, + env=_gitenv()) _git_wait('git update-ref', p) @@ -821,36 +1075,36 @@ def init_repo(path=None): if parent and not os.path.exists(parent): raise GitError('parent directory "%s" does not exist\n' % parent) if os.path.exists(d) and not os.path.isdir(os.path.join(d, '.')): - raise GitError('"%d" exists but is not a directory\n' % d) + raise GitError('"%s" exists but is not a directory\n' % d) p = subprocess.Popen(['git', '--bare', 'init'], stdout=sys.stderr, - preexec_fn = _gitenv) + env=_gitenv()) _git_wait('git init', p) # Force the index version configuration in order to ensure bup works # regardless of the version of the installed Git binary. p = subprocess.Popen(['git', 'config', 'pack.indexVersion', '2'], - stdout=sys.stderr, preexec_fn = _gitenv) + stdout=sys.stderr, env=_gitenv()) + _git_wait('git config', p) + # Enable the reflog + p = subprocess.Popen(['git', 'config', 'core.logAllRefUpdates', 'true'], + stdout=sys.stderr, env=_gitenv()) _git_wait('git config', p) def check_repo_or_die(path=None): - """Make sure a bup repository exists, and abort if not. - If the path to a particular repository was not specified, this function - initializes the default repository automatically. 
- """ + """Check to see if a bup repository probably exists, and abort if not.""" guess_repo(path) - try: - os.stat(repo('objects/pack/.')) - except OSError, e: - if e.errno == errno.ENOENT: - if repodir != home_repodir: - log('error: %r is not a bup repository; run "bup init"\n' - % repo()) - sys.exit(15) - else: - init_repo() - else: - log('error: %s\n' % e) - sys.exit(14) + top = repo() + pst = stat_if_exists(top + '/objects/pack') + if pst and stat.S_ISDIR(pst.st_mode): + return + if not pst: + top_st = stat_if_exists(top) + if not top_st: + log('error: repository %r does not exist (see "bup help init")\n' + % top) + sys.exit(15) + log('error: %r is not a repository\n' % top) + sys.exit(14) _ver = None @@ -880,19 +1134,6 @@ def ver(): return _ver -def _git_wait(cmd, p): - rv = p.wait() - if rv != 0: - raise GitError('%s returned %d' % (cmd, rv)) - - -def _git_capture(argv): - p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn = _gitenv) - r = p.stdout.read() - _git_wait(repr(argv), p) - return r - - class _AbortableIter: def __init__(self, it, onabort = None): self.it = it @@ -904,8 +1145,8 @@ class _AbortableIter: def next(self): try: - return self.it.next() - except StopIteration, e: + return next(self.it) + except StopIteration as e: self.done = True raise except: @@ -926,18 +1167,14 @@ class _AbortableIter: _ver_warned = 0 class CatPipe: """Link to 'git cat-file' that is used to retrieve blob data.""" - def __init__(self): + def __init__(self, repo_dir = None): global _ver_warned + self.repo_dir = repo_dir wanted = ('1','5','6') if ver() < wanted: - if not _ver_warned: - log('warning: git version < %s; bup will be slow.\n' - % '.'.join(wanted)) - _ver_warned = 1 - self.get = self._slow_get - else: - self.p = self.inprogress = None - self.get = self._fast_get + log('error: git version must be at least 1.5.6\n') + sys.exit(1) + self.p = self.inprogress = None def _abort(self): if self.p: @@ -946,83 +1183,75 @@ class CatPipe: self.p = None self.inprogress = None - def _restart(self): + def restart(self): self._abort() self.p = subprocess.Popen(['git', 'cat-file', '--batch'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds = True, bufsize = 4096, - preexec_fn = _gitenv) + env=_gitenv(self.repo_dir)) - def _fast_get(self, id): + def get(self, ref): + """Yield (oidx, type, size), followed by the data referred to by ref. + If ref does not exist, only yield (None, None, None). 
+ + """ if not self.p or self.p.poll() != None: - self._restart() + self.restart() assert(self.p) - assert(self.p.poll() == None) + poll_result = self.p.poll() + assert(poll_result == None) if self.inprogress: - log('_fast_get: opening %r while %r is open' - % (id, self.inprogress)) + log('get: opening %r while %r is open\n' % (ref, self.inprogress)) assert(not self.inprogress) - assert(id.find('\n') < 0) - assert(id.find('\r') < 0) - assert(not id.startswith('-')) - self.inprogress = id - self.p.stdin.write('%s\n' % id) + assert(ref.find('\n') < 0) + assert(ref.find('\r') < 0) + assert(not ref.startswith('-')) + self.inprogress = ref + self.p.stdin.write('%s\n' % ref) self.p.stdin.flush() hdr = self.p.stdout.readline() if hdr.endswith(' missing\n'): self.inprogress = None - raise KeyError('blob %r is missing' % id) - spl = hdr.split(' ') - if len(spl) != 3 or len(spl[0]) != 40: - raise GitError('expected blob, got %r' % spl) - (hex, type, size) = spl - - it = _AbortableIter(chunkyreader(self.p.stdout, int(spl[2])), - onabort = self._abort) + yield None, None, None + return + info = hdr.split(' ') + if len(info) != 3 or len(info[0]) != 40: + raise GitError('expected object (id, type, size), got %r' % info) + oidx, typ, size = info + size = int(size) + it = _AbortableIter(chunkyreader(self.p.stdout, size), + onabort=self._abort) try: - yield type + yield oidx, typ, size for blob in it: yield blob - assert(self.p.stdout.readline() == '\n') + readline_result = self.p.stdout.readline() + assert(readline_result == '\n') self.inprogress = None - except Exception, e: + except Exception as e: it.abort() raise - def _slow_get(self, id): - assert(id.find('\n') < 0) - assert(id.find('\r') < 0) - assert(id[0] != '-') - type = _git_capture(['git', 'cat-file', '-t', id]).strip() - yield type - - p = subprocess.Popen(['git', 'cat-file', type, id], - stdout=subprocess.PIPE, - preexec_fn = _gitenv) - for blob in chunkyreader(p.stdout): - yield blob - _git_wait('git cat-file', p) - def _join(self, it): - type = it.next() - if type == 'blob': + _, typ, _ = next(it) + if typ == 'blob': for blob in it: yield blob - elif type == 'tree': + elif typ == 'tree': treefile = ''.join(it) for (mode, name, sha) in tree_decode(treefile): for blob in self.join(sha.encode('hex')): yield blob - elif type == 'commit': + elif typ == 'commit': treeline = ''.join(it).split('\n')[0] assert(treeline.startswith('tree ')) for blob in self.join(treeline[5:]): yield blob else: raise GitError('invalid object type %r: expected blob/tree/commit' - % type) + % typ) def join(self, id): """Generate a list of the content of all blobs that can be reached @@ -1036,15 +1265,118 @@ class CatPipe: except StopIteration: log('booger!\n') -def tags(): + +_cp = {} + +def cp(repo_dir=None): + """Create a CatPipe object or reuse the already existing one.""" + global _cp, repodir + if not repo_dir: + repo_dir = repodir or repo() + repo_dir = os.path.abspath(repo_dir) + cp = _cp.get(repo_dir) + if not cp: + cp = CatPipe(repo_dir) + _cp[repo_dir] = cp + return cp + + +def tags(repo_dir = None): """Return a dictionary of all tags in the form {hash: [tag_names, ...]}.""" tags = {} - for (n,c) in list_refs(): - if n.startswith('refs/tags/'): - name = n[10:] - if not c in tags: - tags[c] = [] + for n, c in list_refs(repo_dir = repo_dir, limit_to_tags=True): + assert(n.startswith('refs/tags/')) + name = n[10:] + if not c in tags: + tags[c] = [] + tags[c].append(name) # more than one tag can point at 'c' + return tags - tags[c].append(name) # more than one tag 
can point at 'c' - return tags +class MissingObject(KeyError): + def __init__(self, oid): + self.oid = oid + KeyError.__init__(self, 'object %r is missing' % oid.encode('hex')) + + +WalkItem = namedtuple('WalkItem', ['oid', 'type', 'mode', + 'path', 'chunk_path', 'data']) +# The path is the mangled path, and if an item represents a fragment +# of a chunked file, the chunk_path will be the chunked subtree path +# for the chunk, i.e. ['', '2d3115e', ...]. The top-level path for a +# chunked file will have a chunk_path of ['']. So some chunk subtree +# of the file '/foo/bar/baz' might look like this: +# +# item.path = ['foo', 'bar', 'baz.bup'] +# item.chunk_path = ['', '2d3115e', '016b097'] +# item.type = 'tree' +# ... + + +def walk_object(get_ref, oidx, stop_at=None, include_data=None): + """Yield everything reachable from oidx via get_ref (which must behave + like CatPipe get) as a WalkItem, stopping whenever stop_at(oidx) + returns true. Throw MissingObject if a hash encountered is + missing from the repository, and don't read or return blob content + in the data field unless include_data is set. + + """ + # Maintain the pending stack on the heap to avoid stack overflow + pending = [(oidx, [], [], None)] + while len(pending): + oidx, parent_path, chunk_path, mode = pending.pop() + oid = oidx.decode('hex') + if stop_at and stop_at(oidx): + continue + + if (not include_data) and mode and stat.S_ISREG(mode): + # If the object is a "regular file", then it's a leaf in + # the graph, so we can skip reading the data if the caller + # hasn't requested it. + yield WalkItem(oid=oid, type='blob', + chunk_path=chunk_path, path=parent_path, + mode=mode, + data=None) + continue + + item_it = get_ref(oidx) + get_oidx, typ, _ = next(item_it) + if not get_oidx: + raise MissingObject(oidx.decode('hex')) + if typ not in ('blob', 'commit', 'tree'): + raise Exception('unexpected repository object type %r' % typ) + + # FIXME: set the mode based on the type when the mode is None + if typ == 'blob' and not include_data: + # Dump data until we can ask cat_pipe not to fetch it + for ignored in item_it: + pass + data = None + else: + data = ''.join(item_it) + + yield WalkItem(oid=oid, type=typ, + chunk_path=chunk_path, path=parent_path, + mode=mode, + data=(data if include_data else None)) + + if typ == 'commit': + commit_items = parse_commit(data) + for pid in commit_items.parents: + pending.append((pid, parent_path, chunk_path, mode)) + pending.append((commit_items.tree, parent_path, chunk_path, + hashsplit.GIT_MODE_TREE)) + elif typ == 'tree': + for mode, name, ent_id in tree_decode(data): + demangled, bup_type = demangle_name(name, mode) + if chunk_path: + sub_path = parent_path + sub_chunk_path = chunk_path + [name] + else: + sub_path = parent_path + [name] + if bup_type == BUP_CHUNKED: + sub_chunk_path = [''] + else: + sub_chunk_path = chunk_path + pending.append((ent_id.encode('hex'), sub_path, sub_chunk_path, + mode))
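
Illustrative addition (not part of the diff above): a minimal sketch of how the parse_commit()/CommitInfo API introduced by this change might be used, assuming bup's lib/ directory is on sys.path. The commit text below is a hypothetical, hand-built blob in the layout _commit_rx expects.

    from bup.git import parse_commit

    # Hypothetical commit blob, for illustration only; real callers normally
    # obtain this text from CatPipe.get() via get_cat_data(..., 'commit').
    raw = ('tree ' + '0' * 40 + '\n'
           'author Example Person <user@example.com> 1400000000 +0000\n'
           'committer Example Person <user@example.com> 1400000000 +0000\n'
           '\n'
           'example commit message\n')

    info = parse_commit(raw)      # returns a CommitInfo namedtuple
    print(info.tree)              # 40-char hex tree id
    print(info.parents)           # [] -- no 'parent' headers above
    print(info.author_sec)        # 1400000000 (UTC epoch seconds)
    print(info.committer_offset)  # 0, i.e. parse_tz_offset('+0000')
    print(info.message)           # 'example commit message\n'

Within the module itself this is what get_commit_items() does: it reads the raw commit blob through CatPipe.get() and hands it to parse_commit().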