diff --git a/lib/bup/git.py b/lib/bup/git.py
index 65892e5..f4e27ad 100644
--- a/lib/bup/git.py
+++ b/lib/bup/git.py
@@ -2,22 +2,42 @@
 bup repositories are in Git format. This library allows us to interact with the
 Git data structures.
 """
-import os, sys, zlib, time, subprocess, struct, stat, re, tempfile, glob
+
+from __future__ import absolute_import, print_function
+import errno, os, sys, zlib, time, subprocess, struct, stat, re, tempfile, glob
+from array import array
+from binascii import hexlify, unhexlify
 from collections import namedtuple
 from itertools import islice
+from numbers import Integral
+
+from bup import _helpers, compat, hashsplit, path, midx, bloom, xstat
+from bup.compat import (buffer,
+                        byte_int, bytes_from_byte, bytes_from_uint,
+                        environ,
+                        items,
+                        range,
+                        reraise)
+from bup.io import path_msg
+from bup.helpers import (Sha1, add_error, chunkyreader, debug1, debug2,
+                         fdatasync,
+                         hostname, localtime, log,
+                         merge_dict,
+                         merge_iter,
+                         mmap_read, mmap_readwrite,
+                         parse_num,
+                         progress, qprogress, stat_if_exists,
+                         unlink,
+                         utc_offset_str)
+from bup.pwdgrp import username, userfullname
 
-from bup.helpers import *
-from bup import _helpers, path, midx, bloom, xstat
-
-max_pack_size = 1000*1000*1000 # larger packs will slow down pruning
-max_pack_objects = 200*1000 # cache memory usage is about 83 bytes per object
 
 verbose = 0
-ignore_midx = 0
-repodir = None
+repodir = None # The default repository, once initialized
+
+_typemap = {b'blob': 3, b'tree': 2, b'commit': 1, b'tag': 4}
+_typermap = {v: k for k, v in items(_typemap)}
 
-_typemap = { 'blob':3, 'tree':2, 'commit':1, 'tag':4 }
-_typermap = { 3:'blob', 2:'tree', 1:'commit', 4:'tag' }
 
 _total_searches = 0
 _total_steps = 0
@@ -27,34 +47,61 @@ class GitError(Exception):
     pass
 
 
+def _gitenv(repo_dir=None):
+    if not repo_dir:
+        repo_dir = repo()
+    return merge_dict(environ, {b'GIT_DIR': os.path.abspath(repo_dir)})
+
+def _git_wait(cmd, p):
+    rv = p.wait()
+    if rv != 0:
+        raise GitError('%r returned %d' % (cmd, rv))
+
+def git_config_get(option, repo_dir=None):
+    cmd = (b'git', b'config', b'--get', option)
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                         env=_gitenv(repo_dir=repo_dir))
+    r = p.stdout.read()
+    rc = p.wait()
+    if rc == 0:
+        return r
+    if rc != 1:
+        raise GitError('%r returned %d' % (cmd, rc))
+    return None
+
+
 def parse_tz_offset(s):
     """UTC offset in seconds."""
     tz_off = (int(s[1:3]) * 60 * 60) + (int(s[3:5]) * 60)
-    if s[0] == '-':
+    if bytes_from_byte(s[0]) == b'-':
        return - tz_off
     return tz_off
 
 
 # FIXME: derived from http://git.rsbx.net/Documents/Git_Data_Formats.txt
 # Make sure that's authoritative.
-_start_end_char = r'[^ .,:;<>"\'\0\n]'
-_content_char = r'[^\0\n<>]'
-_safe_str_rx = '(?:%s{1,2}|(?:%s%s*%s))' \
+_start_end_char = br'[^ .,:;<>"\'\0\n]'
+_content_char = br'[^\0\n<>]'
+_safe_str_rx = br'(?:%s{1,2}|(?:%s%s*%s))' \
     % (_start_end_char, _start_end_char, _content_char, _start_end_char)
-_tz_rx = r'[-+]\d\d[0-5]\d'
-_parent_rx = r'(?:parent [abcdefABCDEF0123456789]{40}\n)'
-_commit_rx = re.compile(r'''tree (?P<tree>[abcdefABCDEF0123456789]{40})
+_tz_rx = br'[-+]\d\d[0-5]\d'
+_parent_rx = br'(?:parent [abcdefABCDEF0123456789]{40}\n)'
+# Assumes every following line starting with a space is part of the
+# mergetag.  Is there a formal commit blob spec?
+_mergetag_rx = br'(?:\nmergetag object [abcdefABCDEF0123456789]{40}(?:\n [^\0\n]*)*)'
+_commit_rx = re.compile(br'''tree (?P<tree>[abcdefABCDEF0123456789]{40})
 (?P<parents>%s*)author (?P<author_name>%s) <(?P<author_mail>%s)> (?P<asec>\d+) (?P<atz>%s)
-committer (?P<committer_name>%s) <(?P<committer_mail>%s)> (?P<csec>\d+) (?P<ctz>%s)
+committer (?P<committer_name>%s) <(?P<committer_mail>%s)> (?P<csec>\d+) (?P<ctz>%s)(?P<mergetag>%s?)
 
 (?P<message>(?:.|\n)*)''' % (_parent_rx, _safe_str_rx, _safe_str_rx, _tz_rx,
-                             _safe_str_rx, _safe_str_rx, _tz_rx))
-_parent_hash_rx = re.compile(r'\s*parent ([abcdefABCDEF0123456789]{40})\s*')
-
+                             _safe_str_rx, _safe_str_rx, _tz_rx,
+                             _mergetag_rx))
+_parent_hash_rx = re.compile(br'\s*parent ([abcdefABCDEF0123456789]{40})\s*')
 
-# Note that the author_sec and committer_sec values are (UTC) epoch seconds.
+# Note that the author_sec and committer_sec values are (UTC) epoch
+# seconds, and for now the mergetag is not included.
 CommitInfo = namedtuple('CommitInfo', ['tree', 'parents',
                                        'author_name', 'author_mail',
                                        'author_sec', 'author_offset',
@@ -80,66 +127,82 @@ def parse_commit(content):
                       message=matches['message'])
 
 
+def get_cat_data(cat_iterator, expected_type):
+    _, kind, _ = next(cat_iterator)
+    if kind != expected_type:
+        raise Exception('expected %r, saw %r' % (expected_type, kind))
+    return b''.join(cat_iterator)
+
 def get_commit_items(id, cp):
-    commit_it = cp.get(id)
-    assert(commit_it.next() == 'commit')
-    commit_content = ''.join(commit_it)
-    return parse_commit(commit_content)
+    return parse_commit(get_cat_data(cp.get(id), b'commit'))
+
+def _local_git_date_str(epoch_sec):
+    return b'%d %s' % (epoch_sec, utc_offset_str(epoch_sec))
+
+def _git_date_str(epoch_sec, tz_offset_sec):
+    offs = tz_offset_sec // 60
+    return b'%d %s%02d%02d' \
+        % (epoch_sec,
+           b'+' if offs >= 0 else b'-',
+           abs(offs) // 60,
+           abs(offs) % 60)
 
-def repo(sub = '', repo_dir=None):
+
+def repo(sub = b'', repo_dir=None):
     """Get the path to the git repository or one of its subdirectories."""
-    global repodir
     repo_dir = repo_dir or repodir
     if not repo_dir:
         raise GitError('You should call check_repo_or_die()')
 
     # If there's a .git subdirectory, then the actual repo is in there. 
- gd = os.path.join(repo_dir, '.git') + gd = os.path.join(repo_dir, b'.git') if os.path.exists(gd): - repodir = gd + repo_dir = gd return os.path.join(repo_dir, sub) +_shorten_hash_rx = \ + re.compile(br'([^0-9a-z]|\b)([0-9a-z]{7})[0-9a-z]{33}([^0-9a-z]|\b)') + def shorten_hash(s): - return re.sub(r'([^0-9a-z]|\b)([0-9a-z]{7})[0-9a-z]{33}([^0-9a-z]|\b)', - r'\1\2*\3', s) + return _shorten_hash_rx.sub(br'\1\2*\3', s) def repo_rel(path): full = os.path.abspath(path) - fullrepo = os.path.abspath(repo('')) - if not fullrepo.endswith('/'): - fullrepo += '/' + fullrepo = os.path.abspath(repo(b'')) + if not fullrepo.endswith(b'/'): + fullrepo += b'/' if full.startswith(fullrepo): path = full[len(fullrepo):] - if path.startswith('index-cache/'): - path = path[len('index-cache/'):] + if path.startswith(b'index-cache/'): + path = path[len(b'index-cache/'):] return shorten_hash(path) def all_packdirs(): - paths = [repo('objects/pack')] - paths += glob.glob(repo('index-cache/*/.')) + paths = [repo(b'objects/pack')] + paths += glob.glob(repo(b'index-cache/*/.')) return paths def auto_midx(objdir): - args = [path.exe(), 'midx', '--auto', '--dir', objdir] + args = [path.exe(), b'midx', b'--auto', b'--dir', objdir] try: - rv = subprocess.call(args, stdout=open('/dev/null', 'w')) - except OSError, e: + rv = subprocess.call(args, stdout=open(os.devnull, 'w')) + except OSError as e: # make sure 'args' gets printed to help with debugging add_error('%r: exception: %s' % (args, e)) raise if rv: add_error('%r: returned %d' % (args, rv)) - args = [path.exe(), 'bloom', '--dir', objdir] + args = [path.exe(), b'bloom', b'--dir', objdir] try: - rv = subprocess.call(args, stdout=open('/dev/null', 'w')) - except OSError, e: + rv = subprocess.call(args, stdout=open(os.devnull, 'w')) + except OSError as e: # make sure 'args' gets printed to help with debugging add_error('%r: exception: %s' % (args, e)) raise @@ -155,15 +218,15 @@ def mangle_name(name, mode, gitmode): """ if stat.S_ISREG(mode) and not stat.S_ISREG(gitmode): assert(stat.S_ISDIR(gitmode)) - return name + '.bup' - elif name.endswith('.bup') or name[:-1].endswith('.bup'): - return name + '.bupl' + return name + b'.bup' + elif name.endswith(b'.bup') or name[:-1].endswith(b'.bup'): + return name + b'.bupl' else: return name (BUP_NORMAL, BUP_CHUNKED) = (0,1) -def demangle_name(name): +def demangle_name(name, mode): """Remove name mangling from a file name, if necessary. 
The return value is a tuple (demangled_filename,mode), where mode is one of @@ -174,17 +237,20 @@ def demangle_name(name): For more information on the name mangling algorithm, see mangle_name() """ - if name.endswith('.bupl'): + if name.endswith(b'.bupl'): return (name[:-5], BUP_NORMAL) - elif name.endswith('.bup'): + elif name.endswith(b'.bup'): return (name[:-4], BUP_CHUNKED) + elif name.endswith(b'.bupm'): + return (name[:-5], + BUP_CHUNKED if stat.S_ISDIR(mode) else BUP_NORMAL) else: return (name, BUP_NORMAL) def calc_hash(type, content): """Calculate some content's hash in the Git fashion.""" - header = '%s %d\0' % (type, len(content)) + header = b'%s %d\0' % (type, len(content)) sum = Sha1(header) sum.update(content) return sum.digest() @@ -194,7 +260,7 @@ def shalist_item_sort_key(ent): (mode, name, id) = ent assert(mode+0 == mode) if stat.S_ISDIR(mode): - return name + '/' + return name + b'/' else: return name @@ -208,19 +274,19 @@ def tree_encode(shalist): assert(mode+0 == mode) assert(name) assert(len(bin) == 20) - s = '%o %s\0%s' % (mode,name,bin) - assert(s[0] != '0') # 0-padded octal is not acceptable in a git tree + s = b'%o %s\0%s' % (mode,name,bin) + assert s[0] != b'0' # 0-padded octal is not acceptable in a git tree l.append(s) - return ''.join(l) + return b''.join(l) def tree_decode(buf): """Generate a list of (mode,name,hash) from the git tree object in buf.""" ofs = 0 while ofs < len(buf): - z = buf.find('\0', ofs) + z = buf.find(b'\0', ofs) assert(z > ofs) - spl = buf[ofs:z].split(' ', 1) + spl = buf[ofs:z].split(b' ', 1) assert(len(spl) == 2) mode,name = spl sha = buf[z+1:z+1+20] @@ -229,21 +295,19 @@ def tree_decode(buf): def _encode_packobj(type, content, compression_level=1): - szout = '' + if compression_level not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9): + raise ValueError('invalid compression level %s' % compression_level) + szout = b'' sz = len(content) szbits = (sz & 0x0f) | (_typemap[type]<<4) sz >>= 4 while 1: if sz: szbits |= 0x80 - szout += chr(szbits) + szout += bytes_from_uint(szbits) if not sz: break szbits = sz & 0x7f sz >>= 7 - if compression_level > 9: - compression_level = 9 - elif compression_level < 0: - compression_level = 0 z = zlib.compressobj(compression_level) yield szout yield z.compress(content) @@ -252,7 +316,7 @@ def _encode_packobj(type, content, compression_level=1): def _encode_looseobj(type, content, compression_level=1): z = zlib.compressobj(compression_level) - yield z.compress('%s %d\0' % (type, len(content))) + yield z.compress(b'%s %d\0' % (type, len(content))) yield z.compress(content) yield z.flush() @@ -260,9 +324,9 @@ def _encode_looseobj(type, content, compression_level=1): def _decode_looseobj(buf): assert(buf); s = zlib.decompress(buf) - i = s.find('\0') + i = s.find(b'\0') assert(i > 0) - l = s[:i].split(' ') + l = s[:i].split(b' ') type = l[0] sz = int(l[1]) content = s[i+1:] @@ -273,14 +337,14 @@ def _decode_looseobj(buf): def _decode_packobj(buf): assert(buf) - c = ord(buf[0]) + c = byte_int(buf[0]) type = _typermap[(c & 0x70) >> 4] sz = c & 0x0f shift = 4 i = 0 while c & 0x80: i += 1 - c = ord(buf[i]) + c = byte_int(buf[i]) sz |= (c & 0x7f) << shift shift += 7 if not (c & 0x80): @@ -305,21 +369,18 @@ class PackIdx: return want_source and os.path.basename(self.name) or True return None - def __len__(self): - return int(self.fanout[255]) - def _idx_from_hash(self, hash): global _total_searches, _total_steps _total_searches += 1 assert(len(hash) == 20) - b1 = ord(hash[0]) + b1 = byte_int(hash[0]) start = self.fanout[b1-1] # 
range -1..254
         end = self.fanout[b1] # range 0..255
-        want = str(hash)
+        want = hash
         _total_steps += 1 # lookup table is a step
         while start < end:
             _total_steps += 1
-            mid = start + (end-start)/2
+            mid = start + (end - start) // 2
             v = self._idx_to_hash(mid)
             if v < want:
                 start = mid+1
@@ -336,22 +397,33 @@ class PackIdxV1(PackIdx):
         self.name = filename
         self.idxnames = [self.name]
         self.map = mmap_read(f)
-        self.fanout = list(struct.unpack('!256I',
-                                         str(buffer(self.map, 0, 256*4))))
+        # Min size for 'L' is 4, which is sufficient for struct's '!I'
+        self.fanout = array('L', struct.unpack('!256I', self.map[0:256 * 4]))
         self.fanout.append(0) # entry "-1"
-        nsha = self.fanout[255]
-        self.sha_ofs = 256*4
-        self.shatable = buffer(self.map, self.sha_ofs, nsha*24)
+        self.nsha = self.fanout[255]
+        self.sha_ofs = 256 * 4
+        # Avoid slicing shatable for individual hashes (very high overhead)
+        self.shatable = buffer(self.map, self.sha_ofs, self.nsha * 24)
+
+    def __len__(self):
+        return int(self.nsha) # int() from long for python 2
 
     def _ofs_from_idx(self, idx):
-        return struct.unpack('!I', str(self.shatable[idx*24 : idx*24+4]))[0]
+        if idx >= self.nsha or idx < 0:
+            raise IndexError('invalid pack index index %d' % idx)
+        ofs = self.sha_ofs + idx * 24
+        return struct.unpack_from('!I', self.map, offset=ofs)[0]
 
     def _idx_to_hash(self, idx):
-        return str(self.shatable[idx*24+4 : idx*24+24])
+        if idx >= self.nsha or idx < 0:
+            raise IndexError('invalid pack index index %d' % idx)
+        ofs = self.sha_ofs + idx * 24 + 4
+        return self.map[ofs : ofs + 20]
 
     def __iter__(self):
-        for i in xrange(self.fanout[255]):
-            yield buffer(self.map, 256*4 + 24*i + 4, 20)
+        start = self.sha_ofs + 4
+        for ofs in range(start, start + 24 * self.nsha, 24):
+            yield self.map[ofs : ofs + 20]
 
 
 class PackIdxV2(PackIdx):
@@ -360,38 +432,46 @@ class PackIdxV2(PackIdx):
         self.name = filename
         self.idxnames = [self.name]
         self.map = mmap_read(f)
-        assert(str(self.map[0:8]) == '\377tOc\0\0\0\2')
-        self.fanout = list(struct.unpack('!256I',
-                                         str(buffer(self.map, 8, 256*4))))
-        self.fanout.append(0) # entry "-1"
-        nsha = self.fanout[255]
+        assert self.map[0:8] == b'\377tOc\0\0\0\2'
+        # Min size for 'L' is 4, which is sufficient for struct's '!I'
+        self.fanout = array('L', struct.unpack_from('!256I', self.map, offset=8))
+        self.fanout.append(0)
+        self.nsha = self.fanout[255]
         self.sha_ofs = 8 + 256*4
-        self.shatable = buffer(self.map, self.sha_ofs, nsha*20)
-        self.ofstable = buffer(self.map,
-                               self.sha_ofs + nsha*20 + nsha*4,
-                               nsha*4)
-        self.ofs64table = buffer(self.map,
-                                 8 + 256*4 + nsha*20 + nsha*4 + nsha*4)
+        self.ofstable_ofs = self.sha_ofs + self.nsha * 20 + self.nsha * 4
+        self.ofs64table_ofs = self.ofstable_ofs + self.nsha * 4
+        # Avoid slicing this for individual hashes (very high overhead)
+        self.shatable = buffer(self.map, self.sha_ofs, self.nsha*20)
+
+    def __len__(self):
+        return int(self.nsha) # int() from long for python 2
 
     def _ofs_from_idx(self, idx):
-        ofs = struct.unpack('!I', str(buffer(self.ofstable, idx*4, 4)))[0]
+        if idx >= self.nsha or idx < 0:
+            raise IndexError('invalid pack index index %d' % idx)
+        ofs_ofs = self.ofstable_ofs + idx * 4
+        ofs = struct.unpack_from('!I', self.map, offset=ofs_ofs)[0]
         if ofs & 0x80000000:
             idx64 = ofs & 0x7fffffff
-            ofs = struct.unpack('!Q',
-                                str(buffer(self.ofs64table, idx64*8, 8)))[0]
+            ofs64_ofs = self.ofs64table_ofs + idx64 * 8
+            ofs = struct.unpack_from('!Q', self.map, offset=ofs64_ofs)[0]
         return ofs
 
     def _idx_to_hash(self, idx):
-        return str(self.shatable[idx*20:(idx+1)*20])
+        if idx >= self.nsha or idx 
< 0: + raise IndexError('invalid pack index index %d' % idx) + ofs = self.sha_ofs + idx * 20 + return self.map[ofs : ofs + 20] def __iter__(self): - for i in xrange(self.fanout[255]): - yield buffer(self.map, 8 + 256*4 + 20*i, 20) + start = self.sha_ofs + for ofs in range(start, start + 20 * self.nsha, 20): + yield self.map[ofs : ofs + 20] _mpi_count = 0 class PackIdxList: - def __init__(self, dir): + def __init__(self, dir, ignore_midx=False): global _mpi_count assert(_mpi_count == 0) # these things suck tons of VM; don't waste it _mpi_count += 1 @@ -400,6 +480,7 @@ class PackIdxList: self.packs = [] self.do_bloom = False self.bloom = None + self.ignore_midx = ignore_midx self.refresh() def __del__(self): @@ -425,7 +506,7 @@ class PackIdxList: else: _total_searches -= 1 # was counted by bloom return None - for i in xrange(len(self.packs)): + for i in range(len(self.packs)): p = self.packs[i] _total_searches -= 1 # will be incremented by sub-pack ix = p.exists(hash, want_source=want_source) @@ -445,12 +526,12 @@ class PackIdxList: If skip_midx is True, all work on .midx files will be skipped and .midx files will be removed from the list. - The module-global variable 'ignore_midx' can force this function to + The instance variable 'ignore_midx' can force this function to always act as if skip_midx was True. """ self.bloom = None # Always reopen the bloom as it may have been relaced self.do_bloom = False - skip_midx = skip_midx or ignore_midx + skip_midx = skip_midx or self.ignore_midx d = dict((p.name, p) for p in self.packs if not skip_midx or not isinstance(p, midx.PackMidx)) if os.path.exists(self.dir): @@ -460,15 +541,16 @@ class PackIdxList: if isinstance(ix, midx.PackMidx): for name in ix.idxnames: d[os.path.join(self.dir, name)] = ix - for full in glob.glob(os.path.join(self.dir,'*.midx')): + for full in glob.glob(os.path.join(self.dir,b'*.midx')): if not d.get(full): mx = midx.PackMidx(full) (mxd, mxf) = os.path.split(mx.name) broken = False for n in mx.idxnames: if not os.path.exists(os.path.join(mxd, n)): - log(('warning: index %s missing\n' + - ' used by %s\n') % (n, mxf)) + log(('warning: index %s missing\n' + ' used by %s\n') + % (path_msg(n), path_msg(mxf))) broken = True if broken: mx.close() @@ -492,22 +574,22 @@ class PackIdxList: d[os.path.join(self.dir, name)] = ix elif not ix.force_keep: debug1('midx: removing redundant: %s\n' - % os.path.basename(ix.name)) + % path_msg(os.path.basename(ix.name))) ix.close() unlink(ix.name) - for full in glob.glob(os.path.join(self.dir,'*.idx')): + for full in glob.glob(os.path.join(self.dir, b'*.idx')): if not d.get(full): try: ix = open_idx(full) - except GitError, e: + except GitError as e: add_error(e) continue d[full] = ix - bfull = os.path.join(self.dir, 'bup.bloom') + bfull = os.path.join(self.dir, b'bup.bloom') if self.bloom is None and os.path.exists(bfull): self.bloom = bloom.ShaBloom(bfull) self.packs = list(set(d.values())) - self.packs.sort(lambda x,y: -cmp(len(x),len(y))) + self.packs.sort(reverse=True, key=lambda x: len(x)) if self.bloom and self.bloom.valid() and len(self.bloom) >= len(self): self.do_bloom = True else: @@ -521,21 +603,22 @@ class PackIdxList: def open_idx(filename): - if filename.endswith('.idx'): + if filename.endswith(b'.idx'): f = open(filename, 'rb') header = f.read(8) - if header[0:4] == '\377tOc': + if header[0:4] == b'\377tOc': version = struct.unpack('!I', header[4:8])[0] if version == 2: return PackIdxV2(filename, f) else: raise GitError('%s: expected idx file version 2, got %d' - % (filename, 
version)) - elif len(header) == 8 and header[0:4] < '\377tOc': + % (path_msg(filename), version)) + elif len(header) == 8 and header[0:4] < b'\377tOc': return PackIdxV1(filename, f) else: - raise GitError('%s: unrecognized idx file header' % filename) - elif filename.endswith('.midx'): + raise GitError('%s: unrecognized idx file header' + % path_msg(filename)) + elif filename.endswith(b'.midx'): return midx.PackMidx(filename) else: raise GitError('idx filenames must end with .idx or .midx') @@ -554,31 +637,70 @@ def idxmerge(idxlist, final_progress=True): def _make_objcache(): - return PackIdxList(repo('objects/pack')) + return PackIdxList(repo(b'objects/pack')) + +# bup-gc assumes that it can disable all PackWriter activities +# (bloom/midx/cache) via the constructor and close() arguments. class PackWriter: """Writes Git objects inside a pack file.""" - def __init__(self, objcache_maker=_make_objcache, compression_level=1): + def __init__(self, objcache_maker=_make_objcache, compression_level=1, + run_midx=True, on_pack_finish=None, + max_pack_size=None, max_pack_objects=None, repo_dir=None): + self.repo_dir = repo_dir or repo() + self.file = None + self.parentfd = None self.count = 0 self.outbytes = 0 self.filename = None - self.file = None self.idx = None self.objcache_maker = objcache_maker self.objcache = None self.compression_level = compression_level + self.run_midx=run_midx + self.on_pack_finish = on_pack_finish + if not max_pack_size: + max_pack_size = git_config_get(b'pack.packSizeLimit', + repo_dir=self.repo_dir) + if max_pack_size is not None: + max_pack_size = parse_num(max_pack_size) + if not max_pack_size: + # larger packs slow down pruning + max_pack_size = 1000 * 1000 * 1000 + self.max_pack_size = max_pack_size + # cache memory usage is about 83 bytes per object + self.max_pack_objects = max_pack_objects if max_pack_objects \ + else max(1, self.max_pack_size // 5000) def __del__(self): self.close() + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + def _open(self): if not self.file: - (fd,name) = tempfile.mkstemp(suffix='.pack', dir=repo('objects')) - self.file = os.fdopen(fd, 'w+b') - assert(name.endswith('.pack')) + objdir = dir = os.path.join(self.repo_dir, b'objects') + fd, name = tempfile.mkstemp(suffix=b'.pack', dir=objdir) + try: + self.file = os.fdopen(fd, 'w+b') + except: + os.close(fd) + raise + try: + self.parentfd = os.open(objdir, os.O_RDONLY) + except: + f = self.file + self.file = None + f.close() + raise + assert name.endswith(b'.pack') self.filename = name[:-5] - self.file.write('PACK\0\0\0\2\0\0\0\0') - self.idx = list(list() for i in xrange(256)) + self.file.write(b'PACK\0\0\0\2\0\0\0\0') + self.idx = list(list() for i in range(256)) def _raw_write(self, datalist, sha): self._open() @@ -588,11 +710,11 @@ class PackWriter: # all-or-nothing. (The blob shouldn't be very big anyway, thanks # to our hashsplit algorithm.) f.write() does its own buffering, # but that's okay because we'll flush it in _end(). 
- oneblob = ''.join(datalist) + oneblob = b''.join(datalist) try: f.write(oneblob) - except IOError, e: - raise GitError, e, sys.exc_info()[2] + except IOError as e: + reraise(GitError(e)) nw = len(oneblob) crc = zlib.crc32(oneblob) & 0xffffffff self._update_idx(sha, crc, nw) @@ -603,7 +725,8 @@ class PackWriter: def _update_idx(self, sha, crc, size): assert(sha) if self.idx: - self.idx[ord(sha[0])].append((sha, crc, self.file.tell() - size)) + self.idx[byte_int(sha[0])].append((sha, crc, + self.file.tell() - size)) def _write(self, sha, type, content): if verbose: @@ -613,13 +736,14 @@ class PackWriter: size, crc = self._raw_write(_encode_packobj(type, content, self.compression_level), sha=sha) - if self.outbytes >= max_pack_size or self.count >= max_pack_objects: + if self.outbytes >= self.max_pack_size \ + or self.count >= self.max_pack_objects: self.breakpoint() return sha def breakpoint(self): """Clear byte and object counts and return the last processed id.""" - id = self._end() + id = self._end(self.run_midx) self.outbytes = self.count = 0 return id @@ -635,84 +759,115 @@ class PackWriter: self._require_objcache() return self.objcache.exists(id, want_source=want_source) + def just_write(self, sha, type, content): + """Write an object to the pack file without checking for duplication.""" + self._write(sha, type, content) + # If nothing else, gc doesn't have/want an objcache + if self.objcache is not None: + self.objcache.add(sha) + def maybe_write(self, type, content): """Write an object to the pack file if not present and return its id.""" sha = calc_hash(type, content) if not self.exists(sha): - self._write(sha, type, content) self._require_objcache() - self.objcache.add(sha) + self.just_write(sha, type, content) return sha def new_blob(self, blob): """Create a blob object in the pack with the supplied content.""" - return self.maybe_write('blob', blob) + return self.maybe_write(b'blob', blob) def new_tree(self, shalist): """Create a tree object in the pack.""" content = tree_encode(shalist) - return self.maybe_write('tree', content) - - def _new_commit(self, tree, parent, author, adate, committer, cdate, msg): + return self.maybe_write(b'tree', content) + + def new_commit(self, tree, parent, + author, adate_sec, adate_tz, + committer, cdate_sec, cdate_tz, + msg): + """Create a commit object in the pack. 
The date_sec values must be + epoch-seconds, and if a tz is None, the local timezone is assumed.""" + if adate_tz: + adate_str = _git_date_str(adate_sec, adate_tz) + else: + adate_str = _local_git_date_str(adate_sec) + if cdate_tz: + cdate_str = _git_date_str(cdate_sec, cdate_tz) + else: + cdate_str = _local_git_date_str(cdate_sec) l = [] - if tree: l.append('tree %s' % tree.encode('hex')) - if parent: l.append('parent %s' % parent.encode('hex')) - if author: l.append('author %s %s' % (author, _git_date(adate))) - if committer: l.append('committer %s %s' % (committer, _git_date(cdate))) - l.append('') + if tree: l.append(b'tree %s' % hexlify(tree)) + if parent: l.append(b'parent %s' % hexlify(parent)) + if author: l.append(b'author %s %s' % (author, adate_str)) + if committer: l.append(b'committer %s %s' % (committer, cdate_str)) + l.append(b'') l.append(msg) - return self.maybe_write('commit', '\n'.join(l)) - - def new_commit(self, parent, tree, date, msg): - """Create a commit object in the pack.""" - userline = '%s <%s@%s>' % (userfullname(), username(), hostname()) - commit = self._new_commit(tree, parent, - userline, date, userline, date, - msg) - return commit + return self.maybe_write(b'commit', b'\n'.join(l)) def abort(self): """Remove the pack file from disk.""" f = self.file if f: - self.idx = None + pfd = self.parentfd self.file = None - f.close() - os.unlink(self.filename + '.pack') + self.parentfd = None + self.idx = None + try: + try: + os.unlink(self.filename + b'.pack') + finally: + f.close() + finally: + if pfd is not None: + os.close(pfd) def _end(self, run_midx=True): f = self.file if not f: return None self.file = None - self.objcache = None - idx = self.idx - self.idx = None + try: + self.objcache = None + idx = self.idx + self.idx = None - # update object count - f.seek(8) - cp = struct.pack('!i', self.count) - assert(len(cp) == 4) - f.write(cp) - - # calculate the pack sha1sum - f.seek(0) - sum = Sha1() - for b in chunkyreader(f): - sum.update(b) - packbin = sum.digest() - f.write(packbin) - f.close() - - obj_list_sha = self._write_pack_idx_v2(self.filename + '.idx', idx, packbin) - - nameprefix = repo('objects/pack/pack-%s' % obj_list_sha) - if os.path.exists(self.filename + '.map'): - os.unlink(self.filename + '.map') - os.rename(self.filename + '.pack', nameprefix + '.pack') - os.rename(self.filename + '.idx', nameprefix + '.idx') + # update object count + f.seek(8) + cp = struct.pack('!i', self.count) + assert(len(cp) == 4) + f.write(cp) + + # calculate the pack sha1sum + f.seek(0) + sum = Sha1() + for b in chunkyreader(f): + sum.update(b) + packbin = sum.digest() + f.write(packbin) + fdatasync(f.fileno()) + finally: + f.close() + + obj_list_sha = self._write_pack_idx_v2(self.filename + b'.idx', idx, + packbin) + nameprefix = os.path.join(self.repo_dir, + b'objects/pack/pack-' + obj_list_sha) + if os.path.exists(self.filename + b'.map'): + os.unlink(self.filename + b'.map') + os.rename(self.filename + b'.pack', nameprefix + b'.pack') + os.rename(self.filename + b'.idx', nameprefix + b'.idx') + try: + os.fsync(self.parentfd) + finally: + os.close(self.parentfd) if run_midx: - auto_midx(repo('objects/pack')) + auto_midx(os.path.join(self.repo_dir, b'objects/pack')) + + if self.on_pack_finish: + self.on_pack_finish(nameprefix) + return nameprefix def close(self, run_midx=True): @@ -732,11 +887,15 @@ class PackWriter: idx_f = open(filename, 'w+b') try: idx_f.truncate(index_len) + fdatasync(idx_f.fileno()) idx_map = mmap_readwrite(idx_f, close=False) - count = 
_helpers.write_idx(filename, idx_map, idx, self.count) - assert(count == self.count) + try: + count = _helpers.write_idx(filename, idx_map, idx, self.count) + assert(count == self.count) + idx_map.flush() + finally: + idx_map.close() finally: - if idx_map: idx_map.close() idx_f.close() idx_f = open(filename, 'a+b') @@ -751,61 +910,48 @@ class PackWriter: for b in chunkyreader(idx_f, 20*self.count): idx_sum.update(b) obj_list_sum.update(b) - namebase = obj_list_sum.hexdigest() + namebase = hexlify(obj_list_sum.digest()) for b in chunkyreader(idx_f): idx_sum.update(b) idx_f.write(idx_sum.digest()) + fdatasync(idx_f.fileno()) return namebase finally: idx_f.close() -def _git_date(date): - return '%d %s' % (date, utc_offset_str(date)) - - -def _gitenv(repo_dir = None): - if not repo_dir: - repo_dir = repo() - def env(): - os.environ['GIT_DIR'] = os.path.abspath(repo_dir) - return env - - -def list_refs(refname=None, repo_dir=None, +def list_refs(patterns=None, repo_dir=None, limit_to_heads=False, limit_to_tags=False): - """Yield (refname, hash) tuples for all repository refs unless a ref - name is specified. Given a ref name, only include tuples for that - particular ref. The limits restrict the result items to - refs/heads or refs/tags. If both limits are specified, items from - both sources will be included. + """Yield (refname, hash) tuples for all repository refs unless + patterns are specified. In that case, only include tuples for + refs matching those patterns (cf. git-show-ref(1)). The limits + restrict the result items to refs/heads or refs/tags. If both + limits are specified, items from both sources will be included. """ - argv = ['git', 'show-ref'] + argv = [b'git', b'show-ref'] if limit_to_heads: - argv.append('--heads') + argv.append(b'--heads') if limit_to_tags: - argv.append('--tags') - argv.append('--') - if refname: - argv += [refname] - p = subprocess.Popen(argv, - preexec_fn = _gitenv(repo_dir), - stdout = subprocess.PIPE) + argv.append(b'--tags') + argv.append(b'--') + if patterns: + argv.extend(patterns) + p = subprocess.Popen(argv, env=_gitenv(repo_dir), stdout=subprocess.PIPE) out = p.stdout.read().strip() rv = p.wait() # not fatal if rv: assert(not out) if out: - for d in out.split('\n'): - (sha, name) = d.split(' ', 1) - yield (name, sha.decode('hex')) + for d in out.split(b'\n'): + sha, name = d.split(b' ', 1) + yield name, unhexlify(sha) def read_ref(refname, repo_dir = None): """Get the commit id of the most recent commit made on a given ref.""" - refs = list_refs(refname, repo_dir=repo_dir, limit_to_heads=True) + refs = list_refs(patterns=[refname], repo_dir=repo_dir, limit_to_heads=True) l = tuple(islice(refs, 2)) if l: assert(len(l) == 1) @@ -814,35 +960,57 @@ def read_ref(refname, repo_dir = None): return None -def rev_list(ref, count=None, repo_dir=None): - """Generate a list of reachable commits in reverse chronological order. 
+def rev_list_invocation(ref_or_refs, count=None, format=None): + if isinstance(ref_or_refs, bytes): + refs = (ref_or_refs,) + else: + refs = ref_or_refs + argv = [b'git', b'rev-list'] + if isinstance(count, Integral): + argv.extend([b'-n', b'%d' % count]) + elif count: + raise ValueError('unexpected count argument %r' % count) + + if format: + argv.append(b'--pretty=format:' + format) + for ref in refs: + assert not ref.startswith(b'-') + argv.append(ref) + argv.append(b'--') + return argv + - This generator walks through commits, from child to parent, that are - reachable via the specified ref and yields a series of tuples of the form - (date,hash). +def rev_list(ref_or_refs, count=None, parse=None, format=None, repo_dir=None): + """Yield information about commits as per "git rev-list". If a format + is not provided, yield one hex hash at a time. If a format is + provided, pass it to rev-list and call parse(git_stdout) for each + commit with the stream positioned just after the rev-list "commit + HASH" header line. When a format is provided yield (oidx, + parse(git_stdout)) for each commit. - If count is a non-zero integer, limit the number of commits to "count" - objects. """ - assert(not ref.startswith('-')) - opts = [] - if count: - opts += ['-n', str(atoi(count))] - argv = ['git', 'rev-list', '--pretty=format:%at'] + opts + [ref, '--'] - p = subprocess.Popen(argv, - preexec_fn = _gitenv(repo_dir), + assert bool(parse) == bool(format) + p = subprocess.Popen(rev_list_invocation(ref_or_refs, count=count, + format=format), + env=_gitenv(repo_dir), stdout = subprocess.PIPE) - commit = None - for row in p.stdout: - s = row.strip() - if s.startswith('commit '): - commit = s[7:].decode('hex') - else: - date = int(s) - yield (date, commit) + if not format: + for line in p.stdout: + yield line.strip() + else: + line = p.stdout.readline() + while line: + s = line.strip() + if not s.startswith(b'commit '): + raise Exception('unexpected line ' + repr(s)) + s = s[7:] + assert len(s) == 40 + yield s, parse(p.stdout) + line = p.stdout.readline() + rv = p.wait() # not fatal if rv: - raise GitError, 'git rev-list returned error %d' % rv + raise GitError('git rev-list returned error %d' % rv) def get_commit_dates(refs, repo_dir=None): @@ -866,14 +1034,14 @@ def rev_parse(committish, repo_dir=None): """ head = read_ref(committish, repo_dir=repo_dir) if head: - debug2("resolved from ref: commit = %s\n" % head.encode('hex')) + debug2("resolved from ref: commit = %s\n" % hexlify(head)) return head - pL = PackIdxList(repo('objects/pack', repo_dir=repo_dir)) + pL = PackIdxList(repo(b'objects/pack', repo_dir=repo_dir)) if len(committish) == 40: try: - hash = committish.decode('hex') + hash = unhexlify(committish) except TypeError: return None @@ -886,20 +1054,21 @@ def rev_parse(committish, repo_dir=None): def update_ref(refname, newval, oldval, repo_dir=None): """Update a repository reference.""" if not oldval: - oldval = '' - assert(refname.startswith('refs/heads/') \ - or refname.startswith('refs/tags/')) - p = subprocess.Popen(['git', 'update-ref', refname, - newval.encode('hex'), oldval.encode('hex')], - preexec_fn = _gitenv(repo_dir)) - _git_wait('git update-ref', p) - - -def delete_ref(refname): - """Delete a repository reference.""" - assert(refname.startswith('refs/')) - p = subprocess.Popen(['git', 'update-ref', '-d', refname], - preexec_fn = _gitenv()) + oldval = b'' + assert refname.startswith(b'refs/heads/') \ + or refname.startswith(b'refs/tags/') + p = subprocess.Popen([b'git', b'update-ref', 
refname, + hexlify(newval), hexlify(oldval)], + env=_gitenv(repo_dir)) + _git_wait(b'git update-ref', p) + + +def delete_ref(refname, oldvalue=None): + """Delete a repository reference (see git update-ref(1)).""" + assert refname.startswith(b'refs/') + oldvalue = [] if not oldvalue else [oldvalue] + p = subprocess.Popen([b'git', b'update-ref', b'-d', refname] + oldvalue, + env=_gitenv()) _git_wait('git update-ref', p) @@ -914,9 +1083,9 @@ def guess_repo(path=None): if path: repodir = path if not repodir: - repodir = os.environ.get('BUP_DIR') + repodir = environ.get(b'BUP_DIR') if not repodir: - repodir = os.path.expanduser('~/.bup') + repodir = os.path.expanduser(b'~/.bup') def init_repo(path=None): @@ -925,39 +1094,39 @@ def init_repo(path=None): d = repo() # appends a / to the path parent = os.path.dirname(os.path.dirname(d)) if parent and not os.path.exists(parent): - raise GitError('parent directory "%s" does not exist\n' % parent) - if os.path.exists(d) and not os.path.isdir(os.path.join(d, '.')): - raise GitError('"%s" exists but is not a directory\n' % d) - p = subprocess.Popen(['git', '--bare', 'init'], stdout=sys.stderr, - preexec_fn = _gitenv()) + raise GitError('parent directory "%s" does not exist\n' + % path_msg(parent)) + if os.path.exists(d) and not os.path.isdir(os.path.join(d, b'.')): + raise GitError('"%s" exists but is not a directory\n' % path_msg(d)) + p = subprocess.Popen([b'git', b'--bare', b'init'], stdout=sys.stderr, + env=_gitenv()) _git_wait('git init', p) # Force the index version configuration in order to ensure bup works # regardless of the version of the installed Git binary. - p = subprocess.Popen(['git', 'config', 'pack.indexVersion', '2'], - stdout=sys.stderr, preexec_fn = _gitenv()) + p = subprocess.Popen([b'git', b'config', b'pack.indexVersion', '2'], + stdout=sys.stderr, env=_gitenv()) _git_wait('git config', p) # Enable the reflog - p = subprocess.Popen(['git', 'config', 'core.logAllRefUpdates', 'true'], - stdout=sys.stderr, preexec_fn = _gitenv()) + p = subprocess.Popen([b'git', b'config', b'core.logAllRefUpdates', b'true'], + stdout=sys.stderr, env=_gitenv()) _git_wait('git config', p) def check_repo_or_die(path=None): - """Make sure a bup repository exists, and abort if not. - If the path to a particular repository was not specified, this function - initializes the default repository automatically. - """ + """Check to see if a bup repository probably exists, and abort if not.""" guess_repo(path) - try: - os.stat(repo('objects/pack/.')) - except OSError, e: - if e.errno == errno.ENOENT: - log('error: %r is not a bup repository; run "bup init"\n' - % repo()) + top = repo() + pst = stat_if_exists(top + b'/objects/pack') + if pst and stat.S_ISDIR(pst.st_mode): + return + if not pst: + top_st = stat_if_exists(top) + if not top_st: + log('error: repository %r does not exist (see "bup help init")\n' + % top) sys.exit(15) - else: - log('error: %s\n' % e) - sys.exit(14) + log('error: %s is not a repository\n' % path_msg(top)) + sys.exit(14) _ver = None @@ -968,38 +1137,25 @@ def ver(): representing a digit in the version tag. 
For example, the following tuple would represent version 1.6.6.9: - ('1', '6', '6', '9') + (1, 6, 6, 9) """ global _ver if not _ver: - p = subprocess.Popen(['git', '--version'], - stdout=subprocess.PIPE) + p = subprocess.Popen([b'git', b'--version'], stdout=subprocess.PIPE) gvs = p.stdout.read() _git_wait('git --version', p) - m = re.match(r'git version (\S+.\S+)', gvs) + m = re.match(br'git version (\S+.\S+)', gvs) if not m: raise GitError('git --version weird output: %r' % gvs) - _ver = tuple(m.group(1).split('.')) - needed = ('1','5', '3', '1') + _ver = tuple(int(x) for x in m.group(1).split(b'.')) + needed = (1, 5, 3, 1) if _ver < needed: raise GitError('git version %s or higher is required; you have %s' - % ('.'.join(needed), '.'.join(_ver))) + % ('.'.join(str(x) for x in needed), + '.'.join(str(x) for x in _ver))) return _ver -def _git_wait(cmd, p): - rv = p.wait() - if rv != 0: - raise GitError('%s returned %d' % (cmd, rv)) - - -def _git_capture(argv): - p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn = _gitenv()) - r = p.stdout.read() - _git_wait(repr(argv), p) - return r - - class _AbortableIter: def __init__(self, it, onabort = None): self.it = it @@ -1009,16 +1165,18 @@ class _AbortableIter: def __iter__(self): return self - def next(self): + def __next__(self): try: - return self.it.next() - except StopIteration, e: + return next(self.it) + except StopIteration as e: self.done = True raise except: self.abort() raise + next = __next__ + def abort(self): """Abort iteration and call the abortion callback, if needed.""" if not self.done: @@ -1030,22 +1188,15 @@ class _AbortableIter: self.abort() -_ver_warned = 0 class CatPipe: """Link to 'git cat-file' that is used to retrieve blob data.""" def __init__(self, repo_dir = None): - global _ver_warned self.repo_dir = repo_dir - wanted = ('1','5','6') + wanted = (1, 5, 6) if ver() < wanted: - if not _ver_warned: - log('warning: git version < %s; bup will be slow.\n' - % '.'.join(wanted)) - _ver_warned = 1 - self.get = self._slow_get - else: - self.p = self.inprogress = None - self.get = self._fast_get + log('error: git version must be at least 1.5.6\n') + sys.exit(1) + self.p = self.inprogress = None def _abort(self): if self.p: @@ -1054,85 +1205,75 @@ class CatPipe: self.p = None self.inprogress = None - def _restart(self): + def restart(self): self._abort() - self.p = subprocess.Popen(['git', 'cat-file', '--batch'], + self.p = subprocess.Popen([b'git', b'cat-file', b'--batch'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds = True, bufsize = 4096, - preexec_fn = _gitenv(self.repo_dir)) + env=_gitenv(self.repo_dir)) + + def get(self, ref): + """Yield (oidx, type, size), followed by the data referred to by ref. + If ref does not exist, only yield (None, None, None). 
- def _fast_get(self, id): + """ if not self.p or self.p.poll() != None: - self._restart() + self.restart() assert(self.p) poll_result = self.p.poll() assert(poll_result == None) if self.inprogress: - log('_fast_get: opening %r while %r is open\n' - % (id, self.inprogress)) + log('get: opening %r while %r is open\n' % (ref, self.inprogress)) assert(not self.inprogress) - assert(id.find('\n') < 0) - assert(id.find('\r') < 0) - assert(not id.startswith('-')) - self.inprogress = id - self.p.stdin.write('%s\n' % id) + assert ref.find(b'\n') < 0 + assert ref.find(b'\r') < 0 + assert not ref.startswith(b'-') + self.inprogress = ref + self.p.stdin.write(ref + b'\n') self.p.stdin.flush() hdr = self.p.stdout.readline() - if hdr.endswith(' missing\n'): + if hdr.endswith(b' missing\n'): self.inprogress = None - raise KeyError('blob %r is missing' % id) - spl = hdr.split(' ') - if len(spl) != 3 or len(spl[0]) != 40: - raise GitError('expected blob, got %r' % spl) - (hex, type, size) = spl - - it = _AbortableIter(chunkyreader(self.p.stdout, int(spl[2])), - onabort = self._abort) + yield None, None, None + return + info = hdr.split(b' ') + if len(info) != 3 or len(info[0]) != 40: + raise GitError('expected object (id, type, size), got %r' % info) + oidx, typ, size = info + size = int(size) + it = _AbortableIter(chunkyreader(self.p.stdout, size), + onabort=self._abort) try: - yield type + yield oidx, typ, size for blob in it: yield blob readline_result = self.p.stdout.readline() - assert(readline_result == '\n') + assert readline_result == b'\n' self.inprogress = None - except Exception, e: + except Exception as e: it.abort() raise - def _slow_get(self, id): - assert(id.find('\n') < 0) - assert(id.find('\r') < 0) - assert(id[0] != '-') - type = _git_capture(['git', 'cat-file', '-t', id]).strip() - yield type - - p = subprocess.Popen(['git', 'cat-file', type, id], - stdout=subprocess.PIPE, - preexec_fn = _gitenv(self.repo_dir)) - for blob in chunkyreader(p.stdout): - yield blob - _git_wait('git cat-file', p) - def _join(self, it): - type = it.next() - if type == 'blob': + _, typ, _ = next(it) + if typ == b'blob': for blob in it: yield blob - elif type == 'tree': - treefile = ''.join(it) + elif typ == b'tree': + treefile = b''.join(it) for (mode, name, sha) in tree_decode(treefile): - for blob in self.join(sha.encode('hex')): + for blob in self.join(hexlify(sha)): yield blob - elif type == 'commit': - treeline = ''.join(it).split('\n')[0] - assert(treeline.startswith('tree ')) + elif typ == b'commit': + treeline = b''.join(it).split(b'\n')[0] + assert treeline.startswith(b'tree ') for blob in self.join(treeline[5:]): yield blob else: raise GitError('invalid object type %r: expected blob/tree/commit' - % type) + % typ) def join(self, id): """Generate a list of the content of all blobs that can be reached @@ -1151,9 +1292,9 @@ _cp = {} def cp(repo_dir=None): """Create a CatPipe object or reuse the already existing one.""" - global _cp + global _cp, repodir if not repo_dir: - repo_dir = repo() + repo_dir = repodir or repo() repo_dir = os.path.abspath(repo_dir) cp = _cp.get(repo_dir) if not cp: @@ -1166,9 +1307,98 @@ def tags(repo_dir = None): """Return a dictionary of all tags in the form {hash: [tag_names, ...]}.""" tags = {} for n, c in list_refs(repo_dir = repo_dir, limit_to_tags=True): - assert(n.startswith('refs/tags/')) + assert n.startswith(b'refs/tags/') name = n[10:] if not c in tags: tags[c] = [] tags[c].append(name) # more than one tag can point at 'c' return tags + + +class 
MissingObject(KeyError):
+    def __init__(self, oid):
+        self.oid = oid
+        KeyError.__init__(self, 'object %r is missing' % hexlify(oid))
+
+
+WalkItem = namedtuple('WalkItem', ['oid', 'type', 'mode',
+                                   'path', 'chunk_path', 'data'])
+# The path is the mangled path, and if an item represents a fragment
+# of a chunked file, the chunk_path will be the chunked subtree path
+# for the chunk, i.e. ['', '2d3115e', ...]. The top-level path for a
+# chunked file will have a chunk_path of ['']. So some chunk subtree
+# of the file '/foo/bar/baz' might look like this:
+#
+# item.path = ['foo', 'bar', 'baz.bup']
+# item.chunk_path = ['', '2d3115e', '016b097']
+# item.type = 'tree'
+# ...
+
+
+def walk_object(get_ref, oidx, stop_at=None, include_data=None):
+    """Yield everything reachable from oidx via get_ref (which must behave
+    like CatPipe get) as a WalkItem, stopping whenever stop_at(oidx)
+    returns true. Throw MissingObject if a hash encountered is
+    missing from the repository, and don't read or return blob content
+    in the data field unless include_data is set.
+
+    """
+    # Maintain the pending stack on the heap to avoid stack overflow
+    pending = [(oidx, [], [], None)]
+    while len(pending):
+        oidx, parent_path, chunk_path, mode = pending.pop()
+        oid = unhexlify(oidx)
+        if stop_at and stop_at(oidx):
+            continue
+
+        if (not include_data) and mode and stat.S_ISREG(mode):
+            # If the object is a "regular file", then it's a leaf in
+            # the graph, so we can skip reading the data if the caller
+            # hasn't requested it.
+            yield WalkItem(oid=oid, type=b'blob',
+                           chunk_path=chunk_path, path=parent_path,
+                           mode=mode,
+                           data=None)
+            continue
+
+        item_it = get_ref(oidx)
+        get_oidx, typ, _ = next(item_it)
+        if not get_oidx:
+            raise MissingObject(unhexlify(oidx))
+        if typ not in (b'blob', b'commit', b'tree'):
+            raise Exception('unexpected repository object type %r' % typ)
+
+        # FIXME: set the mode based on the type when the mode is None
+        if typ == b'blob' and not include_data:
+            # Dump data until we can ask cat_pipe not to fetch it
+            for ignored in item_it:
+                pass
+            data = None
+        else:
+            data = b''.join(item_it)
+
+        yield WalkItem(oid=oid, type=typ,
+                       chunk_path=chunk_path, path=parent_path,
+                       mode=mode,
+                       data=(data if include_data else None))
+
+        if typ == b'commit':
+            commit_items = parse_commit(data)
+            for pid in commit_items.parents:
+                pending.append((pid, parent_path, chunk_path, mode))
+            pending.append((commit_items.tree, parent_path, chunk_path,
+                            hashsplit.GIT_MODE_TREE))
+        elif typ == b'tree':
+            for mode, name, ent_id in tree_decode(data):
+                demangled, bup_type = demangle_name(name, mode)
+                if chunk_path:
+                    sub_path = parent_path
+                    sub_chunk_path = chunk_path + [name]
+                else:
+                    sub_path = parent_path + [name]
+                    if bup_type == BUP_CHUNKED:
+                        sub_chunk_path = [b'']
+                    else:
+                        sub_chunk_path = chunk_path
+                pending.append((hexlify(ent_id), sub_path, sub_chunk_path,
+                                mode))
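
Editor's note (illustration, not part of the patch): demangle_name() now
takes the tree entry's git mode, because a b'.bupm' metadata entry can be
stored either as a plain blob or as a chunked subtree. A small sketch of
the naming scheme as this diff defines it; the file names below are made
up, and the asserts were checked by hand against the code above:

    from bup import git

    # Chunked regular files are stored as git subtrees, so their names
    # gain b'.bup'; names that could be mistaken for mangled ones gain
    # b'.bupl' instead.
    assert git.mangle_name(b'foo', 0o100644, 0o40000) == b'foo.bup'
    assert git.mangle_name(b'x.bup', 0o100644, 0o100644) == b'x.bup.bupl'

    # Demangling needs the mode: a b'.bupm' entry demangles as chunked
    # only when it is itself a subtree (directory mode).
    assert git.demangle_name(b'foo.bup', 0o40000) == (b'foo', git.BUP_CHUNKED)
    assert git.demangle_name(b'x.bup.bupl', 0o100644) == (b'x.bup', git.BUP_NORMAL)
    assert git.demangle_name(b'.bupm', 0o40000) == (b'', git.BUP_CHUNKED)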
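
Editor's note (illustration, not part of the patch): _encode_packobj()
now rejects out-of-range compression levels instead of clamping them,
but its output layout is unchanged: a git pack object header (type
nibble plus base-128 size) followed by the zlib stream. A worked example
that calls these private helpers purely to show the byte layout:

    from bup import git

    packed = b''.join(git._encode_packobj(b'blob', b'x' * 1000))
    # First byte: continuation flag (0x80) | blob type (3 << 4) | low
    # four size bits (1000 & 0xf == 8) == 0xb8; the second byte holds
    # the remaining size bits (1000 >> 4 == 62 == 0x3e).
    assert packed[:2] == b'\xb8\x3e'
    assert git._decode_packobj(packed) == (b'blob', b'x' * 1000)

    # _encode_packobj is a generator, so the new ValueError for a bad
    # level surfaces on first iteration rather than at the call itself:
    try:
        b''.join(git._encode_packobj(b'blob', b'data', compression_level=10))
    except ValueError:
        pass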
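
Editor's note (illustration, not part of the patch): a rough end-to-end
sketch of the reworked API, assuming an already-initialized repository
(BUP_DIR or ~/.bup); the identity, branch name, and contents below are
invented. PackWriter is now a context manager, new_commit() takes
explicit author/committer data (a tz offset of None means local time),
and CatPipe.get() yields an (oidx, type, size) header before the data:

    from __future__ import print_function
    import time
    from binascii import hexlify
    from bup import git

    git.check_repo_or_die()

    userline = b'Example User <user@example.com>'   # made-up identity
    now = int(time.time())
    with git.PackWriter() as w:
        blob = w.new_blob(b'hello, bup\n')
        tree = w.new_tree([(0o100644, b'greeting', blob)])
        commit = w.new_commit(tree, None,            # tree, no parent
                              userline, now, None,   # author, local tz
                              userline, now, None,   # committer
                              b'example commit\n')
    git.update_ref(b'refs/heads/example', commit, None)

    # Read it back through the shared CatPipe, then walk everything
    # reachable from the new commit.
    cp = git.cp()
    info = git.parse_commit(git.get_cat_data(cp.get(hexlify(commit)),
                                             b'commit'))
    assert info.tree == hexlify(tree)
    for item in git.walk_object(cp.get, hexlify(commit), include_data=True):
        print(item.type.decode('ascii'), hexlify(item.oid).decode('ascii'))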