tag: use git.update_ref()
diff --git a/lib/bup/git.py b/lib/bup/git.py
index 49e98019c2927c023de97a8683a6f4992c08c518..69134ac33ae4ba0df4eb6bb691b51bbf6e7572e7 100644
@@ -2,19 +2,44 @@
 bup repositories are in Git format. This library allows us to
 interact with the Git data structures.
 """
-import os, zlib, time, subprocess, struct, stat, re, tempfile, heapq
-from bup.helpers import *
-from bup import _helpers
 
-MIDX_VERSION = 2
+from __future__ import absolute_import, print_function
+import os, sys, zlib, subprocess, struct, stat, re, tempfile, glob
+from array import array
+from binascii import hexlify, unhexlify
+from collections import namedtuple
+from itertools import islice
+
+from bup import _helpers, hashsplit, path, midx, bloom, xstat
+from bup.compat import (buffer,
+                        byte_int, bytes_from_byte, bytes_from_uint,
+                        environ,
+                        ExitStack,
+                        items,
+                        pending_raise,
+                        range,
+                        reraise)
+from bup.io import path_msg
+from bup.helpers import (Sha1, add_error, chunkyreader, debug1, debug2,
+                         exo,
+                         fdatasync,
+                         finalized,
+                         log,
+                         merge_dict,
+                         merge_iter,
+                         mmap_read, mmap_readwrite,
+                         nullcontext_if_not,
+                         progress, qprogress, stat_if_exists,
+                         unlink,
+                         utc_offset_str)
+
 
 verbose = 0
-ignore_midx = 0
-home_repodir = os.path.expanduser('~/.bup')
-repodir = None
+repodir = None  # The default repository, once initialized
+
+_typemap =  {b'blob': 3, b'tree': 2, b'commit': 1, b'tag': 4}
+_typermap = {v: k for k, v in items(_typemap)}
 
-_typemap =  { 'blob':3, 'tree':2, 'commit':1, 'tag':4 }
-_typermap = { 3:'blob', 2:'tree', 1:'commit', 4:'tag' }
 
 _total_searches = 0
 _total_steps = 0
@@ -24,24 +49,205 @@ class GitError(Exception):
     pass
 
 
-def repo(sub = ''):
+def _gitenv(repo_dir=None):
+    if not repo_dir:
+        repo_dir = repo()
+    return merge_dict(environ, {b'GIT_DIR': os.path.abspath(repo_dir)})
+
+def _git_wait(cmd, p):
+    rv = p.wait()
+    if rv != 0:
+        raise GitError('%r returned %d' % (cmd, rv))
+
+def _git_exo(cmd, **kwargs):
+    kwargs['check'] = False
+    result = exo(cmd, **kwargs)
+    _, _, proc = result
+    if proc.returncode != 0:
+        raise GitError('%r returned %d' % (cmd, proc.returncode))
+    return result
+
+def git_config_get(option, repo_dir=None, opttype=None, cfg_file=None):
+    assert not (repo_dir and cfg_file), "repo_dir and cfg_file cannot both be used"
+    cmd = [b'git', b'config', b'--null']
+    if cfg_file:
+        cmd.extend([b'--file', cfg_file])
+    if opttype == 'int':
+        cmd.extend([b'--int'])
+    elif opttype == 'bool':
+        cmd.extend([b'--bool'])
+    else:
+        assert opttype is None
+    cmd.extend([b'--get', option])
+    env=None
+    if repo_dir:
+        env = _gitenv(repo_dir=repo_dir)
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env,
+                         close_fds=True)
+    # with --null, git writes out a trailing \0 after the value
+    r = p.stdout.read()[:-1]
+    rc = p.wait()
+    if rc == 0:
+        if opttype == 'int':
+            return int(r)
+        elif opttype == 'bool':
+            # git converts to 'true' or 'false'
+            return r == b'true'
+        return r
+    if rc != 1:
+        raise GitError('%r returned %d' % (cmd, rc))
+    return None
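
A usage sketch for the helper above; the repository path is hypothetical, and a missing option comes back as None rather than raising:

    limit = git_config_get(b'pack.packSizeLimit',
                           repo_dir=b'/path/to/repo', opttype='int')
    if limit is None:
        limit = 1000 * 1000 * 1000  # fall back to a default when unset
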
+
+
+def parse_tz_offset(s):
+    """UTC offset in seconds."""
+    tz_off = (int(s[1:3]) * 60 * 60) + (int(s[3:5]) * 60)
+    if bytes_from_byte(s[0]) == b'-':
+        return - tz_off
+    return tz_off
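
A standalone equivalent of the offset math, for illustration only (no bup imports needed):

    def tz_offset_seconds(tz):
        # tz looks like b'-0500' or b'+0130': sign, hours, minutes
        sign = -1 if tz[:1] == b'-' else 1
        return sign * (int(tz[1:3]) * 3600 + int(tz[3:5]) * 60)

    assert tz_offset_seconds(b'-0500') == -18000
    assert tz_offset_seconds(b'+0130') == 5400
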
+
+def parse_commit_gpgsig(sig):
+    """Return the original signature bytes.
+
+    i.e. with the "gpgsig " header and the leading space character on
+    each continuation line removed.
+
+    """
+    if not sig:
+        return None
+    assert sig.startswith(b'gpgsig ')
+    sig = sig[7:]
+    return sig.replace(b'\n ', b'\n')
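
For example, a made-up two-line signature loses the b'gpgsig ' header and one leading space per continuation line:

    sig = parse_commit_gpgsig(b'gpgsig -----BEGIN PGP SIGNATURE-----\n'
                              b' dGVzdA==\n'
                              b' -----END PGP SIGNATURE-----\n')
    assert sig == (b'-----BEGIN PGP SIGNATURE-----\n'
                   b'dGVzdA==\n'
                   b'-----END PGP SIGNATURE-----\n')
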
+
+# FIXME: derived from http://git.rsbx.net/Documents/Git_Data_Formats.txt
+# Make sure that's authoritative.
+
+# See also
+# https://github.com/git/git/blob/master/Documentation/technical/signature-format.txt
+# The continuation lines have only one leading space.
+
+_start_end_char = br'[^ .,:;<>"\'\0\n]'
+_content_char = br'[^\0\n<>]'
+_safe_str_rx = br'(?:%s{1,2}|(?:%s%s*%s))' \
+    % (_start_end_char,
+       _start_end_char, _content_char, _start_end_char)
+_tz_rx = br'[-+]\d\d[0-5]\d'
+_parent_rx = br'(?:parent [abcdefABCDEF0123456789]{40}\n)'
+# Assumes every following line starting with a space is part of the
+# mergetag.  Is there a formal commit blob spec?
+_mergetag_rx = br'(?:\nmergetag object [abcdefABCDEF0123456789]{40}(?:\n [^\0\n]*)*)'
+_commit_rx = re.compile(br'''tree (?P<tree>[abcdefABCDEF0123456789]{40})
+(?P<parents>%s*)author (?P<author_name>%s) <(?P<author_mail>%s)> (?P<asec>\d+) (?P<atz>%s)
+committer (?P<committer_name>%s) <(?P<committer_mail>%s)> (?P<csec>\d+) (?P<ctz>%s)(?P<mergetag>%s?)
+(?P<gpgsig>gpgsig .*\n(?: .*\n)*)?
+(?P<message>(?:.|\n)*)''' % (_parent_rx,
+                             _safe_str_rx, _safe_str_rx, _tz_rx,
+                             _safe_str_rx, _safe_str_rx, _tz_rx,
+                             _mergetag_rx))
+_parent_hash_rx = re.compile(br'\s*parent ([abcdefABCDEF0123456789]{40})\s*')
+
+# Note that the author_sec and committer_sec values are (UTC) epoch
+# seconds, and for now the mergetag is not included.
+CommitInfo = namedtuple('CommitInfo', ['tree', 'parents',
+                                       'author_name', 'author_mail',
+                                       'author_sec', 'author_offset',
+                                       'committer_name', 'committer_mail',
+                                       'committer_sec', 'committer_offset',
+                                       'gpgsig',
+                                       'message'])
+
+def parse_commit(content):
+    commit_match = re.match(_commit_rx, content)
+    if not commit_match:
+        raise Exception('cannot parse commit %r' % content)
+    matches = commit_match.groupdict()
+    return CommitInfo(tree=matches['tree'],
+                      parents=re.findall(_parent_hash_rx, matches['parents']),
+                      author_name=matches['author_name'],
+                      author_mail=matches['author_mail'],
+                      author_sec=int(matches['asec']),
+                      author_offset=parse_tz_offset(matches['atz']),
+                      committer_name=matches['committer_name'],
+                      committer_mail=matches['committer_mail'],
+                      committer_sec=int(matches['csec']),
+                      committer_offset=parse_tz_offset(matches['ctz']),
+                      gpgsig=parse_commit_gpgsig(matches['gpgsig']),
+                      message=matches['message'])
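
A minimal commit blob accepted by the regex above; the all-'a' and all-'b' hashes are placeholders, but syntactically valid hex:

    sample = (b'tree ' + b'a' * 40 + b'\n' +
              b'parent ' + b'b' * 40 + b'\n' +
              b'author A U Thor <a@example.org> 1580000000 +0100\n' +
              b'committer A U Thor <a@example.org> 1580000042 +0100\n' +
              b'\n' +
              b'save: an example message\n')
    info = parse_commit(sample)
    assert info.tree == b'a' * 40 and info.parents == [b'b' * 40]
    assert info.author_sec == 1580000000 and info.author_offset == 3600
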
+
+
+def get_cat_data(cat_iterator, expected_type):
+    _, kind, _ = next(cat_iterator)
+    if kind != expected_type:
+        raise Exception('expected %r, saw %r' % (expected_type, kind))
+    return b''.join(cat_iterator)
+
+def get_commit_items(id, cp):
+    return parse_commit(get_cat_data(cp.get(id), b'commit'))
+
+def _local_git_date_str(epoch_sec):
+    return b'%d %s' % (epoch_sec, utc_offset_str(epoch_sec))
+
+
+def _git_date_str(epoch_sec, tz_offset_sec):
+    offs =  tz_offset_sec // 60
+    return b'%d %s%02d%02d' \
+        % (epoch_sec,
+           b'+' if offs >= 0 else b'-',
+           abs(offs) // 60,
+           abs(offs) % 60)
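
Two worked values for the formatter above; the floor division keeps partial-hour offsets exact:

    assert _git_date_str(1580000000, 3600) == b'1580000000 +0100'
    assert _git_date_str(1580000000, -4500) == b'1580000000 -0115'
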
+
+
+def repo(sub = b'', repo_dir=None):
     """Get the path to the git repository or one of its subdirectories."""
-    global repodir
-    if not repodir:
+    repo_dir = repo_dir or repodir
+    if not repo_dir:
         raise GitError('You should call check_repo_or_die()')
 
     # If there's a .git subdirectory, then the actual repo is in there.
-    gd = os.path.join(repodir, '.git')
+    gd = os.path.join(repo_dir, b'.git')
     if os.path.exists(gd):
-        repodir = gd
+        repo_dir = gd
+
+    return os.path.join(repo_dir, sub)
+
+
+_shorten_hash_rx = \
+    re.compile(br'([^0-9a-z]|\b)([0-9a-z]{7})[0-9a-z]{33}([^0-9a-z]|\b)')
+
+def shorten_hash(s):
+    return _shorten_hash_rx.sub(br'\1\2*\3', s)
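
Any lowercase 40-hex run bounded by non-hex characters collapses to its first seven characters plus an asterisk, e.g.:

    assert shorten_hash(b'pack-' + b'0123456' + b'f' * 33 + b'.idx') \
        == b'pack-0123456*.idx'
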
 
-    return os.path.join(repodir, sub)
+
+def repo_rel(path):
+    full = os.path.abspath(path)
+    fullrepo = os.path.abspath(repo(b''))
+    if not fullrepo.endswith(b'/'):
+        fullrepo += b'/'
+    if full.startswith(fullrepo):
+        path = full[len(fullrepo):]
+    if path.startswith(b'index-cache/'):
+        path = path[len(b'index-cache/'):]
+    return shorten_hash(path)
 
 
 def auto_midx(objdir):
-    main_exe = os.environ.get('BUP_MAIN_EXE') or sys.argv[0]
-    args = [main_exe, 'midx', '--auto', '--dir', objdir]
-    rv = subprocess.call(args, stdout=open('/dev/null', 'w'))
+    args = [path.exe(), b'midx', b'--auto', b'--dir', objdir]
+    try:
+        rv = subprocess.call(args, stdout=open(os.devnull, 'w'))
+    except OSError as e:
+        # make sure 'args' gets printed to help with debugging
+        add_error('%r: exception: %s' % (args, e))
+        raise
+    if rv:
+        add_error('%r: returned %d' % (args, rv))
+
+    args = [path.exe(), b'bloom', b'--dir', objdir]
+    try:
+        rv = subprocess.call(args, stdout=open(os.devnull, 'w'))
+    except OSError as e:
+        # make sure 'args' gets printed to help with debugging
+        add_error('%r: exception: %s' % (args, e))
+        raise
     if rv:
         add_error('%r: returned %d' % (args, rv))
 
@@ -50,85 +256,115 @@ def mangle_name(name, mode, gitmode):
     """Mangle a file name to present an abstract name for segmented files.
     Mangled file names will have the ".bup" extension added to them. If a
     file's name already ends with ".bup", a ".bupl" extension is added to
-    disambiguate normal files from semgmented ones.
+    disambiguate normal files from segmented ones.
     """
     if stat.S_ISREG(mode) and not stat.S_ISREG(gitmode):
-        return name + '.bup'
-    elif name.endswith('.bup') or name[:-1].endswith('.bup'):
-        return name + '.bupl'
+        assert(stat.S_ISDIR(gitmode))
+        return name + b'.bup'
+    elif name.endswith(b'.bup') or name[:-1].endswith(b'.bup'):
+        return name + b'.bupl'
     else:
         return name
 
 
 (BUP_NORMAL, BUP_CHUNKED) = (0,1)
-def demangle_name(name):
+def demangle_name(name, mode):
     """Remove name mangling from a file name, if necessary.
 
     The return value is a tuple (demangled_filename,mode), where mode is one of
     the following:
 
     * BUP_NORMAL  : files that should be read as-is from the repository
-    * BUP_CHUNKED : files that were chunked and need to be assembled
+    * BUP_CHUNKED : files that were chunked and need to be reassembled
 
-    For more information on the name mangling algorythm, see mangle_name()
+    For more information on the name mangling algorithm, see mangle_name()
     """
-    if name.endswith('.bupl'):
+    if name.endswith(b'.bupl'):
         return (name[:-5], BUP_NORMAL)
-    elif name.endswith('.bup'):
+    elif name.endswith(b'.bup'):
         return (name[:-4], BUP_CHUNKED)
+    elif name.endswith(b'.bupm'):
+        return (name[:-5],
+                BUP_CHUNKED if stat.S_ISDIR(mode) else BUP_NORMAL)
+    return (name, BUP_NORMAL)
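
A round trip over the pair of helpers above (stat constants from the standard library); a chunked regular file is stored as a git tree, so it gains '.bup':

    import stat
    mangled = mangle_name(b'photo.jpg', stat.S_IFREG, stat.S_IFDIR)
    assert mangled == b'photo.jpg.bup'
    assert demangle_name(mangled, stat.S_IFDIR) == (b'photo.jpg', BUP_CHUNKED)
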
+
+
+def calc_hash(type, content):
+    """Calculate some content's hash in the Git fashion."""
+    header = b'%s %d\0' % (type, len(content))
+    sum = Sha1(header)
+    sum.update(content)
+    return sum.digest()
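
The same computation with hashlib alone (Sha1 above is bup's wrapper); the result matches git hash-object for identical content:

    import hashlib
    content = b'hello\n'
    sha = hashlib.sha1(b'blob %d\0' % len(content))
    sha.update(content)
    # same as: echo hello | git hash-object --stdin
    assert sha.hexdigest() == 'ce013625030ba8dba906f756967f9e9ca394464a'
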
+
+
+def shalist_item_sort_key(ent):
+    (mode, name, id) = ent
+    assert(mode+0 == mode)
+    if stat.S_ISDIR(mode):
+        return name + b'/'
     else:
-        return (name, BUP_NORMAL)
+        return name
+
+
+def tree_encode(shalist):
+    """Generate a git tree object from (mode,name,hash) tuples."""
+    shalist = sorted(shalist, key = shalist_item_sort_key)
+    l = []
+    for (mode,name,bin) in shalist:
+        assert(mode)
+        assert(mode+0 == mode)
+        assert(name)
+        assert(len(bin) == 20)
+        s = b'%o %s\0%s' % (mode,name,bin)
+        assert s[0] != b'0'  # 0-padded octal is not acceptable in a git tree
+        l.append(s)
+    return b''.join(l)
+
+
+def tree_decode(buf):
+    """Generate a list of (mode,name,hash) from the git tree object in buf."""
+    ofs = 0
+    while ofs < len(buf):
+        z = buf.find(b'\0', ofs)
+        assert(z > ofs)
+        spl = buf[ofs:z].split(b' ', 1)
+        assert(len(spl) == 2)
+        mode,name = spl
+        sha = buf[z+1:z+1+20]
+        ofs = z+1+20
+        yield (int(mode, 8), name, sha)
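
A round-trip sketch for the two tree helpers; tree_encode sorts entries git-style, comparing directory names as if they ended in '/':

    entries = [(0o40000, b'sub', b'\xbb' * 20),
               (0o100644, b'a.txt', b'\xaa' * 20)]
    buf = tree_encode(entries)
    assert list(tree_decode(buf)) == [(0o100644, b'a.txt', b'\xaa' * 20),
                                      (0o40000, b'sub', b'\xbb' * 20)]
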
 
 
-def _encode_packobj(type, content):
-    szout = ''
+def _encode_packobj(type, content, compression_level=1):
+    if compression_level not in (0, 1, 2, 3, 4, 5, 6, 7, 8, 9):
+        raise ValueError('invalid compression level %s' % compression_level)
+    szout = b''
     sz = len(content)
     szbits = (sz & 0x0f) | (_typemap[type]<<4)
     sz >>= 4
     while 1:
         if sz: szbits |= 0x80
-        szout += chr(szbits)
+        szout += bytes_from_uint(szbits)
         if not sz:
             break
         szbits = sz & 0x7f
         sz >>= 7
-    z = zlib.compressobj(1)
+    z = zlib.compressobj(compression_level)
     yield szout
     yield z.compress(content)
     yield z.flush()
 
 
-def _encode_looseobj(type, content):
-    z = zlib.compressobj(1)
-    yield z.compress('%s %d\0' % (type, len(content)))
-    yield z.compress(content)
-    yield z.flush()
-
-
-def _decode_looseobj(buf):
-    assert(buf);
-    s = zlib.decompress(buf)
-    i = s.find('\0')
-    assert(i > 0)
-    l = s[:i].split(' ')
-    type = l[0]
-    sz = int(l[1])
-    content = s[i+1:]
-    assert(type in _typemap)
-    assert(sz == len(content))
-    return (type, content)
-
-
 def _decode_packobj(buf):
     assert(buf)
-    c = ord(buf[0])
+    c = byte_int(buf[0])
     type = _typermap[(c & 0x70) >> 4]
     sz = c & 0x0f
     shift = 4
     i = 0
     while c & 0x80:
         i += 1
-        c = ord(buf[i])
+        c = byte_int(buf[i])
         sz |= (c & 0x7f) << shift
         shift += 7
         if not (c & 0x80):
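
The size header packs the object type into bits 4..6 of the first byte, the size's bottom nibble into the low 4 bits, and any remaining size bits into 7-bit little-endian groups flagged with 0x80. A worked example for a 300-byte blob; the decode half assumes the rest of _decode_packobj (beyond this hunk) decompresses the remaining bytes, as in bup proper:

    # 300 == 0b1_0010_1100; type b'blob' is 3
    # byte 0: 0x80 | (3 << 4) | (300 & 0x0f)  ->  0xbc  (continuation set)
    # byte 1: 300 >> 4                        ->  0x12  (final group)
    packed = b''.join(_encode_packobj(b'blob', b'x' * 300))
    assert packed[:2] == b'\xbc\x12'
    assert _decode_packobj(packed) == (b'blob', b'x' * 300)
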
@@ -147,25 +383,24 @@ class PackIdx:
             return self._ofs_from_idx(idx)
         return None
 
-    def exists(self, hash):
+    def exists(self, hash, want_source=False):
         """Return nonempty if the object exists in this index."""
-        return hash and (self._idx_from_hash(hash) != None) and True or None
-
-    def __len__(self):
-        return int(self.fanout[255])
+        if hash and (self._idx_from_hash(hash) != None):
+            return want_source and os.path.basename(self.name) or True
+        return None
 
     def _idx_from_hash(self, hash):
         global _total_searches, _total_steps
         _total_searches += 1
         assert(len(hash) == 20)
-        b1 = ord(hash[0])
+        b1 = byte_int(hash[0])
         start = self.fanout[b1-1] # range -1..254
         end = self.fanout[b1] # range 0..255
-        want = str(hash)
+        want = hash
         _total_steps += 1  # lookup table is a step
         while start < end:
             _total_steps += 1
-            mid = start + (end-start)/2
+            mid = start + (end - start) // 2
             v = self._idx_to_hash(mid)
             if v < want:
                 start = mid+1
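
For context on the bounds above: fanout[b] counts the objects whose leading byte is <= b, so candidates for a hash starting with byte b1 occupy indexes [fanout[b1-1], fanout[b1]), and the appended 0 entry serves as fanout[-1] for the b1 == 0 case. A small illustration:

    # With three objects whose hashes start 0x00, 0x01, 0x01:
    #   fanout[0] == 1, fanout[1] == 3, ..., fanout[255] == 3, fanout[-1] == 0
    # A lookup for a hash starting with 0x01 bisects only indexes 1 and 2.
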
@@ -179,170 +414,163 @@ class PackIdx:
 class PackIdxV1(PackIdx):
     """Object representation of a Git pack index (version 1) file."""
     def __init__(self, filename, f):
+        self.closed = False
         self.name = filename
         self.idxnames = [self.name]
         self.map = mmap_read(f)
-        self.fanout = list(struct.unpack('!256I',
-                                         str(buffer(self.map, 0, 256*4))))
+        # Min size for 'L' is 4, which is sufficient for struct's '!I'
+        self.fanout = array('L', struct.unpack('!256I', self.map))
         self.fanout.append(0)  # entry "-1"
-        nsha = self.fanout[255]
-        self.shatable = buffer(self.map, 256*4, nsha*24)
+        self.nsha = self.fanout[255]
+        self.sha_ofs = 256 * 4
+        # Avoid slicing shatable for individual hashes (very high overhead)
+        self.shatable = buffer(self.map, self.sha_ofs, self.nsha * 24)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        with pending_raise(value, rethrow=False):
+            self.close()
+
+    def __len__(self):
+        return int(self.nsha)  # int() from long for python 2
 
     def _ofs_from_idx(self, idx):
-        return struct.unpack('!I', str(self.shatable[idx*24 : idx*24+4]))[0]
+        if idx >= self.nsha or idx < 0:
+            raise IndexError('invalid pack index index %d' % idx)
+        ofs = self.sha_ofs + idx * 24
+        return struct.unpack_from('!I', self.map, offset=ofs)[0]
 
     def _idx_to_hash(self, idx):
-        return str(self.shatable[idx*24+4 : idx*24+24])
+        if idx >= self.nsha or idx < 0:
+            raise IndexError('invalid pack index index %d' % idx)
+        ofs = self.sha_ofs + idx * 24 + 4
+        return self.map[ofs : ofs + 20]
 
     def __iter__(self):
-        for i in xrange(self.fanout[255]):
-            yield buffer(self.map, 256*4 + 24*i + 4, 20)
+        start = self.sha_ofs + 4
+        for ofs in range(start, start + 24 * self.nsha, 24):
+            yield self.map[ofs : ofs + 20]
+
+    def close(self):
+        self.closed = True
+        if self.map is not None:
+            self.shatable = None
+            self.map.close()
+            self.map = None
+
+    def __del__(self):
+        assert self.closed
 
 
 class PackIdxV2(PackIdx):
     """Object representation of a Git pack index (version 2) file."""
     def __init__(self, filename, f):
+        self.closed = False
         self.name = filename
         self.idxnames = [self.name]
         self.map = mmap_read(f)
-        assert(str(self.map[0:8]) == '\377tOc\0\0\0\2')
-        self.fanout = list(struct.unpack('!256I',
-                                         str(buffer(self.map, 8, 256*4))))
-        self.fanout.append(0)  # entry "-1"
-        nsha = self.fanout[255]
-        self.shatable = buffer(self.map, 8 + 256*4, nsha*20)
-        self.ofstable = buffer(self.map,
-                               8 + 256*4 + nsha*20 + nsha*4,
-                               nsha*4)
-        self.ofs64table = buffer(self.map,
-                                 8 + 256*4 + nsha*20 + nsha*4 + nsha*4)
+        assert self.map[0:8] == b'\377tOc\0\0\0\2'
+        # Min size for 'L' is 4, which is sufficient for struct's '!I'
+        self.fanout = array('L', struct.unpack_from('!256I', self.map, offset=8))
+        self.fanout.append(0)
+        self.nsha = self.fanout[255]
+        self.sha_ofs = 8 + 256*4
+        self.ofstable_ofs = self.sha_ofs + self.nsha * 20 + self.nsha * 4
+        self.ofs64table_ofs = self.ofstable_ofs + self.nsha * 4
+        # Avoid slicing this for individual hashes (very high overhead)
+        self.shatable = buffer(self.map, self.sha_ofs, self.nsha*20)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        with pending_raise(value, rethrow=False):
+            self.close()
+
+    def __len__(self):
+        return int(self.nsha)  # int() from long for python 2
 
     def _ofs_from_idx(self, idx):
-        ofs = struct.unpack('!I', str(buffer(self.ofstable, idx*4, 4)))[0]
+        if idx >= self.nsha or idx < 0:
+            raise IndexError('invalid pack index index %d' % idx)
+        ofs_ofs = self.ofstable_ofs + idx * 4
+        ofs = struct.unpack_from('!I', self.map, offset=ofs_ofs)[0]
         if ofs & 0x80000000:
             idx64 = ofs & 0x7fffffff
-            ofs = struct.unpack('!Q',
-                                str(buffer(self.ofs64table, idx64*8, 8)))[0]
+            ofs64_ofs = self.ofs64table_ofs + idx64 * 8
+            ofs = struct.unpack_from('!Q', self.map, offset=ofs64_ofs)[0]
         return ofs
 
     def _idx_to_hash(self, idx):
-        return str(self.shatable[idx*20:(idx+1)*20])
+        if idx >= self.nsha or idx < 0:
+            raise IndexError('invalid pack index index %d' % idx)
+        ofs = self.sha_ofs + idx * 20
+        return self.map[ofs : ofs + 20]
 
     def __iter__(self):
-        for i in xrange(self.fanout[255]):
-            yield buffer(self.map, 8 + 256*4 + 20*i, 20)
-
-
-extract_bits = _helpers.extract_bits
-
-
-class PackMidx:
-    """Wrapper which contains data from multiple index files.
-    Multiple index (.midx) files constitute a wrapper around index (.idx) files
-    and make it possible for bup to expand Git's indexing capabilities to vast
-    amounts of files.
-    """
-    def __init__(self, filename):
-        self.name = filename
-        self.force_keep = False
-        assert(filename.endswith('.midx'))
-        self.map = mmap_read(open(filename))
-        if str(self.map[0:4]) != 'MIDX':
-            log('Warning: skipping: invalid MIDX header in %r\n' % filename)
-            self.force_keep = True
-            return self._init_failed()
-        ver = struct.unpack('!I', self.map[4:8])[0]
-        if ver < MIDX_VERSION:
-            log('Warning: ignoring old-style (v%d) midx %r\n' 
-                % (ver, filename))
-            self.force_keep = False  # old stuff is boring  
-            return self._init_failed()
-        if ver > MIDX_VERSION:
-            log('Warning: ignoring too-new (v%d) midx %r\n'
-                % (ver, filename))
-            self.force_keep = True  # new stuff is exciting
-            return self._init_failed()
-
-        self.bits = _helpers.firstword(self.map[8:12])
-        self.entries = 2**self.bits
-        self.fanout = buffer(self.map, 12, self.entries*4)
-        shaofs = 12 + self.entries*4
-        nsha = self._fanget(self.entries-1)
-        self.shalist = buffer(self.map, shaofs, nsha*20)
-        self.idxnames = str(self.map[shaofs + 20*nsha:]).split('\0')
-
-    def _init_failed(self):
-        self.bits = 0
-        self.entries = 1
-        self.fanout = buffer('\0\0\0\0')
-        self.shalist = buffer('\0'*20)
-        self.idxnames = []
-
-    def _fanget(self, i):
-        start = i*4
-        s = self.fanout[start:start+4]
-        return _helpers.firstword(s)
-
-    def _get(self, i):
-        return str(self.shalist[i*20:(i+1)*20])
-
-    def exists(self, hash):
-        """Return nonempty if the object exists in the index files."""
-        global _total_searches, _total_steps
-        _total_searches += 1
-        want = str(hash)
-        el = extract_bits(want, self.bits)
-        if el:
-            start = self._fanget(el-1)
-            startv = el << (32-self.bits)
-        else:
-            start = 0
-            startv = 0
-        end = self._fanget(el)
-        endv = (el+1) << (32-self.bits)
-        _total_steps += 1   # lookup table is a step
-        hashv = _helpers.firstword(hash)
-        #print '(%08x) %08x %08x %08x' % (extract_bits(want, 32), startv, hashv, endv)
-        while start < end:
-            _total_steps += 1
-            #print '! %08x %08x %08x   %d - %d' % (startv, hashv, endv, start, end)
-            mid = start + (hashv-startv)*(end-start-1)/(endv-startv)
-            #print '  %08x %08x %08x   %d %d %d' % (startv, hashv, endv, start, mid, end)
-            v = self._get(mid)
-            #print '    %08x' % self._num(v)
-            if v < want:
-                start = mid+1
-                startv = _helpers.firstword(v)
-            elif v > want:
-                end = mid
-                endv = _helpers.firstword(v)
-            else: # got it!
-                return True
-        return None
+        start = self.sha_ofs
+        for ofs in range(start, start + 20 * self.nsha, 20):
+            yield self.map[ofs : ofs + 20]
 
-    def __iter__(self):
-        for i in xrange(self._fanget(self.entries-1)):
-            yield buffer(self.shalist, i*20, 20)
+    def close(self):
+        self.closed = True
+        if self.map is not None:
+            self.shatable = None
+            self.map.close()
+            self.map = None
 
-    def __len__(self):
-        return int(self._fanget(self.entries-1))
+    def __del__(self):
+        assert self.closed
 
 
 _mpi_count = 0
 class PackIdxList:
-    def __init__(self, dir):
+    def __init__(self, dir, ignore_midx=False):
         global _mpi_count
+        # Q: was this also intended to prevent opening multiple repos?
         assert(_mpi_count == 0) # these things suck tons of VM; don't waste it
         _mpi_count += 1
+        self.open = True
         self.dir = dir
-        self.also = {}
+        self.also = set()
         self.packs = []
-        self.refresh()
+        self.do_bloom = False
+        self.bloom = None
+        self.ignore_midx = ignore_midx
+        try:
+            self.refresh()
+        except BaseException as ex:
+            with pending_raise(ex):
+                self.close()
 
-    def __del__(self):
+    def close(self):
         global _mpi_count
+        if not self.open:
+            assert _mpi_count == 0
+            return
         _mpi_count -= 1
-        assert(_mpi_count == 0)
+        assert _mpi_count == 0
+        self.also = None
+        self.bloom, bloom = None, self.bloom
+        self.packs, packs = None, self.packs
+        self.open = False
+        with ExitStack() as stack:
+            for pack in packs:
+                stack.enter_context(pack)
+            if bloom:
+                bloom.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        with pending_raise(value, rethrow=False):
+            self.close()
+
+    def __del__(self):
+        assert not self.open
 
     def __iter__(self):
         return iter(idxmerge(self.packs))
@@ -350,19 +578,27 @@ class PackIdxList:
     def __len__(self):
         return sum(len(pack) for pack in self.packs)
 
-    def exists(self, hash):
+    def exists(self, hash, want_source=False):
         """Return nonempty if the object exists in the index files."""
         global _total_searches
         _total_searches += 1
         if hash in self.also:
             return True
+        if self.do_bloom and self.bloom:
+            if self.bloom.exists(hash):
+                self.do_bloom = False
+            else:
+                _total_searches -= 1  # was counted by bloom
+                return None
         for i in range(len(self.packs)):
             p = self.packs[i]
             _total_searches -= 1  # will be incremented by sub-pack
-            if p.exists(hash):
+            ix = p.exists(hash, want_source=want_source)
+            if ix:
                 # reorder so most recently used packs are searched first
                 self.packs = [p] + self.packs[:i] + self.packs[i+1:]
-                return p.name
+                return ix
+        self.do_bloom = True
         return None
 
     def refresh(self, skip_midx = False):
@@ -374,185 +610,226 @@ class PackIdxList:
         If skip_midx is True, all work on .midx files will be skipped and .midx
         files will be removed from the list.
 
-        The module-global variable 'ignore_midx' can force this function to
+        The instance variable 'ignore_midx' can force this function to
         always act as if skip_midx was True.
         """
-        skip_midx = skip_midx or ignore_midx
+        if self.bloom is not None:
+            self.bloom.close()
+        self.bloom = None # Always reopen the bloom as it may have been replaced
+        self.do_bloom = False
+        skip_midx = skip_midx or self.ignore_midx
         d = dict((p.name, p) for p in self.packs
-                 if not skip_midx or not isinstance(p, PackMidx))
+                 if not skip_midx or not isinstance(p, midx.PackMidx))
         if os.path.exists(self.dir):
             if not skip_midx:
                 midxl = []
+                midxes = set(glob.glob(os.path.join(self.dir, b'*.midx')))
+                # remove any *.midx files from our list that no longer exist
+                for ix in list(d.values()):
+                    if not isinstance(ix, midx.PackMidx):
+                        continue
+                    if ix.name in midxes:
+                        continue
+                    # remove the midx
+                    del d[ix.name]
+                    ix.close()
+                    self.packs.remove(ix)
                 for ix in self.packs:
-                    if isinstance(ix, PackMidx):
+                    if isinstance(ix, midx.PackMidx):
                         for name in ix.idxnames:
                             d[os.path.join(self.dir, name)] = ix
-                for f in os.listdir(self.dir):
-                    full = os.path.join(self.dir, f)
-                    if f.endswith('.midx') and not d.get(full):
-                        mx = PackMidx(full)
+                for full in midxes:
+                    if not d.get(full):
+                        mx = midx.PackMidx(full)
                         (mxd, mxf) = os.path.split(mx.name)
-                        broken = 0
+                        broken = False
                         for n in mx.idxnames:
                             if not os.path.exists(os.path.join(mxd, n)):
-                                log(('warning: index %s missing\n' +
-                                    '  used by %s\n') % (n, mxf))
-                                broken += 1
+                                log(('warning: index %s missing\n'
+                                     '  used by %s\n')
+                                    % (path_msg(n), path_msg(mxf)))
+                                broken = True
                         if broken:
-                            del mx
+                            mx.close()
                             unlink(full)
                         else:
                             midxl.append(mx)
-                midxl.sort(lambda x,y: -cmp(len(x),len(y)))
+                midxl.sort(key=lambda ix:
+                           (-len(ix), -xstat.stat(ix.name).st_mtime))
                 for ix in midxl:
-                    any = 0
+                    any_needed = False
                     for sub in ix.idxnames:
                         found = d.get(os.path.join(self.dir, sub))
                         if not found or isinstance(found, PackIdx):
                             # doesn't exist, or exists but not in a midx
-                            d[ix.name] = ix
-                            for name in ix.idxnames:
-                                d[os.path.join(self.dir, name)] = ix
-                            any += 1
+                            any_needed = True
                             break
-                    if not any and not ix.force_keep:
+                    if any_needed:
+                        d[ix.name] = ix
+                        for name in ix.idxnames:
+                            d[os.path.join(self.dir, name)] = ix
+                    elif not ix.force_keep:
                         debug1('midx: removing redundant: %s\n'
-                               % os.path.basename(ix.name))
+                               % path_msg(os.path.basename(ix.name)))
+                        ix.close()
                         unlink(ix.name)
-            for f in os.listdir(self.dir):
-                full = os.path.join(self.dir, f)
-                if f.endswith('.idx') and not d.get(full):
+            for full in glob.glob(os.path.join(self.dir, b'*.idx')):
+                if not d.get(full):
                     try:
                         ix = open_idx(full)
-                    except GitError, e:
+                    except GitError as e:
                         add_error(e)
                         continue
                     d[full] = ix
-            self.packs = list(set(d.values()))
+            bfull = os.path.join(self.dir, b'bup.bloom')
+            new_packs = set(d.values())
+            for p in self.packs:
+                if not p in new_packs:
+                    p.close()
+            new_packs = list(new_packs)
+            new_packs.sort(reverse=True, key=lambda x: len(x))
+            self.packs = new_packs
+            if self.bloom is None and os.path.exists(bfull):
+                self.bloom = bloom.ShaBloom(bfull)
+            try:
+                if self.bloom and self.bloom.valid() and len(self.bloom) >= len(self):
+                    self.do_bloom = True
+                else:
+                    if self.bloom:
+                        self.bloom, bloom_tmp = None, self.bloom
+                        bloom_tmp.close()
+            except BaseException as ex:
+                with pending_raise(ex):
+                    if self.bloom:
+                        self.bloom.close()
+
         debug1('PackIdxList: using %d index%s.\n'
             % (len(self.packs), len(self.packs)!=1 and 'es' or ''))
 
-    def packname_containing(self, hash):
-        # figure out which pack contains a given hash.
-        # FIXME: if the midx file format would just *store* this information,
-        # we could calculate it a lot more efficiently.  But it's not needed
-        # often, so let's do it like this.
-        for f in os.listdir(self.dir):
-            if f.endswith('.idx'):
-                full = os.path.join(self.dir, f)
-                try:
-                    ix = open_idx(full)
-                except GitError, e:
-                    add_error(e)
-                    continue
-                if ix.exists(hash):
-                    return full
-
     def add(self, hash):
         """Insert an additional object in the list."""
-        self.also[hash] = 1
-
-    def zap_also(self):
-        """Remove all additional objects from the list."""
-        self.also = {}
-
-
-def calc_hash(type, content):
-    """Calculate some content's hash in the Git fashion."""
-    header = '%s %d\0' % (type, len(content))
-    sum = Sha1(header)
-    sum.update(content)
-    return sum.digest()
-
-
-def _shalist_sort_key(ent):
-    (mode, name, id) = ent
-    if stat.S_ISDIR(int(mode, 8)):
-        return name + '/'
-    else:
-        return name
+        self.also.add(hash)
 
 
 def open_idx(filename):
-    if filename.endswith('.idx'):
+    if filename.endswith(b'.idx'):
         f = open(filename, 'rb')
         header = f.read(8)
-        if header[0:4] == '\377tOc':
+        if header[0:4] == b'\377tOc':
             version = struct.unpack('!I', header[4:8])[0]
             if version == 2:
                 return PackIdxV2(filename, f)
             else:
                 raise GitError('%s: expected idx file version 2, got %d'
-                               % (filename, version))
-        elif len(header) == 8 and header[0:4] < '\377tOc':
+                               % (path_msg(filename), version))
+        elif len(header) == 8 and header[0:4] < b'\377tOc':
             return PackIdxV1(filename, f)
         else:
-            raise GitError('%s: unrecognized idx file header' % filename)
-    elif filename.endswith('.midx'):
-        return PackMidx(filename)
+            raise GitError('%s: unrecognized idx file header'
+                           % path_msg(filename))
+    elif filename.endswith(b'.midx'):
+        return midx.PackMidx(filename)
     else:
         raise GitError('idx filenames must end with .idx or .midx')
 
 
 def idxmerge(idxlist, final_progress=True):
     """Generate a list of all the objects reachable in a PackIdxList."""
-    total = sum(len(i) for i in idxlist)
-    iters = (iter(i) for i in idxlist)
-    heap = [(next(it), it) for it in iters]
-    heapq.heapify(heap)
-    count = 0
-    last = None
-    while heap:
-        if (count % 10024) == 0:
-            progress('Reading indexes: %.2f%% (%d/%d)\r'
-                     % (count*100.0/total, count, total))
-        (e, it) = heap[0]
-        if e != last:
-            yield e
-            last = e
-        count += 1
-        e = next(it)
-        if e:
-            heapq.heapreplace(heap, (e, it))
-        else:
-            heapq.heappop(heap)
-    if final_progress:
-        log('Reading indexes: %.2f%% (%d/%d), done.\n' % (100, total, total))
-
-
-class PackWriter:
-    """Writes Git objects insid a pack file."""
-    def __init__(self, objcache_maker=None):
+    def pfunc(count, total):
+        qprogress('Reading indexes: %.2f%% (%d/%d)\r'
+                  % (count*100.0/total, count, total))
+    def pfinal(count, total):
+        if final_progress:
+            progress('Reading indexes: %.2f%% (%d/%d), done.\n'
+                     % (100, total, total))
+    return merge_iter(idxlist, 10024, pfunc, pfinal)
+
+
+def create_commit_blob(tree, parent,
+                       author, adate_sec, adate_tz,
+                       committer, cdate_sec, cdate_tz,
+                       msg):
+    if adate_tz is not None:
+        adate_str = _git_date_str(adate_sec, adate_tz)
+    else:
+        adate_str = _local_git_date_str(adate_sec)
+    if cdate_tz is not None:
+        cdate_str = _git_date_str(cdate_sec, cdate_tz)
+    else:
+        cdate_str = _local_git_date_str(cdate_sec)
+    l = []
+    if tree: l.append(b'tree %s' % hexlify(tree))
+    if parent: l.append(b'parent %s' % hexlify(parent))
+    if author: l.append(b'author %s %s' % (author, adate_str))
+    if committer: l.append(b'committer %s %s' % (committer, cdate_str))
+    l.append(b'')
+    l.append(msg)
+    return b'\n'.join(l)
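
A sketch of the output, with a placeholder 20-byte tree and no parent; the result should be parseable by parse_commit() above:

    blob = create_commit_blob(b'\xaa' * 20, None,
                              b'A U Thor <a@example.org>', 1580000000, 3600,
                              b'A U Thor <a@example.org>', 1580000000, 3600,
                              b'save\n')
    assert blob.startswith(b'tree ' + b'aa' * 20 + b'\n')
    assert blob.endswith(b'+0100\n\nsave\n')
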
+
+def _make_objcache():
+    return PackIdxList(repo(b'objects/pack'))
+
+# bup-gc assumes that it can disable all PackWriter activities
+# (bloom/midx/cache) via the constructor and close() arguments.
+
+class PackWriter(object):
+    """Writes Git objects inside a pack file."""
+    def __init__(self, objcache_maker=_make_objcache, compression_level=1,
+                 run_midx=True, on_pack_finish=None,
+                 max_pack_size=None, max_pack_objects=None, repo_dir=None):
+        self.closed = False
+        self.repo_dir = repo_dir or repo()
+        self.file = None
+        self.parentfd = None
         self.count = 0
         self.outbytes = 0
         self.filename = None
-        self.file = None
         self.idx = None
         self.objcache_maker = objcache_maker
         self.objcache = None
+        self.compression_level = compression_level
+        self.run_midx=run_midx
+        self.on_pack_finish = on_pack_finish
+        if not max_pack_size:
+            max_pack_size = git_config_get(b'pack.packSizeLimit',
+                                           repo_dir=self.repo_dir,
+                                           opttype='int')
+            if not max_pack_size:
+                # larger packs slow down pruning
+                max_pack_size = 1000 * 1000 * 1000
+        self.max_pack_size = max_pack_size
+        # cache memory usage is about 83 bytes per object
+        self.max_pack_objects = max_pack_objects if max_pack_objects \
+                                else max(1, self.max_pack_size // 5000)
+
+    def __enter__(self):
+        return self
 
-    def __del__(self):
-        self.close()
-
-    def _make_objcache(self):
-        if self.objcache == None:
-            if self.objcache_maker:
-                self.objcache = self.objcache_maker()
-            else:
-                self.objcache = PackIdxList(repo('objects/pack'))
+    def __exit__(self, type, value, traceback):
+        with pending_raise(value, rethrow=False):
+            self.close()
 
     def _open(self):
         if not self.file:
-            self._make_objcache()
-            (fd,name) = tempfile.mkstemp(suffix='.pack', dir=repo('objects'))
-            self.file = os.fdopen(fd, 'w+b')
-            assert(name.endswith('.pack'))
+            objdir = dir = os.path.join(self.repo_dir, b'objects')
+            fd, name = tempfile.mkstemp(suffix=b'.pack', dir=objdir)
+            try:
+                self.file = os.fdopen(fd, 'w+b')
+            except:
+                os.close(fd)
+                raise
+            try:
+                self.parentfd = os.open(objdir, os.O_RDONLY)
+            except:
+                f = self.file
+                self.file = None
+                f.close()
+                raise
+            assert name.endswith(b'.pack')
             self.filename = name[:-5]
-            self.file.write('PACK\0\0\0\2\0\0\0\0')
-            self.idx = list(list() for i in xrange(256))
+            self.file.write(b'PACK\0\0\0\2\0\0\0\0')
+            self.idx = PackIdxV2Writer()
 
-    # the 'sha' parameter is used in client.py's _raw_write(), but not needed
-    # in this basic version.
     def _raw_write(self, datalist, sha):
         self._open()
         f = self.file
@@ -561,8 +838,11 @@ class PackWriter:
         # all-or-nothing.  (The blob shouldn't be very big anyway, thanks
         # to our hashsplit algorithm.)  f.write() does its own buffering,
         # but that's okay because we'll flush it in _end().
-        oneblob = ''.join(datalist)
-        f.write(oneblob)
+        oneblob = b''.join(datalist)
+        try:
+            f.write(oneblob)
+        except IOError as e:
+            reraise(GitError(e))
         nw = len(oneblob)
         crc = zlib.crc32(oneblob) & 0xffffffff
         self._update_idx(sha, crc, nw)
@@ -573,192 +853,226 @@ class PackWriter:
     def _update_idx(self, sha, crc, size):
         assert(sha)
         if self.idx:
-            self.idx[ord(sha[0])].append((sha, crc, self.file.tell() - size))
+            self.idx.add(sha, crc, self.file.tell() - size)
 
     def _write(self, sha, type, content):
         if verbose:
             log('>')
         if not sha:
             sha = calc_hash(type, content)
-        size, crc = self._raw_write(_encode_packobj(type, content), sha=sha)
+        size, crc = self._raw_write(_encode_packobj(type, content,
+                                                    self.compression_level),
+                                    sha=sha)
+        if self.outbytes >= self.max_pack_size \
+           or self.count >= self.max_pack_objects:
+            self.breakpoint()
         return sha
 
-    def breakpoint(self):
-        """Clear byte and object counts and return the last processed id."""
-        id = self._end()
-        self.outbytes = self.count = 0
-        return id
-
-    def write(self, type, content):
-        """Write an object in this pack file."""
-        return self._write(calc_hash(type, content), type, content)
+    def _require_objcache(self):
+        if self.objcache is None and self.objcache_maker:
+            self.objcache = self.objcache_maker()
+        if self.objcache is None:
+            raise GitError(
+                    "PackWriter not opened or can't check exists w/o objcache")
 
-    def exists(self, id):
+    def exists(self, id, want_source=False):
         """Return non-empty if an object is found in the object cache."""
-        if not self.objcache:
-            self._make_objcache()
-        return self.objcache.exists(id)
+        self._require_objcache()
+        return self.objcache.exists(id, want_source=want_source)
+
+    def just_write(self, sha, type, content):
+        """Write an object to the pack file without checking for duplication."""
+        self._write(sha, type, content)
+        # If nothing else, gc doesn't have/want an objcache
+        if self.objcache is not None:
+            self.objcache.add(sha)
 
     def maybe_write(self, type, content):
         """Write an object to the pack file if not present and return its id."""
         sha = calc_hash(type, content)
         if not self.exists(sha):
-            self._write(sha, type, content)
-            self.objcache.add(sha)
+            self._require_objcache()
+            self.just_write(sha, type, content)
         return sha
 
     def new_blob(self, blob):
         """Create a blob object in the pack with the supplied content."""
-        return self.maybe_write('blob', blob)
+        return self.maybe_write(b'blob', blob)
 
     def new_tree(self, shalist):
         """Create a tree object in the pack."""
-        shalist = sorted(shalist, key = _shalist_sort_key)
-        l = []
-        for (mode,name,bin) in shalist:
-            assert(mode)
-            assert(mode != '0')
-            assert(mode[0] != '0')
-            assert(name)
-            assert(len(bin) == 20)
-            l.append('%s %s\0%s' % (mode,name,bin))
-        return self.maybe_write('tree', ''.join(l))
-
-    def _new_commit(self, tree, parent, author, adate, committer, cdate, msg):
-        l = []
-        if tree: l.append('tree %s' % tree.encode('hex'))
-        if parent: l.append('parent %s' % parent.encode('hex'))
-        if author: l.append('author %s %s' % (author, _git_date(adate)))
-        if committer: l.append('committer %s %s' % (committer, _git_date(cdate)))
-        l.append('')
-        l.append(msg)
-        return self.maybe_write('commit', '\n'.join(l))
-
-    def new_commit(self, parent, tree, date, msg):
-        """Create a commit object in the pack."""
-        userline = '%s <%s@%s>' % (userfullname(), username(), hostname())
-        commit = self._new_commit(tree, parent,
-                                  userline, date, userline, date,
-                                  msg)
-        return commit
+        content = tree_encode(shalist)
+        return self.maybe_write(b'tree', content)
+
+    def new_commit(self, tree, parent,
+                   author, adate_sec, adate_tz,
+                   committer, cdate_sec, cdate_tz,
+                   msg):
+        """Create a commit object in the pack.  The date_sec values must be
+        epoch-seconds, and if a tz is None, the local timezone is assumed."""
+        content = create_commit_blob(tree, parent,
+                                     author, adate_sec, adate_tz,
+                                     committer, cdate_sec, cdate_tz,
+                                     msg)
+        return self.maybe_write(b'commit', content)
+
+    def _end(self, run_midx=True, abort=False):
+        # Ignores run_midx during abort
+        self.parentfd, pfd, = None, self.parentfd
+        self.file, f = None, self.file
+        self.idx, idx = None, self.idx
+        try:
+            with nullcontext_if_not(self.objcache), \
+                 finalized(pfd, lambda x: x is not None and os.close(x)), \
+                 nullcontext_if_not(f):
+                if not f:
+                    return None
+
+                if abort:
+                    os.unlink(self.filename + b'.pack')
+                    return None
+
+                # update object count
+                f.seek(8)
+                cp = struct.pack('!i', self.count)
+                assert len(cp) == 4
+                f.write(cp)
+
+                # calculate the pack sha1sum
+                f.seek(0)
+                sum = Sha1()
+                for b in chunkyreader(f):
+                    sum.update(b)
+                packbin = sum.digest()
+                f.write(packbin)
+                f.flush()
+                fdatasync(f.fileno())
+                f.close()
+
+                idx.write(self.filename + b'.idx', packbin)
+                nameprefix = os.path.join(self.repo_dir,
+                                          b'objects/pack/pack-' +  hexlify(packbin))
+                if os.path.exists(self.filename + b'.map'):
+                    os.unlink(self.filename + b'.map')
+                os.rename(self.filename + b'.pack', nameprefix + b'.pack')
+                os.rename(self.filename + b'.idx', nameprefix + b'.idx')
+                os.fsync(pfd)
+                if run_midx:
+                    auto_midx(os.path.join(self.repo_dir, b'objects/pack'))
+                if self.on_pack_finish:
+                    self.on_pack_finish(nameprefix)
+                return nameprefix
+        finally:
+            # Must be last -- some of the code above depends on it
+            self.objcache = None
 
     def abort(self):
         """Remove the pack file from disk."""
-        f = self.file
-        if f:
-            self.idx = None
-            self.file = None
-            f.close()
-            os.unlink(self.filename + '.pack')
-
-    def _end(self):
-        f = self.file
-        if not f: return None
-        self.file = None
-        self.objcache = None
-        idx = self.idx
-        self.idx = None
+        self.closed = True
+        self._end(abort=True)
 
-        # update object count
-        f.seek(8)
-        cp = struct.pack('!i', self.count)
-        assert(len(cp) == 4)
-        f.write(cp)
-
-        # calculate the pack sha1sum
-        f.seek(0)
-        sum = Sha1()
-        for b in chunkyreader(f):
-            sum.update(b)
-        packbin = sum.digest()
-        f.write(packbin)
-        f.close()
-
-        idx_f = open(self.filename + '.idx', 'wb')
-        obj_list_sha = self._write_pack_idx_v2(idx_f, idx, packbin)
-        idx_f.close()
-
-        nameprefix = repo('objects/pack/pack-%s' % obj_list_sha)
-        if os.path.exists(self.filename + '.map'):
-            os.unlink(self.filename + '.map')
-        os.rename(self.filename + '.pack', nameprefix + '.pack')
-        os.rename(self.filename + '.idx', nameprefix + '.idx')
-
-        auto_midx(repo('objects/pack'))
-        return nameprefix
+    def breakpoint(self):
+        """Clear byte and object counts and return the last processed id."""
+        id = self._end(self.run_midx)
+        self.outbytes = self.count = 0
+        return id
 
-    def close(self):
+    def close(self, run_midx=True):
         """Close the pack file and move it to its definitive path."""
-        return self._end()
-
-    def _write_pack_idx_v2(self, file, idx, packbin):
-        sum = Sha1()
-
-        def write(data):
-            file.write(data)
-            sum.update(data)
-
-        write('\377tOc\0\0\0\2')
-
-        n = 0
-        for part in idx:
-            n += len(part)
-            write(struct.pack('!i', n))
-            part.sort(key=lambda x: x[0])
-
-        obj_list_sum = Sha1()
-        for part in idx:
-            for entry in part:
-                write(entry[0])
-                obj_list_sum.update(entry[0])
-        for part in idx:
-            for entry in part:
-                write(struct.pack('!I', entry[1]))
-        ofs64_list = []
-        for part in idx:
-            for entry in part:
-                if entry[2] & 0x80000000:
-                    write(struct.pack('!I', 0x80000000 | len(ofs64_list)))
-                    ofs64_list.append(struct.pack('!Q', entry[2]))
-                else:
-                    write(struct.pack('!i', entry[2]))
-        for ofs64 in ofs64_list:
-            write(ofs64)
-
-        write(packbin)
-        file.write(sum.digest())
-        return obj_list_sum.hexdigest()
-
+        self.closed = True
+        return self._end(run_midx=run_midx)
 
-def _git_date(date):
-    return '%d %s' % (date, time.strftime('%z', time.localtime(date)))
+    def __del__(self):
+        assert self.closed
 
 
-def _gitenv():
-    os.environ['GIT_DIR'] = os.path.abspath(repo())
+class PackIdxV2Writer:
+    def __init__(self):
+        self.idx = list(list() for i in range(256))
+        self.count = 0
 
+    def add(self, sha, crc, offs):
+        assert(sha)
+        self.count += 1
+        self.idx[byte_int(sha[0])].append((sha, crc, offs))
+
+    def write(self, filename, packbin):
+        ofs64_count = 0
+        for section in self.idx:
+            for entry in section:
+                if entry[2] >= 2**31:
+                    ofs64_count += 1
+
+        # Length: header + fan-out + shas-and-crcs + overflow-offsets
+        index_len = 8 + (4 * 256) + (28 * self.count) + (8 * ofs64_count)
+        idx_map = None
+        idx_f = open(filename, 'w+b')
+        try:
+            idx_f.truncate(index_len)
+            fdatasync(idx_f.fileno())
+            idx_map = mmap_readwrite(idx_f, close=False)
+            try:
+                count = _helpers.write_idx(filename, idx_map, self.idx,
+                                           self.count)
+                assert(count == self.count)
+                idx_map.flush()
+            finally:
+                idx_map.close()
+        finally:
+            idx_f.close()
+
+        idx_f = open(filename, 'a+b')
+        try:
+            idx_f.write(packbin)
+            idx_f.seek(0)
+            idx_sum = Sha1()
+            b = idx_f.read(8 + 4*256)
+            idx_sum.update(b)
+
+            for b in chunkyreader(idx_f, 20 * self.count):
+                idx_sum.update(b)
+
+            for b in chunkyreader(idx_f):
+                idx_sum.update(b)
+            idx_f.write(idx_sum.digest())
+            fdatasync(idx_f.fileno())
+        finally:
+            idx_f.close()
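
A size sanity check for the layout computed above: each object contributes 20 (sha) + 4 (crc) + 4 (offset) bytes, and the two trailing 20-byte SHA-1s (pack checksum, then index checksum) are appended afterwards:

    # For 2 objects, neither past the 2**31 offset threshold:
    #   index_len == 8 + 4*256 + 28*2 + 8*0 == 1088
    # final .idx size == index_len + 20 (packbin) + 20 (idx_sum) == 1128
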
+
+
+def list_refs(patterns=None, repo_dir=None,
+              limit_to_heads=False, limit_to_tags=False):
+    """Yield (refname, hash) tuples for all repository refs unless
+    patterns are specified.  In that case, only include tuples for
+    refs matching those patterns (cf. git-show-ref(1)).  The limits
+    restrict the result items to refs/heads or refs/tags.  If both
+    limits are specified, items from both sources will be included.
 
-def list_refs(refname = None):
-    """Generate a list of tuples in the form (refname,hash).
-    If a ref name is specified, list only this particular ref.
     """
-    argv = ['git', 'show-ref', '--']
-    if refname:
-        argv += [refname]
-    p = subprocess.Popen(argv, preexec_fn = _gitenv, stdout = subprocess.PIPE)
+    argv = [b'git', b'show-ref']
+    if limit_to_heads:
+        argv.append(b'--heads')
+    if limit_to_tags:
+        argv.append(b'--tags')
+    argv.append(b'--')
+    if patterns:
+        argv.extend(patterns)
+    p = subprocess.Popen(argv, env=_gitenv(repo_dir), stdout=subprocess.PIPE,
+                         close_fds=True)
     out = p.stdout.read().strip()
     rv = p.wait()  # not fatal
     if rv:
         assert(not out)
     if out:
-        for d in out.split('\n'):
-            (sha, name) = d.split(' ', 1)
-            yield (name, sha.decode('hex'))
+        for d in out.split(b'\n'):
+            sha, name = d.split(b' ', 1)
+            yield name, unhexlify(sha)
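
A usage sketch with a hypothetical repository path; path_msg and hexlify come from the imports at the top of the module:

    for name, sha in list_refs(limit_to_heads=True,
                               repo_dir=b'/path/to/repo'):
        print(path_msg(name), hexlify(sha).decode('ascii'))
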
 
 
-def read_ref(refname):
+def read_ref(refname, repo_dir = None):
     """Get the commit id of the most recent commit made on a given ref."""
-    l = list(list_refs(refname))
+    refs = list_refs(patterns=[refname], repo_dir=repo_dir, limit_to_heads=True)
+    l = tuple(islice(refs, 2))
     if l:
         assert(len(l) == 1)
         return l[0][1]
@@ -766,43 +1080,57 @@ def read_ref(refname):
         return None
 
 
-def rev_list(ref, count=None):
-    """Generate a list of reachable commits in reverse chronological order.
-
-    This generator walks through commits, from child to parent, that are
-    reachable via the specified ref and yields a series of tuples of the form
-    (date,hash).
+def rev_list_invocation(ref_or_refs, format=None):
+    if isinstance(ref_or_refs, bytes):
+        refs = (ref_or_refs,)
+    else:
+        refs = ref_or_refs
+    argv = [b'git', b'rev-list']
+
+    if format:
+        argv.append(b'--pretty=format:' + format)
+    for ref in refs:
+        assert not ref.startswith(b'-')
+        argv.append(ref)
+    argv.append(b'--')
+    return argv
+
+
+def rev_list(ref_or_refs, parse=None, format=None, repo_dir=None):
+    """Yield information about commits as per "git rev-list".  If a format
+    is not provided, yield one hex hash at a time.  If a format is
+    provided, pass it to rev-list and call parse(git_stdout) for each
+    commit with the stream positioned just after the rev-list "commit
+    HASH" header line.  When a format is provided yield (oidx,
+    parse(git_stdout)) for each commit.
 
-    If count is a non-zero integer, limit the number of commits to "count"
-    objects.
     """
-    assert(not ref.startswith('-'))
-    opts = []
-    if count:
-        opts += ['-n', str(atoi(count))]
-    argv = ['git', 'rev-list', '--pretty=format:%ct'] + opts + [ref, '--']
-    p = subprocess.Popen(argv, preexec_fn = _gitenv, stdout = subprocess.PIPE)
-    commit = None
-    for row in p.stdout:
-        s = row.strip()
-        if s.startswith('commit '):
-            commit = s[7:].decode('hex')
-        else:
-            date = int(s)
-            yield (date, commit)
+    assert bool(parse) == bool(format)
+    p = subprocess.Popen(rev_list_invocation(ref_or_refs,
+                                             format=format),
+                         env=_gitenv(repo_dir),
+                         stdout = subprocess.PIPE,
+                         close_fds=True)
+    if not format:
+        for line in p.stdout:
+            yield line.strip()
+    else:
+        line = p.stdout.readline()
+        while line:
+            s = line.strip()
+            if not s.startswith(b'commit '):
+                raise Exception('unexpected line ' + repr(s))
+            s = s[7:]
+            assert len(s) == 40
+            yield s, parse(p.stdout)
+            line = p.stdout.readline()
+
-    rv = p.wait()  # not fatal
+    rv = p.wait()
     if rv:
-        raise GitError, 'git rev-list returned error %d' % rv
-
-
-def rev_get_date(ref):
-    """Get the date of the latest commit on the specified ref."""
-    for (date, commit) in rev_list(ref, count=1):
-        return date
-    raise GitError, 'no such commit %r' % ref
+        raise GitError('git rev-list returned error %d' % rv)
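
A sketch of the parse/format protocol, mirroring the removed "%ct"
behavior (the branch name is illustrative):

    def parse_ct(stream):
        # With format=b'%ct', each commit is emitted as a "commit HASH"
        # header followed by one line holding the committer date in
        # seconds since the epoch.
        return int(stream.readline().strip())

    for oidx, date in rev_list(b'refs/heads/main', parse=parse_ct, format=b'%ct'):
        print(oidx.decode('ascii'), date)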
 
 
-def rev_parse(committish):
+def rev_parse(committish, repo_dir=None):
     """Resolve the full hash for 'committish', if it exists.
 
     Should be roughly equivalent to 'git rev-parse'.
@@ -810,33 +1138,54 @@ def rev_parse(committish):
     Returns the hex value of the hash if it is found, None if 'committish' does
     not correspond to anything.
     """
-    head = read_ref(committish)
+    head = read_ref(committish, repo_dir=repo_dir)
     if head:
-        debug2("resolved from ref: commit = %s\n" % head.encode('hex'))
+        debug2("resolved from ref: commit = %s\n" % hexlify(head))
         return head
 
-    pL = PackIdxList(repo('objects/pack'))
-
     if len(committish) == 40:
         try:
-            hash = committish.decode('hex')
+            hash = unhexlify(committish)
-        except TypeError:
+        except (TypeError, ValueError):
+            # py2's .decode('hex') raised TypeError; py3's unhexlify
+            # raises binascii.Error, a ValueError subclass.
             return None
 
-        if pL.exists(hash):
-            return hash
+        with PackIdxList(repo(b'objects/pack', repo_dir=repo_dir)) as pL:
+            if pL.exists(hash):
+                return hash
 
     return None
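
A sketch of typical use (the committish values are illustrative):

    oid = rev_parse(b'refs/heads/main')  # resolve a ref to a binary id
    oid = rev_parse(b'deadbeef' * 5)     # or check a full hex id in the packs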
 
 
-def update_ref(refname, newval, oldval):
-    """Change the commit pointed to by a branch."""
-    if not oldval:
-        oldval = ''
-    assert(refname.startswith('refs/heads/'))
-    p = subprocess.Popen(['git', 'update-ref', refname,
-                          newval.encode('hex'), oldval.encode('hex')],
-                         preexec_fn = _gitenv)
+def update_ref(refname, newval, oldval, repo_dir=None, force=False):
+    """Update a repository reference.
+
+    With force=True, don't care about the previous ref (oldval);
+    with force=False, oldval must be either a sha1 or None (for an
+    entirely new branch).
+    """
+    if force:
+        assert oldval is None
+        oldarg = []
+    elif not oldval:
+        oldarg = [b'']
+    else:
+        oldarg = [hexlify(oldval)]
+    assert refname.startswith(b'refs/heads/') \
+        or refname.startswith(b'refs/tags/')
+    p = subprocess.Popen([b'git', b'update-ref', refname,
+                          hexlify(newval)] + oldarg,
+                         env=_gitenv(repo_dir),
+                         close_fds=True)
+    _git_wait(b'git update-ref', p)
+
+
+def delete_ref(refname, oldvalue=None):
+    """Delete a repository reference (see git update-ref(1))."""
+    assert refname.startswith(b'refs/')
+    oldvalue = [] if not oldvalue else [oldvalue]
+    p = subprocess.Popen([b'git', b'update-ref', b'-d', refname] + oldvalue,
+                         env=_gitenv(),
+                         close_fds=True)
     _git_wait('git update-ref', p)
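
Taken together, a sketch of guarded ref manipulation (new_oid and
newer_oid are hypothetical 20-byte binary ids):

    update_ref(b'refs/heads/example', new_oid, None)        # create
    update_ref(b'refs/heads/example', newer_oid, new_oid)   # fast-forward
    delete_ref(b'refs/heads/example', hexlify(newer_oid))   # guarded delete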
 
 
@@ -851,222 +1200,196 @@ def guess_repo(path=None):
     if path:
         repodir = path
     if not repodir:
-        repodir = os.environ.get('BUP_DIR')
+        repodir = environ.get(b'BUP_DIR')
         if not repodir:
-            repodir = os.path.expanduser('~/.bup')
+            repodir = os.path.expanduser(b'~/.bup')
 
 
 def init_repo(path=None):
     """Create the Git bare repository for bup in a given path."""
     guess_repo(path)
-    d = repo()
-    if os.path.exists(d) and not os.path.isdir(os.path.join(d, '.')):
-        raise GitError('"%d" exists but is not a directory\n' % d)
-    p = subprocess.Popen(['git', '--bare', 'init'], stdout=sys.stderr,
-                         preexec_fn = _gitenv)
+    d = repo()  # appends a / to the path
+    parent = os.path.dirname(os.path.dirname(d))
+    if parent and not os.path.exists(parent):
+        raise GitError('parent directory "%s" does not exist\n'
+                       % path_msg(parent))
+    if os.path.exists(d) and not os.path.isdir(os.path.join(d, b'.')):
+        raise GitError('"%s" exists but is not a directory\n' % path_msg(d))
+    p = subprocess.Popen([b'git', b'--bare', b'init'], stdout=sys.stderr,
+                         env=_gitenv(),
+                         close_fds=True)
     _git_wait('git init', p)
     # Force the index version configuration in order to ensure bup works
     # regardless of the version of the installed Git binary.
-    p = subprocess.Popen(['git', 'config', 'pack.indexVersion', '2'],
-                         stdout=sys.stderr, preexec_fn = _gitenv)
+    p = subprocess.Popen([b'git', b'config', b'pack.indexVersion', b'2'],
+                         stdout=sys.stderr, env=_gitenv(), close_fds=True)
+    _git_wait('git config', p)
+    # Enable the reflog
+    p = subprocess.Popen([b'git', b'config', b'core.logAllRefUpdates', b'true'],
+                         stdout=sys.stderr, env=_gitenv(), close_fds=True)
     _git_wait('git config', p)
 
 
 def check_repo_or_die(path=None):
-    """Make sure a bup repository exists, and abort if not.
-    If the path to a particular repository was not specified, this function
-    initializes the default repository automatically.
-    """
+    """Check to see if a bup repository probably exists, and abort if not."""
     guess_repo(path)
-    if not os.path.isdir(repo('objects/pack/.')):
-        if repodir == home_repodir:
-            init_repo()
-        else:
-            log('error: %r is not a bup/git repository\n' % repo())
+    top = repo()
+    pst = stat_if_exists(top + b'/objects/pack')
+    if pst and stat.S_ISDIR(pst.st_mode):
+        return
+    if not pst:
+        top_st = stat_if_exists(top)
+        if not top_st:
+            log('error: repository %s does not exist (see "bup help init")\n'
+                % path_msg(top))
             sys.exit(15)
+    log('error: %s is not a repository\n' % path_msg(top))
+    sys.exit(14)
+
+
+def is_suitable_git(ver_str):
+    if not ver_str.startswith(b'git version '):
+        return 'unrecognized'
+    ver_str = ver_str[len(b'git version '):]
+    if ver_str.startswith(b'0.'):
+        return 'insufficient'
+    if ver_str.startswith(b'1.'):
+        if re.match(br'1\.[012345]rc', ver_str):
+            return 'insufficient'
+        if re.match(br'1\.[01234]\.', ver_str):
+            return 'insufficient'
+        if re.match(br'1\.5\.[012345]($|\.)', ver_str):
+            return 'insufficient'
+        if re.match(br'1\.5\.6-rc', ver_str):
+            return 'insufficient'
+        return 'suitable'
+    if re.match(br'[0-9]+(\.|$)?', ver_str):
+        return 'suitable'
+    sys.exit(13)
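
A few illustrative classifications (a sketch; the version strings are
assumed inputs):

    assert is_suitable_git(b'git version 2.39.2') == 'suitable'
    assert is_suitable_git(b'git version 1.5.3.1') == 'insufficient'
    assert is_suitable_git(b'version unknown') == 'unrecognized'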
+
+_git_great = None
+
+def require_suitable_git(ver_str=None):
+    """Raise GitError if the version of git isn't suitable.
+
+    Rely on ver_str when provided, rather than invoking the git in the
+    path.
 
-
-def treeparse(buf):
-    """Generate a list of (mode, name, hash) tuples of objects from 'buf'."""
-    ofs = 0
-    while ofs < len(buf):
-        z = buf[ofs:].find('\0')
-        assert(z > 0)
-        spl = buf[ofs:ofs+z].split(' ', 1)
-        assert(len(spl) == 2)
-        sha = buf[ofs+z+1:ofs+z+1+20]
-        ofs += z+1+20
-        yield (spl[0], spl[1], sha)
-
-
-_ver = None
-def ver():
-    """Get Git's version and ensure a usable version is installed.
-
-    The returned version is formatted as an ordered tuple with each position
-    representing a digit in the version tag. For example, the following tuple
-    would represent version 1.6.6.9:
-
-        ('1', '6', '6', '9')
     """
-    global _ver
-    if not _ver:
-        p = subprocess.Popen(['git', '--version'],
-                             stdout=subprocess.PIPE)
-        gvs = p.stdout.read()
-        _git_wait('git --version', p)
-        m = re.match(r'git version (\S+.\S+)', gvs)
-        if not m:
-            raise GitError('git --version weird output: %r' % gvs)
-        _ver = tuple(m.group(1).split('.'))
-    needed = ('1','5', '3', '1')
-    if _ver < needed:
-        raise GitError('git version %s or higher is required; you have %s'
-                       % ('.'.join(needed), '.'.join(_ver)))
-    return _ver
-
-
-def _git_wait(cmd, p):
-    rv = p.wait()
-    if rv != 0:
-        raise GitError('%s returned %d' % (cmd, rv))
-
-
-def _git_capture(argv):
-    p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn = _gitenv)
-    r = p.stdout.read()
-    _git_wait(repr(argv), p)
-    return r
-
-
-class _AbortableIter:
-    def __init__(self, it, onabort = None):
-        self.it = it
-        self.onabort = onabort
-        self.done = None
+    global _git_great
+    if _git_great is not None:
+        return
+    if environ.get(b'BUP_GIT_VERSION_IS_FINE', b'').lower() \
+       in (b'yes', b'true', b'1'):
+        _git_great = True
+        return
+    if not ver_str:
+        ver_str, _, _ = _git_exo([b'git', b'--version'])
+    status = is_suitable_git(ver_str)
+    if status == 'unrecognized':
+        raise GitError('Unexpected git --version output: %r' % ver_str)
+    if status == 'insufficient':
+        log('error: git version must be at least 1.5.6\n')
+        sys.exit(1)
+    if status == 'suitable':
+        _git_great = True
+        return
+    assert False
 
-    def __iter__(self):
-        return self
-
-    def next(self):
-        try:
-            return self.it.next()
-        except StopIteration, e:
-            self.done = True
-            raise
-        except:
-            self.abort()
-            raise
 
-    def abort(self):
-        """Abort iteration and call the abortion callback, if needed."""
-        if not self.done:
-            self.done = True
-            if self.onabort:
-                self.onabort()
-
-    def __del__(self):
-        self.abort()
-
-
-_ver_warned = 0
 class CatPipe:
     """Link to 'git cat-file' that is used to retrieve blob data."""
-    def __init__(self):
-        global _ver_warned
-        wanted = ('1','5','6')
-        if ver() < wanted:
-            if not _ver_warned:
-                log('warning: git version < %s; bup will be slow.\n'
-                    % '.'.join(wanted))
-                _ver_warned = 1
-            self.get = self._slow_get
-        else:
-            self.p = self.inprogress = None
-            self.get = self._fast_get
-
-    def _abort(self):
-        if self.p:
-            self.p.stdout.close()
-            self.p.stdin.close()
-        self.p = None
+    def __init__(self, repo_dir = None):
+        require_suitable_git()
+        self.repo_dir = repo_dir
+        self.p = self.inprogress = None
+
+    def close(self, wait=False):
+        self.p, p = None, self.p
         self.inprogress = None
+        if p:
+            try:
+                p.stdout.close()
+            finally:
+                # This will handle pending exceptions correctly once
+                # we drop py2
+                p.stdin.close()
+        if wait and p:
+            p.wait()
+            return p.returncode
+        return None
 
-    def _restart(self):
-        self._abort()
-        self.p = subprocess.Popen(['git', 'cat-file', '--batch'],
+    def restart(self):
+        self.close()
+        self.p = subprocess.Popen([b'git', b'cat-file', b'--batch'],
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   close_fds = True,
-                                  preexec_fn = _gitenv)
+                                  bufsize = 4096,
+                                  env=_gitenv(self.repo_dir))
 
-    def _fast_get(self, id):
+    def get(self, ref):
+        """Yield (oidx, type, size), followed by the data referred to by ref.
+        If ref does not exist, only yield (None, None, None).
+
+        """
         if not self.p or self.p.poll() != None:
-            self._restart()
+            self.restart()
         assert(self.p)
-        assert(self.p.poll() == None)
+        poll_result = self.p.poll()
+        assert poll_result is None
         if self.inprogress:
-            log('_fast_get: opening %r while %r is open'
-                % (id, self.inprogress))
+            log('get: opening %r while %r is open\n' % (ref, self.inprogress))
         assert(not self.inprogress)
-        assert(id.find('\n') < 0)
-        assert(id.find('\r') < 0)
-        assert(not id.startswith('-'))
-        self.inprogress = id
-        self.p.stdin.write('%s\n' % id)
+        assert ref.find(b'\n') < 0
+        assert ref.find(b'\r') < 0
+        assert not ref.startswith(b'-')
+        self.inprogress = ref
+        self.p.stdin.write(ref + b'\n')
+        self.p.stdin.flush()
         hdr = self.p.stdout.readline()
-        if hdr.endswith(' missing\n'):
+        if not hdr:
+            raise GitError('unexpected cat-file EOF (last request: %r, exit: %s)'
+                           % (ref, self.p.poll() or 'none'))
+        if hdr.endswith(b' missing\n'):
             self.inprogress = None
-            raise KeyError('blob %r is missing' % id)
-        spl = hdr.split(' ')
-        if len(spl) != 3 or len(spl[0]) != 40:
-            raise GitError('expected blob, got %r' % spl)
-        (hex, type, size) = spl
-
-        it = _AbortableIter(chunkyreader(self.p.stdout, int(spl[2])),
-                           onabort = self._abort)
+            yield None, None, None
+            return
+        info = hdr.split(b' ')
+        if len(info) != 3 or len(info[0]) != 40:
+            raise GitError('expected object (id, type, size), got %r' % info)
+        oidx, typ, size = info
+        size = int(size)
         try:
-            yield type
-            for blob in it:
+            it = chunkyreader(self.p.stdout, size)
+            yield oidx, typ, size
+            for blob in it:
                 yield blob
-            assert(self.p.stdout.readline() == '\n')
+            readline_result = self.p.stdout.readline()
+            assert readline_result == b'\n'
             self.inprogress = None
-        except Exception, e:
-            it.abort()
-            raise
-
-    def _slow_get(self, id):
-        assert(id.find('\n') < 0)
-        assert(id.find('\r') < 0)
-        assert(id[0] != '-')
-        type = _git_capture(['git', 'cat-file', '-t', id]).strip()
-        yield type
-
-        p = subprocess.Popen(['git', 'cat-file', type, id],
-                             stdout=subprocess.PIPE,
-                             preexec_fn = _gitenv)
-        for blob in chunkyreader(p.stdout):
-            yield blob
-        _git_wait('git cat-file', p)
+        except Exception as ex:
+            with pending_raise(ex):
+                self.close()
 
     def _join(self, it):
-        type = it.next()
-        if type == 'blob':
+        _, typ, _ = next(it)
+        if typ == b'blob':
             for blob in it:
                 yield blob
-        elif type == 'tree':
-            treefile = ''.join(it)
-            for (mode, name, sha) in treeparse(treefile):
-                for blob in self.join(sha.encode('hex')):
+        elif typ == b'tree':
+            treefile = b''.join(it)
+            for (mode, name, sha) in tree_decode(treefile):
+                for blob in self.join(hexlify(sha)):
                     yield blob
-        elif type == 'commit':
-            treeline = ''.join(it).split('\n')[0]
-            assert(treeline.startswith('tree '))
+        elif typ == b'commit':
+            treeline = b''.join(it).split(b'\n')[0]
+            assert treeline.startswith(b'tree ')
             for blob in self.join(treeline[5:]):
                 yield blob
         else:
             raise GitError('invalid object type %r: expected blob/tree/commit'
-                           % type)
+                           % typ)
 
     def join(self, id):
         """Generate a list of the content of all blobs that can be reached
@@ -1074,21 +1397,128 @@ class CatPipe:
         or a commit. The content of all blobs that can be seen from trees or
         commits will be added to the list.
         """
-        try:
-            for d in self._join(self.get(id)):
-                yield d
-        except StopIteration:
-            log('booger!\n')
+        for d in self._join(self.get(id)):
+            yield d
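
A hedged usage sketch for CatPipe (the ref is illustrative; get()'s
iterator must be drained before issuing another request):

    catpipe = CatPipe()
    it = catpipe.get(b'refs/heads/main')
    oidx, typ, size = next(it)
    if oidx is None:
        print('no such object')
    else:
        data = b''.join(it)  # drain the remaining blob chunks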
+
+
+_cp = {}
+
+def cp(repo_dir=None):
+    """Create a CatPipe object or reuse the already existing one."""
+    global _cp, repodir
+    if not repo_dir:
+        repo_dir = repodir or repo()
+    repo_dir = os.path.abspath(repo_dir)
+    cp = _cp.get(repo_dir)
+    if not cp:
+        cp = CatPipe(repo_dir)
+        _cp[repo_dir] = cp
+    return cp
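
Since each CatPipe keeps a long-lived `git cat-file --batch` child,
callers normally go through this per-repository cache; a sketch (the
second path is hypothetical):

    catpipe = cp()                  # CatPipe for the default repository
    other = cp(b'/tmp/other-repo')  # separate instance, cached by abspath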
 
-def tags():
+
+def close_catpipes():
+    # FIXME: chain exceptions
+    while _cp:
+        _, cp = _cp.popitem()
+        cp.close(wait=True)
+
+
+def tags(repo_dir = None):
     """Return a dictionary of all tags in the form {hash: [tag_names, ...]}."""
     tags = {}
-    for (n,c) in list_refs():
-        if n.startswith('refs/tags/'):
-            name = n[10:]
-            if not c in tags:
-                tags[c] = []
+    for n, c in list_refs(repo_dir = repo_dir, limit_to_tags=True):
+        assert n.startswith(b'refs/tags/')
+        name = n[10:]
+        if c not in tags:
+            tags[c] = []
+        tags[c].append(name)  # more than one tag can point at 'c'
+    return tags
 
-            tags[c].append(name)  # more than one tag can point at 'c'
 
-    return tags
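
A sketch of the resulting mapping (the tag names are illustrative):

    # tags() -> {oid: [b'v0.1', b'v0.1-release'], ...}
    for oid, names in tags().items():
        print(hexlify(oid).decode('ascii'),
              b' '.join(names).decode('ascii', 'replace'))
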
+class MissingObject(KeyError):
+    def __init__(self, oid):
+        self.oid = oid
+        KeyError.__init__(self, 'object %r is missing' % hexlify(oid))
+
+
+WalkItem = namedtuple('WalkItem', ['oid', 'type', 'mode',
+                                   'path', 'chunk_path', 'data'])
+# The path is the mangled path, and if an item represents a fragment
+# of a chunked file, the chunk_path will be the chunked subtree path
+# for the chunk, i.e. ['', '2d3115e', ...].  The top-level path for a
+# chunked file will have a chunk_path of [''].  So some chunk subtree
+# of the file '/foo/bar/baz' might look like this:
+#
+#   item.path = ['foo', 'bar', 'baz.bup']
+#   item.chunk_path = ['', '2d3115e', '016b097']
+#   item.type = 'tree'
+#   ...
+
+
+def walk_object(get_ref, oidx, stop_at=None, include_data=None):
+    """Yield everything reachable from oidx via get_ref (which must behave
+    like CatPipe get) as a WalkItem, stopping whenever stop_at(oidx)
+    returns true.  Throw MissingObject if a hash encountered is
+    missing from the repository, and don't read or return blob content
+    in the data field unless include_data is set.
+
+    """
+    # Maintain the pending stack on the heap to avoid stack overflow
+    pending = [(oidx, [], [], None)]
+    while len(pending):
+        oidx, parent_path, chunk_path, mode = pending.pop()
+        oid = unhexlify(oidx)
+        if stop_at and stop_at(oidx):
+            continue
+
+        if (not include_data) and mode and stat.S_ISREG(mode):
+            # If the object is a "regular file", then it's a leaf in
+            # the graph, so we can skip reading the data if the caller
+            # hasn't requested it.
+            yield WalkItem(oid=oid, type=b'blob',
+                           chunk_path=chunk_path, path=parent_path,
+                           mode=mode,
+                           data=None)
+            continue
+
+        item_it = get_ref(oidx)
+        get_oidx, typ, _ = next(item_it)
+        if not get_oidx:
+            raise MissingObject(unhexlify(oidx))
+        if typ not in (b'blob', b'commit', b'tree'):
+            raise Exception('unexpected repository object type %r' % typ)
+
+        # FIXME: set the mode based on the type when the mode is None
+        if typ == b'blob' and not include_data:
+            # Dump data until we can ask cat_pipe not to fetch it
+            for ignored in item_it:
+                pass
+            data = None
+        else:
+            data = b''.join(item_it)
+
+        yield WalkItem(oid=oid, type=typ,
+                       chunk_path=chunk_path, path=parent_path,
+                       mode=mode,
+                       data=(data if include_data else None))
+
+        if typ == b'commit':
+            commit_items = parse_commit(data)
+            for pid in commit_items.parents:
+                pending.append((pid, parent_path, chunk_path, mode))
+            pending.append((commit_items.tree, parent_path, chunk_path,
+                            hashsplit.GIT_MODE_TREE))
+        elif typ == b'tree':
+            for mode, name, ent_id in tree_decode(data):
+                demangled, bup_type = demangle_name(name, mode)
+                if chunk_path:
+                    sub_path = parent_path
+                    sub_chunk_path = chunk_path + [name]
+                else:
+                    sub_path = parent_path + [name]
+                    if bup_type == BUP_CHUNKED:
+                        sub_chunk_path = [b'']
+                    else:
+                        sub_chunk_path = chunk_path
+                pending.append((hexlify(ent_id), sub_path, sub_chunk_path,
+                                mode))