"""Git interaction library.
bup repositories are in Git format. This library allows us to
interact with the Git data structures.
"""
5 import os, sys, zlib, time, subprocess, struct, stat, re, tempfile, glob
6 from bup.helpers import *
7 from bup import _helpers, path, midx, bloom
9 max_pack_size = 1000*1000*1000 # larger packs will slow down pruning
10 max_pack_objects = 200*1000 # cache memory usage is about 83 bytes per object
11 SEEK_END=2 # os.SEEK_END is not defined in python 2.4
15 home_repodir = os.path.expanduser('~/.bup')
18 _typemap = { 'blob':3, 'tree':2, 'commit':1, 'tag':4 }
19 _typermap = { 3:'blob', 2:'tree', 1:'commit', 4:'tag' }
25 class GitError(Exception):
def repo(sub = ''):
    """Get the path to the git repository or one of its subdirectories.

    Raises GitError unless check_repo_or_die()/guess_repo() has set the
    module-global 'repodir' first.
    """
    global repodir
    if not repodir:
        raise GitError('You should call check_repo_or_die()')

    # If there's a .git subdirectory, then the actual repo is in there.
    gd = os.path.join(repodir, '.git')
    if os.path.exists(gd):
        repodir = gd

    return os.path.join(repodir, sub)
44 return re.sub(r'([^0-9a-z]|\b)([0-9a-z]{7})[0-9a-z]{33}([^0-9a-z]|\b)',
def repo_rel(path):
    """Return 'path' relative to the repo (and index-cache), hashes shortened.

    Used to keep log/progress messages compact.
    """
    full = os.path.abspath(path)
    fullrepo = os.path.abspath(repo(''))
    # ensure a trailing slash so we only strip whole path components
    if not fullrepo.endswith('/'):
        fullrepo += '/'
    if full.startswith(fullrepo):
        path = full[len(fullrepo):]
    if path.startswith('index-cache/'):
        path = path[len('index-cache/'):]
    return shorten_hash(path)
def all_packdirs():
    """List every directory that may contain pack/idx files:
    the repo's own objects/pack plus every remote index-cache dir."""
    paths = [repo('objects/pack')]
    paths += glob.glob(repo('index-cache/*/.'))
    return paths
66 def auto_midx(objdir):
67 args = [path.exe(), 'midx', '--auto', '--dir', objdir]
69 rv = subprocess.call(args, stdout=open('/dev/null', 'w'))
71 # make sure 'args' gets printed to help with debugging
72 add_error('%r: exception: %s' % (args, e))
75 add_error('%r: returned %d' % (args, rv))
77 args = [path.exe(), 'bloom', '--dir', objdir]
79 rv = subprocess.call(args, stdout=open('/dev/null', 'w'))
81 # make sure 'args' gets printed to help with debugging
82 add_error('%r: exception: %s' % (args, e))
85 add_error('%r: returned %d' % (args, rv))
def mangle_name(name, mode, gitmode):
    """Mangle a file name to present an abstract name for segmented files.
    Mangled file names will have the ".bup" extension added to them. If a
    file's name already ends with ".bup", a ".bupl" extension is added to
    disambiguate normal files from segmented ones.
    """
    if stat.S_ISREG(mode) and not stat.S_ISREG(gitmode):
        # a regular file stored as a git tree of chunks
        return name + '.bup'
    elif name.endswith('.bup') or name[:-1].endswith('.bup'):
        return name + '.bupl'
    else:
        return name
(BUP_NORMAL, BUP_CHUNKED) = (0,1)
def demangle_name(name):
    """Remove name mangling from a file name, if necessary.

    The return value is a tuple (demangled_filename,mode), where mode is one of
    the following:

    * BUP_NORMAL  : files that should be read as-is from the repository
    * BUP_CHUNKED : files that were chunked and need to be assembled

    For more information on the name mangling algorithm, see mangle_name()
    """
    if name.endswith('.bupl'):
        return (name[:-5], BUP_NORMAL)
    elif name.endswith('.bup'):
        return (name[:-4], BUP_CHUNKED)
    else:
        return (name, BUP_NORMAL)
def _encode_packobj(type, content):
    """Yield the packfile encoding of one object: the varint type/size
    header followed by the zlib-compressed content."""
    szout = ''
    sz = len(content)
    # low 4 bits of size share the first byte with the 3-bit type code
    szbits = (sz & 0x0f) | (_typemap[type]<<4)
    sz >>= 4
    while 1:
        if sz: szbits |= 0x80   # continuation bit: more size bytes follow
        szout += chr(szbits)
        if not sz:
            break
        szbits = sz & 0x7f
        sz >>= 7
    z = zlib.compressobj(1)
    yield szout
    yield z.compress(content)
    yield z.flush()
def _encode_looseobj(type, content):
    """Yield the loose-object encoding: zlib-compressed '<type> <len>\\0'
    header plus content, as git stores under .git/objects/xx/..."""
    z = zlib.compressobj(1)
    yield z.compress('%s %d\0' % (type, len(content)))
    yield z.compress(content)
    yield z.flush()
def _decode_looseobj(buf):
    """Inverse of _encode_looseobj(): return (type, content) from a
    compressed loose-object buffer."""
    assert(buf)
    s = zlib.decompress(buf)
    i = s.find('\0')
    assert(i > 0)
    l = s[:i].split(' ')
    assert(len(l) == 2)
    type = l[0]
    sz = int(l[1])
    content = s[i+1:]
    assert(type in _typemap)
    # declared size in the header must match the actual payload
    assert(sz == len(content))
    return (type, content)
def _decode_packobj(buf):
    """Inverse of _encode_packobj(): return (type, content) from a
    pack-encoded object buffer."""
    assert(buf)
    c = ord(buf[0])
    type = _typermap[(c & 0x70) >> 4]
    sz = c & 0x0f
    shift = 4
    i = 0
    # continuation bit set => more size bytes, 7 bits each
    while c & 0x80:
        i += 1
        c = ord(buf[i])
        sz |= (c & 0x7f) << shift
        shift += 7
    return (type, zlib.decompress(buf[i+1:]))
182 def find_offset(self, hash):
183 """Get the offset of an object inside the index file."""
184 idx = self._idx_from_hash(hash)
186 return self._ofs_from_idx(idx)
189 def exists(self, hash, want_source=False):
190 """Return nonempty if the object exists in this index."""
191 if hash and (self._idx_from_hash(hash) != None):
192 return want_source and os.path.basename(self.name) or True
196 return int(self.fanout[255])
198 def _idx_from_hash(self, hash):
199 global _total_searches, _total_steps
201 assert(len(hash) == 20)
203 start = self.fanout[b1-1] # range -1..254
204 end = self.fanout[b1] # range 0..255
206 _total_steps += 1 # lookup table is a step
209 mid = start + (end-start)/2
210 v = self._idx_to_hash(mid)
class PackIdxV1(PackIdx):
    """Object representation of a Git pack index (version 1) file."""
    def __init__(self, filename, f):
        self.name = filename
        self.idxnames = [self.name]
        self.map = mmap_read(f)
        # v1 layout: 256-entry fanout, then 24-byte rows of (ofs, sha)
        self.fanout = list(struct.unpack('!256I',
                                         str(buffer(self.map, 0, 256*4))))
        self.fanout.append(0)  # entry "-1"
        nsha = self.fanout[255]
        self.sha_ofs = 256*4
        self.shatable = buffer(self.map, self.sha_ofs, nsha*24)

    def _ofs_from_idx(self, idx):
        return struct.unpack('!I', str(self.shatable[idx*24 : idx*24+4]))[0]

    def _idx_to_hash(self, idx):
        return str(self.shatable[idx*24+4 : idx*24+24])

    def __iter__(self):
        for i in xrange(self.fanout[255]):
            yield buffer(self.map, 256*4 + 24*i + 4, 20)
class PackIdxV2(PackIdx):
    """Object representation of a Git pack index (version 2) file."""
    def __init__(self, filename, f):
        self.name = filename
        self.idxnames = [self.name]
        self.map = mmap_read(f)
        assert(str(self.map[0:8]) == '\377tOc\0\0\0\2')
        # v2 layout: magic+version, fanout, shas, crcs, 32-bit offsets,
        # then optional 64-bit offsets for large packs
        self.fanout = list(struct.unpack('!256I',
                                         str(buffer(self.map, 8, 256*4))))
        self.fanout.append(0)  # entry "-1"
        nsha = self.fanout[255]
        self.sha_ofs = 8 + 256*4
        self.shatable = buffer(self.map, self.sha_ofs, nsha*20)
        self.ofstable = buffer(self.map,
                               self.sha_ofs + nsha*20 + nsha*4,
                               nsha*4)
        self.ofs64table = buffer(self.map,
                                 8 + 256*4 + nsha*20 + nsha*4 + nsha*4)

    def _ofs_from_idx(self, idx):
        ofs = struct.unpack('!I', str(buffer(self.ofstable, idx*4, 4)))[0]
        if ofs & 0x80000000:
            # high bit set: value is an index into the 64-bit offset table
            idx64 = ofs & 0x7fffffff
            ofs = struct.unpack('!Q',
                                str(buffer(self.ofs64table, idx64*8, 8)))[0]
        return ofs

    def _idx_to_hash(self, idx):
        return str(self.shatable[idx*20:(idx+1)*20])

    def __iter__(self):
        for i in xrange(self.fanout[255]):
            yield buffer(self.map, 8 + 256*4 + 20*i, 20)
281 def __init__(self, dir):
283 assert(_mpi_count == 0) # these things suck tons of VM; don't waste it
288 self.do_bloom = False
295 assert(_mpi_count == 0)
298 return iter(idxmerge(self.packs))
301 return sum(len(pack) for pack in self.packs)
303 def exists(self, hash, want_source=False):
304 """Return nonempty if the object exists in the index files."""
305 global _total_searches
307 if hash in self.also:
309 if self.do_bloom and self.bloom:
310 if self.bloom.exists(hash):
311 self.do_bloom = False
313 _total_searches -= 1 # was counted by bloom
315 for i in xrange(len(self.packs)):
317 _total_searches -= 1 # will be incremented by sub-pack
318 ix = p.exists(hash, want_source=want_source)
320 # reorder so most recently used packs are searched first
321 self.packs = [p] + self.packs[:i] + self.packs[i+1:]
326 def refresh(self, skip_midx = False):
327 """Refresh the index list.
328 This method verifies if .midx files were superseded (e.g. all of its
329 contents are in another, bigger .midx file) and removes the superseded
332 If skip_midx is True, all work on .midx files will be skipped and .midx
333 files will be removed from the list.
335 The module-global variable 'ignore_midx' can force this function to
336 always act as if skip_midx was True.
338 self.bloom = None # Always reopen the bloom as it may have been relaced
339 self.do_bloom = False
340 skip_midx = skip_midx or ignore_midx
341 d = dict((p.name, p) for p in self.packs
342 if not skip_midx or not isinstance(p, midx.PackMidx))
343 if os.path.exists(self.dir):
346 for ix in self.packs:
347 if isinstance(ix, midx.PackMidx):
348 for name in ix.idxnames:
349 d[os.path.join(self.dir, name)] = ix
350 for full in glob.glob(os.path.join(self.dir,'*.midx')):
352 mx = midx.PackMidx(full)
353 (mxd, mxf) = os.path.split(mx.name)
355 for n in mx.idxnames:
356 if not os.path.exists(os.path.join(mxd, n)):
357 log(('warning: index %s missing\n' +
358 ' used by %s\n') % (n, mxf))
365 midxl.sort(key=lambda ix:
366 (-len(ix), -os.stat(ix.name).st_mtime))
369 for sub in ix.idxnames:
370 found = d.get(os.path.join(self.dir, sub))
371 if not found or isinstance(found, PackIdx):
372 # doesn't exist, or exists but not in a midx
377 for name in ix.idxnames:
378 d[os.path.join(self.dir, name)] = ix
379 elif not ix.force_keep:
380 debug1('midx: removing redundant: %s\n'
381 % os.path.basename(ix.name))
383 for full in glob.glob(os.path.join(self.dir,'*.idx')):
391 bfull = os.path.join(self.dir, 'bup.bloom')
392 if self.bloom is None and os.path.exists(bfull):
393 self.bloom = bloom.ShaBloom(bfull)
394 self.packs = list(set(d.values()))
395 self.packs.sort(lambda x,y: -cmp(len(x),len(y)))
396 if self.bloom and self.bloom.valid() and len(self.bloom) >= len(self):
400 debug1('PackIdxList: using %d index%s.\n'
401 % (len(self.packs), len(self.packs)!=1 and 'es' or ''))
404 """Insert an additional object in the list."""
def calc_hash(type, content):
    """Calculate some content's hash in the Git fashion."""
    # git hashes the '<type> <len>\0' header along with the content
    header = '%s %d\0' % (type, len(content))
    sum = Sha1(header)
    sum.update(content)
    return sum.digest()
416 def _shalist_sort_key(ent):
417 (mode, name, id) = ent
418 if stat.S_ISDIR(int(mode, 8)):
def open_idx(filename):
    """Open a .idx (v1 or v2) or .midx file, returning the right wrapper.

    Raises GitError for unrecognized headers or extensions.
    """
    if filename.endswith('.idx'):
        f = open(filename, 'rb')
        header = f.read(8)
        if header[0:4] == '\377tOc':
            version = struct.unpack('!I', header[4:8])[0]
            if version == 2:
                return PackIdxV2(filename, f)
            else:
                raise GitError('%s: expected idx file version 2, got %d'
                               % (filename, version))
        elif len(header) == 8 and header[0:4] < '\377tOc':
            # v1 files have no magic; any smaller leading bytes are a
            # plausible fanout entry
            return PackIdxV1(filename, f)
        else:
            raise GitError('%s: unrecognized idx file header' % filename)
    elif filename.endswith('.midx'):
        return midx.PackMidx(filename)
    else:
        raise GitError('idx filenames must end with .idx or .midx')
def idxmerge(idxlist, final_progress=True):
    """Generate a list of all the objects reachable in a PackIdxList."""
    def pfunc(count, total):
        qprogress('Reading indexes: %.2f%% (%d/%d)\r'
                  % (count*100.0/total, count, total))
    def pfinal(count, total):
        if final_progress:
            progress('Reading indexes: %.2f%% (%d/%d), done.\n'
                     % (100, total, total))
    return merge_iter(idxlist, 10024, pfunc, pfinal)
def _make_objcache():
    """Default objcache factory for PackWriter: all local pack indexes."""
    return PackIdxList(repo('objects/pack'))
461 """Writes Git objects inside a pack file."""
462 def __init__(self, objcache_maker=_make_objcache):
468 self.objcache_maker = objcache_maker
476 (fd,name) = tempfile.mkstemp(suffix='.pack', dir=repo('objects'))
477 self.file = os.fdopen(fd, 'w+b')
478 assert(name.endswith('.pack'))
479 self.filename = name[:-5]
480 self.file.write('PACK\0\0\0\2\0\0\0\0')
481 self.idx = list(list() for i in xrange(256))
483 def _raw_write(self, datalist, sha):
486 # in case we get interrupted (eg. KeyboardInterrupt), it's best if
487 # the file never has a *partial* blob. So let's make sure it's
488 # all-or-nothing. (The blob shouldn't be very big anyway, thanks
489 # to our hashsplit algorithm.) f.write() does its own buffering,
490 # but that's okay because we'll flush it in _end().
491 oneblob = ''.join(datalist)
495 raise GitError, e, sys.exc_info()[2]
497 crc = zlib.crc32(oneblob) & 0xffffffff
498 self._update_idx(sha, crc, nw)
503 def _update_idx(self, sha, crc, size):
506 self.idx[ord(sha[0])].append((sha, crc, self.file.tell() - size))
508 def _write(self, sha, type, content):
512 sha = calc_hash(type, content)
513 size, crc = self._raw_write(_encode_packobj(type, content), sha=sha)
514 if self.outbytes >= max_pack_size or self.count >= max_pack_objects:
518 def breakpoint(self):
519 """Clear byte and object counts and return the last processed id."""
521 self.outbytes = self.count = 0
524 def _require_objcache(self):
525 if self.objcache is None and self.objcache_maker:
526 self.objcache = self.objcache_maker()
527 if self.objcache is None:
529 "PackWriter not opened or can't check exists w/o objcache")
531 def exists(self, id, want_source=False):
532 """Return non-empty if an object is found in the object cache."""
533 self._require_objcache()
534 return self.objcache.exists(id, want_source=want_source)
536 def maybe_write(self, type, content):
537 """Write an object to the pack file if not present and return its id."""
538 sha = calc_hash(type, content)
539 if not self.exists(sha):
540 self._write(sha, type, content)
541 self._require_objcache()
542 self.objcache.add(sha)
545 def new_blob(self, blob):
546 """Create a blob object in the pack with the supplied content."""
547 return self.maybe_write('blob', blob)
549 def new_tree(self, shalist):
550 """Create a tree object in the pack."""
551 shalist = sorted(shalist, key = _shalist_sort_key)
553 for (mode,name,bin) in shalist:
556 assert(mode[0] != '0')
558 assert(len(bin) == 20)
559 l.append('%s %s\0%s' % (mode,name,bin))
560 return self.maybe_write('tree', ''.join(l))
562 def _new_commit(self, tree, parent, author, adate, committer, cdate, msg):
564 if tree: l.append('tree %s' % tree.encode('hex'))
565 if parent: l.append('parent %s' % parent.encode('hex'))
566 if author: l.append('author %s %s' % (author, _git_date(adate)))
567 if committer: l.append('committer %s %s' % (committer, _git_date(cdate)))
570 return self.maybe_write('commit', '\n'.join(l))
572 def new_commit(self, parent, tree, date, msg):
573 """Create a commit object in the pack."""
574 userline = '%s <%s@%s>' % (userfullname(), username(), hostname())
575 commit = self._new_commit(tree, parent,
576 userline, date, userline, date,
581 """Remove the pack file from disk."""
587 os.unlink(self.filename + '.pack')
589 def _end(self, run_midx=True):
591 if not f: return None
597 # update object count
599 cp = struct.pack('!i', self.count)
603 # calculate the pack sha1sum
606 for b in chunkyreader(f):
608 packbin = sum.digest()
612 obj_list_sha = self._write_pack_idx_v2(self.filename + '.idx', idx, packbin)
614 nameprefix = repo('objects/pack/pack-%s' % obj_list_sha)
615 if os.path.exists(self.filename + '.map'):
616 os.unlink(self.filename + '.map')
617 os.rename(self.filename + '.pack', nameprefix + '.pack')
618 os.rename(self.filename + '.idx', nameprefix + '.idx')
621 auto_midx(repo('objects/pack'))
624 def close(self, run_midx=True):
625 """Close the pack file and move it to its definitive path."""
626 return self._end(run_midx=run_midx)
628 def _write_pack_idx_v2(self, filename, idx, packbin):
629 idx_f = open(filename, 'w+b')
630 idx_f.write('\377tOc\0\0\0\2')
632 ofs64_ofs = 8 + 4*256 + 28*self.count
633 idx_f.truncate(ofs64_ofs)
635 idx_map = mmap_readwrite(idx_f, close=False)
636 idx_f.seek(0, SEEK_END)
637 count = _helpers.write_idx(idx_f, idx_map, idx, self.count)
638 assert(count == self.count)
644 b = idx_f.read(8 + 4*256)
647 obj_list_sum = Sha1()
648 for b in chunkyreader(idx_f, 20*self.count):
650 obj_list_sum.update(b)
651 namebase = obj_list_sum.hexdigest()
653 for b in chunkyreader(idx_f):
655 idx_f.write(idx_sum.digest())
662 return '%d %s' % (date, time.strftime('%z', time.localtime(date)))
666 os.environ['GIT_DIR'] = os.path.abspath(repo())
def list_refs(refname = None):
    """Generate a list of tuples in the form (refname,hash).
    If a ref name is specified, list only this particular ref.
    """
    argv = ['git', 'show-ref', '--']
    if refname:
        argv += [refname]
    p = subprocess.Popen(argv, preexec_fn = _gitenv, stdout = subprocess.PIPE)
    out = p.stdout.read().strip()
    rv = p.wait()  # not fatal; show-ref exits nonzero when no refs match
    if rv:
        assert(not out)
    if out:
        for d in out.split('\n'):
            (sha, name) = d.split(' ', 1)
            yield (name, sha.decode('hex'))
def read_ref(refname):
    """Get the commit id of the most recent commit made on a given ref."""
    l = list(list_refs(refname))
    l = l[:1]
    if l:
        return l[0][1]
    else:
        return None
697 def rev_list(ref, count=None):
698 """Generate a list of reachable commits in reverse chronological order.
700 This generator walks through commits, from child to parent, that are
701 reachable via the specified ref and yields a series of tuples of the form
704 If count is a non-zero integer, limit the number of commits to "count"
707 assert(not ref.startswith('-'))
710 opts += ['-n', str(atoi(count))]
711 argv = ['git', 'rev-list', '--pretty=format:%ct'] + opts + [ref, '--']
712 p = subprocess.Popen(argv, preexec_fn = _gitenv, stdout = subprocess.PIPE)
716 if s.startswith('commit '):
717 commit = s[7:].decode('hex')
721 rv = p.wait() # not fatal
723 raise GitError, 'git rev-list returned error %d' % rv
726 def rev_get_date(ref):
727 """Get the date of the latest commit on the specified ref."""
728 for (date, commit) in rev_list(ref, count=1):
730 raise GitError, 'no such commit %r' % ref
def rev_parse(committish):
    """Resolve the full hash for 'committish', if it exists.

    Should be roughly equivalent to 'git rev-parse'.

    Returns the hex value of the hash if it is found, None if 'committish' does
    not correspond to anything.
    """
    head = read_ref(committish)
    if head:
        debug2("resolved from ref: commit = %s\n" % head.encode('hex'))
        return head

    pL = PackIdxList(repo('objects/pack'))

    if len(committish) == 40:
        try:
            hash = committish.decode('hex')
        except TypeError:
            # not valid hex after all
            return None

        if pL.exists(hash):
            return hash

    return None
def update_ref(refname, newval, oldval):
    """Change the commit pointed to by a branch.

    'newval'/'oldval' are binary shas; oldval may be falsy for a new ref.
    """
    if not oldval:
        oldval = ''
    # only branch heads may be updated through this helper
    assert(refname.startswith('refs/heads/'))
    p = subprocess.Popen(['git', 'update-ref', refname,
                          newval.encode('hex'), oldval.encode('hex')],
                         preexec_fn = _gitenv)
    _git_wait('git update-ref', p)
def guess_repo(path=None):
    """Set the path value in the global variable "repodir".
    This makes bup look for an existing bup repository, but not fail if a
    repository doesn't exist. Usually, if you are interacting with a bup
    repository, you would not be calling this function but using
    check_repo_or_die().
    """
    global repodir
    if path:
        repodir = path
    if not repodir:
        # fall back to $BUP_DIR, then the default ~/.bup
        repodir = os.environ.get('BUP_DIR')
        if not repodir:
            repodir = os.path.expanduser('~/.bup')
def init_repo(path=None):
    """Create the Git bare repository for bup in a given path.

    Raises GitError if the parent directory is missing or the target
    exists but is not a directory.
    """
    guess_repo(path)
    d = repo()  # appends a / to the path
    parent = os.path.dirname(os.path.dirname(d))
    if parent and not os.path.exists(parent):
        raise GitError('parent directory "%s" does not exist\n' % parent)
    if os.path.exists(d) and not os.path.isdir(os.path.join(d, '.')):
        # BUGFIX: was '%d', which raises TypeError when formatting a path
        raise GitError('"%s" exists but is not a directory\n' % d)
    p = subprocess.Popen(['git', '--bare', 'init'], stdout=sys.stderr,
                         preexec_fn = _gitenv)
    _git_wait('git init', p)
    # Force the index version configuration in order to ensure bup works
    # regardless of the version of the installed Git binary.
    p = subprocess.Popen(['git', 'config', 'pack.indexVersion', '2'],
                         stdout=sys.stderr, preexec_fn = _gitenv)
    _git_wait('git config', p)
def check_repo_or_die(path=None):
    """Make sure a bup repository exists, and abort if not.
    If the path to a particular repository was not specified, this function
    initializes the default repository automatically.
    """
    guess_repo(path)
    if not os.path.isdir(repo('objects/pack/.')):
        if repodir == home_repodir:
            # auto-create only the default ~/.bup repo
            init_repo()
        else:
            log('error: %r is not a bup/git repository\n' % repo())
            sys.exit(15)
821 """Generate a list of (mode, name, hash) tuples of objects from 'buf'."""
823 while ofs < len(buf):
824 z = buf[ofs:].find('\0')
826 spl = buf[ofs:ofs+z].split(' ', 1)
827 assert(len(spl) == 2)
828 sha = buf[ofs+z+1:ofs+z+1+20]
830 yield (spl[0], spl[1], sha)
def ver():
    """Get Git's version and ensure a usable version is installed.

    The returned version is formatted as an ordered tuple with each position
    representing a digit in the version tag. For example, the following tuple
    would represent version 1.6.6.9:

        ('1', '6', '6', '9')
    """
    global _ver
    if not _ver:
        p = subprocess.Popen(['git', '--version'],
                             stdout=subprocess.PIPE)
        gvs = p.stdout.read()
        _git_wait('git --version', p)
        m = re.match(r'git version (\S+.\S+)', gvs)
        if not m:
            raise GitError('git --version weird output: %r' % gvs)
        _ver = tuple(m.group(1).split('.'))
    needed = ('1','5', '3', '1')
    if _ver < needed:
        raise GitError('git version %s or higher is required; you have %s'
                       % ('.'.join(needed), '.'.join(_ver)))
    return _ver
860 def _git_wait(cmd, p):
863 raise GitError('%s returned %d' % (cmd, rv))
def _git_capture(argv):
    """Run a git command and return its stdout, raising GitError on failure."""
    p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn = _gitenv)
    r = p.stdout.read()
    _git_wait(repr(argv), p)
    return r
873 class _AbortableIter:
874 def __init__(self, it, onabort = None):
876 self.onabort = onabort
884 return self.it.next()
885 except StopIteration, e:
893 """Abort iteration and call the abortion callback, if needed."""
905 """Link to 'git cat-file' that is used to retrieve blob data."""
908 wanted = ('1','5','6')
911 log('warning: git version < %s; bup will be slow.\n'
914 self.get = self._slow_get
916 self.p = self.inprogress = None
917 self.get = self._fast_get
921 self.p.stdout.close()
924 self.inprogress = None
928 self.p = subprocess.Popen(['git', 'cat-file', '--batch'],
929 stdin=subprocess.PIPE,
930 stdout=subprocess.PIPE,
933 preexec_fn = _gitenv)
935 def _fast_get(self, id):
936 if not self.p or self.p.poll() != None:
939 assert(self.p.poll() == None)
941 log('_fast_get: opening %r while %r is open'
942 % (id, self.inprogress))
943 assert(not self.inprogress)
944 assert(id.find('\n') < 0)
945 assert(id.find('\r') < 0)
946 assert(not id.startswith('-'))
948 self.p.stdin.write('%s\n' % id)
950 hdr = self.p.stdout.readline()
951 if hdr.endswith(' missing\n'):
952 self.inprogress = None
953 raise KeyError('blob %r is missing' % id)
955 if len(spl) != 3 or len(spl[0]) != 40:
956 raise GitError('expected blob, got %r' % spl)
957 (hex, type, size) = spl
959 it = _AbortableIter(chunkyreader(self.p.stdout, int(spl[2])),
960 onabort = self._abort)
965 assert(self.p.stdout.readline() == '\n')
966 self.inprogress = None
971 def _slow_get(self, id):
972 assert(id.find('\n') < 0)
973 assert(id.find('\r') < 0)
975 type = _git_capture(['git', 'cat-file', '-t', id]).strip()
978 p = subprocess.Popen(['git', 'cat-file', type, id],
979 stdout=subprocess.PIPE,
980 preexec_fn = _gitenv)
981 for blob in chunkyreader(p.stdout):
983 _git_wait('git cat-file', p)
991 treefile = ''.join(it)
992 for (mode, name, sha) in treeparse(treefile):
993 for blob in self.join(sha.encode('hex')):
995 elif type == 'commit':
996 treeline = ''.join(it).split('\n')[0]
997 assert(treeline.startswith('tree '))
998 for blob in self.join(treeline[5:]):
1001 raise GitError('invalid object type %r: expected blob/tree/commit'
1005 """Generate a list of the content of all blobs that can be reached
1006 from an object. The hash given in 'id' must point to a blob, a tree
1007 or a commit. The content of all blobs that can be seen from trees or
1008 commits will be added to the list.
1011 for d in self._join(self.get(id)):
1013 except StopIteration:
def tags():
    """Return a dictionary of all tags in the form {hash: [tag_names, ...]}."""
    tags = {}
    for (n,c) in list_refs():
        if n.startswith('refs/tags/'):
            name = n[10:]   # strip the 'refs/tags/' prefix
            if not c in tags:
                tags[c] = []
            tags[c].append(name)  # more than one tag can point at 'c'
    return tags