1 """Virtual File System interface to bup repository content.
3 This module provides a path-based interface to the content of a bup
6 The VFS is structured like this:
9 /SAVE-NAME/SAVE-DATE/...
12 Each path is represented by an item that has at least an item.meta which
13 may be either a Metadata object, or an integer mode. Functions like
14 item_mode() and item_size() will return the mode and size in either
15 case. Any item.meta Metadata instances must not be modified directly.
16 Make a copy to modify via item.meta.copy() if needed.
18 The want_meta argument is advisory for calls that accept it, and it
19 may not be honored. Callers must be able to handle an item.meta value
20 that is either an instance of Metadata or an integer mode, perhaps
21 via item_mode() or augment_item_meta().
23 Setting want_meta=False is rarely desirable since it can limit the VFS
24 to only the metadata that git itself can represent, and so for
25 example, fifos and sockets will appear to be regular files
26 (e.g. S_ISREG(item_mode(item)) will be true). But the option is still
27 provided because it may be more efficient when just the path names or
28 the more limited metadata is sufficient.
30 Any given metadata object's size may be None, in which case the size
31 can be computed via item_size() or augment_item_meta(...,
34 When traversing a directory using functions like contents(), the meta
35 value for any directories other than '.' will be a default directory
36 mode, not a Metadata object. This is because the actual metadata for
37 a directory is stored inside the directory (see
38 fill_in_metadata_if_dir() or ensure_item_has_metadata()).
40 Commit items represent commits (e.g. /.tag/some-commit or
41 /foo/latest), and for most purposes, they appear as the underlying
42 tree. S_ISDIR(item_mode(item)) will return true for both tree Items
43 and Commits and the commit's oid is the tree hash; the commit hash is
from __future__ import print_function
from collections import namedtuple
from errno import EINVAL, ELOOP, ENOENT, ENOTDIR
from itertools import chain, dropwhile, groupby, izip, tee
from stat import S_IFDIR, S_IFLNK, S_IFREG, S_ISDIR, S_ISLNK, S_ISREG
from time import localtime, strftime
import exceptions, re, sys

from bup import client, git, metadata
from bup.git import BUP_CHUNKED, cp, get_commit_items, parse_commit, tree_decode
from bup.helpers import debug2, last
from bup.metadata import Metadata
from bup.repo import LocalRepo, RemoteRepo
class IOError(exceptions.IOError):
    """IOError that can carry an optional "terminus": the tuple of
    (name, item) pairs for the partial path resolution reached before
    the failure (see resolve())."""
    def __init__(self, errno, message, terminus=None):
        self.terminus = terminus
        exceptions.IOError.__init__(self, errno, message)
# Fallback stat modes used when an item has no Metadata (e.g. items from
# plain git trees): regular file 0644, directory 0755, symlink 0755.
68 default_file_mode = S_IFREG | 0o644
69 default_dir_mode = S_IFDIR | 0o755
70 default_symlink_mode = S_IFLNK | 0o755
def _default_mode_for_gitmode(gitmode):
    """Return the default stat mode matching gitmode's file type.

    NOTE(review): the dispatch conditions were missing from this listing
    (the embedded line numbers jump 72 -> 74 -> 76 -> 78), leaving the
    second and third returns unreachable; restored the obvious branches.
    """
    if S_ISREG(gitmode):
        return default_file_mode
    if S_ISDIR(gitmode):
        return default_dir_mode
    if S_ISLNK(gitmode):
        return default_symlink_mode
    raise Exception('unexpected git mode ' + oct(gitmode))
def _normal_or_chunked_file_size(repo, oid):
    """Return the size of the normal or chunked file indicated by oid."""
    # FIXME: --batch-format CatPipe?
    it = repo.cat(oid.encode('hex'))
    _, obj_t, size = next(it)
    # For a chunked file the top object is a tree whose entry names are
    # the chunks' hex offsets into the original file; descend through the
    # *last* entry at each level so ofs accumulates the final chunk's
    # offset, then add that chunk's length.
    # NOTE(review): the ofs initialization/accumulation lines were missing
    # from this listing (ofs was referenced but never assigned); restored.
    ofs = 0
    while obj_t == 'tree':
        mode, name, last_oid = last(tree_decode(''.join(it)))
        ofs += int(name, 16)
        it = repo.cat(last_oid.encode('hex'))
        _, obj_t, size = next(it)
    return ofs + sum(len(b) for b in it)
94 def _tree_chunks(repo, tree, startofs):
95 "Tree should be a sequence of (name, mode, hash) as per tree_decode()."
97 # name is the chunk's hex offset in the original file
98 tree = dropwhile(lambda (_1, name, _2): int(name, 16) < startofs, tree)
99 for mode, name, oid in tree:
101 skipmore = startofs - ofs
104 it = repo.cat(oid.encode('hex'))
105 _, obj_t, size = next(it)
108 assert obj_t == 'tree'
109 for b in _tree_chunks(repo, tree_decode(data), skipmore):
112 assert obj_t == 'blob'
113 yield data[skipmore:]
class _ChunkReader(object):
    """Sequential reader for a normal or chunked file's content,
    beginning startofs bytes into the file.

    NOTE(review): the class statement itself was missing from this
    listing (only the methods were visible), along with several body
    lines; reconstructed.
    """
    def __init__(self, repo, oid, startofs):
        it = repo.cat(oid.encode('hex'))
        _, obj_t, size = next(it)
        isdir = obj_t == 'tree'
        data = ''.join(it)
        if isdir:
            # Chunked file: iterate the chunk tree lazily.
            self.it = _tree_chunks(repo, tree_decode(data), startofs)
            self.blob = None
        else:
            self.it = None
            self.blob = data[startofs:]
        self.ofs = startofs

    def next(self, size):
        """Return up to size bytes, advancing self.ofs accordingly."""
        out = ''
        while len(out) < size:
            if self.it and not self.blob:
                try:
                    self.blob = self.it.next()
                except StopIteration:
                    self.it = None
            if self.blob:
                want = size - len(out)
                out += self.blob[:want]
                self.blob = self.blob[want:]
            if not self.it:
                break
        debug2('next(%d) returned %d\n' % (size, len(out)))
        self.ofs += len(out)
        return out
class _FileReader(object):
    """Seekable, context-manager file reader over a (possibly chunked)
    file blob.  The size is computed lazily unless known_size is given.

    NOTE(review): several method bodies were missing from this listing;
    reconstructed.  Also fixed seek(): it raised via errno.EINVAL, but
    this module never imports the errno module itself (only names from
    it), which would be a NameError -- EINVAL is now imported directly.
    """
    def __init__(self, repo, oid, known_size=None):
        self.oid = oid
        self.ofs = 0
        self.reader = None
        self._repo = repo
        self._size = known_size

    def _compute_size(self):
        if not self._size:
            self._size = _normal_or_chunked_file_size(self._repo, self.oid)
        return self._size

    def seek(self, ofs):
        if ofs < 0:
            raise IOError(EINVAL, 'Invalid argument')
        if ofs > self._compute_size():
            raise IOError(EINVAL, 'Invalid argument')
        self.ofs = ofs

    def tell(self):
        return self.ofs

    def read(self, count=-1):
        if count < 0:
            count = self._compute_size() - self.ofs
        # Reuse the current reader only if it is positioned where we are.
        if not self.reader or self.reader.ofs != self.ofs:
            self.reader = _ChunkReader(self._repo, self.oid, self.ofs)
        try:
            buf = self.reader.next(count)
        except:
            self.reader = None
            raise  # our offsets will be all screwed up otherwise
        self.ofs += len(buf)
        return buf

    def close(self):
        pass

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
        return False
192 _multiple_slashes_rx = re.compile(r'//+')
194 def _decompose_path(path):
195 """Return a boolean indicating whether the path is absolute, and a
196 reversed list of path elements, omitting any occurrences of "."
197 and ignoring any leading or trailing slash. If the path is
198 effectively '/' or '.', return an empty list.
201 path = re.sub(_multiple_slashes_rx, '/', path)
203 return True, True, []
204 is_absolute = must_be_dir = False
205 if path.startswith('/'):
208 for suffix in ('/', '/.'):
209 if path.endswith(suffix):
211 path = path[:-len(suffix)]
212 parts = [x for x in path.split('/') if x != '.']
215 must_be_dir = True # e.g. path was effectively '.' or '/', etc.
216 return is_absolute, must_be_dir, parts
# The VFS item types.  Every item has at least a meta field, which may be
# either a Metadata instance or an integer stat mode.  For Commit, oid is
# the *tree* hash and coid is the commit hash (see module docstring).
Item = namedtuple('Item', ('meta', 'oid'))
Chunky = namedtuple('Chunky', ('meta', 'oid'))
# NOTE(review): these two previously used ('meta') -- a parenthesized
# string, not a 1-tuple.  namedtuple happens to accept a field-name
# string, so it worked by accident; made them real tuples for
# consistency with the other declarations.
Root = namedtuple('Root', ('meta',))
Tags = namedtuple('Tags', ('meta',))
RevList = namedtuple('RevList', ('meta', 'oid'))
Commit = namedtuple('Commit', ('meta', 'oid', 'coid'))

item_types = frozenset((Item, Chunky, Root, Tags, RevList, Commit))
# Item types whose oid actually names a git tree.
real_tree_types = frozenset((Item, Commit))
# Singleton items for the VFS root '/' and the '/.tag' directory; both
# carry a default directory mode rather than real Metadata.
229 _root = Root(meta=default_dir_mode)
230 _tags = Tags(meta=default_dir_mode)
def copy_item(item):
    """Return a completely independent copy of item, such that
    modifications will not affect the original.

    NOTE(review): the def line and the isinstance guard were missing
    from this listing; restored.  Only a Metadata meta needs deep
    copying -- an integer mode is immutable, so the namedtuple itself
    can be returned as-is.
    """
    meta = getattr(item, 'meta', None)
    if isinstance(meta, Metadata):
        return item._replace(meta=meta.copy())
    return item
def item_mode(item):
    """Return the integer mode (stat st_mode) for item, whether item.meta
    is a Metadata instance or already a bare mode.

    NOTE(review): the def line and return statements were missing from
    this listing; restored.
    """
    m = item.meta
    if isinstance(m, Metadata):
        return m.mode
    return m
def _read_dir_meta(bupm):
    """Read one Metadata entry from the .bupm stream bupm, falling back to
    default_dir_mode for empty entries."""
    # This is because save writes unmodified Metadata() entries for
    # fake parents -- test-save-strip-graft.sh demonstrates.
    m = Metadata.read(bupm)
    if not m:
        return default_dir_mode
    assert m.mode is not None
    return m
def tree_data_and_bupm(repo, oid):
    """Return (tree_bytes, bupm_oid) where bupm_oid will be None if the
    tree has no metadata (i.e. older bup save, or non-bup tree).

    NOTE(review): the ''.join(data) and return lines were missing from
    this listing; restored.
    """
    assert len(oid) == 20
    it = repo.cat(oid.encode('hex'))
    _, item_t, size = next(it)
    data = ''.join(it)
    if item_t == 'commit':
        # For a commit, the metadata lives in its tree; fetch it.
        commit = parse_commit(data)
        it = repo.cat(commit.tree)
        _, item_t, size = next(it)
        data = ''.join(it)
        assert item_t == 'tree'
    elif item_t != 'tree':
        raise Exception('%r is not a tree or commit' % oid.encode('hex'))
    # Entries are in git tree order, so stop once we're past '.bupm'.
    for _, mangled_name, sub_oid in tree_decode(data):
        if mangled_name == '.bupm':
            return data, sub_oid
        if mangled_name > '.bupm':
            break
    return data, None
def _find_treeish_oid_metadata(repo, oid):
    """Return the metadata for the tree or commit oid, or None if the tree
    has no metadata (i.e. older bup save, or non-bup tree).

    """
    tree_data, bupm_oid = tree_data_and_bupm(repo, oid)
    if bupm_oid:
        with _FileReader(repo, bupm_oid) as meta_stream:
            # The directory's own metadata is the first .bupm entry.
            return _read_dir_meta(meta_stream)
    return None
def _readlink(repo, oid):
    """Return the symlink target stored in the repository blob at oid."""
    hex_oid = oid.encode('hex')
    return ''.join(repo.join(hex_oid))
def readlink(repo, item):
    """Return the link target of item, which must be a symlink.  Reads the
    target from the repository if necessary."""
    assert repo
    assert S_ISLNK(item_mode(item))
    if isinstance(item.meta, Metadata):
        target = item.meta.symlink_target
        if target:
            return target
    # No (or empty) cached target; read it from the repository.
    return _readlink(repo, item.oid)
def _compute_item_size(repo, item):
    """Compute item's size from the repository: content length for regular
    files, target length for symlinks, 0 otherwise.

    NOTE(review): the mode-dispatch conditions were missing from this
    listing; restored.
    """
    mode = item_mode(item)
    if S_ISREG(mode):
        size = _normal_or_chunked_file_size(repo, item.oid)
        return size
    if S_ISLNK(mode):
        return len(_readlink(repo, item.oid))
    return 0
def item_size(repo, item):
    """Return the size of item, computing it if necessary."""
    m = item.meta
    if isinstance(m, Metadata) and m.size is not None:
        return m.size
    return _compute_item_size(repo, item)
def fopen(repo, item):
    """Return an open reader for the given file item."""
    assert repo
    assert S_ISREG(item_mode(item))
    return _FileReader(repo, item.oid)
def _commit_item_from_data(oid, data):
    """Build a Commit item from raw commit bytes: oid is the commit hash
    (stored as coid); the item's oid is the commit's tree hash, per the
    module convention."""
    info = parse_commit(data)
    return Commit(meta=default_dir_mode,
                  oid=info.tree.decode('hex'),
                  coid=oid)
def _commit_item_from_oid(repo, oid, require_meta):
    """Return a Commit item for the commit at oid, looking up the tree's
    .bupm metadata when require_meta is true."""
    it = repo.cat(oid.encode('hex'))
    _, typ, size = next(it)
    assert typ == 'commit'
    commit = _commit_item_from_data(oid, ''.join(it))
    if require_meta:
        # Commit.oid is the commit's *tree* hash (see the module
        # docstring), which is where any .bupm metadata lives.  The
        # previous code read commit.tree, but Commit has no 'tree'
        # field -- that was an AttributeError waiting to happen.
        meta = _find_treeish_oid_metadata(repo, commit.oid)
        if meta:
            commit = commit._replace(meta=meta)
    return commit
def _revlist_item_from_oid(repo, oid, require_meta):
    """Return a RevList item for the commit at oid, with real directory
    Metadata when require_meta is true and any is available."""
    if require_meta:
        meta = _find_treeish_oid_metadata(repo, oid) or default_dir_mode
    else:
        meta = default_dir_mode
    return RevList(oid=oid, meta=meta)
def parse_rev_auth_secs(f):
    """Read one "TREE-HEX AUTHOR-SECS" line (as emitted by rev-list
    --format='%T %at') from f and return (tree_hex, author_secs_int)."""
    line = f.readline()
    tree_oidx, secs = line.split(None, 2)
    return tree_oidx, int(secs)
# NOTE(review): this span is an elided listing -- the embedded source line
# numbers jump (364-371, 378-384, 386-387, 389-391 are missing), so the
# branch structure around the two loops below is not fully visible here.
360 def root_items(repo, names=None):
361 """Yield (name, item) for the items in '/' in the VFS. Return
362 everything if names is logically false, otherwise return only
363 items with a name in the collection.
366 # FIXME: what about non-leaf refs like 'refs/heads/foo/bar/baz?
# Unfiltered path (presumably the "not names" branch -- confirm against
# the missing lines): every refs/heads/NAME branch appears as a RevList
# directory named NAME; name[11:] strips the 'refs/heads/' prefix.
372 # FIXME: maybe eventually support repo.clone() or something
373 # and pass in two repos, so we can drop the tuple() and stream
374 # in parallel (i.e. meta vs refs).
375 for name, oid in tuple(repo.refs([], limit_to_heads=True)):
376 assert(name.startswith('refs/heads/'))
377 yield name[11:], _revlist_item_from_oid(repo, oid, False)
# Filtered path: `ref` and `it` are bound in lines not shown here;
# '.', '.tag' are handled separately, other names resolve via repo.cat().
385 if ref in ('.', '.tag'):
388 oidx, typ, size = next(it)
392 assert typ == 'commit'
393 commit = parse_commit(''.join(it))
394 yield ref, _revlist_item_from_oid(repo, oidx.decode('hex'), False)
def ordered_tree_entries(tree_data, bupm=None):
    """Yields (name, mangled_name, kind, gitmode, oid) for each item in
    tree, sorted by name.

    """
    # Sadly, the .bupm entries currently aren't in git tree order,
    # i.e. they don't account for the fact that git sorts trees
    # (including our chunked trees) as if their names ended with "/",
    # so "fo" sorts after "fo." iff fo is a directory.  This makes
    # streaming impossible when we need the metadata.
    def result_from_tree_entry(tree_entry):
        gitmode, mangled_name, oid = tree_entry
        name, kind = git.demangle_name(mangled_name, gitmode)
        return name, mangled_name, kind, gitmode, oid

    tree_ents = (result_from_tree_entry(x) for x in tree_decode(tree_data))
    if bupm:
        # Must match .bupm's plain-name order, so sort (see note above).
        tree_ents = sorted(tree_ents, key=lambda x: x[0])
    for ent in tree_ents:
        yield ent
# NOTE(review): elided listing -- the embedded line numbers jump (418, 423-424,
# 427, 431, 433, 439-443, 449, 452-453, 456-459, 463-464, 466, 469-470 are
# missing), so the if/else skeleton separating the two paths below (no names
# vs. names filter) is not fully visible; do not edit the logic blindly.
417 def tree_items(oid, tree_data, names=frozenset(), bupm=None):
# Helper: build the VFS item for one tree entry, consuming one .bupm
# Metadata record when a bupm stream is provided.
419 def tree_item(ent_oid, kind, gitmode):
420 if kind == BUP_CHUNKED:
421 meta = Metadata.read(bupm) if bupm else default_file_mode
422 return Chunky(oid=ent_oid, meta=meta)
425 # No metadata here (accessable via '.' inside ent_oid).
426 return Item(meta=default_dir_mode, oid=ent_oid)
428 return Item(oid=ent_oid,
429 meta=(Metadata.read(bupm) if bupm \
430 else _default_mode_for_gitmode(gitmode)))
432 assert len(oid) == 20
# Path 1 (presumably "not names" -- confirm against missing line 433):
# yield '.' first with the directory's own metadata, then every entry.
434 dot_meta = _read_dir_meta(bupm) if bupm else default_dir_mode
435 yield '.', Item(oid=oid, meta=dot_meta)
436 tree_entries = ordered_tree_entries(tree_data, bupm)
437 for name, mangled_name, kind, gitmode, ent_oid in tree_entries:
438 if mangled_name == '.bupm':
441 yield name, tree_item(ent_oid, kind, gitmode)
# Path 2: filter by names, stopping early once past the last wanted name.
444 # Assumes the tree is properly formed, i.e. there are no
445 # duplicates, and entries will be in git tree order.
446 if type(names) not in (frozenset, set):
447 names = frozenset(names)
448 remaining = len(names)
450 # Account for the bupm sort order issue (cf. ordered_tree_entries above)
451 last_name = max(names) if bupm else max(names) + '/'
454 dot_meta = _read_dir_meta(bupm) if bupm else default_dir_mode
455 yield '.', Item(oid=oid, meta=dot_meta)
460 tree_entries = ordered_tree_entries(tree_data, bupm)
461 for name, mangled_name, kind, gitmode, ent_oid in tree_entries:
462 if mangled_name == '.bupm':
465 if name not in names:
467 break # given bupm sort order, we're finished
# Skipped entries must still consume their .bupm record to stay in sync.
468 if (kind == BUP_CHUNKED or not S_ISDIR(gitmode)) and bupm:
471 yield name, tree_item(ent_oid, kind, gitmode)
def tree_items_with_meta(repo, oid, tree_data, names):
    """Like tree_items(), but opens the tree's .bupm stream (if any) so
    the yielded items carry real Metadata."""
    # For now, the .bupm order doesn't quite match git's, and we don't
    # load the tree data incrementally anyway, so we just work in RAM
    # via tuple().
    assert len(oid) == 20
    bupm = None
    for _, mangled_name, sub_oid in tree_decode(tree_data):
        # Entries are in git tree order; stop scanning once past '.bupm'.
        if mangled_name == '.bupm':
            bupm = _FileReader(repo, sub_oid)
            break
        if mangled_name > '.bupm':
            break
    for item in tree_items(oid, tree_data, names, bupm):
        yield item
# Matches bup save names: YYYY-MM-DD-HHMMSS with an optional -N
# duplicate-disambiguation suffix (see _reverse_suffix_duplicates()).
491 _save_name_rx = re.compile(r'^\d\d\d\d-\d\d-\d\d-\d{6}(-\d+)?$')
493 def _reverse_suffix_duplicates(strs):
494 """Yields the elements of strs, with any runs of duplicate values
495 suffixed with -N suffixes, where the zero padded integer N
496 decreases to 0 by 1 (e.g. 10, 09, ..., 00).
499 for name, duplicates in groupby(strs):
500 ndup = len(tuple(duplicates))
504 ndig = len(str(ndup - 1))
505 fmt = '%s-' + '%0' + str(ndig) + 'd'
506 for i in xrange(ndup - 1, -1, -1):
507 yield fmt % (name, i)
509 def _name_for_rev(rev):
510 commit, (tree_oidx, utc) = rev
511 assert len(commit) == 40
512 return strftime('%Y-%m-%d-%H%M%S', localtime(utc))
def _item_for_rev(rev):
    """Return a Commit item for rev, which must be
    (commit_hex, (tree_hex, author_secs))."""
    commit_oidx, (tree_oidx, _) = rev
    assert len(commit_oidx) == 40
    assert len(tree_oidx) == 40
    return Commit(meta=default_dir_mode,
                  oid=tree_oidx.decode('hex'),
                  coid=commit_oidx.decode('hex'))
# NOTE(review): elided listing -- the embedded line numbers jump (530,
# 536-538, 543-544, 550-551, 553-555, 559 are missing), so the branch
# skeleton (unfiltered vs names-filtered) and the first_commit
# initialization are not fully visible; do not edit the logic blindly.
522 def revlist_items(repo, oid, names):
523 assert len(oid) == 20
524 oidx = oid.encode('hex')
# Only save-shaped names, '.', and 'latest' can ever match; drop the rest.
525 names = frozenset(name for name in (names or tuple()) \
526 if _save_name_rx.match(name) or name in ('.', 'latest'))
527 # Do this before we open the rev_list iterator so we're not nesting
528 if (not names) or ('.' in names):
529 yield '.', _revlist_item_from_oid(repo, oid, True)
# One rev-list walk feeds two tee'd streams: one for save names (with
# -N duplicate suffixes), one for the Commit items themselves.
531 revs = repo.rev_list((oidx,), format='%T %at', parse=parse_rev_auth_secs)
532 rev_items, rev_names = tee(revs)
533 revs = None # Don't disturb the tees
534 rev_names = _reverse_suffix_duplicates(_name_for_rev(x) for x in rev_names)
535 rev_items = (_item_for_rev(x) for x in rev_items)
# Unfiltered path: yield every save, then 'latest' (the first commit seen).
539 for item in rev_items:
540 first_commit = first_commit or item
541 yield next(rev_names), item
542 yield 'latest', first_commit
# Filtered path: stop once past the (lexically) smallest wanted name.
545 # Revs are in reverse chronological order by default
546 last_name = min(names)
547 for item in rev_items:
548 first_commit = first_commit or item
549 name = next(rev_names) # Might have -N dup suffix
552 if not name in names:
# Both tee branches must be drained together so neither buffers forever.
556 # FIXME: need real short circuit...
557 for _ in rev_items: pass
558 for _ in rev_names: pass
560 if 'latest' in names:
561 yield 'latest', first_commit
# NOTE(review): elided listing -- the embedded line numbers jump (564-566,
# 569, 571, 573-574, 576, 579-581, 585, 587-588, 594-599, 602-604, 606,
# 608-610 are missing), so the tag_item dispatch conditions and the
# unfiltered/filtered branch skeleton are not fully visible.
563 def tags_items(repo, names):
# Inner helper (header on a missing line): map a tag's target oid to a
# VFS item -- commits become Commit items, blobs files, trees dirs.
567 assert len(oid) == 20
568 oidx = oid.encode('hex')
570 _, typ, size = next(it)
572 return _commit_item_from_data(oid, ''.join(it))
575 return Item(meta=default_file_mode, oid=oid)
577 return Item(meta=default_dir_mode, oid=oid)
578 raise Exception('unexpected tag type ' + typ + ' for tag ' + name)
# Unfiltered path: yield every refs/tags/NAME as NAME.
582 # We have to pull these all into ram because tag_item calls cat()
583 for name, oid in tuple(repo.refs(names, limit_to_tags=True)):
584 assert(name.startswith('refs/tags/'))
586 yield name, tag_item(oid)
# Filtered path: count down `remaining` and stop past the last wanted name.
589 # Assumes no duplicate refs
590 if type(names) not in (frozenset, set):
591 names = frozenset(names)
592 remaining = len(names)
593 last_name = max(names)
600 for name, oid in repo.refs(names, limit_to_tags=True):
601 assert(name.startswith('refs/tags/'))
605 if name not in names:
607 yield name, tag_item(oid)
# NOTE(review): elided listing -- the embedded line numbers jump (618, 623,
# 628, 631-632, 634-635, 637-638, 642, 644, 646, 649, 651, 653-654, 658,
# 660, 662, 664+ are missing), so the want_meta if/else skeleton, the
# commit branch's tree_data fetch, and the final yield loop are not fully
# visible; do not edit the logic blindly.
612 def contents(repo, item, names=None, want_meta=True):
613 """Yields information about the items contained in item. Yields
614 (name, item) for each name in names, if the name exists, in an
615 unspecified order. If there are no names, then yields (name,
616 item) for all items, including, a first item named '.'
617 representing the container itself.
619 The meta value for any directories other than '.' will be a
620 default directory mode, not a Metadata object. This is because
621 the actual metadata for a directory is stored inside the directory
622 (see fill_in_metadata_if_dir() or ensure_item_has_metadata()).
624 Note that want_meta is advisory. For any given item, item.meta
625 might be a Metadata instance or a mode, and if the former,
626 meta.size might be None. Missing sizes can be computed via
627 item_size() or augment_item_meta(..., include_size=True).
629 Do not modify any item.meta Metadata instances directly. If
630 needed, make a copy via item.meta.copy() and modify that instead.
633 # Q: are we comfortable promising '.' first when no names?
636 assert S_ISDIR(item_mode(item))
# Dispatch on the item type: real trees/commits read the git object,
# RevList/Root/Tags delegate to their dedicated generators.
639 if item_t in real_tree_types:
640 it = repo.cat(item.oid.encode('hex'))
641 _, obj_type, size = next(it)
643 if obj_type == 'tree':
645 item_gen = tree_items_with_meta(repo, item.oid, data, names)
647 item_gen = tree_items(item.oid, data, names)
# Commit branch: tree_data is presumably fetched on the missing lines.
648 elif obj_type == 'commit':
650 item_gen = tree_items_with_meta(repo, item.oid, tree_data, names)
652 item_gen = tree_items(item.oid, tree_data, names)
655 raise Exception('unexpected git ' + obj_type)
656 elif item_t == RevList:
657 item_gen = revlist_items(repo, item.oid, names)
659 item_gen = root_items(repo, names)
661 item_gen = tags_items(repo, names)
663 raise Exception('unexpected VFS item ' + str(item))
# NOTE(review): elided listing -- the embedded line numbers jump throughout
# (671, 673-680, 687, 689-690, 692, 694-699, 701-703, 706, 708-709,
# 712-713, 718, 723, 725, 727, 732, 735, 737-739, 741-742, 745, 748,
# 751-753, 759, 762, 764, 766 are missing), so the main resolution loop's
# skeleton ('..' handling, symlink deref, ELOOP counting) is only partially
# visible.  This is the order-sensitive core of the VFS; comments only.
667 def _resolve_path(repo, path, parent=None, want_meta=True, deref=False):
# Shared ENOTDIR raiser; terminus carries the partial resolution (past).
668 def raise_dir_required_but_not_dir(path, parent, past):
669 raise IOError(ENOTDIR,
670 "path %r%s resolves to non-directory %r"
672 ' (relative to %r)' % parent if parent else '',
# Validate any caller-supplied parent: (name, item) pairs rooted at '/'.
681 assert type(x[0]) in (bytes, str)
682 assert type(x[1]) in item_types
683 assert parent[0][1] == _root
684 if not S_ISDIR(item_mode(parent[-1][1])):
685 raise IOError(ENOTDIR,
686 'path resolution parent %r is not a directory'
688 is_absolute, must_be_dir, future = _decompose_path(path)
691 if not future: # path was effectively '.' or '/'
693 return (('', _root),)
# past = resolved-so-far, future = reversed remaining segments (popped).
700 past = list(parent) if parent else [('', _root)]
704 if must_be_dir and not S_ISDIR(item_mode(past[-1][1])):
705 raise_dir_required_but_not_dir(path, parent, past)
707 segment = future.pop()
# '..' handling: pop one level, but never above the root.
710 if len(past) > 1: # .. from / is /
711 assert S_ISDIR(item_mode(past[-1][1]))
# Look the segment up in its parent; when want_meta, also request '.'
# so the parent's own Metadata is filled in as a side effect.
714 parent_name, parent_item = past[-1]
715 wanted = (segment,) if not want_meta else ('.', segment)
716 items = tuple(contents(repo, parent_item, names=wanted,
717 want_meta=want_meta))
719 item = items[0][1] if items else None
720 else: # First item will be '.' and have the metadata
721 item = items[1][1] if len(items) == 2 else None
722 dot, dot_item = items[0]
724 past[-1] = parent_name, parent_item
# Missing segment: record (segment, None) as the terminus.
726 past.append((segment, None),)
728 mode = item_mode(item)
729 if not S_ISLNK(mode):
730 if not S_ISDIR(mode):
731 past.append((segment, item),)
# Non-directory with path left to resolve -> ENOTDIR.
733 raise IOError(ENOTDIR,
734 'path %r%s ends internally in non-directory here: %r'
736 ' (relative to %r)' % parent if parent else '',
740 raise_dir_required_but_not_dir(path, parent, past)
# Directory: optionally upgrade its meta from the tree's .bupm.
743 if want_meta and type(item) in real_tree_types:
744 dir_meta = _find_treeish_oid_metadata(repo, item.oid)
746 item = item._replace(meta=dir_meta)
747 past.append((segment, item))
# Symlink: keep it when it's the final segment and deref is false...
749 if not future and not deref:
750 past.append((segment, item),)
# ...otherwise follow it, raising ELOOP past the traversal limit.
754 'too many symlinks encountered while resolving %r%s'
755 % (path, ' relative to %r' % parent if parent else ''),
756 terminus=tuple(past + [(segment, item)]))
757 target = readlink(repo, item)
758 is_absolute, _, target_future = _decompose_path(target)
760 if not target_future: # path was effectively '/'
761 return (('', _root),)
763 future = target_future
765 future.extend(target_future)
def lresolve(repo, path, parent=None, want_meta=True):
    """Perform exactly the same function as resolve(), except if the final
    path element is a symbolic link, don't follow it, just return it
    in the result.

    """
    return _resolve_path(repo, path, parent=parent, want_meta=want_meta,
                         deref=False)
def resolve(repo, path, parent=None, want_meta=True):
    """Follow the path in the virtual filesystem and return a tuple
    representing the location, if any, denoted by the path.  Each
    element in the result tuple will be (name, info), where info will
    be a VFS item that can be passed to functions like item_mode().

    If a path segment that does not exist is encountered during
    resolution, the result will represent the location of the missing
    item, and that item in the result will be None.

    Any attempt to traverse a non-directory will raise a VFS ENOTDIR
    IOError exception.

    Any symlinks along the path, including at the end, will be
    resolved.  A VFS IOError with the errno attribute set to ELOOP
    will be raised if too many symlinks are traversed while following
    the path.  That exception is effectively like a normal
    ELOOP IOError exception, but will include a terminus element
    describing the location of the failure, which will be a tuple of
    (name, info) elements.

    The parent, if specified, must be a sequence of (name, item)
    tuples, and will provide the starting point for the resolution of
    the path.  If no parent is specified, resolution will start at
    '/'.

    The result may include elements of parent directly, so they must
    not be modified later.  If this is a concern, pass in "name,
    copy_item(item) for name, item in parent" instead.

    When want_meta is true, detailed metadata will be included in each
    result item if it's available, otherwise item.meta will be an
    integer mode.  The metadata size may or may not be provided, but
    can be computed by item_size() or augment_item_meta(...,
    include_size=True).  Setting want_meta=False is rarely desirable
    since it can limit the VFS to just the metadata git itself can
    represent, and so, as an example, fifos and sockets will appear to
    be regular files (e.g. S_ISREG(item_mode(item)) will be true).
    But the option is provided because it may be more efficient when
    only the path names or the more limited metadata is sufficient.

    Do not modify any item.meta Metadata instances directly.  If
    needed, make a copy via item.meta.copy() and modify that instead.

    """
    result = _resolve_path(repo, path, parent=parent, want_meta=want_meta,
                           deref=True)
    _, leaf_item = result[-1]
    if leaf_item:
        # With deref=True any trailing symlink must have been followed.
        assert not S_ISLNK(item_mode(leaf_item))
    return result
def augment_item_meta(repo, item, include_size=False):
    """Ensure item has a Metadata instance for item.meta.  If item.meta is
    currently a mode, replace it with a compatible "fake" Metadata
    instance.  If include_size is true, ensure item.meta.size is
    correct, computing it if needed.  If item.meta is a Metadata
    instance, this call may modify it in place or replace it.

    NOTE(review): the isinstance/else branch structure was missing from
    this listing; restored.
    """
    # If we actually had parallelism, we'd need locking...
    assert repo
    m = item.meta
    if isinstance(m, Metadata):
        if include_size and m.size is None:
            m.size = _compute_item_size(repo, item)
        return item._replace(meta=m)
    # m is a bare stat mode; synthesize minimal Metadata around it.
    meta = Metadata()
    meta.mode = m
    meta.uid = meta.gid = meta.atime = meta.mtime = meta.ctime = 0
    if S_ISLNK(m):
        target = _readlink(repo, item.oid)
        meta.symlink_target = target
        meta.size = len(target)
    elif include_size:
        meta.size = _compute_item_size(repo, item)
    return item._replace(meta=meta)
def fill_in_metadata_if_dir(repo, item):
    """If item is a directory and item.meta is not a Metadata instance,
    attempt to find the metadata for the directory.  If found, return
    a new item augmented to include that metadata.  Otherwise, return
    item.  May be useful for the output of contents().

    """
    if S_ISDIR(item_mode(item)) and not isinstance(item.meta, Metadata):
        # The directory's own metadata is its '.' entry's meta.
        items = tuple(contents(repo, item, ('.',), want_meta=True))
        assert len(items) == 1
        assert items[0][0] == '.'
        item = items[0][1]
    return item
def ensure_item_has_metadata(repo, item, include_size=False):
    """If item is a directory, attempt to find and add its metadata.  If
    the item still doesn't have a Metadata instance for item.meta,
    give it one via augment_item_meta().  May be useful for the
    output of contents().

    """
    return augment_item_meta(repo,
                             fill_in_metadata_if_dir(repo, item),
                             include_size=include_size)