2 from __future__ import absolute_import
3 import errno, os, stat, struct, tempfile
5 from bup import metadata, xstat
6 from bup._helpers import UINT_MAX, bytescmp
7 from bup.compat import range
8 from bup.helpers import (add_error, log, merge_iter, mmap_readwrite,
9 progress, qprogress, resolve_parent, slashappend)
# Magic header for the bupindex file; the trailing byte is the format
# version number.
INDEX_HDR = 'BUPI\0\0\0\7'

# Time values are handled as integer nanoseconds since the epoch in
# memory, but are written as xstat/metadata timespecs. This behavior
# matches the existing metadata/xstat/.bupm code.

# Record times (mtime, ctime, atime) as xstat/metadata timespecs, and
# store all of the times in the index so they won't interfere with the
# forthcoming metadata cache.

# NOTE(review): the three adjacent string literals below are pieces of
# the INDEX_SIG struct-format concatenation; the line that begins the
# assignment is not visible in this chunk.
'qQ' # ctime_s, ctime_ns
'qQ' # mtime_s, mtime_ns
'qQ' # atime_s, atime_ns

ENTLEN = struct.calcsize(INDEX_SIG)    # byte size of one packed index entry
FOOTLEN = struct.calcsize(FOOTER_SIG)  # byte size of the index footer

# Bit flags stored in each entry's flags field.
IX_EXISTS = 0x8000 # file exists on filesystem
IX_HASHVALID = 0x4000 # the stored sha1 matches the filesystem
IX_SHAMISSING = 0x2000 # the stored sha1 object doesn't seem to exist
47 class Error(Exception):
class MetaStoreReader:
    """Read-only accessor for a bupindex.meta file of encoded Metadata
    records.  NOTE(review): several method lines are not visible in this
    chunk."""
    def __init__(self, filename):
        # NOTE(review): intervening initialization lines are not visible
        # in this chunk.
        self._file = open(filename, 'rb')

    def metadata_at(self, ofs):
        # NOTE(review): presumably the file is seeked to ofs before this
        # read; the seek is not visible in this chunk -- confirm.
        return metadata.Metadata.read(self._file)
class MetaStoreWriter:
    # For now, we just append to the file, and try to handle any
    # truncation or corruption somewhat sensibly.

    def __init__(self, filename):
        # Map metadata hashes to bupindex.meta offsets.
        self._filename = filename
        # FIXME: see how slow this is; does it matter?
        m_file = open(filename, 'ab+')
        # NOTE(review): the loop/try scaffolding around the reads below
        # is not visible in this chunk; it appears each stored record is
        # re-read to rebuild the offset map -- confirm.
        m = metadata.Metadata.read(m_file)
        m_encoded = m.encode()
        self._offsets[m_encoded] = m_off
        m = metadata.Metadata.read(m_file)
        # Corrupt trailing data is reported but not treated as fatal.
        log('index metadata in %r appears to be corrupt' % filename)
        self._file = open(filename, 'ab')

    def store(self, metadata):
        # Encode without the path so identical metadata content shares
        # one record regardless of filename.
        meta_encoded = metadata.encode(include_path=False)
        ofs = self._offsets.get(meta_encoded)
        # NOTE(review): the early return for an already-stored record is
        # not visible in this chunk.
        ofs = self._file.tell()
        self._file.write(meta_encoded)
        self._offsets[meta_encoded] = ofs
    def __init__(self, ename, parent):
        # NOTE(review): the attribute-initialization lines of __init__
        # are not visible in this chunk.

        # NOTE(review): the lines below appear to belong to this class's
        # write() method; its def line is not visible in this chunk.
        (ofs,n) = (f.tell(), len(self.list))
        count = len(self.list)
        #log('popping %r with %d entries\n'
        #    % (''.join(self.ename), count))
        # Propagate this level's entry count up to the parent level.
        self.parent.count += count + self.count
def _golevel(level, f, ename, newentry, metastore, tmax):
    """Walk the Level stack from the current level to the one matching
    the split path ename, writing out finished levels on the way.
    NOTE(review): several lines are elided in this chunk."""
    # close nodes back up the tree
    default_meta_ofs = metastore.store(metadata.Metadata())
    while ename[:len(level.ename)] != level.ename:
        n = BlankNewEntry(level.ename[-1], default_meta_ofs, tmax)
        (n.children_ofs,n.children_n) = level.write(f)
        level.parent.list.append(n)
    # create nodes down the tree
    while len(level.ename) < len(ename):
        level = Level(ename[:len(level.ename)+1], level)
    # are we in precisely the right place?
    assert(ename == level.ename)
    # NOTE(review): the line below looks like the continuation of an
    # elided "n = newentry or \" assignment -- confirm.
        BlankNewEntry(ename and level.ename[-1] or None, default_meta_ofs, tmax)
    (n.children_ofs,n.children_n) = level.write(f)
    level.parent.list.append(n)
    def __init__(self, basename, name, meta_ofs, tmax):
        # basename/name are coerced to str (this is Python 2 code, so
        # effectively bytes).
        self.basename = str(basename)
        self.name = str(name)
        self.meta_ofs = meta_ofs
        self.children_ofs = 0

        # NOTE(review): the lines below are a fragment of __repr__();
        # its def line is not visible in this chunk.
        return ("(%s,0x%04x,%d,%d,%d,%d,%d,%d,%s/%s,0x%04x,%d,0x%08x/%d)"
                % (self.name, self.dev, self.ino, self.nlink,
                   self.ctime, self.mtime, self.atime,
                   self.size, self.mode, self.gitmode,
                   self.flags, self.meta_ofs,
                   self.children_ofs, self.children_n))

        # NOTE(review): the lines below are a fragment of packed(); its
        # def and try lines are not visible in this chunk.
        # Convert in-memory nanosecond times back to timespec pairs for
        # the on-disk INDEX_SIG layout.
        ctime = xstat.nsecs_to_timespec(self.ctime)
        mtime = xstat.nsecs_to_timespec(self.mtime)
        atime = xstat.nsecs_to_timespec(self.atime)
        return struct.pack(INDEX_SIG,
                           self.dev, self.ino, self.nlink,
                           self.size, self.mode,
                           self.gitmode, self.sha, self.flags,
                           self.children_ofs, self.children_n,
        except (DeprecationWarning, struct.error) as e:
            log('pack error: %s (%r)\n' % (e, self))
    def stale(self, st, tstart, check_device=True):
        """Decide whether this entry is out of date with respect to the
        stat result st.  NOTE(review): the per-check return statements
        are not visible in this chunk."""
        if self.size != st.st_size:
        if self.mtime != st.st_mtime:
        # An all-zero sha means we never hashed this entry.
        if self.sha == EMPTY_SHA:
        if self.ctime != st.st_ctime:
        if self.ino != st.st_ino:
        if self.nlink != st.st_nlink:
        if not (self.flags & IX_EXISTS):
        if check_device and (self.dev != st.st_dev):
        # Check that the ctime's "second" is at or after tstart's.
        ctime_sec_in_ns = xstat.fstime_floor_secs(st.st_ctime) * 10**9
        if ctime_sec_in_ns >= tstart:
    def update_from_stat(self, st, meta_ofs):
        # Should only be called when the entry is stale(), and
        # invalidate() should almost certainly be called afterward.
        self.nlink = st.st_nlink
        self.ctime = st.st_ctime
        self.mtime = st.st_mtime
        self.atime = st.st_atime
        self.size = st.st_size
        self.mode = st.st_mode
        self.flags |= IX_EXISTS
        self.meta_ofs = meta_ofs

        # NOTE(review): the two lines below appear to be a fragment of a
        # separate method that clamps times; its def line is not visible
        # in this chunk.
        self.mtime = self._fixup_time(self.mtime)
        self.ctime = self._fixup_time(self.ctime)

    def _fixup_time(self, t):
        # Clamp t at tmax when a cap is set.  NOTE(review): the return
        # lines are not visible; also prefer "is not None" over "!= None".
        if self.tmax != None and t > self.tmax:
        # NOTE(review): fragment of is_valid(); its def line is not
        # visible in this chunk.  Valid means: exists on disk AND the
        # stored sha is trusted.
        f = IX_HASHVALID|IX_EXISTS
        return (self.flags & f) == f
255 def invalidate(self):
256 self.flags &= ~IX_HASHVALID
    def validate(self, gitmode, sha):
        # NOTE(review): sha-checking assertions are not visible in this
        # chunk.
        # Cheap integer type check: +0 fails for non-numeric gitmode.
        assert(gitmode+0 == gitmode)
        self.gitmode = gitmode
        # NOTE(review): the "self.sha = sha" assignment is presumably
        # between these lines; not visible in this chunk.
        self.flags |= IX_HASHVALID|IX_EXISTS

        # NOTE(review): the line below is the tail of another method
        # (apparently exists()); its def line is not visible.
        return not self.is_deleted()
269 def sha_missing(self):
270 return (self.flags & IX_SHAMISSING) or not (self.flags & IX_HASHVALID)
272 def is_deleted(self):
273 return (self.flags & IX_EXISTS) == 0
    def set_deleted(self):
        # Clearing IX_EXISTS also invalidates the hash -- a deleted
        # file's stored sha no longer matches anything on disk.
        if self.flags & IX_EXISTS:
            self.flags &= ~(IX_EXISTS | IX_HASHVALID)
            # NOTE(review): presumably more follows (e.g. a repack);
            # not visible in this chunk.

        # NOTE(review): the two returns below are tails of is_real() and
        # is_fake(); their def lines are not visible in this chunk.
        return not self.is_fake()
        # A "fake" entry has no ctime (never stat'ed).
        return not self.ctime
    def _cmp(self, other):
        """Three-way comparison helper backing the rich-comparison
        operators.  NOTE(review): the combining/return logic between the
        lines below is not visible in this chunk."""
        # Note reversed name ordering
        bc = bytescmp(other.name, self.name)
        # Valid entries order after invalid ones (True - False == 1).
        vc = self.is_valid() - other.is_valid()
        fc = self.is_fake() - other.is_fake()
298 def __eq__(self, other):
299 return self._cmp(other) == 0
302 return self._cmp(other) != 0
304 def __lt__(self, other):
305 return self._cmp(other) < 0
307 def __gt__(self, other):
308 return self._cmp(other) > 0
        # NOTE(review): tails of __le__, __ge__, and (apparently) a
        # write() method; their def lines are not visible in this chunk.
        return self._cmp(other) <= 0
        return self._cmp(other) >= 0
        # Serialize as NUL-terminated basename followed by the packed
        # fixed-size record.
        f.write(self.basename + '\0' + self.packed())
class NewEntry(Entry):
    """An index entry built from freshly gathered data rather than read
    back from an existing index file."""
    def __init__(self, basename, name, tmax, dev, ino, nlink,
                 # NOTE(review): a parameter line (apparently ctime,
                 # mtime, atime) is not visible in this chunk.
                 size, mode, gitmode, sha, flags, meta_ofs,
                 children_ofs, children_n):
        Entry.__init__(self, basename, name, meta_ofs, tmax)
        # Bulk-assign all stat-derived fields in one tuple unpack.
        (self.dev, self.ino, self.nlink, self.ctime, self.mtime, self.atime,
         self.size, self.mode, self.gitmode, self.sha,
         self.flags, self.children_ofs, self.children_n
         ) = (dev, ino, nlink, ctime, mtime, atime,
              size, mode, gitmode, sha, flags, children_ofs, children_n)
class BlankNewEntry(NewEntry):
    """A NewEntry placeholder whose stat-derived fields are all zero,
    using basename for the name and EMPTY_SHA for the hash."""

    def __init__(self, basename, meta_ofs, tmax):
        # All-positional call; the run of zeros covers dev/ino/nlink,
        # the three time fields, size, mode, and gitmode, then flags and
        # the two children fields are zero as well.
        NewEntry.__init__(self, basename, basename, tmax,
                          0, 0, 0,
                          0, 0, 0,
                          0, 0,
                          0, EMPTY_SHA, 0,
                          meta_ofs, 0, 0)
class ExistingEntry(Entry):
    """An index entry backed by a packed record inside an mmap'd index
    file.  NOTE(review): several lines are elided in this chunk."""
    def __init__(self, parent, basename, name, m, ofs):
        Entry.__init__(self, basename, name, None, None)
        # NOTE(review): _m/_ofs attribute assignments are presumably
        # between these lines; repack() below uses them.
        # Unpack one fixed-size record starting at ofs; Python 2
        # buffer() gives a cheap slice of the mmap.
        (self.dev, self.ino, self.nlink,
         self.ctime, ctime_ns, self.mtime, mtime_ns, self.atime, atime_ns,
         self.size, self.mode, self.gitmode, self.sha,
         self.flags, self.children_ofs, self.children_n, self.meta_ofs
         ) = struct.unpack(INDEX_SIG, str(buffer(m, ofs, ENTLEN)))
        # Recombine each on-disk (sec, nsec) timespec pair into a single
        # in-memory nanosecond value.
        self.atime = xstat.timespec_to_nsecs((self.atime, atime_ns))
        self.mtime = xstat.timespec_to_nsecs((self.mtime, mtime_ns))
        self.ctime = xstat.timespec_to_nsecs((self.ctime, ctime_ns))

    # effectively, we don't bother messing with IX_SHAMISSING if
    # not IX_HASHVALID, since it's redundant, and repacking is more
    # expensive than not repacking.
    # This is implemented by having sha_missing() check IX_HASHVALID too.
    def set_sha_missing(self, val):
        oldval = self.sha_missing() and 1 or 0
        # Normalize val to the flag bit (or 0) and replace just that bit.
        flag = val and IX_SHAMISSING or 0
        newflags = (self.flags & (~IX_SHAMISSING)) | flag
        self.flags = newflags
        # NOTE(review): a conditional repack when the value changed is
        # presumably here; not visible in this chunk.

    def unset_sha_missing(self, flag):
        if self.flags & IX_SHAMISSING:
            self.flags &= ~IX_SHAMISSING
            # NOTE(review): presumably repacks after clearing; not
            # visible in this chunk.

        # NOTE(review): the lines below are a fragment of repack(); its
        # def line is not visible.  Writes the packed record back into
        # the mmap in place and propagates invalidation upward.
        self._m[self._ofs:self._ofs+ENTLEN] = self.packed()
        if self.parent and not self.is_valid():
            self.parent.invalidate()
    def iter(self, name=None, wantrecurse=None):
        """Yield child entries, optionally filtered by name and pruned
        by wantrecurse.  NOTE(review): many lines (dname setup, branch
        bodies, the ExistingEntry call continuation, yields) are not
        visible in this chunk."""
        if dname and not dname.endswith('/'):
        ofs = self.children_ofs
        assert(ofs <= len(self._m))
        assert(self.children_n <= UINT_MAX) # i.e. python struct 'I'
        for i in range(self.children_n):
            # Each record is a NUL-terminated basename followed by the
            # packed entry.
            eon = self._m.find('\0', ofs)
            basename = str(buffer(self._m, ofs, eon-ofs))
            child = ExistingEntry(self, basename, self.name + basename,
                or child.name.startswith(dname)
                or child.name.endswith('/') and dname.startswith(child.name)):
                if not wantrecurse or wantrecurse(child):
                    for e in child.iter(name=name, wantrecurse=wantrecurse):
                if not name or child.name == name or child.name.startswith(dname):
            ofs = eon + 1 + ENTLEN
    def __init__(self, filename):
        self.filename = filename
        self.writable = False
        # NOTE(review): try/except scaffolding around the open is not
        # visible in this chunk.
        f = open(filename, 'r+')
        # A missing index file is tolerated (reader stays empty).
        if e.errno == errno.ENOENT:
        b = f.read(len(INDEX_HDR))
        # Header mismatch is a warning, not fatal.
        log('warning: %s: header: expected %r, got %r\n'
            % (filename, INDEX_HDR, b))
        st = os.fstat(f.fileno())
        self.m = mmap_readwrite(f)
        # The footer at the end of the file records the entry count.
        self.count = struct.unpack(FOOTER_SIG,
                                   str(buffer(self.m, st.st_size-FOOTLEN, FOOTLEN)))[0]

        # NOTE(review): tail of __len__(); its def line is not visible.
        return int(self.count)

    def forward_iter(self):
        # NOTE(review): the initial ofs assignment is not visible in
        # this chunk.  Walk records front-to-back (on-disk order).
        while ofs+ENTLEN <= len(self.m)-FOOTLEN:
            eon = self.m.find('\0', ofs)
            basename = str(buffer(self.m, ofs, eon-ofs))
            yield ExistingEntry(None, basename, basename, self.m, eon+1)
            ofs = eon + 1 + ENTLEN

    def iter(self, name=None, wantrecurse=None):
        # Only iterate when the file holds more than header + root entry.
        if len(self.m) > len(INDEX_HDR)+ENTLEN:
            # NOTE(review): dname setup and branch bodies are not
            # visible in this chunk.
            if dname and not dname.endswith('/'):
            # The root entry is the last record before the footer.
            root = ExistingEntry(None, '/', '/',
                                 self.m, len(self.m)-FOOTLEN-ENTLEN)
            for sub in root.iter(name=name, wantrecurse=wantrecurse):
            if not dname or dname == root.name:

    def find(self, name):
        # NOTE(review): the generator's remaining clauses and the
        # next() default are not visible in this chunk.
        return next((e for e in self.iter(name, wantrecurse=lambda x : True)

        # NOTE(review): the lines below appear to be fragments of
        # flush()/close(); their def lines are not visible.
        if self.writable and self.m:
        if self.writable and self.m:
        self.writable = False
    def filter(self, prefixes, wantrecurse=None):
        """Yield entries under the given path prefixes, rewriting each
        entry's resolved name back to the user-supplied path form.
        NOTE(review): the yield statements are not visible in this
        chunk."""
        for (rp, path) in reduce_paths(prefixes):
            for e in self.iter(rp, wantrecurse=wantrecurse):
                assert(e.name.startswith(rp))
                # Translate the resolved prefix rp back to the original
                # user path.
                name = path + e.name[len(rp):]
            # Always return at least the top for each prefix.
            # Otherwise something like "save x/y" will produce
            # nothing if x is up to date.
            name = path + pe.name[len(rp):]
# FIXME: this function isn't very generic, because it splits the filename
# in an odd way and depends on a terminating '/' to indicate directories.
# NOTE(review): the "def pathsplit(p):" line and the initial split are
# not visible in this chunk.
    """Split a path into a list of elements of the file system hierarchy."""
    # Re-attach '/' to every element except the last (the basename).
    l = [i+'/' for i in l[:-1]] + l[-1:]
    l.pop() # extra blank caused by terminating '/'
    def __init__(self, filename, metastore, tmax):
        # Start with an empty root level; entries are accumulated into a
        # Level stack as they are added.
        self.rootlevel = self.level = Level([], None)
        # NOTE(review): other attribute initializations are not visible
        # in this chunk.
        self.filename = filename = resolve_parent(filename)
        self.metastore = metastore
        # Write to a temp file in the same directory so the final
        # os.rename() below is atomic.
        (dir,name) = os.path.split(filename)
        (ffd,self.tmpname) = tempfile.mkstemp('.tmp', filename, dir)
        self.f = os.fdopen(ffd, 'wb', 65536)
        self.f.write(INDEX_HDR)

        # NOTE(review): the lines below are fragments of several methods
        # (apparently abort/flush/close); their def lines are not
        # visible in this chunk.
        os.unlink(self.tmpname)
        # Flush any open levels back up to the root...
        self.level = _golevel(self.level, self.f, [], None,
                              self.metastore, self.tmax)
        self.count = self.rootlevel.count
        # ...then write the entry-count footer.
        self.f.write(struct.pack(FOOTER_SIG, self.count))
        assert(self.level == None)
        # Atomically replace the old index with the finished temp file.
        os.rename(self.tmpname, self.filename)
559 def _add(self, ename, entry):
560 if self.lastfile and self.lastfile <= ename:
561 raise Error('%r must come before %r'
562 % (''.join(ename), ''.join(self.lastfile)))
563 self.lastfile = ename
564 self.level = _golevel(self.level, self.f, ename, entry,
565 self.metastore, self.tmax)
    def add(self, name, st, meta_ofs, hashgen = None):
        """Add one path to the index.  NOTE(review): several lines (the
        if/else scaffolding around hashgen and the st branch, the
        NewEntry call continuation, and the final _add) are not visible
        in this chunk."""
        endswith = name.endswith('/')
        ename = pathsplit(name)
        #log('add: %r %r\n' % (basename, name))
        # With a hashgen, compute the hash now and mark it valid...
        (gitmode, sha) = hashgen(name)
        flags |= IX_HASHVALID
        # ...otherwise record a placeholder hash.
        (gitmode, sha) = (0, EMPTY_SHA)
        # A trailing '/' in the name must agree with the stat mode.
        isdir = stat.S_ISDIR(st.st_mode)
        assert(isdir == endswith)
        e = NewEntry(basename, name, self.tmax,
                     st.st_dev, st.st_ino, st.st_nlink,
                     st.st_ctime, st.st_mtime, st.st_atime,
                     st.st_size, st.st_mode, gitmode, sha, flags,
        # Without stat info, fall back to a blank entry with default
        # metadata.
        meta_ofs = self.metastore.store(metadata.Metadata())
        e = BlankNewEntry(basename, meta_ofs, self.tmax)
596 def add_ixentry(self, e):
597 e.children_ofs = e.children_n = 0
598 self._add(pathsplit(e.name), e)
    def new_reader(self):
        # NOTE(review): presumably flushes self.f before opening the
        # reader; that line is not visible in this chunk.
        return Reader(self.tmpname)
def _slashappend_or_add_error(p, caller):
    """Return p, after ensuring it has a single trailing slash if it names
    a directory, unless there's an OSError, in which case, call
    add_error() and return None."""
    # NOTE(review): the try/stat lines and the non-directory return are
    # not visible in this chunk.
        add_error('%s: %s' % (caller, e))
    if stat.S_ISDIR(st.st_mode):
        return slashappend(p)
def unique_resolved_paths(paths):
    "Return a collection of unique resolved paths."
    # NOTE(review): the generator's "for p in paths" clause is not
    # visible in this chunk.
    rps = (_slashappend_or_add_error(resolve_parent(p), 'unique_resolved_paths')
    # Failed resolutions (None) are dropped from the result.
    return frozenset((x for x in rps if x is not None))
def reduce_paths(paths):
    """Resolve paths and drop any path already covered by a resolved
    directory prefix earlier in sorted order.  NOTE(review): several
    lines (loop headers, xpaths init, sort, prev tracking, return) are
    not visible in this chunk."""
    rp = _slashappend_or_add_error(resolve_parent(p), 'reduce_paths')
    # Keep (resolved, as-given) pairs; mirror the trailing slash onto
    # the user-supplied form for directories.
    xpaths.append((rp, slashappend(p) if rp.endswith('/') else p))
    for (rp, p) in xpaths:
        if prev and (prev == rp
                     or (prev.endswith('/') and rp.startswith(prev))):
            continue # already superseded by previous path
        paths.append((rp, p))
    paths.sort(reverse=True)
    # NOTE(review): the lines below are the tail of a module-level merge
    # helper; its def line (with the iters parameter) is not visible in
    # this chunk.
    # Progress callbacks for merge_iter: \r-style updates while running,
    # a final newline-terminated message when done.
    def pfunc(count, total):
        qprogress('bup: merging indexes (%d/%d)\r' % (count, total))
    def pfinal(count, total):
        progress('bup: merging indexes (%d/%d), done.\n' % (count, total))
    return merge_iter(iters, 1024, pfunc, pfinal, key='name')