2 from __future__ import absolute_import
3 import errno, os, stat, struct, tempfile
5 from bup import metadata, xstat
6 from bup._helpers import UINT_MAX, bytescmp
7 from bup.helpers import (add_error, log, merge_iter, mmap_readwrite,
8 progress, qprogress, resolve_parent, slashappend)
# On-disk bupindex constants: header magic (format version 7 encoded in
# the trailing bytes), struct formats for entries and the footer, and
# the per-entry flag bits.
# NOTE(review): this excerpt is gapped -- the start of INDEX_SIG and the
# definitions of FOOTER_SIG/EMPTY_SHA/FANOUT are not visible here, but
# ENTLEN/FOOTLEN below are computed from them; confirm against upstream.
13 INDEX_HDR = 'BUPI\0\0\0\7'
15 # Time values are handled as integer nanoseconds since the epoch in
16 # memory, but are written as xstat/metadata timespecs. This behavior
17 # matches the existing metadata/xstat/.bupm code.
19 # Record times (mtime, ctime, atime) as xstat/metadata timespecs, and
20 # store all of the times in the index so they won't interfere with the
21 # forthcoming metadata cache.
# Fragment of INDEX_SIG: each timestamp is a signed-seconds /
# unsigned-nanoseconds pair ('qQ'), matching xstat timespecs.
26 'qQ' # ctime_s, ctime_ns
27 'qQ' # mtime_s, mtime_ns
28 'qQ' # atime_s, atime_ns
# Fixed byte sizes of one packed entry and of the file footer.
38 ENTLEN = struct.calcsize(INDEX_SIG)
40 FOOTLEN = struct.calcsize(FOOTER_SIG)
# Entry flag bits stored in the packed 'flags' field.
42 IX_EXISTS = 0x8000 # file exists on filesystem
43 IX_HASHVALID = 0x4000 # the stored sha1 matches the filesystem
44 IX_SHAMISSING = 0x2000 # the stored sha1 object doesn't seem to exist
# Module-level exception type raised for index ordering/consistency
# problems (see Writer._add). Body not visible in this excerpt.
46 class Error(Exception):
# Read-only accessor for the bupindex.meta sidecar file; holds the file
# open for the reader's lifetime.
50 class MetaStoreReader:
51 def __init__(self, filename):
53 self._file = open(filename, 'rb')
# Return the Metadata record stored at offset ofs.
# NOTE(review): ofs is unused in the visible code -- a seek(ofs) before
# the read appears to be missing from this excerpt; confirm upstream.
63 def metadata_at(self, ofs):
65 return metadata.Metadata.read(self._file)
# Append-only, deduplicating writer for the bupindex.meta file: an
# identical encoded Metadata record is written once and shared by
# returning its existing file offset.
68 class MetaStoreWriter:
69 # For now, we just append to the file, and try to handle any
70 # truncation or corruption somewhat sensibly.
72 def __init__(self, filename):
73 # Map metadata hashes to bupindex.meta offsets.
75 self._filename = filename
77 # FIXME: see how slow this is; does it matter?
# Scan any pre-existing records so duplicates reuse their old offsets.
78 m_file = open(filename, 'ab+')
83 m = metadata.Metadata.read(m_file)
85 m_encoded = m.encode()
86 self._offsets[m_encoded] = m_off
88 m = metadata.Metadata.read(m_file)
# A short/garbled trailing record is reported but not fatal.
92 log('index metadata in %r appears to be corrupt' % filename)
# Reopen append-only for subsequent store() calls.
96 self._file = open(filename, 'ab')
# Append meta_encoded if unseen and return its offset; otherwise return
# the offset recorded for the identical encoding.
# NOTE(review): the 'metadata' parameter shadows the bup.metadata module
# imported at the top of the file.
107 def store(self, metadata):
108 meta_encoded = metadata.encode(include_path=False)
109 ofs = self._offsets.get(meta_encoded)
112 ofs = self._file.tell()
113 self._file.write(meta_encoded)
114 self._offsets[meta_encoded] = ofs
# One directory level being accumulated during index writing; when a
# level is popped, its child count is folded into its parent.
# NOTE(review): the class header and parts of the body are missing from
# this excerpt.
119 def __init__(self, ename, parent):
# Record where this level's children start in the file and how many
# entries it holds.
126 (ofs,n) = (f.tell(), len(self.list))
128 count = len(self.list)
129 #log('popping %r with %d entries\n'
130 # % (''.join(self.ename), count))
# Propagate this level's total (direct + nested) entry count upward.
134 self.parent.count += count + self.count
# Move the writer's current Level to match ename: pop (and flush)
# levels that are no longer on the path, then push new ones down to
# ename, writing blank placeholder entries where needed.
# NOTE(review): several interior lines (loop bodies, the newentry
# branch) are missing from this excerpt.
138 def _golevel(level, f, ename, newentry, metastore, tmax):
139 # close nodes back up the tree
# Default (empty) metadata record shared by all blank entries.
141 default_meta_ofs = metastore.store(metadata.Metadata())
142 while ename[:len(level.ename)] != level.ename:
143 n = BlankNewEntry(level.ename[-1], default_meta_ofs, tmax)
# write() flushes the popped level's children and returns their
# (offset, count) for the parent entry's child linkage.
145 (n.children_ofs,n.children_n) = level.write(f)
146 level.parent.list.append(n)
149 # create nodes down the tree
150 while len(level.ename) < len(ename):
151 level = Level(ename[:len(level.ename)+1], level)
153 # are we in precisely the right place?
154 assert(ename == level.ename)
# Root level has ename == [], hence the 'ename and ... or None' guard.
156 BlankNewEntry(ename and level.ename[-1] or None, default_meta_ofs, tmax)
157 (n.children_ofs,n.children_n) = level.write(f)
159 level.parent.list.append(n)
# Entry: one bupindex record (base class; the 'class Entry' header line
# is not visible in this excerpt). Times are stored in memory as
# integer nanoseconds since the epoch.
166 def __init__(self, basename, name, meta_ofs, tmax):
# Coerce names to str (Python 2 bytes semantics in this codebase).
167 self.basename = str(basename)
168 self.name = str(name)
169 self.meta_ofs = meta_ofs
171 self.children_ofs = 0
# __repr__ fragment (its def line is missing from this excerpt).
175 return ("(%s,0x%04x,%d,%d,%d,%d,%d,%d,%s/%s,0x%04x,%d,0x%08x/%d)"
176 % (self.name, self.dev, self.ino, self.nlink,
177 self.ctime, self.mtime, self.atime,
178 self.size, self.mode, self.gitmode,
179 self.flags, self.meta_ofs,
180 self.children_ofs, self.children_n))
# packed(): serialize this entry per INDEX_SIG; nanosecond times are
# converted back to (sec, nsec) timespecs first. (The def/try lines
# are missing from this excerpt.)
184 ctime = xstat.nsecs_to_timespec(self.ctime)
185 mtime = xstat.nsecs_to_timespec(self.mtime)
186 atime = xstat.nsecs_to_timespec(self.atime)
187 return struct.pack(INDEX_SIG,
188 self.dev, self.ino, self.nlink,
192 self.size, self.mode,
193 self.gitmode, self.sha, self.flags,
194 self.children_ofs, self.children_n,
# Out-of-range values make struct.pack raise/warn; log and re-raise
# (the re-raise line is not visible here).
196 except (DeprecationWarning, struct.error) as e:
197 log('pack error: %s (%r)\n' % (e, self))
# stale(): compare the cached stat fields against a fresh os.stat
# result. NOTE(review): the 'return True' lines for each mismatch are
# missing from this excerpt; each visible 'if' presumably reports the
# entry as stale -- confirm upstream.
200 def stale(self, st, tstart, check_device=True):
201 if self.size != st.st_size:
203 if self.mtime != st.st_mtime:
205 if self.sha == EMPTY_SHA:
209 if self.ctime != st.st_ctime:
211 if self.ino != st.st_ino:
213 if self.nlink != st.st_nlink:
215 if not (self.flags & IX_EXISTS):
217 if check_device and (self.dev != st.st_dev):
219 # Check that the ctime's "second" is at or after tstart's.
220 ctime_sec_in_ns = xstat.fstime_floor_secs(st.st_ctime) * 10**9
# A ctime in the same second as (or after) the index run start means
# the file could still be changing under us.
221 if ctime_sec_in_ns >= tstart:
# Refresh the cached stat fields from a live stat result.
225 def update_from_stat(self, st, meta_ofs):
226 # Should only be called when the entry is stale(), and
227 # invalidate() should almost certainly be called afterward.
230 self.nlink = st.st_nlink
231 self.ctime = st.st_ctime
232 self.mtime = st.st_mtime
233 self.atime = st.st_atime
234 self.size = st.st_size
235 self.mode = st.st_mode
236 self.flags |= IX_EXISTS
237 self.meta_ofs = meta_ofs
# _fixup fragment: clamp m/ctime via _fixup_time (def line missing).
241 self.mtime = self._fixup_time(self.mtime)
242 self.ctime = self._fixup_time(self.ctime)
# Cap a timestamp at tmax when a cap is configured (body of the clamp
# branch is not visible here).
244 def _fixup_time(self, t):
245 if self.tmax != None and t > self.tmax:
# is_valid fragment: valid means both EXISTS and HASHVALID are set.
251 f = IX_HASHVALID|IX_EXISTS
252 return (self.flags & f) == f
def invalidate(self):
    """Mark this entry's stored sha as no longer trustworthy.

    Clears IX_HASHVALID; the other flag bits are left untouched.
    """
    self.flags = self.flags & ~IX_HASHVALID
# validate(): record a freshly-computed (gitmode, sha) pair and mark
# the entry valid. NOTE(review): the sha assignment and the sha-type
# assertions appear to be missing from this excerpt.
257 def validate(self, gitmode, sha):
# gitmode+0 == gitmode asserts gitmode is numeric.
260 assert(gitmode+0 == gitmode)
261 self.gitmode = gitmode
263 self.flags |= IX_HASHVALID|IX_EXISTS
# exists() fragment (def line missing): existence is the inverse of
# is_deleted().
266 return not self.is_deleted()
def sha_missing(self):
    """Report whether the stored object should be treated as missing.

    True when IX_SHAMISSING is set, or when the hash is not valid at
    all (an invalid hash is as good as missing).
    """
    if self.flags & IX_SHAMISSING:
        return self.flags & IX_SHAMISSING
    return not (self.flags & IX_HASHVALID)
def is_deleted(self):
    """True when the entry does not exist on the filesystem (IX_EXISTS clear)."""
    return not (self.flags & IX_EXISTS)
# Mark the entry as gone from the filesystem: clearing IX_EXISTS also
# invalidates the hash. NOTE(review): upstream also repacks after this;
# that line is not visible in this excerpt.
274 def set_deleted(self):
275 if self.flags & IX_EXISTS:
276 self.flags &= ~(IX_EXISTS | IX_HASHVALID)
# is_real() fragment (def line missing): real == not fake.
279 return not self.is_fake()
# is_fake() fragment (def line missing): a zero ctime marks a
# synthesized (blank) entry.
282 return not self.ctime
# Three-way comparison used by all the rich-comparison operators.
# NOTE(review): the return statements for bc/vc/fc are missing from
# this excerpt.
284 def _cmp(self, other):
285 # Note reversed name ordering
286 bc = bytescmp(other.name, self.name)
289 vc = self.is_valid() - other.is_valid()
292 fc = self.is_fake() - other.is_fake()
def __eq__(self, other):
    """Entries compare equal when the three-way _cmp yields zero."""
    return not self._cmp(other)
# __ne__ fragment (its def line is missing from this excerpt).
301 return self._cmp(other) != 0
def __lt__(self, other):
    """Strict 'before' ordering per _cmp (name order is reversed there)."""
    rank = self._cmp(other)
    return rank < 0
def __gt__(self, other):
    """Strict 'after' ordering per _cmp (name order is reversed there)."""
    rank = self._cmp(other)
    return rank > 0
# __le__ / __ge__ fragments (their def lines are missing from this
# excerpt).
310 return self._cmp(other) <= 0
313 return self._cmp(other) >= 0
# write() fragment: serialize as NUL-terminated basename followed by
# the packed fixed-size record (def line missing).
316 f.write(self.basename + '\0' + self.packed())
# NewEntry: an Entry built from fresh in-memory values (as opposed to
# one parsed back out of an existing index file).
# NOTE(review): the signature line carrying 'ctime, mtime, atime,'
# (original line 321) is missing from this excerpt; the tuple
# assignment below references those names.
319 class NewEntry(Entry):
320 def __init__(self, basename, name, tmax, dev, ino, nlink,
322 size, mode, gitmode, sha, flags, meta_ofs,
323 children_ofs, children_n):
324 Entry.__init__(self, basename, name, meta_ofs, tmax)
# Bulk-assign all stat/index fields in one parallel tuple unpack.
325 (self.dev, self.ino, self.nlink, self.ctime, self.mtime, self.atime,
326 self.size, self.mode, self.gitmode, self.sha,
327 self.flags, self.children_ofs, self.children_n
328 ) = (dev, ino, nlink, ctime, mtime, atime,
329 size, mode, gitmode, sha, flags, children_ofs, children_n)
class BlankNewEntry(NewEntry):
    """A placeholder NewEntry with zeroed stat fields and an empty sha.

    Its basename doubles as its full name; only the metadata offset and
    the optional time cap carry real information.
    """
    def __init__(self, basename, meta_ofs, tmax):
        NewEntry.__init__(self, basename, basename, tmax,
                          0, 0, 0,           # dev, ino, nlink
                          0, 0, 0,           # ctime, mtime, atime
                          0, 0,              # size, mode
                          0, EMPTY_SHA, 0,   # gitmode, sha, flags
                          meta_ofs,
                          0, 0)              # children_ofs, children_n
# ExistingEntry: an Entry parsed out of an mmap'd index file at a given
# offset; repack() writes changes back in place.
# NOTE(review): several interior lines (parent/_m/_ofs assignments, the
# repack def line, parts of iter) are missing from this excerpt. Uses
# Python 2 buffer()/xrange().
340 class ExistingEntry(Entry):
341 def __init__(self, parent, basename, name, m, ofs):
342 Entry.__init__(self, basename, name, None, None)
# Decode the fixed-size record; the three (sec, nsec) pairs are then
# recombined into integer nanoseconds.
346 (self.dev, self.ino, self.nlink,
347 self.ctime, ctime_ns, self.mtime, mtime_ns, self.atime, atime_ns,
348 self.size, self.mode, self.gitmode, self.sha,
349 self.flags, self.children_ofs, self.children_n, self.meta_ofs
350 ) = struct.unpack(INDEX_SIG, str(buffer(m, ofs, ENTLEN)))
351 self.atime = xstat.timespec_to_nsecs((self.atime, atime_ns))
352 self.mtime = xstat.timespec_to_nsecs((self.mtime, mtime_ns))
353 self.ctime = xstat.timespec_to_nsecs((self.ctime, ctime_ns))
355 # effectively, we don't bother messing with IX_SHAMISSING if
356 # not IX_HASHVALID, since it's redundant, and repacking is more
357 # expensive than not repacking.
358 # This is implemented by having sha_missing() check IX_HASHVALID too.
# Set or clear IX_SHAMISSING; repacks only when the flag actually
# changed (the comparison/repack lines are not visible here).
359 def set_sha_missing(self, val):
361 oldval = self.sha_missing() and 1 or 0
363 flag = val and IX_SHAMISSING or 0
364 newflags = (self.flags & (~IX_SHAMISSING)) | flag
365 self.flags = newflags
# NOTE(review): 'flag' parameter is unused in the visible code, and a
# trailing repack() appears to be missing from this excerpt.
368 def unset_sha_missing(self, flag):
369 if self.flags & IX_SHAMISSING:
370 self.flags &= ~IX_SHAMISSING
# repack() fragment (def line missing): rewrite this record in the
# mmap and propagate invalidation to the parent directory entry.
374 self._m[self._ofs:self._ofs+ENTLEN] = self.packed()
375 if self.parent and not self.is_valid():
376 self.parent.invalidate()
# Depth-first iteration over this entry's children, optionally pruned
# by name prefix and a wantrecurse predicate.
379 def iter(self, name=None, wantrecurse=None):
381 if dname and not dname.endswith('/'):
383 ofs = self.children_ofs
384 assert(ofs <= len(self._m))
385 assert(self.children_n <= UINT_MAX) # i.e. python struct 'I'
386 for i in xrange(self.children_n):
# Each child record is a NUL-terminated basename + packed entry.
387 eon = self._m.find('\0', ofs)
391 basename = str(buffer(self._m, ofs, eon-ofs))
392 child = ExistingEntry(self, basename, self.name + basename,
# Recurse into directories that could contain the requested name.
395 or child.name.startswith(dname)
396 or child.name.endswith('/') and dname.startswith(child.name)):
397 if not wantrecurse or wantrecurse(child):
398 for e in child.iter(name=name, wantrecurse=wantrecurse):
400 if not name or child.name == name or child.name.startswith(dname):
402 ofs = eon + 1 + ENTLEN
# Reader.__init__ (the 'class Reader' header is not visible in this
# excerpt): open and mmap an existing index file, verify the header
# magic, and read the total entry count from the footer.
409 def __init__(self, filename):
410 self.filename = filename
412 self.writable = False
416 f = open(filename, 'r+')
# A missing index file is tolerated (reader stays empty); other errors
# presumably re-raise -- the surrounding try/except lines are missing
# from this excerpt.
418 if e.errno == errno.ENOENT:
423 b = f.read(len(INDEX_HDR))
# A bad magic/version is reported as a warning, not fatal here.
425 log('warning: %s: header: expected %r, got %r\n'
426 % (filename, INDEX_HDR, b))
428 st = os.fstat(f.fileno())
430 self.m = mmap_readwrite(f)
# Entry count lives in the footer at the end of the mmap.
432 self.count = struct.unpack(FOOTER_SIG,
433 str(buffer(self.m, st.st_size-FOOTLEN, FOOTLEN)))[0]
# __len__ fragment (def line missing).
439 return int(self.count)
# Iterate entries in on-disk (forward) order, yielding detached
# ExistingEntry objects with no parent linkage.
# NOTE(review): the initial 'ofs' assignment and the record-bounds
# checks are missing from this excerpt.
441 def forward_iter(self):
443 while ofs+ENTLEN <= len(self.m)-FOOTLEN:
# Records are NUL-terminated basename + fixed-size packed entry.
444 eon = self.m.find('\0', ofs)
448 basename = str(buffer(self.m, ofs, eon-ofs))
449 yield ExistingEntry(None, basename, basename, self.m, eon+1)
450 ofs = eon + 1 + ENTLEN
# Reader.iter: walk the whole index tree starting from the synthetic
# '/' root entry, which is the last record before the footer.
452 def iter(self, name=None, wantrecurse=None):
# Guard: an index with only a header/footer has no entries to walk.
453 if len(self.m) > len(INDEX_HDR)+ENTLEN:
455 if dname and not dname.endswith('/'):
457 root = ExistingEntry(None, '/', '/',
458 self.m, len(self.m)-FOOTLEN-ENTLEN)
459 for sub in root.iter(name=name, wantrecurse=wantrecurse):
461 if not dname or dname == root.name:
# find(): first entry matching name, or None (the default argument of
# next() is not visible in this excerpt).
467 def find(self, name):
468 return next((e for e in self.iter(name, wantrecurse=lambda x : True)
# close/save fragments: only flush/unmap when opened writable and the
# mmap exists (the method def lines are missing from this excerpt).
476 if self.writable and self.m:
481 if self.writable and self.m:
484 self.writable = False
# Yield (adjusted_name, entry) pairs for every entry under each given
# prefix, with names rewritten from the resolved path (rp) back to the
# user-supplied path. NOTE(review): the yield lines and the 'pe'
# bookkeeping for the top-of-prefix fallback are missing from this
# excerpt.
486 def filter(self, prefixes, wantrecurse=None):
487 for (rp, path) in reduce_paths(prefixes):
489 for e in self.iter(rp, wantrecurse=wantrecurse):
491 assert(e.name.startswith(rp))
# Translate the resolved name back into the caller's spelling.
492 name = path + e.name[len(rp):]
495 # Always return at least the top for each prefix.
496 # Otherwise something like "save x/y" will produce
497 # nothing if x is up to date.
500 name = path + pe.name[len(rp):]
503 # FIXME: this function isn't very generic, because it splits the filename
504 # in an odd way and depends on a terminating '/' to indicate directories.
# NOTE(review): the def line and the initial split('/') are missing
# from this excerpt; each non-final element keeps its trailing '/'.
506 """Split a path into a list of elements of the file system hierarchy."""
508 l = [i+'/' for i in l[:-1]] + l[-1:]
510 l.pop() # extra blank caused by terminating '/'
# Writer.__init__ (the 'class Writer' header is not visible in this
# excerpt): write a new index into a temp file next to the target,
# renamed into place only on a successful close.
515 def __init__(self, filename, metastore, tmax):
516 self.rootlevel = self.level = Level([], None)
521 self.filename = filename = resolve_parent(filename)
522 self.metastore = metastore
# NOTE(review): 'dir' shadows the builtin of the same name.
524 (dir,name) = os.path.split(filename)
525 (ffd,self.tmpname) = tempfile.mkstemp('.tmp', filename, dir)
526 self.f = os.fdopen(ffd, 'wb', 65536)
527 self.f.write(INDEX_HDR)
# abort fragment: discard the temp file without renaming.
537 os.unlink(self.tmpname)
# flush fragment: pop all remaining levels (ename []), then append the
# footer with the final entry count.
541 self.level = _golevel(self.level, self.f, [], None,
542 self.metastore, self.tmax)
543 self.count = self.rootlevel.count
546 self.f.write(struct.pack(FOOTER_SIG, self.count))
548 assert(self.level == None)
# close fragment: atomically replace the real index with the temp file.
556 os.rename(self.tmpname, self.filename)
def _add(self, ename, entry):
    """Append one entry, enforcing strictly descending path order.

    Entries must arrive in reverse-sorted order; a path at or after
    the previously added one is an Error.
    """
    previous = self.lastfile
    if previous and previous <= ename:
        raise Error('%r must come before %r'
                    % (''.join(ename), ''.join(previous)))
    self.lastfile = ename
    # Reposition the level stack to ename's directory, flushing any
    # completed levels along the way.
    self.level = _golevel(self.level, self.f, ename, entry,
                          self.metastore, self.tmax)
# Writer.add: build a NewEntry (or BlankNewEntry when no stat-backed
# data applies) for name and push it via _add.
# NOTE(review): the flags initialization, the hashgen branch header,
# and the final _add call are missing from this excerpt.
566 def add(self, name, st, meta_ofs, hashgen = None):
567 endswith = name.endswith('/')
568 ename = pathsplit(name)
570 #log('add: %r %r\n' % (basename, name))
# With a hashgen, the sha is computed now and marked valid; otherwise
# an empty sha is stored and left invalid.
574 (gitmode, sha) = hashgen(name)
575 flags |= IX_HASHVALID
577 (gitmode, sha) = (0, EMPTY_SHA)
# Directory-ness must agree with the trailing-'/' convention.
579 isdir = stat.S_ISDIR(st.st_mode)
580 assert(isdir == endswith)
581 e = NewEntry(basename, name, self.tmax,
582 st.st_dev, st.st_ino, st.st_nlink,
583 st.st_ctime, st.st_mtime, st.st_atime,
584 st.st_size, st.st_mode, gitmode, sha, flags,
# No-stat branch: store an empty Metadata record and a blank entry.
588 meta_ofs = self.metastore.store(metadata.Metadata())
589 e = BlankNewEntry(basename, meta_ofs, self.tmax)
def add_ixentry(self, e):
    """Re-add an entry from an existing index, dropping its old child linkage."""
    e.children_ofs = 0
    e.children_n = 0
    self._add(pathsplit(e.name), e)
# Open a Reader over the still-temporary index file.
# NOTE(review): a flush before opening (original line 600) appears to
# be missing from this excerpt; without it the Reader could see a
# partially buffered file -- confirm upstream.
599 def new_reader(self):
601 return Reader(self.tmpname)
604 def _slashappend_or_add_error(p, caller):
605 """Return p, after ensuring it has a single trailing slash if it names
606 a directory, unless there's an OSError, in which case, call
607 add_error() and return None."""
# NOTE(review): the try/os.stat and the 'return None' / non-directory
# return are missing from this excerpt.
611 add_error('%s: %s' % (caller, e))
614 if stat.S_ISDIR(st.st_mode):
615 return slashappend(p)
619 def unique_resolved_paths(paths):
620 "Return a collection of unique resolved paths."
# Resolve each path; failures become None (after add_error) and are
# filtered out below. NOTE(review): the generator's 'for p in paths'
# line is missing from this excerpt.
621 rps = (_slashappend_or_add_error(resolve_parent(p), 'unique_resolved_paths')
623 return frozenset((x for x in rps if x is not None))
# Collapse a list of paths to (resolved, original) pairs, dropping any
# path already covered by a previously seen directory prefix.
# NOTE(review): the xpaths initialization/sort, the 'prev' bookkeeping,
# and the final 'return paths' are missing from this excerpt.
626 def reduce_paths(paths):
629 rp = _slashappend_or_add_error(resolve_parent(p), 'reduce_paths')
# Keep the user's spelling, but mirror the resolved trailing slash.
631 xpaths.append((rp, slashappend(p) if rp.endswith('/') else p))
636 for (rp, p) in xpaths:
637 if prev and (prev == rp
638 or (prev.endswith('/') and rp.startswith(prev))):
639 continue # already superseded by previous path
641 paths.append((rp, p))
# Final output in reverse-sorted order, matching Writer._add's
# expectations.
642 paths.sort(reverse=True)
# merge() body fragment (the 'def merge(iters)' line is missing from
# this excerpt): merge several index iterators by entry name, showing
# progress as it goes.
647 def pfunc(count, total):
648 qprogress('bup: merging indexes (%d/%d)\r' % (count, total))
649 def pfinal(count, total):
650 progress('bup: merging indexes (%d/%d), done.\n' % (count, total))
651 return merge_iter(iters, 1024, pfunc, pfinal, key='name')