1 import metadata, os, stat, struct, tempfile
3 from bup.helpers import *
8 INDEX_HDR = 'BUPI\0\0\0\5'
10 # Time values are handled as integer nanoseconds since the epoch in
11 # memory, but are written as xstat/metadata timespecs. This behavior
12 # matches the existing metadata/xstat/.bupm code.
14 # Record times (mtime, ctime, atime) as xstat/metadata timespecs, and
15 # store all of the times in the index so they won't interfere with the
16 # forthcoming metadata cache.
17 INDEX_SIG = '!QQQqQqQqQIIQII20sHIIQ'
19 ENTLEN = struct.calcsize(INDEX_SIG)
21 FOOTLEN = struct.calcsize(FOOTER_SIG)
23 IX_EXISTS = 0x8000 # file exists on filesystem
24 IX_HASHVALID = 0x4000 # the stored sha1 matches the filesystem
25 IX_SHAMISSING = 0x2000 # the stored sha1 object doesn't seem to exist
class Error(Exception):
    """Module-specific error (e.g. out-of-order adds in Writer._add)."""
class MetaStoreReader:
    """Read-only accessor for the index's metadata store file.

    Records are read back with metadata.Metadata.read() at offsets
    previously produced by MetaStoreWriter.store().
    NOTE(review): several lines of this class are elided from this view.
    """
    def __init__(self, filename):
        self._file = open(filename, 'rb')

    def metadata_at(self, ofs):
        # NOTE(review): the seek to ofs is elided from this view --
        # presumably self._file.seek(ofs) precedes the read; confirm
        # against the full source.
        return metadata.Metadata.read(self._file)
class MetaStoreWriter:
    # For now, we just append to the file, and try to handle any
    # truncation or corruption somewhat sensibly.
    #
    # NOTE(review): several lines of this class are elided from this view
    # (the loop/exception structure around the read-back below, and the
    # early return in store()).
    def __init__(self, filename):
        # Map metadata hashes to bupindex.meta offsets.
        self._filename = filename
        # FIXME: see how slow this is; does it matter?
        m_file = open(filename, 'ab+')
        # Re-read existing records to rebuild the encoded-metadata ->
        # file-offset map, so identical metadata is stored only once.
        m = metadata.Metadata.read(m_file)
        m_encoded = m.encode()
        self._offsets[m_encoded] = m_file.tell() - len(m_encoded)
        m = metadata.Metadata.read(m_file)
        # Logged when the read-back fails partway (handler elided):
        log('index metadata in %r appears to be corrupt' % filename)
        self._file = open(filename, 'ab')

    def store(self, metadata):
        # NOTE(review): the parameter shadows the module-level 'metadata'
        # import; the early-return when the encoding is already known is
        # elided from this view.
        meta_encoded = metadata.encode(include_path=False)
        ofs = self._offsets.get(meta_encoded)
        # Append the new record and remember where it landed.
        ofs = self._file.tell()
        self._file.write(meta_encoded)
        self._offsets[meta_encoded] = ofs
# NOTE(review): fragments of class Level; the class header, some method
# headers, and intervening lines are elided from this view.
def __init__(self, ename, parent):
    # (body elided in this view)
    # -- presumably from Level.write(f): remember where this level's
    # child records start in the file, and how many there are --
    (ofs,n) = (f.tell(), len(self.list))
    count = len(self.list)
    #log('popping %r with %d entries\n'
    #    % (''.join(self.ename), count))
    # Propagate this level's entry count up to the parent level.
    self.parent.count += count + self.count
def _golevel(level, f, ename, newentry, metastore, tmax):
    """Advance the Level stack from LEVEL to the path ENAME.

    Finished directory levels are closed (written to F) on the way up
    and new ones are opened on the way down.
    NOTE(review): several lines are elided from this view.
    """
    # close nodes back up the tree
    default_meta_ofs = metastore.store(metadata.Metadata())
    while ename[:len(level.ename)] != level.ename:
        # Synthesize a blank directory entry for the level being closed.
        n = BlankNewEntry(level.ename[-1], default_meta_ofs, tmax)
        (n.children_ofs,n.children_n) = level.write(f)
        level.parent.list.append(n)
    # create nodes down the tree
    while len(level.ename) < len(ename):
        level = Level(ename[:len(level.ename)+1], level)
    # are we in precisely the right place?
    assert(ename == level.ename)
    # NOTE(review): the assignment target for the expression below is on
    # an elided line (presumably "n = newentry or \"); confirm against
    # the full source.
    BlankNewEntry(ename and level.ename[-1] or None, default_meta_ofs, tmax)
    (n.children_ofs,n.children_n) = level.write(f)
    level.parent.list.append(n)
# NOTE(review): methods of class Entry; the class header and a number of
# lines -- including some "def" lines -- are elided from this view.
def __init__(self, basename, name, meta_ofs, tmax):
    self.basename = str(basename)
    self.name = str(name)
    self.meta_ofs = meta_ofs
    self.children_ofs = 0

# -- from __repr__ (def line elided in this view) --
    return ("(%s,0x%04x,%d,%d,%d,%d,%d,%d,%d,%d,%s/%s,0x%04x,%d,0x%08x/%d)"
            % (self.name, self.dev, self.ino, self.nlink,
               self.ctime, self.mtime, self.atime, self.uid, self.gid,
               self.size, self.mode, self.gitmode,
               self.flags, self.meta_ofs,
               self.children_ofs, self.children_n))

# -- from packed() (def line and try: elided): serialize this entry as
# an INDEX_SIG struct, converting integer-nanosecond times back to
# timespecs for storage --
    ctime = xstat.nsecs_to_timespec(self.ctime)
    mtime = xstat.nsecs_to_timespec(self.mtime)
    atime = xstat.nsecs_to_timespec(self.atime)
    return struct.pack(INDEX_SIG,
                       self.dev, self.ino, self.nlink,
                       self.uid, self.gid, self.size, self.mode,
                       self.gitmode, self.sha, self.flags,
                       self.children_ofs, self.children_n,
    # (argument list continues on elided lines)
    except (DeprecationWarning, struct.error), e:  # Python 2 except syntax
        log('pack error: %s (%r)\n' % (e, self))

def from_stat(self, st, meta_ofs, tstart, check_device=True):
    """Refresh this entry from stat result ST and mark it IX_EXISTS.

    NOTE(review): lines are elided from this view (e.g. the dev/ino/
    uid/gid assignments and the consequences of the dirtiness test at
    the bottom).
    """
    old = (self.dev if check_device else 0,
           self.ino, self.nlink, self.ctime, self.mtime,
           self.uid, self.gid, self.size, self.flags & IX_EXISTS)
    new = (st.st_dev if check_device else 0,
           st.st_ino, st.st_nlink, st.st_ctime, st.st_mtime,
           st.st_uid, st.st_gid, st.st_size, IX_EXISTS)
    self.nlink = st.st_nlink
    self.ctime = st.st_ctime
    self.mtime = st.st_mtime
    self.atime = st.st_atime
    self.size = st.st_size
    self.mode = st.st_mode
    self.flags |= IX_EXISTS
    self.meta_ofs = meta_ofs
    # Check that the ctime's "second" is at or after tstart's.
    ctime_sec_in_ns = xstat.fstime_floor_secs(st.st_ctime) * 10**9
    # Dirty if changed in the same second the index run started, if any
    # stat field differs, or if there is no trustworthy hash yet.
    # (Consequent block elided in this view.)
    if ctime_sec_in_ns >= tstart or old != new \
           or self.sha == EMPTY_SHA or not self.gitmode:

# -- from a fixup method (def line elided): negative uid/gid values are
# wrapped into the unsigned 32-bit range, and times are clamped --
    self.uid += 0x100000000
    self.gid += 0x100000000
    assert(self.uid >= 0)
    assert(self.gid >= 0)
    self.mtime = self._fixup_time(self.mtime)
    self.ctime = self._fixup_time(self.ctime)

def _fixup_time(self, t):
    # Clamp times beyond tmax (the consequent is elided in this view --
    # presumably returns self.tmax; confirm against the full source).
    # NOTE(review): style -- "self.tmax is not None" is preferred.
    if self.tmax != None and t > self.tmax:

# -- presumably from is_valid() (def line elided): both flags required --
    f = IX_HASHVALID|IX_EXISTS
    return (self.flags & f) == f

def invalidate(self):
    """Mark the stored hash as no longer trustworthy."""
    self.flags &= ~IX_HASHVALID

def validate(self, gitmode, sha):
    # (additional checks/assignments elided in this view)
    assert(gitmode+0 == gitmode)  # gitmode must be an integer
    self.gitmode = gitmode
    self.flags |= IX_HASHVALID|IX_EXISTS

# -- presumably from exists() (def line elided) --
    return not self.is_deleted()

def sha_missing(self):
    # An unset IX_HASHVALID counts as missing too (see the comment on
    # ExistingEntry.set_sha_missing in the full source).
    return (self.flags & IX_SHAMISSING) or not (self.flags & IX_HASHVALID)

def is_deleted(self):
    return (self.flags & IX_EXISTS) == 0

def set_deleted(self):
    if self.flags & IX_EXISTS:
        self.flags &= ~(IX_EXISTS | IX_HASHVALID)

# -- presumably from is_real() (def line elided) --
    return not self.is_fake()

# -- presumably from is_fake() (def line elided): fakes have no ctime --
    return not self.ctime

# -- from __cmp__ (def line elided): sort by name descending, then by
# validity, then fakeness (Python 2 cmp protocol) --
    return (cmp(b.name, a.name)
            or cmp(a.is_valid(), b.is_valid())
            or cmp(a.is_fake(), b.is_fake()))

# -- from write(f) (def line elided): basename, NUL, packed record --
    f.write(self.basename + '\0' + self.packed())
class NewEntry(Entry):
    # NOTE(review): one parameter line is elided from this view between
    # "dev, ino, nlink," and "uid, ..." -- presumably "ctime, mtime,
    # atime," given the tuple assignment below; confirm against the full
    # source.
    def __init__(self, basename, name, tmax, dev, ino, nlink,
                 uid, gid, size, mode, gitmode, sha, flags, meta_ofs,
                 children_ofs, children_n):
        Entry.__init__(self, basename, name, meta_ofs, tmax)
        # Bulk-assign every field in one tuple unpack.
        (self.dev, self.ino, self.nlink, self.ctime, self.mtime, self.atime,
         self.uid, self.gid, self.size, self.mode, self.gitmode, self.sha,
         self.flags, self.children_ofs, self.children_n
         ) = (dev, ino, nlink, ctime, mtime, atime, uid, gid,
              size, mode, gitmode, sha, flags, children_ofs, children_n)
class BlankNewEntry(NewEntry):
    """A NewEntry with every stat-derived field zeroed out.

    Used (e.g. by _golevel) to synthesize directory entries that have
    no real filesystem data behind them; the sha is the empty-blob sha.
    """
    def __init__(self, basename, meta_ofs, tmax):
        # dev, ino, nlink, ctime, mtime, atime, uid, gid, size, mode
        stat_zeros = [0] * 10
        NewEntry.__init__(self, basename, basename, tmax,
                          *(stat_zeros + [0, EMPTY_SHA, 0, meta_ofs, 0, 0]))
class ExistingEntry(Entry):
    # An Entry backed by a region of an mmap'ed existing index file.
    # NOTE(review): many lines of this class are elided from this view.
    def __init__(self, parent, basename, name, m, ofs):
        Entry.__init__(self, basename, name, None, None)
        # (parent/_m/_ofs attribute assignments elided in this view)
        # Unpack the fixed-size record at ofs; times are stored as
        # (sec, ns) timespec pairs and recombined into integer ns below.
        (self.dev, self.ino, self.nlink,
         self.ctime, ctime_ns, self.mtime, mtime_ns, self.atime, atime_ns,
         self.uid, self.gid, self.size, self.mode, self.gitmode, self.sha,
         self.flags, self.children_ofs, self.children_n, self.meta_ofs
         ) = struct.unpack(INDEX_SIG, str(buffer(m, ofs, ENTLEN)))
        self.atime = xstat.timespec_to_nsecs((self.atime, atime_ns))
        self.mtime = xstat.timespec_to_nsecs((self.mtime, mtime_ns))
        self.ctime = xstat.timespec_to_nsecs((self.ctime, ctime_ns))

    # effectively, we don't bother messing with IX_SHAMISSING if
    # not IX_HASHVALID, since it's redundant, and repacking is more
    # expensive than not repacking.
    # This is implemented by having sha_missing() check IX_HASHVALID too.
    def set_sha_missing(self, val):
        # Normalize both sides to 0/1 before comparing (comparison and
        # repack-on-change logic elided in this view).
        oldval = self.sha_missing() and 1 or 0
        flag = val and IX_SHAMISSING or 0
        newflags = (self.flags & (~IX_SHAMISSING)) | flag
        self.flags = newflags

    def unset_sha_missing(self, flag):
        # NOTE(review): the 'flag' parameter is unused in the visible
        # lines; the repack call is elided in this view.
        if self.flags & IX_SHAMISSING:
            self.flags &= ~IX_SHAMISSING

    # -- presumably from repack() (def line elided): write the packed
    # record back into the mmap'ed file in place --
        self._m[self._ofs:self._ofs+ENTLEN] = self.packed()
        if self.parent and not self.is_valid():
            self.parent.invalidate()

    def iter(self, name=None, wantrecurse=None):
        # Walk the children recorded at children_ofs, optionally
        # filtered by NAME and pruned by WANTRECURSE.
        # NOTE(review): several lines are elided from this view
        # (dname setup, the start of the filter condition, yields).
        if dname and not dname.endswith('/'):
        ofs = self.children_ofs
        assert(ofs <= len(self._m))
        assert(self.children_n < 1000000)
        for i in xrange(self.children_n):
            # Each child record is: basename, NUL byte, packed entry.
            eon = self._m.find('\0', ofs)
            basename = str(buffer(self._m, ofs, eon-ofs))
            child = ExistingEntry(self, basename, self.name + basename,
            # (filter condition continues from elided lines)
                or child.name.startswith(dname)
                or child.name.endswith('/') and dname.startswith(child.name)):
                if not wantrecurse or wantrecurse(child):
                    for e in child.iter(name=name, wantrecurse=wantrecurse):
            if not name or child.name == name or child.name.startswith(dname):
            ofs = eon + 1 + ENTLEN
# NOTE(review): methods of class Reader (class header elided); many
# lines, including some method headers, are elided from this view.
def __init__(self, filename):
    self.filename = filename
    self.writable = False
    # Opened read/write so entries can later be repacked in place.
    f = open(filename, 'r+')
    # (exception handling partially elided in this view)
    if e.errno == errno.ENOENT:
    # Verify the file begins with the expected INDEX_HDR magic/version.
    b = f.read(len(INDEX_HDR))
    log('warning: %s: header: expected %r, got %r\n'
        % (filename, INDEX_HDR, b))
    st = os.fstat(f.fileno())
    self.m = mmap_readwrite(f)
    # The total entry count lives in the footer at the end of the file.
    self.count = struct.unpack(FOOTER_SIG,
                               str(buffer(self.m, st.st_size-FOOTLEN, FOOTLEN)))[0]

# -- presumably from __len__ (def line elided) --
    return int(self.count)

def forward_iter(self):
    # Yield entries in file order as parentless ExistingEntry objects.
    # (initial ofs assignment elided -- presumably just past INDEX_HDR)
    while ofs+ENTLEN <= len(self.m)-FOOTLEN:
        # Each record is: basename, NUL byte, packed entry.
        eon = self.m.find('\0', ofs)
        # (assertions on eon elided in this view)
        basename = str(buffer(self.m, ofs, eon-ofs))
        yield ExistingEntry(None, basename, basename, self.m, eon+1)
        ofs = eon + 1 + ENTLEN

def iter(self, name=None, wantrecurse=None):
    # The root entry is the last record before the footer; recurse from
    # there.  NOTE(review): several lines are elided from this view.
    if len(self.m) > len(INDEX_HDR)+ENTLEN:
        if dname and not dname.endswith('/'):
        root = ExistingEntry(None, '/', '/',
                             self.m, len(self.m)-FOOTLEN-ENTLEN)
        for sub in root.iter(name=name, wantrecurse=wantrecurse):
        if not dname or dname == root.name:

# -- cleanup methods (def lines elided; presumably save()/close()) --
    if self.writable and self.m:
    if self.writable and self.m:
        self.writable = False

def filter(self, prefixes, wantrecurse=None):
    # Yield entries under the given path prefixes, mapping real paths
    # back to the user-specified ones.  (yield elided in this view.)
    for (rp, path) in reduce_paths(prefixes):
        for e in self.iter(rp, wantrecurse=wantrecurse):
            assert(e.name.startswith(rp))
            name = path + e.name[len(rp):]
# FIXME: this function isn't very generic, because it splits the filename
# in an odd way and depends on a terminating '/' to indicate directories.
# NOTE(review): the def line and the initial split are elided from this
# view (presumably "def pathsplit(p):" followed by "l = p.split('/')").
    """Split a path into a list of elements of the file system hierarchy."""
    # Re-attach '/' to every element except the last (the basename).
    l = [i+'/' for i in l[:-1]] + l[-1:]
    # (guard condition elided in this view)
    l.pop() # extra blank caused by terminating '/'
# NOTE(review): methods of class Writer (class header elided); several
# lines, including the method headers for flush/close, are elided from
# this view.
def __init__(self, filename, metastore, tmax):
    self.rootlevel = self.level = Level([], None)
    self.filename = filename = realpath(filename)
    self.metastore = metastore
    (dir,name) = os.path.split(filename)
    # Write to a temp file in the same directory so it can be atomically
    # renamed over the real index when finished (see os.rename below).
    (ffd,self.tmpname) = tempfile.mkstemp('.tmp', filename, dir)
    self.f = os.fdopen(ffd, 'wb', 65536)
    self.f.write(INDEX_HDR)

# -- from an abort/cleanup path (context elided in this view) --
    os.unlink(self.tmpname)

# -- presumably from flush() (def line elided): close every open level,
# then append the footer carrying the total entry count --
    self.level = _golevel(self.level, self.f, [], None,
                          self.metastore, self.tmax)
    self.count = self.rootlevel.count
    self.f.write(struct.pack(FOOTER_SIG, self.count))

# -- later finalization (context elided in this view) --
    assert(self.level == None)
    os.rename(self.tmpname, self.filename)
def _add(self, ename, entry):
    """Append ENTRY (whose path-split form is ENAME) to the index.

    Entries must be added in reverse (bup index) order; raises Error if
    ENAME does not sort strictly before the previously added path.
    """
    if self.lastfile and self.lastfile <= ename:
        # BUG FIX: this used to reference the undefined name 'e'
        # (e.name), raising NameError instead of the intended Error and
        # breaking the unconditional lastfile update below.  Use the
        # ename actually being added.
        raise Error('%r must come before %r'
                    % (''.join(ename), ''.join(self.lastfile)))
    self.lastfile = ename
    self.level = _golevel(self.level, self.f, ename, entry,
                          self.metastore, self.tmax)
def add(self, name, st, meta_ofs, hashgen = None):
    """Add filesystem path NAME with stat result ST to the index.

    NOTE(review): several lines are elided from this view (the basename
    computation, the hashgen if/else structure, directory handling, and
    the final _add call).
    """
    endswith = name.endswith('/')
    ename = pathsplit(name)
    #log('add: %r %r\n' % (basename, name))
    # When a hash generator is supplied, the resulting sha is valid:
    (gitmode, sha) = hashgen(name)
    flags |= IX_HASHVALID
    # ...otherwise record the empty sha without IX_HASHVALID:
    (gitmode, sha) = (0, EMPTY_SHA)
    # The trailing '/' in NAME must agree with the stat mode.
    isdir = stat.S_ISDIR(st.st_mode)
    assert(isdir == endswith)
    e = NewEntry(basename, name, self.tmax,
                 st.st_dev, st.st_ino, st.st_nlink,
                 st.st_ctime, st.st_mtime, st.st_atime,
                 st.st_uid, st.st_gid,
                 st.st_size, st.st_mode, gitmode, sha, flags,
    # (argument list continues on elided lines)
    meta_ofs = self.metastore.store(metadata.Metadata())
    # NOTE(review): 'tmax' is not defined in this method's visible
    # scope -- looks like it should be 'self.tmax' (NameError when this
    # branch runs); confirm and fix against the full source.
    e = BlankNewEntry(basename, meta_ofs, tmax)
def add_ixentry(self, e):
    """Add an already-constructed index entry E to this writer."""
    # Reset the child linkage; it will be recomputed for the new file.
    e.children_ofs = 0
    e.children_n = 0
    self._add(pathsplit(e.name), e)
def new_reader(self):
    """Return a Reader over the partially-written temp index file.

    NOTE(review): a line is elided here -- presumably self.flush() is
    called before constructing the Reader; confirm against the full
    source.
    """
    return Reader(self.tmpname)
def reduce_paths(paths):
    """Collapse PATHS into a minimal set of (realpath, given-path) pairs.

    Paths already contained within an earlier directory path are
    dropped, and the result is sorted in reverse order (bup index
    order).  NOTE(review): several lines are elided from this view
    (setup, the surrounding try/except, sorting of xpaths, and the
    return statement).
    """
    if stat.S_ISDIR(st.st_mode):
    xpaths.append((rp, p))
    add_error('reduce_paths: %s' % e)
    for (rp, p) in xpaths:
        if prev and (prev == rp
                     or (prev.endswith('/') and rp.startswith(prev))):
            continue # already superseded by previous path
        paths.append((rp, p))
    paths.sort(reverse=True)
# NOTE(review): body of a merge function (its def line, presumably
# "def merge(*iters):", is elided from this view).
def pfunc(count, total):
    # Transient progress line, overwritten in place via '\r'.
    qprogress('bup: merging indexes (%d/%d)\r' % (count, total))
def pfinal(count, total):
    # Final progress line printed once the merge completes.
    progress('bup: merging indexes (%d/%d), done.\n' % (count, total))
# Merge all iterators ordered by entry name, 1024 entries at a time.
return merge_iter(iters, 1024, pfunc, pfinal, key='name')