-import os, stat, time, struct, tempfile
+import os, stat, struct, tempfile
from bup.helpers import *
EMPTY_SHA = '\0'*20         # 20 zero bytes: no sha1 stored for this entry
FAKE_SHA = '\x01'*20        # 20 0x01 bytes: a deliberately bogus sha1
INDEX_HDR = 'BUPI\0\0\0\2'  # file magic plus big-endian format version

# FIXME: guess I should have used 64-bit integers to store the mtime/ctime.
# NTFS mtime=0 corresponds to the year 1600, which can't be stored in a 32-bit
# time_t. Next time we update the bupindex format, keep that in mind.
INDEX_SIG = '!IIIIIQII20sHII'

ENTLEN = struct.calcsize(INDEX_SIG)     # fixed byte size of one packed entry
FOOTER_SIG = '!Q'
FOOTLEN = struct.calcsize(FOOTER_SIG)   # byte size of the index footer
IX_EXISTS = 0x8000       # file exists on filesystem
IX_HASHVALID = 0x4000    # the stored sha1 matches the filesystem
IX_SHAMISSING = 0x2000   # the stored sha1 object doesn't seem to exist
class Error(Exception):
    """Base exception for errors raised by this index module."""
    pass
self.flags, self.children_ofs, self.children_n))
def packed(self):
    """Pack this entry's metadata into ENTLEN bytes per INDEX_SIG.

    Method of the index entry class (class statement is outside this hunk).
    """
    fields = (self.dev, self.ctime, self.mtime,
              self.uid, self.gid, self.size, self.mode,
              self.gitmode, self.sha, self.flags,
              self.children_ofs, self.children_n)
    try:
        return struct.pack(INDEX_SIG, *fields)
    except DeprecationWarning as e:
        # NOTE(review): presumably struct.pack warns on out-of-range values
        # here — log which entry failed so it can be diagnosed, then re-raise.
        log('pack error: %s (%r)\n' % (e, self))
        raise
def from_stat(self, st, tstart):
old = (self.dev, self.ctime, self.mtime,
if int(st.st_ctime) >= tstart or old != new \
or self.sha == EMPTY_SHA or not self.gitmode:
self.invalidate()
+ self._fixup()
+
def _fixup(self):
    """Normalize stat fields so they fit the 32-bit slots in INDEX_SIG.

    Method of the index entry class (class statement is outside this hunk).
    """
    # uid/gid can come back negative; wrap to the unsigned 32-bit value.
    for field in ('uid', 'gid'):
        v = getattr(self, field)
        if v < 0:
            v += 0x100000000
        assert(v >= 0)
        setattr(self, field, v)
    # Clamp timestamps into the signed 32-bit range the format can store.
    for field in ('mtime', 'ctime'):
        t = getattr(self, field)
        if t < -0x80000000:  # can happen in NTFS on 64-bit linux
            t = 0
        elif t > 0x7fffffff:
            t = 0x7fffffff
        setattr(self, field, t)
def is_valid(self):
f = IX_HASHVALID|IX_EXISTS
def exists(self):
    """Return true if the file was present on the filesystem when indexed."""
    if self.is_deleted():
        return False
    return True
def sha_missing(self):
    """Return true if the stored sha1 object can't be assumed to exist.

    IX_SHAMISSING is only maintained while IX_HASHVALID is set, so an
    invalid hash also counts as missing here.
    """
    marked_missing = self.flags & IX_SHAMISSING
    return marked_missing or not (self.flags & IX_HASHVALID)
def is_deleted(self):
    """Return true if the file no longer exists (IX_EXISTS cleared)."""
    return not (self.flags & IX_EXISTS)
return not self.ctime
def __cmp__(a, b):
    # Entries sort by *reverse* name order first (note the swapped
    # operands), then by validity, then by fakeness.
    by_name = cmp(b.name, a.name)
    if by_name:
        return by_name
    by_valid = cmp(a.is_valid(), b.is_valid())
    if by_valid:
        return by_valid
    return cmp(a.is_fake(), b.is_fake())
def write(self, f):
    """Serialize this entry to f as: basename, NUL, packed fields."""
    record = '%s\0%s' % (self.basename, self.packed())
    f.write(record)
self.flags, self.children_ofs, self.children_n
) = (dev, int(ctime), int(mtime), uid, gid,
size, mode, gitmode, sha, flags, children_ofs, children_n)
+ self._fixup()
class BlankNewEntry(NewEntry):
self.flags, self.children_ofs, self.children_n
) = struct.unpack(INDEX_SIG, str(buffer(m, ofs, ENTLEN)))
# Effectively, we don't bother messing with IX_SHAMISSING if not
# IX_HASHVALID, since it's redundant, and repacking is more expensive
# than not repacking.
# This is implemented by having sha_missing() check IX_HASHVALID too.
def set_sha_missing(self, val):
    """Record whether the stored sha1 object is missing; repack on change."""
    want = val and 1 or 0
    have = self.sha_missing() and 1 or 0
    if want == have:
        return  # already in the desired state; skip the costly repack
    if want:
        self.flags |= IX_SHAMISSING
    else:
        self.flags &= ~IX_SHAMISSING
    self.repack()
+
def unset_sha_missing(self, flag):
    """Clear the IX_SHAMISSING flag (repacking only if it was set).

    NOTE(review): the 'flag' parameter is never used; it is kept only so
    the signature stays compatible with existing callers.
    """
    if self.flags & IX_SHAMISSING:
        self.flags = self.flags & ~IX_SHAMISSING
        self.repack()
+
def repack(self):
self._m[self._ofs:self._ofs+ENTLEN] = self.packed()
if self.parent and not self.is_valid():
def close(self):
    """Save pending changes, then release the mapping if we own a writable one."""
    self.save()
    if not (self.writable and self.m):
        return
    self.m.close()
    self.m = None
    self.writable = False
yield (name, e)
# FIXME: this function isn't very generic, because it splits the filename
# in an odd way and depends on a terminating '/' to indicate directories.
def pathsplit(p):
    """Split a path into a list of elements of the file system hierarchy.

    Every element except the last keeps its trailing '/'; a trailing '/'
    on the input produces no empty final element.
    """
    parts = p.split('/')
    out = [part + '/' for part in parts[:-1]]
    last = parts[-1]
    if last:
        out.append(last)
    return out
+
+
class Writer:
def __init__(self, filename):
self.rootlevel = self.level = Level([], None)
if stat.S_ISDIR(st.st_mode):
rp = slashappend(rp)
p = slashappend(p)
+ xpaths.append((rp, p))
except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- xpaths.append((rp, p))
+ add_error('reduce_paths: %s' % e)
xpaths.sort()
paths = []
paths.sort(reverse=True)
return paths
-
-class MergeIter:
- def __init__(self, iters):
- self.iters = iters
-
- def __len__(self):
- # FIXME: doesn't remove duplicated entries between iters.
- # That only happens for parent directories, but will mean the
- # actual iteration returns fewer entries than this function counts.
- return sum(len(it) for it in self.iters)
-
- def __iter__(self):
- total = len(self)
- l = [iter(it) for it in self.iters]
- l = [(next(it),it) for it in l]
- l = filter(lambda x: x[0], l)
- count = 0
- lastname = None
- while l:
- if not (count % 1024):
- progress('bup: merging indexes (%d/%d)\r' % (count, total))
- l.sort()
- (e,it) = l.pop()
- if not e:
- continue
- if e.name != lastname:
- yield e
- lastname = e.name
- n = next(it)
- if n:
- l.append((n,it))
- count += 1
- log('bup: merging indexes (%d/%d), done.\n' % (count, total))
def merge(*iters):
    """Merge the given index iterators by entry name, reporting progress.

    Delegates the actual merging (and duplicate-name elision) to
    merge_iter from bup.helpers, emitting progress every 1024 entries.
    """
    def report(count, total):
        qprogress('bup: merging indexes (%d/%d)\r' % (count, total))
    def report_done(count, total):
        progress('bup: merging indexes (%d/%d), done.\n' % (count, total))
    return merge_iter(iters, 1024, report, report_done, key='name')