Combine and speed up idx->midx and bupindex merge
diff --git a/lib/bup/index.py b/lib/bup/index.py
index 1c555d7f38929d4405e2178fc5a82805888e83a0..72a7296ea4fda43e299662873fbc77aa8947ff89 100644 (file)
@@ -1,10 +1,15 @@
-import os, stat, time, struct, tempfile
+import os, stat, struct, tempfile
 from bup.helpers import *
 
 EMPTY_SHA = '\0'*20
 FAKE_SHA = '\x01'*20
 INDEX_HDR = 'BUPI\0\0\0\2'
+
+# FIXME: guess I should have used 64-bit integers to store the mtime/ctime.
+# NTFS mtime=0 corresponds to the year 1600, which can't be stored in a 32-bit
+# time_t.  Next time we update the bupindex format, keep that in mind.
 INDEX_SIG = '!IIIIIQII20sHII'
+
 ENTLEN = struct.calcsize(INDEX_SIG)
 FOOTER_SIG = '!Q'
 FOOTLEN = struct.calcsize(FOOTER_SIG)
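
The FIXME above is easier to follow with a quick sanity check: INDEX_SIG packs
ctime and mtime as 32-bit 'I' fields, while an NTFS mtime of 0 corresponds to a
date around the year 1600, which as a Unix timestamp is far below what a 32-bit
time field can represent. A small standalone illustration (not part of the
index format itself):

    import struct, calendar

    # One fixed-width index record; the entry name is stored separately.
    entlen = struct.calcsize('!IIIIIQII20sHII')

    # A timestamp from around the year 1600 is a large negative number,
    # well outside the signed 32-bit window, hence the clamping in _fixup().
    t1600 = calendar.timegm((1600, 1, 1, 0, 0, 0))
    assert t1600 < -0x80000000
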
@@ -77,11 +82,15 @@ class Entry:
                    self.flags, self.children_ofs, self.children_n))
 
     def packed(self):
-        return struct.pack(INDEX_SIG,
+        try:
+            return struct.pack(INDEX_SIG,
                            self.dev, self.ctime, self.mtime, 
                            self.uid, self.gid, self.size, self.mode,
                            self.gitmode, self.sha, self.flags,
                            self.children_ofs, self.children_n)
+        except DeprecationWarning, e:
+            log('pack error: %s (%r)\n' % (e, self))
+            raise
 
     def from_stat(self, st, tstart):
         old = (self.dev, self.ctime, self.mtime,
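
On Python 2, struct.pack() can flag out-of-range integer arguments with a
DeprecationWarning; when warnings are escalated to errors that surfaces as an
exception, so packed() logs which entry carried the bad field before
re-raising. A rough, version-dependent sketch of the failure mode (newer
Pythons raise struct.error instead; the warning filter here is an assumption
about the runtime, not something this diff sets up):

    import struct, warnings

    warnings.simplefilter('error', DeprecationWarning)  # escalate warnings
    try:
        struct.pack('!I', -1)   # negative value in an unsigned 32-bit field
    except (DeprecationWarning, struct.error):
        pass  # which of the two you get depends on the Python version
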
@@ -99,6 +108,23 @@ class Entry:
         if int(st.st_ctime) >= tstart or old != new \
               or self.sha == EMPTY_SHA or not self.gitmode:
             self.invalidate()
+        self._fixup()
+        
+    def _fixup(self):
+        if self.uid < 0:
+            self.uid += 0x100000000
+        if self.gid < 0:
+            self.gid += 0x100000000
+        assert(self.uid >= 0)
+        assert(self.gid >= 0)
+        if self.mtime < -0x80000000:  # can happen in NTFS on 64-bit linux
+            self.mtime = 0
+        if self.ctime < -0x80000000:
+            self.ctime = 0
+        if self.mtime > 0x7fffffff:
+            self.mtime = 0x7fffffff
+        if self.ctime > 0x7fffffff:
+            self.ctime = 0x7fffffff
 
     def is_valid(self):
         f = IX_HASHVALID|IX_EXISTS
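
_fixup() folds negative uid/gid values (as seen from a signed stat field) back
into the unsigned 32-bit range, and forces timestamps that fall outside the
signed 32-bit window back into range: too-old times become 0, too-new times
become the 32-bit maximum. The same arithmetic as a standalone sketch, separate
from the Entry class:

    def clamp32(uid, mtime):
        # a negative id wraps back into the unsigned 32-bit range
        if uid < 0:
            uid += 0x100000000
        # timestamps below the window become 0, above it are capped
        if mtime < -0x80000000:
            mtime = 0
        elif mtime > 0x7fffffff:
            mtime = 0x7fffffff
        return uid, mtime

    assert clamp32(-2, 0) == (0xfffffffe, 0)
    assert clamp32(0, -0x123456789) == (0, 0)           # a pre-1970 time far outside the window
    assert clamp32(0, 0x123456789) == (0, 0x7fffffff)
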
@@ -134,9 +160,9 @@ class Entry:
         return not self.ctime
 
     def __cmp__(a, b):
-        return (cmp(a.name, b.name)
-                or -cmp(a.is_valid(), b.is_valid())
-                or -cmp(a.is_fake(), b.is_fake()))
+        return (cmp(b.name, a.name)
+                or cmp(a.is_valid(), b.is_valid())
+                or cmp(a.is_fake(), b.is_fake()))
 
     def write(self, f):
         f.write(self.basename + '\0' + self.packed())
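
The comparison is flipped: entries now order by descending name, and the
is_valid()/is_fake() tie-breakers sort ascending instead of descending. A
minimal Python 2 stand-in (not the real Entry class) showing the resulting
order:

    class _FakeEntry:
        def __init__(self, name):
            self.name = name
        def __cmp__(self, other):
            return cmp(other.name, self.name)

    names = [e.name for e in sorted(_FakeEntry(n) for n in ('a', 'c', 'b'))]
    assert names == ['c', 'b', 'a']
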
@@ -151,6 +177,7 @@ class NewEntry(Entry):
          self.flags, self.children_ofs, self.children_n
          ) = (dev, int(ctime), int(mtime), uid, gid,
               size, mode, gitmode, sha, flags, children_ofs, children_n)
+        self._fixup()
 
 
 class BlankNewEntry(NewEntry):
@@ -305,6 +332,17 @@ class Reader:
                 yield (name, e)
 
 
+# FIXME: this function isn't very generic, because it splits the filename
+# in an odd way and depends on a terminating '/' to indicate directories.
+def pathsplit(p):
+    """Split a path into a list of elements of the file system hierarchy."""
+    l = p.split('/')
+    l = [i+'/' for i in l[:-1]] + l[-1:]
+    if l[-1] == '':
+        l.pop()  # extra blank caused by terminating '/'
+    return l
+
+
 class Writer:
     def __init__(self, filename):
         self.rootlevel = self.level = Level([], None)
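
pathsplit() keeps the trailing '/' on every directory component, so the result
distinguishes directories from the final leaf name. Expected behaviour,
assuming the function exactly as defined above:

    assert pathsplit('/etc/ssh/sshd_config') == ['/', 'etc/', 'ssh/', 'sshd_config']
    assert pathsplit('/etc/ssh/') == ['/', 'etc/', 'ssh/']
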
@@ -398,10 +436,9 @@ def reduce_paths(paths):
             if stat.S_ISDIR(st.st_mode):
                 rp = slashappend(rp)
                 p = slashappend(p)
+            xpaths.append((rp, p))
         except OSError, e:
-            if e.errno != errno.ENOENT:
-                raise
-        xpaths.append((rp, p))
+            add_error('reduce_paths: %s' % e)
     xpaths.sort()
 
     paths = []
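
The OSError handling in reduce_paths() changes shape: the append moves inside
the try block, and instead of ignoring only ENOENT and re-raising everything
else, any failure to resolve a path is recorded with add_error() (presumably
provided by the `from bup.helpers import *` at the top of the file) and the
loop keeps going. A rough sketch of that accumulate-and-continue pattern, not
the real helper:

    import sys

    saved_errors = []
    def add_error(e):
        # record the problem and report it, but let the caller keep going
        saved_errors.append(e)
        sys.stderr.write('error: %s\n' % e)
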
@@ -415,36 +452,9 @@ def reduce_paths(paths):
     paths.sort(reverse=True)
     return paths
 
-
-class MergeIter:
-    def __init__(self, iters):
-        self.iters = iters
-
-    def __len__(self):
-        # FIXME: doesn't remove duplicated entries between iters.
-        # That only happens for parent directories, but will mean the
-        # actual iteration returns fewer entries than this function counts.
-        return sum(len(it) for it in self.iters)
-
-    def __iter__(self):
-        total = len(self)
-        l = [iter(it) for it in self.iters]
-        l = [(next(it),it) for it in l]
-        l = filter(lambda x: x[0], l)
-        count = 0
-        lastname = None
-        while l:
-            if not (count % 1024):
-                progress('bup: merging indexes (%d/%d)\r' % (count, total))
-            l.sort()
-            (e,it) = l.pop()
-            if not e:
-                continue
-            if e.name != lastname:
-                yield e
-                lastname = e.name
-            n = next(it)
-            if n:
-                l.append((n,it))
-            count += 1
+def merge(*iters):
+    def pfunc(count, total):
+        progress('bup: merging indexes (%d/%d)\r' % (count, total))
+    def pfinal(count, total):
         log('bup: merging indexes (%d/%d), done.\n' % (count, total))
+    return merge_iter(iters, 1024, pfunc, pfinal, key='name')
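
The old MergeIter class is replaced by a thin merge() wrapper around
merge_iter() from bup.helpers, which reports progress every 1024 entries and
de-duplicates on the 'name' attribute. The helper itself is not shown in this
diff; a rough sketch of such a duplicate-dropping k-way merge, under the
assumption that each input iterator is already sorted consistently with
Entry.__cmp__ (illustrative only, not bup's implementation):

    import heapq

    def merge_sketch(iters):
        # k-way merge over already-sorted iterators, skipping consecutive
        # entries that share the same name (e.g. repeated parent directories)
        last = None
        for e in heapq.merge(*iters):
            if e.name != last:
                yield e
                last = e.name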