bup.git: lib/bup/hashsplit.py
Commit: hashsplit.py: okay, *really* fix BLOB_MAX.
import math, os
from bup import _helpers
from bup.helpers import *

BLOB_MAX = 8192*4   # 8192 is the "typical" blob size for bupsplit
BLOB_READ_SIZE = 1024*1024
MAX_PER_TREE = 256
progress_callback = None
max_pack_size = 1000*1000*1000  # larger packs will slow down pruning
max_pack_objects = 200*1000  # cache memory usage is about 83 bytes per object
fanout = 16

# The purpose of this type of buffer is to avoid copying on peek(), get(),
# and eat().  We do copy the buffer contents on put(), but that should
# be ok as long as we only put() large amounts of data at a time.
class Buf:
    def __init__(self):
        self.data = ''
        self.start = 0

    def put(self, s):
        if s:
            self.data = buffer(self.data, self.start) + s
            self.start = 0

    def peek(self, count):
        return buffer(self.data, self.start, count)

    def eat(self, count):
        self.start += count

    def get(self, count):
        v = buffer(self.data, self.start, count)
        self.start += count
        return v

    def used(self):
        return len(self.data) - self.start


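# A quick usage sketch of Buf (illustration only, not part of bup): put()
# accepts a large chunk, peek()/get() return buffer() slices into the stored
# string rather than copies, and eat() just advances self.start.
#
#   buf = Buf()
#   buf.put('x' * (1024*1024))
#   head = buf.peek(4096)         # a buffer() slice; no bytes are copied
#   buf.eat(4096)                 # discard those bytes by moving the offset
#   rest = buf.get(buf.used())    # the remainder, again without copying

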
def readfile_iter(files, progress=None):
    for filenum,f in enumerate(files):
        ofs = 0
        b = ''
        while 1:
            if progress:
                progress(filenum, len(b))
            fadvise_done(f, max(0, ofs - 1024*1024))
            b = f.read(BLOB_READ_SIZE)
            ofs += len(b)
            if not b:
                fadvise_done(f, ofs)
                break
            yield b


def _splitbuf(buf):
    # yield (chunk, bits) for each rolling-checksum split point found in buf,
    # capping each chunk at BLOB_MAX bytes
    while 1:
        b = buf.peek(buf.used())
        (ofs, bits) = _helpers.splitbuf(b)
        if ofs > BLOB_MAX:
            ofs = BLOB_MAX
        if ofs:
            buf.eat(ofs)
            yield buffer(b, 0, ofs), bits
        else:
            break
    while buf.used() >= BLOB_MAX:
        # limit max blob size
        yield buf.get(BLOB_MAX), 0


def _hashsplit_iter(files, progress):
    assert(BLOB_READ_SIZE > BLOB_MAX)
    buf = Buf()
    for inblock in readfile_iter(files, progress):
        buf.put(inblock)
        for buf_and_bits in _splitbuf(buf):
            yield buf_and_bits
    if buf.used():
        yield buf.get(buf.used()), 0


def _hashsplit_iter_keep_boundaries(files, progress):
    for real_filenum,f in enumerate(files):
        if progress:
            def prog(filenum, nbytes):
                # the inner _hashsplit_iter doesn't know the real file count,
                # so we'll replace it here.
                return progress(real_filenum, nbytes)
        else:
            prog = None
        for buf_and_bits in _hashsplit_iter([f], progress=prog):
            yield buf_and_bits


def hashsplit_iter(files, keep_boundaries, progress):
    if keep_boundaries:
        return _hashsplit_iter_keep_boundaries(files, progress)
    else:
        return _hashsplit_iter(files, progress)

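# A hedged example of driving hashsplit_iter directly (illustrative; it
# assumes the bup C extension is built, since _splitbuf relies on
# _helpers.splitbuf):
#
#   f = open_noatime('/some/big/file')
#   for (chunk, bits) in hashsplit_iter([f], keep_boundaries=False,
#                                       progress=None):
#       # 'chunk' is a buffer of at most BLOB_MAX bytes; 'bits' describes
#       # the checksum match at the split point, and is 0 for chunks that
#       # were force-split at BLOB_MAX or at end of input.
#       process(chunk)   # 'process' is a stand-in for your own code
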

total_split = 0
def _split_to_blobs(w, files, keep_boundaries, progress):
    global total_split
    for (blob, bits) in hashsplit_iter(files, keep_boundaries, progress):
        sha = w.new_blob(blob)
        total_split += len(blob)
        if w.outbytes >= max_pack_size or w.count >= max_pack_objects:
            w.breakpoint()
        if progress_callback:
            progress_callback(len(blob))
        yield (sha, len(blob), bits)


def _make_shalist(l):
    ofs = 0
    shalist = []
    for (mode, sha, size) in l:
        # name each entry by its byte offset, zero-padded so the names
        # sort in the same order as the offsets
        shalist.append((mode, '%016x' % ofs, sha))
        ofs += size
    total = ofs
    return (shalist, total)

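# For example (hypothetical sizes), three blobs of 8192, 4096, and 100 bytes
# become tree entries named by cumulative offset:
#
#   [('100644', '0000000000000000', sha1),
#    ('100644', '0000000000002000', sha2),    # 8192 = 0x2000
#    ('100644', '0000000000003000', sha3)]    # 8192+4096 = 0x3000
#
# and _make_shalist() returns total == 8192+4096+100 == 12388.
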

def _squish(w, stacks, n):
    # Flatten the lower levels of the fanout stack into git trees: every
    # level below n is closed out, and any level that has grown past
    # MAX_PER_TREE is closed too.  A level holding a single entry is
    # promoted as-is rather than wrapped in a one-entry tree.
    i = 0
    while i<n or len(stacks[i]) > MAX_PER_TREE:
        while len(stacks) <= i+1:
            stacks.append([])
        if len(stacks[i]) == 1:
            stacks[i+1] += stacks[i]
        elif stacks[i]:
            (shalist, size) = _make_shalist(stacks[i])
            tree = w.new_tree(shalist)
            stacks[i+1].append(('40000', tree, size))
        stacks[i] = []
        i += 1


def split_to_shalist(w, files, keep_boundaries, progress=None):
    sl = _split_to_blobs(w, files, keep_boundaries, progress)
    if not fanout:
        shal = []
        for (sha,size,bits) in sl:
            shal.append(('100644', sha, size))
        return _make_shalist(shal)[0]
    else:
        base_bits = _helpers.blobbits()
        fanout_bits = int(math.log(fanout, 2))
        def bits_to_idx(n):
            assert(n >= base_bits)
            return (n - base_bits)/fanout_bits
        stacks = [[]]
        for (sha,size,bits) in sl:
            assert(bits <= 32)
            stacks[0].append(('100644', sha, size))
            if bits > base_bits:
                _squish(w, stacks, bits_to_idx(bits))
        #log('stacks: %r\n' % [len(i) for i in stacks])
        _squish(w, stacks, len(stacks)-1)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        return _make_shalist(stacks[-1])[0]

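# Worked example of the fanout math above (assuming _helpers.blobbits()
# returns its usual value of 13, so the "typical" blob is 2**13 == 8192
# bytes, matching the BLOB_MAX comment at the top):
#
#   fanout = 16  =>  fanout_bits = int(math.log(16, 2)) = 4
#   a split point with bits = 21 gives bits_to_idx(21) = (21-13)/4 = 2,
#   so _squish() closes out stack levels 0 and 1, pushing their trees up.
#
# Each extra matching checksum bit beyond the base is half as likely as the
# last, which is what keeps the resulting tree depth roughly logarithmic.
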

def split_to_blob_or_tree(w, files, keep_boundaries):
    shalist = list(split_to_shalist(w, files, keep_boundaries))
    if len(shalist) == 1:
        return (shalist[0][0], shalist[0][2])
    elif len(shalist) == 0:
        return ('100644', w.new_blob(''))
    else:
        return ('40000', w.new_tree(shalist))


def open_noatime(name):
    fd = _helpers.open_noatime(name)
    try:
        return os.fdopen(fd, 'rb', 1024*1024)
    except:
        try:
            os.close(fd)
        except:
            pass
        raise


def fadvise_done(f, ofs):
    assert(ofs >= 0)
    if ofs > 0 and hasattr(f, 'fileno'):
        _helpers.fadvise_done(f.fileno(), ofs)
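

# A rough end-to-end sketch of how these pieces fit together (illustrative
# only; it assumes an initialized bup repository, and PackWriter/new_tree
# come from bup.git, not from this module):
#
#   from bup import git, hashsplit
#   git.check_repo_or_die()
#   w = git.PackWriter()
#   f = hashsplit.open_noatime('/some/big/file')
#   shalist = hashsplit.split_to_shalist(w, [f], keep_boundaries=False)
#   tree = w.new_tree(shalist)    # the splitter's top-level tree id
#   w.close()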