import math, os
from bup import _helpers
from bup.helpers import *

# BLOB_*, MAX_PER_TREE and fanout are referenced below but were missing from
# this excerpt; the values here are bup's usual defaults (assumptions).
BLOB_LWM = 8192*2               # low-water mark
BLOB_MAX = BLOB_LWM*2           # hard cap on a single blob
BLOB_HWM = 1024*1024            # high-water mark for the split buffer
MAX_PER_TREE = 256
progress_callback = None
fanout = 16                     # default; bup-split can override this
max_pack_size = 1000*1000*1000  # larger packs will slow down pruning
max_pack_objects = 200*1000     # cache memory usage is about 83 bytes per object
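# Worked example of the comment above (illustrative, not from the original
# source): a pack capped at max_pack_objects = 200*1000 objects costs about
# 200000*83 = 16.6 MB of object-cache memory.
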
class Buf:
    def __init__(self):
        self.data = ''
        self.start = 0

    def put(self, s):
        if s:
            self.data = buffer(self.data, self.start) + s
            self.start = 0

    def peek(self, count):
        return buffer(self.data, self.start, count)

    def eat(self, count):
        self.start += count

    def get(self, count):
        v = buffer(self.data, self.start, count)
        self.start += count
        return v

    def used(self):
        return len(self.data) - self.start

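# Illustrative Buf usage (a sketch against the reconstruction above, not
# part of the original module):
#
#   buf = Buf()
#   buf.put('hello world')
#   str(buf.peek(5))    # 'hello' -- non-destructive
#   str(buf.get(6))     # 'hello ' -- returns and consumes
#   buf.used()          # 5, i.e. 'world' remains
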
def splitbuf(buf):
    b = buf.peek(buf.used())
    (ofs, bits) = _helpers.splitbuf(b)
    if ofs:
        buf.eat(ofs)
        return (buffer(b, 0, ofs), bits)
    return (None, 0)

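# Note: _helpers.splitbuf() scans with bup's rolling checksum and returns
# (ofs, bits): ofs is the offset just past the first split point (0 if none
# was found in the buffer), and bits counts the low checksum bits that
# matched, which the fanout logic below uses to pick a tree level.
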
def blobiter(files, progress=None):
    for filenum,f in enumerate(files):
        ofs = 0
        b = ''  # so the first progress() call reports 0 bytes, not a NameError
        while 1:
            if progress:
                progress(filenum, len(b))
            fadvise_done(f, max(0, ofs - 1024*1024))
            b = f.read(BLOB_HWM)  # read size is an assumption; any large chunk works
            ofs += len(b)
            if not b:
                fadvise_done(f, ofs)
                break
            yield b

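# The fadvise_done() call above tells the kernel we're finished with
# everything more than 1MB behind the current read offset, so splitting a
# huge file doesn't evict the rest of the OS page cache.
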
def drainbuf(buf, finalize):
    while 1:
        (blob, bits) = splitbuf(buf)
        if not blob:
            break
        yield (blob, bits)
    if buf.used() > BLOB_MAX:
        # limit max blob size
        yield (buf.get(buf.used()), 0)
    elif finalize and buf.used():
        yield (buf.get(buf.used()), 0)

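# drainbuf() yields every complete chunk currently in 'buf'.  A trailing
# partial chunk is flushed only when it outgrows BLOB_MAX (capping blob
# size) or when finalize=True at end of input; such forced chunks carry
# bits=0 because they didn't end on a real split boundary.
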
def _hashsplit_iter(files, progress):
    assert(BLOB_HWM > BLOB_MAX)
    buf = Buf()
    fi = blobiter(files, progress)
    while 1:
        for i in drainbuf(buf, finalize=False):
            yield i
        while buf.used() < BLOB_HWM:
            bnew = next(fi)  # helpers.next() returns None at end of stream
            if not bnew:
                # eof
                for i in drainbuf(buf, finalize=True):
                    yield i
                return
            buf.put(bnew)

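# The loop above alternates between draining whatever complete chunks the
# buffer holds and refilling it up to BLOB_HWM.  The assert guarantees a
# full buffer always exceeds the blob-size cap, so even data with no
# natural split points gets force-flushed by drainbuf() instead of growing
# without bound.
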
def _hashsplit_iter_keep_boundaries(files, progress):
    for real_filenum,f in enumerate(files):
        if progress:
            def prog(filenum, nbytes):
                # the inner _hashsplit_iter sees only a one-file list, so its
                # filenum is always 0; substitute the real file number here.
                return progress(real_filenum, nbytes)
        else:
            prog = None
        for i in _hashsplit_iter([f], progress=prog):
            yield i

def hashsplit_iter(files, keep_boundaries, progress):
    if keep_boundaries:
        return _hashsplit_iter_keep_boundaries(files, progress)
    else:
        return _hashsplit_iter(files, progress)

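# Example use (a sketch, not from the original source): consume the chunk
# stream directly, with no PackWriter involved.
#
#   for (blob, bits) in hashsplit_iter([open('/tmp/big.img', 'rb')],
#                                      keep_boundaries=False, progress=None):
#       ...  # blob is a buffer; bits reflects the split strength
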
total_split = 0
def _split_to_blobs(w, files, keep_boundaries, progress):
    global total_split
    for (blob, bits) in hashsplit_iter(files, keep_boundaries, progress):
        sha = w.new_blob(blob)
        total_split += len(blob)
        if w.outbytes >= max_pack_size or w.count >= max_pack_objects:
            w.breakpoint()  # finish the current pack and start a new one
        if progress_callback:
            progress_callback(len(blob))
        yield (sha, len(blob), bits)

def _make_shalist(l):
    ofs = 0
    shalist = []
    for (mode, sha, size) in l:
        shalist.append((mode, '%016x' % ofs, sha))
        ofs += size
    total = ofs
    return (shalist, total)

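# Worked example (illustrative, not from the original source): for
# l = [('100644', sha1, 10), ('100644', sha2, 20)] the generated names are
# '0000000000000000' and '000000000000000a' -- each chunk is named by the
# hex offset where it starts -- and total comes back as 30.
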
def _squish(w, stacks, n):
    i = 0
    while i < n or len(stacks[i]) > MAX_PER_TREE:
        while len(stacks) <= i+1:
            stacks.append([])
        if len(stacks[i]) == 1:
            stacks[i+1] += stacks[i]
        elif stacks[i]:
            (shalist, size) = _make_shalist(stacks[i])
            tree = w.new_tree(shalist)
            stacks[i+1].append(('40000', tree, size))
        stacks[i] = []
        i += 1

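# _squish() folds levels of 'stacks' upward: each level holding multiple
# entries becomes a git tree whose ('40000', tree, size) record is pushed
# onto the level above, so no single tree accumulates more than
# MAX_PER_TREE entries.
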
def split_to_shalist(w, files, keep_boundaries, progress=None):
    sl = _split_to_blobs(w, files, keep_boundaries, progress)
    if not fanout:
        shal = []
        for (sha,size,bits) in sl:
            shal.append(('100644', sha, size))
        return _make_shalist(shal)[0]
    else:
        base_bits = _helpers.blobbits()
        fanout_bits = int(math.log(fanout, 2))
        def bits_to_idx(n):
            assert(n >= base_bits)
            return (n - base_bits)/fanout_bits  # integer division in Python 2
        stacks = [[]]
        for (sha,size,bits) in sl:
            assert(bits <= 32)
            stacks[0].append(('100644', sha, size))
            if bits > base_bits:
                _squish(w, stacks, bits_to_idx(bits))
        #log('stacks: %r\n' % [len(i) for i in stacks])
        _squish(w, stacks, len(stacks)-1)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        return _make_shalist(stacks[-1])[0]

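# Fanout arithmetic (illustrative; assumes _helpers.blobbits() == 13, bup's
# usual value): with fanout = 16 we get fanout_bits = 4, so a split point
# that matched bits = 21 squishes up to level (21 - 13) / 4 = 2; stronger
# checksum matches close off higher-level subtrees.
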
def split_to_blob_or_tree(w, files, keep_boundaries):
    shalist = list(split_to_shalist(w, files, keep_boundaries))
    if len(shalist) == 1:
        return (shalist[0][0], shalist[0][2])
    elif len(shalist) == 0:
        return ('100644', w.new_blob(''))
    else:
        return ('40000', w.new_tree(shalist))

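# The result mimics a single git tree entry: ('100644', sha) for a lone
# blob (or empty input), ('40000', sha) for a tree of chunks.
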
def open_noatime(name):
    fd = _helpers.open_noatime(name)
    try:
        return os.fdopen(fd, 'rb', 1024*1024)
    except:
        try:
            os.close(fd)  # don't leak the descriptor if fdopen() fails
        except:
            pass
        raise

def fadvise_done(f, ofs):
    assert(ofs >= 0)
    if ofs > 0 and hasattr(f, 'fileno'):
        _helpers.fadvise_done(f.fileno(), ofs)

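
if __name__ == '__main__':
    # Smoke test (a sketch, not part of the original module): split this
    # file's own source and print each chunk.  Only the iterator layer is
    # exercised, so no git repository or PackWriter is needed, but bup's
    # _helpers C extension must be importable.
    import sys
    f = open(sys.argv[0], 'rb')
    total = 0
    for (blob, bits) in hashsplit_iter([f], keep_boundaries=False,
                                       progress=None):
        total += len(blob)
        print '%6d bytes (bits=%d)' % (len(blob), bits)
    print 'total: %d bytes' % total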