import math, os

from bup import _helpers
from bup.helpers import *

BLOB_MAX = 8192*4   # 8192 is the "typical" blob size for bupsplit
BLOB_READ_SIZE = 1024*1024
MAX_PER_TREE = 256
progress_callback = None
max_pack_size = 1000*1000*1000  # larger packs will slow down pruning
max_pack_objects = 200*1000  # cache memory usage is about 83 bytes per object
fanout = 16

# The purpose of this type of buffer is to avoid copying on peek(), get(),
# and eat().  We do copy the buffer contents on put(), but that should
# be ok if we always only put() large amounts of data at a time.
class Buf:
    def __init__(self):
        self.data = ''
        self.start = 0

    def put(self, s):
        if s:
            self.data = buffer(self.data, self.start) + s
            self.start = 0

    def peek(self, count):
        return buffer(self.data, self.start, count)

    def eat(self, count):
        self.start += count

    def get(self, count):
        v = buffer(self.data, self.start, count)
        self.start += count
        return v

    def used(self):
        return len(self.data) - self.start
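
# Illustrative sketch only (not part of the original module): how Buf is
# meant to be used by the splitter below.  put() appends a large chunk;
# peek()/get() hand back buffer() views without copying; eat() just
# advances self.start.
#
#   buf = Buf()
#   buf.put(f.read(BLOB_READ_SIZE))
#   head = buf.peek(buf.used())   # zero-copy view of everything buffered
#   blob = buf.get(4096)          # zero-copy view, then skip past those bytes
#   buf.eat(123)                  # discard 123 more bytes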


# Read each file in BLOB_READ_SIZE chunks, reporting progress and hinting
# (via fadvise_done) that already-consumed data can leave the page cache.
def readfile_iter(files, progress=None):
    for filenum,f in enumerate(files):
        ofs = 0
        b = ''
        while 1:
            if progress:
                progress(filenum, len(b))
            fadvise_done(f, max(0, ofs - 1024*1024))
            b = f.read(BLOB_READ_SIZE)
            ofs += len(b)
            if not b:
                fadvise_done(f, ofs)
                break
            yield b


# Split whatever is currently buffered at the rolling-checksum boundaries
# found by _helpers.splitbuf(), yielding (blob, level) pairs.
def _splitbuf(buf, basebits, fanbits):
    while 1:
        b = buf.peek(buf.used())
        (ofs, bits) = _helpers.splitbuf(b)
        if ofs > BLOB_MAX:
            ofs = BLOB_MAX
        if ofs:
            buf.eat(ofs)
            level = (bits-basebits)//fanbits  # integer division
            yield buffer(b, 0, ofs), level
        else:
            break
    while buf.used() >= BLOB_MAX:
        # limit max blob size
        yield buf.get(BLOB_MAX), 0
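
# Worked example (assuming the usual 13-bit split, i.e. the 8192-byte
# "typical" blob, and the default fanout of 16, so fanbits == 4): if
# splitbuf() finds a boundary whose checksum matched 21 low-order bits,
# that chunk is emitted at level (21-13)//4 == 2, i.e. two levels up the
# fanned-out tree.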


def _hashsplit_iter(files, progress):
    assert(BLOB_READ_SIZE > BLOB_MAX)
    basebits = _helpers.blobbits()
    fanbits = int(math.log(fanout or 128, 2))
    buf = Buf()
    for inblock in readfile_iter(files, progress):
        buf.put(inblock)
        for buf_and_level in _splitbuf(buf, basebits, fanbits):
            yield buf_and_level
    if buf.used():
        yield buf.get(buf.used()), 0


def _hashsplit_iter_keep_boundaries(files, progress):
    for real_filenum,f in enumerate(files):
        if progress:
            def prog(filenum, nbytes):
                # the inner _hashsplit_iter doesn't know the real file count,
                # so we'll replace it here.
                return progress(real_filenum, nbytes)
        else:
            prog = None
        for buf_and_level in _hashsplit_iter([f], progress=prog):
            yield buf_and_level


def hashsplit_iter(files, keep_boundaries, progress):
    if keep_boundaries:
        return _hashsplit_iter_keep_boundaries(files, progress)
    else:
        return _hashsplit_iter(files, progress)
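
# Illustrative sketch only (the file name is made up): consuming the split
# stream directly, without writing anything to a packfile.
#
#   for blob, level in hashsplit_iter([open('big.img', 'rb')],
#                                     keep_boundaries=False, progress=None):
#       pass  # each blob is at most BLOB_MAX bytes; level hints at tree depth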


total_split = 0
def _split_to_blobs(w, files, keep_boundaries, progress):
    global total_split
    for (blob, level) in hashsplit_iter(files, keep_boundaries, progress):
        sha = w.new_blob(blob)
        total_split += len(blob)
        if w.outbytes >= max_pack_size or w.count >= max_pack_objects:
            w.breakpoint()
        if progress_callback:
            progress_callback(len(blob))
        yield (sha, len(blob), level)


def _make_shalist(l):
    ofs = 0
    shalist = []
    for (mode, sha, size) in l:
        shalist.append((mode, '%016x' % ofs, sha))
        ofs += size
    total = ofs
    return (shalist, total)
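
# Worked example: three 4096-byte blobs come out named by their starting
# byte offset, zero-padded to 16 hex digits --
# '0000000000000000', '0000000000001000', '0000000000002000' --
# so the entries of the resulting git tree sort in file order.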


def _squish(w, stacks, n):
    i = 0
    while i<n or len(stacks[i]) > MAX_PER_TREE:
        while len(stacks) <= i+1:
            stacks.append([])
        if len(stacks[i]) == 1:
            stacks[i+1] += stacks[i]
        elif stacks[i]:
            (shalist, size) = _make_shalist(stacks[i])
            tree = w.new_tree(shalist)
            stacks[i+1].append(('40000', tree, size))
        stacks[i] = []
        i += 1
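
# In other words, _squish() folds each completed (or oversized) stack level
# into a single git tree object and pushes that tree one level up, so no tree
# ever holds more than MAX_PER_TREE entries and higher levels describe ever
# larger spans of the input.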


def split_to_shalist(w, files, keep_boundaries, progress=None):
    sl = _split_to_blobs(w, files, keep_boundaries, progress)
    if not fanout:
        shal = []
        for (sha,size,level) in sl:
            shal.append(('100644', sha, size))
        return _make_shalist(shal)[0]
    else:
        stacks = [[]]
        for (sha,size,level) in sl:
            stacks[0].append(('100644', sha, size))
            if level:
                _squish(w, stacks, level)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        _squish(w, stacks, len(stacks)-1)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        return _make_shalist(stacks[-1])[0]
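
# The returned shalist describes one git tree: '100644' entries are raw data
# blobs, '40000' entries are subtrees produced by _squish(), and every entry
# is named by the 16-digit hex offset of the data it covers.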


def split_to_blob_or_tree(w, files, keep_boundaries):
    shalist = list(split_to_shalist(w, files, keep_boundaries))
    if len(shalist) == 1:
        return (shalist[0][0], shalist[0][2])
    elif len(shalist) == 0:
        return ('100644', w.new_blob(''))
    else:
        return ('40000', w.new_tree(shalist))
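
# Illustrative sketch only (the PackWriter setup and file name are
# assumptions, not something this module does for you):
#
#   w = git.PackWriter()
#   mode, id = split_to_blob_or_tree(w, [open('big.img', 'rb')],
#                                    keep_boundaries=False)
#   w.close()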


def open_noatime(name):
    fd = _helpers.open_noatime(name)
    try:
        return os.fdopen(fd, 'rb', 1024*1024)
    except:
        try:
            os.close(fd)
        except:
            pass
        raise


# Hint to the OS that we have finished with the first ofs bytes of f, so any
# cached pages for them can be dropped.
def fadvise_done(f, ofs):
    assert(ofs >= 0)
    if ofs > 0 and hasattr(f, 'fileno'):
        _helpers.fadvise_done(f.fileno(), ofs)