from __future__ import absolute_import
import io, math, os

from bup import _helpers, compat, helpers
from bup._helpers import cat_bytes
from bup.compat import buffer, py_maj
from bup.helpers import sc_page_size

_fmincore = getattr(helpers, 'fmincore', None)

BLOB_MAX = 8192*4   # 8192 is the "typical" blob size for bupsplit
BLOB_READ_SIZE = 1024*1024
MAX_PER_TREE = 256  # maximum entries allowed in a single tree
progress_callback = None
fanout = 16  # chunk-grouping factor; see _hashsplit_iter() and _squish()

GIT_MODE_FILE = 0o100644
GIT_MODE_TREE = 0o40000
GIT_MODE_SYMLINK = 0o120000


# The purpose of this type of buffer is to avoid copying on peek(), get(),
# and eat().  We do copy the buffer contents on put(), but that should
# be ok if we always only put() large amounts of data at a time.
class Buf:
    def __init__(self):
        self.data = b''
        self.start = 0

    def put(self, s):
        if not self.data:
            self.data = s
            self.start = 0
        elif s:
            remaining = len(self.data) - self.start
            self.data = cat_bytes(self.data, self.start, remaining,
                                  s, 0, len(s))
            self.start = 0

    def peek(self, count):
        if count <= 256:  # small enough that a copy is cheaper than a view
            return self.data[self.start : self.start + count]
        return buffer(self.data, self.start, count)

    def eat(self, count):
        self.start += count

    def get(self, count):
        if count <= 256:
            v = self.data[self.start : self.start + count]
        else:
            v = buffer(self.data, self.start, count)
        self.start += count
        return v

    def used(self):
        return len(self.data) - self.start
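
# Illustration only (not part of bup's API): a Buf accumulates large
# put()s and hands back cheap views, e.g.:
#   buf = Buf()
#   buf.put(b'a' * BLOB_READ_SIZE)
#   head = buf.peek(buf.used())  # a view for large counts, no copy
#   buf.eat(4096)                # advance past consumed bytes
#   assert buf.used() == BLOB_READ_SIZE - 4096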


def _fadvise_pages_done(fd, first_page, count):
    assert(first_page >= 0)
    assert(count >= 0)
    if count > 0:
        _helpers.fadvise_done(fd,
                              first_page * sc_page_size,
                              count * sc_page_size)


def _nonresident_page_regions(status_bytes, incore_mask, max_region_len=None):
    """Return (start_page, count) pairs in ascending start_page order for
    each contiguous region of nonresident pages indicated by the
    mincore() status_bytes.  Limit the number of pages in each region
    to max_region_len."""
    assert(max_region_len is None or max_region_len > 0)
    start = None
    for i, x in enumerate(status_bytes):
        in_core = x & incore_mask
        if start is None:
            if not in_core:
                start = i
        else:
            count = i - start
            if in_core:
                yield (start, count)
                start = None
            elif max_region_len and count >= max_region_len:
                yield (start, count)
                start = i
    if start is not None:
        yield (start, len(status_bytes) - start)
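
# For illustration (hypothetical status bytes, incore_mask=1): the input
# [1, 0, 0, 1, 0] has nonresident pages at indexes 1-2 and 4, so
#   list(_nonresident_page_regions([1, 0, 0, 1, 0], 1))
# yields [(1, 2), (4, 1)].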


def _uncache_ours_upto(fd, offset, first_region, remaining_regions):
    """Uncache the pages of fd indicated by first_region and
    remaining_regions that are before offset, where each region is a
    (start_page, count) pair.  The final region must have a start_page
    of None."""
    rstart, rlen = first_region
    while rstart is not None and (rstart + rlen) * sc_page_size <= offset:
        _fadvise_pages_done(fd, rstart, rlen)
        rstart, rlen = next(remaining_regions, (None, None))
    return (rstart, rlen)
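
# Callers thread the current region through repeated calls, e.g. (sketch):
#   rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
# which drops fully-read regions from the page cache and returns the first
# region that still extends past ofs.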


def readfile_iter(files, progress=None):
    for filenum,f in enumerate(files):
        ofs = 0
        b = b''
        fd = rpr = rstart = rlen = None
        if _fmincore and hasattr(f, 'fileno'):
            try:
                fd = f.fileno()
            except io.UnsupportedOperation:
                pass
            if fd:
                mcore = _fmincore(fd)
                if mcore:
                    max_chunk = max(1, (8 * 1024 * 1024) // sc_page_size)
                    rpr = _nonresident_page_regions(mcore, helpers.MINCORE_INCORE,
                                                    max_chunk)
                    rstart, rlen = next(rpr, (None, None))
        while 1:
            if progress:
                progress(filenum, len(b))
            b = f.read(BLOB_READ_SIZE)
            ofs += len(b)
            if rpr:
                rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
            if not b:
                break
            yield b
        if rpr:
            rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
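
# Sketch of intended use (hypothetical path and no-op progress callback):
#   with open_noatime(b'/some/file') as f:
#       for block in readfile_iter([f], progress=lambda fn, n: None):
#           ...  # each block is at most BLOB_READ_SIZE bytes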


def _splitbuf(buf, basebits, fanbits):
    while 1:
        b = buf.peek(buf.used())
        (ofs, bits) = _helpers.splitbuf(b)
        if ofs:
            if ofs > BLOB_MAX:
                ofs = BLOB_MAX
                level = 0
            else:
                level = (bits-basebits)//fanbits  # integer division
            buf.eat(ofs)
            yield buffer(b, 0, ofs), level
        else:
            break
    while buf.used() >= BLOB_MAX:
        # limit max blob size
        yield buf.get(BLOB_MAX), 0
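
# For illustration: with the usual basebits of 13 (2**13 == 8192, the
# "typical" blob size) and fanout=16 (fanbits=4), a split point whose
# rolling checksum produced bits=21 gets level (21-13)//4 == 2, i.e. it
# closes a chunk two tree levels up.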


def _hashsplit_iter(files, progress):
    assert(BLOB_READ_SIZE > BLOB_MAX)
    basebits = _helpers.blobbits()
    fanbits = int(math.log(fanout or 128, 2))
    buf = Buf()
    for inblock in readfile_iter(files, progress):
        buf.put(inblock)
        for buf_and_level in _splitbuf(buf, basebits, fanbits):
            yield buf_and_level
    if buf.used():
        yield buf.get(buf.used()), 0


def _hashsplit_iter_keep_boundaries(files, progress):
    for real_filenum,f in enumerate(files):
        if progress:
            def prog(filenum, nbytes):
                # the inner _hashsplit_iter doesn't know the real file count,
                # so we'll replace it here.
                return progress(real_filenum, nbytes)
        else:
            prog = None
        for buf_and_level in _hashsplit_iter([f], progress=prog):
            yield buf_and_level


def hashsplit_iter(files, keep_boundaries, progress):
    if keep_boundaries:
        return _hashsplit_iter_keep_boundaries(files, progress)
    else:
        return _hashsplit_iter(files, progress)
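
# Illustration only: splitting an in-memory stream (BytesIO is file-like
# enough for the non-mincore path):
#   from io import BytesIO
#   for blob, level in hashsplit_iter([BytesIO(b'x' * 100000)],
#                                     keep_boundaries=False, progress=None):
#       ...  # len(blob) <= BLOB_MAX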


total_split = 0
def split_to_blobs(makeblob, files, keep_boundaries, progress):
    global total_split
    for (blob, level) in hashsplit_iter(files, keep_boundaries, progress):
        sha = makeblob(blob)
        total_split += len(blob)
        if progress_callback:
            progress_callback(len(blob))
        yield (sha, len(blob), level)
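
# Sketch (hypothetical makeblob, given an open file object f): any callable
# that stores the bytes and returns an id will do; bup normally passes a
# git blob writer.
#   import hashlib
#   fake_makeblob = lambda b: hashlib.sha1(b).digest()
#   for sha, size, level in split_to_blobs(fake_makeblob, [f],
#                                          keep_boundaries=False,
#                                          progress=None):
#       ...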


def _make_shalist(l):
    ofs = 0
    total = sum(size for mode,sha,size in l)
    vlen = len(b'%x' % total)
    shalist = []
    for (mode, sha, size) in l:
        shalist.append((mode, b'%0*x' % (vlen,ofs), sha))
        ofs += size
    assert(ofs == total)
    return (shalist, total)
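
# For illustration: three 100-byte chunks (total 300, so b'%x' gives b'12c'
# and vlen is 3) are named by their zero-padded hex offsets within the file:
#   b'000', b'064', b'0c8'
# so git's sorted tree order matches file order.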


def _squish(maketree, stacks, n):
    i = 0
    while i < n or len(stacks[i]) >= MAX_PER_TREE:
        while len(stacks) <= i+1:
            stacks.append([])
        if len(stacks[i]) == 1:
            stacks[i+1] += stacks[i]
        elif stacks[i]:
            (shalist, size) = _make_shalist(stacks[i])
            tree = maketree(shalist)
            stacks[i+1].append((GIT_MODE_TREE, tree, size))
        stacks[i] = []
        i += 1
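
# Sketch of the effect: stacks[0] holds blobs, stacks[1] holds subtrees of
# blobs, and so on.  A chunk that split at level 2 squishes stacks[0] and
# stacks[1] upward into trees, and any stack that reaches MAX_PER_TREE is
# likewise folded into the level above, so no single tree grows unbounded.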


def split_to_shalist(makeblob, maketree, files,
                     keep_boundaries, progress=None):
    sl = split_to_blobs(makeblob, files, keep_boundaries, progress)
    if not fanout:
        shal = []
        for (sha,size,level) in sl:
            shal.append((GIT_MODE_FILE, sha, size))
        return _make_shalist(shal)[0]
    else:
        stacks = [[]]
        for (sha,size,level) in sl:
            stacks[0].append((GIT_MODE_FILE, sha, size))
            _squish(maketree, stacks, level)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        _squish(maketree, stacks, len(stacks)-1)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        return _make_shalist(stacks[-1])[0]


def split_to_blob_or_tree(makeblob, maketree, files,
                          keep_boundaries, progress=None):
    shalist = list(split_to_shalist(makeblob, maketree,
                                    files, keep_boundaries, progress))
    if len(shalist) == 1:
        return (shalist[0][0], shalist[0][2])
    elif len(shalist) == 0:
        return (GIT_MODE_FILE, makeblob(b''))
    else:
        return (GIT_MODE_TREE, maketree(shalist))


def open_noatime(name):
    fd = _helpers.open_noatime(name)
    try:
        return os.fdopen(fd, 'rb', 1024*1024)
    except:
        # don't leak the descriptor if fdopen fails
        try:
            os.close(fd)
        except:
            pass
        raise
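
# Usage sketch: open a file for reading without disturbing its atime (where
# the platform supports that), buffered for BLOB_READ_SIZE-sized reads:
#   f = open_noatime(b'/path/to/file')  # hypothetical path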