import math, os

from bup import _helpers
from bup.helpers import *

try:
    _fmincore = _helpers.fmincore
except AttributeError:
    _fmincore = None

_page_size = os.sysconf("SC_PAGE_SIZE")

BLOB_MAX = 8192*4   # 8192 is the "typical" blob size for bupsplit
BLOB_READ_SIZE = 1024*1024
MAX_PER_TREE = 256
progress_callback = None
fanout = 16

GIT_MODE_FILE = 0100644
GIT_MODE_TREE = 040000
GIT_MODE_SYMLINK = 0120000
assert(GIT_MODE_TREE != 40000)  # 0xxx should be treated as octal
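
# 040000 above is an octal literal (decimal 16384); the assert only fires
# if the tree mode is mistakenly written as the decimal number 40000.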

# The purpose of this type of buffer is to avoid copying on peek(), get(),
# and eat().  We do copy the buffer contents on put(), but that should
# be ok if we always only put() large amounts of data at a time.
class Buf:
    def __init__(self):
        self.data = ''
        self.start = 0

    def put(self, s):
        if s:
            self.data = buffer(self.data, self.start) + s
            self.start = 0

    def peek(self, count):
        return buffer(self.data, self.start, count)

    def eat(self, count):
        self.start += count

    def get(self, count):
        v = buffer(self.data, self.start, count)
        self.start += count
        return v

    def used(self):
        return len(self.data) - self.start
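
# A minimal sketch of the intended Buf flow, assuming the caller put()s
# whole read chunks and then carves them up without copying:
#   buf = Buf()
#   buf.put(f.read(BLOB_READ_SIZE))
#   head = buf.peek(4096)  # a buffer view, no copy
#   buf.eat(4096)          # advance past the peeked bytes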


def _fadvise_pages_done(fd, first_page, count):
    assert(first_page >= 0)
    assert(count >= 0)
    if count > 0:
        _helpers.fadvise_done(fd, first_page * _page_size,
                              count * _page_size)


def _nonresident_page_regions(status_bytes, max_region_len=None):
    """Return (start_page, count) pairs in ascending start_page order for
    each contiguous region of nonresident pages indicated by the
    mincore() status_bytes.  Limit the number of pages in each region
    to max_region_len."""
    assert(max_region_len is None or max_region_len > 0)
    start = None
    for i, x in enumerate(status_bytes):
        # mincore(2) sets the low bit of each status byte when the
        # corresponding page is resident in core.
        in_core = ord(x) & 1
        if start is None:
            if not in_core:
                start = i
        else:
            count = i - start
            if in_core:
                yield (start, count)
                start = None
            elif max_region_len and count >= max_region_len:
                yield (start, count)
                start = i
    if start is not None:
        yield (start, len(status_bytes) - start)
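
# For example, status bytes '\x01\x00\x00\x01\x00' (pages 1, 2, and 4
# nonresident) yield (1, 2) and then (4, 1).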


def _uncache_ours_upto(fd, offset, first_region, remaining_regions):
    """Uncache the pages of fd indicated by first_region and
    remaining_regions that are before offset, where each region is a
    (start_page, count) pair.  The final region must have a start_page
    of None."""
    rstart, rlen = first_region
    while rstart is not None and (rstart + rlen) * _page_size <= offset:
        _fadvise_pages_done(fd, rstart, rlen)
        rstart, rlen = next(remaining_regions, (None, None))
    return (rstart, rlen)
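
# Callers pass a monotonically increasing offset as they read, feeding the
# (rstart, rlen) pair returned by the previous call back in as first_region;
# the returned pair is the first region not yet entirely below offset.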


def readfile_iter(files, progress=None):
    for filenum,f in enumerate(files):
        ofs = 0
        b = ''
        fd = rpr = rstart = rlen = None
        if _fmincore and hasattr(f, 'fileno'):
            fd = f.fileno()
            max_chunk = max(1, (8 * 1024 * 1024) / _page_size)
            rpr = _nonresident_page_regions(_helpers.fmincore(fd), max_chunk)
            rstart, rlen = next(rpr, (None, None))
        while 1:
            if progress:
                progress(filenum, len(b))
            b = f.read(BLOB_READ_SIZE)
            ofs += len(b)
            if rpr:
                rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
            if not b:
                break
            yield b
        if rpr:
            rstart, rlen = _uncache_ours_upto(fd, ofs, (rstart, rlen), rpr)
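
# readfile_iter yields each file's contents in BLOB_READ_SIZE chunks; with
# fmincore available it also drops pages behind the read position from the
# OS cache, but only pages that were nonresident before this scan began.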


def _splitbuf(buf, basebits, fanbits):
    while 1:
        b = buf.peek(buf.used())
        (ofs, bits) = _helpers.splitbuf(b)
        if ofs:
            if ofs > BLOB_MAX:
                ofs = BLOB_MAX
                level = 0
            else:
                level = (bits-basebits)//fanbits  # integer division
            buf.eat(ofs)
            yield buffer(b, 0, ofs), level
        else:
            break
    while buf.used() >= BLOB_MAX:
        # limit max blob size
        yield buf.get(BLOB_MAX), 0
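
# Example: if _helpers.blobbits() is 13 (matching the "typical" 8K blob
# above) and fanout is 16, then fanbits is 4, and a split point whose
# checksum matches 21 low bits gets level (21 - 13) // 4 == 2.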


def _hashsplit_iter(files, progress):
    assert(BLOB_READ_SIZE > BLOB_MAX)
    basebits = _helpers.blobbits()
    fanbits = int(math.log(fanout or 128, 2))
    buf = Buf()
    for inblock in readfile_iter(files, progress):
        buf.put(inblock)
        for buf_and_level in _splitbuf(buf, basebits, fanbits):
            yield buf_and_level
    if buf.used():
        yield buf.get(buf.used()), 0
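
# Anything left in the buffer after the last read never matched a split
# point, so it is flushed as a final level-0 blob.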


def _hashsplit_iter_keep_boundaries(files, progress):
    for real_filenum,f in enumerate(files):
        if progress:
            def prog(filenum, nbytes):
                # the inner _hashsplit_iter doesn't know the real file
                # number, so we substitute it here.
                return progress(real_filenum, nbytes)
        else:
            prog = None
        for buf_and_level in _hashsplit_iter([f], progress=prog):
            yield buf_and_level


def hashsplit_iter(files, keep_boundaries, progress):
    if keep_boundaries:
        return _hashsplit_iter_keep_boundaries(files, progress)
    else:
        return _hashsplit_iter(files, progress)
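
# A minimal usage sketch (hypothetical file object):
#   f = open('big.dat', 'rb')
#   for blob, level in hashsplit_iter([f], keep_boundaries=False,
#                                     progress=None):
#       ...  # blob is a buffer; level drives the tree fanout below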


total_split = 0
def split_to_blobs(makeblob, files, keep_boundaries, progress):
    global total_split
    for (blob, level) in hashsplit_iter(files, keep_boundaries, progress):
        sha = makeblob(blob)
        total_split += len(blob)
        if progress_callback:
            progress_callback(len(blob))
        yield (sha, len(blob), level)
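
# makeblob is a caller-supplied callback (e.g. something that writes a git
# blob) returning the stored object's id; total_split accumulates the
# number of bytes split across all calls.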


def _make_shalist(l):
    ofs = 0
    total = sum(size for mode,sha,size in l)
    vlen = len('%x' % total)
    shalist = []
    for (mode, sha, size) in l:
        shalist.append((mode, '%0*x' % (vlen,ofs), sha))
        ofs += size
    assert(ofs == total)
    return (shalist, total)
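
# Example: entries of sizes 10, 20, and 30 total 60 (0x3c, so vlen is 2),
# producing the zero-padded hex offset names '00', '0a', and '1e'.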


def _squish(maketree, stacks, n):
    i = 0
    while i < n or len(stacks[i]) >= MAX_PER_TREE:
        while len(stacks) <= i+1:
            stacks.append([])
        if len(stacks[i]) == 1:
            stacks[i+1] += stacks[i]
        elif stacks[i]:
            (shalist, size) = _make_shalist(stacks[i])
            tree = maketree(shalist)
            stacks[i+1].append((GIT_MODE_TREE, tree, size))
        del stacks[i][:]
        i += 1
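
# _squish folds every stack level below n (or any level that has grown to
# MAX_PER_TREE entries) into a single tree entry on the level above; a
# single-entry level is promoted as-is instead of being wrapped in a tree.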


def split_to_shalist(makeblob, maketree, files,
                     keep_boundaries, progress=None):
    sl = split_to_blobs(makeblob, files, keep_boundaries, progress)
    assert(fanout != 0)
    if not fanout:
        shal = []
        for (sha,size,level) in sl:
            shal.append((GIT_MODE_FILE, sha, size))
        return _make_shalist(shal)[0]
    else:
        stacks = [[]]
        for (sha,size,level) in sl:
            stacks[0].append((GIT_MODE_FILE, sha, size))
            _squish(maketree, stacks, level)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        _squish(maketree, stacks, len(stacks)-1)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        return _make_shalist(stacks[-1])[0]
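
# The returned shalist describes one top-level git tree whose entry names
# are zero-padded hex offsets of each child into the original stream.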


def split_to_blob_or_tree(makeblob, maketree, files,
                          keep_boundaries, progress=None):
    shalist = list(split_to_shalist(makeblob, maketree,
                                    files, keep_boundaries, progress))
    if len(shalist) == 1:
        return (shalist[0][0], shalist[0][2])
    elif len(shalist) == 0:
        return (GIT_MODE_FILE, makeblob(''))
    else:
        return (GIT_MODE_TREE, maketree(shalist))
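
# Returns a (mode, id) pair: a single entry collapses to that entry itself,
# empty input becomes an empty blob, and anything else gets a wrapping tree.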


def open_noatime(name):
    fd = _helpers.open_noatime(name)
    try:
        return os.fdopen(fd, 'rb', 1024*1024)
    except:
        try:
            os.close(fd)
        except:
            pass
        raise