import math, os
from bup import _helpers
from bup.helpers import *

BLOB_MAX = 8192*4   # 8192 is the "typical" blob size for bupsplit
BLOB_READ_SIZE = 1024*1024
MAX_PER_TREE = 256
progress_callback = None
fanout = 16

GIT_MODE_FILE = 0100644
GIT_MODE_TREE = 040000
assert(GIT_MODE_TREE != 40000)  # 0xxx should be treated as octal

# The purpose of this type of buffer is to avoid copying on peek(), get(),
# and eat().  We do copy the buffer contents on put(), but that should
# be ok if we always only put() large amounts of data at a time.
class Buf:
    def __init__(self):
        self.data = ''
        self.start = 0

    def put(self, s):
        if s:
            self.data = buffer(self.data, self.start) + s
            self.start = 0

    def peek(self, count):
        return buffer(self.data, self.start, count)

    def eat(self, count):
        self.start += count

    def get(self, count):
        v = buffer(self.data, self.start, count)
        self.start += count
        return v

    def used(self):
        return len(self.data) - self.start
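
# Illustrative usage sketch (not part of the original source): put() pays the
# one copy per input block, while peek()/eat()/get() only hand out buffer()
# views over self.data and advance self.start:
#
#   buf = Buf()
#   buf.put(f.read(BLOB_READ_SIZE))   # one copy, on input
#   view = buf.peek(buf.used())       # no copy: a buffer() slice
#   buf.eat(ofs)                      # no copy: just advances self.start
#   blob = buf.get(BLOB_MAX)          # view of the next BLOB_MAX bytes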

def readfile_iter(files, progress=None):
    for filenum,f in enumerate(files):
        ofs = 0
        b = ''
        while 1:
            if progress:
                progress(filenum, len(b))
            fadvise_done(f, max(0, ofs - 1024*1024))
            b = f.read(BLOB_READ_SIZE)
            ofs += len(b)
            if not b:
                fadvise_done(f, ofs)
                break
            yield b

def _splitbuf(buf, basebits, fanbits):
    while 1:
        b = buf.peek(buf.used())
        (ofs, bits) = _helpers.splitbuf(b)
        if ofs > BLOB_MAX:
            ofs = BLOB_MAX
        if ofs:
            buf.eat(ofs)
            level = (bits-basebits)//fanbits  # integer division
            yield buffer(b, 0, ofs), level
        else:
            break
    while buf.used() >= BLOB_MAX:
        # limit max blob size
        yield buf.get(BLOB_MAX), 0
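
# Worked example (illustrative): _helpers.splitbuf() reports how many low
# bits of the rolling checksum matched at the split point.  With the typical
# 8192-byte target (basebits == 13) and fanout == 16 (so fanbits == 4), a
# boundary that matched 21 bits gets level = (21 - 13)//4 == 2, i.e. it will
# close two levels of subtrees in split_to_shalist() below.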

def _hashsplit_iter(files, progress):
    assert(BLOB_READ_SIZE > BLOB_MAX)
    basebits = _helpers.blobbits()
    fanbits = int(math.log(fanout or 128, 2))
    buf = Buf()
    for inblock in readfile_iter(files, progress):
        buf.put(inblock)
        for buf_and_level in _splitbuf(buf, basebits, fanbits):
            yield buf_and_level
    if buf.used():
        yield buf.get(buf.used()), 0

def _hashsplit_iter_keep_boundaries(files, progress):
    for real_filenum,f in enumerate(files):
        if progress:
            def prog(filenum, nbytes):
                # the inner _hashsplit_iter doesn't know the real file count,
                # so we'll replace it here.
                return progress(real_filenum, nbytes)
        else:
            prog = None
        for buf_and_level in _hashsplit_iter([f], progress=prog):
            yield buf_and_level

def hashsplit_iter(files, keep_boundaries, progress):
    if keep_boundaries:
        return _hashsplit_iter_keep_boundaries(files, progress)
    else:
        return _hashsplit_iter(files, progress)

total_split = 0
def split_to_blobs(makeblob, files, keep_boundaries, progress):
    global total_split
    for (blob, level) in hashsplit_iter(files, keep_boundaries, progress):
        sha = makeblob(blob)
        total_split += len(blob)
        if progress_callback:
            progress_callback(len(blob))
        yield (sha, len(blob), level)

def _make_shalist(l):
    ofs = 0
    l = list(l)
    total = sum(size for mode,sha,size in l)
    vlen = len('%x' % total)
    shalist = []
    for (mode, sha, size) in l:
        shalist.append((mode, '%0*x' % (vlen,ofs), sha))
        ofs += size
    assert(ofs == total)
    return (shalist, total)
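
# Example (illustrative): the entry "name" is the hex offset of each chunk
# within the reassembled file, zero-padded to the width of the total size,
# so git's name ordering matches file order.  For chunk sizes 10, 20, 30:
# total == 60 == 0x3c, vlen == 2, and the names are '00', '0a', '1e'.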

def _squish(maketree, stacks, n):
    i = 0
    while i < n or len(stacks[i]) >= MAX_PER_TREE:
        while len(stacks) <= i+1:
            stacks.append([])
        if len(stacks[i]) == 1:
            stacks[i+1] += stacks[i]
        elif stacks[i]:
            (shalist, size) = _make_shalist(stacks[i])
            tree = maketree(shalist)
            stacks[i+1].append((GIT_MODE_TREE, tree, size))
        stacks[i] = []
        i += 1

def split_to_shalist(makeblob, maketree, files,
                     keep_boundaries, progress=None):
    sl = split_to_blobs(makeblob, files, keep_boundaries, progress)
    if not fanout:
        shal = []
        for (sha,size,level) in sl:
            shal.append((GIT_MODE_FILE, sha, size))
        return _make_shalist(shal)[0]
    else:
        stacks = [[]]
        for (sha,size,level) in sl:
            stacks[0].append((GIT_MODE_FILE, sha, size))
            _squish(maketree, stacks, level)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        _squish(maketree, stacks, len(stacks)-1)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        return _make_shalist(stacks[-1])[0]
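
# How the stacking works (illustrative summary): stacks[0] accumulates blob
# entries; when a chunk arrives with level L, _squish() folds every level
# below L into a subtree entry one level up, so each tree ends up with about
# `fanout` children on average.  The final _squish() collapses whatever is
# left, and stacks[-1] then describes the whole input as one shalist.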

def split_to_blob_or_tree(makeblob, maketree, files, keep_boundaries):
    shalist = list(split_to_shalist(makeblob, maketree,
                                    files, keep_boundaries))
    if len(shalist) == 1:
        return (shalist[0][0], shalist[0][2])
    elif len(shalist) == 0:
        return (GIT_MODE_FILE, makeblob(''))
    else:
        return (GIT_MODE_TREE, maketree(shalist))

def open_noatime(name):
    fd = _helpers.open_noatime(name)
    return os.fdopen(fd, 'rb', 1024*1024)

def fadvise_done(f, ofs):
    if ofs > 0 and hasattr(f, 'fileno'):
        _helpers.fadvise_done(f.fileno(), ofs)