import math, os

from bup import _helpers
from bup.helpers import *

BLOB_MAX = 8192*4   # 8192 is the "typical" blob size for bupsplit
BLOB_READ_SIZE = 1024*1024
MAX_PER_TREE = 256
progress_callback = None
fanout = 16

GIT_MODE_FILE = 0100644
GIT_MODE_TREE = 040000
GIT_MODE_SYMLINK = 0120000
assert(GIT_MODE_TREE != 40000)  # 0xxx should be treated as octal

# The purpose of this type of buffer is to avoid copying on peek(), get(),
# and eat().  We do copy the buffer contents on put(), but that should
# be ok if we always only put() large amounts of data at a time.
class Buf:
    def __init__(self):
        self.data = ''
        self.start = 0

    def put(self, s):
        if s:
            # the only copy: fold the unconsumed tail and the new chunk together
            self.data = buffer(self.data, self.start) + s
            self.start = 0

    def peek(self, count):
        return buffer(self.data, self.start, count)

    def eat(self, count):
        self.start += count

    def get(self, count):
        v = buffer(self.data, self.start, count)
        self.start += count
        return v

    def used(self):
        return len(self.data) - self.start
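

# A minimal usage sketch (not part of bup; the helper name is made up) of the
# access pattern Buf is built for: one big put(), then cheap peek()/eat()/get()
# slices off the front.
def _buf_example():
    buf = Buf()
    buf.put('a'*4096 + 'b'*4096)     # the only copy happens here
    head = str(buf.peek(4096))       # zero-copy view until str() materializes it
    buf.eat(4096)                    # advance past the peeked bytes
    tail = str(buf.get(buf.used()))  # get() is peek() plus eat()
    return (head, tail)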


def readfile_iter(files, progress=None):
    for filenum,f in enumerate(files):
        ofs = 0
        b = ''
        while 1:
            if progress:
                progress(filenum, len(b))
            # hint the kernel that we're done with pages before the read window
            fadvise_done(f, max(0, ofs - 1024*1024))
            b = f.read(BLOB_READ_SIZE)
            ofs += len(b)
            if not b:
                break
            yield b


def _splitbuf(buf, basebits, fanbits):
    while 1:
        b = buf.peek(buf.used())
        (ofs, bits) = _helpers.splitbuf(b)
        if ofs:
            if ofs > BLOB_MAX:
                ofs = BLOB_MAX
                level = 0
            else:
                level = (bits-basebits)//fanbits  # integer division
            buf.eat(ofs)
            yield buffer(b, 0, ofs), level
        else:
            break
    while buf.used() >= BLOB_MAX:
        # limit max blob size
        yield buf.get(BLOB_MAX), 0
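

# Illustration of the level arithmetic above, with assumed values (not from a
# real run): bup's blobs average 2**13 bytes, so basebits is typically 13, and
# with fanout=16, fanbits is 4.  A split point whose rolling checksum matched
# 21 low-order bits then sits (21-13)//4 == 2 levels up the tree.
def _level_example(bits, basebits=13, fanbits=4):
    return (bits - basebits) // fanbits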


def _hashsplit_iter(files, progress):
    assert(BLOB_READ_SIZE > BLOB_MAX)
    basebits = _helpers.blobbits()
    fanbits = int(math.log(fanout or 128, 2))
    buf = Buf()
    for inblock in readfile_iter(files, progress):
        buf.put(inblock)
        for buf_and_level in _splitbuf(buf, basebits, fanbits):
            yield buf_and_level
    if buf.used():
        yield buf.get(buf.used()), 0


def _hashsplit_iter_keep_boundaries(files, progress):
    for real_filenum,f in enumerate(files):
        if progress:
            def prog(filenum, nbytes):
                # the inner _hashsplit_iter doesn't know the real file count,
                # so we'll replace it here.
                return progress(real_filenum, nbytes)
        else:
            prog = None
        for buf_and_level in _hashsplit_iter([f], progress=prog):
            yield buf_and_level


def hashsplit_iter(files, keep_boundaries, progress):
    if keep_boundaries:
        return _hashsplit_iter_keep_boundaries(files, progress)
    else:
        return _hashsplit_iter(files, progress)


total_split = 0
def split_to_blobs(makeblob, files, keep_boundaries, progress):
    global total_split
    for (blob, level) in hashsplit_iter(files, keep_boundaries, progress):
        sha = makeblob(blob)
        total_split += len(blob)
        if progress_callback:
            progress_callback(len(blob))
        yield (sha, len(blob), level)


def _make_shalist(l):
    ofs = 0
    total = sum(size for mode,sha,size in l)
    vlen = len('%x' % total)
    shalist = []
    for (mode, sha, size) in l:
        shalist.append((mode, '%0*x' % (vlen,ofs), sha))
        ofs += size
    assert(ofs == total)
    return (shalist, total)
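

# Sketch of the naming scheme with a hypothetical input (shas abbreviated):
# entries are named by their zero-padded hex start offset, so a git tree sorts
# them in file order and any byte offset maps to an entry by name.
def _make_shalist_example():
    fake = [(GIT_MODE_FILE, 'sha-A', 0x3000),
            (GIT_MODE_FILE, 'sha-B', 0x1000)]
    (shalist, total) = _make_shalist(fake)
    # shalist == [(GIT_MODE_FILE, '0000', 'sha-A'),
    #             (GIT_MODE_FILE, '3000', 'sha-B')]
    # total == 0x4000
    return (shalist, total)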


def _squish(maketree, stacks, n):
    i = 0
    while i < n or len(stacks[i]) >= MAX_PER_TREE:
        while len(stacks) <= i+1:
            stacks.append([])
        if len(stacks[i]) == 1:
            # a lone entry gets promoted as-is instead of wrapped in a tree
            stacks[i+1] += stacks[i]
        elif stacks[i]:
            (shalist, size) = _make_shalist(stacks[i])
            tree = maketree(shalist)
            stacks[i+1].append((GIT_MODE_TREE, tree, size))
        stacks[i] = []
        i += 1
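

# Hedged sketch of _squish at work with a dummy maketree (illustrative only):
# once stacks[0] reaches MAX_PER_TREE entries, they collapse into a single
# tree entry one level up.
def _squish_example():
    def fake_maketree(shalist):
        return 'tree-of-%d' % len(shalist)
    stacks = [[(GIT_MODE_FILE, 'sha%d' % i, 1) for i in xrange(MAX_PER_TREE)]]
    _squish(fake_maketree, stacks, 0)
    # stacks is now [[], [(GIT_MODE_TREE, 'tree-of-256', 256)]]
    return stacks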


def split_to_shalist(makeblob, maketree, files,
                     keep_boundaries, progress=None):
    sl = split_to_blobs(makeblob, files, keep_boundaries, progress)
    if not fanout:
        shal = []
        for (sha,size,level) in sl:
            shal.append((GIT_MODE_FILE, sha, size))
        return _make_shalist(shal)[0]
    else:
        stacks = [[]]
        for (sha,size,level) in sl:
            stacks[0].append((GIT_MODE_FILE, sha, size))
            _squish(maketree, stacks, level)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        _squish(maketree, stacks, len(stacks)-1)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        return _make_shalist(stacks[-1])[0]
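

# Hedged usage sketch: makeblob/maketree here are local stand-ins that just
# hash (in bup proper they would be something like git.PackWriter's
# new_blob/new_tree, which actually store the objects).
def _split_usage_example(f):
    import hashlib
    def makeblob(blob):
        return hashlib.sha1(str(blob)).digest()      # stand-in writer
    def maketree(shalist):
        return hashlib.sha1(repr(shalist)).digest()  # stand-in writer
    return split_to_shalist(makeblob, maketree, [f],
                            keep_boundaries=False)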


def split_to_blob_or_tree(makeblob, maketree, files, keep_boundaries):
    shalist = list(split_to_shalist(makeblob, maketree,
                                    files, keep_boundaries))
    if len(shalist) == 1:
        return (shalist[0][0], shalist[0][2])
    elif len(shalist) == 0:
        return (GIT_MODE_FILE, makeblob(''))
    else:
        return (GIT_MODE_TREE, maketree(shalist))


def open_noatime(name):
    fd = _helpers.open_noatime(name)
    try:
        return os.fdopen(fd, 'rb', 1024*1024)
    except:
        # don't leak the fd if fdopen() fails
        try:
            os.close(fd)
        except OSError:
            pass
        raise


def fadvise_done(f, ofs):
    assert(ofs >= 0)
    if ofs > 0 and hasattr(f, 'fileno'):
        _helpers.fadvise_done(f.fileno(), ofs)