# bup.git: lib/bup/hashsplit.py @ 571ddf6434ac1723c1948ffc5c4ba6f59843a72c
import math
import os
from bup import _helpers
from bup.helpers import *

BLOB_MAX = 8192*4   # 8192 is the "typical" blob size for bupsplit
BLOB_READ_SIZE = 1024*1024
MAX_PER_TREE = 256
progress_callback = None
fanout = 16

GIT_MODE_FILE = 0100644
GIT_MODE_TREE = 040000
GIT_MODE_SYMLINK = 0120000
assert(GIT_MODE_TREE != 40000)  # 0xxx should be treated as octal

# The purpose of this type of buffer is to avoid copying on peek(), get(),
# and eat().  We do copy the buffer contents on put(), but that should
# be ok if we always only put() large amounts of data at a time.
class Buf:
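    """Byte queue that avoids copies on peek(), get(), and eat(); only
    put() copies, which is acceptable as long as it is fed large chunks.

    Rough usage sketch (Python 2 buffer() objects; wrap in str() to
    materialize the bytes):

        b = Buf()
        b.put('hello world')
        str(b.peek(5))   # 'hello'
        b.eat(6)         # drop 'hello '
        str(b.get(5))    # 'world'
        b.used()         # 0
    """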
    def __init__(self):
        self.data = ''
        self.start = 0

    def put(self, s):
        if s:
            self.data = buffer(self.data, self.start) + s
            self.start = 0

    def peek(self, count):
        return buffer(self.data, self.start, count)

    def eat(self, count):
        self.start += count

    def get(self, count):
        v = buffer(self.data, self.start, count)
        self.start += count
        return v

    def used(self):
        return len(self.data) - self.start


def readfile_iter(files, progress=None):
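    """Yield the contents of each file in 'files' in BLOB_READ_SIZE chunks.

    If given, progress is called as progress(filenum, len(previous_chunk))
    at the top of each read, and fadvise_done() hints that already-read
    regions can be dropped from the OS page cache.
    """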
    for filenum,f in enumerate(files):
        ofs = 0
        b = ''
        while 1:
            if progress:
                progress(filenum, len(b))
            b = f.read(BLOB_READ_SIZE)
            ofs += len(b)
            # Warning: ofs == 0 means 'done with the whole file'
            # This will only happen here when the file is empty
            fadvise_done(f, ofs)
            if not b:
                break
            yield b


def _splitbuf(buf, basebits, fanbits):
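    """Yield (chunk, level) for every complete split found in buf.

    _helpers.splitbuf() reports the offset of the next rolling-checksum
    split point and how many checksum bits matched; level is the bit count
    beyond 'basebits' divided by 'fanbits' and determines how high the
    chunk sits in the fanout tree.  Oversized chunks are capped at
    BLOB_MAX with level 0; any short remainder stays in buf for later.
    """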
    while 1:
        b = buf.peek(buf.used())
        (ofs, bits) = _helpers.splitbuf(b)
        if ofs:
            if ofs > BLOB_MAX:
                ofs = BLOB_MAX
                level = 0
            else:
                level = (bits-basebits)//fanbits  # integer division
            buf.eat(ofs)
            yield buffer(b, 0, ofs), level
        else:
            break
    while buf.used() >= BLOB_MAX:
        # limit max blob size
        yield buf.get(BLOB_MAX), 0


def _hashsplit_iter(files, progress):
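    """Split the concatenated content of 'files' into (chunk, level) pairs,
    ignoring file boundaries; any final partial chunk is yielded with
    level 0.
    """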
    assert(BLOB_READ_SIZE > BLOB_MAX)
    basebits = _helpers.blobbits()
    fanbits = int(math.log(fanout or 128, 2))
    buf = Buf()
    for inblock in readfile_iter(files, progress):
        buf.put(inblock)
        for buf_and_level in _splitbuf(buf, basebits, fanbits):
            yield buf_and_level
    if buf.used():
        yield buf.get(buf.used()), 0


def _hashsplit_iter_keep_boundaries(files, progress):
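    """Like _hashsplit_iter(), but restart the splitter at each file
    boundary so a given file always yields the same chunks no matter what
    precedes it.
    """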
    for real_filenum,f in enumerate(files):
        if progress:
            def prog(filenum, nbytes):
                # the inner _hashsplit_iter doesn't know the real file
                # number, so we substitute it here.
                return progress(real_filenum, nbytes)
        else:
            prog = None
        for buf_and_level in _hashsplit_iter([f], progress=prog):
            yield buf_and_level


def hashsplit_iter(files, keep_boundaries, progress):
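    """Dispatch to the per-file or concatenated splitter, depending on
    keep_boundaries."""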
    if keep_boundaries:
        return _hashsplit_iter_keep_boundaries(files, progress)
    else:
        return _hashsplit_iter(files, progress)


total_split = 0
def split_to_blobs(makeblob, files, keep_boundaries, progress):
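    """Store each chunk via makeblob() and yield (sha, size, level).

    Also accumulates the module-level total_split byte count and calls
    progress_callback(size) for each chunk if a callback is set.
    """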
    global total_split
    for (blob, level) in hashsplit_iter(files, keep_boundaries, progress):
        sha = makeblob(blob)
        total_split += len(blob)
        if progress_callback:
            progress_callback(len(blob))
        yield (sha, len(blob), level)


def _make_shalist(l):
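    """Turn (mode, sha, size) chunks into git tree entries named by offset.

    Each entry is named with the chunk's starting offset within the split
    file, in hex, zero-padded to the width of the total size; returns
    (shalist, total).  Illustrative example: chunk sizes 10 and 20 give
    total 30 (0x1e, width 2), so the entries are named '00' and '0a'.
    """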
    ofs = 0
    l = list(l)
    total = sum(size for mode,sha,size in l)
    vlen = len('%x' % total)
    shalist = []
    for (mode, sha, size) in l:
        shalist.append((mode, '%0*x' % (vlen,ofs), sha))
        ofs += size
    assert(ofs == total)
    return (shalist, total)


def _squish(maketree, stacks, n):
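    """Collapse stack levels 0..n-1 (and any level that has reached
    MAX_PER_TREE entries) into a subtree appended to the level above, so
    that no single git tree grows too large.  A level holding exactly one
    entry is passed up as-is rather than wrapped in a one-entry tree.
    """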
    i = 0
    while i < n or len(stacks[i]) >= MAX_PER_TREE:
        while len(stacks) <= i+1:
            stacks.append([])
        if len(stacks[i]) == 1:
            stacks[i+1] += stacks[i]
        elif stacks[i]:
            (shalist, size) = _make_shalist(stacks[i])
            tree = maketree(shalist)
            stacks[i+1].append((GIT_MODE_TREE, tree, size))
        stacks[i] = []
        i += 1


def split_to_shalist(makeblob, maketree, files,
                     keep_boundaries, progress=None):
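    """Split 'files' into blobs and fan them out into a tree of git trees.

    makeblob() and maketree() are called to store each blob and tree and
    must return the resulting sha; the return value is the shalist (a list
    of (mode, name, sha) entries) for the top-level tree.

    Rough usage sketch with hypothetical stand-in callables (hashlib shas
    instead of bup's real object writer; the path is made up):

        import hashlib
        def makeblob(data):
            return hashlib.sha1(str(data)).digest()
        def maketree(shalist):
            return hashlib.sha1(repr(shalist)).digest()
        f = open('/some/large/file', 'rb')
        shalist = split_to_shalist(makeblob, maketree, [f],
                                   keep_boundaries=False)
    """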
    sl = split_to_blobs(makeblob, files, keep_boundaries, progress)
    assert(fanout != 0)
    if not fanout:
        shal = []
        for (sha,size,level) in sl:
            shal.append((GIT_MODE_FILE, sha, size))
        return _make_shalist(shal)[0]
    else:
        stacks = [[]]
        for (sha,size,level) in sl:
            stacks[0].append((GIT_MODE_FILE, sha, size))
            _squish(maketree, stacks, level)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        _squish(maketree, stacks, len(stacks)-1)
        #log('stacks: %r\n' % [len(i) for i in stacks])
        return _make_shalist(stacks[-1])[0]


def split_to_blob_or_tree(makeblob, maketree, files,
                          keep_boundaries, progress=None):
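    """Store 'files' as one blob if the content splits to a single chunk,
    otherwise as a tree of trees; returns (mode, sha).  Empty content
    becomes an empty blob.
    """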
    shalist = list(split_to_shalist(makeblob, maketree,
                                    files, keep_boundaries, progress))
    if len(shalist) == 1:
        return (shalist[0][0], shalist[0][2])
    elif len(shalist) == 0:
        return (GIT_MODE_FILE, makeblob(''))
    else:
        return (GIT_MODE_TREE, maketree(shalist))


def open_noatime(name):
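    """Open 'name' read-only without updating its access time (where the
    OS allows it); returns a large-buffered file object.
    """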
    fd = _helpers.open_noatime(name)
    try:
        return os.fdopen(fd, 'rb', 1024*1024)
    except:
        try:
            os.close(fd)
        except:
            pass
        raise


def fadvise_done(f, ofs):
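    """Hint to the OS (posix_fadvise DONTNEED, where available) that we are
    done with f's contents up to ofs, so the page cache can drop them.
    """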
    assert(ofs >= 0)
    if ofs > 0 and hasattr(f, 'fileno'):
        _helpers.fadvise_done(f.fileno(), ofs)