-import math, os
-from bup import _helpers, helpers
+from __future__ import absolute_import
+import io, math, os
+
+from bup import _helpers, compat, helpers
+from bup.compat import buffer, join_bytes
from bup.helpers import sc_page_size
+
_fmincore = getattr(helpers, 'fmincore', None)
BLOB_MAX = 8192*4 # 8192 is the "typical" blob size for bupsplit
progress_callback = None
fanout = 16
-GIT_MODE_FILE = 0100644
-GIT_MODE_TREE = 040000
-GIT_MODE_SYMLINK = 0120000
-assert(GIT_MODE_TREE != 40000) # 0xxx should be treated as octal
+GIT_MODE_FILE = 0o100644
+GIT_MODE_TREE = 0o40000
+GIT_MODE_SYMLINK = 0o120000
# The purpose of this type of buffer is to avoid copying on peek(), get(),
# and eat(). We do copy the buffer contents on put(), but that should
# be ok if we always only put() large amounts of data at a time.
class Buf:
def __init__(self):
- self.data = ''
+ self.data = b''
self.start = 0
def put(self, s):
if s:
- self.data = buffer(self.data, self.start) + s
+ self.data = join_bytes(buffer(self.data, self.start), s)
self.start = 0
def peek(self, count):
self.start += count
def get(self, count):
- v = buffer(self.data, self.start, count)
+ if count <= 256:
+ v = self.data[self.start : self.start + count]
+ else:
+ v = buffer(self.data, self.start, count)
self.start += count
return v
b = ''
fd = rpr = rstart = rlen = None
if _fmincore and hasattr(f, 'fileno'):
- fd = f.fileno()
- max_chunk = max(1, (8 * 1024 * 1024) / sc_page_size)
- rpr = _nonresident_page_regions(_fmincore(fd),
- helpers.MINCORE_INCORE, max_chunk)
- rstart, rlen = next(rpr, (None, None))
+ try:
+ fd = f.fileno()
+ except io.UnsupportedOperation:
+ pass
+    if fd is not None:
+ mcore = _fmincore(fd)
+ if mcore:
+            max_chunk = max(1, (8 * 1024 * 1024) // sc_page_size)
+ rpr = _nonresident_page_regions(mcore, helpers.MINCORE_INCORE,
+ max_chunk)
+ rstart, rlen = next(rpr, (None, None))
while 1:
if progress:
progress(filenum, len(b))