1 """Helper functions and classes for bup."""
3 from __future__ import absolute_import, division
4 from collections import namedtuple
5 from contextlib import contextmanager
6 from ctypes import sizeof, c_void_p
8 from pipes import quote
9 from subprocess import PIPE, Popen
10 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
11 import hashlib, heapq, math, operator, time, grp, tempfile
13 from bup import _helpers
14 from bup import compat
15 # This function should really be in helpers, not in bup.options. But we
16 # want options.py to be standalone so people can include it in other projects.
17 from bup.options import _tty_width as tty_width
21 """Helper to deal with Python scoping issues"""
25 sc_page_size = os.sysconf('SC_PAGE_SIZE')
26 assert(sc_page_size > 0)
28 sc_arg_max = os.sysconf('SC_ARG_MAX')
29 if sc_arg_max == -1: # "no definite limit" - let's choose 2M
30 sc_arg_max = 2 * 1024 * 1024
34 for result in iterable:
40 """Convert the string 's' to an integer. Return 0 if s is not a number."""
48 """Convert the string 's' to a float. Return 0 if s is not a number."""
50 return float(s or '0')
55 buglvl = atoi(os.environ.get('BUP_DEBUG', 0))
59 _fdatasync = os.fdatasync
60 except AttributeError:
63 if sys.platform.startswith('darwin'):
64 # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
68 return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
70 # Fallback for file systems (SMB) that do not support F_FULLFSYNC
71 if e.errno == errno.ENOTSUP:
76 fdatasync = _fdatasync
79 def partition(predicate, stream):
80 """Returns (leading_matches_it, rest_it), where leading_matches_it
81 must be completely exhausted before traversing rest_it.
86 ns.first_nonmatch = None
87 def leading_matches():
92 ns.first_nonmatch = (x,)
96 yield ns.first_nonmatch[0]
99 return (leading_matches(), rest())
109 def lines_until_sentinel(f, sentinel, ex_type):
110 # sentinel must end with \n and must contain only one \n
113 if not (line and line.endswith('\n')):
114 raise ex_type('Hit EOF while reading line')
120 def stat_if_exists(path):
124 if e.errno != errno.ENOENT:
129 # Write (blockingly) to sockets that may or may not be in blocking mode.
130 # We need this because our stderr is sometimes eaten by subprocesses
131 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
132 # leading to race conditions. Ick. We'll do it the hard way.
133 def _hard_write(fd, buf):
135 (r,w,x) = select.select([], [fd], [], None)
137 raise IOError('select(fd) returned without being writable')
139 sz = os.write(fd, buf)
141 if e.errno != errno.EAGAIN:
149 """Print a log message to stderr."""
152 _hard_write(sys.stderr.fileno(), s)
166 istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
167 istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
170 """Calls log() if stderr is a TTY. Does nothing otherwise."""
171 global _last_progress
178 """Calls progress() only if we haven't printed progress in a while.
180 This avoids overloading the stderr buffer with excess junk.
184 if now - _last_prog > 0.1:
190 """Calls progress() to redisplay the most recent progress message.
192 Useful after you've printed some other message that wipes out the
195 if _last_progress and _last_progress.endswith('\r'):
196 progress(_last_progress)
199 def mkdirp(d, mode=None):
200 """Recursively create directories on path 'd'.
202 Unlike os.makedirs(), it doesn't raise an exception if the last element of
203 the path already exists.
211 if e.errno == errno.EEXIST:
217 def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
219 samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
221 samekey = operator.eq
223 total = sum(len(it) for it in iters)
224 iters = (iter(it) for it in iters)
225 heap = ((next(it, None),it) for it in iters)
226 heap = [(e,it) for e,it in heap if e]
231 if not count % pfreq:
234 if not samekey(e, pe):
240 except StopIteration:
241 heapq.heappop(heap) # remove current
243 heapq.heapreplace(heap, (e, it)) # shift current to new location
248 """Delete a file at path 'f' if it currently exists.
250 Unlike os.unlink(), does not throw an exception if the file didn't already
256 if e.errno != errno.ENOENT:
261 if isinstance(cmd, compat.str_type):
264 return ' '.join(map(quote, cmd))
266 exc = subprocess.check_call
276 assert stdin in (None, PIPE)
279 stdin=stdin, stdout=PIPE, stderr=stderr,
281 preexec_fn=preexec_fn)
282 out, err = p.communicate(input)
283 if check and p.returncode != 0:
284 raise Exception('subprocess %r failed with status %d, stderr: %r'
285 % (' '.join(map(quote, cmd)), p.returncode, err))
288 def readpipe(argv, preexec_fn=None, shell=False):
289 """Run a subprocess and return its output."""
290 p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn,
292 out, err = p.communicate()
293 if p.returncode != 0:
294 raise Exception('subprocess %r failed with status %d'
295 % (' '.join(argv), p.returncode))
299 def _argmax_base(command):
302 base_size += len(command) + 1
303 for k, v in compat.items(environ):
304 base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
308 def _argmax_args_size(args):
309 return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
312 def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
313 """If args is not empty, yield the output produced by calling the
314 command list with args as a sequence of strings (It may be necessary
315 to return multiple strings in order to respect ARG_MAX)."""
316 # The optional arg_max arg is a workaround for an issue with the
317 # current wvtest behavior.
318 base_size = _argmax_base(command)
320 room = arg_max - base_size
323 next_size = _argmax_args_size(args[i:i+1])
324 if room - next_size < 0:
330 assert(len(sub_args))
331 yield readpipe(command + sub_args, preexec_fn=preexec_fn)
334 def resolve_parent(p):
335 """Return the absolute path of a file without following any final symlink.
337 Behaves like os.path.realpath, but doesn't follow a symlink for the last
338 element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
339 will follow symlinks in p's directory)
345 if st and stat.S_ISLNK(st.st_mode):
346 (dir, name) = os.path.split(p)
347 dir = os.path.realpath(dir)
348 out = os.path.join(dir, name)
350 out = os.path.realpath(p)
351 #log('realpathing:%r,%r\n' % (p, out))
def detect_fakeroot():
    """Return True if we appear to be running under fakeroot.

    fakeroot sets FAKEROOTKEY in the environment of the processes it
    spawns, so its presence is a reliable indicator.
    """
    # PEP 8: comparisons to None must use "is (not)", not (in)equality.
    return os.getenv("FAKEROOTKEY") is not None
360 if sys.platform.startswith('cygwin'):
362 # https://cygwin.com/ml/cygwin/2015-02/msg00057.html
363 groups = os.getgroups()
364 return 544 in groups or 0 in groups
367 return os.geteuid() == 0
370 def _cache_key_value(get_value, key, cache):
371 """Return (value, was_cached). If there is a value in the cache
372 for key, use that, otherwise, call get_value(key) which should
373 throw a KeyError if there is no value -- in which case the cached
374 and returned value will be None.
376 try: # Do we already have it (or know there wasn't one)?
383 cache[key] = value = get_value(key)
389 _uid_to_pwd_cache = {}
390 _name_to_pwd_cache = {}
392 def pwd_from_uid(uid):
393 """Return password database entry for uid (may be a cached value).
394 Return None if no entry is found.
396 global _uid_to_pwd_cache, _name_to_pwd_cache
397 entry, cached = _cache_key_value(pwd.getpwuid, uid, _uid_to_pwd_cache)
398 if entry and not cached:
399 _name_to_pwd_cache[entry.pw_name] = entry
403 def pwd_from_name(name):
404 """Return password database entry for name (may be a cached value).
405 Return None if no entry is found.
407 global _uid_to_pwd_cache, _name_to_pwd_cache
408 entry, cached = _cache_key_value(pwd.getpwnam, name, _name_to_pwd_cache)
409 if entry and not cached:
410 _uid_to_pwd_cache[entry.pw_uid] = entry
414 _gid_to_grp_cache = {}
415 _name_to_grp_cache = {}
417 def grp_from_gid(gid):
418 """Return password database entry for gid (may be a cached value).
419 Return None if no entry is found.
421 global _gid_to_grp_cache, _name_to_grp_cache
422 entry, cached = _cache_key_value(grp.getgrgid, gid, _gid_to_grp_cache)
423 if entry and not cached:
424 _name_to_grp_cache[entry.gr_name] = entry
428 def grp_from_name(name):
429 """Return password database entry for name (may be a cached value).
430 Return None if no entry is found.
432 global _gid_to_grp_cache, _name_to_grp_cache
433 entry, cached = _cache_key_value(grp.getgrnam, name, _name_to_grp_cache)
434 if entry and not cached:
435 _gid_to_grp_cache[entry.gr_gid] = entry
441 """Get the user's login name."""
445 _username = pwd_from_uid(uid).pw_name or b'user%d' % uid
451 """Get the user's full name."""
453 if not _userfullname:
455 entry = pwd_from_uid(uid)
457 _userfullname = entry.pw_gecos.split(b',')[0] or entry.pw_name
458 if not _userfullname:
459 _userfullname = b'user%d' % uid
465 """Get the FQDN of this machine."""
468 _hostname = socket.getfqdn()
472 _resource_path = None
473 def resource_path(subdir=''):
474 global _resource_path
475 if not _resource_path:
476 _resource_path = os.environ.get('BUP_RESOURCE_PATH') or '.'
477 return os.path.join(_resource_path, subdir)
479 def format_filesize(size):
484 exponent = int(math.log(size) // math.log(unit))
485 size_prefix = "KMGTPE"[exponent - 1]
486 return "%.1f%s" % (size // math.pow(unit, exponent), size_prefix)
489 class NotOk(Exception):
494 def __init__(self, outp):
498 while self._read(65536): pass
500 def read(self, size):
501 """Read 'size' bytes from input stream."""
503 return self._read(size)
506 """Read from input stream until a newline is found."""
508 return self._readline()
510 def write(self, data):
511 """Write 'data' to output stream."""
512 #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
513 self.outp.write(data)
516 """Return true if input stream is readable."""
517 raise NotImplemented("Subclasses must implement has_input")
520 """Indicate end of output from last sent command."""
524 """Indicate server error to the client."""
525 s = re.sub(r'\s+', ' ', str(s))
526 self.write('\nerror %s\n' % s)
528 def _check_ok(self, onempty):
531 for rl in linereader(self):
532 #log('%d got line: %r\n' % (os.getpid(), rl))
533 if not rl: # empty line
537 elif rl.startswith('error '):
538 #log('client: error: %s\n' % rl[6:])
542 raise Exception('server exited unexpectedly; see errors above')
544 def drain_and_check_ok(self):
545 """Remove all data for the current command from input stream."""
548 return self._check_ok(onempty)
551 """Verify that server action completed successfully."""
553 raise Exception('expected "ok", got %r' % rl)
554 return self._check_ok(onempty)
557 class Conn(BaseConn):
558 def __init__(self, inp, outp):
559 BaseConn.__init__(self, outp)
562 def _read(self, size):
563 return self.inp.read(size)
566 return self.inp.readline()
569 [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
571 assert(rl[0] == self.inp.fileno())
577 def checked_reader(fd, n):
579 rl, _, _ = select.select([fd], [], [])
582 if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
587 MAX_PACKET = 128 * 1024
588 def mux(p, outfd, outr, errr):
591 while p.poll() is None:
592 rl, _, _ = select.select(fds, [], [])
595 buf = os.read(outr, MAX_PACKET)
597 os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
599 buf = os.read(errr, 1024)
601 os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
603 os.write(outfd, struct.pack('!IB', 0, 3))
606 class DemuxConn(BaseConn):
607 """A helper class for bup's client-server protocol."""
608 def __init__(self, infd, outp):
609 BaseConn.__init__(self, outp)
610 # Anything that comes through before the sync string was not
611 # multiplexed and can be assumed to be debug/log before mux init.
613 while tail != 'BUPMUX':
614 b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
616 raise IOError('demux: unexpected EOF during initialization')
618 sys.stderr.write(tail[:-6]) # pre-mux log messages
625 def write(self, data):
627 BaseConn.write(self, data)
629 def _next_packet(self, timeout):
630 if self.closed: return False
631 rl, wl, xl = select.select([self.infd], [], [], timeout)
632 if not rl: return False
633 assert(rl[0] == self.infd)
634 ns = ''.join(checked_reader(self.infd, 5))
635 n, fdw = struct.unpack('!IB', ns)
636 assert(n <= MAX_PACKET)
638 self.reader = checked_reader(self.infd, n)
640 for buf in checked_reader(self.infd, n):
641 sys.stderr.write(buf)
644 debug2("DemuxConn: marked closed\n")
647 def _load_buf(self, timeout):
648 if self.buf is not None:
650 while not self.closed:
651 while not self.reader:
652 if not self._next_packet(timeout):
655 self.buf = next(self.reader)
657 except StopIteration:
661 def _read_parts(self, ix_fn):
662 while self._load_buf(None):
663 assert(self.buf is not None)
665 if i is None or i == len(self.buf):
670 self.buf = self.buf[i:]
678 return buf.index('\n')+1
681 return ''.join(self._read_parts(find_eol))
683 def _read(self, size):
685 def until_size(buf): # Closes on csize
686 if len(buf) < csize[0]:
691 return ''.join(self._read_parts(until_size))
694 return self._load_buf(0)
698 """Generate a list of input lines from 'f' without terminating newlines."""
706 def chunkyreader(f, count = None):
707 """Generate a list of chunks of data read from 'f'.
709 If count is None, read until EOF is reached.
711 If count is a positive integer, read 'count' bytes from 'f'. If EOF is
712 reached while reading, raise IOError.
716 b = f.read(min(count, 65536))
718 raise IOError('EOF with %d bytes remaining' % count)
729 def atomically_replaced_file(name, mode='w', buffering=-1):
730 """Yield a file that will be atomically renamed name when leaving the block.
732 This contextmanager yields an open file object that is backed by a
733 temporary file which will be renamed (atomically) to the target
734 name if everything succeeds.
736 The mode and buffering arguments are handled exactly as with open,
737 and the yielded file will have very restrictive permissions, as
742 with atomically_replaced_file('foo.txt', 'w') as f:
743 f.write('hello jack.')
747 (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
748 text=('b' not in mode))
751 f = os.fdopen(ffd, mode, buffering)
759 os.rename(tempname, name)
761 unlink(tempname) # nonexistant file is ignored
765 """Append "/" to 's' if it doesn't aleady end in "/"."""
766 if s and not s.endswith('/'):
772 def _mmap_do(f, sz, flags, prot, close):
774 st = os.fstat(f.fileno())
777 # trying to open a zero-length map gives an error, but an empty
778 # string has all the same behaviour of a zero-length map, ie. it has
781 map = mmap.mmap(f.fileno(), sz, flags, prot)
783 f.close() # map will persist beyond file close
787 def mmap_read(f, sz = 0, close=True):
788 """Create a read-only memory mapped region on file 'f'.
789 If sz is 0, the region will cover the entire file.
791 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
794 def mmap_readwrite(f, sz = 0, close=True):
795 """Create a read-write memory mapped region on file 'f'.
796 If sz is 0, the region will cover the entire file.
798 return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
802 def mmap_readwrite_private(f, sz = 0, close=True):
803 """Create a read-write memory mapped region on file 'f'.
804 If sz is 0, the region will cover the entire file.
805 The map is private, which means the changes are never flushed back to the
808 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
812 _mincore = getattr(_helpers, 'mincore', None)
814 # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
815 MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
817 _fmincore_chunk_size = None
def _set_fmincore_chunk_size():
    """Set the module-level mincore scan chunk size.

    The chunk is the largest multiple of the system page size that does
    not exceed 64MB; on (hypothetical) systems where a single page is
    already at least 64MB, a single page is used.
    """
    global _fmincore_chunk_size
    preferred = 64 * 1024 * 1024
    if sc_page_size < preferred:
        _fmincore_chunk_size = sc_page_size * (preferred // sc_page_size)
    else:
        _fmincore_chunk_size = sc_page_size
827 """Return the mincore() data for fd as a bytearray whose values can be
828 tested via MINCORE_INCORE, or None if fd does not fully
829 support the operation."""
831 if (st.st_size == 0):
833 if not _fmincore_chunk_size:
834 _set_fmincore_chunk_size()
835 pages_per_chunk = _fmincore_chunk_size // sc_page_size;
836 page_count = (st.st_size + sc_page_size - 1) // sc_page_size;
837 chunk_count = page_count // _fmincore_chunk_size
840 result = bytearray(page_count)
841 for ci in compat.range(chunk_count):
842 pos = _fmincore_chunk_size * ci;
843 msize = min(_fmincore_chunk_size, st.st_size - pos)
845 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
846 except mmap.error as ex:
847 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
848 # Perhaps the file was a pipe, i.e. "... | bup split ..."
852 _mincore(m, msize, 0, result, ci * pages_per_chunk)
853 except OSError as ex:
854 if ex.errno == errno.ENOSYS:
860 def parse_timestamp(epoch_str):
861 """Return the number of nanoseconds since the epoch that are described
862 by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
863 throw a ValueError that may contain additional information."""
864 ns_per = {'s' : 1000000000,
868 match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
870 if re.match(r'^([-+]?[0-9]+)$', epoch_str):
871 raise ValueError('must include units, i.e. 100ns, 100ms, ...')
873 (n, units) = match.group(1, 2)
877 return n * ns_per[units]
881 """Parse data size information into a float number.
883 Here are some examples of conversions:
884 199.2k means 203981 bytes
885 1GB means 1073741824 bytes
886 2.1 tb means 2199023255552 bytes
888 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
890 raise ValueError("can't parse %r as a number" % s)
891 (val, unit) = g.groups()
894 if unit in ['t', 'tb']:
895 mult = 1024*1024*1024*1024
896 elif unit in ['g', 'gb']:
897 mult = 1024*1024*1024
898 elif unit in ['m', 'mb']:
900 elif unit in ['k', 'kb']:
902 elif unit in ['', 'b']:
905 raise ValueError("invalid unit %r in number %r" % (unit, s))
910 """Count the number of elements in an iterator. (consumes the iterator)"""
911 return reduce(lambda x,y: x+1, l)
916 """Append an error message to the list of saved errors.
918 Once processing is able to stop and output the errors, the saved errors are
919 accessible in the module variable helpers.saved_errors.
921 saved_errors.append(e)
930 def die_if_errors(msg=None, status=1):
934 msg = 'warning: %d errors encountered\n' % len(saved_errors)
940 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
942 The new exception handler will make sure that bup will exit without an ugly
943 stacktrace when Ctrl-C is hit.
945 oldhook = sys.excepthook
946 def newhook(exctype, value, traceback):
947 if exctype == KeyboardInterrupt:
948 log('\nInterrupted.\n')
950 return oldhook(exctype, value, traceback)
951 sys.excepthook = newhook
954 def columnate(l, prefix):
955 """Format elements of 'l' in columns with 'prefix' leading each line.
957 The number of columns is determined automatically based on the string
963 clen = max(len(s) for s in l)
964 ncols = (tty_width() - len(prefix)) // (clen + 2)
969 while len(l) % ncols:
971 rows = len(l) // ncols
972 for s in compat.range(0, len(l), rows):
973 cols.append(l[s:s+rows])
975 for row in zip(*cols):
976 out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
980 def parse_date_or_fatal(str, fatal):
981 """Parses the given date or calls Option.fatal().
982 For now we expect a string that contains a float."""
985 except ValueError as e:
986 raise fatal('invalid date format (should be a float): %r' % e)
991 def parse_excludes(options, fatal):
992 """Traverse the options and extract all excludes, or call Option.fatal()."""
996 (option, parameter) = flag
997 if option == '--exclude':
998 excluded_paths.append(resolve_parent(parameter))
999 elif option == '--exclude-from':
1001 f = open(resolve_parent(parameter))
1002 except IOError as e:
1003 raise fatal("couldn't read %s" % parameter)
1004 for exclude_path in f.readlines():
1005 # FIXME: perhaps this should be rstrip('\n')
1006 exclude_path = resolve_parent(exclude_path.strip())
1008 excluded_paths.append(exclude_path)
1009 return sorted(frozenset(excluded_paths))
1012 def parse_rx_excludes(options, fatal):
1013 """Traverse the options and extract all rx excludes, or call
1015 excluded_patterns = []
1017 for flag in options:
1018 (option, parameter) = flag
1019 if option == '--exclude-rx':
1021 excluded_patterns.append(re.compile(parameter))
1022 except re.error as ex:
1023 fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
1024 elif option == '--exclude-rx-from':
1026 f = open(resolve_parent(parameter))
1027 except IOError as e:
1028 raise fatal("couldn't read %s" % parameter)
1029 for pattern in f.readlines():
1030 spattern = pattern.rstrip('\n')
1034 excluded_patterns.append(re.compile(spattern))
1035 except re.error as ex:
1036 fatal('invalid --exclude-rx pattern (%s): %s' % (spattern, ex))
1037 return excluded_patterns
1040 def should_rx_exclude_path(path, exclude_rxs):
1041 """Return True if path matches a regular expression in exclude_rxs."""
1042 for rx in exclude_rxs:
1044 debug1('Skipping %r: excluded by rx pattern %r.\n'
1045 % (path, rx.pattern))
1050 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
1051 # that resolve against the current filesystem in the strip/graft
1052 # functions for example, but elsewhere as well. I suspect bup's not
1053 # always being careful about that. For some cases, the contents of
1054 # the current filesystem should be irrelevant, and consulting it might
1055 # produce the wrong result, perhaps via unintended symlink resolution,
1058 def path_components(path):
1059 """Break path into a list of pairs of the form (name,
1060 full_path_to_name). Path must start with '/'.
1062 '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
1063 if not path.startswith('/'):
1064 raise Exception('path must start with "/": %s' % path)
1065 # Since we assume path startswith('/'), we can skip the first element.
1066 result = [('', '/')]
1067 norm_path = os.path.abspath(path)
1068 if norm_path == '/':
1071 for p in norm_path.split('/')[1:]:
1072 full_path += '/' + p
1073 result.append((p, full_path))
1077 def stripped_path_components(path, strip_prefixes):
1078 """Strip any prefix in strip_prefixes from path and return a list
1079 of path components where each component is (name,
1080 none_or_full_fs_path_to_name). Assume path startswith('/').
1081 See thelpers.py for examples."""
1082 normalized_path = os.path.abspath(path)
1083 sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
1084 for bp in sorted_strip_prefixes:
1085 normalized_bp = os.path.abspath(bp)
1086 if normalized_bp == '/':
1088 if normalized_path.startswith(normalized_bp):
1089 prefix = normalized_path[:len(normalized_bp)]
1091 for p in normalized_path[len(normalized_bp):].split('/'):
1095 result.append((p, prefix))
1098 return path_components(path)
1101 def grafted_path_components(graft_points, path):
1102 # Create a result that consists of some number of faked graft
1103 # directories before the graft point, followed by all of the real
1104 # directories from path that are after the graft point. Arrange
1105 # for the directory at the graft point in the result to correspond
1106 # to the "orig" directory in --graft orig=new. See t/thelpers.py
1107 # for some examples.
1109 # Note that given --graft orig=new, orig and new have *nothing* to
1110 # do with each other, even if some of their component names
1111 # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
1112 # equivalent to --graft /foo/bar/baz=/x/y/z, or even
1115 # FIXME: This can't be the best solution...
1116 clean_path = os.path.abspath(path)
1117 for graft_point in graft_points:
1118 old_prefix, new_prefix = graft_point
1119 # Expand prefixes iff not absolute paths.
1120 old_prefix = os.path.normpath(old_prefix)
1121 new_prefix = os.path.normpath(new_prefix)
1122 if clean_path.startswith(old_prefix):
1123 escaped_prefix = re.escape(old_prefix)
1124 grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path)
1125 # Handle /foo=/ (at least) -- which produces //whatever.
1126 grafted_path = '/' + grafted_path.lstrip('/')
1127 clean_path_components = path_components(clean_path)
1128 # Count the components that were stripped.
1129 strip_count = 0 if old_prefix == '/' else old_prefix.count('/')
1130 new_prefix_parts = new_prefix.split('/')
1131 result_prefix = grafted_path.split('/')[:new_prefix.count('/')]
1132 result = [(p, None) for p in result_prefix] \
1133 + clean_path_components[strip_count:]
1134 # Now set the graft point name to match the end of new_prefix.
1135 graft_point = len(result_prefix)
1136 result[graft_point] = \
1137 (new_prefix_parts[-1], clean_path_components[strip_count][1])
1138 if new_prefix == '/': # --graft ...=/ is a special case.
1141 return path_components(clean_path)
1147 _localtime = getattr(_helpers, 'localtime', None)
1150 bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
1151 'tm_hour', 'tm_min', 'tm_sec',
1152 'tm_wday', 'tm_yday',
1153 'tm_isdst', 'tm_gmtoff', 'tm_zone'])
1155 # Define a localtime() that returns bup_time when possible. Note:
1156 # this means that any helpers.localtime() results may need to be
1157 # passed through to_py_time() before being passed to python's time
1158 # module, which doesn't appear willing to ignore the extra items.
1160 def localtime(time):
1161 return bup_time(*_helpers.localtime(time))
1162 def utc_offset_str(t):
1163 """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
1164 If the current UTC offset does not represent an integer number
1165 of minutes, the fractional component will be truncated."""
1166 off = localtime(t).tm_gmtoff
1167 # Note: // doesn't truncate like C for negative values, it rounds down.
1168 offmin = abs(off) // 60
1170 h = (offmin - m) // 60
1171 return "%+03d%02d" % (-h if off < 0 else h, m)
1173 if isinstance(x, time.struct_time):
1175 return time.struct_time(x[:9])
1177 localtime = time.localtime
1178 def utc_offset_str(t):
1179 return time.strftime('%z', localtime(t))
1184 _some_invalid_save_parts_rx = re.compile(r'[\[ ~^:?*\\]|\.\.|//|@{')
1186 def valid_save_name(name):
1187 # Enforce a superset of the restrictions in git-check-ref-format(1)
1189 or name.startswith('/') or name.endswith('/') \
1190 or name.endswith('.'):
1192 if _some_invalid_save_parts_rx.search(name):
1195 if ord(c) < 0x20 or ord(c) == 0x7f:
1197 for part in name.split('/'):
1198 if part.startswith('.') or part.endswith('.lock'):
1203 _period_rx = re.compile(r'^([0-9]+)(s|min|h|d|w|m|y)$')
1205 def period_as_secs(s):
1208 match = _period_rx.match(s)
1211 mag = int(match.group(1))
1212 scale = match.group(2)
1213 return mag * {'s': 1,
1217 'w': 60 * 60 * 24 * 7,
1218 'm': 60 * 60 * 24 * 31,
1219 'y': 60 * 60 * 24 * 366}[scale]