1 """Helper functions and classes for bup."""
3 from __future__ import absolute_import, division
4 from collections import namedtuple
5 from contextlib import contextmanager
6 from ctypes import sizeof, c_void_p
9 from subprocess import PIPE, Popen
10 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
11 import hashlib, heapq, math, operator, time, grp, tempfile
13 from bup import _helpers
14 from bup import compat
15 from bup.compat import byte_int
16 from bup.io import path_msg
17 # This function should really be in helpers, not in bup.options. But we
18 # want options.py to be standalone so people can include it in other projects.
19 from bup.options import _tty_width as tty_width
23 """Helper to deal with Python scoping issues"""
# System page size, cached once at import time (used by the mmap/mincore
# helpers below).
sc_page_size = os.sysconf('SC_PAGE_SIZE')
assert(sc_page_size > 0)

# Maximum byte size of an exec() argument block; batchpipe() uses this to
# split long argv lists.
sc_arg_max = os.sysconf('SC_ARG_MAX')
if sc_arg_max == -1: # "no definite limit" - let's choose 2M
    sc_arg_max = 2 * 1024 * 1024
36 for result in iterable:
42 """Convert s (ascii bytes) to an integer. Return 0 if s is not a number."""
50 """Convert s (ascii bytes) to a float. Return 0 if s is not a number."""
52 return float(s or b'0')
# Debug verbosity from BUP_DEBUG; atoi() yields 0 for unset/garbage values.
buglvl = atoi(os.environ.get('BUP_DEBUG', 0))

# Select an fdatasync implementation: prefer os.fdatasync, with a Darwin
# F_FULLFSYNC path (plus an ENOTSUP fallback for e.g. SMB mounts).
# NOTE(review): several lines of this region (the try:, the darwin def,
# and the fallback body) are elided in this view.
_fdatasync = os.fdatasync
except AttributeError:
if sys.platform.startswith('darwin'):
# Apparently os.fsync on OS X doesn't guarantee to sync all the way down
return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
# Fallback for file systems (SMB) that do not support F_FULLFSYNC
if e.errno == errno.ENOTSUP:
fdatasync = _fdatasync
def partition(predicate, stream):
    """Returns (leading_matches_it, rest_it), where leading_matches_it
    must be completely exhausted before traversing rest_it.

    """
    stream = iter(stream)
    # One-element list used as a mutable cell so the nested generators can
    # hand the first non-matching item from leading_matches() to rest()
    # (py2-compatible alternative to 'nonlocal').
    first_nonmatch = [None]
    def leading_matches():
        for x in stream:
            if predicate(x):
                yield x
            else:
                first_nonmatch[0] = (x,)
                break
    def rest():
        # Re-yield the item that ended the leading run, if any, then pass
        # through whatever remains of the stream.
        if first_nonmatch[0]:
            yield first_nonmatch[0][0]
        for x in stream:
            yield x
    return (leading_matches(), rest())
def lines_until_sentinel(f, sentinel, ex_type):
    """Yield newline-terminated lines read from binary stream f until the
    sentinel line is seen (the sentinel itself is not yielded).

    Raise ex_type if EOF (or an unterminated final line) is hit first.
    """
    # sentinel must end with \n and must contain only one \n
    while True:
        line = f.readline()
        if not (line and line.endswith(b'\n')):
            raise ex_type('Hit EOF while reading line')
        if line == sentinel:
            return
        yield line
def stat_if_exists(path):
    """Return os.stat(path) if path exists, or None if it doesn't.

    Any OSError other than ENOENT propagates to the caller.
    """
    try:
        return os.stat(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    return None
131 # Write (blockingly) to sockets that may or may not be in blocking mode.
132 # We need this because our stderr is sometimes eaten by subprocesses
133 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
134 # leading to race conditions. Ick. We'll do it the hard way.
135 def _hard_write(fd, buf):
137 (r,w,x) = select.select([], [fd], [], None)
139 raise IOError('select(fd) returned without being writable')
141 sz = os.write(fd, buf)
143 if e.errno != errno.EAGAIN:
# log(): fragment -- emits s (str or bytes) to stderr via _hard_write so
# partial writes/nonblocking fds can't drop output.
"""Print a log message to stderr."""
_hard_write(sys.stderr.fileno(), s if isinstance(s, bytes) else s.encode())

# Treat fd 1/fd 2 as TTYs when they really are, or when the matching bit
# of BUP_FORCE_TTY is set (bit 1 = stdout, bit 2 = stderr).
istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)

"""Calls log() if stderr is a TTY. Does nothing otherwise."""
global _last_progress

"""Calls progress() only if we haven't printed progress in a while.

This avoids overloading the stderr buffer with excess junk.
"""
# Rate limit: at most one progress update per 0.1s.
if now - _last_prog > 0.1:

"""Calls progress() to redisplay the most recent progress message.

Useful after you've printed some other message that wipes out the
progress line.
"""
if _last_progress and _last_progress.endswith('\r'):
    progress(_last_progress)
def mkdirp(d, mode=None):
    """Recursively create directories on path 'd'.

    Unlike os.makedirs(), it doesn't raise an exception if the last element of
    the path already exists.
    """
    try:
        if mode:
            os.makedirs(d, mode)
        else:
            os.makedirs(d)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise
class MergeIterItem:
    """Heap entry for merge_iter(): pairs an entry with the iterator that
    produced it, ordered by the entry so heapq keeps the smallest head."""
    def __init__(self, entry, read_it):
        self.entry = entry
        self.read_it = read_it
    def __lt__(self, x):
        return self.entry < x.entry
def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
    # Merge several sorted iterators with a heap, yielding each distinct
    # entry once; pfunc(count, total) is called every pfreq entries and
    # pfinal(count, total) at the end.
    # NOTE(review): portions of this body are elided in this view.
    # With a key, entries are "the same" when that attribute matches the
    # previously yielded entry's; otherwise compare entries directly.
    samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
    samekey = operator.eq
    total = sum(len(it) for it in iters)
    iters = (iter(it) for it in iters)
    heap = ((next(it, None),it) for it in iters)
    # Seed the heap with each iterator's first entry, dropping iterators
    # that are already exhausted.
    heap = [MergeIterItem(e, it) for e, it in heap if e]
    if not count % pfreq:
    e, it = heap[0].entry, heap[0].read_it
    # Only yield entries that differ from the previous one (dedup).
    if not samekey(e, pe):
    except StopIteration:
        heapq.heappop(heap) # remove current
    # shift current to new location
    heapq.heapreplace(heap, MergeIterItem(e, it))
def unlink(f):
    """Delete a file at path 'f' if it currently exists.

    Unlike os.unlink(), does not throw an exception if the file didn't already
    exist.
    """
    try:
        os.unlink(f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
# Identifiers safe to pass to a POSIX shell without quoting.
_bq_simple_id_rx = re.compile(br'^[-_./a-zA-Z0-9]+$')
_sq_simple_id_rx = re.compile(r'^[-_./a-zA-Z0-9]+$')

def bquote(x):
    """Return bytes x single-quoted for a POSIX shell (identity for
    simple identifiers)."""
    if x == b'':
        return b"''"
    if _bq_simple_id_rx.match(x):
        return x
    return b"'%s'" % x.replace(b"'", b"'\"'\"'")

def squote(x):
    """Return str x single-quoted for a POSIX shell (identity for
    simple identifiers)."""
    if x == '':
        return "''"
    if _sq_simple_id_rx.match(x):
        return x
    return "'%s'" % x.replace("'", "'\"'\"'")
# quote(): fragment -- dispatch to bquote/squote based on x's type.
if isinstance(x, bytes):
if isinstance(x, compat.str_type):

"""Return a shell quoted string for cmd if it's a sequence, else cmd.

cmd must be a string, bytes, or a sequence of one or the other,
and the assumption is that if cmd is a string or bytes, then it's
already quoted (because it's what's actually being passed to
call() and friends). e.g. log(shstr(cmd)); call(cmd)
"""
if isinstance(cmd, (bytes, compat.str_type)):
elif all(isinstance(x, bytes) for x in cmd):
    return b' '.join(map(bquote, cmd))
elif all(isinstance(x, compat.str_type) for x in cmd):
    return ' '.join(map(squote, cmd))
raise TypeError('unsupported shstr argument: ' + repr(cmd))
# Convenience alias: run a command, raising CalledProcessError on a
# nonzero exit status.
exc = subprocess.check_call

# exo(): fragment -- run cmd capturing stdout (and optionally feeding
# stdin / capturing stderr); its signature is elided in this view.
assert stdin in (None, PIPE)
stdin=stdin, stdout=PIPE, stderr=stderr,
preexec_fn=preexec_fn)
out, err = p.communicate(input)
if check and p.returncode != 0:
    raise Exception('subprocess %r failed with status %d, stderr: %r'
                    % (b' '.join(map(quote, cmd)), p.returncode, err))
def readpipe(argv, preexec_fn=None, shell=False):
    """Run a subprocess and return its output (stdout, as bytes).

    Raise an Exception that includes the command's exit status if it
    exits nonzero.
    """
    p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn,
                         shell=shell)
    out, err = p.communicate()
    if p.returncode != 0:
        raise Exception('subprocess %r failed with status %d'
                        % (b' '.join(argv), p.returncode))
    return out
def _argmax_base(command):
    # Estimate the fixed cost (in bytes) that command plus the current
    # environment contribute toward ARG_MAX for an exec'd child.
    # NOTE(review): this accumulates len(command) (not len(c)) once per
    # element of command -- looks suspicious; confirm against upstream.
    base_size += len(command) + 1
    for k, v in compat.items(environ):
        # Each environment entry costs its text plus two NUL terminators
        # and a pointer slot in the child's environment block.
        base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
354 def _argmax_args_size(args):
355 return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
    """If args is not empty, yield the output produced by calling the
    command list with args as a sequence of strings (It may be necessary
    to return multiple strings in order to respect ARG_MAX)."""
    # The optional arg_max arg is a workaround for an issue with the
    # current wvtest behavior.
    base_size = _argmax_base(command)
    # room: bytes left for arguments once command+environment overhead is
    # subtracted; args are consumed greedily up to that limit per batch.
    # NOTE(review): the batching loop around these lines is elided here.
    room = arg_max - base_size
    next_size = _argmax_args_size(args[i:i+1])
    if room - next_size < 0:
    # Each batch must be non-empty or we'd loop forever.
    assert(len(sub_args))
    yield readpipe(command + sub_args, preexec_fn=preexec_fn)
def resolve_parent(p):
    """Return the absolute path of a file without following any final symlink.

    Behaves like os.path.realpath, but doesn't follow a symlink for the last
    element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
    will follow symlinks in p's directory)
    """
    try:
        st = os.lstat(p)
    except OSError:
        st = None
    if st and stat.S_ISLNK(st.st_mode):
        # Resolve only the directory part; keep the final symlink name.
        (dir, name) = os.path.split(p)
        dir = os.path.realpath(dir)
        out = os.path.join(dir, name)
    else:
        out = os.path.realpath(p)
    #log('realpathing:%r,%r\n' % (p, out))
    return out
def detect_fakeroot():
    "Return True if we appear to be running under fakeroot."
    # fakeroot exports FAKEROOTKEY into its children's environment; use an
    # identity test rather than comparing against None with != .
    return os.getenv("FAKEROOTKEY") is not None
def is_superuser():
    """Return true if this process appears to have superuser rights.

    On Cygwin, membership in the Administrators (544) or root (0) group is
    what matters, not the euid.
    """
    if sys.platform.startswith('cygwin'):
        # https://cygwin.com/ml/cygwin/2015-02/msg00057.html
        groups = os.getgroups()
        return 544 in groups or 0 in groups
    else:
        return os.geteuid() == 0
def cache_key_value(get_value, key, cache):
    """Return (value, was_cached). If there is a value in the cache
    for key, use that, otherwise, call get_value(key) which should
    throw a KeyError if there is no value -- in which case the cached
    and returned value will be None.
    """
    try: # Do we already have it (or know there wasn't one)?
        value = cache[key]
        return value, True
    except KeyError:
        pass
    value = None
    try:
        cache[key] = value = get_value(key)
    except KeyError:
        # Remember the miss so we don't call get_value again for key.
        cache[key] = None
    return value, False
437 """Get the FQDN of this machine."""
440 _hostname = socket.getfqdn().encode('iso-8859-1')
def format_filesize(size):
    """Return a compact, human-readable string for a byte count.

    Sizes below 1K are plain integers; larger sizes get one decimal digit
    and a K/M/G/T/P/E suffix (e.g. 1536 -> '1.5K').
    """
    unit = 1024.0
    size = float(size)
    if size < unit:
        return "%d" % (size)
    exponent = int(math.log(size) // math.log(unit))
    size_prefix = "KMGTPE"[exponent - 1]
    # Must be true division: floor division would always print "x.0".
    return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
# Raised when the server reports an error over the protocol stream.
class NotOk(Exception):

# NOTE(review): the class statement for the base connection class that
# owns the methods below is elided in this view.
    def __init__(self, outp):

        # close(): drain remaining input so the far end isn't left blocked.
        while self._read(65536): pass

    def read(self, size):
        """Read 'size' bytes from input stream."""
        return self._read(size)

    """Read from input stream until a newline is found."""
    return self._readline()

    def write(self, data):
        """Write 'data' to output stream."""
        #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
        self.outp.write(data)

    """Return true if input stream is readable."""
    # NOTE(review): NotImplemented is not an exception type; raising it
    # produces a TypeError at runtime.  This should almost certainly be
    # NotImplementedError.
    raise NotImplemented("Subclasses must implement has_input")

    """Indicate end of output from last sent command."""
    self.write(b'\nok\n')

    """Indicate server error to the client."""
    # Collapse whitespace so the message fits one protocol line.
    s = re.sub(br'\s+', b' ', s)
    self.write(b'\nerror %s\n' % s)

    def _check_ok(self, onempty):
        # Scan lines for the trailing 'ok'/'error ...' status marker.
        for rl in linereader(self):
            #log('%d got line: %r\n' % (os.getpid(), rl))
            if not rl: # empty line
            elif rl.startswith(b'error '):
                #log('client: error: %s\n' % rl[6:])
        raise Exception('server exited unexpectedly; see errors above')

    def drain_and_check_ok(self):
        """Remove all data for the current command from input stream."""
        return self._check_ok(onempty)

    """Verify that server action completed successfully."""
    raise Exception('expected "ok", got %r' % rl)
    return self._check_ok(onempty)
# Simple connection over a pair of already-open streams (no muxing).
class Conn(BaseConn):
    def __init__(self, inp, outp):
        BaseConn.__init__(self, outp)

    def _read(self, size):
        # Direct, possibly-blocking read from the underlying stream.
        return self.inp.read(size)

    return self.inp.readline()

    # has_input(): poll with timeout 0 to see if a read would block.
    [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
    assert(rl[0] == self.inp.fileno())
def checked_reader(fd, n):
    """Yield chunks read from fd until exactly n bytes have been produced.

    Raise an Exception if EOF arrives before n bytes do.
    """
    while n > 0:
        rl, _, _ = select.select([fd], [], [])
        assert(rl[0] == fd)
        buf = os.read(fd, n)
        if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
        yield buf
        n -= len(buf)
# Maximum payload for one mux frame (the '!IB' header carries the length
# and a one-byte stream id).
MAX_PACKET = 128 * 1024
def mux(p, outfd, outr, errr):
    # Multiplex subprocess p's stdout (outr) and stderr (errr) onto outfd
    # as length-prefixed frames: stream 1 = stdout, 2 = stderr; a
    # zero-length stream-3 frame marks the end of the mux stream.
    # NOTE(review): the per-fd dispatch/EOF checks are elided in this view.
    while p.poll() is None:
        rl, _, _ = select.select(fds, [], [])
        buf = os.read(outr, MAX_PACKET)
        os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
        buf = os.read(errr, 1024)
        os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
    os.write(outfd, struct.pack('!IB', 0, 3))
class DemuxConn(BaseConn):
    """A helper class for bup's client-server protocol."""
    def __init__(self, infd, outp):
        BaseConn.__init__(self, outp)
        # Anything that comes through before the sync string was not
        # multiplexed and can be assumed to be debug/log before mux init.
        # NOTE(review): os.read() returns bytes, so comparing tail against
        # the str 'BUPMUX' can never succeed on py3 -- this should
        # presumably be b'BUPMUX'; confirm against upstream.
        while tail != 'BUPMUX':
            # Read one byte at a time once we have 6 bytes, so we don't
            # overshoot past the sync marker.
            b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
            raise IOError('demux: unexpected EOF during initialization')
        # NOTE(review): writing bytes to the text-mode sys.stderr fails on
        # py3; verify how pre-mux log bytes are meant to be emitted.
        sys.stderr.write(tail[:-6]) # pre-mux log messages

    def write(self, data):
        BaseConn.write(self, data)

    def _next_packet(self, timeout):
        # Read one frame header; dispatch payload by stream id.
        if self.closed: return False
        rl, wl, xl = select.select([self.infd], [], [], timeout)
        if not rl: return False
        assert(rl[0] == self.infd)
        # NOTE(review): checked_reader yields bytes, so this should be
        # b''.join(...) -- struct.unpack below requires a bytes buffer.
        ns = ''.join(checked_reader(self.infd, 5))
        n, fdw = struct.unpack('!IB', ns)
        assert(n <= MAX_PACKET)
        # Stream 1: payload for the reader.
        self.reader = checked_reader(self.infd, n)
        # Stream 2: remote stderr, echoed locally.
        for buf in checked_reader(self.infd, n):
            sys.stderr.write(buf)
        # Stream 3: zero-length close marker.
        debug2("DemuxConn: marked closed\n")

    def _load_buf(self, timeout):
        # Ensure self.buf holds the next chunk (or return False on close).
        if self.buf is not None:
        while not self.closed:
            while not self.reader:
                if not self._next_packet(timeout):
            self.buf = next(self.reader)
        except StopIteration:

    def _read_parts(self, ix_fn):
        # Yield buffered parts until ix_fn reports a split point.
        while self._load_buf(None):
            assert(self.buf is not None)
            if i is None or i == len(self.buf):
            self.buf = self.buf[i:]

    # _readline helper: split after the first newline.
    # NOTE(review): self.buf chunks are bytes; '\n' (and the ''.join
    # below) should presumably be b'\n' / b''.join on py3 -- confirm.
    return buf.index('\n')+1
    return ''.join(self._read_parts(find_eol))

    def _read(self, size):
        # until_size closes over csize so the nested checker can shrink it.
        def until_size(buf): # Closes on csize
            if len(buf) < csize[0]:
        return ''.join(self._read_parts(until_size))

    return self._load_buf(0)
663 """Generate a list of input lines from 'f' without terminating newlines."""
def chunkyreader(f, count = None):
    """Generate a list of chunks of data read from 'f'.

    If count is None, read until EOF is reached.

    If count is a positive integer, read 'count' bytes from 'f'. If EOF is
    reached while reading, raise IOError.
    """
    if count is not None:
        while count > 0:
            b = f.read(min(count, 65536))
            if not b:
                raise IOError('EOF with %d bytes remaining' % count)
            yield b
            count -= len(b)
    else:
        while 1:
            b = f.read(65536)
            if not b:
                break
            yield b
@contextmanager
def atomically_replaced_file(name, mode='w', buffering=-1):
    """Yield a file that will be atomically renamed name when leaving the block.

    This contextmanager yields an open file object that is backed by a
    temporary file which will be renamed (atomically) to the target
    name if everything succeeds.

    The mode and buffering arguments are handled exactly as with open,
    and the yielded file will have very restrictive permissions, as
    per mkstemp.

    E.g.::

        with atomically_replaced_file('foo.txt', 'w') as f:
            f.write('hello jack.')

    """
    # The temp file must be on the same filesystem as name for the final
    # rename() to be atomic.
    (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
                                       text=('b' not in mode))
    try:
        try:
            f = os.fdopen(ffd, mode, buffering)
        except:
            # fdopen failed, so no file object owns the fd yet; close it
            # ourselves before re-raising.
            os.close(ffd)
            raise
        try:
            yield f
        finally:
            f.close()
        # Only move into place after a fully successful write + close.
        os.rename(tempname, name)
    finally:
        unlink(tempname)  # nonexistant file is ignored
def slashappend(s):
    """Append "/" to 's' if it doesn't already end in "/"."""
    assert isinstance(s, bytes)
    if s and not s.endswith(b'/'):
        return s + b'/'
    else:
        return s
def _mmap_do(f, sz, flags, prot, close):
    # Shared worker for the mmap_* wrappers below; a sz of 0 means "map
    # the whole file" (size taken from fstat).
    # NOTE(review): parts of this body (the zero-size early return and the
    # 'if close:' guard) are elided in this view.
    st = os.fstat(f.fileno())
    # trying to open a zero-length map gives an error, but an empty
    # string has all the same behaviour of a zero-length map, ie. it has
    map = mmap.mmap(f.fileno(), sz, flags, prot)
    f.close() # map will persist beyond file close

def mmap_read(f, sz = 0, close=True):
    """Create a read-only memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)

def mmap_readwrite(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    """
    return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,

def mmap_readwrite_private(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    The map is private, which means the changes are never flushed back to the
    file.
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
# mincore() comes from the C helpers when the platform provides it.
_mincore = getattr(_helpers, 'mincore', None)

# ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)

# Computed lazily by _set_fmincore_chunk_size() on first use.
_fmincore_chunk_size = None
def _set_fmincore_chunk_size():
    """Set _fmincore_chunk_size to a whole multiple of sc_page_size that
    is as close to (but not over) 64MB as possible."""
    global _fmincore_chunk_size
    preferred = 64 * 1024 * 1024
    if sc_page_size < preferred:
        _fmincore_chunk_size = sc_page_size * (preferred // sc_page_size)
    else:
        # Pages are already at least as big as we'd like; use one page.
        _fmincore_chunk_size = sc_page_size
"""Return the mincore() data for fd as a bytearray whose values can be
tested via MINCORE_INCORE, or None if fd does not fully
support the operation."""
# Empty files have no pages to report.
if (st.st_size == 0):
if not _fmincore_chunk_size:
    _set_fmincore_chunk_size()
pages_per_chunk = _fmincore_chunk_size // sc_page_size;
page_count = (st.st_size + sc_page_size - 1) // sc_page_size;
# NOTE(review): page_count is in pages while _fmincore_chunk_size is in
# bytes, so this division looks like a unit mismatch -- the chunk count
# would normally be ceil over pages_per_chunk (or over bytes of st_size).
# Confirm against upstream.
chunk_count = page_count // _fmincore_chunk_size
result = bytearray(page_count)
for ci in compat.range(chunk_count):
    pos = _fmincore_chunk_size * ci;
    # The final chunk may be shorter than the preferred chunk size.
    msize = min(_fmincore_chunk_size, st.st_size - pos)
    m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
except mmap.error as ex:
    if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
        # Perhaps the file was a pipe, i.e. "... | bup split ..."
_mincore(m, msize, 0, result, ci * pages_per_chunk)
except OSError as ex:
    # The C helper may not be usable on this kernel/filesystem.
    if ex.errno == errno.ENOSYS:
def parse_timestamp(epoch_str):
    """Return the number of nanoseconds since the epoch that are described
    by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
    throw a ValueError that may contain additional information."""
    ns_per = {'s' : 1000000000,
              'ms' : 1000000,
              'us' : 1000,
              'ns' : 1}
    match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
    if not match:
        # A bare number is the most likely mistake; give a hint.
        if re.match(r'^([-+]?[0-9]+)$', epoch_str):
            raise ValueError('must include units, i.e. 100ns, 100ms, ...')
        raise ValueError()
    (n, units) = match.group(1, 2)
    if not n:
        n = 1
    n = int(n)
    return n * ns_per[units]
def parse_num(s):
    """Parse string or bytes as a possibly unit suffixed number.

    For example:
        199.2k means 203981 bytes
        1GB means 1073741824 bytes
        2.1 tb means 2199023255552 bytes
    """
    if isinstance(s, bytes):
        # FIXME: should this raise a ValueError for UnicodeDecodeError
        # (perhaps with the latter as the context).
        s = s.decode('ascii')
    g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
    if not g:
        raise ValueError("can't parse %r as a number" % s)
    (val, unit) = g.groups()
    num = float(val)
    unit = unit.lower()
    if unit in ['t', 'tb']:
        mult = 1024*1024*1024*1024
    elif unit in ['g', 'gb']:
        mult = 1024*1024*1024
    elif unit in ['m', 'mb']:
        mult = 1024*1024
    elif unit in ['k', 'kb']:
        mult = 1024
    elif unit in ['', 'b']:
        mult = 1
    else:
        raise ValueError("invalid unit %r in number %r" % (unit, s))
    return int(num*mult)
"""Append an error message to the list of saved errors.

Once processing is able to stop and output the errors, the saved errors are
accessible in the module variable helpers.saved_errors.
"""
saved_errors.append(e)

def die_if_errors(msg=None, status=1):
    # Exit with status after logging msg -- or, when msg is None, this
    # default summary of the errors recorded via add_error().
    msg = 'warning: %d errors encountered\n' % len(saved_errors)
def handle_ctrl_c():
    """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).

    The new exception handler will make sure that bup will exit without an ugly
    stacktrace when Ctrl-C is hit.
    """
    oldhook = sys.excepthook
    def newhook(exctype, value, traceback):
        if exctype == KeyboardInterrupt:
            log('\nInterrupted.\n')
        else:
            # Anything else gets the previous (usually default) treatment.
            return oldhook(exctype, value, traceback)
    sys.excepthook = newhook
def columnate(l, prefix):
    """Format elements of 'l' in columns with 'prefix' leading each line.

    The number of columns is determined automatically based on the string
    lengths and the terminal width.
    """
    # NOTE(review): the empty-list guard and several assignments in this
    # body are elided in this view.
    clen = max(len(s) for s in l)
    ncols = (tty_width() - len(prefix)) // (clen + 2)
    # Pad l so it divides evenly into ncols columns.
    while len(l) % ncols:
    rows = len(l) // ncols
    for s in compat.range(0, len(l), rows):
        cols.append(l[s:s+rows])
    for row in zip(*cols):
        out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
def parse_date_or_fatal(str, fatal):
    """Parses the given date or calls Option.fatal().
    For now we expect a string that contains a float."""
    # (The parameter name 'str' shadows the builtin; kept for interface
    # compatibility with existing callers.)
    try:
        date = float(str)
    except ValueError as e:
        raise fatal('invalid date format (should be a float): %r' % e)
    else:
        return date
def parse_excludes(options, fatal):
    """Traverse the options and extract all excludes, or call Option.fatal()."""
    excluded_paths = []
    for flag in options:
        (option, parameter) = flag
        if option == '--exclude':
            excluded_paths.append(resolve_parent(parameter))
        elif option == '--exclude-from':
            try:
                f = open(resolve_parent(parameter))
            except IOError as e:
                raise fatal("couldn't read %s" % parameter)
            # Close the file deterministically (the original leaked the
            # handle until garbage collection).
            with f:
                for exclude_path in f.readlines():
                    # FIXME: perhaps this should be rstrip('\n')
                    exclude_path = resolve_parent(exclude_path.strip())
                    if exclude_path:
                        excluded_paths.append(exclude_path)
    # Deduplicate and return in a stable order.
    return sorted(frozenset(excluded_paths))
def parse_rx_excludes(options, fatal):
    """Traverse the options and extract all rx excludes, or call
    Option.fatal()."""
    excluded_patterns = []
    for flag in options:
        (option, parameter) = flag
        if option == '--exclude-rx':
            try:
                excluded_patterns.append(re.compile(parameter))
            except re.error as ex:
                fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
        elif option == '--exclude-rx-from':
            try:
                f = open(resolve_parent(parameter))
            except IOError as e:
                raise fatal("couldn't read %s" % parameter)
            # Close the file deterministically (the original leaked the
            # handle until garbage collection).
            with f:
                for pattern in f.readlines():
                    spattern = pattern.rstrip('\n')
                    if not spattern:
                        continue
                    try:
                        excluded_patterns.append(re.compile(spattern))
                    except re.error as ex:
                        fatal('invalid --exclude-rx pattern (%s): %s'
                              % (spattern, ex))
    return excluded_patterns
def should_rx_exclude_path(path, exclude_rxs):
    """Return True if path matches a regular expression in exclude_rxs."""
    for rx in exclude_rxs:
        if rx.search(path):
            debug1('Skipping %r: excluded by rx pattern %r.\n'
                   % (path, rx.pattern))
            return True
    return False
1015 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
1016 # that resolve against the current filesystem in the strip/graft
1017 # functions for example, but elsewhere as well. I suspect bup's not
1018 # always being careful about that. For some cases, the contents of
1019 # the current filesystem should be irrelevant, and consulting it might
1020 # produce the wrong result, perhaps via unintended symlink resolution,
def path_components(path):
    """Break path into a list of pairs of the form (name,
    full_path_to_name). Path must start with '/'.
    Example:
      '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
    if not path.startswith(b'/'):
        raise Exception('path must start with "/": %s' % path_msg(path))
    # Since we assume path startswith('/'), we can skip the first element.
    result = [(b'', b'/')]
    norm_path = os.path.abspath(path)
    if norm_path == b'/':
        return result
    full_path = b''
    for p in norm_path.split(b'/')[1:]:
        full_path += b'/' + p
        result.append((p, full_path))
    return result
def stripped_path_components(path, strip_prefixes):
    """Strip any prefix in strip_prefixes from path and return a list
    of path components where each component is (name,
    none_or_full_fs_path_to_name). Assume path startswith('/').
    See thelpers.py for examples."""
    normalized_path = os.path.abspath(path)
    # Try longer prefixes first so the most specific one wins.
    sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
    for bp in sorted_strip_prefixes:
        normalized_bp = os.path.abspath(bp)
        if normalized_bp == b'/':
            continue
        if normalized_path.startswith(normalized_bp):
            prefix = normalized_path[:len(normalized_bp)]
            result = []
            for p in normalized_path[len(normalized_bp):].split(b'/'):
                if p: # not b'' from leading or trailing slashes
                    prefix += b'/' + p
                result.append((p, prefix))
            return result
    # No prefix matched; fall back to the unstripped components.
    return path_components(path)
def grafted_path_components(graft_points, path):
    """Return (name, maybe_fs_path) components for path with the first
    matching --graft old=new replacement applied; when no graft point
    matches, fall back to path_components(path)."""
    # Create a result that consists of some number of faked graft
    # directories before the graft point, followed by all of the real
    # directories from path that are after the graft point. Arrange
    # for the directory at the graft point in the result to correspond
    # to the "orig" directory in --graft orig=new. See t/thelpers.py
    # for some examples.

    # Note that given --graft orig=new, orig and new have *nothing* to
    # do with each other, even if some of their component names
    # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
    # equivalent to --graft /foo/bar/baz=/x/y/z, or even

    # FIXME: This can't be the best solution...
    clean_path = os.path.abspath(path)
    for graft_point in graft_points:
        old_prefix, new_prefix = graft_point
        # Expand prefixes iff not absolute paths.
        old_prefix = os.path.normpath(old_prefix)
        new_prefix = os.path.normpath(new_prefix)
        if clean_path.startswith(old_prefix):
            escaped_prefix = re.escape(old_prefix)
            grafted_path = re.sub(br'^' + escaped_prefix, new_prefix, clean_path)
            # Handle /foo=/ (at least) -- which produces //whatever.
            grafted_path = b'/' + grafted_path.lstrip(b'/')
            clean_path_components = path_components(clean_path)
            # Count the components that were stripped.
            strip_count = 0 if old_prefix == b'/' else old_prefix.count(b'/')
            new_prefix_parts = new_prefix.split(b'/')
            result_prefix = grafted_path.split(b'/')[:new_prefix.count(b'/')]
            result = [(p, None) for p in result_prefix] \
                + clean_path_components[strip_count:]
            # Now set the graft point name to match the end of new_prefix.
            graft_point = len(result_prefix)
            result[graft_point] = \
                (new_prefix_parts[-1], clean_path_components[strip_count][1])
            if new_prefix == b'/': # --graft ...=/ is a special case.
                # NOTE(review): the returns for the grafted result are
                # elided in this view.
    return path_components(clean_path)
# Prefer the C helper's localtime, which also reports gmtoff/zone.
_localtime = getattr(_helpers, 'localtime', None)

# A time tuple like time.struct_time, extended with gmtoff/zone fields.
bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
                                   'tm_hour', 'tm_min', 'tm_sec',
                                   'tm_wday', 'tm_yday',
                                   'tm_isdst', 'tm_gmtoff', 'tm_zone'])

# Define a localtime() that returns bup_time when possible. Note:
# this means that any helpers.localtime() results may need to be
# passed through to_py_time() before being passed to python's time
# module, which doesn't appear willing to ignore the extra items.
def localtime(time):
    return bup_time(*_helpers.localtime(floor(time)))
def utc_offset_str(t):
    """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
    If the current UTC offset does not represent an integer number
    of minutes, the fractional component will be truncated."""
    off = localtime(t).tm_gmtoff
    # Note: // doesn't truncate like C for negative values, it rounds down.
    offmin = abs(off) // 60
    h = (offmin - m) // 60
    return b'%+03d%02d' % (-h if off < 0 else h, m)

# to_py_time(): strip the extra fields for stdlib consumers.
if isinstance(x, time.struct_time):
    return time.struct_time(x[:9])

# Fallback when the C helper is unavailable.
localtime = time.localtime
def utc_offset_str(t):
    # NOTE(review): time.strftime expects a str format (and returns str)
    # on py3, while the branch above returns bytes -- confirm b'%z' works
    # on the targeted interpreters.
    return time.strftime(b'%z', localtime(t))
# Characters/sequences git-check-ref-format(1) forbids (a superset).
_some_invalid_save_parts_rx = re.compile(br'[\[ ~^:?*\\]|\.\.|//|@{')

def valid_save_name(name):
    """Return True if bytes name is acceptable as a bup save/branch name."""
    # Enforce a superset of the restrictions in git-check-ref-format(1)
    if name == b'@' \
       or name.startswith(b'/') or name.endswith(b'/') \
       or name.endswith(b'.'):
        return False
    if _some_invalid_save_parts_rx.search(name):
        return False
    # No control characters or DEL anywhere in the name.
    for c in name:
        if byte_int(c) < 0x20 or byte_int(c) == 0x7f:
            return False
    for part in name.split(b'/'):
        if part.startswith(b'.') or part.endswith(b'.lock'):
            return False
    return True
_period_rx = re.compile(r'^([0-9]+)(s|min|h|d|w|m|y)$')

def period_as_secs(s):
    """Return the number of seconds in the period described by s (e.g.
    '10d'), float('inf') for 'forever', or None when s can't be parsed.

    Months and years use fixed upper-bound lengths (31 and 366 days)."""
    if s == 'forever':
        return float('inf')
    match = _period_rx.match(s)
    if not match:
        return None
    mag = int(match.group(1))
    scale = match.group(2)
    return mag * {'s': 1,
                  'min': 60,
                  'h': 60 * 60,
                  'd': 60 * 60 * 24,
                  'w': 60 * 60 * 24 * 7,
                  'm': 60 * 60 * 24 * 31,
                  'y': 60 * 60 * 24 * 366}[scale]