1 """Helper functions and classes for bup."""
3 from __future__ import absolute_import, division
4 from collections import namedtuple
5 from contextlib import contextmanager
6 from ctypes import sizeof, c_void_p
9 from subprocess import PIPE, Popen
10 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
11 import hashlib, heapq, math, operator, time, grp, tempfile
13 from bup import _helpers
14 from bup import compat
15 from bup.compat import argv_bytes, byte_int
16 from bup.io import byte_stream, path_msg
17 # This function should really be in helpers, not in bup.options. But we
18 # want options.py to be standalone so people can include it in other projects.
19 from bup.options import _tty_width as tty_width
23 """Helper to deal with Python scoping issues"""
# System page size in bytes; required by the mmap/mincore helpers below.
sc_page_size = os.sysconf('SC_PAGE_SIZE')
assert(sc_page_size > 0)

# Maximum byte count for exec() arguments; when the OS reports "no
# definite limit" (-1), fall back to a conservative 2MB.
sc_arg_max = os.sysconf('SC_ARG_MAX')
if sc_arg_max == -1: # "no definite limit" - let's choose 2M
    sc_arg_max = 2 * 1024 * 1024
36 for result in iterable:
42 """Convert s (ascii bytes) to an integer. Return 0 if s is not a number."""
def atof(s):
    """Convert s (ascii bytes) to a float. Return 0 if s is not a number."""
    value = s or b'0'
    try:
        return float(value)
    except ValueError:
        return 0
57 buglvl = atoi(os.environ.get('BUP_DEBUG', 0))
61 _fdatasync = os.fdatasync
62 except AttributeError:
65 if sys.platform.startswith('darwin'):
66 # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
70 return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
72 # Fallback for file systems (SMB) that do not support F_FULLFSYNC
73 if e.errno == errno.ENOTSUP:
78 fdatasync = _fdatasync
81 def partition(predicate, stream):
82 """Returns (leading_matches_it, rest_it), where leading_matches_it
83 must be completely exhausted before traversing rest_it.
88 ns.first_nonmatch = None
89 def leading_matches():
94 ns.first_nonmatch = (x,)
98 yield ns.first_nonmatch[0]
101 return (leading_matches(), rest())
def lines_until_sentinel(f, sentinel, ex_type):
    """Yield lines read from f until sentinel is seen; the sentinel itself
    is consumed but not yielded.  Raise ex_type if EOF arrives first.
    """
    # sentinel must end with \n and must contain only one \n
    while True:
        line = f.readline()
        if not (line and line.endswith(b'\n')):
            raise ex_type('Hit EOF while reading line')
        if line == sentinel:
            return
        yield line
def stat_if_exists(path):
    """Return os.stat(path) when path exists, None when it doesn't.

    Errors other than ENOENT propagate to the caller.
    """
    try:
        return os.stat(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    return None
131 # Write (blockingly) to sockets that may or may not be in blocking mode.
132 # We need this because our stderr is sometimes eaten by subprocesses
133 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
134 # leading to race conditions. Ick. We'll do it the hard way.
135 def _hard_write(fd, buf):
137 (r,w,x) = select.select([], [fd], [], None)
139 raise IOError('select(fd) returned without being writable')
141 sz = os.write(fd, buf)
143 if e.errno != errno.EAGAIN:
151 """Print a log message to stderr."""
154 _hard_write(sys.stderr.fileno(), s if isinstance(s, bytes) else s.encode())
# Truthy when stdout (fd 1) / stderr (fd 2) is a terminal, or when the
# matching bit (1 for stdout, 2 for stderr) is set in BUP_FORCE_TTY.
istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
172 """Calls log() if stderr is a TTY. Does nothing otherwise."""
173 global _last_progress
180 """Calls progress() only if we haven't printed progress in a while.
182 This avoids overloading the stderr buffer with excess junk.
186 if now - _last_prog > 0.1:
192 """Calls progress() to redisplay the most recent progress message.
194 Useful after you've printed some other message that wipes out the
197 if _last_progress and _last_progress.endswith('\r'):
198 progress(_last_progress)
201 def mkdirp(d, mode=None):
202 """Recursively create directories on path 'd'.
204 Unlike os.makedirs(), it doesn't raise an exception if the last element of
205 the path already exists.
213 if e.errno == errno.EEXIST:
class MergeIterItem:
    """Heap entry pairing a merge candidate with the iterator it came from.

    Ordered by entry so heapq can keep the smallest candidate on top.
    """
    def __init__(self, entry, read_it):
        self.entry = entry
        self.read_it = read_it

    def __lt__(self, x):
        return self.entry < x.entry
226 def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
228 samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
230 samekey = operator.eq
232 total = sum(len(it) for it in iters)
233 iters = (iter(it) for it in iters)
234 heap = ((next(it, None),it) for it in iters)
235 heap = [MergeIterItem(e, it) for e, it in heap if e]
240 if not count % pfreq:
242 e, it = heap[0].entry, heap[0].read_it
243 if not samekey(e, pe):
249 except StopIteration:
250 heapq.heappop(heap) # remove current
252 # shift current to new location
253 heapq.heapreplace(heap, MergeIterItem(e, it))
258 """Delete a file at path 'f' if it currently exists.
260 Unlike os.unlink(), does not throw an exception if the file didn't already
266 if e.errno != errno.ENOENT:
# Identifiers that are safe to pass to a shell without quoting
# (bytes and str variants, used by bquote/squote respectively).
_bq_simple_id_rx = re.compile(br'^[-_./a-zA-Z0-9]+$')
_sq_simple_id_rx = re.compile(r'^[-_./a-zA-Z0-9]+$')
276 if _bq_simple_id_rx.match(x):
278 return b"'%s'" % x.replace(b"'", b"'\"'\"'")
283 if _sq_simple_id_rx.match(x):
285 return "'%s'" % x.replace("'", "'\"'\"'")
288 if isinstance(x, bytes):
290 if isinstance(x, compat.str_type):
295 """Return a shell quoted string for cmd if it's a sequence, else cmd.
297 cmd must be a string, bytes, or a sequence of one or the other,
298 and the assumption is that if cmd is a string or bytes, then it's
299 already quoted (because it's what's actually being passed to
300 call() and friends. e.g. log(shstr(cmd)); call(cmd)
303 if isinstance(cmd, (bytes, compat.str_type)):
305 elif all(isinstance(x, bytes) for x in cmd):
306 return b' '.join(map(bquote, cmd))
307 elif all(isinstance(x, compat.str_type) for x in cmd):
308 return ' '.join(map(squote, cmd))
309 raise TypeError('unsupported shstr argument: ' + repr(cmd))
312 exc = subprocess.check_call
323 assert stdin in (None, PIPE)
326 stdin=stdin, stdout=PIPE, stderr=stderr,
328 preexec_fn=preexec_fn,
330 out, err = p.communicate(input)
331 if check and p.returncode != 0:
332 raise Exception('subprocess %r failed with status %d%s'
333 % (b' '.join(map(quote, cmd)), p.returncode,
334 ', stderr: %r' % err if err else ''))
def readpipe(argv, preexec_fn=None, shell=False):
    """Run a subprocess and return its output."""
    result = exo(argv, preexec_fn=preexec_fn, shell=shell)
    return result[0]
342 def _argmax_base(command):
345 base_size += len(command) + 1
346 for k, v in compat.items(environ):
347 base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
351 def _argmax_args_size(args):
352 return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
355 def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
356 """If args is not empty, yield the output produced by calling the
357 command list with args as a sequence of strings (It may be necessary
358 to return multiple strings in order to respect ARG_MAX)."""
359 # The optional arg_max arg is a workaround for an issue with the
360 # current wvtest behavior.
361 base_size = _argmax_base(command)
363 room = arg_max - base_size
366 next_size = _argmax_args_size(args[i:i+1])
367 if room - next_size < 0:
373 assert(len(sub_args))
374 yield readpipe(command + sub_args, preexec_fn=preexec_fn)
def resolve_parent(p):
    """Return the absolute path of a file without following any final symlink.

    Behaves like os.path.realpath, but doesn't follow a symlink for the last
    element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
    will follow symlinks in p's directory)
    """
    try:
        st = os.lstat(p)
    except OSError:
        st = None
    if not (st and stat.S_ISLNK(st.st_mode)):
        return os.path.realpath(p)
    # p itself is a symlink: resolve everything except the final element.
    parent, name = os.path.split(p)
    return os.path.join(os.path.realpath(parent), name)
def detect_fakeroot():
    "Return True if we appear to be running under fakeroot."
    # Identity comparison ("is not None") instead of "!= None": os.getenv
    # returns either a string or None, and PEP 8 mandates is/is not for None.
    return os.getenv("FAKEROOTKEY") is not None
403 if sys.platform.startswith('cygwin'):
405 # https://cygwin.com/ml/cygwin/2015-02/msg00057.html
406 groups = os.getgroups()
407 return 544 in groups or 0 in groups
410 return os.geteuid() == 0
413 def cache_key_value(get_value, key, cache):
414 """Return (value, was_cached). If there is a value in the cache
415 for key, use that, otherwise, call get_value(key) which should
416 throw a KeyError if there is no value -- in which case the cached
417 and returned value will be None.
419 try: # Do we already have it (or know there wasn't one)?
426 cache[key] = value = get_value(key)
434 """Get the FQDN of this machine."""
437 _hostname = socket.getfqdn().encode('iso-8859-1')
441 def format_filesize(size):
446 exponent = int(math.log(size) // math.log(unit))
447 size_prefix = "KMGTPE"[exponent - 1]
448 return "%.1f%s" % (size // math.pow(unit, exponent), size_prefix)
451 class NotOk(Exception):
456 def __init__(self, outp):
460 while self._read(65536): pass
462 def read(self, size):
463 """Read 'size' bytes from input stream."""
465 return self._read(size)
468 """Read from input stream until a newline is found."""
470 return self._readline()
472 def write(self, data):
473 """Write 'data' to output stream."""
474 #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
475 self.outp.write(data)
478 """Return true if input stream is readable."""
479 raise NotImplemented("Subclasses must implement has_input")
482 """Indicate end of output from last sent command."""
483 self.write(b'\nok\n')
486 """Indicate server error to the client."""
487 s = re.sub(br'\s+', b' ', s)
488 self.write(b'\nerror %s\n' % s)
490 def _check_ok(self, onempty):
493 for rl in linereader(self):
494 #log('%d got line: %r\n' % (os.getpid(), rl))
495 if not rl: # empty line
499 elif rl.startswith(b'error '):
500 #log('client: error: %s\n' % rl[6:])
504 raise Exception('server exited unexpectedly; see errors above')
506 def drain_and_check_ok(self):
507 """Remove all data for the current command from input stream."""
510 return self._check_ok(onempty)
513 """Verify that server action completed successfully."""
515 raise Exception('expected "ok", got %r' % rl)
516 return self._check_ok(onempty)
519 class Conn(BaseConn):
520 def __init__(self, inp, outp):
521 BaseConn.__init__(self, outp)
524 def _read(self, size):
525 return self.inp.read(size)
528 return self.inp.readline()
531 [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
533 assert(rl[0] == self.inp.fileno())
def checked_reader(fd, n):
    """Yield chunks read from fd until exactly n bytes have been produced.

    Blocks (via select) until fd is readable; raises Exception if EOF is
    reached before n bytes arrive.
    """
    remaining = n
    while remaining > 0:
        rl, _, _ = select.select([fd], [], [])
        assert(rl[0] == fd)
        buf = os.read(fd, remaining)
        if not buf: raise Exception("Unexpected EOF reading %d more bytes" % remaining)
        yield buf
        remaining -= len(buf)
549 MAX_PACKET = 128 * 1024
550 def mux(p, outfd, outr, errr):
553 while p.poll() is None:
554 rl, _, _ = select.select(fds, [], [])
557 buf = os.read(outr, MAX_PACKET)
559 os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
561 buf = os.read(errr, 1024)
563 os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
565 os.write(outfd, struct.pack('!IB', 0, 3))
568 class DemuxConn(BaseConn):
569 """A helper class for bup's client-server protocol."""
570 def __init__(self, infd, outp):
571 BaseConn.__init__(self, outp)
572 # Anything that comes through before the sync string was not
573 # multiplexed and can be assumed to be debug/log before mux init.
575 while tail != b'BUPMUX':
576 b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
578 raise IOError('demux: unexpected EOF during initialization')
580 byte_stream(sys.stderr).write(tail[:-6]) # pre-mux log messages
587 def write(self, data):
589 BaseConn.write(self, data)
591 def _next_packet(self, timeout):
592 if self.closed: return False
593 rl, wl, xl = select.select([self.infd], [], [], timeout)
594 if not rl: return False
595 assert(rl[0] == self.infd)
596 ns = b''.join(checked_reader(self.infd, 5))
597 n, fdw = struct.unpack('!IB', ns)
598 assert(n <= MAX_PACKET)
600 self.reader = checked_reader(self.infd, n)
602 for buf in checked_reader(self.infd, n):
603 byte_stream(sys.stderr).write(buf)
606 debug2("DemuxConn: marked closed\n")
609 def _load_buf(self, timeout):
610 if self.buf is not None:
612 while not self.closed:
613 while not self.reader:
614 if not self._next_packet(timeout):
617 self.buf = next(self.reader)
619 except StopIteration:
623 def _read_parts(self, ix_fn):
624 while self._load_buf(None):
625 assert(self.buf is not None)
627 if i is None or i == len(self.buf):
632 self.buf = self.buf[i:]
640 return buf.index(b'\n')+1
643 return b''.join(self._read_parts(find_eol))
645 def _read(self, size):
647 def until_size(buf): # Closes on csize
648 if len(buf) < csize[0]:
653 return b''.join(self._read_parts(until_size))
656 return self._load_buf(0)
660 """Generate a list of input lines from 'f' without terminating newlines."""
668 def chunkyreader(f, count = None):
669 """Generate a list of chunks of data read from 'f'.
671 If count is None, read until EOF is reached.
673 If count is a positive integer, read 'count' bytes from 'f'. If EOF is
674 reached while reading, raise IOError.
678 b = f.read(min(count, 65536))
680 raise IOError('EOF with %d bytes remaining' % count)
691 def atomically_replaced_file(name, mode='w', buffering=-1):
692 """Yield a file that will be atomically renamed name when leaving the block.
694 This contextmanager yields an open file object that is backed by a
695 temporary file which will be renamed (atomically) to the target
696 name if everything succeeds.
698 The mode and buffering arguments are handled exactly as with open,
699 and the yielded file will have very restrictive permissions, as
704 with atomically_replaced_file('foo.txt', 'w') as f:
705 f.write('hello jack.')
709 (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
710 text=('b' not in mode))
713 f = os.fdopen(ffd, mode, buffering)
721 os.rename(tempname, name)
723 unlink(tempname) # nonexistant file is ignored
def slashappend(s):
    """Append "/" to 's' if it doesn't already end in "/"."""
    assert isinstance(s, bytes)
    needs_slash = s and not s.endswith(b'/')
    return s + b'/' if needs_slash else s
735 def _mmap_do(f, sz, flags, prot, close):
737 st = os.fstat(f.fileno())
740 # trying to open a zero-length map gives an error, but an empty
741 # string has all the same behaviour of a zero-length map, ie. it has
744 map = mmap.mmap(f.fileno(), sz, flags, prot)
746 f.close() # map will persist beyond file close
def mmap_read(f, sz = 0, close=True):
    """Create a read-only memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.

    When 'close' is true, f is closed after the map is created (the
    mapping remains valid after the file is closed).
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
def mmap_readwrite(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    """
    # MAP_SHARED: writes through the map are flushed back to the file.
    return _mmap_do(f, sz, mmap.MAP_SHARED,
                    mmap.PROT_READ | mmap.PROT_WRITE, close)
def mmap_readwrite_private(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    The map is private, which means the changes are never flushed back to the
    file.
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE,
                    mmap.PROT_READ | mmap.PROT_WRITE, close)
775 _mincore = getattr(_helpers, 'mincore', None)
777 # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
778 MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
780 _fmincore_chunk_size = None
def _set_fmincore_chunk_size():
    """Set _fmincore_chunk_size to the largest multiple of the system page
    size that doesn't exceed 64MB (or to one page when pages are larger
    than that)."""
    global _fmincore_chunk_size
    preferred = 64 * 1024 * 1024
    if sc_page_size < preferred:
        _fmincore_chunk_size = sc_page_size * (preferred // sc_page_size)
    else:
        _fmincore_chunk_size = sc_page_size
790 """Return the mincore() data for fd as a bytearray whose values can be
791 tested via MINCORE_INCORE, or None if fd does not fully
792 support the operation."""
794 if (st.st_size == 0):
796 if not _fmincore_chunk_size:
797 _set_fmincore_chunk_size()
798 pages_per_chunk = _fmincore_chunk_size // sc_page_size;
799 page_count = (st.st_size + sc_page_size - 1) // sc_page_size;
800 chunk_count = page_count // _fmincore_chunk_size
803 result = bytearray(page_count)
804 for ci in compat.range(chunk_count):
805 pos = _fmincore_chunk_size * ci;
806 msize = min(_fmincore_chunk_size, st.st_size - pos)
808 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
809 except mmap.error as ex:
810 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
811 # Perhaps the file was a pipe, i.e. "... | bup split ..."
815 _mincore(m, msize, 0, result, ci * pages_per_chunk)
816 except OSError as ex:
817 if ex.errno == errno.ENOSYS:
823 def parse_timestamp(epoch_str):
824 """Return the number of nanoseconds since the epoch that are described
825 by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
826 throw a ValueError that may contain additional information."""
827 ns_per = {'s' : 1000000000,
831 match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
833 if re.match(r'^([-+]?[0-9]+)$', epoch_str):
834 raise ValueError('must include units, i.e. 100ns, 100ms, ...')
836 (n, units) = match.group(1, 2)
840 return n * ns_per[units]
844 """Parse string or bytes as a possibly unit suffixed number.
847 199.2k means 203981 bytes
848 1GB means 1073741824 bytes
849 2.1 tb means 2199023255552 bytes
851 if isinstance(s, bytes):
852 # FIXME: should this raise a ValueError for UnicodeDecodeError
853 # (perhaps with the latter as the context).
854 s = s.decode('ascii')
855 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
857 raise ValueError("can't parse %r as a number" % s)
858 (val, unit) = g.groups()
861 if unit in ['t', 'tb']:
862 mult = 1024*1024*1024*1024
863 elif unit in ['g', 'gb']:
864 mult = 1024*1024*1024
865 elif unit in ['m', 'mb']:
867 elif unit in ['k', 'kb']:
869 elif unit in ['', 'b']:
872 raise ValueError("invalid unit %r in number %r" % (unit, s))
878 """Append an error message to the list of saved errors.
880 Once processing is able to stop and output the errors, the saved errors are
881 accessible in the module variable helpers.saved_errors.
883 saved_errors.append(e)
892 def die_if_errors(msg=None, status=1):
896 msg = 'warning: %d errors encountered\n' % len(saved_errors)
def handle_ctrl_c():
    """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).

    The new exception handler will make sure that bup will exit without an ugly
    stacktrace when Ctrl-C is hit.
    """
    oldhook = sys.excepthook
    def newhook(exctype, value, traceback):
        # Anything other than Ctrl-C is handed to the previous hook.
        if exctype != KeyboardInterrupt:
            return oldhook(exctype, value, traceback)
        log('\nInterrupted.\n')
    sys.excepthook = newhook
916 def columnate(l, prefix):
917 """Format elements of 'l' in columns with 'prefix' leading each line.
919 The number of columns is determined automatically based on the string
922 binary = isinstance(prefix, bytes)
923 nothing = b'' if binary else ''
924 nl = b'\n' if binary else '\n'
928 clen = max(len(s) for s in l)
929 ncols = (tty_width() - len(prefix)) // (clen + 2)
934 while len(l) % ncols:
936 rows = len(l) // ncols
937 for s in compat.range(0, len(l), rows):
938 cols.append(l[s:s+rows])
940 fmt = b'%-*s' if binary else '%-*s'
941 for row in zip(*cols):
942 out += prefix + nothing.join((fmt % (clen+2, s)) for s in row) + nl
def parse_date_or_fatal(str, fatal):
    """Parses the given date or calls Option.fatal().
    For now we expect a string that contains a float."""
    try:
        return float(str)
    except ValueError as e:
        raise fatal('invalid date format (should be a float): %r' % e)
def parse_excludes(options, fatal):
    """Traverse the options and extract all excludes, or call Option.fatal().

    Returns the excluded paths, resolved and deduplicated, in sorted order.
    """
    excluded_paths = []
    for flag in options:
        (option, parameter) = flag
        if option == '--exclude':
            excluded_paths.append(resolve_parent(argv_bytes(parameter)))
        elif option == '--exclude-from':
            try:
                f = open(resolve_parent(argv_bytes(parameter)), 'rb')
            except IOError as e:
                raise fatal("couldn't read %r" % parameter)
            # Fix: close the exclude file when done instead of leaking it.
            with f:
                for exclude_path in f.readlines():
                    # FIXME: perhaps this should be rstrip('\n')
                    exclude_path = resolve_parent(exclude_path.strip())
                    if exclude_path:
                        excluded_paths.append(exclude_path)
    return sorted(frozenset(excluded_paths))
def parse_rx_excludes(options, fatal):
    """Traverse the options and extract all rx excludes, or call
    Option.fatal().

    Returns a list of compiled regular expressions.
    """
    excluded_patterns = []
    for flag in options:
        (option, parameter) = flag
        if option == '--exclude-rx':
            try:
                excluded_patterns.append(re.compile(argv_bytes(parameter)))
            except re.error as ex:
                fatal('invalid --exclude-rx pattern (%r): %s' % (parameter, ex))
        elif option == '--exclude-rx-from':
            try:
                f = open(resolve_parent(parameter), 'rb')
            except IOError as e:
                raise fatal("couldn't read %r" % parameter)
            # Fix: close the pattern file when done instead of leaking it.
            with f:
                for pattern in f.readlines():
                    spattern = pattern.rstrip(b'\n')
                    if not spattern:
                        continue
                    try:
                        excluded_patterns.append(re.compile(spattern))
                    except re.error as ex:
                        fatal('invalid --exclude-rx pattern (%r): %s'
                              % (spattern, ex))
    return excluded_patterns
def should_rx_exclude_path(path, exclude_rxs):
    """Return True if path matches a regular expression in exclude_rxs."""
    matching = next((rx for rx in exclude_rxs if rx.search(path)), None)
    if matching is None:
        return False
    debug1('Skipping %r: excluded by rx pattern %r.\n'
           % (path, matching.pattern))
    return True
1016 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
1017 # that resolve against the current filesystem in the strip/graft
1018 # functions for example, but elsewhere as well. I suspect bup's not
1019 # always being careful about that. For some cases, the contents of
1020 # the current filesystem should be irrelevant, and consulting it might
1021 # produce the wrong result, perhaps via unintended symlink resolution,
1024 def path_components(path):
1025 """Break path into a list of pairs of the form (name,
1026 full_path_to_name). Path must start with '/'.
1028 '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
1029 if not path.startswith(b'/'):
1030 raise Exception('path must start with "/": %s' % path_msg(path))
1031 # Since we assume path startswith('/'), we can skip the first element.
1032 result = [(b'', b'/')]
1033 norm_path = os.path.abspath(path)
1034 if norm_path == b'/':
1037 for p in norm_path.split(b'/')[1:]:
1038 full_path += b'/' + p
1039 result.append((p, full_path))
1043 def stripped_path_components(path, strip_prefixes):
1044 """Strip any prefix in strip_prefixes from path and return a list
1045 of path components where each component is (name,
1046 none_or_full_fs_path_to_name). Assume path startswith('/').
1047 See thelpers.py for examples."""
1048 normalized_path = os.path.abspath(path)
1049 sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
1050 for bp in sorted_strip_prefixes:
1051 normalized_bp = os.path.abspath(bp)
1052 if normalized_bp == b'/':
1054 if normalized_path.startswith(normalized_bp):
1055 prefix = normalized_path[:len(normalized_bp)]
1057 for p in normalized_path[len(normalized_bp):].split(b'/'):
1061 result.append((p, prefix))
1064 return path_components(path)
1067 def grafted_path_components(graft_points, path):
1068 # Create a result that consists of some number of faked graft
1069 # directories before the graft point, followed by all of the real
1070 # directories from path that are after the graft point. Arrange
1071 # for the directory at the graft point in the result to correspond
1072 # to the "orig" directory in --graft orig=new. See t/thelpers.py
1073 # for some examples.
1075 # Note that given --graft orig=new, orig and new have *nothing* to
1076 # do with each other, even if some of their component names
1077 # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
1078 # equivalent to --graft /foo/bar/baz=/x/y/z, or even
1081 # FIXME: This can't be the best solution...
1082 clean_path = os.path.abspath(path)
1083 for graft_point in graft_points:
1084 old_prefix, new_prefix = graft_point
1085 # Expand prefixes iff not absolute paths.
1086 old_prefix = os.path.normpath(old_prefix)
1087 new_prefix = os.path.normpath(new_prefix)
1088 if clean_path.startswith(old_prefix):
1089 escaped_prefix = re.escape(old_prefix)
1090 grafted_path = re.sub(br'^' + escaped_prefix, new_prefix, clean_path)
1091 # Handle /foo=/ (at least) -- which produces //whatever.
1092 grafted_path = b'/' + grafted_path.lstrip(b'/')
1093 clean_path_components = path_components(clean_path)
1094 # Count the components that were stripped.
1095 strip_count = 0 if old_prefix == b'/' else old_prefix.count(b'/')
1096 new_prefix_parts = new_prefix.split(b'/')
1097 result_prefix = grafted_path.split(b'/')[:new_prefix.count(b'/')]
1098 result = [(p, None) for p in result_prefix] \
1099 + clean_path_components[strip_count:]
1100 # Now set the graft point name to match the end of new_prefix.
1101 graft_point = len(result_prefix)
1102 result[graft_point] = \
1103 (new_prefix_parts[-1], clean_path_components[strip_count][1])
1104 if new_prefix == b'/': # --graft ...=/ is a special case.
1107 return path_components(clean_path)
1113 _localtime = getattr(_helpers, 'localtime', None)
1116 bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
1117 'tm_hour', 'tm_min', 'tm_sec',
1118 'tm_wday', 'tm_yday',
1119 'tm_isdst', 'tm_gmtoff', 'tm_zone'])
1121 # Define a localtime() that returns bup_time when possible. Note:
1122 # this means that any helpers.localtime() results may need to be
1123 # passed through to_py_time() before being passed to python's time
1124 # module, which doesn't appear willing to ignore the extra items.
1126 def localtime(time):
1127 return bup_time(*_helpers.localtime(floor(time)))
1128 def utc_offset_str(t):
1129 """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
1130 If the current UTC offset does not represent an integer number
1131 of minutes, the fractional component will be truncated."""
1132 off = localtime(t).tm_gmtoff
1133 # Note: // doesn't truncate like C for negative values, it rounds down.
1134 offmin = abs(off) // 60
1136 h = (offmin - m) // 60
1137 return b'%+03d%02d' % (-h if off < 0 else h, m)
1139 if isinstance(x, time.struct_time):
1141 return time.struct_time(x[:9])
1143 localtime = time.localtime
1144 def utc_offset_str(t):
1145 return time.strftime(b'%z', localtime(t))
1150 _some_invalid_save_parts_rx = re.compile(br'[\[ ~^:?*\\]|\.\.|//|@{')
def valid_save_name(name):
    """Return True when name is acceptable as a bup save/branch name.

    Enforces a superset of the restrictions in git-check-ref-format(1).
    """
    if name == b'@':
        return False
    if name.startswith(b'/') or name.endswith(b'/') or name.endswith(b'.'):
        return False
    if _some_invalid_save_parts_rx.search(name):
        return False
    if any(byte_int(c) < 0x20 or byte_int(c) == 0x7f for c in name):
        return False
    for part in name.split(b'/'):
        if part.startswith(b'.') or part.endswith(b'.lock'):
            return False
    return True
1169 _period_rx = re.compile(r'^([0-9]+)(s|min|h|d|w|m|y)$')
1171 def period_as_secs(s):
1174 match = _period_rx.match(s)
1177 mag = int(match.group(1))
1178 scale = match.group(2)
1179 return mag * {'s': 1,
1183 'w': 60 * 60 * 24 * 7,
1184 'm': 60 * 60 * 24 * 31,
1185 'y': 60 * 60 * 24 * 366}[scale]