1 """Helper functions and classes for bup."""
3 from __future__ import absolute_import, division
4 from collections import namedtuple
5 from contextlib import contextmanager
6 from ctypes import sizeof, c_void_p
9 from subprocess import PIPE, Popen
10 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
11 import hashlib, heapq, math, operator, time, grp, tempfile
13 from bup import _helpers
14 from bup import compat
15 from bup.compat import argv_bytes, byte_int
16 from bup.io import byte_stream, path_msg
17 # This function should really be in helpers, not in bup.options. But we
18 # want options.py to be standalone so people can include it in other projects.
19 from bup.options import _tty_width as tty_width
# Debug verbosity for debug1()/debug2(), taken from the environment.
buglvl = int(os.environ.get('BUP_DEBUG', 0))


class Nonlocal:
    """Helper to deal with Python scoping issues"""


sc_page_size = os.sysconf('SC_PAGE_SIZE')
assert(sc_page_size > 0)

sc_arg_max = os.sysconf('SC_ARG_MAX')
if sc_arg_max == -1:  # "no definite limit" - let's choose 2M
    sc_arg_max = 2 * 1024 * 1024
def last(iterable):
    """Return the last item of 'iterable', or None if it is empty."""
    result = None
    for result in iterable:
        pass
    return result


# Prefer os.fdatasync when the platform provides it; otherwise fall back
# to a plain fsync.
try:
    _fdatasync = os.fdatasync
except AttributeError:
    _fdatasync = os.fsync

if sys.platform.startswith('darwin'):
    # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
    import fcntl
    def fdatasync(fd):
        try:
            return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
        except IOError as e:
            # Fallback for file systems (SMB) that do not support F_FULLFSYNC
            if e.errno == errno.ENOTSUP:
                return _fdatasync(fd)
            raise
else:
    fdatasync = _fdatasync
def partition(predicate, stream):
    """Returns (leading_matches_it, rest_it), where leading_matches_it
    must be completely exhausted before traversing rest_it.
    """
    stream = iter(stream)
    # Holds the first non-matching item (if any) so rest() can replay it.
    first_nonmatch = []
    def leading_matches():
        for item in stream:
            if predicate(item):
                yield item
            else:
                first_nonmatch.append(item)
                break
    def rest():
        if first_nonmatch:
            yield first_nonmatch[0]
        for item in stream:
            yield item
    return (leading_matches(), rest())
def lines_until_sentinel(f, sentinel, ex_type):
    """Yield lines read from 'f' until 'sentinel' is seen (not yielded).

    Raises ex_type on EOF before the sentinel.
    """
    # sentinel must end with \n and must contain only one \n
    while True:
        line = f.readline()
        if not (line and line.endswith(b'\n')):
            raise ex_type('Hit EOF while reading line')
        if line == sentinel:
            return
        yield line
def stat_if_exists(path):
    """Return os.stat(path) if path exists, else None; re-raise other errors."""
    try:
        return os.stat(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    return None
114 # Write (blockingly) to sockets that may or may not be in blocking mode.
115 # We need this because our stderr is sometimes eaten by subprocesses
116 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
117 # leading to race conditions. Ick. We'll do it the hard way.
118 def _hard_write(fd, buf):
120 (r,w,x) = select.select([], [fd], [], None)
122 raise IOError('select(fd) returned without being writable')
124 sz = os.write(fd, buf)
126 if e.errno != errno.EAGAIN:
_last_prog = 0
def log(s):
    """Print a log message to stderr."""
    sys.stdout.flush()
    _hard_write(sys.stderr.fileno(), s if isinstance(s, bytes) else s.encode())
    global _last_prog
    _last_prog = 0


def debug1(s):
    # Emitted only when BUP_DEBUG >= 1.
    if buglvl >= 1:
        log(s)


def debug2(s):
    # Emitted only when BUP_DEBUG >= 2.
    if buglvl >= 2:
        log(s)


istty1 = os.isatty(1) or (int(os.environ.get('BUP_FORCE_TTY', 0)) & 1)
istty2 = os.isatty(2) or (int(os.environ.get('BUP_FORCE_TTY', 0)) & 2)
_last_progress = ''
def progress(s):
    """Calls log() if stderr is a TTY.  Does nothing otherwise."""
    global _last_progress
    if istty2:
        log(s)
        _last_progress = s


def qprogress(s):
    """Calls progress() only if we haven't printed progress in a while.

    This avoids overloading the stderr buffer with excess junk.
    """
    global _last_prog
    now = time.time()
    if now - _last_prog > 0.1:
        progress(s)
        _last_prog = now


def reprogress():
    """Calls progress() to redisplay the most recent progress message.

    Useful after you've printed some other message that wipes out the
    progress line.
    """
    if _last_progress and _last_progress.endswith('\r'):
        progress(_last_progress)
def mkdirp(d, mode=None):
    """Recursively create directories on path 'd'.

    Unlike os.makedirs(), it doesn't raise an exception if the last element of
    the path already exists.
    """
    try:
        if mode:
            os.makedirs(d, mode)
        else:
            os.makedirs(d)
    except OSError as e:
        # Existing directory is fine; anything else propagates.
        if e.errno != errno.EEXIST:
            raise
class MergeIterItem:
    """A (entry, source-iterator) pair, ordered by entry, for merge_iter's heap."""
    def __init__(self, entry, read_it):
        self.entry = entry
        self.read_it = read_it
    def __lt__(self, x):
        return self.entry < x.entry
def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
    """Merge already-sorted iterables, suppressing adjacent duplicates.

    Calls pfunc(count, total) every 'pfreq' items and pfinal(count, total)
    at the end.  If 'key' is given, entries are considered duplicates when
    that attribute matches; otherwise plain equality is used.
    """
    if key:
        samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
    else:
        samekey = operator.eq
    count = 0
    total = sum(len(it) for it in iters)
    iters = (iter(it) for it in iters)
    heap = ((next(it, None), it) for it in iters)
    heap = [MergeIterItem(e, it) for e, it in heap if e]

    heapq.heapify(heap)
    pe = None
    while heap:
        if not count % pfreq:
            pfunc(count, total)
        e, it = heap[0].entry, heap[0].read_it
        if not samekey(e, pe):
            pe = e
            yield e
        count += 1
        try:
            e = next(it)
        except StopIteration:
            heapq.heappop(heap) # remove current
        else:
            # shift current to new location
            heapq.heapreplace(heap, MergeIterItem(e, it))
    pfinal(count, total)
def unlink(f):
    """Delete a file at path 'f' if it currently exists.

    Unlike os.unlink(), does not throw an exception if the file didn't already
    exist.
    """
    try:
        os.unlink(f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
# Identifiers matching these need no shell quoting at all.
_bq_simple_id_rx = re.compile(br'^[-_./a-zA-Z0-9]+$')
_sq_simple_id_rx = re.compile(r'^[-_./a-zA-Z0-9]+$')


def bquote(x):
    """Return bytes 'x' quoted for safe use in a POSIX shell command."""
    if x == b'':
        return b"''"
    if _bq_simple_id_rx.match(x):
        return x
    # Wrap in single quotes; embedded single quotes become '"'"'.
    return b"'%s'" % x.replace(b"'", b"'\"'\"'")


def squote(x):
    """Return string 'x' quoted for safe use in a POSIX shell command."""
    if x == '':
        return "''"
    if _sq_simple_id_rx.match(x):
        return x
    return "'%s'" % x.replace("'", "'\"'\"'")
def quote(x):
    """Shell-quote 'x', dispatching on bytes vs str."""
    if isinstance(x, bytes):
        return bquote(x)
    if isinstance(x, compat.str_type):
        return squote(x)
    assert False


def shstr(cmd):
    """Return a shell quoted string for cmd if it's a sequence, else cmd.

    cmd must be a string, bytes, or a sequence of one or the other,
    and the assumption is that if cmd is a string or bytes, then it's
    already quoted (because it's what's actually being passed to
    call() and friends. e.g. log(shstr(cmd)); call(cmd)
    """
    if isinstance(cmd, (bytes, compat.str_type)):
        return cmd
    elif all(isinstance(x, bytes) for x in cmd):
        return b' '.join(map(bquote, cmd))
    elif all(isinstance(x, compat.str_type) for x in cmd):
        return ' '.join(map(squote, cmd))
    raise TypeError('unsupported shstr argument: ' + repr(cmd))
exc = subprocess.check_call

def exo(cmd,
        input=None,
        stdin=None,
        stderr=None,
        shell=False,
        check=True,
        preexec_fn=None,
        close_fds=True):
    """Run 'cmd'; return (stdout, stderr, Popen).

    When 'input' is given, it is fed to the child's stdin.  If 'check' is
    true and the child exits nonzero, raise an Exception including the
    command and any captured stderr.
    """
    if input:
        assert stdin in (None, PIPE)
        stdin = PIPE
    p = Popen(cmd,
              stdin=stdin, stdout=PIPE, stderr=stderr,
              shell=shell,
              preexec_fn=preexec_fn,
              close_fds=close_fds)
    out, err = p.communicate(input)
    if check and p.returncode != 0:
        raise Exception('subprocess %r failed with status %d%s'
                        % (b' '.join(map(quote, cmd)), p.returncode,
                           ', stderr: %r' % err if err else ''))
    return out, err, p
def readpipe(argv, preexec_fn=None, shell=False):
    """Run a subprocess and return its output."""
    # Delegates to exo(); index 0 of the (out, err, p) result is stdout.
    return exo(argv, preexec_fn=preexec_fn, shell=shell)[0]
def _argmax_base(command):
    """Estimate the ARG_MAX bytes consumed by 'command' plus the environment."""
    base_size = 2048
    for c in command:
        # NOTE(review): this adds len(command) (the arg count) per argument,
        # not len(c) -- looks suspicious, but preserved as-is; confirm intent.
        base_size += len(command) + 1
    for k, v in compat.items(environ):
        base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
    return base_size


def _argmax_args_size(args):
    """Estimate the ARG_MAX bytes consumed by the argument strings in 'args'."""
    return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
    """If args is not empty, yield the output produced by calling the
    command list with args as a sequence of strings (It may be necessary
    to return multiple strings in order to respect ARG_MAX)."""
    # The optional arg_max arg is a workaround for an issue with the
    # current wvtest behavior.
    base_size = _argmax_base(command)
    while args:
        room = arg_max - base_size
        i = 0
        # Take as many args as fit within the remaining ARG_MAX budget.
        while i < len(args):
            next_size = _argmax_args_size(args[i:i+1])
            if room - next_size < 0:
                break
            room -= next_size
            i += 1
        sub_args = args[:i]
        args = args[i:]
        assert(len(sub_args))
        yield readpipe(command + sub_args, preexec_fn=preexec_fn)
def resolve_parent(p):
    """Return the absolute path of a file without following any final symlink.

    Behaves like os.path.realpath, but doesn't follow a symlink for the last
    element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
    will follow symlinks in p's directory)
    """
    try:
        st = os.lstat(p)
    except OSError:
        st = None
    if st and stat.S_ISLNK(st.st_mode):
        # Resolve only the directory part, keeping the link name itself.
        (dir, name) = os.path.split(p)
        dir = os.path.realpath(dir)
        out = os.path.join(dir, name)
    else:
        out = os.path.realpath(p)
    #log('realpathing:%r,%r\n' % (p, out))
    return out
def detect_fakeroot():
    """Return True if we appear to be running under fakeroot."""
    # Idiom fix: identity comparison with None should use `is not None`,
    # not `!= None`.
    return os.getenv("FAKEROOTKEY") is not None


def is_superuser():
    """Return True if this process effectively has root privileges."""
    if sys.platform.startswith('cygwin'):
        # https://cygwin.com/ml/cygwin/2015-02/msg00057.html
        groups = os.getgroups()
        return 544 in groups or 0 in groups
    else:
        return os.geteuid() == 0
def cache_key_value(get_value, key, cache):
    """Return (value, was_cached). If there is a value in the cache
    for key, use that, otherwise, call get_value(key) which should
    throw a KeyError if there is no value -- in which case the cached
    and returned value will be None.
    """
    try: # Do we already have it (or know there wasn't one)?
        value = cache[key]
        return value, True
    except KeyError:
        pass
    value = None
    try:
        cache[key] = value = get_value(key)
    except KeyError:
        # Remember the miss so we never call get_value for this key again.
        cache[key] = None
    return value, False
_hostname = None
def hostname():
    """Get the FQDN of this machine."""
    # Cached after the first call; _helpers.gethostname() is the C helper.
    global _hostname
    if not _hostname:
        _hostname = _helpers.gethostname()
    return _hostname
def format_filesize(size):
    """Return 'size' in bytes as a human-readable string (e.g. "1.5K")."""
    unit = 1024.0
    size = float(size)
    if size < unit:
        return "%d" % (size)
    exponent = int(math.log(size) // math.log(unit))
    size_prefix = "KMGTPE"[exponent - 1]
    return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
class NotOk(Exception):
    """Raised/returned when the server reports an 'error ...' line."""
    pass
class BaseConn:
    """Base class for bup's client-server protocol connections.

    Subclasses supply the input side (_read/_readline/has_input); this
    class provides the shared output and ok/error framing logic.
    """
    def __init__(self, outp):
        self.outp = outp

    def close(self):
        # Drain any remaining input.
        while self._read(65536): pass

    def _read(self, size):
        raise NotImplementedError("Subclasses must implement _read")

    def read(self, size):
        """Read 'size' bytes from input stream."""
        self.outp.flush()
        return self._read(size)

    def _readline(self, size):
        raise NotImplementedError("Subclasses must implement _readline")

    def readline(self):
        """Read from input stream until a newline is found."""
        self.outp.flush()
        return self._readline()

    def write(self, data):
        """Write 'data' to output stream."""
        #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
        self.outp.write(data)

    def has_input(self):
        """Return true if input stream is readable."""
        raise NotImplementedError("Subclasses must implement has_input")

    def ok(self):
        """Indicate end of output from last sent command."""
        self.write(b'\nok\n')

    def error(self, s):
        """Indicate server error to the client."""
        # Collapse whitespace so the message stays on one protocol line.
        s = re.sub(br'\s+', b' ', s)
        self.write(b'\nerror %s\n' % s)

    def _check_ok(self, onempty):
        self.outp.flush()
        rl = ''
        for rl in linereader(self):
            #log('%d got line: %r\n' % (os.getpid(), rl))
            if not rl:  # empty line
                continue
            elif rl == b'ok':
                return None
            elif rl.startswith(b'error '):
                #log('client: error: %s\n' % rl[6:])
                return NotOk(rl[6:])
            else:
                onempty(rl)
        raise Exception('server exited unexpectedly; see errors above')

    def drain_and_check_ok(self):
        """Remove all data for the current command from input stream."""
        def onempty(rl):
            pass
        return self._check_ok(onempty)

    def check_ok(self):
        """Verify that server action completed successfully."""
        def onempty(rl):
            raise Exception('expected "ok", got %r' % rl)
        return self._check_ok(onempty)
class Conn(BaseConn):
    """A BaseConn over a pair of already-open file objects (inp, outp)."""
    def __init__(self, inp, outp):
        BaseConn.__init__(self, outp)
        self.inp = inp

    def _read(self, size):
        return self.inp.read(size)

    def _readline(self):
        return self.inp.readline()

    def has_input(self):
        # Zero timeout: poll, don't block.
        [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
        if rl:
            assert(rl[0] == self.inp.fileno())
            return True
        else:
            return None
def checked_reader(fd, n):
    """Yield chunks from fd until exactly n bytes are consumed; raise on EOF."""
    while n > 0:
        rl, _, _ = select.select([fd], [], [])
        assert(rl[0] == fd)
        buf = os.read(fd, n)
        if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
        yield buf
        n -= len(buf)
MAX_PACKET = 128 * 1024
def mux(p, outfd, outr, errr):
    """Multiplex child process output (outr=stdout, errr=stderr) onto outfd.

    Each packet is a '!IB' header (length, channel) followed by the payload;
    channel 1 is stdout, 2 is stderr, and a zero-length channel-3 packet
    marks the end of the stream.
    """
    try:
        fds = [outr, errr]
        while p.poll() is None:
            rl, _, _ = select.select(fds, [], [])
            if outr in rl:
                buf = os.read(outr, MAX_PACKET)
                if not buf: break  # eof
                os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
            elif errr in rl:
                buf = os.read(errr, 1024)
                if not buf: break  # eof
                os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
    finally:
        os.write(outfd, struct.pack('!IB', 0, 3))
class DemuxConn(BaseConn):
    """A helper class for bup's client-server protocol."""
    def __init__(self, infd, outp):
        BaseConn.__init__(self, outp)
        # Anything that comes through before the sync string was not
        # multiplexed and can be assumed to be debug/log before mux init.
        tail = b''
        while tail != b'BUPMUX':
            b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
            if not b:
                raise IOError('demux: unexpected EOF during initialization')
            tail += b
            byte_stream(sys.stderr).write(tail[:-6])  # pre-mux log messages
            tail = tail[-6:]
        self.infd = infd
        self.reader = None
        self.buf = None
        self.closed = False

    def write(self, data):
        self._load_buf(0)
        BaseConn.write(self, data)

    def _next_packet(self, timeout):
        # Read one '!IB' framed packet header and dispatch by channel.
        if self.closed: return False
        rl, wl, xl = select.select([self.infd], [], [], timeout)
        if not rl: return False
        assert(rl[0] == self.infd)
        ns = b''.join(checked_reader(self.infd, 5))
        n, fdw = struct.unpack('!IB', ns)
        assert(n <= MAX_PACKET)
        if fdw == 1:
            self.reader = checked_reader(self.infd, n)
        elif fdw == 2:
            for buf in checked_reader(self.infd, n):
                byte_stream(sys.stderr).write(buf)
        elif fdw == 3:
            self.closed = True
            debug2("DemuxConn: marked closed\n")
        return True

    def _load_buf(self, timeout):
        if self.buf is not None:
            return True
        while not self.closed:
            while not self.reader:
                if not self._next_packet(timeout):
                    return False
            try:
                self.buf = next(self.reader)
                return True
            except StopIteration:
                self.reader = None
        return False

    def _read_parts(self, ix_fn):
        # Yield buffered parts; ix_fn(buf) returns the split index, or None
        # to consume the whole buffer and keep going.
        while self._load_buf(None):
            assert(self.buf is not None)
            i = ix_fn(self.buf)
            if i is None or i == len(self.buf):
                yv = self.buf
                self.buf = None
            else:
                yv = self.buf[:i]
                self.buf = self.buf[i:]
            yield yv
            if i is not None:
                break

    def _readline(self):
        def find_eol(buf):
            try:
                return buf.index(b'\n')+1
            except ValueError:
                return None
        return b''.join(self._read_parts(find_eol))

    def _read(self, size):
        csize = [size]
        def until_size(buf): # Closes on csize
            if len(buf) < csize[0]:
                csize[0] -= len(buf)
                return None
            else:
                return csize[0]
        return b''.join(self._read_parts(until_size))

    def has_input(self):
        return self._load_buf(0)
def linereader(f):
    """Generate a list of input lines from 'f' without terminating newlines."""
    while 1:
        line = f.readline()
        if not line:
            break
        yield line[:-1]
def chunkyreader(f, count = None):
    """Generate a list of chunks of data read from 'f'.

    If count is None, read until EOF is reached.

    If count is a positive integer, read 'count' bytes from 'f'. If EOF is
    reached while reading, raise IOError.
    """
    if count is not None:
        while count > 0:
            b = f.read(min(count, 65536))
            if not b:
                raise IOError('EOF with %d bytes remaining' % count)
            yield b
            count -= len(b)
    else:
        while 1:
            b = f.read(65536)
            if not b:
                break
            yield b
@contextmanager
def atomically_replaced_file(name, mode='w', buffering=-1):
    """Yield a file that will be atomically renamed name when leaving the block.

    This contextmanager yields an open file object that is backed by a
    temporary file which will be renamed (atomically) to the target
    name if everything succeeds.

    The mode and buffering arguments are handled exactly as with open,
    and the yielded file will have very restrictive permissions, as
    with mkstemp.

    E.g. this will write a new file atomically:

        with atomically_replaced_file('foo.txt', 'w') as f:
            f.write('hello jack.')

    """
    (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
                                       text=('b' not in mode))
    try:
        try:
            f = os.fdopen(ffd, mode, buffering)
        except:
            os.close(ffd)
            raise
        try:
            yield f
        finally:
            f.close()
        # Rename only happens if the with-body completed without raising.
        os.rename(tempname, name)
    finally:
        unlink(tempname)  # nonexistant file is ignored
def slashappend(s):
    """Append "/" to 's' if it doesn't already end in "/"."""
    # Fixed docstring typo: "aleady" -> "already".
    assert isinstance(s, bytes)
    if s and not s.endswith(b'/'):
        return s + b'/'
    else:
        return s
724 def _mmap_do(f, sz, flags, prot, close):
726 st = os.fstat(f.fileno())
729 # trying to open a zero-length map gives an error, but an empty
730 # string has all the same behaviour of a zero-length map, ie. it has
733 map = mmap.mmap(f.fileno(), sz, flags, prot)
735 f.close() # map will persist beyond file close
def mmap_read(f, sz = 0, close=True):
    """Create a read-only memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
def mmap_readwrite(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    """
    # MAP_SHARED: changes are flushed back to the underlying file.
    return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
                    close)
def mmap_readwrite_private(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    The map is private, which means the changes are never flushed back to the
    file.
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
                    close)
# Optional mincore() support from bup's C helpers.
_mincore = getattr(_helpers, 'mincore', None)
if _mincore:
    # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
    MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
769 _fmincore_chunk_size = None
770 def _set_fmincore_chunk_size():
771 global _fmincore_chunk_size
772 pref_chunk_size = 64 * 1024 * 1024
773 chunk_size = sc_page_size
774 if (sc_page_size < pref_chunk_size):
775 chunk_size = sc_page_size * (pref_chunk_size // sc_page_size)
776 _fmincore_chunk_size = chunk_size
def fmincore(fd):
    """Return the mincore() data for fd as a bytearray whose values can be
    tested via MINCORE_INCORE, or None if fd does not fully
    support the operation."""
    st = os.fstat(fd)
    if (st.st_size == 0):
        return bytearray(0)
    if not _fmincore_chunk_size:
        _set_fmincore_chunk_size()
    pages_per_chunk = _fmincore_chunk_size // sc_page_size
    page_count = (st.st_size + sc_page_size - 1) // sc_page_size
    chunk_count = (st.st_size + _fmincore_chunk_size - 1) // _fmincore_chunk_size
    result = bytearray(page_count)
    for ci in compat.range(chunk_count):
        pos = _fmincore_chunk_size * ci
        msize = min(_fmincore_chunk_size, st.st_size - pos)
        try:
            m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
        except mmap.error as ex:
            if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
                # Perhaps the file was a pipe, i.e. "... | bup split ..."
                return None
            raise ex
        try:
            _mincore(m, msize, 0, result, ci * pages_per_chunk)
        except OSError as ex:
            if ex.errno == errno.ENOSYS:
                return None
            raise
    return result
def parse_timestamp(epoch_str):
    """Return the number of nanoseconds since the epoch that are described
    by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
    throw a ValueError that may contain additional information."""
    ns_per = {'s': 1000000000,
              'ms': 1000000,
              'us': 1000,
              'ns': 1}
    match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
    if not match:
        if re.match(r'^([-+]?[0-9]+)$', epoch_str):
            raise ValueError('must include units, i.e. 100ns, 100ms, ...')
        raise ValueError()
    (n, units) = match.group(1, 2)
    if not n:
        n = 1  # a bare unit (e.g. "ns") means one of that unit
    n = int(n)
    return n * ns_per[units]
def parse_num(s):
    """Parse string or bytes as a possibly unit suffixed number.

    For example:
        199.2k means 203981 bytes
        1GB means 1073741824 bytes
        2.1 tb means 2199023255552 bytes
    """
    if isinstance(s, bytes):
        # FIXME: should this raise a ValueError for UnicodeDecodeError
        # (perhaps with the latter as the context).
        s = s.decode('ascii')
    g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
    if not g:
        raise ValueError("can't parse %r as a number" % s)
    (val, unit) = g.groups()
    num = float(val)
    unit = unit.lower()
    if unit in ['t', 'tb']:
        mult = 1024*1024*1024*1024
    elif unit in ['g', 'gb']:
        mult = 1024*1024*1024
    elif unit in ['m', 'mb']:
        mult = 1024*1024
    elif unit in ['k', 'kb']:
        mult = 1024
    elif unit in ['', 'b']:
        mult = 1
    else:
        raise ValueError("invalid unit %r in number %r" % (unit, s))
    return int(num*mult)
def add_error(e):
    """Append an error message to the list of saved errors.

    Once processing is able to stop and output the errors, the saved errors are
    accessible in the module variable helpers.saved_errors.
    """
    saved_errors.append(e)
    log('%-70s\n' % e)
def die_if_errors(msg=None, status=1):
    """If any errors were saved via add_error(), log 'msg' and exit(status)."""
    global saved_errors
    if saved_errors:
        if not msg:
            msg = 'warning: %d errors encountered\n' % len(saved_errors)
        log(msg)
        sys.exit(status)
def handle_ctrl_c():
    """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).

    The new exception handler will make sure that bup will exit without an ugly
    stacktrace when Ctrl-C is hit.
    """
    oldhook = sys.excepthook
    def newhook(exctype, value, traceback):
        if exctype == KeyboardInterrupt:
            log('\nInterrupted.\n')
        else:
            # Anything else keeps the previous handler's behavior.
            return oldhook(exctype, value, traceback)
    sys.excepthook = newhook
def columnate(l, prefix):
    """Format elements of 'l' in columns with 'prefix' leading each line.

    The number of columns is determined automatically based on the string
    lengths and the terminal width.
    """
    binary = isinstance(prefix, bytes)
    nothing = b'' if binary else ''
    nl = b'\n' if binary else '\n'
    if not l:
        return nothing
    l = l[:]  # pad a copy, not the caller's list
    clen = max(len(s) for s in l)
    ncols = (tty_width() - len(prefix)) // (clen + 2)
    if ncols <= 1:
        ncols = 1
        clen = 0
    cols = []
    while len(l) % ncols:
        l.append(nothing)
    rows = len(l) // ncols
    for s in compat.range(0, len(l), rows):
        cols.append(l[s:s+rows])
    out = nothing
    fmt = b'%-*s' if binary else '%-*s'
    for row in zip(*cols):
        out += prefix + nothing.join((fmt % (clen+2, s)) for s in row) + nl
    return out
def parse_date_or_fatal(str, fatal):
    """Parses the given date or calls Option.fatal().
    For now we expect a string that contains a float."""
    try:
        date = float(str)
    except ValueError as e:
        raise fatal('invalid date format (should be a float): %r' % e)
    else:
        return date
def parse_excludes(options, fatal):
    """Traverse the options and extract all excludes, or call Option.fatal()."""
    excluded_paths = []
    for flag in options:
        (option, parameter) = flag
        if option == '--exclude':
            excluded_paths.append(resolve_parent(argv_bytes(parameter)))
        elif option == '--exclude-from':
            try:
                f = open(resolve_parent(argv_bytes(parameter)), 'rb')
            except IOError as e:
                raise fatal("couldn't read %r" % parameter)
            for exclude_path in f.readlines():
                # FIXME: perhaps this should be rstrip('\n')
                exclude_path = resolve_parent(exclude_path.strip())
                if exclude_path:
                    excluded_paths.append(exclude_path)
    # Deduplicate and return in stable order.
    return sorted(frozenset(excluded_paths))
def parse_rx_excludes(options, fatal):
    """Traverse the options and extract all rx excludes, or call
    Option.fatal()."""
    excluded_patterns = []
    for flag in options:
        (option, parameter) = flag
        if option == '--exclude-rx':
            try:
                excluded_patterns.append(re.compile(argv_bytes(parameter)))
            except re.error as ex:
                fatal('invalid --exclude-rx pattern (%r): %s' % (parameter, ex))
        elif option == '--exclude-rx-from':
            try:
                f = open(resolve_parent(parameter), 'rb')
            except IOError as e:
                raise fatal("couldn't read %r" % parameter)
            for pattern in f.readlines():
                spattern = pattern.rstrip(b'\n')
                if not spattern:
                    continue  # skip blank lines
                try:
                    excluded_patterns.append(re.compile(spattern))
                except re.error as ex:
                    fatal('invalid --exclude-rx pattern (%r): %s' % (spattern, ex))
    return excluded_patterns
def should_rx_exclude_path(path, exclude_rxs):
    """Return True if path matches a regular expression in exclude_rxs."""
    for rx in exclude_rxs:
        if rx.search(path):
            debug1('Skipping %r: excluded by rx pattern %r.\n'
                   % (path, rx.pattern))
            return True
    return False
1003 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
1004 # that resolve against the current filesystem in the strip/graft
1005 # functions for example, but elsewhere as well. I suspect bup's not
1006 # always being careful about that. For some cases, the contents of
1007 # the current filesystem should be irrelevant, and consulting it might
1008 # produce the wrong result, perhaps via unintended symlink resolution,
def path_components(path):
    """Break path into a list of pairs of the form (name,
    full_path_to_name). Path must start with '/'.
    Example:
      '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
    if not path.startswith(b'/'):
        raise Exception('path must start with "/": %s' % path_msg(path))
    # Since we assume path startswith('/'), we can skip the first element.
    result = [(b'', b'/')]
    norm_path = os.path.abspath(path)
    if norm_path == b'/':
        return result
    full_path = b''
    for p in norm_path.split(b'/')[1:]:
        full_path += b'/' + p
        result.append((p, full_path))
    return result
def stripped_path_components(path, strip_prefixes):
    """Strip any prefix in strip_prefixes from path and return a list
    of path components where each component is (name,
    none_or_full_fs_path_to_name). Assume path startswith('/').
    See thelpers.py for examples."""
    normalized_path = os.path.abspath(path)
    # Try the longest prefixes first so the most specific one wins.
    sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
    for bp in sorted_strip_prefixes:
        normalized_bp = os.path.abspath(bp)
        if normalized_bp == b'/':
            continue
        if normalized_path.startswith(normalized_bp):
            prefix = normalized_path[:len(normalized_bp)]
            result = []
            for p in normalized_path[len(normalized_bp):].split(b'/'):
                if p:  # not b''
                    prefix += b'/' + p
                result.append((p, prefix))
            return result
    return path_components(path)
def grafted_path_components(graft_points, path):
    # Create a result that consists of some number of faked graft
    # directories before the graft point, followed by all of the real
    # directories from path that are after the graft point. Arrange
    # for the directory at the graft point in the result to correspond
    # to the "orig" directory in --graft orig=new. See t/thelpers.py
    # for some examples.
    #
    # Note that given --graft orig=new, orig and new have *nothing* to
    # do with each other, even if some of their component names
    # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
    # equivalent to --graft /foo/bar/baz=/x/y/z, or even
    # /foo/bar/baz=/x.
    #
    # FIXME: This can't be the best solution...
    clean_path = os.path.abspath(path)
    for graft_point in graft_points:
        old_prefix, new_prefix = graft_point
        # Expand prefixes iff not absolute paths.
        old_prefix = os.path.normpath(old_prefix)
        new_prefix = os.path.normpath(new_prefix)
        if clean_path.startswith(old_prefix):
            escaped_prefix = re.escape(old_prefix)
            grafted_path = re.sub(br'^' + escaped_prefix, new_prefix, clean_path)
            # Handle /foo=/ (at least) -- which produces //whatever.
            grafted_path = b'/' + grafted_path.lstrip(b'/')
            clean_path_components = path_components(clean_path)
            # Count the components that were stripped.
            strip_count = 0 if old_prefix == b'/' else old_prefix.count(b'/')
            new_prefix_parts = new_prefix.split(b'/')
            result_prefix = grafted_path.split(b'/')[:new_prefix.count(b'/')]
            result = [(p, None) for p in result_prefix] \
                + clean_path_components[strip_count:]
            # Now set the graft point name to match the end of new_prefix.
            graft_point = len(result_prefix)
            result[graft_point] = \
                (new_prefix_parts[-1], clean_path_components[strip_count][1])
            if new_prefix == b'/':  # --graft ...=/ is a special case.
                return result[1:]
            return result
    return path_components(clean_path)
_localtime = getattr(_helpers, 'localtime', None)

# If bup's C helpers provide localtime(), expose its extra tm_gmtoff and
# tm_zone fields via a namedtuple.
if _localtime:
    bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
                                       'tm_hour', 'tm_min', 'tm_sec',
                                       'tm_wday', 'tm_yday',
                                       'tm_isdst', 'tm_gmtoff', 'tm_zone'])

# Define a localtime() that returns bup_time when possible. Note:
# this means that any helpers.localtime() results may need to be
# passed through to_py_time() before being passed to python's time
# module, which doesn't appear willing to ignore the extra items.
if _localtime:
    def localtime(time):
        return bup_time(*_helpers.localtime(int(math.floor(time))))
    def utc_offset_str(t):
        """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
        If the current UTC offset does not represent an integer number
        of minutes, the fractional component will be truncated."""
        off = localtime(t).tm_gmtoff
        # Note: // doesn't truncate like C for negative values, it rounds down.
        offmin = abs(off) // 60
        m = offmin % 60
        h = (offmin - m) // 60
        return b'%+03d%02d' % (-h if off < 0 else h, m)
    def to_py_time(x):
        if isinstance(x, time.struct_time):
            return x
        return time.struct_time(x[:9])
else:
    localtime = time.localtime
    def utc_offset_str(t):
        return time.strftime(b'%z', localtime(t))
    def to_py_time(x):
        return x
# Characters/sequences git-check-ref-format(1) forbids (superset).
_some_invalid_save_parts_rx = re.compile(br'[\[ ~^:?*\\]|\.\.|//|@{')

def valid_save_name(name):
    """Return True if 'name' is acceptable as a bup save/branch name."""
    # Enforce a superset of the restrictions in git-check-ref-format(1)
    if name == b'@' \
       or name.startswith(b'/') or name.endswith(b'/') \
       or name.endswith(b'.'):
        return False
    if _some_invalid_save_parts_rx.search(name):
        return False
    for c in name:
        if byte_int(c) < 0x20 or byte_int(c) == 0x7f:
            return False
    for part in name.split(b'/'):
        if part.startswith(b'.') or part.endswith(b'.lock'):
            return False
    return True
_period_rx = re.compile(br'^([0-9]+)(s|min|h|d|w|m|y)$')

def period_as_secs(s):
    """Convert a period like b'10s'/b'2w' to seconds; b'forever' maps to
    infinity; return None when 's' cannot be parsed."""
    if s == b'forever':
        return float('inf')
    match = _period_rx.match(s)
    if not match:
        return None
    mag = int(match.group(1))
    scale = match.group(2)
    return mag * {b's': 1,
                  b'min': 60,
                  b'h': 60 * 60,
                  b'd': 60 * 60 * 24,
                  b'w': 60 * 60 * 24 * 7,
                  b'm': 60 * 60 * 24 * 31,
                  b'y': 60 * 60 * 24 * 366}[scale]