1 """Helper functions and classes for bup."""
3 from __future__ import absolute_import, division
4 from collections import namedtuple
5 from contextlib import contextmanager
6 from ctypes import sizeof, c_void_p
9 from subprocess import PIPE, Popen
10 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
11 import hashlib, heapq, math, operator, time, grp, tempfile
13 from bup import _helpers
14 from bup import compat
15 from bup.compat import argv_bytes, byte_int, pending_raise
16 from bup.io import byte_stream, path_msg
17 # This function should really be in helpers, not in bup.options. But we
18 # want options.py to be standalone so people can include it in other projects.
19 from bup.options import _tty_width as tty_width
# NOTE(review): this is a numbered excerpt; lines are elided between the
# numbered fragments below (e.g. line 26 is a docstring whose owning
# class/def is not visible here).
# Debug verbosity level, taken from the BUP_DEBUG environment variable.
22 buglvl = int(os.environ.get('BUP_DEBUG', 0))
26 """Helper to deal with Python scoping issues"""
# System page size in bytes; os.sysconf raises if the name is unsupported.
30 sc_page_size = os.sysconf('SC_PAGE_SIZE')
31 assert(sc_page_size > 0)
# Maximum exec() argument space; -1 means "no definite limit" per POSIX.
33 sc_arg_max = os.sysconf('SC_ARG_MAX')
34 if sc_arg_max == -1: # "no definite limit" - let's choose 2M
35 sc_arg_max = 2 * 1024 * 1024
# NOTE(review): fragmentary region (lines elided).  It selects an fdatasync
# implementation: os.fdatasync when present, with a Darwin-specific variant
# using fcntl F_FULLFSYNC and a fallback when that is unsupported.
39 for result in iterable:
44 _fdatasync = os.fdatasync
45 except AttributeError:
48 if sys.platform.startswith('darwin'):
49 # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
53 return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
55 # Fallback for file systems (SMB) that do not support F_FULLFSYNC
56 if e.errno == errno.ENOTSUP:
# Default case: expose the plain os.fdatasync-based implementation.
61 fdatasync = _fdatasync
# NOTE(review): fragments of three helpers (bodies partially elided):
# partition() splits a stream into the leading run matching `predicate`
# and the remainder; lines_until_sentinel() yields lines from f until a
# sentinel line; stat_if_exists() presumably returns os.stat(path) or None
# for a missing path -- TODO confirm against the full source.
64 def partition(predicate, stream):
65 """Returns (leading_matches_it, rest_it), where leading_matches_it
66 must be completely exhausted before traversing rest_it.
71 ns.first_nonmatch = None
72 def leading_matches():
77 ns.first_nonmatch = (x,)
81 yield ns.first_nonmatch[0]
84 return (leading_matches(), rest())
94 def lines_until_sentinel(f, sentinel, ex_type):
95 # sentinel must end with \n and must contain only one \n
98 if not (line and line.endswith(b'\n')):
99 raise ex_type('Hit EOF while reading line')
105 def stat_if_exists(path):
# Re-raise anything other than "no such file or directory".
109 if e.errno != errno.ENOENT:
114 # Write (blockingly) to sockets that may or may not be in blocking mode.
115 # We need this because our stderr is sometimes eaten by subprocesses
116 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
117 # leading to race conditions. Ick. We'll do it the hard way.
118 def _hard_write(fd, buf):
# Block until fd is writable, then write as much as the OS accepts.
120 (r,w,x) = select.select([], [fd], [], None)
122 raise IOError('select(fd) returned without being writable')
124 sz = os.write(fd, buf)
# EAGAIN from a temporarily-nonblocking fd is retried; anything else raises.
126 if e.errno != errno.EAGAIN:
134 """Print a log message to stderr."""
137 _hard_write(sys.stderr.fileno(), s if isinstance(s, bytes) else s.encode())
# TTY detection for stdout/stderr; BUP_FORCE_TTY bits 1/2 override.
151 istty1 = os.isatty(1) or (int(os.environ.get('BUP_FORCE_TTY', 0)) & 1)
152 istty2 = os.isatty(2) or (int(os.environ.get('BUP_FORCE_TTY', 0)) & 2)
155 """Calls log() if stderr is a TTY. Does nothing otherwise."""
156 global _last_progress
163 """Calls progress() only if we haven't printed progress in a while.
165 This avoids overloading the stderr buffer with excess junk.
# Rate-limit: only emit a new progress line after 0.1s has elapsed.
169 if now - _last_prog > 0.1:
175 """Calls progress() to redisplay the most recent progress message.
177 Useful after you've printed some other message that wipes out the
# Only re-show messages that ended with '\r' (i.e. transient progress lines).
180 if _last_progress and _last_progress.endswith('\r'):
181 progress(_last_progress)
184 def mkdirp(d, mode=None):
185 """Recursively create directories on path 'd'.
187 Unlike os.makedirs(), it doesn't raise an exception if the last element of
188 the path already exists.
# EEXIST from the final component is deliberately swallowed.
196 if e.errno == errno.EEXIST:
# NOTE(review): fragment of a heap-item wrapper class (its `class` line and
# the `self.entry = entry` assignment are elided); ordering delegates to
# the wrapped entry so heapq can compare items.
203 def __init__(self, entry, read_it):
205 self.read_it = read_it
207 return self.entry < x.entry
209 def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
# When a key attribute is given, equality compares that attribute;
# getattr(pe, key, None) tolerates pe being None on the first element.
211 samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
213 samekey = operator.eq
215 total = sum(len(it) for it in iters)
216 iters = (iter(it) for it in iters)
217 heap = ((next(it, None),it) for it in iters)
218 heap = [MergeIterItem(e, it) for e, it in heap if e]
# Progress callback every pfreq items.
223 if not count % pfreq:
225 e, it = heap[0].entry, heap[0].read_it
# Only yield entries whose key differs from the previously yielded one.
226 if not samekey(e, pe):
232 except StopIteration:
233 heapq.heappop(heap) # remove current
235 # shift current to new location
236 heapq.heapreplace(heap, MergeIterItem(e, it))
241 """Delete a file at path 'f' if it currently exists.
243 Unlike os.unlink(), does not throw an exception if the file didn't already
249 if e.errno != errno.ENOENT:
# Identifiers matching these need no shell quoting (bytes/str variants).
253 _bq_simple_id_rx = re.compile(br'^[-_./a-zA-Z0-9]+$')
254 _sq_simple_id_rx = re.compile(r'^[-_./a-zA-Z0-9]+$')
# NOTE(review): bquote/squote def lines are elided; both single-quote their
# argument using the standard '"'"' trick for embedded single quotes.
259 if _bq_simple_id_rx.match(x):
261 return b"'%s'" % x.replace(b"'", b"'\"'\"'")
266 if _sq_simple_id_rx.match(x):
268 return "'%s'" % x.replace("'", "'\"'\"'")
# quote() dispatches on bytes vs str (compat.str_type covers py2/py3).
271 if isinstance(x, bytes):
273 if isinstance(x, compat.str_type):
278 """Return a shell quoted string for cmd if it's a sequence, else cmd.
280 cmd must be a string, bytes, or a sequence of one or the other,
281 and the assumption is that if cmd is a string or bytes, then it's
282 already quoted (because it's what's actually being passed to
283 call() and friends. e.g. log(shstr(cmd)); call(cmd)
286 if isinstance(cmd, (bytes, compat.str_type)):
288 elif all(isinstance(x, bytes) for x in cmd):
289 return b' '.join(map(bquote, cmd))
290 elif all(isinstance(x, compat.str_type) for x in cmd):
291 return ' '.join(map(squote, cmd))
292 raise TypeError('unsupported shstr argument: ' + repr(cmd))
# exc is just check_call; exo (def elided) runs cmd capturing stdout/stderr.
295 exc = subprocess.check_call
306 assert stdin in (None, PIPE)
309 stdin=stdin, stdout=PIPE, stderr=stderr,
311 preexec_fn=preexec_fn,
313 out, err = p.communicate(input)
314 if check and p.returncode != 0:
315 raise Exception('subprocess %r failed with status %d%s'
316 % (b' '.join(map(quote, cmd)), p.returncode,
317 ', stderr: %r' % err if err else ''))
def readpipe(argv, preexec_fn=None, shell=False):
    """Run argv as a subprocess and return its captured stdout."""
    result = exo(argv, preexec_fn=preexec_fn, shell=shell)
    return result[0]
325 def _argmax_base(command):
# NOTE(review): base_size initialization (and the environ source) is elided
# from this excerpt; the visible lines account for the command string plus
# one "k=v\0" entry and one envp pointer per environment variable.
328 base_size += len(command) + 1
329 for k, v in compat.items(environ):
330 base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
334 def _argmax_args_size(args):
335 return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
338 def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
339 """If args is not empty, yield the output produced by calling the
340 command list with args as a sequence of strings (It may be necessary
341 to return multiple strings in order to respect ARG_MAX)."""
342 # The optional arg_max arg is a workaround for an issue with the
343 # current wvtest behavior.
344 base_size = _argmax_base(command)
# Remaining argument space after accounting for command + environment.
346 room = arg_max - base_size
349 next_size = _argmax_args_size(args[i:i+1])
# Flush the current batch when the next argument would not fit.
350 if room - next_size < 0:
356 assert(len(sub_args))
357 yield readpipe(command + sub_args, preexec_fn=preexec_fn)
360 def resolve_parent(p):
361 """Return the absolute path of a file without following any final symlink.
363 Behaves like os.path.realpath, but doesn't follow a symlink for the last
364 element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
365 will follow symlinks in p's directory)
# If the final element is a symlink, realpath only the directory part.
371 if st and stat.S_ISLNK(st.st_mode):
372 (dir, name) = os.path.split(p)
373 dir = os.path.realpath(dir)
374 out = os.path.join(dir, name)
376 out = os.path.realpath(p)
377 #log('realpathing:%r,%r\n' % (p, out))
def detect_fakeroot():
    """Return True if we appear to be running under fakeroot.

    fakeroot exports FAKEROOTKEY to its children; the variable's mere
    presence (not its value) is the signal.
    """
    # PEP 8: compare against None with "is not", not "!=".
    return os.getenv("FAKEROOTKEY") is not None
# NOTE(review): the enclosing def line (presumably is_superuser) is elided.
# On Cygwin, membership in group 544 (Administrators) or 0 counts as
# superuser; elsewhere euid 0 does.
386 if sys.platform.startswith('cygwin'):
388 # https://cygwin.com/ml/cygwin/2015-02/msg00057.html
389 groups = os.getgroups()
390 return 544 in groups or 0 in groups
393 return os.geteuid() == 0
396 def cache_key_value(get_value, key, cache):
397 """Return (value, was_cached). If there is a value in the cache
398 for key, use that, otherwise, call get_value(key) which should
399 throw a KeyError if there is no value -- in which case the cached
400 and returned value will be None.
402 try: # Do we already have it (or know there wasn't one)?
409 cache[key] = value = get_value(key)
417 """Get the FQDN of this machine."""
# Uses the C helper's gethostname; caching/details elided from this excerpt.
420 _hostname = _helpers.gethostname()
424 def format_filesize(size):
# NOTE(review): the docstring and the small-size base case are elided;
# visible math formats size with a binary (1024-based) unit prefix.
429 exponent = int(math.log(size) // math.log(unit))
430 size_prefix = "KMGTPE"[exponent - 1]
431 return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
# NOTE(review): large fragmentary region -- the client/server connection
# machinery (BaseConn, Conn, mux, DemuxConn).  Many body lines are elided;
# code below is reproduced verbatim with review comments only.
# Raised when the server reports an error instead of "ok".
434 class NotOk(Exception):
# --- BaseConn: abstract read/write connection with ok/error framing ---
439 def __init__(self, outp):
# Drain: keep reading fixed-size chunks until the stream is exhausted.
443 while self._read(65536): pass
445 def _read(self, size):
446 raise NotImplementedError("Subclasses must implement _read")
448 def read(self, size):
449 """Read 'size' bytes from input stream."""
451 return self._read(size)
453 def _readline(self, size):
454 raise NotImplementedError("Subclasses must implement _readline")
457 """Read from input stream until a newline is found."""
459 return self._readline()
461 def write(self, data):
462 """Write 'data' to output stream."""
463 #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
464 self.outp.write(data)
467 """Return true if input stream is readable."""
468 raise NotImplementedError("Subclasses must implement has_input")
471 """Indicate end of output from last sent command."""
472 self.write(b'\nok\n')
475 """Indicate server error to the client."""
# Collapse whitespace so the error fits the single-line protocol frame.
476 s = re.sub(br'\s+', b' ', s)
477 self.write(b'\nerror %s\n' % s)
479 def _check_ok(self, onempty):
482 for rl in linereader(self):
483 #log('%d got line: %r\n' % (os.getpid(), rl))
484 if not rl: # empty line
488 elif rl.startswith(b'error '):
489 #log('client: error: %s\n' % rl[6:])
493 raise Exception('server exited unexpectedly; see errors above')
495 def drain_and_check_ok(self):
496 """Remove all data for the current command from input stream."""
499 return self._check_ok(onempty)
502 """Verify that server action completed successfully."""
504 raise Exception('expected "ok", got %r' % rl)
505 return self._check_ok(onempty)
# --- Conn: BaseConn over a pair of plain file objects ---
508 class Conn(BaseConn):
509 def __init__(self, inp, outp):
510 BaseConn.__init__(self, outp)
513 def _read(self, size):
514 return self.inp.read(size)
517 return self.inp.readline()
# Poll (timeout 0) for readability of the input fd.
520 [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
522 assert(rl[0] == self.inp.fileno())
# Generator yielding exactly n bytes from fd, raising on premature EOF.
528 def checked_reader(fd, n):
530 rl, _, _ = select.select([fd], [], [])
533 if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
# Packet framing: 4-byte big-endian length + 1-byte channel id
# (1 = stdout, 2 = stderr, 3 = end-of-stream).
538 MAX_PACKET = 128 * 1024
539 def mux(p, outfd, outr, errr):
542 while p.poll() is None:
543 rl, _, _ = select.select(fds, [], [])
546 buf = os.read(outr, MAX_PACKET)
548 os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
550 buf = os.read(errr, 1024)
552 os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
554 os.write(outfd, struct.pack('!IB', 0, 3))
557 class DemuxConn(BaseConn):
558 """A helper class for bup's client-server protocol."""
559 def __init__(self, infd, outp):
560 BaseConn.__init__(self, outp)
561 # Anything that comes through before the sync string was not
562 # multiplexed and can be assumed to be debug/log before mux init.
564 stderr = byte_stream(sys.stderr)
565 while tail != b'BUPMUX':
566 # Make sure to write all pre-BUPMUX output to stderr
567 b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
569 ex = IOError('demux: unexpected EOF during initialization')
570 with pending_raise(ex):
574 stderr.write(tail[:-6])
582 def write(self, data):
584 BaseConn.write(self, data)
586 def _next_packet(self, timeout):
587 if self.closed: return False
588 rl, wl, xl = select.select([self.infd], [], [], timeout)
589 if not rl: return False
590 assert(rl[0] == self.infd)
# Read the 5-byte header: length (4 bytes) + channel byte.
591 ns = b''.join(checked_reader(self.infd, 5))
592 n, fdw = struct.unpack('!IB', ns)
593 assert(n <= MAX_PACKET)
595 self.reader = checked_reader(self.infd, n)
# Channel 2 payloads are forwarded straight to our stderr.
597 for buf in checked_reader(self.infd, n):
598 byte_stream(sys.stderr).write(buf)
601 debug2("DemuxConn: marked closed\n")
604 def _load_buf(self, timeout):
605 if self.buf is not None:
607 while not self.closed:
608 while not self.reader:
609 if not self._next_packet(timeout):
612 self.buf = next(self.reader)
614 except StopIteration:
# _read_parts yields buffered chunks, splitting at the index returned
# by ix_fn (None means "consume the whole buffer").
618 def _read_parts(self, ix_fn):
619 while self._load_buf(None):
620 assert(self.buf is not None)
622 if i is None or i == len(self.buf):
627 self.buf = self.buf[i:]
635 return buf.index(b'\n')+1
638 return b''.join(self._read_parts(find_eol))
640 def _read(self, size):
642 def until_size(buf): # Closes on csize
643 if len(buf) < csize[0]:
648 return b''.join(self._read_parts(until_size))
651 return self._load_buf(0)
# NOTE(review): fragmentary region -- line readers, atomic file replace,
# and the mmap helpers.  Reproduced verbatim with review comments.
655 """Generate a list of input lines from 'f' without terminating newlines."""
663 def chunkyreader(f, count = None):
664 """Generate a list of chunks of data read from 'f'.
666 If count is None, read until EOF is reached.
668 If count is a positive integer, read 'count' bytes from 'f'. If EOF is
669 reached while reading, raise IOError.
# Read at most 64k at a time until count bytes have been delivered.
673 b = f.read(min(count, 65536))
675 raise IOError('EOF with %d bytes remaining' % count)
686 def atomically_replaced_file(name, mode='w', buffering=-1):
687 """Yield a file that will be atomically renamed name when leaving the block.
689 This contextmanager yields an open file object that is backed by a
690 temporary file which will be renamed (atomically) to the target
691 name if everything succeeds.
693 The mode and buffering arguments are handled exactly as with open,
694 and the yielded file will have very restrictive permissions, as
699 with atomically_replaced_file('foo.txt', 'w') as f:
700 f.write('hello jack.')
# Temp file is created in the target's directory so rename() stays on
# one filesystem and is therefore atomic.
704 (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
705 text=('b' not in mode))
708 f = os.fdopen(ffd, mode, buffering)
716 os.rename(tempname, name)
718 unlink(tempname) # nonexistent file is ignored
722 """Append "/" to 's' if it doesn't already end in "/"."""
723 assert isinstance(s, bytes)
724 if s and not s.endswith(b'/'):
730 def _mmap_do(f, sz, flags, prot, close):
# sz == 0 means "map the whole file" (size taken from fstat).
732 st = os.fstat(f.fileno())
735 # trying to open a zero-length map gives an error, but an empty
736 # string has all the same behaviour of a zero-length map, ie. it has
739 map = mmap.mmap(f.fileno(), sz, flags, prot)
741 f.close() # map will persist beyond file close
745 def mmap_read(f, sz = 0, close=True):
746 """Create a read-only memory mapped region on file 'f'.
747 If sz is 0, the region will cover the entire file.
749 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
752 def mmap_readwrite(f, sz = 0, close=True):
753 """Create a read-write memory mapped region on file 'f'.
754 If sz is 0, the region will cover the entire file.
756 return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
760 def mmap_readwrite_private(f, sz = 0, close=True):
761 """Create a read-write memory mapped region on file 'f'.
762 If sz is 0, the region will cover the entire file.
763 The map is private, which means the changes are never flushed back to the
766 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
# Optional C-helper mincore support (None when the helper lacks it).
770 _mincore = getattr(_helpers, 'mincore', None)
772 # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
773 MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
def _set_fmincore_chunk_size():
    """Compute and cache the chunk size used when scanning mincore data.

    The result (stored in the module-level _fmincore_chunk_size) is the
    largest multiple of sc_page_size not exceeding 64MB, or a single
    page when pages are at least that large.
    """
    global _fmincore_chunk_size
    preferred = 64 * 1024 * 1024
    if sc_page_size < preferred:
        _fmincore_chunk_size = sc_page_size * (preferred // sc_page_size)
    else:
        _fmincore_chunk_size = sc_page_size
# NOTE(review): fragmentary region -- fmincore (def line elided), the
# timestamp/number parsers, saved-error helpers, the Ctrl-C hook, and
# columnate.  Reproduced verbatim with review comments.
785 """Return the mincore() data for fd as a bytearray whose values can be
786 tested via MINCORE_INCORE, or None if fd does not fully
787 support the operation."""
789 if (st.st_size == 0):
791 if not _fmincore_chunk_size:
792 _set_fmincore_chunk_size()
# One result byte per page; the file is scanned one chunk-mapping at a time.
793 pages_per_chunk = _fmincore_chunk_size // sc_page_size;
794 page_count = (st.st_size + sc_page_size - 1) // sc_page_size;
795 chunk_count = (st.st_size + _fmincore_chunk_size - 1) // _fmincore_chunk_size
796 result = bytearray(page_count)
797 for ci in compat.range(chunk_count):
798 pos = _fmincore_chunk_size * ci;
799 msize = min(_fmincore_chunk_size, st.st_size - pos)
801 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
802 except mmap.error as ex:
# Unmappable fds (pipes) are treated as "not supported" -> None.
803 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
804 # Perhaps the file was a pipe, i.e. "... | bup split ..."
808 _mincore(m, msize, 0, result, ci * pages_per_chunk)
809 except OSError as ex:
810 if ex.errno == errno.ENOSYS:
816 def parse_timestamp(epoch_str):
817 """Return the number of nanoseconds since the epoch that are described
818 by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
819 throw a ValueError that may contain additional information."""
820 ns_per = {'s' : 1000000000,
824 match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
# A bare number without a unit suffix gets a more specific error message.
826 if re.match(r'^([-+]?[0-9]+)$', epoch_str):
827 raise ValueError('must include units, i.e. 100ns, 100ms, ...')
829 (n, units) = match.group(1, 2)
833 return n * ns_per[units]
837 """Parse string or bytes as a possibly unit suffixed number.
840 199.2k means 203981 bytes
841 1GB means 1073741824 bytes
842 2.1 tb means 2199023255552 bytes
844 if isinstance(s, bytes):
845 # FIXME: should this raise a ValueError for UnicodeDecodeError
846 # (perhaps with the latter as the context).
847 s = s.decode('ascii')
848 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
850 raise ValueError("can't parse %r as a number" % s)
851 (val, unit) = g.groups()
# Units are binary multiples (k = 1024, ...); case handling is elided here.
854 if unit in ['t', 'tb']:
855 mult = 1024*1024*1024*1024
856 elif unit in ['g', 'gb']:
857 mult = 1024*1024*1024
858 elif unit in ['m', 'mb']:
860 elif unit in ['k', 'kb']:
862 elif unit in ['', 'b']:
865 raise ValueError("invalid unit %r in number %r" % (unit, s))
871 """Append an error message to the list of saved errors.
873 Once processing is able to stop and output the errors, the saved errors are
874 accessible in the module variable helpers.saved_errors.
876 saved_errors.append(e)
885 def die_if_errors(msg=None, status=1):
889 msg = 'warning: %d errors encountered\n' % len(saved_errors)
895 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
897 The new exception handler will make sure that bup will exit without an ugly
898 stacktrace when Ctrl-C is hit.
900 oldhook = sys.excepthook
901 def newhook(exctype, value, traceback):
902 if exctype == KeyboardInterrupt:
903 log('\nInterrupted.\n')
# Everything except KeyboardInterrupt is delegated to the previous hook.
905 return oldhook(exctype, value, traceback)
906 sys.excepthook = newhook
909 def columnate(l, prefix):
910 """Format elements of 'l' in columns with 'prefix' leading each line.
912 The number of columns is determined automatically based on the string
# bytes vs str handling: pick matching empty-string/newline/format literals.
915 binary = isinstance(prefix, bytes)
916 nothing = b'' if binary else ''
917 nl = b'\n' if binary else '\n'
921 clen = max(len(s) for s in l)
922 ncols = (tty_width() - len(prefix)) // (clen + 2)
# Pad l so it divides evenly into ncols columns.
927 while len(l) % ncols:
929 rows = len(l) // ncols
930 for s in compat.range(0, len(l), rows):
931 cols.append(l[s:s+rows])
933 fmt = b'%-*s' if binary else '%-*s'
934 for row in zip(*cols):
935 out += prefix + nothing.join((fmt % (clen+2, s)) for s in row) + nl
939 def parse_date_or_fatal(str, fatal):
940 """Parses the given date or calls Option.fatal().
941 For now we expect a string that contains a float."""
944 except ValueError as e:
945 raise fatal('invalid date format (should be a float): %r' % e)
950 def parse_excludes(options, fatal):
951 """Traverse the options and extract all excludes, or call Option.fatal()."""
955 (option, parameter) = flag
956 if option == '--exclude':
957 excluded_paths.append(resolve_parent(argv_bytes(parameter)))
958 elif option == '--exclude-from':
960 f = open(resolve_parent(argv_bytes(parameter)), 'rb')
962 raise fatal("couldn't read %r" % parameter)
963 for exclude_path in f.readlines():
964 # FIXME: perhaps this should be rstrip('\n')
965 exclude_path = resolve_parent(exclude_path.strip())
967 excluded_paths.append(exclude_path)
# De-duplicate and return a stable (sorted) list.
968 return sorted(frozenset(excluded_paths))
971 def parse_rx_excludes(options, fatal):
972 """Traverse the options and extract all rx excludes, or call
974 excluded_patterns = []
977 (option, parameter) = flag
978 if option == '--exclude-rx':
980 excluded_patterns.append(re.compile(argv_bytes(parameter)))
981 except re.error as ex:
982 fatal('invalid --exclude-rx pattern (%r): %s' % (parameter, ex))
983 elif option == '--exclude-rx-from':
985 f = open(resolve_parent(parameter), 'rb')
987 raise fatal("couldn't read %r" % parameter)
988 for pattern in f.readlines():
989 spattern = pattern.rstrip(b'\n')
993 excluded_patterns.append(re.compile(spattern))
994 except re.error as ex:
995 fatal('invalid --exclude-rx pattern (%r): %s' % (spattern, ex))
996 return excluded_patterns
999 def should_rx_exclude_path(path, exclude_rxs):
1000 """Return True if path matches a regular expression in exclude_rxs."""
1001 for rx in exclude_rxs:
# NOTE(review): the rx.match/search test line is elided from this excerpt.
1003 debug1('Skipping %r: excluded by rx pattern %r.\n'
1004 % (path, rx.pattern))
1009 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
1010 # that resolve against the current filesystem in the strip/graft
1011 # functions for example, but elsewhere as well. I suspect bup's not
1012 # always being careful about that. For some cases, the contents of
1013 # the current filesystem should be irrelevant, and consulting it might
1014 # produce the wrong result, perhaps via unintended symlink resolution,
1017 def path_components(path):
1018 """Break path into a list of pairs of the form (name,
1019 full_path_to_name). Path must start with '/'.
1021 '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
1022 if not path.startswith(b'/'):
1023 raise Exception('path must start with "/": %s' % path_msg(path))
1024 # Since we assume path startswith('/'), we can skip the first element.
1025 result = [(b'', b'/')]
1026 norm_path = os.path.abspath(path)
1027 if norm_path == b'/':
1030 for p in norm_path.split(b'/')[1:]:
1031 full_path += b'/' + p
1032 result.append((p, full_path))
1036 def stripped_path_components(path, strip_prefixes):
1037 """Strip any prefix in strip_prefixes from path and return a list
1038 of path components where each component is (name,
1039 none_or_full_fs_path_to_name). Assume path startswith('/').
1040 See thelpers.py for examples."""
1041 normalized_path = os.path.abspath(path)
# Longest prefix wins, so sort candidates by length, descending.
1042 sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
1043 for bp in sorted_strip_prefixes:
1044 normalized_bp = os.path.abspath(bp)
1045 if normalized_bp == b'/':
1047 if normalized_path.startswith(normalized_bp):
1048 prefix = normalized_path[:len(normalized_bp)]
1050 for p in normalized_path[len(normalized_bp):].split(b'/'):
1054 result.append((p, prefix))
# No prefix matched: fall back to the unstripped components.
1057 return path_components(path)
1060 def grafted_path_components(graft_points, path):
1061 # Create a result that consists of some number of faked graft
1062 # directories before the graft point, followed by all of the real
1063 # directories from path that are after the graft point. Arrange
1064 # for the directory at the graft point in the result to correspond
1065 # to the "orig" directory in --graft orig=new. See t/thelpers.py
1066 # for some examples.
1068 # Note that given --graft orig=new, orig and new have *nothing* to
1069 # do with each other, even if some of their component names
1070 # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
1071 # equivalent to --graft /foo/bar/baz=/x/y/z, or even
1074 # FIXME: This can't be the best solution...
1075 clean_path = os.path.abspath(path)
1076 for graft_point in graft_points:
1077 old_prefix, new_prefix = graft_point
1078 # Expand prefixes iff not absolute paths.
1079 old_prefix = os.path.normpath(old_prefix)
1080 new_prefix = os.path.normpath(new_prefix)
1081 if clean_path.startswith(old_prefix):
1082 escaped_prefix = re.escape(old_prefix)
1083 grafted_path = re.sub(br'^' + escaped_prefix, new_prefix, clean_path)
1084 # Handle /foo=/ (at least) -- which produces //whatever.
1085 grafted_path = b'/' + grafted_path.lstrip(b'/')
1086 clean_path_components = path_components(clean_path)
1087 # Count the components that were stripped.
1088 strip_count = 0 if old_prefix == b'/' else old_prefix.count(b'/')
1089 new_prefix_parts = new_prefix.split(b'/')
1090 result_prefix = grafted_path.split(b'/')[:new_prefix.count(b'/')]
# Faked graft directories carry None instead of a real fs path.
1091 result = [(p, None) for p in result_prefix] \
1092 + clean_path_components[strip_count:]
1093 # Now set the graft point name to match the end of new_prefix.
1094 graft_point = len(result_prefix)
1095 result[graft_point] = \
1096 (new_prefix_parts[-1], clean_path_components[strip_count][1])
1097 if new_prefix == b'/': # --graft ...=/ is a special case.
# No graft point matched: plain components of the cleaned path.
1100 return path_components(clean_path)
# Optional extended localtime from the C helper (None when unavailable).
1106 _localtime = getattr(_helpers, 'localtime', None)
# struct_time lookalike that additionally carries tm_gmtoff and tm_zone.
1109 bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
1110 'tm_hour', 'tm_min', 'tm_sec',
1111 'tm_wday', 'tm_yday',
1112 'tm_isdst', 'tm_gmtoff', 'tm_zone'])
1114 # Define a localtime() that returns bup_time when possible. Note:
1115 # this means that any helpers.localtime() results may need to be
1116 # passed through to_py_time() before being passed to python's time
1117 # module, which doesn't appear willing to ignore the extra items.
1119 def localtime(time):
1120 return bup_time(*_helpers.localtime(int(floor(time))))
1121 def utc_offset_str(t):
1122 """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
1123 If the current UTC offset does not represent an integer number
1124 of minutes, the fractional component will be truncated."""
1125 off = localtime(t).tm_gmtoff
1126 # Note: // doesn't truncate like C for negative values, it rounds down.
1127 offmin = abs(off) // 60
1129 h = (offmin - m) // 60
1130 return b'%+03d%02d' % (-h if off < 0 else h, m)
# to_py_time fragment: strip the extra fields before handing to time module.
1132 if isinstance(x, time.struct_time):
1134 return time.struct_time(x[:9])
# Fallback branch when the C helper lacks localtime.
1136 localtime = time.localtime
def utc_offset_str(t):
    """Return the local offset from UTC as b"+hhmm"/b"-hhmm" for time t.

    Fallback used when the C helper's extended localtime() is not
    available.  Python 3's time.strftime() rejects a bytes format
    string, so format with str and encode the ASCII result, matching
    the bytes return type of the primary implementation.
    """
    return time.strftime('%z', localtime(t)).encode('ascii')
# Characters/sequences git-check-ref-format(1) forbids in ref components.
1143 _some_invalid_save_parts_rx = re.compile(br'[\[ ~^:?*\\]|\.\.|//|@{')
1145 def valid_save_name(name):
1146 # Enforce a superset of the restrictions in git-check-ref-format(1)
1148 or name.startswith(b'/') or name.endswith(b'/') \
1149 or name.endswith(b'.'):
1151 if _some_invalid_save_parts_rx.search(name):
# Reject control characters and DEL anywhere in the name.
1154 if byte_int(c) < 0x20 or byte_int(c) == 0x7f:
1156 for part in name.split(b'/'):
1157 if part.startswith(b'.') or part.endswith(b'.lock'):
1162 _period_rx = re.compile(br'^([0-9]+)(s|min|h|d|w|m|y)$')
1164 def period_as_secs(s):
1167 match = _period_rx.match(s)
1170 mag = int(match.group(1))
1171 scale = match.group(2)
# NOTE(review): the b'min'/b'h'/b'd' entries are elided from this excerpt;
# note that a month is treated as 31 days and a year as 366 days.
1172 return mag * {b's': 1,
1176 b'w': 60 * 60 * 24 * 7,
1177 b'm': 60 * 60 * 24 * 31,
1178 b'y': 60 * 60 * 24 * 366}[scale]