1 """Helper functions and classes for bup."""
3 from __future__ import absolute_import, division
4 from collections import namedtuple
5 from contextlib import contextmanager
6 from ctypes import sizeof, c_void_p
9 from subprocess import PIPE, Popen
10 import sys, os, subprocess, errno, select, mmap, stat, re, struct
11 import hashlib, heapq, math, operator, time, tempfile
13 from bup import _helpers
14 from bup import compat
15 from bup.compat import argv_bytes, byte_int, pending_raise
16 from bup.io import byte_stream, path_msg
17 # This function should really be in helpers, not in bup.options. But we
18 # want options.py to be standalone so people can include it in other projects.
19 from bup.options import _tty_width as tty_width
22 buglvl = int(os.environ.get('BUP_DEBUG', 0))
26 """Helper to deal with Python scoping issues"""
30 sc_page_size = os.sysconf('SC_PAGE_SIZE')
31 assert(sc_page_size > 0)
33 sc_arg_max = os.sysconf('SC_ARG_MAX')
34 if sc_arg_max == -1: # "no definite limit" - let's choose 2M
35 sc_arg_max = 2 * 1024 * 1024
39 for result in iterable:
44 _fdatasync = os.fdatasync
45 except AttributeError:
48 if sys.platform.startswith('darwin'):
49 # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
53 return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
55 # Fallback for file systems (SMB) that do not support F_FULLFSYNC
56 if e.errno == errno.ENOTSUP:
61 fdatasync = _fdatasync
64 def partition(predicate, stream):
65 """Returns (leading_matches_it, rest_it), where leading_matches_it
66 must be completely exhausted before traversing rest_it.
71 ns.first_nonmatch = None
72 def leading_matches():
77 ns.first_nonmatch = (x,)
81 yield ns.first_nonmatch[0]
84 return (leading_matches(), rest())
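# A minimal illustration (not part of the original module): partition() hands
# back two generators over the same underlying iterator, so the first must be
# fully drained before the second is touched, e.g.
#
#   first, rest = partition(lambda x: x % 2 == 0, iter([2, 4, 5, 6]))
#   list(first)   # -> [2, 4]
#   list(rest)    # -> [5, 6]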
94 def lines_until_sentinel(f, sentinel, ex_type):
95 # sentinel must end with \n and must contain only one \n
98 if not (line and line.endswith(b'\n')):
99 raise ex_type('Hit EOF while reading line')
105 def stat_if_exists(path):
109 if e.errno != errno.ENOENT:
114 # Write (blockingly) to sockets that may or may not be in blocking mode.
115 # We need this because our stderr is sometimes eaten by subprocesses
116 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
117 # leading to race conditions. Ick. We'll do it the hard way.
118 def _hard_write(fd, buf):
120 (r,w,x) = select.select([], [fd], [], None)
122 raise IOError('select(fd) returned without being writable')
124 sz = os.write(fd, buf)
126 if e.errno != errno.EAGAIN:
134 """Print a log message to stderr."""
137 _hard_write(sys.stderr.fileno(), s if isinstance(s, bytes) else s.encode())
151 istty1 = os.isatty(1) or (int(os.environ.get('BUP_FORCE_TTY', 0)) & 1)
152 istty2 = os.isatty(2) or (int(os.environ.get('BUP_FORCE_TTY', 0)) & 2)
155 """Calls log() if stderr is a TTY. Does nothing otherwise."""
156 global _last_progress
163 """Calls progress() only if we haven't printed progress in a while.
165 This avoids overloading the stderr buffer with excess junk.
169 if now - _last_prog > 0.1:
175 """Calls progress() to redisplay the most recent progress message.
177 Useful after you've printed some other message that wipes out the
180 if _last_progress and _last_progress.endswith('\r'):
181 progress(_last_progress)
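# Illustrative usage note (not part of the original module): in a tight loop,
# qprogress() is the cheap choice, since the 0.1s check above throttles the
# writes to stderr; 'items' below is hypothetical.
#
#   for n, item in enumerate(items):
#       qprogress('%d items processed\r' % n)   # at most ~10 updates/second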
184 def mkdirp(d, mode=None):
185 """Recursively create directories on path 'd'.
187 Unlike os.makedirs(), it doesn't raise an exception if the last element of
188 the path already exists.
196 if e.errno == errno.EEXIST:
203 def __init__(self, entry, read_it):
205 self.read_it = read_it
207 return self.entry < x.entry
209 def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
211 samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
213 samekey = operator.eq
215 total = sum(len(it) for it in iters)
216 iters = (iter(it) for it in iters)
217 heap = ((next(it, None),it) for it in iters)
218 heap = [MergeIterItem(e, it) for e, it in heap if e]
223 if not count % pfreq:
225 e, it = heap[0].entry, heap[0].read_it
226 if not samekey(e, pe):
232 except StopIteration:
233 heapq.heappop(heap) # remove current
235 # shift current to new location
236 heapq.heapreplace(heap, MergeIterItem(e, it))
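# A small sketch of how merge_iter() behaves (illustrative only): it merges
# already-sorted sequences, skips consecutive equal entries, and reports
# progress through the pfreq/pfunc/pfinal callbacks, e.g.
#
#   noop = lambda count, total: None
#   list(merge_iter([[1, 3], [2, 3, 4]], 1024, noop, noop))   # -> [1, 2, 3, 4]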
241 """Delete a file at path 'f' if it currently exists.
243 Unlike os.unlink(), does not throw an exception if the file didn't already
249 if e.errno != errno.ENOENT:
253 _bq_simple_id_rx = re.compile(br'^[-_./a-zA-Z0-9]+$')
254 _sq_simple_id_rx = re.compile(r'^[-_./a-zA-Z0-9]+$')
259 if _bq_simple_id_rx.match(x):
261 return b"'%s'" % x.replace(b"'", b"'\"'\"'")
266 if _sq_simple_id_rx.match(x):
268 return "'%s'" % x.replace("'", "'\"'\"'")
271 if isinstance(x, bytes):
273 if isinstance(x, compat.str_type):
278 """Return a shell quoted string for cmd if it's a sequence, else cmd.
280 cmd must be a string, bytes, or a sequence of one or the other,
281 and the assumption is that if cmd is a string or bytes, then it's
282 already quoted (because it's what's actually being passed to
283 call() and friends), e.g. log(shstr(cmd)); call(cmd)
286 if isinstance(cmd, (bytes, compat.str_type)):
288 elif all(isinstance(x, bytes) for x in cmd):
289 return b' '.join(map(bquote, cmd))
290 elif all(isinstance(x, compat.str_type) for x in cmd):
291 return ' '.join(map(squote, cmd))
292 raise TypeError('unsupported shstr argument: ' + repr(cmd))
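# Illustrative only: the quoting helpers are meant for logging the command
# that is about to run, not for building the command itself, e.g.
#
#   cmd = [b'ls', b'-l', b'my file']
#   log(shstr(cmd) + b'\n')   # logs: ls -l 'my file'
#   # cmd is still handed to the subprocess machinery as the original list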
295 exc = subprocess.check_call
306 assert stdin in (None, PIPE)
309 stdin=stdin, stdout=PIPE, stderr=stderr,
311 preexec_fn=preexec_fn,
313 out, err = p.communicate(input)
314 if check and p.returncode != 0:
315 raise Exception('subprocess %r failed with status %d%s'
316 % (b' '.join(map(quote, cmd)), p.returncode,
317 ', stderr: %r' % err if err else ''))
320 def readpipe(argv, preexec_fn=None, shell=False):
321 """Run a subprocess and return its output."""
322 return exo(argv, preexec_fn=preexec_fn, shell=shell)[0]
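# Illustrative only:
#
#   readpipe([b'echo', b'hello'])   # -> b'hello\n'
#
# A failing subprocess raises the Exception produced by exo() above.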
325 def _argmax_base(command):
328 base_size += len(command) + 1
329 for k, v in compat.items(environ):
330 base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
334 def _argmax_args_size(args):
335 return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
338 def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
339 """If args is not empty, yield the output produced by calling the
340 command list with args as a sequence of strings (it may be necessary
341 to yield the output as multiple strings in order to respect ARG_MAX)."""
342 # The optional arg_max arg is a workaround for an issue with the
343 # current wvtest behavior.
344 base_size = _argmax_base(command)
346 room = arg_max - base_size
349 next_size = _argmax_args_size(args[i:i+1])
350 if room - next_size < 0:
356 assert(len(sub_args))
357 yield readpipe(command + sub_args, preexec_fn=preexec_fn)
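# Illustrative only: because the argument list may be split across several
# invocations to stay under ARG_MAX, batchpipe() can yield more than one
# output string, so callers normally join them ('paths' is hypothetical):
#
#   out = b''.join(batchpipe([b'echo'], paths))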
360 def resolve_parent(p):
361 """Return the absolute path of a file without following any final symlink.
363 Behaves like os.path.realpath, but doesn't follow a symlink for the last
364 element. (i.e. if 'p' itself is a symlink, this function won't follow it, but it
365 will follow symlinks in p's directory.)
371 if st and stat.S_ISLNK(st.st_mode):
372 (dir, name) = os.path.split(p)
373 dir = os.path.realpath(dir)
374 out = os.path.join(dir, name)
376 out = os.path.realpath(p)
377 #log('realpathing:%r,%r\n' % (p, out))
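# Illustrative only (assuming /tmp/link is a symlink to /etc):
#
#   resolve_parent(b'/tmp/link')          # -> b'/tmp/link' (final symlink kept)
#   resolve_parent(b'/tmp/link/passwd')   # -> b'/etc/passwd' (parents resolved)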
381 def detect_fakeroot():
382 "Return True if we appear to be running under fakeroot."
383 return os.getenv("FAKEROOTKEY") is not None
386 if sys.platform.startswith('cygwin'):
388 # https://cygwin.com/ml/cygwin/2015-02/msg00057.html
389 groups = os.getgroups()
390 return 544 in groups or 0 in groups
393 return os.geteuid() == 0
396 def cache_key_value(get_value, key, cache):
397 """Return (value, was_cached). If there is a value in the cache
398 for key, use that, otherwise, call get_value(key) which should
399 throw a KeyError if there is no value -- in which case the cached
400 and returned value will be None.
402 try: # Do we already have it (or know there wasn't one)?
409 cache[key] = value = get_value(key)
417 """Get the FQDN of this machine."""
420 _hostname = _helpers.gethostname()
424 def format_filesize(size):
429 exponent = int(math.log(size) // math.log(unit))
430 size_prefix = "KMGTPE"[exponent - 1]
431 return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
434 class NotOk(Exception):
439 def __init__(self, outp):
443 while self._read(65536): pass
445 def _read(self, size):
446 raise NotImplementedError("Subclasses must implement _read")
448 def read(self, size):
449 """Read 'size' bytes from input stream."""
451 return self._read(size)
453 def _readline(self, size):
454 raise NotImplementedError("Subclasses must implement _readline")
457 """Read from input stream until a newline is found."""
459 return self._readline()
461 def write(self, data):
462 """Write 'data' to output stream."""
463 #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
464 self.outp.write(data)
467 """Return true if input stream is readable."""
468 raise NotImplementedError("Subclasses must implement has_input")
471 """Indicate end of output from last sent command."""
472 self.write(b'\nok\n')
475 """Indicate server error to the client."""
476 s = re.sub(br'\s+', b' ', s)
477 self.write(b'\nerror %s\n' % s)
479 def _check_ok(self, onempty):
482 for rl in linereader(self):
483 #log('%d got line: %r\n' % (os.getpid(), rl))
484 if not rl: # empty line
488 elif rl.startswith(b'error '):
489 #log('client: error: %s\n' % rl[6:])
493 raise Exception('server exited unexpectedly; see errors above')
495 def drain_and_check_ok(self):
496 """Remove all data for the current command from input stream."""
499 return self._check_ok(onempty)
502 """Verify that server action completed successfully."""
504 raise Exception('expected "ok", got %r' % rl)
505 return self._check_ok(onempty)
508 class Conn(BaseConn):
509 def __init__(self, inp, outp):
510 BaseConn.__init__(self, outp)
513 def _read(self, size):
514 return self.inp.read(size)
517 return self.inp.readline()
520 [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
522 assert(rl[0] == self.inp.fileno())
528 def checked_reader(fd, n):
530 rl, _, _ = select.select([fd], [], [])
533 if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
538 MAX_PACKET = 128 * 1024
539 def mux(p, outfd, outr, errr):
542 while p.poll() is None:
543 rl, _, _ = select.select(fds, [], [])
546 buf = os.read(outr, MAX_PACKET)
548 os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
550 buf = os.read(errr, 1024)
552 os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
554 os.write(outfd, struct.pack('!IB', 0, 3))
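# Illustrative only: each packet written by mux() is a 5-byte header,
# struct.pack('!IB', length, channel), followed by the payload.  Channel 1
# carries the subprocess's stdout, channel 2 its stderr, and the zero-length
# channel-3 packet above marks the end of the stream, e.g.
#
#   struct.pack('!IB', 5, 1) + b'hello'   # 5 bytes of stdout
#   struct.pack('!IB', 0, 3)              # end-of-stream marker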
557 class DemuxConn(BaseConn):
558 """A helper class for bup's client-server protocol."""
559 def __init__(self, infd, outp):
560 BaseConn.__init__(self, outp)
561 # Anything that comes through before the sync string was not
562 # multiplexed and can be assumed to be debug/log before mux init.
564 stderr = byte_stream(sys.stderr)
565 while tail != b'BUPMUX':
566 # Make sure to write all pre-BUPMUX output to stderr
567 b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
569 ex = IOError('demux: unexpected EOF during initialization')
570 with pending_raise(ex):
574 stderr.write(tail[:-6])
582 def write(self, data):
584 BaseConn.write(self, data)
586 def _next_packet(self, timeout):
587 if self.closed: return False
588 rl, wl, xl = select.select([self.infd], [], [], timeout)
589 if not rl: return False
590 assert(rl[0] == self.infd)
591 ns = b''.join(checked_reader(self.infd, 5))
592 n, fdw = struct.unpack('!IB', ns)
594 # assume that something went wrong and print stuff
595 ns += os.read(self.infd, 1024)
596 stderr = byte_stream(sys.stderr)
599 raise Exception("Connection broken")
601 self.reader = checked_reader(self.infd, n)
603 for buf in checked_reader(self.infd, n):
604 byte_stream(sys.stderr).write(buf)
607 debug2("DemuxConn: marked closed\n")
610 def _load_buf(self, timeout):
611 if self.buf is not None:
613 while not self.closed:
614 while not self.reader:
615 if not self._next_packet(timeout):
618 self.buf = next(self.reader)
620 except StopIteration:
624 def _read_parts(self, ix_fn):
625 while self._load_buf(None):
626 assert(self.buf is not None)
628 if i is None or i == len(self.buf):
633 self.buf = self.buf[i:]
641 return buf.index(b'\n')+1
644 return b''.join(self._read_parts(find_eol))
646 def _read(self, size):
648 def until_size(buf): # Closes on csize
649 if len(buf) < csize[0]:
654 return b''.join(self._read_parts(until_size))
657 return self._load_buf(0)
661 """Generate a list of input lines from 'f' without terminating newlines."""
669 def chunkyreader(f, count = None):
670 """Generate a list of chunks of data read from 'f'.
672 If count is None, read until EOF is reached.
674 If count is a positive integer, read 'count' bytes from 'f'. If EOF is
675 reached while reading, raise IOError.
679 b = f.read(min(count, 65536))
681 raise IOError('EOF with %d bytes remaining' % count)
692 def atomically_replaced_file(name, mode='w', buffering=-1):
693 """Yield a file that will be atomically renamed name when leaving the block.
695 This contextmanager yields an open file object that is backed by a
696 temporary file which will be renamed (atomically) to the target
697 name if everything succeeds.
699 The mode and buffering arguments are handled exactly as with open,
700 and the yielded file will have very restrictive permissions, as
705 with atomically_replaced_file('foo.txt', 'w') as f:
706 f.write('hello jack.')
710 (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
711 text=('b' not in mode))
714 f = os.fdopen(ffd, mode, buffering)
722 os.rename(tempname, name)
724 unlink(tempname) # nonexistent file is ignored
728 """Append "/" to 's' if it doesn't aleady end in "/"."""
729 assert isinstance(s, bytes)
730 if s and not s.endswith(b'/'):
736 def _mmap_do(f, sz, flags, prot, close):
738 st = os.fstat(f.fileno())
741 # trying to open a zero-length map gives an error, but an empty
742 # string has all the same behaviour as a zero-length map, i.e. it has
745 map = mmap.mmap(f.fileno(), sz, flags, prot)
747 f.close() # map will persist beyond file close
751 def mmap_read(f, sz = 0, close=True):
752 """Create a read-only memory mapped region on file 'f'.
753 If sz is 0, the region will cover the entire file.
755 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
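# Illustrative only (the path is hypothetical): with the default close=True,
# _mmap_do() closes the file itself, and the mapping remains valid afterwards.
#
#   f = open(b'objects/pack/some.idx', 'rb')
#   m = mmap_read(f)     # f is closed here; m stays usable
#   header = m[:8]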
758 def mmap_readwrite(f, sz = 0, close=True):
759 """Create a read-write memory mapped region on file 'f'.
760 If sz is 0, the region will cover the entire file.
762 return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
766 def mmap_readwrite_private(f, sz = 0, close=True):
767 """Create a read-write memory mapped region on file 'f'.
768 If sz is 0, the region will cover the entire file.
769 The map is private, which means the changes are never flushed back to the
772 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
776 _mincore = getattr(_helpers, 'mincore', None)
778 # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
779 MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
781 _fmincore_chunk_size = None
782 def _set_fmincore_chunk_size():
783 global _fmincore_chunk_size
784 pref_chunk_size = 64 * 1024 * 1024
785 chunk_size = sc_page_size
786 if (sc_page_size < pref_chunk_size):
787 chunk_size = sc_page_size * (pref_chunk_size // sc_page_size)
788 _fmincore_chunk_size = chunk_size
791 """Return the mincore() data for fd as a bytearray whose values can be
792 tested via MINCORE_INCORE, or None if fd does not fully
793 support the operation."""
795 if (st.st_size == 0):
797 if not _fmincore_chunk_size:
798 _set_fmincore_chunk_size()
799 pages_per_chunk = _fmincore_chunk_size // sc_page_size
800 page_count = (st.st_size + sc_page_size - 1) // sc_page_size
801 chunk_count = (st.st_size + _fmincore_chunk_size - 1) // _fmincore_chunk_size
802 result = bytearray(page_count)
803 for ci in compat.range(chunk_count):
804 pos = _fmincore_chunk_size * ci
805 msize = min(_fmincore_chunk_size, st.st_size - pos)
807 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
808 except mmap.error as ex:
809 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
810 # Perhaps the file was a pipe, i.e. "... | bup split ..."
814 _mincore(m, msize, 0, result, ci * pages_per_chunk)
815 except OSError as ex:
816 if ex.errno == errno.ENOSYS:
822 def parse_timestamp(epoch_str):
823 """Return the number of nanoseconds since the epoch that are described
824 by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
825 throw a ValueError that may contain additional information."""
826 ns_per = {'s' : 1000000000,
830 match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
832 if re.match(r'^([-+]?[0-9]+)$', epoch_str):
833 raise ValueError('must include units, i.e. 100ns, 100ms, ...')
835 (n, units) = match.group(1, 2)
839 return n * ns_per[units]
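# Illustrative only:
#
#   parse_timestamp('100ms')   # -> 100000000 (ns)
#   parse_timestamp('-1s')     # -> -1000000000 (ns)
#   parse_timestamp('100')     # raises ValueError: units are required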
843 """Parse string or bytes as a possibly unit suffixed number.
846 199.2k means 203981 bytes
847 1GB means 1073741824 bytes
848 2 tb means 2199023255552 bytes
850 if isinstance(s, bytes):
851 # FIXME: should this raise a ValueError for UnicodeDecodeError
852 # (perhaps with the latter as the context).
853 s = s.decode('ascii')
854 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
856 raise ValueError("can't parse %r as a number" % s)
857 (val, unit) = g.groups()
860 if unit in ['t', 'tb']:
861 mult = 1024*1024*1024*1024
862 elif unit in ['g', 'gb']:
863 mult = 1024*1024*1024
864 elif unit in ['m', 'mb']:
866 elif unit in ['k', 'kb']:
868 elif unit in ['', 'b']:
871 raise ValueError("invalid unit %r in number %r" % (unit, s))
877 """Append an error message to the list of saved errors.
879 Once processing is able to stop and output the errors, the saved errors are
880 accessible in the module variable helpers.saved_errors.
882 saved_errors.append(e)
891 def die_if_errors(msg=None, status=1):
895 msg = 'warning: %d errors encountered\n' % len(saved_errors)
901 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
903 The new exception handler will make sure that bup will exit without an ugly
904 stacktrace when Ctrl-C is hit.
906 oldhook = sys.excepthook
907 def newhook(exctype, value, traceback):
908 if exctype == KeyboardInterrupt:
909 log('\nInterrupted.\n')
911 return oldhook(exctype, value, traceback)
912 sys.excepthook = newhook
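# Illustrative only: commands typically install the hook once, early in main();
# after that a Ctrl-C still terminates the process, but prints "Interrupted."
# instead of a traceback.
#
#   handle_ctrl_c()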
915 def columnate(l, prefix):
916 """Format elements of 'l' in columns with 'prefix' leading each line.
918 The number of columns is determined automatically based on the string
921 binary = isinstance(prefix, bytes)
922 nothing = b'' if binary else ''
923 nl = b'\n' if binary else '\n'
927 clen = max(len(s) for s in l)
928 ncols = (tty_width() - len(prefix)) // (clen + 2)
933 while len(l) % ncols:
935 rows = len(l) // ncols
936 for s in compat.range(0, len(l), rows):
937 cols.append(l[s:s+rows])
939 fmt = b'%-*s' if binary else '%-*s'
940 for row in zip(*cols):
941 out += prefix + nothing.join((fmt % (clen+2, s)) for s in row) + nl
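# Illustrative only: the entries run down each column (the cols slices above
# are consecutive ranges of l), every cell is padded to the longest entry plus
# two spaces, and the column count is derived from tty_width(), e.g.
#
#   out = columnate([b'able', b'baker', b'charlie'], b'    ')
#   # out is a single bytes object, ready to be passed to log()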
945 def parse_date_or_fatal(str, fatal):
946 """Parses the given date or calls Option.fatal().
947 For now we expect a string that contains a float."""
950 except ValueError as e:
951 raise fatal('invalid date format (should be a float): %r' % e)
956 def parse_excludes(options, fatal):
957 """Traverse the options and extract all excludes, or call Option.fatal()."""
961 (option, parameter) = flag
962 if option == '--exclude':
963 excluded_paths.append(resolve_parent(argv_bytes(parameter)))
964 elif option == '--exclude-from':
966 f = open(resolve_parent(argv_bytes(parameter)), 'rb')
968 raise fatal("couldn't read %r" % parameter)
969 for exclude_path in f.readlines():
970 # FIXME: perhaps this should be rstrip('\n')
971 exclude_path = resolve_parent(exclude_path.strip())
973 excluded_paths.append(exclude_path)
974 return sorted(frozenset(excluded_paths))
977 def parse_rx_excludes(options, fatal):
978 """Traverse the options and extract all rx excludes, or call
980 excluded_patterns = []
983 (option, parameter) = flag
984 if option == '--exclude-rx':
986 excluded_patterns.append(re.compile(argv_bytes(parameter)))
987 except re.error as ex:
988 fatal('invalid --exclude-rx pattern (%r): %s' % (parameter, ex))
989 elif option == '--exclude-rx-from':
991 f = open(resolve_parent(parameter), 'rb')
993 raise fatal("couldn't read %r" % parameter)
994 for pattern in f.readlines():
995 spattern = pattern.rstrip(b'\n')
999 excluded_patterns.append(re.compile(spattern))
1000 except re.error as ex:
1001 fatal('invalid --exclude-rx pattern (%r): %s' % (spattern, ex))
1002 return excluded_patterns
1005 def should_rx_exclude_path(path, exclude_rxs):
1006 """Return True if path matches a regular expression in exclude_rxs."""
1007 for rx in exclude_rxs:
1009 debug1('Skipping %r: excluded by rx pattern %r.\n'
1010 % (path, rx.pattern))
1015 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
1016 # that resolve against the current filesystem in the strip/graft
1017 # functions for example, but elsewhere as well. I suspect bup's not
1018 # always being careful about that. For some cases, the contents of
1019 # the current filesystem should be irrelevant, and consulting it might
1020 # produce the wrong result, perhaps via unintended symlink resolution,
1023 def path_components(path):
1024 """Break path into a list of pairs of the form (name,
1025 full_path_to_name). Path must start with '/'.
1027 '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
1028 if not path.startswith(b'/'):
1029 raise Exception('path must start with "/": %s' % path_msg(path))
1030 # Since we assume path startswith('/'), we can skip the first element.
1031 result = [(b'', b'/')]
1032 norm_path = os.path.abspath(path)
1033 if norm_path == b'/':
1036 for p in norm_path.split(b'/')[1:]:
1037 full_path += b'/' + p
1038 result.append((p, full_path))
1042 def stripped_path_components(path, strip_prefixes):
1043 """Strip any prefix in strip_prefixes from path and return a list
1044 of path components where each component is (name,
1045 none_or_full_fs_path_to_name). Assume path startswith('/').
1046 See thelpers.py for examples."""
1047 normalized_path = os.path.abspath(path)
1048 sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
1049 for bp in sorted_strip_prefixes:
1050 normalized_bp = os.path.abspath(bp)
1051 if normalized_bp == b'/':
1053 if normalized_path.startswith(normalized_bp):
1054 prefix = normalized_path[:len(normalized_bp)]
1056 for p in normalized_path[len(normalized_bp):].split(b'/'):
1060 result.append((p, prefix))
1063 return path_components(path)
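# Illustrative only (exact shape as understood from the code above):
#
#   stripped_path_components(b'/usr/local/bin', [b'/usr/local'])
#   # -> [(b'', b'/usr/local'), (b'bin', b'/usr/local/bin')]
#
# i.e. the stripped prefix collapses to a single nameless component that still
# records the real filesystem path.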
1066 def grafted_path_components(graft_points, path):
1067 # Create a result that consists of some number of faked graft
1068 # directories before the graft point, followed by all of the real
1069 # directories from path that are after the graft point. Arrange
1070 # for the directory at the graft point in the result to correspond
1071 # to the "orig" directory in --graft orig=new. See t/thelpers.py
1072 # for some examples.
1074 # Note that given --graft orig=new, orig and new have *nothing* to
1075 # do with each other, even if some of their component names
1076 # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
1077 # equivalent to --graft /foo/bar/baz=/x/y/z, or even
1080 # FIXME: This can't be the best solution...
1081 clean_path = os.path.abspath(path)
1082 for graft_point in graft_points:
1083 old_prefix, new_prefix = graft_point
1084 # Expand prefixes iff not absolute paths.
1085 old_prefix = os.path.normpath(old_prefix)
1086 new_prefix = os.path.normpath(new_prefix)
1087 if clean_path.startswith(old_prefix):
1088 escaped_prefix = re.escape(old_prefix)
1089 grafted_path = re.sub(br'^' + escaped_prefix, new_prefix, clean_path)
1090 # Handle /foo=/ (at least) -- which produces //whatever.
1091 grafted_path = b'/' + grafted_path.lstrip(b'/')
1092 clean_path_components = path_components(clean_path)
1093 # Count the components that were stripped.
1094 strip_count = 0 if old_prefix == b'/' else old_prefix.count(b'/')
1095 new_prefix_parts = new_prefix.split(b'/')
1096 result_prefix = grafted_path.split(b'/')[:new_prefix.count(b'/')]
1097 result = [(p, None) for p in result_prefix] \
1098 + clean_path_components[strip_count:]
1099 # Now set the graft point name to match the end of new_prefix.
1100 graft_point = len(result_prefix)
1101 result[graft_point] = \
1102 (new_prefix_parts[-1], clean_path_components[strip_count][1])
1103 if new_prefix == b'/': # --graft ...=/ is a special case.
1106 return path_components(clean_path)
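# Illustrative only: given --graft /a/b=/x/y, a path saved as /a/b/c is
# presented as /x/y/c; the components invented for the new prefix carry None
# instead of a filesystem path, and the component at the graft point still
# records the original directory (/a/b here).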
1112 _localtime = getattr(_helpers, 'localtime', None)
1115 bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
1116 'tm_hour', 'tm_min', 'tm_sec',
1117 'tm_wday', 'tm_yday',
1118 'tm_isdst', 'tm_gmtoff', 'tm_zone'])
1120 # Define a localtime() that returns bup_time when possible. Note:
1121 # this means that any helpers.localtime() results may need to be
1122 # passed through to_py_time() before being passed to python's time
1123 # module, which doesn't appear willing to ignore the extra items.
1125 def localtime(time):
1126 return bup_time(*_helpers.localtime(int(floor(time))))
1127 def utc_offset_str(t):
1128 """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
1129 If the current UTC offset does not represent an integer number
1130 of minutes, the fractional component will be truncated."""
1131 off = localtime(t).tm_gmtoff
1132 # Note: // doesn't truncate like C for negative values, it rounds down.
1133 offmin = abs(off) // 60
1135 h = (offmin - m) // 60
1136 return b'%+03d%02d' % (-h if off < 0 else h, m)
1138 if isinstance(x, time.struct_time):
1140 return time.struct_time(x[:9])
1142 localtime = time.localtime
1143 def utc_offset_str(t):
1144 return time.strftime(b'%z', localtime(t))
1149 _some_invalid_save_parts_rx = re.compile(br'[\[ ~^:?*\\]|\.\.|//|@{')
1151 def valid_save_name(name):
1152 # Enforce a superset of the restrictions in git-check-ref-format(1)
1154 or name.startswith(b'/') or name.endswith(b'/') \
1155 or name.endswith(b'.'):
1157 if _some_invalid_save_parts_rx.search(name):
1160 if byte_int(c) < 0x20 or byte_int(c) == 0x7f:
1162 for part in name.split(b'/'):
1163 if part.startswith(b'.') or part.endswith(b'.lock'):
1168 _period_rx = re.compile(br'^([0-9]+)(s|min|h|d|w|m|y)$')
1170 def period_as_secs(s):
1173 match = _period_rx.match(s)
1176 mag = int(match.group(1))
1177 scale = match.group(2)
1178 return mag * {b's': 1,
1182 b'w': 60 * 60 * 24 * 7,
1183 b'm': 60 * 60 * 24 * 31,
1184 b'y': 60 * 60 * 24 * 366}[scale]
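# Illustrative only:
#
#   period_as_secs(b'2w')   # -> 1209600  (2 * 7 * 24 * 60 * 60)
#   period_as_secs(b'1y')   # -> 31622400 (366 days, per the table above)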