1 """Helper functions and classes for bup."""
3 from __future__ import absolute_import, division
4 from collections import namedtuple
5 from contextlib import contextmanager
6 from ctypes import sizeof, c_void_p
9 from subprocess import PIPE, Popen
10 import sys, os, subprocess, errno, select, mmap, stat, re, struct
11 import hashlib, heapq, math, operator, time, tempfile
13 from bup import _helpers
14 from bup import compat
15 from bup.compat import argv_bytes, byte_int, pending_raise
16 from bup.io import byte_stream, path_msg
17 # This function should really be in helpers, not in bup.options. But we
18 # want options.py to be standalone so people can include it in other projects.
19 from bup.options import _tty_width as tty_width
# Debug verbosity level, read from the BUP_DEBUG environment variable
# (0, the default, disables debug output).
buglvl = int(os.environ.get('BUP_DEBUG', 0))
26 """Helper to deal with Python scoping issues"""
31 def finalized(enter_result=None, finalize=None):
35 except BaseException as ex:
36 with pending_raise(ex):
37 finalize(enter_result)
38 finalize(enter_result)
# Kernel page size, used for the mmap/mincore arithmetic elsewhere in
# this module; it must be positive for that math to make sense.
sc_page_size = os.sysconf('SC_PAGE_SIZE')
assert(sc_page_size > 0)

# Maximum combined size of execve() arguments and environment, used by
# batchpipe() below to split long argument lists.
sc_arg_max = os.sysconf('SC_ARG_MAX')
if sc_arg_max == -1: # "no definite limit" - let's choose 2M
    sc_arg_max = 2 * 1024 * 1024
50 for result in iterable:
55 _fdatasync = os.fdatasync
56 except AttributeError:
59 if sys.platform.startswith('darwin'):
60 # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
64 return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
66 # Fallback for file systems (SMB) that do not support F_FULLFSYNC
67 if e.errno == errno.ENOTSUP:
72 fdatasync = _fdatasync
75 def partition(predicate, stream):
76 """Returns (leading_matches_it, rest_it), where leading_matches_it
77 must be completely exhausted before traversing rest_it.
82 ns.first_nonmatch = None
83 def leading_matches():
88 ns.first_nonmatch = (x,)
92 yield ns.first_nonmatch[0]
95 return (leading_matches(), rest())
105 def lines_until_sentinel(f, sentinel, ex_type):
106 # sentinel must end with \n and must contain only one \n
109 if not (line and line.endswith(b'\n')):
110 raise ex_type('Hit EOF while reading line')
116 def stat_if_exists(path):
120 if e.errno != errno.ENOENT:
125 # Write (blockingly) to sockets that may or may not be in blocking mode.
126 # We need this because our stderr is sometimes eaten by subprocesses
127 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
128 # leading to race conditions. Ick. We'll do it the hard way.
129 def _hard_write(fd, buf):
131 (r,w,x) = select.select([], [fd], [], None)
133 raise IOError('select(fd) returned without being writable')
135 sz = os.write(fd, buf)
137 if e.errno != errno.EAGAIN:
145 """Print a log message to stderr."""
148 _hard_write(sys.stderr.fileno(), s if isinstance(s, bytes) else s.encode())
# Treat stdout/stderr as a TTY if it really is one, or if the matching
# bit is set in BUP_FORCE_TTY (bit 1 for stdout, bit 2 for stderr).
istty1 = os.isatty(1) or (int(os.environ.get('BUP_FORCE_TTY', 0)) & 1)
istty2 = os.isatty(2) or (int(os.environ.get('BUP_FORCE_TTY', 0)) & 2)
166 """Calls log() if stderr is a TTY. Does nothing otherwise."""
167 global _last_progress
174 """Calls progress() only if we haven't printed progress in a while.
176 This avoids overloading the stderr buffer with excess junk.
180 if now - _last_prog > 0.1:
186 """Calls progress() to redisplay the most recent progress message.
188 Useful after you've printed some other message that wipes out the
191 if _last_progress and _last_progress.endswith('\r'):
192 progress(_last_progress)
195 def mkdirp(d, mode=None):
196 """Recursively create directories on path 'd'.
198 Unlike os.makedirs(), it doesn't raise an exception if the last element of
199 the path already exists.
207 if e.errno == errno.EEXIST:
214 def __init__(self, entry, read_it):
216 self.read_it = read_it
218 return self.entry < x.entry
220 def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
222 samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
224 samekey = operator.eq
226 total = sum(len(it) for it in iters)
227 iters = (iter(it) for it in iters)
228 heap = ((next(it, None),it) for it in iters)
229 heap = [MergeIterItem(e, it) for e, it in heap if e]
234 if not count % pfreq:
236 e, it = heap[0].entry, heap[0].read_it
237 if not samekey(e, pe):
243 except StopIteration:
244 heapq.heappop(heap) # remove current
246 # shift current to new location
247 heapq.heapreplace(heap, MergeIterItem(e, it))
252 """Delete a file at path 'f' if it currently exists.
254 Unlike os.unlink(), does not throw an exception if the file didn't already
260 if e.errno != errno.ENOENT:
# Identifiers that are safe to pass to a shell without quoting; the
# bytes and str patterns are kept separate so each quoting helper can
# match without mixing the two types.
_bq_simple_id_rx = re.compile(br'^[-_./a-zA-Z0-9]+$')
_sq_simple_id_rx = re.compile(r'^[-_./a-zA-Z0-9]+$')
270 if _bq_simple_id_rx.match(x):
272 return b"'%s'" % x.replace(b"'", b"'\"'\"'")
277 if _sq_simple_id_rx.match(x):
279 return "'%s'" % x.replace("'", "'\"'\"'")
282 if isinstance(x, bytes):
284 if isinstance(x, compat.str_type):
287 # some versions of pylint get confused
291 """Return a shell quoted string for cmd if it's a sequence, else cmd.
293 cmd must be a string, bytes, or a sequence of one or the other,
294 and the assumption is that if cmd is a string or bytes, then it's
295 already quoted (because it's what's actually being passed to
296 call() and friends. e.g. log(shstr(cmd)); call(cmd)
299 if isinstance(cmd, (bytes, compat.str_type)):
301 elif all(isinstance(x, bytes) for x in cmd):
302 return b' '.join(map(bquote, cmd))
303 elif all(isinstance(x, compat.str_type) for x in cmd):
304 return ' '.join(map(squote, cmd))
305 raise TypeError('unsupported shstr argument: ' + repr(cmd))
308 exc = subprocess.check_call
319 assert stdin in (None, PIPE)
322 stdin=stdin, stdout=PIPE, stderr=stderr,
324 preexec_fn=preexec_fn,
326 out, err = p.communicate(input)
327 if check and p.returncode != 0:
328 raise Exception('subprocess %r failed with status %d%s'
329 % (b' '.join(map(quote, cmd)), p.returncode,
330 ', stderr: %r' % err if err else ''))
333 def readpipe(argv, preexec_fn=None, shell=False):
334 """Run a subprocess and return its output."""
335 return exo(argv, preexec_fn=preexec_fn, shell=shell)[0]
338 def _argmax_base(command):
341 base_size += len(command) + 1
342 for k, v in compat.items(environ):
343 base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
def _argmax_args_size(args):
    """Return the ARG_MAX cost of args: each argument's bytes plus its
    terminating NUL plus one argv pointer."""
    total = 0
    for arg in args:
        total += len(arg) + 1 + sizeof(c_void_p)
    return total
351 def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
352 """If args is not empty, yield the output produced by calling the
353 command list with args as a sequence of strings (It may be necessary
354 to return multiple strings in order to respect ARG_MAX)."""
355 # The optional arg_max arg is a workaround for an issue with the
356 # current wvtest behavior.
357 base_size = _argmax_base(command)
359 room = arg_max - base_size
362 next_size = _argmax_args_size(args[i:i+1])
363 if room - next_size < 0:
369 assert(len(sub_args))
370 yield readpipe(command + sub_args, preexec_fn=preexec_fn)
373 def resolve_parent(p):
374 """Return the absolute path of a file without following any final symlink.
376 Behaves like os.path.realpath, but doesn't follow a symlink for the last
377 element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
378 will follow symlinks in p's directory)
384 if st and stat.S_ISLNK(st.st_mode):
385 (dir, name) = os.path.split(p)
386 dir = os.path.realpath(dir)
387 out = os.path.join(dir, name)
389 out = os.path.realpath(p)
390 #log('realpathing:%r,%r\n' % (p, out))
def detect_fakeroot():
    """Return True if we appear to be running under fakeroot.

    fakeroot advertises itself via the FAKEROOTKEY environment
    variable; any value (even an empty string) counts as present.
    """
    # Use identity comparison with None per PEP 8, not != None.
    return os.getenv("FAKEROOTKEY") is not None
399 if sys.platform.startswith('cygwin'):
401 # https://cygwin.com/ml/cygwin/2015-02/msg00057.html
402 groups = os.getgroups()
403 return 544 in groups or 0 in groups
406 return os.geteuid() == 0
409 def cache_key_value(get_value, key, cache):
410 """Return (value, was_cached). If there is a value in the cache
411 for key, use that, otherwise, call get_value(key) which should
412 throw a KeyError if there is no value -- in which case the cached
413 and returned value will be None.
415 try: # Do we already have it (or know there wasn't one)?
422 cache[key] = value = get_value(key)
430 """Get the FQDN of this machine."""
433 _hostname = _helpers.gethostname()
437 def format_filesize(size):
442 exponent = int(math.log(size) // math.log(unit))
443 size_prefix = "KMGTPE"[exponent - 1]
444 return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
447 class NotOk(Exception):
452 def __init__(self, outp):
456 while self._read(65536): pass
458 def _read(self, size):
459 raise NotImplementedError("Subclasses must implement _read")
461 def read(self, size):
462 """Read 'size' bytes from input stream."""
464 return self._read(size)
466 def _readline(self, size):
467 raise NotImplementedError("Subclasses must implement _readline")
470 """Read from input stream until a newline is found."""
472 return self._readline()
474 def write(self, data):
475 """Write 'data' to output stream."""
476 #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
477 self.outp.write(data)
480 """Return true if input stream is readable."""
481 raise NotImplementedError("Subclasses must implement has_input")
484 """Indicate end of output from last sent command."""
485 self.write(b'\nok\n')
488 """Indicate server error to the client."""
489 s = re.sub(br'\s+', b' ', s)
490 self.write(b'\nerror %s\n' % s)
492 def _check_ok(self, onempty):
495 for rl in linereader(self):
496 #log('%d got line: %r\n' % (os.getpid(), rl))
497 if not rl: # empty line
501 elif rl.startswith(b'error '):
502 #log('client: error: %s\n' % rl[6:])
506 raise Exception('server exited unexpectedly; see errors above')
508 def drain_and_check_ok(self):
509 """Remove all data for the current command from input stream."""
512 return self._check_ok(onempty)
515 """Verify that server action completed successfully."""
517 raise Exception('expected "ok", got %r' % rl)
518 return self._check_ok(onempty)
521 class Conn(BaseConn):
522 def __init__(self, inp, outp):
523 BaseConn.__init__(self, outp)
526 def _read(self, size):
527 return self.inp.read(size)
530 return self.inp.readline()
533 [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
535 assert(rl[0] == self.inp.fileno())
541 def checked_reader(fd, n):
543 rl, _, _ = select.select([fd], [], [])
546 if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
# Largest payload carried in a single packet by mux() below.
MAX_PACKET = 128 * 1024
552 def mux(p, outfd, outr, errr):
555 while p.poll() is None:
556 rl, _, _ = select.select(fds, [], [])
559 buf = os.read(outr, MAX_PACKET)
561 os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
563 buf = os.read(errr, 1024)
565 os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
567 os.write(outfd, struct.pack('!IB', 0, 3))
570 class DemuxConn(BaseConn):
571 """A helper class for bup's client-server protocol."""
572 def __init__(self, infd, outp):
573 BaseConn.__init__(self, outp)
574 # Anything that comes through before the sync string was not
575 # multiplexed and can be assumed to be debug/log before mux init.
577 stderr = byte_stream(sys.stderr)
578 while tail != b'BUPMUX':
579 # Make sure to write all pre-BUPMUX output to stderr
580 b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
582 ex = IOError('demux: unexpected EOF during initialization')
583 with pending_raise(ex):
587 stderr.write(tail[:-6])
595 def write(self, data):
597 BaseConn.write(self, data)
599 def _next_packet(self, timeout):
600 if self.closed: return False
601 rl, wl, xl = select.select([self.infd], [], [], timeout)
602 if not rl: return False
603 assert(rl[0] == self.infd)
604 ns = b''.join(checked_reader(self.infd, 5))
605 n, fdw = struct.unpack('!IB', ns)
607 # assume that something went wrong and print stuff
608 ns += os.read(self.infd, 1024)
609 stderr = byte_stream(sys.stderr)
612 raise Exception("Connection broken")
614 self.reader = checked_reader(self.infd, n)
616 for buf in checked_reader(self.infd, n):
617 byte_stream(sys.stderr).write(buf)
620 debug2("DemuxConn: marked closed\n")
623 def _load_buf(self, timeout):
624 if self.buf is not None:
626 while not self.closed:
627 while not self.reader:
628 if not self._next_packet(timeout):
631 self.buf = next(self.reader)
633 except StopIteration:
637 def _read_parts(self, ix_fn):
638 while self._load_buf(None):
639 assert(self.buf is not None)
641 if i is None or i == len(self.buf):
646 self.buf = self.buf[i:]
654 return buf.index(b'\n')+1
657 return b''.join(self._read_parts(find_eol))
659 def _read(self, size):
661 def until_size(buf): # Closes on csize
662 if len(buf) < csize[0]:
667 return b''.join(self._read_parts(until_size))
670 return self._load_buf(0)
674 """Generate a list of input lines from 'f' without terminating newlines."""
682 def chunkyreader(f, count = None):
683 """Generate a list of chunks of data read from 'f'.
685 If count is None, read until EOF is reached.
687 If count is a positive integer, read 'count' bytes from 'f'. If EOF is
688 reached while reading, raise IOError.
692 b = f.read(min(count, 65536))
694 raise IOError('EOF with %d bytes remaining' % count)
705 def atomically_replaced_file(name, mode='w', buffering=-1):
706 """Yield a file that will be atomically renamed name when leaving the block.
708 This contextmanager yields an open file object that is backed by a
709 temporary file which will be renamed (atomically) to the target
710 name if everything succeeds.
712 The mode and buffering arguments are handled exactly as with open,
713 and the yielded file will have very restrictive permissions, as
718 with atomically_replaced_file('foo.txt', 'w') as f:
719 f.write('hello jack.')
723 (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
724 text=('b' not in mode))
727 f = os.fdopen(ffd, mode, buffering)
735 os.rename(tempname, name)
737 unlink(tempname) # nonexistant file is ignored
741 """Append "/" to 's' if it doesn't aleady end in "/"."""
742 assert isinstance(s, bytes)
743 if s and not s.endswith(b'/'):
749 def _mmap_do(f, sz, flags, prot, close):
751 st = os.fstat(f.fileno())
754 # trying to open a zero-length map gives an error, but an empty
755 # string has all the same behaviour of a zero-length map, ie. it has
758 map = mmap.mmap(f.fileno(), sz, flags, prot)
760 f.close() # map will persist beyond file close
764 def mmap_read(f, sz = 0, close=True):
765 """Create a read-only memory mapped region on file 'f'.
766 If sz is 0, the region will cover the entire file.
768 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
771 def mmap_readwrite(f, sz = 0, close=True):
772 """Create a read-write memory mapped region on file 'f'.
773 If sz is 0, the region will cover the entire file.
775 return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
779 def mmap_readwrite_private(f, sz = 0, close=True):
780 """Create a read-write memory mapped region on file 'f'.
781 If sz is 0, the region will cover the entire file.
782 The map is private, which means the changes are never flushed back to the
785 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
789 _mincore = getattr(_helpers, 'mincore', None)
791 # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
792 MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
# Lazily-computed chunk size used when mmap'ing a file region at a time
# for mincore(); see _set_fmincore_chunk_size().
_fmincore_chunk_size = None
def _set_fmincore_chunk_size():
    """Set the module-level _fmincore_chunk_size.

    The result is sc_page_size when pages are at least 64MB; otherwise
    it is the largest multiple of sc_page_size that fits in 64MB.
    """
    global _fmincore_chunk_size
    preferred = 64 * 1024 * 1024
    if sc_page_size < preferred:
        _fmincore_chunk_size = sc_page_size * (preferred // sc_page_size)
    else:
        _fmincore_chunk_size = sc_page_size
804 """Return the mincore() data for fd as a bytearray whose values can be
805 tested via MINCORE_INCORE, or None if fd does not fully
806 support the operation."""
808 if (st.st_size == 0):
810 if not _fmincore_chunk_size:
811 _set_fmincore_chunk_size()
812 pages_per_chunk = _fmincore_chunk_size // sc_page_size;
813 page_count = (st.st_size + sc_page_size - 1) // sc_page_size;
814 chunk_count = (st.st_size + _fmincore_chunk_size - 1) // _fmincore_chunk_size
815 result = bytearray(page_count)
816 for ci in compat.range(chunk_count):
817 pos = _fmincore_chunk_size * ci;
818 msize = min(_fmincore_chunk_size, st.st_size - pos)
820 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
821 except mmap.error as ex:
822 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
823 # Perhaps the file was a pipe, i.e. "... | bup split ..."
827 _mincore(m, msize, 0, result, ci * pages_per_chunk)
828 except OSError as ex:
829 if ex.errno == errno.ENOSYS:
835 def parse_timestamp(epoch_str):
836 """Return the number of nanoseconds since the epoch that are described
837 by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
838 throw a ValueError that may contain additional information."""
839 ns_per = {'s' : 1000000000,
843 match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
845 if re.match(r'^([-+]?[0-9]+)$', epoch_str):
846 raise ValueError('must include units, i.e. 100ns, 100ms, ...')
848 (n, units) = match.group(1, 2)
852 return n * ns_per[units]
856 """Parse string or bytes as a possibly unit suffixed number.
859 199.2k means 203981 bytes
860 1GB means 1073741824 bytes
861 2.1 tb means 2199023255552 bytes
863 if isinstance(s, bytes):
864 # FIXME: should this raise a ValueError for UnicodeDecodeError
865 # (perhaps with the latter as the context).
866 s = s.decode('ascii')
867 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
869 raise ValueError("can't parse %r as a number" % s)
870 (val, unit) = g.groups()
873 if unit in ['t', 'tb']:
874 mult = 1024*1024*1024*1024
875 elif unit in ['g', 'gb']:
876 mult = 1024*1024*1024
877 elif unit in ['m', 'mb']:
879 elif unit in ['k', 'kb']:
881 elif unit in ['', 'b']:
884 raise ValueError("invalid unit %r in number %r" % (unit, s))
890 """Append an error message to the list of saved errors.
892 Once processing is able to stop and output the errors, the saved errors are
893 accessible in the module variable helpers.saved_errors.
895 saved_errors.append(e)
904 def die_if_errors(msg=None, status=1):
908 msg = 'warning: %d errors encountered\n' % len(saved_errors)
914 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
916 The new exception handler will make sure that bup will exit without an ugly
917 stacktrace when Ctrl-C is hit.
919 oldhook = sys.excepthook
920 def newhook(exctype, value, traceback):
921 if exctype == KeyboardInterrupt:
922 log('\nInterrupted.\n')
924 oldhook(exctype, value, traceback)
925 sys.excepthook = newhook
928 def columnate(l, prefix):
929 """Format elements of 'l' in columns with 'prefix' leading each line.
931 The number of columns is determined automatically based on the string
934 binary = isinstance(prefix, bytes)
935 nothing = b'' if binary else ''
936 nl = b'\n' if binary else '\n'
940 clen = max(len(s) for s in l)
941 ncols = (tty_width() - len(prefix)) // (clen + 2)
946 while len(l) % ncols:
948 rows = len(l) // ncols
949 for s in compat.range(0, len(l), rows):
950 cols.append(l[s:s+rows])
952 fmt = b'%-*s' if binary else '%-*s'
953 for row in zip(*cols):
954 out += prefix + nothing.join((fmt % (clen+2, s)) for s in row) + nl
958 def parse_date_or_fatal(str, fatal):
959 """Parses the given date or calls Option.fatal().
960 For now we expect a string that contains a float."""
963 except ValueError as e:
964 raise fatal('invalid date format (should be a float): %r' % e)
969 def parse_excludes(options, fatal):
970 """Traverse the options and extract all excludes, or call Option.fatal()."""
974 (option, parameter) = flag
975 if option == '--exclude':
976 excluded_paths.append(resolve_parent(argv_bytes(parameter)))
977 elif option == '--exclude-from':
979 f = open(resolve_parent(argv_bytes(parameter)), 'rb')
981 raise fatal("couldn't read %r" % parameter)
982 for exclude_path in f.readlines():
983 # FIXME: perhaps this should be rstrip('\n')
984 exclude_path = resolve_parent(exclude_path.strip())
986 excluded_paths.append(exclude_path)
987 return sorted(frozenset(excluded_paths))
990 def parse_rx_excludes(options, fatal):
991 """Traverse the options and extract all rx excludes, or call
993 excluded_patterns = []
996 (option, parameter) = flag
997 if option == '--exclude-rx':
999 excluded_patterns.append(re.compile(argv_bytes(parameter)))
1000 except re.error as ex:
1001 fatal('invalid --exclude-rx pattern (%r): %s' % (parameter, ex))
1002 elif option == '--exclude-rx-from':
1004 f = open(resolve_parent(parameter), 'rb')
1005 except IOError as e:
1006 raise fatal("couldn't read %r" % parameter)
1007 for pattern in f.readlines():
1008 spattern = pattern.rstrip(b'\n')
1012 excluded_patterns.append(re.compile(spattern))
1013 except re.error as ex:
1014 fatal('invalid --exclude-rx pattern (%r): %s' % (spattern, ex))
1015 return excluded_patterns
1018 def should_rx_exclude_path(path, exclude_rxs):
1019 """Return True if path matches a regular expression in exclude_rxs."""
1020 for rx in exclude_rxs:
1022 debug1('Skipping %r: excluded by rx pattern %r.\n'
1023 % (path, rx.pattern))
1028 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
1029 # that resolve against the current filesystem in the strip/graft
1030 # functions for example, but elsewhere as well. I suspect bup's not
1031 # always being careful about that. For some cases, the contents of
1032 # the current filesystem should be irrelevant, and consulting it might
1033 # produce the wrong result, perhaps via unintended symlink resolution,
1036 def path_components(path):
1037 """Break path into a list of pairs of the form (name,
1038 full_path_to_name). Path must start with '/'.
1040 '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
1041 if not path.startswith(b'/'):
1042 raise Exception('path must start with "/": %s' % path_msg(path))
1043 # Since we assume path startswith('/'), we can skip the first element.
1044 result = [(b'', b'/')]
1045 norm_path = os.path.abspath(path)
1046 if norm_path == b'/':
1049 for p in norm_path.split(b'/')[1:]:
1050 full_path += b'/' + p
1051 result.append((p, full_path))
1055 def stripped_path_components(path, strip_prefixes):
1056 """Strip any prefix in strip_prefixes from path and return a list
1057 of path components where each component is (name,
1058 none_or_full_fs_path_to_name). Assume path startswith('/').
1059 See thelpers.py for examples."""
1060 normalized_path = os.path.abspath(path)
1061 sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
1062 for bp in sorted_strip_prefixes:
1063 normalized_bp = os.path.abspath(bp)
1064 if normalized_bp == b'/':
1066 if normalized_path.startswith(normalized_bp):
1067 prefix = normalized_path[:len(normalized_bp)]
1069 for p in normalized_path[len(normalized_bp):].split(b'/'):
1073 result.append((p, prefix))
1076 return path_components(path)
1079 def grafted_path_components(graft_points, path):
1080 # Create a result that consists of some number of faked graft
1081 # directories before the graft point, followed by all of the real
1082 # directories from path that are after the graft point. Arrange
1083 # for the directory at the graft point in the result to correspond
1084 # to the "orig" directory in --graft orig=new. See t/thelpers.py
1085 # for some examples.
1087 # Note that given --graft orig=new, orig and new have *nothing* to
1088 # do with each other, even if some of their component names
1089 # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
1090 # equivalent to --graft /foo/bar/baz=/x/y/z, or even
1093 # FIXME: This can't be the best solution...
1094 clean_path = os.path.abspath(path)
1095 for graft_point in graft_points:
1096 old_prefix, new_prefix = graft_point
1097 # Expand prefixes iff not absolute paths.
1098 old_prefix = os.path.normpath(old_prefix)
1099 new_prefix = os.path.normpath(new_prefix)
1100 if clean_path.startswith(old_prefix):
1101 escaped_prefix = re.escape(old_prefix)
1102 grafted_path = re.sub(br'^' + escaped_prefix, new_prefix, clean_path)
1103 # Handle /foo=/ (at least) -- which produces //whatever.
1104 grafted_path = b'/' + grafted_path.lstrip(b'/')
1105 clean_path_components = path_components(clean_path)
1106 # Count the components that were stripped.
1107 strip_count = 0 if old_prefix == b'/' else old_prefix.count(b'/')
1108 new_prefix_parts = new_prefix.split(b'/')
1109 result_prefix = grafted_path.split(b'/')[:new_prefix.count(b'/')]
1110 result = [(p, None) for p in result_prefix] \
1111 + clean_path_components[strip_count:]
1112 # Now set the graft point name to match the end of new_prefix.
1113 graft_point = len(result_prefix)
1114 result[graft_point] = \
1115 (new_prefix_parts[-1], clean_path_components[strip_count][1])
1116 if new_prefix == b'/': # --graft ...=/ is a special case.
1119 return path_components(clean_path)
1125 _localtime = getattr(_helpers, 'localtime', None)
# A struct_time-alike that additionally carries the UTC offset and the
# time zone name (fields beyond the nine in time.struct_time).
bup_time = namedtuple('bup_time',
                      'tm_year tm_mon tm_mday '
                      'tm_hour tm_min tm_sec '
                      'tm_wday tm_yday '
                      'tm_isdst tm_gmtoff tm_zone')
1133 # Define a localtime() that returns bup_time when possible. Note:
1134 # this means that any helpers.localtime() results may need to be
1135 # passed through to_py_time() before being passed to python's time
1136 # module, which doesn't appear willing to ignore the extra items.
1138 def localtime(time):
1139 return bup_time(*_helpers.localtime(int(floor(time))))
1140 def utc_offset_str(t):
1141 """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
1142 If the current UTC offset does not represent an integer number
1143 of minutes, the fractional component will be truncated."""
1144 off = localtime(t).tm_gmtoff
1145 # Note: // doesn't truncate like C for negative values, it rounds down.
1146 offmin = abs(off) // 60
1148 h = (offmin - m) // 60
1149 return b'%+03d%02d' % (-h if off < 0 else h, m)
1151 if isinstance(x, time.struct_time):
1153 return time.struct_time(x[:9])
1155 localtime = time.localtime
1156 def utc_offset_str(t):
1157 return time.strftime(b'%z', localtime(t))
1162 _some_invalid_save_parts_rx = re.compile(br'[\[ ~^:?*\\]|\.\.|//|@{')
1164 def valid_save_name(name):
1165 # Enforce a superset of the restrictions in git-check-ref-format(1)
1167 or name.startswith(b'/') or name.endswith(b'/') \
1168 or name.endswith(b'.'):
1170 if _some_invalid_save_parts_rx.search(name):
1173 if byte_int(c) < 0x20 or byte_int(c) == 0x7f:
1175 for part in name.split(b'/'):
1176 if part.startswith(b'.') or part.endswith(b'.lock'):
# Matches a retention period: a decimal magnitude followed by a unit
# (s, min, h, d, w, m, y), e.g. b'30d'.
_period_rx = re.compile(br'^([0-9]+)(s|min|h|d|w|m|y)$')
1183 def period_as_secs(s):
1186 match = _period_rx.match(s)
1189 mag = int(match.group(1))
1190 scale = match.group(2)
1191 return mag * {b's': 1,
1195 b'w': 60 * 60 * 24 * 7,
1196 b'm': 60 * 60 * 24 * 31,
1197 b'y': 60 * 60 * 24 * 366}[scale]