"""Helper functions and classes for bup."""

from __future__ import absolute_import, division
from collections import namedtuple
from contextlib import contextmanager
from ctypes import sizeof, c_void_p
from os import environ
from pipes import quote
from subprocess import PIPE, Popen
import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
import hashlib, heapq, math, operator, time, grp, tempfile

from bup import _helpers
from bup import compat
# This function should really be in helpers, not in bup.options.  But we
# want options.py to be standalone so people can include it in other projects.
from bup.options import _tty_width as tty_width

class Nonlocal(object):
    """Helper to deal with Python scoping issues"""


sc_page_size = os.sysconf('SC_PAGE_SIZE')
assert sc_page_size > 0

sc_arg_max = os.sysconf('SC_ARG_MAX')
if sc_arg_max == -1:  # "no definite limit" - let's choose 2M
    sc_arg_max = 2 * 1024 * 1024


def last(iterable):
    """Return the last item of iterable, or None if it is empty."""
    result = None
    for result in iterable:
        pass
    return result


def atoi(s):
    """Convert the string 's' to an integer.  Return 0 if s is not a number."""
    try:
        return int(s or '0')
    except ValueError:
        return 0


def atof(s):
    """Convert the string 's' to a float.  Return 0 if s is not a number."""
    try:
        return float(s or '0')
    except ValueError:
        return 0


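# Illustrative behavior of the two converters above (comments only, not
# part of the original source):
#   atoi('42')  -> 42     atoi('bogus') -> 0       atoi(None) -> 0
#   atof('4.2') -> 4.2    atof('bogus') -> 0       atof('')   -> 0.0

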
buglvl = atoi(os.environ.get('BUP_DEBUG', 0))


try:
    _fdatasync = os.fdatasync
except AttributeError:
    _fdatasync = os.fsync

if sys.platform.startswith('darwin'):
    # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
    import fcntl
    def fdatasync(fd):
        try:
            return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
        except IOError as e:
            # Fallback for file systems (SMB) that do not support F_FULLFSYNC
            if e.errno == errno.ENOTSUP:
                return _fdatasync(fd)
            else:
                raise
else:
    fdatasync = _fdatasync


def partition(predicate, stream):
    """Returns (leading_matches_it, rest_it), where leading_matches_it
    must be completely exhausted before traversing rest_it.

    """
    ns = Nonlocal()
    ns.first_nonmatch = None
    def leading_matches():
        for x in stream:
            if predicate(x):
                yield x
            else:
                ns.first_nonmatch = (x,)
                break
    def rest():
        if ns.first_nonmatch:
            yield ns.first_nonmatch[0]
            for x in stream:
                yield x
    return (leading_matches(), rest())


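# Illustrative usage (comments only): split off the leading run of items
# that satisfy the predicate, then the remainder.
#   evens, rest = partition(lambda x: x % 2 == 0, iter([2, 4, 6, 7, 8]))
#   list(evens) -> [2, 4, 6]    (must be drained before touching rest)
#   list(rest)  -> [7, 8]

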
def lines_until_sentinel(f, sentinel, ex_type):
    # sentinel must end with \n and must contain only one \n
    while True:
        line = f.readline()
        if not (line and line.endswith('\n')):
            raise ex_type('Hit EOF while reading line')
        if line == sentinel:
            return
        yield line


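# Illustrative usage (comments only; StringIO stands in for a socket or
# pipe): read lines up to, but not including, a sentinel line.
#   f = StringIO('a\nb\n.\nc\n')
#   list(lines_until_sentinel(f, '.\n', Exception)) -> ['a\n', 'b\n']

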
def stat_if_exists(path):
    try:
        return os.stat(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    return None


# Write (blockingly) to sockets that may or may not be in blocking mode.
# We need this because our stderr is sometimes eaten by subprocesses
# (probably ssh) that sometimes make it nonblocking, if only temporarily,
# leading to race conditions.  Ick.  We'll do it the hard way.
def _hard_write(fd, buf):
    while buf:
        (r,w,x) = select.select([], [fd], [], None)
        if not w:
            raise IOError('select(fd) returned without being writable')
        try:
            sz = os.write(fd, buf)
        except OSError as e:
            if e.errno != errno.EAGAIN:
                raise
            continue  # a nonblocking fd raced us; select() and retry
        assert(sz >= 0)
        buf = buf[sz:]


_last_prog = 0
def log(s):
    """Print a log message to stderr."""
    global _last_prog
    sys.stdout.flush()
    _hard_write(sys.stderr.fileno(), s)
    _last_prog = 0


def debug1(s):
    if buglvl >= 1:
        log(s)


def debug2(s):
    if buglvl >= 2:
        log(s)


istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
_last_progress = ''
def progress(s):
    """Calls log() if stderr is a TTY.  Does nothing otherwise."""
    global _last_progress
    if istty2:
        log(s)
        _last_progress = s


def qprogress(s):
    """Calls progress() only if we haven't printed progress in a while.

    This avoids overloading the stderr buffer with excess junk.
    """
    global _last_prog
    now = time.time()
    if now - _last_prog > 0.1:
        progress(s)
        _last_prog = now


def reprogress():
    """Calls progress() to redisplay the most recent progress message.

    Useful after you've printed some other message that wipes out the
    progress line.
    """
    if _last_progress and _last_progress.endswith('\r'):
        progress(_last_progress)


def mkdirp(d, mode=None):
    """Recursively create directories on path 'd'.

    Unlike os.makedirs(), it doesn't raise an exception if the last element of
    the path already exists.
    """
    try:
        if mode:
            os.makedirs(d, mode)
        else:
            os.makedirs(d)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise


def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
    samekey = (lambda e, pe: getattr(e, key) == getattr(pe, key, None)) \
        if key else operator.eq
    count = 0
    total = sum(len(it) for it in iters)
    iters = (iter(it) for it in iters)
    heap = ((next(it, None),it) for it in iters)
    heap = [(e,it) for e,it in heap if e]
    heapq.heapify(heap)
    pe = None
    while heap:
        if not count % pfreq:
            pfunc(count, total)
        e, it = heap[0]
        if not samekey(e, pe):
            pe = e
            yield e
        count += 1
        try:
            e = next(it)
        except StopIteration:
            heapq.heappop(heap) # remove current
        else:
            heapq.heapreplace(heap, (e, it)) # shift current to new location
    pfinal(count, total)


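# Illustrative usage (comments only; relies on Python 2 tuple-comparison
# semantics like the function itself): merge pre-sorted sequences,
# suppressing duplicate keys, with progress callbacks.
#   noop = lambda count, total: None
#   list(merge_iter([['a', 'c'], ['b', 'c']], 1000, noop, noop))
#   -> ['a', 'b', 'c']

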
def unlink(f):
    """Delete a file at path 'f' if it currently exists.

    Unlike os.unlink(), does not throw an exception if the file didn't already
    exist.
    """
    try:
        os.unlink(f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise


def shstr(cmd):
    if isinstance(cmd, compat.str_type):
        return cmd
    else:
        return ' '.join(map(quote, cmd))


exc = subprocess.check_call

def exo(cmd, input=None, stdin=None, stderr=None, shell=False, check=True,
        preexec_fn=None):
    if input:
        assert stdin in (None, PIPE)
        stdin = PIPE
    p = Popen(cmd,
              stdin=stdin, stdout=PIPE, stderr=stderr,
              shell=shell,
              preexec_fn=preexec_fn)
    out, err = p.communicate(input)
    if check and p.returncode != 0:
        raise Exception('subprocess %r failed with status %d, stderr: %r'
                        % (' '.join(map(quote, cmd)), p.returncode, err))
    return out, err, p


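# Illustrative usage (comments only; the git invocation is just an
# example): run a command, capture its stdout, and raise if it fails.
#   out, err, p = exo(['git', 'rev-parse', 'HEAD'])
#   out.strip()  -> the current commit hash

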
def readpipe(argv, preexec_fn=None, shell=False):
    """Run a subprocess and return its output."""
    p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn,
                         shell=shell)
    out, err = p.communicate()
    if p.returncode != 0:
        raise Exception('subprocess %r failed with status %d'
                        % (' '.join(argv), p.returncode))
    return out


def _argmax_base(command):
    # Allowance for the command itself -- each argument string with its
    # terminating NUL -- plus the environment (each "k=v\0" string and
    # its pointer).
    base_size = 2048
    for c in command:
        base_size += len(c) + 1  # was len(command); len(c) is what's meant
    for k, v in compat.items(environ):
        base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
    return base_size


def _argmax_args_size(args):
    return sum(len(x) + 1 + sizeof(c_void_p) for x in args)


def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
    """If args is not empty, yield the output produced by calling the
    command list with args as a sequence of strings.  (It may be
    necessary to return multiple strings in order to respect ARG_MAX.)"""
    # The optional arg_max arg is a workaround for an issue with the
    # current wvtest behavior.
    base_size = _argmax_base(command)
    while args:
        room = arg_max - base_size
        i = 0
        while i < len(args):
            next_size = _argmax_args_size(args[i:i+1])
            if room - next_size < 0:
                break
            room -= next_size
            i += 1
        sub_args = args[:i]
        args = args[i:]
        assert(len(sub_args))
        yield readpipe(command + sub_args, preexec_fn=preexec_fn)


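# Illustrative usage (comments only; 'many_paths' and 'process' are
# hypothetical): stat a large set of paths without exceeding ARG_MAX by
# letting batchpipe split the invocations.
#   for out in batchpipe(['stat', '-c', '%n %s'], many_paths):
#       process(out)

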
def resolve_parent(p):
    """Return the absolute path of a file without following any final symlink.

    Behaves like os.path.realpath, but doesn't follow a symlink for the last
    element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
    will follow symlinks in p's directory)
    """
    try:
        st = os.lstat(p)
    except OSError:
        st = None
    if st and stat.S_ISLNK(st.st_mode):
        (dir, name) = os.path.split(p)
        dir = os.path.realpath(dir)
        out = os.path.join(dir, name)
    else:
        out = os.path.realpath(p)
    #log('realpathing:%r,%r\n' % (p, out))
    return out


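# Illustrative behavior (comments only): if /tmp/link is a symlink to
# /etc, resolve_parent('/tmp/link') returns '/tmp/link' itself, whereas
# os.path.realpath('/tmp/link') would return '/etc'.

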
def detect_fakeroot():
    "Return True if we appear to be running under fakeroot."
    return os.getenv("FAKEROOTKEY") is not None


if sys.platform.startswith('cygwin'):
    def is_superuser():
        # https://cygwin.com/ml/cygwin/2015-02/msg00057.html
        groups = os.getgroups()
        return 544 in groups or 0 in groups
else:
    def is_superuser():
        return os.geteuid() == 0


def _cache_key_value(get_value, key, cache):
    """Return (value, was_cached).  If there is a value in the cache
    for key, use that, otherwise, call get_value(key) which should
    throw a KeyError if there is no value -- in which case the cached
    and returned value will be None.
    """
    try: # Do we already have it (or know there wasn't one)?
        value = cache[key]
        return value, True
    except KeyError:
        pass
    value = None
    try:
        cache[key] = value = get_value(key)
    except KeyError:
        cache[key] = None
    return value, False


_uid_to_pwd_cache = {}
_name_to_pwd_cache = {}

def pwd_from_uid(uid):
    """Return password database entry for uid (may be a cached value).
    Return None if no entry is found.
    """
    global _uid_to_pwd_cache, _name_to_pwd_cache
    entry, cached = _cache_key_value(pwd.getpwuid, uid, _uid_to_pwd_cache)
    if entry and not cached:
        _name_to_pwd_cache[entry.pw_name] = entry
    return entry


def pwd_from_name(name):
    """Return password database entry for name (may be a cached value).
    Return None if no entry is found.
    """
    global _uid_to_pwd_cache, _name_to_pwd_cache
    entry, cached = _cache_key_value(pwd.getpwnam, name, _name_to_pwd_cache)
    if entry and not cached:
        _uid_to_pwd_cache[entry.pw_uid] = entry
    return entry


_gid_to_grp_cache = {}
_name_to_grp_cache = {}

def grp_from_gid(gid):
    """Return group database entry for gid (may be a cached value).
    Return None if no entry is found.
    """
    global _gid_to_grp_cache, _name_to_grp_cache
    entry, cached = _cache_key_value(grp.getgrgid, gid, _gid_to_grp_cache)
    if entry and not cached:
        _name_to_grp_cache[entry.gr_name] = entry
    return entry


def grp_from_name(name):
    """Return group database entry for name (may be a cached value).
    Return None if no entry is found.
    """
    global _gid_to_grp_cache, _name_to_grp_cache
    entry, cached = _cache_key_value(grp.getgrnam, name, _name_to_grp_cache)
    if entry and not cached:
        _gid_to_grp_cache[entry.gr_gid] = entry
    return entry


_username = None
def username():
    """Get the user's login name."""
    global _username
    if not _username:
        uid = os.getuid()
        entry = pwd_from_uid(uid)
        # pwd_from_uid() may return None, so don't index it blindly.
        _username = (entry and entry[0]) or 'user%d' % uid
    return _username


_userfullname = None
def userfullname():
    """Get the user's full name."""
    global _userfullname
    if not _userfullname:
        uid = os.getuid()
        entry = pwd_from_uid(uid)
        if entry:
            _userfullname = entry[4].split(',')[0] or entry[0]
        if not _userfullname:
            _userfullname = 'user%d' % uid
    return _userfullname


_hostname = None
def hostname():
    """Get the FQDN of this machine."""
    global _hostname
    if not _hostname:
        _hostname = socket.getfqdn()
    return _hostname


_resource_path = None
def resource_path(subdir=''):
    global _resource_path
    if not _resource_path:
        _resource_path = os.environ.get('BUP_RESOURCE_PATH') or '.'
    return os.path.join(_resource_path, subdir)


def format_filesize(size):
    unit = 1024.0
    size = float(size)
    if size < unit:
        return "%d" % (size)
    exponent = int(math.log(size) // math.log(unit))
    size_prefix = "KMGTPE"[exponent - 1]
    # Use true division here; floor division would always print x.0.
    return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)


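# Illustrative outputs (comments only):
#   format_filesize(512)     -> '512'
#   format_filesize(2048)    -> '2.0K'
#   format_filesize(3 << 20) -> '3.0M'

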
class NotOk(Exception):
    pass


class BaseConn:
    def __init__(self, outp):
        self.outp = outp

    def close(self):
        while self._read(65536): pass

    def read(self, size):
        """Read 'size' bytes from input stream."""
        self.outp.flush()
        return self._read(size)

    def readline(self):
        """Read from input stream until a newline is found."""
        self.outp.flush()
        return self._readline()

    def write(self, data):
        """Write 'data' to output stream."""
        #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
        self.outp.write(data)

    def has_input(self):
        """Return true if input stream is readable."""
        # NotImplemented is a value, not an exception; raise the error type.
        raise NotImplementedError("Subclasses must implement has_input")

    def ok(self):
        """Indicate end of output from last sent command."""
        self.write('\nok\n')

    def error(self, s):
        """Indicate server error to the client."""
        s = re.sub(r'\s+', ' ', str(s))
        self.write('\nerror %s\n' % s)

    def _check_ok(self, onempty):
        self.outp.flush()
        rl = ''
        for rl in linereader(self):
            #log('%d got line: %r\n' % (os.getpid(), rl))
            if not rl:  # empty line
                continue
            elif rl == 'ok':
                return None
            elif rl.startswith('error '):
                #log('client: error: %s\n' % rl[6:])
                return NotOk(rl[6:])
            else:
                onempty(rl)
        raise Exception('server exited unexpectedly; see errors above')

    def drain_and_check_ok(self):
        """Remove all data for the current command from input stream."""
        def onempty(rl):
            pass
        return self._check_ok(onempty)

    def check_ok(self):
        """Verify that server action completed successfully."""
        def onempty(rl):
            raise Exception('expected "ok", got %r' % rl)
        return self._check_ok(onempty)


class Conn(BaseConn):
    def __init__(self, inp, outp):
        BaseConn.__init__(self, outp)
        self.inp = inp

    def _read(self, size):
        return self.inp.read(size)

    def _readline(self):
        return self.inp.readline()

    def has_input(self):
        [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
        if rl:
            assert(rl[0] == self.inp.fileno())
            return True
        else:
            return None


def checked_reader(fd, n):
    while n > 0:
        rl, _, _ = select.select([fd], [], [])
        assert(rl[0] == fd)
        buf = os.read(fd, n)
        if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
        yield buf
        n -= len(buf)


MAX_PACKET = 128 * 1024
def mux(p, outfd, outr, errr):
    try:
        fds = [outr, errr]
        while p.poll() is None:
            rl, _, _ = select.select(fds, [], [])
            if outr in rl:
                buf = os.read(outr, MAX_PACKET)
                if not buf: break
                os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
            if errr in rl:
                buf = os.read(errr, 1024)
                if not buf: break
                os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
        os.write(outfd, struct.pack('!IB', 0, 3))
    finally:
        os.close(outr)
        os.close(errr)


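# Framing used by mux() and DemuxConn (comments only): each packet is a
# 5-byte big-endian header -- a 4-byte payload length plus a 1-byte
# channel (1=stdout, 2=stderr, 3=end-of-stream) -- followed by the
# payload, e.g.:
#   struct.pack('!IB', 5, 1) + 'hello'   # five stdout bytes
#   struct.unpack('!IB', header)         -> (length, channel)

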
class DemuxConn(BaseConn):
    """A helper class for bup's client-server protocol."""
    def __init__(self, infd, outp):
        BaseConn.__init__(self, outp)
        # Anything that comes through before the sync string was not
        # multiplexed and can be assumed to be debug/log before mux init.
        tail = ''
        while tail != 'BUPMUX':
            b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
            if not b:
                raise IOError('demux: unexpected EOF during initialization')
            tail += b
            sys.stderr.write(tail[:-6])  # pre-mux log messages
            tail = tail[-6:]
        self.infd = infd
        self.reader = None
        self.buf = None
        self.closed = False

    def write(self, data):
        self._load_buf(0)
        BaseConn.write(self, data)

    def _next_packet(self, timeout):
        if self.closed: return False
        rl, wl, xl = select.select([self.infd], [], [], timeout)
        if not rl: return False
        assert(rl[0] == self.infd)
        ns = ''.join(checked_reader(self.infd, 5))
        n, fdw = struct.unpack('!IB', ns)
        assert(n <= MAX_PACKET)
        if fdw == 1:
            self.reader = checked_reader(self.infd, n)
        elif fdw == 2:
            for buf in checked_reader(self.infd, n):
                sys.stderr.write(buf)
        elif fdw == 3:
            self.closed = True
            debug2("DemuxConn: marked closed\n")
        return True

    def _load_buf(self, timeout):
        if self.buf is not None:
            return True
        while not self.closed:
            while not self.reader:
                if not self._next_packet(timeout):
                    return False
            try:
                self.buf = next(self.reader)
                return True
            except StopIteration:
                self.reader = None
        return False

    def _read_parts(self, ix_fn):
        while self._load_buf(None):
            assert(self.buf is not None)
            i = ix_fn(self.buf)
            if i is None or i == len(self.buf):
                yv = self.buf
                self.buf = None
            else:
                yv = self.buf[:i]
                self.buf = self.buf[i:]
            yield yv
            if i is not None:
                break

    def _readline(self):
        def find_eol(buf):
            try:
                return buf.index('\n')+1
            except ValueError:
                return None
        return ''.join(self._read_parts(find_eol))

    def _read(self, size):
        csize = [size]
        def until_size(buf): # Closes on csize
            if len(buf) < csize[0]:
                csize[0] -= len(buf)
                return None
            else:
                return csize[0]
        return ''.join(self._read_parts(until_size))

    def has_input(self):
        return self._load_buf(0)


def linereader(f):
    """Generate a list of input lines from 'f' without terminating newlines."""
    while 1:
        line = f.readline()
        if not line:
            break
        yield line[:-1]


def chunkyreader(f, count = None):
    """Generate a list of chunks of data read from 'f'.

    If count is None, read until EOF is reached.

    If count is a positive integer, read 'count' bytes from 'f'.  If EOF is
    reached while reading, raise IOError.
    """
    if count != None:
        while count > 0:
            b = f.read(min(count, 65536))
            if not b:
                raise IOError('EOF with %d bytes remaining' % count)
            yield b
            count -= len(b)
    else:
        while 1:
            b = f.read(65536)
            if not b: break
            yield b


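# Illustrative usage (comments only; 'src', 'dst', and 'n' are
# hypothetical): copy exactly n bytes between two file objects, failing
# loudly on a short read.
#   for chunk in chunkyreader(src, n):
#       dst.write(chunk)

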
@contextmanager
def atomically_replaced_file(name, mode='w', buffering=-1):
    """Yield a file that will be atomically renamed to name when leaving
    the block.

    This contextmanager yields an open file object, backed by a temporary
    file which will be renamed (atomically) to the target name if
    everything succeeds.  The mode and buffering arguments are handled
    exactly as with open, and the yielded file will have very restrictive
    permissions, as per mkstemp.

    E.g.::

        with atomically_replaced_file('foo.txt', 'w') as f:
            f.write('hello jack.')

    """
    (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
                                       text=('b' not in mode))
    try:
        try:
            f = os.fdopen(ffd, mode, buffering)
        except:
            os.close(ffd)
            raise
        try:
            yield f
        finally:
            f.close()
        os.rename(tempname, name)
    finally:
        unlink(tempname)  # nonexistent file is ignored


def slashappend(s):
    """Append "/" to 's' if it doesn't already end in "/"."""
    if s and not s.endswith('/'):
        return s + '/'
    else:
        return s


def _mmap_do(f, sz, flags, prot, close):
    if not sz:
        st = os.fstat(f.fileno())
        sz = st.st_size
    if not sz:
        # trying to open a zero-length map gives an error, but an empty
        # string has all the same behaviour of a zero-length map, ie. it has
        # no elements :)
        return ''
    map = mmap.mmap(f.fileno(), sz, flags, prot)
    if close:
        f.close()  # map will persist beyond file close
    return map


def mmap_read(f, sz = 0, close=True):
    """Create a read-only memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)


def mmap_readwrite(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    """
    return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
                    close)


def mmap_readwrite_private(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    The map is private, which means the changes are never flushed back to the
    file.
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
                    close)


_mincore = getattr(_helpers, 'mincore', None)
if _mincore:
    # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
    MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)


_fmincore_chunk_size = None
def _set_fmincore_chunk_size():
    global _fmincore_chunk_size
    pref_chunk_size = 64 * 1024 * 1024
    chunk_size = sc_page_size
    if sc_page_size < pref_chunk_size:
        chunk_size = sc_page_size * (pref_chunk_size // sc_page_size)
    _fmincore_chunk_size = chunk_size


def fmincore(fd):
    """Return the mincore() data for fd as a bytearray whose values can be
    tested via MINCORE_INCORE, or None if fd does not fully
    support the operation."""
    st = os.fstat(fd)
    if st.st_size == 0:
        return bytearray(0)
    if not _fmincore_chunk_size:
        _set_fmincore_chunk_size()
    pages_per_chunk = _fmincore_chunk_size // sc_page_size
    page_count = (st.st_size + sc_page_size - 1) // sc_page_size
    # Round the chunk count up so a final partial chunk is still scanned;
    # dividing the page count by the chunk size in bytes mixed up units.
    chunk_count = (st.st_size + _fmincore_chunk_size - 1) // _fmincore_chunk_size
    result = bytearray(page_count)
    for ci in compat.range(chunk_count):
        pos = _fmincore_chunk_size * ci
        msize = min(_fmincore_chunk_size, st.st_size - pos)
        try:
            m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
        except mmap.error as ex:
            if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
                # Perhaps the file was a pipe, i.e. "... | bup split ..."
                return None
            raise ex
        try:
            _mincore(m, msize, 0, result, ci * pages_per_chunk)
        except OSError as ex:
            if ex.errno == errno.ENOSYS:
                return None
            raise
    return result


def parse_timestamp(epoch_str):
    """Return the number of nanoseconds since the epoch that are described
    by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
    throw a ValueError that may contain additional information."""
    ns_per = {'s' : 1000000000,
              'ms' : 1000000,
              'us' : 1000,
              'ns' : 1}
    match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
    if not match:
        if re.match(r'^([-+]?[0-9]+)$', epoch_str):
            raise ValueError('must include units, i.e. 100ns, 100ms, ...')
        raise ValueError()
    (n, units) = match.group(1, 2)
    if not n:
        n = 1
    n = int(n)
    return n * ns_per[units]


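# Illustrative conversions (comments only):
#   parse_timestamp('100ms') -> 100000000
#   parse_timestamp('-5s')   -> -5000000000
#   parse_timestamp('100')   -> ValueError (units are required)

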
def parse_num(s):
    """Parse data size information into a number of bytes (int).

    Here are some examples of conversions:
        199.2k means 203980 bytes
        1GB means 1073741824 bytes
        2.1 tb means 2308974418329 bytes
    """
    g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
    if not g:
        raise ValueError("can't parse %r as a number" % s)
    (val, unit) = g.groups()
    num = float(val)
    unit = unit.lower()
    if unit in ['t', 'tb']:
        mult = 1024*1024*1024*1024
    elif unit in ['g', 'gb']:
        mult = 1024*1024*1024
    elif unit in ['m', 'mb']:
        mult = 1024*1024
    elif unit in ['k', 'kb']:
        mult = 1024
    elif unit in ['', 'b']:
        mult = 1
    else:
        raise ValueError("invalid unit %r in number %r" % (unit, s))
    return int(num*mult)


def count(l):
    """Count the number of elements in an iterator. (consumes the iterator)"""
    # reduce(lambda x,y: x+1, l) misbehaved here: with no initializer,
    # reduce uses the first element as the starting value.  Just sum.
    return sum(1 for _ in l)


saved_errors = []
def add_error(e):
    """Append an error message to the list of saved errors.

    Once processing is able to stop and output the errors, the saved errors are
    accessible in the module variable helpers.saved_errors.
    """
    saved_errors.append(e)
    log('%-70s\n' % e)


def die_if_errors(msg=None, status=1):
    global saved_errors
    if saved_errors:
        if not msg:
            msg = 'warning: %d errors encountered\n' % len(saved_errors)
        log(msg)
        sys.exit(status)


def handle_ctrl_c():
    """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).

    The new exception handler will make sure that bup will exit without an ugly
    stacktrace when Ctrl-C is hit.
    """
    oldhook = sys.excepthook
    def newhook(exctype, value, traceback):
        if exctype == KeyboardInterrupt:
            log('\nInterrupted.\n')
        else:
            return oldhook(exctype, value, traceback)
    sys.excepthook = newhook


def columnate(l, prefix):
    """Format elements of 'l' in columns with 'prefix' leading each line.

    The number of columns is determined automatically based on the string
    lengths.
    """
    if not l:
        return ""
    l = l[:]
    clen = max(len(s) for s in l)
    ncols = (tty_width() - len(prefix)) // (clen + 2)
    if ncols <= 1:
        ncols = 1
        clen = 0
    cols = []
    while len(l) % ncols:
        l.append('')
    rows = len(l) // ncols
    for s in compat.range(0, len(l), rows):
        cols.append(l[s:s+rows])
    out = ''
    for row in zip(*cols):
        out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
    return out


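# Illustrative usage (comments only): columnate() lays items out
# column-major, so with room for only two columns,
#   columnate(['a1', 'a2', 'a3', 'a4'], '  ')
# would print roughly:
#   a1  a3
#   a2  a4

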
def parse_date_or_fatal(str, fatal):
    """Parses the given date or calls Option.fatal().
    For now we expect a string that contains a float."""
    try:
        date = float(str)
    except ValueError as e:
        raise fatal('invalid date format (should be a float): %r' % e)
    else:
        return date


def parse_excludes(options, fatal):
    """Traverse the options and extract all excludes, or call Option.fatal()."""
    excluded_paths = []

    for flag in options:
        (option, parameter) = flag
        if option == '--exclude':
            excluded_paths.append(resolve_parent(parameter))
        elif option == '--exclude-from':
            try:
                f = open(resolve_parent(parameter))
            except IOError as e:
                raise fatal("couldn't read %s" % parameter)
            for exclude_path in f.readlines():
                # FIXME: perhaps this should be rstrip('\n')
                exclude_path = resolve_parent(exclude_path.strip())
                if exclude_path:
                    excluded_paths.append(exclude_path)
            f.close()

    return sorted(frozenset(excluded_paths))


def parse_rx_excludes(options, fatal):
    """Traverse the options and extract all rx excludes, or call
    Option.fatal()."""
    excluded_patterns = []

    for flag in options:
        (option, parameter) = flag
        if option == '--exclude-rx':
            try:
                excluded_patterns.append(re.compile(parameter))
            except re.error as ex:
                fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
        elif option == '--exclude-rx-from':
            try:
                f = open(resolve_parent(parameter))
            except IOError as e:
                raise fatal("couldn't read %s" % parameter)
            for pattern in f.readlines():
                spattern = pattern.rstrip('\n')
                if not spattern:
                    continue
                try:
                    excluded_patterns.append(re.compile(spattern))
                except re.error as ex:
                    fatal('invalid --exclude-rx pattern (%s): %s'
                          % (spattern, ex))
            f.close()

    return excluded_patterns


def should_rx_exclude_path(path, exclude_rxs):
    """Return True if path matches a regular expression in exclude_rxs."""
    for rx in exclude_rxs:
        if rx.search(path):
            debug1('Skipping %r: excluded by rx pattern %r.\n'
                   % (path, rx.pattern))
            return True
    return False


# FIXME: Carefully consider the use of functions (os.path.*, etc.)
# that resolve against the current filesystem in the strip/graft
# functions for example, but elsewhere as well.  I suspect bup's not
# always being careful about that.  For some cases, the contents of
# the current filesystem should be irrelevant, and consulting it might
# produce the wrong result, perhaps via unintended symlink resolution,
# for example.


def path_components(path):
    """Break path into a list of pairs of the form (name,
    full_path_to_name).  Path must start with '/'.
    Example:
      '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
    if not path.startswith('/'):
        raise Exception('path must start with "/": %s' % path)
    # Since we assume path startswith('/'), we can skip the first element.
    result = [('', '/')]
    norm_path = os.path.abspath(path)
    if norm_path == '/':
        return result
    full_path = ''
    for p in norm_path.split('/')[1:]:
        full_path += '/' + p
        result.append((p, full_path))
    return result


def stripped_path_components(path, strip_prefixes):
    """Strip any prefix in strip_prefixes from path and return a list
    of path components where each component is (name,
    none_or_full_fs_path_to_name).  Assume path startswith('/').
    See thelpers.py for examples."""
    normalized_path = os.path.abspath(path)
    sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
    for bp in sorted_strip_prefixes:
        normalized_bp = os.path.abspath(bp)
        if normalized_bp == '/':
            continue
        if normalized_path.startswith(normalized_bp):
            prefix = normalized_path[:len(normalized_bp)]
            result = []
            for p in normalized_path[len(normalized_bp):].split('/'):
                if p: # not root
                    prefix += '/' + p
                result.append((p, prefix))
            return result
    return path_components(path)


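# Illustrative behavior (comments only): with strip prefix '/home',
#   stripped_path_components('/home/foo/bar', ['/home'])
#   -> [('', '/home'), ('foo', '/home/foo'), ('bar', '/home/foo/bar')]

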
def grafted_path_components(graft_points, path):
    # Create a result that consists of some number of faked graft
    # directories before the graft point, followed by all of the real
    # directories from path that are after the graft point.  Arrange
    # for the directory at the graft point in the result to correspond
    # to the "orig" directory in --graft orig=new.  See t/thelpers.py
    # for some examples.

    # Note that given --graft orig=new, orig and new have *nothing* to
    # do with each other, even if some of their component names
    # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
    # equivalent to --graft /foo/bar/baz=/x/y/z, or even
    # /foo/bar/baz=/x.

    # FIXME: This can't be the best solution...
    clean_path = os.path.abspath(path)
    for graft_point in graft_points:
        old_prefix, new_prefix = graft_point
        # Expand prefixes iff not absolute paths.
        old_prefix = os.path.normpath(old_prefix)
        new_prefix = os.path.normpath(new_prefix)
        if clean_path.startswith(old_prefix):
            escaped_prefix = re.escape(old_prefix)
            grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path)
            # Handle /foo=/ (at least) -- which produces //whatever.
            grafted_path = '/' + grafted_path.lstrip('/')
            clean_path_components = path_components(clean_path)
            # Count the components that were stripped.
            strip_count = 0 if old_prefix == '/' else old_prefix.count('/')
            new_prefix_parts = new_prefix.split('/')
            result_prefix = grafted_path.split('/')[:new_prefix.count('/')]
            result = [(p, None) for p in result_prefix] \
                + clean_path_components[strip_count:]
            # Now set the graft point name to match the end of new_prefix.
            graft_point = len(result_prefix)
            result[graft_point] = \
                (new_prefix_parts[-1], clean_path_components[strip_count][1])
            if new_prefix == '/': # --graft ...=/ is a special case.
                return result[1:]
            return result
    return path_components(clean_path)


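# Illustrative behavior (comments only): with --graft /orig=/new,
#   grafted_path_components([('/orig', '/new')], '/orig/file')
#   -> [('', None), ('new', '/orig'), ('file', '/orig/file')]
# i.e. the grafted component keeps the real fs path of the old prefix.

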
Sha1 = hashlib.sha1


_localtime = getattr(_helpers, 'localtime', None)

if _localtime:
    bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
                                       'tm_hour', 'tm_min', 'tm_sec',
                                       'tm_wday', 'tm_yday',
                                       'tm_isdst', 'tm_gmtoff', 'tm_zone'])

# Define a localtime() that returns bup_time when possible.  Note:
# this means that any helpers.localtime() results may need to be
# passed through to_py_time() before being passed to python's time
# module, which doesn't appear willing to ignore the extra items.
if _localtime:
    def localtime(time):
        return bup_time(*_helpers.localtime(time))
    def utc_offset_str(t):
        """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
        If the current UTC offset does not represent an integer number
        of minutes, the fractional component will be truncated."""
        off = localtime(t).tm_gmtoff
        # Note: // doesn't truncate like C for negative values, it rounds down.
        offmin = abs(off) // 60
        m = offmin % 60
        h = (offmin - m) // 60
        return "%+03d%02d" % (-h if off < 0 else h, m)
    def to_py_time(x):
        if isinstance(x, time.struct_time):
            return x
        return time.struct_time(x[:9])
else:
    localtime = time.localtime
    def utc_offset_str(t):
        return time.strftime('%z', localtime(t))
    def to_py_time(x):
        return x


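# Illustrative result (comments only): in a zone two hours east of UTC,
#   utc_offset_str(time.time()) -> '+0200'

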
_some_invalid_save_parts_rx = re.compile(r'[[ ~^:?*\\]|\.\.|//|@{')

def valid_save_name(name):
    # Enforce a superset of the restrictions in git-check-ref-format(1)
    if name == '@' \
       or name.startswith('/') or name.endswith('/') \
       or name.endswith('.'):
        return False
    if _some_invalid_save_parts_rx.search(name):
        return False
    for c in name:
        if ord(c) < 0x20 or ord(c) == 0x7f:
            return False
    for part in name.split('/'):
        if part.startswith('.') or part.endswith('.lock'):
            return False
    return True


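# Illustrative results (comments only):
#   valid_save_name('work/project') -> True
#   valid_save_name('bad..name')    -> False  (contains '..')
#   valid_save_name('trailing/')    -> False  (ends with '/')

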
_period_rx = re.compile(r'^([0-9]+)(s|min|h|d|w|m|y)$')

def period_as_secs(s):
    if s == 'forever':
        return float('inf')
    match = _period_rx.match(s)
    if not match:
        return None
    mag = int(match.group(1))
    scale = match.group(2)
    return mag * {'s': 1,
                  'min': 60,
                  'h': 60 * 60,
                  'd': 60 * 60 * 24,
                  'w': 60 * 60 * 24 * 7,
                  'm': 60 * 60 * 24 * 31,
                  'y': 60 * 60 * 24 * 366}[scale]
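

# Illustrative conversions (comments only):
#   period_as_secs('2w')      -> 1209600   (2 * 7 * 24 * 60 * 60)
#   period_as_secs('forever') -> inf
#   period_as_secs('2 weeks') -> None      (unparseable)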