1 """Helper functions and classes for bup."""
3 from collections import namedtuple
4 from ctypes import sizeof, c_void_p
6 from contextlib import contextmanager
7 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
8 import hashlib, heapq, math, operator, time, grp, tempfile
10 from bup import _helpers
14 """Helper to deal with Python scoping issues"""
18 sc_page_size = os.sysconf('SC_PAGE_SIZE')
19 assert(sc_page_size > 0)
21 sc_arg_max = os.sysconf('SC_ARG_MAX')
22 if sc_arg_max == -1: # "no definite limit" - let's choose 2M
23 sc_arg_max = 2 * 1024 * 1024
25 # This function should really be in helpers, not in bup.options. But we
26 # want options.py to be standalone so people can include it in other projects.
27 from bup.options import _tty_width
28 tty_width = _tty_width
32 """Convert the string 's' to an integer. Return 0 if s is not a number."""
40 """Convert the string 's' to a float. Return 0 if s is not a number."""
42 return float(s or '0')
47 buglvl = atoi(os.environ.get('BUP_DEBUG', 0))
51 _fdatasync = os.fdatasync
52 except AttributeError:
55 if sys.platform.startswith('darwin'):
56 # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
60 return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
62 # Fallback for file systems (SMB) that do not support F_FULLFSYNC
63 if e.errno == errno.ENOTSUP:
68 fdatasync = _fdatasync
71 def partition(predicate, stream):
72 """Returns (leading_matches_it, rest_it), where leading_matches_it
73 must be completely exhausted before traversing rest_it.
78 ns.first_nonmatch = None
79 def leading_matches():
84 ns.first_nonmatch = (x,)
88 yield ns.first_nonmatch[0]
91 return (leading_matches(), rest())
94 def stat_if_exists(path):
98 if e.errno != errno.ENOENT:
103 # Write (blockingly) to sockets that may or may not be in blocking mode.
104 # We need this because our stderr is sometimes eaten by subprocesses
105 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
106 # leading to race conditions. Ick. We'll do it the hard way.
107 def _hard_write(fd, buf):
109 (r,w,x) = select.select([], [fd], [], None)
111 raise IOError('select(fd) returned without being writable')
113 sz = os.write(fd, buf)
115 if e.errno != errno.EAGAIN:
123 """Print a log message to stderr."""
126 _hard_write(sys.stderr.fileno(), s)
# Treat a stream as a tty either when it genuinely is one, or when the
# BUP_FORCE_TTY bitmask asks us to pretend (bit 1 -> stdout, bit 2 -> stderr).
istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
144 """Calls log() if stderr is a TTY. Does nothing otherwise."""
145 global _last_progress
152 """Calls progress() only if we haven't printed progress in a while.
154 This avoids overloading the stderr buffer with excess junk.
158 if now - _last_prog > 0.1:
164 """Calls progress() to redisplay the most recent progress message.
166 Useful after you've printed some other message that wipes out the
169 if _last_progress and _last_progress.endswith('\r'):
170 progress(_last_progress)
173 def mkdirp(d, mode=None):
174 """Recursively create directories on path 'd'.
176 Unlike os.makedirs(), it doesn't raise an exception if the last element of
177 the path already exists.
185 if e.errno == errno.EEXIST:
191 def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
193 samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
195 samekey = operator.eq
197 total = sum(len(it) for it in iters)
198 iters = (iter(it) for it in iters)
199 heap = ((next(it, None),it) for it in iters)
200 heap = [(e,it) for e,it in heap if e]
205 if not count % pfreq:
208 if not samekey(e, pe):
214 except StopIteration:
215 heapq.heappop(heap) # remove current
217 heapq.heapreplace(heap, (e, it)) # shift current to new location
222 """Delete a file at path 'f' if it currently exists.
224 Unlike os.unlink(), does not throw an exception if the file didn't already
230 if e.errno != errno.ENOENT:
234 def readpipe(argv, preexec_fn=None, shell=False):
235 """Run a subprocess and return its output."""
236 p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn,
238 out, err = p.communicate()
239 if p.returncode != 0:
240 raise Exception('subprocess %r failed with status %d'
241 % (' '.join(argv), p.returncode))
245 def _argmax_base(command):
248 base_size += len(command) + 1
249 for k, v in environ.iteritems():
250 base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
def _argmax_args_size(args):
    """Return the number of bytes of exec argument space consumed by args:
    each string's length, plus its trailing NUL, plus one argv pointer."""
    total = 0
    for arg in args:
        total += len(arg) + 1 + sizeof(c_void_p)
    return total
258 def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
259 """If args is not empty, yield the output produced by calling the
260 command list with args as a sequence of strings (It may be necessary
261 to return multiple strings in order to respect ARG_MAX)."""
262 # The optional arg_max arg is a workaround for an issue with the
263 # current wvtest behavior.
264 base_size = _argmax_base(command)
266 room = arg_max - base_size
269 next_size = _argmax_args_size(args[i:i+1])
270 if room - next_size < 0:
276 assert(len(sub_args))
277 yield readpipe(command + sub_args, preexec_fn=preexec_fn)
280 def resolve_parent(p):
281 """Return the absolute path of a file without following any final symlink.
283 Behaves like os.path.realpath, but doesn't follow a symlink for the last
284 element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
285 will follow symlinks in p's directory)
291 if st and stat.S_ISLNK(st.st_mode):
292 (dir, name) = os.path.split(p)
293 dir = os.path.realpath(dir)
294 out = os.path.join(dir, name)
296 out = os.path.realpath(p)
297 #log('realpathing:%r,%r\n' % (p, out))
def detect_fakeroot():
    """Return True if we appear to be running under fakeroot.

    fakeroot(1) advertises itself to child processes via the FAKEROOTKEY
    environment variable, so its mere presence is the signal; the value
    itself doesn't matter.
    """
    # PEP 8: comparisons against None use 'is'/'is not', never '!='.
    return os.getenv("FAKEROOTKEY") is not None
306 _warned_about_superuser_detection = None
308 if sys.platform.startswith('cygwin'):
309 if sys.getwindowsversion()[0] > 5:
310 # Sounds like situation is much more complicated here
311 global _warned_about_superuser_detection
312 if not _warned_about_superuser_detection:
313 log("can't detect root status for OS version > 5; assuming not root")
314 _warned_about_superuser_detection = True
317 return ctypes.cdll.shell32.IsUserAnAdmin()
319 return os.geteuid() == 0
322 def _cache_key_value(get_value, key, cache):
323 """Return (value, was_cached). If there is a value in the cache
324 for key, use that, otherwise, call get_value(key) which should
325 throw a KeyError if there is no value -- in which case the cached
326 and returned value will be None.
328 try: # Do we already have it (or know there wasn't one)?
335 cache[key] = value = get_value(key)
# Caches for password database lookups; filled lazily and cross-linked by
# pwd_from_uid()/pwd_from_name() so a hit on one key primes the other.
_uid_to_pwd_cache = {}
_name_to_pwd_cache = {}
344 def pwd_from_uid(uid):
345 """Return password database entry for uid (may be a cached value).
346 Return None if no entry is found.
348 global _uid_to_pwd_cache, _name_to_pwd_cache
349 entry, cached = _cache_key_value(pwd.getpwuid, uid, _uid_to_pwd_cache)
350 if entry and not cached:
351 _name_to_pwd_cache[entry.pw_name] = entry
355 def pwd_from_name(name):
356 """Return password database entry for name (may be a cached value).
357 Return None if no entry is found.
359 global _uid_to_pwd_cache, _name_to_pwd_cache
360 entry, cached = _cache_key_value(pwd.getpwnam, name, _name_to_pwd_cache)
361 if entry and not cached:
362 _uid_to_pwd_cache[entry.pw_uid] = entry
# Caches for group database lookups; filled lazily and cross-linked by
# grp_from_gid()/grp_from_name() so a hit on one key primes the other.
_gid_to_grp_cache = {}
_name_to_grp_cache = {}
369 def grp_from_gid(gid):
    """Return group database entry for gid (may be a cached value).
371 Return None if no entry is found.
373 global _gid_to_grp_cache, _name_to_grp_cache
374 entry, cached = _cache_key_value(grp.getgrgid, gid, _gid_to_grp_cache)
375 if entry and not cached:
376 _name_to_grp_cache[entry.gr_name] = entry
380 def grp_from_name(name):
    """Return group database entry for name (may be a cached value).
382 Return None if no entry is found.
384 global _gid_to_grp_cache, _name_to_grp_cache
385 entry, cached = _cache_key_value(grp.getgrnam, name, _name_to_grp_cache)
386 if entry and not cached:
387 _gid_to_grp_cache[entry.gr_gid] = entry
393 """Get the user's login name."""
397 _username = pwd_from_uid(uid)[0] or 'user%d' % uid
403 """Get the user's full name."""
405 if not _userfullname:
407 entry = pwd_from_uid(uid)
409 _userfullname = entry[4].split(',')[0] or entry[0]
410 if not _userfullname:
411 _userfullname = 'user%d' % uid
417 """Get the FQDN of this machine."""
420 _hostname = socket.getfqdn()
_resource_path = None
def resource_path(subdir=''):
    """Return subdir joined onto bup's resource directory.

    The base directory comes from the BUP_RESOURCE_PATH environment
    variable (defaulting to the current directory) and is looked up only
    once, then cached for all later calls.
    """
    global _resource_path
    base = _resource_path
    if not base:
        base = os.environ.get('BUP_RESOURCE_PATH') or '.'
        _resource_path = base
    return os.path.join(base, subdir)
431 def format_filesize(size):
436 exponent = int(math.log(size) / math.log(unit))
437 size_prefix = "KMGTPE"[exponent - 1]
438 return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
441 class NotOk(Exception):
446 def __init__(self, outp):
450 while self._read(65536): pass
452 def read(self, size):
453 """Read 'size' bytes from input stream."""
455 return self._read(size)
458 """Read from input stream until a newline is found."""
460 return self._readline()
462 def write(self, data):
463 """Write 'data' to output stream."""
464 #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
465 self.outp.write(data)
468 """Return true if input stream is readable."""
469 raise NotImplemented("Subclasses must implement has_input")
472 """Indicate end of output from last sent command."""
476 """Indicate server error to the client."""
477 s = re.sub(r'\s+', ' ', str(s))
478 self.write('\nerror %s\n' % s)
480 def _check_ok(self, onempty):
483 for rl in linereader(self):
484 #log('%d got line: %r\n' % (os.getpid(), rl))
485 if not rl: # empty line
489 elif rl.startswith('error '):
490 #log('client: error: %s\n' % rl[6:])
494 raise Exception('server exited unexpectedly; see errors above')
496 def drain_and_check_ok(self):
497 """Remove all data for the current command from input stream."""
500 return self._check_ok(onempty)
503 """Verify that server action completed successfully."""
505 raise Exception('expected "ok", got %r' % rl)
506 return self._check_ok(onempty)
509 class Conn(BaseConn):
510 def __init__(self, inp, outp):
511 BaseConn.__init__(self, outp)
514 def _read(self, size):
515 return self.inp.read(size)
518 return self.inp.readline()
521 [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
523 assert(rl[0] == self.inp.fileno())
529 def checked_reader(fd, n):
531 rl, _, _ = select.select([fd], [], [])
534 if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
539 MAX_PACKET = 128 * 1024
540 def mux(p, outfd, outr, errr):
543 while p.poll() is None:
544 rl, _, _ = select.select(fds, [], [])
547 buf = os.read(outr, MAX_PACKET)
549 os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
551 buf = os.read(errr, 1024)
553 os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
555 os.write(outfd, struct.pack('!IB', 0, 3))
558 class DemuxConn(BaseConn):
559 """A helper class for bup's client-server protocol."""
560 def __init__(self, infd, outp):
561 BaseConn.__init__(self, outp)
562 # Anything that comes through before the sync string was not
563 # multiplexed and can be assumed to be debug/log before mux init.
565 while tail != 'BUPMUX':
566 b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
568 raise IOError('demux: unexpected EOF during initialization')
570 sys.stderr.write(tail[:-6]) # pre-mux log messages
577 def write(self, data):
579 BaseConn.write(self, data)
581 def _next_packet(self, timeout):
582 if self.closed: return False
583 rl, wl, xl = select.select([self.infd], [], [], timeout)
584 if not rl: return False
585 assert(rl[0] == self.infd)
586 ns = ''.join(checked_reader(self.infd, 5))
587 n, fdw = struct.unpack('!IB', ns)
588 assert(n <= MAX_PACKET)
590 self.reader = checked_reader(self.infd, n)
592 for buf in checked_reader(self.infd, n):
593 sys.stderr.write(buf)
596 debug2("DemuxConn: marked closed\n")
599 def _load_buf(self, timeout):
600 if self.buf is not None:
602 while not self.closed:
603 while not self.reader:
604 if not self._next_packet(timeout):
607 self.buf = next(self.reader)
609 except StopIteration:
613 def _read_parts(self, ix_fn):
614 while self._load_buf(None):
615 assert(self.buf is not None)
617 if i is None or i == len(self.buf):
622 self.buf = self.buf[i:]
630 return buf.index('\n')+1
633 return ''.join(self._read_parts(find_eol))
635 def _read(self, size):
637 def until_size(buf): # Closes on csize
638 if len(buf) < csize[0]:
643 return ''.join(self._read_parts(until_size))
646 return self._load_buf(0)
650 """Generate a list of input lines from 'f' without terminating newlines."""
658 def chunkyreader(f, count = None):
659 """Generate a list of chunks of data read from 'f'.
661 If count is None, read until EOF is reached.
663 If count is a positive integer, read 'count' bytes from 'f'. If EOF is
664 reached while reading, raise IOError.
668 b = f.read(min(count, 65536))
670 raise IOError('EOF with %d bytes remaining' % count)
681 def atomically_replaced_file(name, mode='w', buffering=-1):
682 """Yield a file that will be atomically renamed name when leaving the block.
684 This contextmanager yields an open file object that is backed by a
685 temporary file which will be renamed (atomically) to the target
686 name if everything succeeds.
688 The mode and buffering arguments are handled exactly as with open,
689 and the yielded file will have very restrictive permissions, as
694 with atomically_replaced_file('foo.txt', 'w') as f:
695 f.write('hello jack.')
699 (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
700 text=('b' not in mode))
703 f = os.fdopen(ffd, mode, buffering)
711 os.rename(tempname, name)
        unlink(tempname) # nonexistent file is ignored
    """Append "/" to 's' if it doesn't already end in "/"."""
718 if s and not s.endswith('/'):
724 def _mmap_do(f, sz, flags, prot, close):
726 st = os.fstat(f.fileno())
729 # trying to open a zero-length map gives an error, but an empty
730 # string has all the same behaviour of a zero-length map, ie. it has
733 map = mmap.mmap(f.fileno(), sz, flags, prot)
735 f.close() # map will persist beyond file close
739 def mmap_read(f, sz = 0, close=True):
740 """Create a read-only memory mapped region on file 'f'.
741 If sz is 0, the region will cover the entire file.
743 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
746 def mmap_readwrite(f, sz = 0, close=True):
747 """Create a read-write memory mapped region on file 'f'.
748 If sz is 0, the region will cover the entire file.
750 return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
754 def mmap_readwrite_private(f, sz = 0, close=True):
755 """Create a read-write memory mapped region on file 'f'.
756 If sz is 0, the region will cover the entire file.
757 The map is private, which means the changes are never flushed back to the
760 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
764 _mincore = getattr(_helpers, 'mincore', None)
766 # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
767 MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
# Lazily computed by _set_fmincore_chunk_size(); None until first use.
_fmincore_chunk_size = None
def _set_fmincore_chunk_size():
    """Set the module-level fmincore scan chunk size.

    The chunk is the largest exact multiple of the system page size that
    does not exceed 64MB; if a single page is already bigger than that,
    fall back to one page.
    """
    global _fmincore_chunk_size
    pref_chunk_size = 64 * 1024 * 1024
    chunk_size = sc_page_size
    if sc_page_size < pref_chunk_size:
        # // makes the integer division explicit (the original '/' only
        # worked because Python 2 floors int/int division).
        chunk_size = sc_page_size * (pref_chunk_size // sc_page_size)
    _fmincore_chunk_size = chunk_size
779 """Return the mincore() data for fd as a bytearray whose values can be
780 tested via MINCORE_INCORE, or None if fd does not fully
781 support the operation."""
783 if (st.st_size == 0):
785 if not _fmincore_chunk_size:
786 _set_fmincore_chunk_size()
787 pages_per_chunk = _fmincore_chunk_size / sc_page_size;
788 page_count = (st.st_size + sc_page_size - 1) / sc_page_size;
789 chunk_count = page_count / _fmincore_chunk_size
792 result = bytearray(page_count)
793 for ci in xrange(chunk_count):
794 pos = _fmincore_chunk_size * ci;
795 msize = min(_fmincore_chunk_size, st.st_size - pos)
797 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
798 except mmap.error as ex:
799 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
800 # Perhaps the file was a pipe, i.e. "... | bup split ..."
803 _mincore(m, msize, 0, result, ci * pages_per_chunk);
807 def parse_timestamp(epoch_str):
808 """Return the number of nanoseconds since the epoch that are described
809 by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
810 throw a ValueError that may contain additional information."""
811 ns_per = {'s' : 1000000000,
815 match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
817 if re.match(r'^([-+]?[0-9]+)$', epoch_str):
818 raise ValueError('must include units, i.e. 100ns, 100ms, ...')
820 (n, units) = match.group(1, 2)
824 return n * ns_per[units]
828 """Parse data size information into a float number.
830 Here are some examples of conversions:
831 199.2k means 203981 bytes
832 1GB means 1073741824 bytes
833 2.1 tb means 2199023255552 bytes
835 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
837 raise ValueError("can't parse %r as a number" % s)
838 (val, unit) = g.groups()
841 if unit in ['t', 'tb']:
842 mult = 1024*1024*1024*1024
843 elif unit in ['g', 'gb']:
844 mult = 1024*1024*1024
845 elif unit in ['m', 'mb']:
847 elif unit in ['k', 'kb']:
849 elif unit in ['', 'b']:
852 raise ValueError("invalid unit %r in number %r" % (unit, s))
857 """Count the number of elements in an iterator. (consumes the iterator)"""
858 return reduce(lambda x,y: x+1, l)
863 """Append an error message to the list of saved errors.
865 Once processing is able to stop and output the errors, the saved errors are
866 accessible in the module variable helpers.saved_errors.
868 saved_errors.append(e)
877 def die_if_errors(msg=None, status=1):
881 msg = 'warning: %d errors encountered\n' % len(saved_errors)
887 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
889 The new exception handler will make sure that bup will exit without an ugly
890 stacktrace when Ctrl-C is hit.
892 oldhook = sys.excepthook
893 def newhook(exctype, value, traceback):
894 if exctype == KeyboardInterrupt:
895 log('\nInterrupted.\n')
897 return oldhook(exctype, value, traceback)
898 sys.excepthook = newhook
901 def columnate(l, prefix):
902 """Format elements of 'l' in columns with 'prefix' leading each line.
904 The number of columns is determined automatically based on the string
910 clen = max(len(s) for s in l)
911 ncols = (tty_width() - len(prefix)) / (clen + 2)
916 while len(l) % ncols:
919 for s in range(0, len(l), rows):
920 cols.append(l[s:s+rows])
922 for row in zip(*cols):
923 out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
927 def parse_date_or_fatal(str, fatal):
928 """Parses the given date or calls Option.fatal().
929 For now we expect a string that contains a float."""
932 except ValueError as e:
933 raise fatal('invalid date format (should be a float): %r' % e)
938 def parse_excludes(options, fatal):
939 """Traverse the options and extract all excludes, or call Option.fatal()."""
943 (option, parameter) = flag
944 if option == '--exclude':
945 excluded_paths.append(resolve_parent(parameter))
946 elif option == '--exclude-from':
948 f = open(resolve_parent(parameter))
950 raise fatal("couldn't read %s" % parameter)
951 for exclude_path in f.readlines():
952 # FIXME: perhaps this should be rstrip('\n')
953 exclude_path = resolve_parent(exclude_path.strip())
955 excluded_paths.append(exclude_path)
956 return sorted(frozenset(excluded_paths))
959 def parse_rx_excludes(options, fatal):
960 """Traverse the options and extract all rx excludes, or call
962 excluded_patterns = []
965 (option, parameter) = flag
966 if option == '--exclude-rx':
968 excluded_patterns.append(re.compile(parameter))
969 except re.error as ex:
970 fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
971 elif option == '--exclude-rx-from':
973 f = open(resolve_parent(parameter))
975 raise fatal("couldn't read %s" % parameter)
976 for pattern in f.readlines():
977 spattern = pattern.rstrip('\n')
981 excluded_patterns.append(re.compile(spattern))
982 except re.error as ex:
983 fatal('invalid --exclude-rx pattern (%s): %s' % (spattern, ex))
984 return excluded_patterns
987 def should_rx_exclude_path(path, exclude_rxs):
988 """Return True if path matches a regular expression in exclude_rxs."""
989 for rx in exclude_rxs:
991 debug1('Skipping %r: excluded by rx pattern %r.\n'
992 % (path, rx.pattern))
997 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
998 # that resolve against the current filesystem in the strip/graft
999 # functions for example, but elsewhere as well. I suspect bup's not
1000 # always being careful about that. For some cases, the contents of
1001 # the current filesystem should be irrelevant, and consulting it might
1002 # produce the wrong result, perhaps via unintended symlink resolution,
1005 def path_components(path):
1006 """Break path into a list of pairs of the form (name,
1007 full_path_to_name). Path must start with '/'.
1009 '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
1010 if not path.startswith('/'):
1011 raise Exception, 'path must start with "/": %s' % path
1012 # Since we assume path startswith('/'), we can skip the first element.
1013 result = [('', '/')]
1014 norm_path = os.path.abspath(path)
1015 if norm_path == '/':
1018 for p in norm_path.split('/')[1:]:
1019 full_path += '/' + p
1020 result.append((p, full_path))
1024 def stripped_path_components(path, strip_prefixes):
1025 """Strip any prefix in strip_prefixes from path and return a list
1026 of path components where each component is (name,
1027 none_or_full_fs_path_to_name). Assume path startswith('/').
1028 See thelpers.py for examples."""
1029 normalized_path = os.path.abspath(path)
1030 sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
1031 for bp in sorted_strip_prefixes:
1032 normalized_bp = os.path.abspath(bp)
1033 if normalized_bp == '/':
1035 if normalized_path.startswith(normalized_bp):
1036 prefix = normalized_path[:len(normalized_bp)]
1038 for p in normalized_path[len(normalized_bp):].split('/'):
1042 result.append((p, prefix))
1045 return path_components(path)
1048 def grafted_path_components(graft_points, path):
1049 # Create a result that consists of some number of faked graft
1050 # directories before the graft point, followed by all of the real
1051 # directories from path that are after the graft point. Arrange
1052 # for the directory at the graft point in the result to correspond
1053 # to the "orig" directory in --graft orig=new. See t/thelpers.py
1054 # for some examples.
1056 # Note that given --graft orig=new, orig and new have *nothing* to
1057 # do with each other, even if some of their component names
1058 # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
1059 # equivalent to --graft /foo/bar/baz=/x/y/z, or even
1062 # FIXME: This can't be the best solution...
1063 clean_path = os.path.abspath(path)
1064 for graft_point in graft_points:
1065 old_prefix, new_prefix = graft_point
1066 # Expand prefixes iff not absolute paths.
1067 old_prefix = os.path.normpath(old_prefix)
1068 new_prefix = os.path.normpath(new_prefix)
1069 if clean_path.startswith(old_prefix):
1070 escaped_prefix = re.escape(old_prefix)
1071 grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path)
1072 # Handle /foo=/ (at least) -- which produces //whatever.
1073 grafted_path = '/' + grafted_path.lstrip('/')
1074 clean_path_components = path_components(clean_path)
1075 # Count the components that were stripped.
1076 strip_count = 0 if old_prefix == '/' else old_prefix.count('/')
1077 new_prefix_parts = new_prefix.split('/')
1078 result_prefix = grafted_path.split('/')[:new_prefix.count('/')]
1079 result = [(p, None) for p in result_prefix] \
1080 + clean_path_components[strip_count:]
1081 # Now set the graft point name to match the end of new_prefix.
1082 graft_point = len(result_prefix)
1083 result[graft_point] = \
1084 (new_prefix_parts[-1], clean_path_components[strip_count][1])
1085 if new_prefix == '/': # --graft ...=/ is a special case.
1088 return path_components(clean_path)
1094 _localtime = getattr(_helpers, 'localtime', None)
1097 bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
1098 'tm_hour', 'tm_min', 'tm_sec',
1099 'tm_wday', 'tm_yday',
1100 'tm_isdst', 'tm_gmtoff', 'tm_zone'])
1102 # Define a localtime() that returns bup_time when possible. Note:
1103 # this means that any helpers.localtime() results may need to be
1104 # passed through to_py_time() before being passed to python's time
1105 # module, which doesn't appear willing to ignore the extra items.
1107 def localtime(time):
1108 return bup_time(*_helpers.localtime(time))
1109 def utc_offset_str(t):
1110 """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
1111 If the current UTC offset does not represent an integer number
1112 of minutes, the fractional component will be truncated."""
1113 off = localtime(t).tm_gmtoff
1114 # Note: // doesn't truncate like C for negative values, it rounds down.
1115 offmin = abs(off) // 60
1117 h = (offmin - m) // 60
1118 return "%+03d%02d" % (-h if off < 0 else h, m)
1120 if isinstance(x, time.struct_time):
1122 return time.struct_time(x[:9])
1124 localtime = time.localtime
1125 def utc_offset_str(t):
1126 return time.strftime('%z', localtime(t))
1131 _some_invalid_save_parts_rx = re.compile(r'[[ ~^:?*\\]|\.\.|//|@{')
1133 def valid_save_name(name):
1134 # Enforce a superset of the restrictions in git-check-ref-format(1)
1136 or name.startswith('/') or name.endswith('/') \
1137 or name.endswith('.'):
1139 if _some_invalid_save_parts_rx.search(name):
1142 if ord(c) < 0x20 or ord(c) == 0x7f:
1144 for part in name.split('/'):
1145 if part.startswith('.') or part.endswith('.lock'):
1150 _period_rx = re.compile(r'^([0-9]+)(s|min|h|d|w|m|y)$')
1152 def period_as_secs(s):
1155 match = _period_rx.match(s)
1158 mag = int(match.group(1))
1159 scale = match.group(2)
1160 return mag * {'s': 1,
1164 'w': 60 * 60 * 24 * 7,
1165 'm': 60 * 60 * 24 * 31,
1166 'y': 60 * 60 * 24 * 366}[scale]