1 """Helper functions and classes for bup."""
3 from collections import namedtuple
4 from ctypes import sizeof, c_void_p
6 from contextlib import contextmanager
7 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
8 import hashlib, heapq, math, operator, time, grp, tempfile
10 from bup import _helpers
14 """Helper to deal with Python scoping issues"""
18 sc_page_size = os.sysconf('SC_PAGE_SIZE')
19 assert(sc_page_size > 0)
21 sc_arg_max = os.sysconf('SC_ARG_MAX')
22 if sc_arg_max == -1: # "no definite limit" - let's choose 2M
23 sc_arg_max = 2 * 1024 * 1024
25 # This function should really be in helpers, not in bup.options. But we
26 # want options.py to be standalone so people can include it in other projects.
27 from bup.options import _tty_width
28 tty_width = _tty_width
32 """Convert the string 's' to an integer. Return 0 if s is not a number."""
40 """Convert the string 's' to a float. Return 0 if s is not a number."""
42 return float(s or '0')
47 buglvl = atoi(os.environ.get('BUP_DEBUG', 0))
51 _fdatasync = os.fdatasync
52 except AttributeError:
55 if sys.platform.startswith('darwin'):
56 # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
60 return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
62 # Fallback for file systems (SMB) that do not support F_FULLFSYNC
63 if e.errno == errno.ENOTSUP:
68 fdatasync = _fdatasync
71 def partition(predicate, stream):
72 """Returns (leading_matches_it, rest_it), where leading_matches_it
73 must be completely exhausted before traversing rest_it.
78 ns.first_nonmatch = None
79 def leading_matches():
84 ns.first_nonmatch = (x,)
88 yield ns.first_nonmatch[0]
91 return (leading_matches(), rest())
94 # Write (blockingly) to sockets that may or may not be in blocking mode.
95 # We need this because our stderr is sometimes eaten by subprocesses
96 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
97 # leading to race conditions. Ick. We'll do it the hard way.
98 def _hard_write(fd, buf):
100 (r,w,x) = select.select([], [fd], [], None)
102 raise IOError('select(fd) returned without being writable')
104 sz = os.write(fd, buf)
106 if e.errno != errno.EAGAIN:
114 """Print a log message to stderr."""
117 _hard_write(sys.stderr.fileno(), s)
131 istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
132 istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
135 """Calls log() if stderr is a TTY. Does nothing otherwise."""
136 global _last_progress
143 """Calls progress() only if we haven't printed progress in a while.
145 This avoids overloading the stderr buffer with excess junk.
149 if now - _last_prog > 0.1:
155 """Calls progress() to redisplay the most recent progress message.
157 Useful after you've printed some other message that wipes out the
160 if _last_progress and _last_progress.endswith('\r'):
161 progress(_last_progress)
164 def mkdirp(d, mode=None):
165 """Recursively create directories on path 'd'.
167 Unlike os.makedirs(), it doesn't raise an exception if the last element of
168 the path already exists.
176 if e.errno == errno.EEXIST:
182 _unspecified_next_default = object()
184 def _fallback_next(it, default=_unspecified_next_default):
185 """Retrieve the next item from the iterator by calling its
186 next() method. If default is given, it is returned if the
187 iterator is exhausted, otherwise StopIteration is raised."""
189 if default is _unspecified_next_default:
194 except StopIteration:
197 if sys.version_info < (2, 6):
198 next = _fallback_next
201 def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
203 samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
205 samekey = operator.eq
207 total = sum(len(it) for it in iters)
208 iters = (iter(it) for it in iters)
209 heap = ((next(it, None),it) for it in iters)
210 heap = [(e,it) for e,it in heap if e]
215 if not count % pfreq:
218 if not samekey(e, pe):
223 e = it.next() # Don't use next() function, it's too expensive
224 except StopIteration:
225 heapq.heappop(heap) # remove current
227 heapq.heapreplace(heap, (e, it)) # shift current to new location
232 """Delete a file at path 'f' if it currently exists.
234 Unlike os.unlink(), does not throw an exception if the file didn't already
240 if e.errno != errno.ENOENT:
244 def readpipe(argv, preexec_fn=None, shell=False):
245 """Run a subprocess and return its output."""
246 p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn,
248 out, err = p.communicate()
249 if p.returncode != 0:
250 raise Exception('subprocess %r failed with status %d'
251 % (' '.join(argv), p.returncode))
255 def _argmax_base(command):
258 base_size += len(command) + 1
259 for k, v in environ.iteritems():
260 base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
264 def _argmax_args_size(args):
265 return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
268 def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
269 """If args is not empty, yield the output produced by calling the
270 command list with args as a sequence of strings (It may be necessary
271 to return multiple strings in order to respect ARG_MAX)."""
272 # The optional arg_max arg is a workaround for an issue with the
273 # current wvtest behavior.
274 base_size = _argmax_base(command)
276 room = arg_max - base_size
279 next_size = _argmax_args_size(args[i:i+1])
280 if room - next_size < 0:
286 assert(len(sub_args))
287 yield readpipe(command + sub_args, preexec_fn=preexec_fn)
290 def resolve_parent(p):
291 """Return the absolute path of a file without following any final symlink.
293 Behaves like os.path.realpath, but doesn't follow a symlink for the last
294 element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
295 will follow symlinks in p's directory)
301 if st and stat.S_ISLNK(st.st_mode):
302 (dir, name) = os.path.split(p)
303 dir = os.path.realpath(dir)
304 out = os.path.join(dir, name)
306 out = os.path.realpath(p)
307 #log('realpathing:%r,%r\n' % (p, out))
def detect_fakeroot():
    """Return True if we appear to be running under fakeroot.

    fakeroot puts FAKEROOTKEY into the environment of the processes it
    wraps, so the presence of that variable is the telltale.
    """
    # PEP 8: compare against None with "is not", not "!=".
    return os.getenv("FAKEROOTKEY") is not None
316 _warned_about_superuser_detection = None
318 if sys.platform.startswith('cygwin'):
319 if sys.getwindowsversion()[0] > 5:
320 # Sounds like situation is much more complicated here
321 global _warned_about_superuser_detection
322 if not _warned_about_superuser_detection:
323 log("can't detect root status for OS version > 5; assuming not root")
324 _warned_about_superuser_detection = True
327 return ctypes.cdll.shell32.IsUserAnAdmin()
329 return os.geteuid() == 0
332 def _cache_key_value(get_value, key, cache):
333 """Return (value, was_cached). If there is a value in the cache
334 for key, use that, otherwise, call get_value(key) which should
335 throw a KeyError if there is no value -- in which case the cached
336 and returned value will be None.
338 try: # Do we already have it (or know there wasn't one)?
345 cache[key] = value = get_value(key)
351 _uid_to_pwd_cache = {}
352 _name_to_pwd_cache = {}
354 def pwd_from_uid(uid):
355 """Return password database entry for uid (may be a cached value).
356 Return None if no entry is found.
358 global _uid_to_pwd_cache, _name_to_pwd_cache
359 entry, cached = _cache_key_value(pwd.getpwuid, uid, _uid_to_pwd_cache)
360 if entry and not cached:
361 _name_to_pwd_cache[entry.pw_name] = entry
365 def pwd_from_name(name):
366 """Return password database entry for name (may be a cached value).
367 Return None if no entry is found.
369 global _uid_to_pwd_cache, _name_to_pwd_cache
370 entry, cached = _cache_key_value(pwd.getpwnam, name, _name_to_pwd_cache)
371 if entry and not cached:
372 _uid_to_pwd_cache[entry.pw_uid] = entry
376 _gid_to_grp_cache = {}
377 _name_to_grp_cache = {}
379 def grp_from_gid(gid):
    """Return group database entry for gid (may be a cached value).
381 Return None if no entry is found.
383 global _gid_to_grp_cache, _name_to_grp_cache
384 entry, cached = _cache_key_value(grp.getgrgid, gid, _gid_to_grp_cache)
385 if entry and not cached:
386 _name_to_grp_cache[entry.gr_name] = entry
390 def grp_from_name(name):
    """Return group database entry for name (may be a cached value).
392 Return None if no entry is found.
394 global _gid_to_grp_cache, _name_to_grp_cache
395 entry, cached = _cache_key_value(grp.getgrnam, name, _name_to_grp_cache)
396 if entry and not cached:
397 _gid_to_grp_cache[entry.gr_gid] = entry
403 """Get the user's login name."""
407 _username = pwd_from_uid(uid)[0] or 'user%d' % uid
413 """Get the user's full name."""
415 if not _userfullname:
417 entry = pwd_from_uid(uid)
419 _userfullname = entry[4].split(',')[0] or entry[0]
420 if not _userfullname:
421 _userfullname = 'user%d' % uid
427 """Get the FQDN of this machine."""
430 _hostname = socket.getfqdn()
434 _resource_path = None
435 def resource_path(subdir=''):
436 global _resource_path
437 if not _resource_path:
438 _resource_path = os.environ.get('BUP_RESOURCE_PATH') or '.'
439 return os.path.join(_resource_path, subdir)
441 def format_filesize(size):
446 exponent = int(math.log(size) / math.log(unit))
447 size_prefix = "KMGTPE"[exponent - 1]
448 return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
451 class NotOk(Exception):
456 def __init__(self, outp):
460 while self._read(65536): pass
462 def read(self, size):
463 """Read 'size' bytes from input stream."""
465 return self._read(size)
468 """Read from input stream until a newline is found."""
470 return self._readline()
472 def write(self, data):
473 """Write 'data' to output stream."""
474 #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
475 self.outp.write(data)
478 """Return true if input stream is readable."""
479 raise NotImplemented("Subclasses must implement has_input")
482 """Indicate end of output from last sent command."""
486 """Indicate server error to the client."""
487 s = re.sub(r'\s+', ' ', str(s))
488 self.write('\nerror %s\n' % s)
490 def _check_ok(self, onempty):
493 for rl in linereader(self):
494 #log('%d got line: %r\n' % (os.getpid(), rl))
495 if not rl: # empty line
499 elif rl.startswith('error '):
500 #log('client: error: %s\n' % rl[6:])
504 raise Exception('server exited unexpectedly; see errors above')
506 def drain_and_check_ok(self):
507 """Remove all data for the current command from input stream."""
510 return self._check_ok(onempty)
513 """Verify that server action completed successfully."""
515 raise Exception('expected "ok", got %r' % rl)
516 return self._check_ok(onempty)
519 class Conn(BaseConn):
520 def __init__(self, inp, outp):
521 BaseConn.__init__(self, outp)
524 def _read(self, size):
525 return self.inp.read(size)
528 return self.inp.readline()
531 [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
533 assert(rl[0] == self.inp.fileno())
539 def checked_reader(fd, n):
541 rl, _, _ = select.select([fd], [], [])
544 if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
549 MAX_PACKET = 128 * 1024
550 def mux(p, outfd, outr, errr):
553 while p.poll() is None:
554 rl, _, _ = select.select(fds, [], [])
557 buf = os.read(outr, MAX_PACKET)
559 os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
561 buf = os.read(errr, 1024)
563 os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
565 os.write(outfd, struct.pack('!IB', 0, 3))
568 class DemuxConn(BaseConn):
569 """A helper class for bup's client-server protocol."""
570 def __init__(self, infd, outp):
571 BaseConn.__init__(self, outp)
572 # Anything that comes through before the sync string was not
573 # multiplexed and can be assumed to be debug/log before mux init.
575 while tail != 'BUPMUX':
576 b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
578 raise IOError('demux: unexpected EOF during initialization')
580 sys.stderr.write(tail[:-6]) # pre-mux log messages
587 def write(self, data):
589 BaseConn.write(self, data)
591 def _next_packet(self, timeout):
592 if self.closed: return False
593 rl, wl, xl = select.select([self.infd], [], [], timeout)
594 if not rl: return False
595 assert(rl[0] == self.infd)
596 ns = ''.join(checked_reader(self.infd, 5))
597 n, fdw = struct.unpack('!IB', ns)
598 assert(n <= MAX_PACKET)
600 self.reader = checked_reader(self.infd, n)
602 for buf in checked_reader(self.infd, n):
603 sys.stderr.write(buf)
606 debug2("DemuxConn: marked closed\n")
609 def _load_buf(self, timeout):
610 if self.buf is not None:
612 while not self.closed:
613 while not self.reader:
614 if not self._next_packet(timeout):
617 self.buf = self.reader.next()
619 except StopIteration:
623 def _read_parts(self, ix_fn):
624 while self._load_buf(None):
625 assert(self.buf is not None)
627 if i is None or i == len(self.buf):
632 self.buf = self.buf[i:]
640 return buf.index('\n')+1
643 return ''.join(self._read_parts(find_eol))
645 def _read(self, size):
647 def until_size(buf): # Closes on csize
648 if len(buf) < csize[0]:
653 return ''.join(self._read_parts(until_size))
656 return self._load_buf(0)
660 """Generate a list of input lines from 'f' without terminating newlines."""
668 def chunkyreader(f, count = None):
669 """Generate a list of chunks of data read from 'f'.
671 If count is None, read until EOF is reached.
673 If count is a positive integer, read 'count' bytes from 'f'. If EOF is
674 reached while reading, raise IOError.
678 b = f.read(min(count, 65536))
680 raise IOError('EOF with %d bytes remaining' % count)
691 def atomically_replaced_file(name, mode='w', buffering=-1):
    """Yield a file that will be atomically renamed to name when leaving the block.
694 This contextmanager yields an open file object that is backed by a
695 temporary file which will be renamed (atomically) to the target
696 name if everything succeeds.
698 The mode and buffering arguments are handled exactly as with open,
699 and the yielded file will have very restrictive permissions, as
704 with atomically_replaced_file('foo.txt', 'w') as f:
705 f.write('hello jack.')
709 (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
710 text=('b' not in mode))
713 f = os.fdopen(ffd, mode, buffering)
721 os.rename(tempname, name)
723 unlink(tempname) # nonexistant file is ignored
    """Append "/" to 's' if it doesn't already end in "/"."""
728 if s and not s.endswith('/'):
734 def _mmap_do(f, sz, flags, prot, close):
736 st = os.fstat(f.fileno())
739 # trying to open a zero-length map gives an error, but an empty
740 # string has all the same behaviour of a zero-length map, ie. it has
743 map = mmap.mmap(f.fileno(), sz, flags, prot)
745 f.close() # map will persist beyond file close
749 def mmap_read(f, sz = 0, close=True):
750 """Create a read-only memory mapped region on file 'f'.
751 If sz is 0, the region will cover the entire file.
753 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
756 def mmap_readwrite(f, sz = 0, close=True):
757 """Create a read-write memory mapped region on file 'f'.
758 If sz is 0, the region will cover the entire file.
760 return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
764 def mmap_readwrite_private(f, sz = 0, close=True):
765 """Create a read-write memory mapped region on file 'f'.
766 If sz is 0, the region will cover the entire file.
767 The map is private, which means the changes are never flushed back to the
770 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
774 _mincore = getattr(_helpers, 'mincore', None)
776 # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
777 MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
# Cache for the mmap chunk size used by fmincore; filled in lazily by
# _set_fmincore_chunk_size().
_fmincore_chunk_size = None
def _set_fmincore_chunk_size():
    """Compute the number of bytes fmincore should mmap per iteration and
    store it in the module-level _fmincore_chunk_size cache.

    The result is the largest multiple of sc_page_size that does not
    exceed the preferred 64MB chunk, or sc_page_size itself if a single
    page is already at least that big.
    """
    global _fmincore_chunk_size
    pref_chunk_size = 64 * 1024 * 1024
    chunk_size = sc_page_size
    if (sc_page_size < pref_chunk_size):
        # NOTE: relies on Python 2 integer division ('/' truncates here)
        # to keep chunk_size an exact multiple of the page size.
        chunk_size = sc_page_size * (pref_chunk_size / sc_page_size)
    _fmincore_chunk_size = chunk_size
789 """Return the mincore() data for fd as a bytearray whose values can be
790 tested via MINCORE_INCORE, or None if fd does not fully
791 support the operation."""
793 if (st.st_size == 0):
795 if not _fmincore_chunk_size:
796 _set_fmincore_chunk_size()
797 pages_per_chunk = _fmincore_chunk_size / sc_page_size;
798 page_count = (st.st_size + sc_page_size - 1) / sc_page_size;
799 chunk_count = page_count / _fmincore_chunk_size
802 result = bytearray(page_count)
803 for ci in xrange(chunk_count):
804 pos = _fmincore_chunk_size * ci;
805 msize = min(_fmincore_chunk_size, st.st_size - pos)
807 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
808 except mmap.error as ex:
809 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
810 # Perhaps the file was a pipe, i.e. "... | bup split ..."
813 _mincore(m, msize, 0, result, ci * pages_per_chunk);
817 def parse_timestamp(epoch_str):
818 """Return the number of nanoseconds since the epoch that are described
819 by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
820 throw a ValueError that may contain additional information."""
821 ns_per = {'s' : 1000000000,
825 match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
827 if re.match(r'^([-+]?[0-9]+)$', epoch_str):
828 raise ValueError('must include units, i.e. 100ns, 100ms, ...')
830 (n, units) = match.group(1, 2)
834 return n * ns_per[units]
838 """Parse data size information into a float number.
840 Here are some examples of conversions:
841 199.2k means 203981 bytes
842 1GB means 1073741824 bytes
843 2.1 tb means 2199023255552 bytes
845 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
847 raise ValueError("can't parse %r as a number" % s)
848 (val, unit) = g.groups()
851 if unit in ['t', 'tb']:
852 mult = 1024*1024*1024*1024
853 elif unit in ['g', 'gb']:
854 mult = 1024*1024*1024
855 elif unit in ['m', 'mb']:
857 elif unit in ['k', 'kb']:
859 elif unit in ['', 'b']:
862 raise ValueError("invalid unit %r in number %r" % (unit, s))
867 """Count the number of elements in an iterator. (consumes the iterator)"""
868 return reduce(lambda x,y: x+1, l)
873 """Append an error message to the list of saved errors.
875 Once processing is able to stop and output the errors, the saved errors are
876 accessible in the module variable helpers.saved_errors.
878 saved_errors.append(e)
887 def die_if_errors(msg=None, status=1):
891 msg = 'warning: %d errors encountered\n' % len(saved_errors)
897 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
899 The new exception handler will make sure that bup will exit without an ugly
900 stacktrace when Ctrl-C is hit.
902 oldhook = sys.excepthook
903 def newhook(exctype, value, traceback):
904 if exctype == KeyboardInterrupt:
905 log('\nInterrupted.\n')
907 return oldhook(exctype, value, traceback)
908 sys.excepthook = newhook
911 def columnate(l, prefix):
912 """Format elements of 'l' in columns with 'prefix' leading each line.
914 The number of columns is determined automatically based on the string
920 clen = max(len(s) for s in l)
921 ncols = (tty_width() - len(prefix)) / (clen + 2)
926 while len(l) % ncols:
929 for s in range(0, len(l), rows):
930 cols.append(l[s:s+rows])
932 for row in zip(*cols):
933 out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
937 def parse_date_or_fatal(str, fatal):
938 """Parses the given date or calls Option.fatal().
939 For now we expect a string that contains a float."""
942 except ValueError as e:
943 raise fatal('invalid date format (should be a float): %r' % e)
948 def parse_excludes(options, fatal):
949 """Traverse the options and extract all excludes, or call Option.fatal()."""
953 (option, parameter) = flag
954 if option == '--exclude':
955 excluded_paths.append(resolve_parent(parameter))
956 elif option == '--exclude-from':
958 f = open(resolve_parent(parameter))
960 raise fatal("couldn't read %s" % parameter)
961 for exclude_path in f.readlines():
962 # FIXME: perhaps this should be rstrip('\n')
963 exclude_path = resolve_parent(exclude_path.strip())
965 excluded_paths.append(exclude_path)
966 return sorted(frozenset(excluded_paths))
969 def parse_rx_excludes(options, fatal):
970 """Traverse the options and extract all rx excludes, or call
972 excluded_patterns = []
975 (option, parameter) = flag
976 if option == '--exclude-rx':
978 excluded_patterns.append(re.compile(parameter))
979 except re.error as ex:
980 fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
981 elif option == '--exclude-rx-from':
983 f = open(resolve_parent(parameter))
985 raise fatal("couldn't read %s" % parameter)
986 for pattern in f.readlines():
987 spattern = pattern.rstrip('\n')
991 excluded_patterns.append(re.compile(spattern))
992 except re.error as ex:
993 fatal('invalid --exclude-rx pattern (%s): %s' % (spattern, ex))
994 return excluded_patterns
997 def should_rx_exclude_path(path, exclude_rxs):
998 """Return True if path matches a regular expression in exclude_rxs."""
999 for rx in exclude_rxs:
1001 debug1('Skipping %r: excluded by rx pattern %r.\n'
1002 % (path, rx.pattern))
1007 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
1008 # that resolve against the current filesystem in the strip/graft
1009 # functions for example, but elsewhere as well. I suspect bup's not
1010 # always being careful about that. For some cases, the contents of
1011 # the current filesystem should be irrelevant, and consulting it might
1012 # produce the wrong result, perhaps via unintended symlink resolution,
1015 def path_components(path):
1016 """Break path into a list of pairs of the form (name,
1017 full_path_to_name). Path must start with '/'.
1019 '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
1020 if not path.startswith('/'):
1021 raise Exception, 'path must start with "/": %s' % path
1022 # Since we assume path startswith('/'), we can skip the first element.
1023 result = [('', '/')]
1024 norm_path = os.path.abspath(path)
1025 if norm_path == '/':
1028 for p in norm_path.split('/')[1:]:
1029 full_path += '/' + p
1030 result.append((p, full_path))
1034 def stripped_path_components(path, strip_prefixes):
1035 """Strip any prefix in strip_prefixes from path and return a list
1036 of path components where each component is (name,
1037 none_or_full_fs_path_to_name). Assume path startswith('/').
1038 See thelpers.py for examples."""
1039 normalized_path = os.path.abspath(path)
1040 sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
1041 for bp in sorted_strip_prefixes:
1042 normalized_bp = os.path.abspath(bp)
1043 if normalized_bp == '/':
1045 if normalized_path.startswith(normalized_bp):
1046 prefix = normalized_path[:len(normalized_bp)]
1048 for p in normalized_path[len(normalized_bp):].split('/'):
1052 result.append((p, prefix))
1055 return path_components(path)
1058 def grafted_path_components(graft_points, path):
1059 # Create a result that consists of some number of faked graft
1060 # directories before the graft point, followed by all of the real
1061 # directories from path that are after the graft point. Arrange
1062 # for the directory at the graft point in the result to correspond
1063 # to the "orig" directory in --graft orig=new. See t/thelpers.py
1064 # for some examples.
1066 # Note that given --graft orig=new, orig and new have *nothing* to
1067 # do with each other, even if some of their component names
1068 # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
1069 # equivalent to --graft /foo/bar/baz=/x/y/z, or even
1072 # FIXME: This can't be the best solution...
1073 clean_path = os.path.abspath(path)
1074 for graft_point in graft_points:
1075 old_prefix, new_prefix = graft_point
1076 # Expand prefixes iff not absolute paths.
1077 old_prefix = os.path.normpath(old_prefix)
1078 new_prefix = os.path.normpath(new_prefix)
1079 if clean_path.startswith(old_prefix):
1080 escaped_prefix = re.escape(old_prefix)
1081 grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path)
1082 # Handle /foo=/ (at least) -- which produces //whatever.
1083 grafted_path = '/' + grafted_path.lstrip('/')
1084 clean_path_components = path_components(clean_path)
1085 # Count the components that were stripped.
1086 strip_count = 0 if old_prefix == '/' else old_prefix.count('/')
1087 new_prefix_parts = new_prefix.split('/')
1088 result_prefix = grafted_path.split('/')[:new_prefix.count('/')]
1089 result = [(p, None) for p in result_prefix] \
1090 + clean_path_components[strip_count:]
1091 # Now set the graft point name to match the end of new_prefix.
1092 graft_point = len(result_prefix)
1093 result[graft_point] = \
1094 (new_prefix_parts[-1], clean_path_components[strip_count][1])
1095 if new_prefix == '/': # --graft ...=/ is a special case.
1098 return path_components(clean_path)
1104 _localtime = getattr(_helpers, 'localtime', None)
1107 bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
1108 'tm_hour', 'tm_min', 'tm_sec',
1109 'tm_wday', 'tm_yday',
1110 'tm_isdst', 'tm_gmtoff', 'tm_zone'])
1112 # Define a localtime() that returns bup_time when possible. Note:
1113 # this means that any helpers.localtime() results may need to be
1114 # passed through to_py_time() before being passed to python's time
1115 # module, which doesn't appear willing to ignore the extra items.
1117 def localtime(time):
1118 return bup_time(*_helpers.localtime(time))
1119 def utc_offset_str(t):
1120 """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
1121 If the current UTC offset does not represent an integer number
1122 of minutes, the fractional component will be truncated."""
1123 off = localtime(t).tm_gmtoff
1124 # Note: // doesn't truncate like C for negative values, it rounds down.
1125 offmin = abs(off) // 60
1127 h = (offmin - m) // 60
1128 return "%+03d%02d" % (-h if off < 0 else h, m)
1130 if isinstance(x, time.struct_time):
1132 return time.struct_time(x[:9])
1134 localtime = time.localtime
1135 def utc_offset_str(t):
1136 return time.strftime('%z', localtime(t))
1141 _some_invalid_save_parts_rx = re.compile(r'[[ ~^:?*\\]|\.\.|//|@{')
1143 def valid_save_name(name):
1144 # Enforce a superset of the restrictions in git-check-ref-format(1)
1146 or name.startswith('/') or name.endswith('/') \
1147 or name.endswith('.'):
1149 if _some_invalid_save_parts_rx.search(name):
1152 if ord(c) < 0x20 or ord(c) == 0x7f:
1154 for part in name.split('/'):
1155 if part.startswith('.') or part.endswith('.lock'):
1160 _period_rx = re.compile(r'^([0-9]+)(s|min|h|d|w|m|y)$')
1162 def period_as_secs(s):
1165 match = _period_rx.match(s)
1168 mag = int(match.group(1))
1169 scale = match.group(2)
1170 return mag * {'s': 1,
1174 'w': 60 * 60 * 24 * 7,
1175 'm': 60 * 60 * 24 * 31,
1176 'y': 60 * 60 * 24 * 366}[scale]