1 """Helper functions and classes for bup."""
3 from collections import namedtuple
4 from ctypes import sizeof, c_void_p
6 from contextlib import contextmanager
7 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
8 import hashlib, heapq, math, operator, time, grp, tempfile
10 from bup import _helpers
14 """Helper to deal with Python scoping issues"""
18 sc_page_size = os.sysconf('SC_PAGE_SIZE')
19 assert(sc_page_size > 0)
21 sc_arg_max = os.sysconf('SC_ARG_MAX')
22 if sc_arg_max == -1: # "no definite limit" - let's choose 2M
23 sc_arg_max = 2 * 1024 * 1024
25 # This function should really be in helpers, not in bup.options. But we
26 # want options.py to be standalone so people can include it in other projects.
27 from bup.options import _tty_width
28 tty_width = _tty_width
32 """Convert the string 's' to an integer. Return 0 if s is not a number."""
40 """Convert the string 's' to a float. Return 0 if s is not a number."""
42 return float(s or '0')
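# Illustrative behaviour, per the docstrings above (a sketch, not from the
# original source): atoi('42') == 42, atoi('bogus') == 0, atof('1.5') == 1.5,
# and atof('') == 0.0.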
47 buglvl = atoi(os.environ.get('BUP_DEBUG', 0))
51 _fdatasync = os.fdatasync
52 except AttributeError:
55 if sys.platform.startswith('darwin'):
56 # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
60 return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
62 # Fallback for file systems (SMB) that do not support F_FULLFSYNC
63 if e.errno == errno.ENOTSUP:
68 fdatasync = _fdatasync
71 def partition(predicate, stream):
72 """Returns (leading_matches_it, rest_it), where leading_matches_it
73 must be completely exhausted before traversing rest_it.
78 ns.first_nonmatch = None
79 def leading_matches():
84 ns.first_nonmatch = (x,)
88 yield ns.first_nonmatch[0]
91 return (leading_matches(), rest())
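# Illustrative usage (a sketch; the input values are made up). The matching
# prefix must be drained before 'rest' is touched:
#   matches, rest = partition(lambda x: x % 2 == 0, iter([2, 4, 6, 7, 8]))
#   list(matches) == [2, 4, 6]
#   list(rest) == [7, 8]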
94 def stat_if_exists(path):
98 if e.errno != errno.ENOENT:
103 # Write (blockingly) to sockets that may or may not be in blocking mode.
104 # We need this because our stderr is sometimes eaten by subprocesses
105 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
106 # leading to race conditions. Ick. We'll do it the hard way.
107 def _hard_write(fd, buf):
109 (r,w,x) = select.select([], [fd], [], None)
111 raise IOError('select(fd) returned without being writable')
113 sz = os.write(fd, buf)
115 if e.errno != errno.EAGAIN:
123 """Print a log message to stderr."""
126 _hard_write(sys.stderr.fileno(), s)
140 istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
141 istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
144 """Calls log() if stderr is a TTY. Does nothing otherwise."""
145 global _last_progress
152 """Calls progress() only if we haven't printed progress in a while.
154 This avoids overloading the stderr buffer with excess junk.
158 if now - _last_prog > 0.1:
164 """Calls progress() to redisplay the most recent progress message.
166 Useful after you've printed some other message that wipes out the
169 if _last_progress and _last_progress.endswith('\r'):
170 progress(_last_progress)
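# Typical usage pattern (illustrative only; the messages are made up): route
# frequent updates through qprogress(), which rate-limits to roughly one
# write per 0.1s, and use progress() for messages that should always appear
# on a TTY:
#   qprogress('Indexing: %d files\r' % count)
#   progress('Indexing: %d files, done.\n' % count)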
173 def mkdirp(d, mode=None):
174 """Recursively create directories on path 'd'.
176 Unlike os.makedirs(), it doesn't raise an exception if the last element of
177 the path already exists.
185 if e.errno == errno.EEXIST:
191 _unspecified_next_default = object()
193 def _fallback_next(it, default=_unspecified_next_default):
194 """Retrieve the next item from the iterator by calling its
195 next() method. If default is given, it is returned if the
196 iterator is exhausted, otherwise StopIteration is raised."""
198 if default is _unspecified_next_default:
203 except StopIteration:
206 if sys.version_info < (2, 6):
207 next = _fallback_next
210 def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
212 samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
214 samekey = operator.eq
216 total = sum(len(it) for it in iters)
217 iters = (iter(it) for it in iters)
218 heap = ((next(it, None),it) for it in iters)
219 heap = [(e,it) for e,it in heap if e]
224 if not count % pfreq:
227 if not samekey(e, pe):
232 e = it.next() # Don't use next() function, it's too expensive
233 except StopIteration:
234 heapq.heappop(heap) # remove current
236 heapq.heapreplace(heap, (e, it)) # shift current to new location
241 """Delete a file at path 'f' if it currently exists.
243 Unlike os.unlink(), does not throw an exception if the file didn't already
249 if e.errno != errno.ENOENT:
253 def readpipe(argv, preexec_fn=None, shell=False):
254 """Run a subprocess and return its output."""
255 p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn,
257 out, err = p.communicate()
258 if p.returncode != 0:
259 raise Exception('subprocess %r failed with status %d'
260 % (' '.join(argv), p.returncode))
264 def _argmax_base(command):
267 base_size += len(command) + 1
268 for k, v in environ.iteritems():
269 base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
273 def _argmax_args_size(args):
274 return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
277 def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
278 """If args is not empty, yield the output produced by calling the
279 command list with args as a sequence of strings (It may be necessary
280 to return multiple strings in order to respect ARG_MAX)."""
281 # The optional arg_max arg is a workaround for an issue with the
282 # current wvtest behavior.
283 base_size = _argmax_base(command)
285 room = arg_max - base_size
288 next_size = _argmax_args_size(args[i:i+1])
289 if room - next_size < 0:
295 assert(len(sub_args))
296 yield readpipe(command + sub_args, preexec_fn=preexec_fn)
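# Illustrative usage (a sketch; the command and paths are hypothetical). Each
# yielded string is the stdout of one readpipe() call, with args split across
# calls so every command line stays under arg_max:
#   for out in batchpipe(['du', '-s'], all_paths):
#       sys.stdout.write(out)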
299 def resolve_parent(p):
300 """Return the absolute path of a file without following any final symlink.
302 Behaves like os.path.realpath, but doesn't follow a symlink for the last
303 element. (i.e. if 'p' itself is a symlink, this function won't follow it,
304 but it will follow symlinks in p's directory.)
310 if st and stat.S_ISLNK(st.st_mode):
311 (dir, name) = os.path.split(p)
312 dir = os.path.realpath(dir)
313 out = os.path.join(dir, name)
315 out = os.path.realpath(p)
316 #log('realpathing:%r,%r\n' % (p, out))
320 def detect_fakeroot():
321 "Return True if we appear to be running under fakeroot."
322 return os.getenv("FAKEROOTKEY") is not None
325 _warned_about_superuser_detection = None
327 if sys.platform.startswith('cygwin'):
328 if sys.getwindowsversion()[0] > 5:
329 # Sounds like situation is much more complicated here
330 global _warned_about_superuser_detection
331 if not _warned_about_superuser_detection:
332 log("can't detect root status for OS version > 5; assuming not root")
333 _warned_about_superuser_detection = True
336 return ctypes.cdll.shell32.IsUserAnAdmin()
338 return os.geteuid() == 0
341 def _cache_key_value(get_value, key, cache):
342 """Return (value, was_cached). If there is a value in the cache
343 for key, use that, otherwise, call get_value(key) which should
344 throw a KeyError if there is no value -- in which case the cached
345 and returned value will be None.
347 try: # Do we already have it (or know there wasn't one)?
354 cache[key] = value = get_value(key)
360 _uid_to_pwd_cache = {}
361 _name_to_pwd_cache = {}
363 def pwd_from_uid(uid):
364 """Return password database entry for uid (may be a cached value).
365 Return None if no entry is found.
367 global _uid_to_pwd_cache, _name_to_pwd_cache
368 entry, cached = _cache_key_value(pwd.getpwuid, uid, _uid_to_pwd_cache)
369 if entry and not cached:
370 _name_to_pwd_cache[entry.pw_name] = entry
374 def pwd_from_name(name):
375 """Return password database entry for name (may be a cached value).
376 Return None if no entry is found.
378 global _uid_to_pwd_cache, _name_to_pwd_cache
379 entry, cached = _cache_key_value(pwd.getpwnam, name, _name_to_pwd_cache)
380 if entry and not cached:
381 _uid_to_pwd_cache[entry.pw_uid] = entry
385 _gid_to_grp_cache = {}
386 _name_to_grp_cache = {}
388 def grp_from_gid(gid):
389 """Return password database entry for gid (may be a cached value).
390 Return None if no entry is found.
392 global _gid_to_grp_cache, _name_to_grp_cache
393 entry, cached = _cache_key_value(grp.getgrgid, gid, _gid_to_grp_cache)
394 if entry and not cached:
395 _name_to_grp_cache[entry.gr_name] = entry
399 def grp_from_name(name):
400 """Return password database entry for name (may be a cached value).
401 Return None if no entry is found.
403 global _gid_to_grp_cache, _name_to_grp_cache
404 entry, cached = _cache_key_value(grp.getgrnam, name, _name_to_grp_cache)
405 if entry and not cached:
406 _gid_to_grp_cache[entry.gr_gid] = entry
412 """Get the user's login name."""
416 _username = pwd_from_uid(uid)[0] or 'user%d' % uid
422 """Get the user's full name."""
424 if not _userfullname:
426 entry = pwd_from_uid(uid)
428 _userfullname = entry[4].split(',')[0] or entry[0]
429 if not _userfullname:
430 _userfullname = 'user%d' % uid
436 """Get the FQDN of this machine."""
439 _hostname = socket.getfqdn()
443 _resource_path = None
444 def resource_path(subdir=''):
445 global _resource_path
446 if not _resource_path:
447 _resource_path = os.environ.get('BUP_RESOURCE_PATH') or '.'
448 return os.path.join(_resource_path, subdir)
450 def format_filesize(size):
455 exponent = int(math.log(size) / math.log(unit))
456 size_prefix = "KMGTPE"[exponent - 1]
457 return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
460 class NotOk(Exception):
465 def __init__(self, outp):
469 while self._read(65536): pass
471 def read(self, size):
472 """Read 'size' bytes from input stream."""
474 return self._read(size)
477 """Read from input stream until a newline is found."""
479 return self._readline()
481 def write(self, data):
482 """Write 'data' to output stream."""
483 #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
484 self.outp.write(data)
487 """Return true if input stream is readable."""
488 raise NotImplementedError("Subclasses must implement has_input")
491 """Indicate end of output from last sent command."""
495 """Indicate server error to the client."""
496 s = re.sub(r'\s+', ' ', str(s))
497 self.write('\nerror %s\n' % s)
499 def _check_ok(self, onempty):
502 for rl in linereader(self):
503 #log('%d got line: %r\n' % (os.getpid(), rl))
504 if not rl: # empty line
508 elif rl.startswith('error '):
509 #log('client: error: %s\n' % rl[6:])
513 raise Exception('server exited unexpectedly; see errors above')
515 def drain_and_check_ok(self):
516 """Remove all data for the current command from input stream."""
519 return self._check_ok(onempty)
522 """Verify that server action completed successfully."""
524 raise Exception('expected "ok", got %r' % rl)
525 return self._check_ok(onempty)
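# Protocol sketch, as implied by the methods above (not an authoritative
# spec): after each command the server sends a status line -- 'ok' on
# success, or 'error <message>' on failure, which _check_ok() surfaces to
# callers as a NotOk exception.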
528 class Conn(BaseConn):
529 def __init__(self, inp, outp):
530 BaseConn.__init__(self, outp)
533 def _read(self, size):
534 return self.inp.read(size)
537 return self.inp.readline()
540 [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
542 assert(rl[0] == self.inp.fileno())
548 def checked_reader(fd, n):
550 rl, _, _ = select.select([fd], [], [])
553 if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
558 MAX_PACKET = 128 * 1024
559 def mux(p, outfd, outr, errr):
562 while p.poll() is None:
563 rl, _, _ = select.select(fds, [], [])
566 buf = os.read(outr, MAX_PACKET)
568 os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
570 buf = os.read(errr, 1024)
572 os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
574 os.write(outfd, struct.pack('!IB', 0, 3))
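# Packet framing shared by mux() and DemuxConn, as read off the struct
# pack/unpack calls: a 5-byte '!IB' header (big-endian 32-bit payload length
# plus a one-byte stream id: 1 = stdout, 2 = stderr, 3 = end of stream)
# followed by at most MAX_PACKET bytes of payload.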
577 class DemuxConn(BaseConn):
578 """A helper class for bup's client-server protocol."""
579 def __init__(self, infd, outp):
580 BaseConn.__init__(self, outp)
581 # Anything that comes through before the sync string was not
582 # multiplexed and can be assumed to be debug/log before mux init.
584 while tail != 'BUPMUX':
585 b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
587 raise IOError('demux: unexpected EOF during initialization')
589 sys.stderr.write(tail[:-6]) # pre-mux log messages
596 def write(self, data):
598 BaseConn.write(self, data)
600 def _next_packet(self, timeout):
601 if self.closed: return False
602 rl, wl, xl = select.select([self.infd], [], [], timeout)
603 if not rl: return False
604 assert(rl[0] == self.infd)
605 ns = ''.join(checked_reader(self.infd, 5))
606 n, fdw = struct.unpack('!IB', ns)
607 assert(n <= MAX_PACKET)
609 self.reader = checked_reader(self.infd, n)
611 for buf in checked_reader(self.infd, n):
612 sys.stderr.write(buf)
615 debug2("DemuxConn: marked closed\n")
618 def _load_buf(self, timeout):
619 if self.buf is not None:
621 while not self.closed:
622 while not self.reader:
623 if not self._next_packet(timeout):
626 self.buf = self.reader.next()
628 except StopIteration:
632 def _read_parts(self, ix_fn):
633 while self._load_buf(None):
634 assert(self.buf is not None)
636 if i is None or i == len(self.buf):
641 self.buf = self.buf[i:]
649 return buf.index('\n')+1
652 return ''.join(self._read_parts(find_eol))
654 def _read(self, size):
656 def until_size(buf): # Closes on csize
657 if len(buf) < csize[0]:
662 return ''.join(self._read_parts(until_size))
665 return self._load_buf(0)
669 """Generate a list of input lines from 'f' without terminating newlines."""
677 def chunkyreader(f, count = None):
678 """Generate a list of chunks of data read from 'f'.
680 If count is None, read until EOF is reached.
682 If count is a positive integer, read 'count' bytes from 'f'. If EOF is
683 reached while reading, raise IOError.
687 b = f.read(min(count, 65536))
689 raise IOError('EOF with %d bytes remaining' % count)
700 def atomically_replaced_file(name, mode='w', buffering=-1):
701 """Yield a file that will be atomically renamed name when leaving the block.
703 This contextmanager yields an open file object that is backed by a
704 temporary file which will be renamed (atomically) to the target
705 name if everything succeeds.
707 The mode and buffering arguments are handled exactly as with open,
708 and the yielded file will have very restrictive permissions, as
713 with atomically_replaced_file('foo.txt', 'w') as f:
714 f.write('hello jack.')
718 (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
719 text=('b' not in mode))
722 f = os.fdopen(ffd, mode, buffering)
730 os.rename(tempname, name)
732 unlink(tempname) # nonexistant file is ignored
736 """Append "/" to 's' if it doesn't aleady end in "/"."""
737 if s and not s.endswith('/'):
743 def _mmap_do(f, sz, flags, prot, close):
745 st = os.fstat(f.fileno())
748 # trying to open a zero-length map gives an error, but an empty
749 # string has all the same behaviour as a zero-length map, i.e. it has
752 map = mmap.mmap(f.fileno(), sz, flags, prot)
754 f.close() # map will persist beyond file close
758 def mmap_read(f, sz = 0, close=True):
759 """Create a read-only memory mapped region on file 'f'.
760 If sz is 0, the region will cover the entire file.
762 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
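# Illustrative usage (a sketch; 'some.file' is hypothetical). By default the
# caller's file object is closed for us once the map exists, since the map
# persists beyond the file close:
#   m = mmap_read(open('some.file', 'rb'))
#   header = m[:8]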
765 def mmap_readwrite(f, sz = 0, close=True):
766 """Create a read-write memory mapped region on file 'f'.
767 If sz is 0, the region will cover the entire file.
769 return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
773 def mmap_readwrite_private(f, sz = 0, close=True):
774 """Create a read-write memory mapped region on file 'f'.
775 If sz is 0, the region will cover the entire file.
776 The map is private, which means the changes are never flushed back to the
779 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
783 _mincore = getattr(_helpers, 'mincore', None)
785 # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
786 MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
788 _fmincore_chunk_size = None
789 def _set_fmincore_chunk_size():
790 global _fmincore_chunk_size
791 pref_chunk_size = 64 * 1024 * 1024
792 chunk_size = sc_page_size
793 if (sc_page_size < pref_chunk_size):
794 chunk_size = sc_page_size * (pref_chunk_size / sc_page_size)
795 _fmincore_chunk_size = chunk_size
798 """Return the mincore() data for fd as a bytearray whose values can be
799 tested via MINCORE_INCORE, or None if fd does not fully
800 support the operation."""
802 if (st.st_size == 0):
804 if not _fmincore_chunk_size:
805 _set_fmincore_chunk_size()
806 pages_per_chunk = _fmincore_chunk_size / sc_page_size;
807 page_count = (st.st_size + sc_page_size - 1) / sc_page_size;
808 chunk_count = (page_count + pages_per_chunk - 1) / pages_per_chunk
811 result = bytearray(page_count)
812 for ci in xrange(chunk_count):
813 pos = _fmincore_chunk_size * ci;
814 msize = min(_fmincore_chunk_size, st.st_size - pos)
816 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
817 except mmap.error as ex:
818 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
819 # Perhaps the file was a pipe, i.e. "... | bup split ..."
822 _mincore(m, msize, 0, result, ci * pages_per_chunk);
826 def parse_timestamp(epoch_str):
827 """Return the number of nanoseconds since the epoch that are described
828 by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
829 throw a ValueError that may contain additional information."""
830 ns_per = {'s' : 1000000000,
834 match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
836 if re.match(r'^([-+]?[0-9]+)$', epoch_str):
837 raise ValueError('must include units, i.e. 100ns, 100ms, ...')
839 (n, units) = match.group(1, 2)
843 return n * ns_per[units]
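# Illustrative conversions (a sketch; assumes the elided ns_per entries give
# ms/us/ns their usual values): parse_timestamp('100ns') == 100,
# parse_timestamp('2ms') == 2000000, and parse_timestamp('100') raises
# ValueError because the units are missing.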
847 """Parse data size information into a float number.
849 Here are some examples of conversions:
850 199.2k means 203981 bytes
851 1GB means 1073741824 bytes
852 2.1 tb means 2199023255552 bytes
854 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
856 raise ValueError("can't parse %r as a number" % s)
857 (val, unit) = g.groups()
860 if unit in ['t', 'tb']:
861 mult = 1024*1024*1024*1024
862 elif unit in ['g', 'gb']:
863 mult = 1024*1024*1024
864 elif unit in ['m', 'mb']:
866 elif unit in ['k', 'kb']:
868 elif unit in ['', 'b']:
871 raise ValueError("invalid unit %r in number %r" % (unit, s))
876 """Count the number of elements in an iterator. (consumes the iterator)"""
877 return reduce(lambda x,y: x+1, l, 0)
882 """Append an error message to the list of saved errors.
884 Once processing is able to stop and output the errors, the saved errors are
885 accessible in the module variable helpers.saved_errors.
887 saved_errors.append(e)
896 def die_if_errors(msg=None, status=1):
900 msg = 'warning: %d errors encountered\n' % len(saved_errors)
906 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
908 The new exception handler will make sure that bup will exit without an ugly
909 stacktrace when Ctrl-C is hit.
911 oldhook = sys.excepthook
912 def newhook(exctype, value, traceback):
913 if exctype == KeyboardInterrupt:
914 log('\nInterrupted.\n')
916 return oldhook(exctype, value, traceback)
917 sys.excepthook = newhook
920 def columnate(l, prefix):
921 """Format elements of 'l' in columns with 'prefix' leading each line.
923 The number of columns is determined automatically based on the string
929 clen = max(len(s) for s in l)
930 ncols = (tty_width() - len(prefix)) / (clen + 2)
935 while len(l) % ncols:
938 for s in range(0, len(l), rows):
939 cols.append(l[s:s+rows])
941 for row in zip(*cols):
942 out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
946 def parse_date_or_fatal(str, fatal):
947 """Parses the given date or calls Option.fatal().
948 For now we expect a string that contains a float."""
951 except ValueError as e:
952 raise fatal('invalid date format (should be a float): %r' % e)
957 def parse_excludes(options, fatal):
958 """Traverse the options and extract all excludes, or call Option.fatal()."""
962 (option, parameter) = flag
963 if option == '--exclude':
964 excluded_paths.append(resolve_parent(parameter))
965 elif option == '--exclude-from':
967 f = open(resolve_parent(parameter))
969 raise fatal("couldn't read %s" % parameter)
970 for exclude_path in f.readlines():
971 # FIXME: perhaps this should be rstrip('\n')
972 exclude_path = resolve_parent(exclude_path.strip())
974 excluded_paths.append(exclude_path)
975 return sorted(frozenset(excluded_paths))
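# Illustrative behaviour (a sketch; the flag list is made up): given
#   [('--exclude', '/tmp/junk'), ('--exclude', '/tmp/junk')]
# the duplicate collapses via frozenset() and the result is a sorted list of
# resolve_parent()-normalized paths.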
978 def parse_rx_excludes(options, fatal):
979 """Traverse the options and extract all rx excludes, or call
981 excluded_patterns = []
984 (option, parameter) = flag
985 if option == '--exclude-rx':
987 excluded_patterns.append(re.compile(parameter))
988 except re.error as ex:
989 fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
990 elif option == '--exclude-rx-from':
992 f = open(resolve_parent(parameter))
994 raise fatal("couldn't read %s" % parameter)
995 for pattern in f.readlines():
996 spattern = pattern.rstrip('\n')
1000 excluded_patterns.append(re.compile(spattern))
1001 except re.error as ex:
1002 fatal('invalid --exclude-rx pattern (%s): %s' % (spattern, ex))
1003 return excluded_patterns
1006 def should_rx_exclude_path(path, exclude_rxs):
1007 """Return True if path matches a regular expression in exclude_rxs."""
1008 for rx in exclude_rxs:
1010 debug1('Skipping %r: excluded by rx pattern %r.\n'
1011 % (path, rx.pattern))
1016 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
1017 # that resolve against the current filesystem in the strip/graft
1018 # functions for example, but elsewhere as well. I suspect bup's not
1019 # always being careful about that. For some cases, the contents of
1020 # the current filesystem should be irrelevant, and consulting it might
1021 # produce the wrong result, perhaps via unintended symlink resolution,
1024 def path_components(path):
1025 """Break path into a list of pairs of the form (name,
1026 full_path_to_name). Path must start with '/'.
1028 '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
1029 if not path.startswith('/'):
1030 raise Exception('path must start with "/": %s' % path)
1031 # Since we assume path startswith('/'), we can skip the first element.
1032 result = [('', '/')]
1033 norm_path = os.path.abspath(path)
1034 if norm_path == '/':
1037 for p in norm_path.split('/')[1:]:
1038 full_path += '/' + p
1039 result.append((p, full_path))
1043 def stripped_path_components(path, strip_prefixes):
1044 """Strip any prefix in strip_prefixes from path and return a list
1045 of path components where each component is (name,
1046 none_or_full_fs_path_to_name). Assume path startswith('/').
1047 See thelpers.py for examples."""
1048 normalized_path = os.path.abspath(path)
1049 sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
1050 for bp in sorted_strip_prefixes:
1051 normalized_bp = os.path.abspath(bp)
1052 if normalized_bp == '/':
1054 if normalized_path.startswith(normalized_bp):
1055 prefix = normalized_path[:len(normalized_bp)]
1057 for p in normalized_path[len(normalized_bp):].split('/'):
1061 result.append((p, prefix))
1064 return path_components(path)
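# Illustrative behaviour (a sketch mirroring the t/thelpers.py tests; assumes
# the elided loop body extends 'prefix' one component at a time):
#   stripped_path_components('/home/foo/bar', ['/home/foo'])
#     == [('', '/home/foo'), ('bar', '/home/foo/bar')]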
1067 def grafted_path_components(graft_points, path):
1068 # Create a result that consists of some number of faked graft
1069 # directories before the graft point, followed by all of the real
1070 # directories from path that are after the graft point. Arrange
1071 # for the directory at the graft point in the result to correspond
1072 # to the "orig" directory in --graft orig=new. See t/thelpers.py
1073 # for some examples.
1075 # Note that given --graft orig=new, orig and new have *nothing* to
1076 # do with each other, even if some of their component names
1077 # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
1078 # equivalent to --graft /foo/bar/baz=/x/y/z, or even
1081 # FIXME: This can't be the best solution...
1082 clean_path = os.path.abspath(path)
1083 for graft_point in graft_points:
1084 old_prefix, new_prefix = graft_point
1085 # Expand prefixes iff not absolute paths.
1086 old_prefix = os.path.normpath(old_prefix)
1087 new_prefix = os.path.normpath(new_prefix)
1088 if clean_path.startswith(old_prefix):
1089 escaped_prefix = re.escape(old_prefix)
1090 grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path)
1091 # Handle /foo=/ (at least) -- which produces //whatever.
1092 grafted_path = '/' + grafted_path.lstrip('/')
1093 clean_path_components = path_components(clean_path)
1094 # Count the components that were stripped.
1095 strip_count = 0 if old_prefix == '/' else old_prefix.count('/')
1096 new_prefix_parts = new_prefix.split('/')
1097 result_prefix = grafted_path.split('/')[:new_prefix.count('/')]
1098 result = [(p, None) for p in result_prefix] \
1099 + clean_path_components[strip_count:]
1100 # Now set the graft point name to match the end of new_prefix.
1101 graft_point = len(result_prefix)
1102 result[graft_point] = \
1103 (new_prefix_parts[-1], clean_path_components[strip_count][1])
1104 if new_prefix == '/': # --graft ...=/ is a special case.
1107 return path_components(clean_path)
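# Worked example (a sketch; assumes the elided lines simply return 'result'):
# with --graft /a/b=/c/d, the path '/a/b/e' becomes
#   [('', None), ('c', None), ('d', '/a/b'), ('e', '/a/b/e')]
# i.e. metadata-less fake parents for the new prefix, then the graft point
# itself mapped back to the real '/a/b', then the remaining real components.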
1113 _localtime = getattr(_helpers, 'localtime', None)
1116 bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
1117 'tm_hour', 'tm_min', 'tm_sec',
1118 'tm_wday', 'tm_yday',
1119 'tm_isdst', 'tm_gmtoff', 'tm_zone'])
1121 # Define a localtime() that returns bup_time when possible. Note:
1122 # this means that any helpers.localtime() results may need to be
1123 # passed through to_py_time() before being passed to python's time
1124 # module, which doesn't appear willing to ignore the extra items.
1126 def localtime(time):
1127 return bup_time(*_helpers.localtime(time))
1128 def utc_offset_str(t):
1129 """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
1130 If the current UTC offset does not represent an integer number
1131 of minutes, the fractional component will be truncated."""
1132 off = localtime(t).tm_gmtoff
1133 # Note: // doesn't truncate like C for negative values, it rounds down.
1134 offmin = abs(off) // 60
1136 h = (offmin - m) // 60
1137 return "%+03d%02d" % (-h if off < 0 else h, m)
1139 if isinstance(x, time.struct_time):
1141 return time.struct_time(x[:9])
1143 localtime = time.localtime
1144 def utc_offset_str(t):
1145 return time.strftime('%z', localtime(t))
1150 _some_invalid_save_parts_rx = re.compile(r'[[ ~^:?*\\]|\.\.|//|@{')
1152 def valid_save_name(name):
1153 # Enforce a superset of the restrictions in git-check-ref-format(1)
1155 or name.startswith('/') or name.endswith('/') \
1156 or name.endswith('.'):
1158 if _some_invalid_save_parts_rx.search(name):
1161 if ord(c) < 0x20 or ord(c) == 0x7f:
1163 for part in name.split('/'):
1164 if part.startswith('.') or part.endswith('.lock'):
1169 _period_rx = re.compile(r'^([0-9]+)(s|min|h|d|w|m|y)$')
1171 def period_as_secs(s):
1174 match = _period_rx.match(s)
1177 mag = int(match.group(1))
1178 scale = match.group(2)
1179 return mag * {'s': 1,
1183 'w': 60 * 60 * 24 * 7,
1184 'm': 60 * 60 * 24 * 31,
1185 'y': 60 * 60 * 24 * 366}[scale]
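# Illustrative conversions using the visible table entries:
# period_as_secs('2w') == 1209600 and period_as_secs('1m') == 2678400
# (a 31-day month).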