1 """Helper functions and classes for bup."""
3 from collections import namedtuple
4 from ctypes import sizeof, c_void_p
6 from contextlib import contextmanager
7 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
8 import hashlib, heapq, math, operator, time, grp, tempfile
10 from bup import _helpers
# System page size in bytes; the mmap/mincore helpers below depend on it.
sc_page_size = os.sysconf('SC_PAGE_SIZE')
assert(sc_page_size > 0)
# Upper bound on exec() argument size; batchpipe() uses this to split
# long command lines into multiple invocations.
sc_arg_max = os.sysconf('SC_ARG_MAX')
if sc_arg_max == -1: # "no definite limit" - let's choose 2M
    sc_arg_max = 2 * 1024 * 1024
19 # This function should really be in helpers, not in bup.options. But we
20 # want options.py to be standalone so people can include it in other projects.
21 from bup.options import _tty_width
22 tty_width = _tty_width
26 """Convert the string 's' to an integer. Return 0 if s is not a number."""
34 """Convert the string 's' to a float. Return 0 if s is not a number."""
36 return float(s or '0')
41 buglvl = atoi(os.environ.get('BUP_DEBUG', 0))
45 _fdatasync = os.fdatasync
46 except AttributeError:
49 if sys.platform.startswith('darwin'):
50 # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
54 return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
56 # Fallback for file systems (SMB) that do not support F_FULLFSYNC
57 if e.errno == errno.ENOTSUP:
62 fdatasync = _fdatasync
65 # Write (blockingly) to sockets that may or may not be in blocking mode.
66 # We need this because our stderr is sometimes eaten by subprocesses
67 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
68 # leading to race conditions. Ick. We'll do it the hard way.
69 def _hard_write(fd, buf):
71 (r,w,x) = select.select([], [fd], [], None)
73 raise IOError('select(fd) returned without being writable')
75 sz = os.write(fd, buf)
77 if e.errno != errno.EAGAIN:
85 """Print a log message to stderr."""
88 _hard_write(sys.stderr.fileno(), s)
# True when the stream is a real TTY, or when BUP_FORCE_TTY's bitmask
# requests TTY behavior anyway (bit 1 = stdout, bit 2 = stderr).
istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
106 """Calls log() if stderr is a TTY. Does nothing otherwise."""
107 global _last_progress
114 """Calls progress() only if we haven't printed progress in a while.
116 This avoids overloading the stderr buffer with excess junk.
120 if now - _last_prog > 0.1:
126 """Calls progress() to redisplay the most recent progress message.
128 Useful after you've printed some other message that wipes out the
131 if _last_progress and _last_progress.endswith('\r'):
132 progress(_last_progress)
135 def mkdirp(d, mode=None):
136 """Recursively create directories on path 'd'.
138 Unlike os.makedirs(), it doesn't raise an exception if the last element of
139 the path already exists.
147 if e.errno == errno.EEXIST:
153 _unspecified_next_default = object()
155 def _fallback_next(it, default=_unspecified_next_default):
156 """Retrieve the next item from the iterator by calling its
157 next() method. If default is given, it is returned if the
158 iterator is exhausted, otherwise StopIteration is raised."""
160 if default is _unspecified_next_default:
165 except StopIteration:
168 if sys.version_info < (2, 6):
169 next = _fallback_next
172 def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
174 samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
176 samekey = operator.eq
178 total = sum(len(it) for it in iters)
179 iters = (iter(it) for it in iters)
180 heap = ((next(it, None),it) for it in iters)
181 heap = [(e,it) for e,it in heap if e]
186 if not count % pfreq:
189 if not samekey(e, pe):
194 e = it.next() # Don't use next() function, it's too expensive
195 except StopIteration:
196 heapq.heappop(heap) # remove current
198 heapq.heapreplace(heap, (e, it)) # shift current to new location
203 """Delete a file at path 'f' if it currently exists.
205 Unlike os.unlink(), does not throw an exception if the file didn't already
211 if e.errno != errno.ENOENT:
215 def readpipe(argv, preexec_fn=None, shell=False):
216 """Run a subprocess and return its output."""
217 p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn,
219 out, err = p.communicate()
220 if p.returncode != 0:
221 raise Exception('subprocess %r failed with status %d'
222 % (' '.join(argv), p.returncode))
226 def _argmax_base(command):
229 base_size += len(command) + 1
230 for k, v in environ.iteritems():
231 base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
235 def _argmax_args_size(args):
236 return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
239 def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
240 """If args is not empty, yield the output produced by calling the
241 command list with args as a sequence of strings (It may be necessary
242 to return multiple strings in order to respect ARG_MAX)."""
243 # The optional arg_max arg is a workaround for an issue with the
244 # current wvtest behavior.
245 base_size = _argmax_base(command)
247 room = arg_max - base_size
250 next_size = _argmax_args_size(args[i:i+1])
251 if room - next_size < 0:
257 assert(len(sub_args))
258 yield readpipe(command + sub_args, preexec_fn=preexec_fn)
261 def resolve_parent(p):
262 """Return the absolute path of a file without following any final symlink.
264 Behaves like os.path.realpath, but doesn't follow a symlink for the last
265 element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
266 will follow symlinks in p's directory)
272 if st and stat.S_ISLNK(st.st_mode):
273 (dir, name) = os.path.split(p)
274 dir = os.path.realpath(dir)
275 out = os.path.join(dir, name)
277 out = os.path.realpath(p)
278 #log('realpathing:%r,%r\n' % (p, out))
def detect_fakeroot():
    """Return True if we appear to be running under fakeroot.

    fakeroot(1) exports FAKEROOTKEY into its children's environment, so
    the variable's presence (with any value) is the telltale.
    """
    # 'is not None' instead of '!= None': identity is the correct (and
    # PEP 8 mandated) way to test for None.
    return os.getenv("FAKEROOTKEY") is not None
288 if sys.platform.startswith('cygwin'):
290 return ctypes.cdll.shell32.IsUserAnAdmin()
292 return os.geteuid() == 0
295 def _cache_key_value(get_value, key, cache):
296 """Return (value, was_cached). If there is a value in the cache
297 for key, use that, otherwise, call get_value(key) which should
298 throw a KeyError if there is no value -- in which case the cached
299 and returned value will be None.
301 try: # Do we already have it (or know there wasn't one)?
308 cache[key] = value = get_value(key)
314 _uid_to_pwd_cache = {}
315 _name_to_pwd_cache = {}
317 def pwd_from_uid(uid):
318 """Return password database entry for uid (may be a cached value).
319 Return None if no entry is found.
321 global _uid_to_pwd_cache, _name_to_pwd_cache
322 entry, cached = _cache_key_value(pwd.getpwuid, uid, _uid_to_pwd_cache)
323 if entry and not cached:
324 _name_to_pwd_cache[entry.pw_name] = entry
328 def pwd_from_name(name):
329 """Return password database entry for name (may be a cached value).
330 Return None if no entry is found.
332 global _uid_to_pwd_cache, _name_to_pwd_cache
333 entry, cached = _cache_key_value(pwd.getpwnam, name, _name_to_pwd_cache)
334 if entry and not cached:
335 _uid_to_pwd_cache[entry.pw_uid] = entry
339 _gid_to_grp_cache = {}
340 _name_to_grp_cache = {}
342 def grp_from_gid(gid):
343 """Return password database entry for gid (may be a cached value).
344 Return None if no entry is found.
346 global _gid_to_grp_cache, _name_to_grp_cache
347 entry, cached = _cache_key_value(grp.getgrgid, gid, _gid_to_grp_cache)
348 if entry and not cached:
349 _name_to_grp_cache[entry.gr_name] = entry
353 def grp_from_name(name):
354 """Return password database entry for name (may be a cached value).
355 Return None if no entry is found.
357 global _gid_to_grp_cache, _name_to_grp_cache
358 entry, cached = _cache_key_value(grp.getgrnam, name, _name_to_grp_cache)
359 if entry and not cached:
360 _gid_to_grp_cache[entry.gr_gid] = entry
366 """Get the user's login name."""
370 _username = pwd_from_uid(uid)[0] or 'user%d' % uid
376 """Get the user's full name."""
378 if not _userfullname:
380 entry = pwd_from_uid(uid)
382 _userfullname = entry[4].split(',')[0] or entry[0]
383 if not _userfullname:
384 _userfullname = 'user%d' % uid
390 """Get the FQDN of this machine."""
393 _hostname = socket.getfqdn()
_resource_path = None

def resource_path(subdir=''):
    """Return 'subdir' joined onto bup's resource directory.

    The base directory comes from the BUP_RESOURCE_PATH environment
    variable (falling back to '.') and is cached on first use.
    """
    global _resource_path
    if _resource_path:
        return os.path.join(_resource_path, subdir)
    configured = os.environ.get('BUP_RESOURCE_PATH')
    _resource_path = configured if configured else '.'
    return os.path.join(_resource_path, subdir)
404 def format_filesize(size):
409 exponent = int(math.log(size) / math.log(unit))
410 size_prefix = "KMGTPE"[exponent - 1]
411 return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
414 class NotOk(Exception):
419 def __init__(self, outp):
423 while self._read(65536): pass
425 def read(self, size):
426 """Read 'size' bytes from input stream."""
428 return self._read(size)
431 """Read from input stream until a newline is found."""
433 return self._readline()
435 def write(self, data):
436 """Write 'data' to output stream."""
437 #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
438 self.outp.write(data)
"""Return true if input stream is readable."""
# BUG FIX: NotImplemented is a singleton, not an exception class; calling
# it raises TypeError instead of the intended abstract-method error.
raise NotImplementedError("Subclasses must implement has_input")
445 """Indicate end of output from last sent command."""
449 """Indicate server error to the client."""
450 s = re.sub(r'\s+', ' ', str(s))
451 self.write('\nerror %s\n' % s)
453 def _check_ok(self, onempty):
456 for rl in linereader(self):
457 #log('%d got line: %r\n' % (os.getpid(), rl))
458 if not rl: # empty line
462 elif rl.startswith('error '):
463 #log('client: error: %s\n' % rl[6:])
467 raise Exception('server exited unexpectedly; see errors above')
469 def drain_and_check_ok(self):
470 """Remove all data for the current command from input stream."""
473 return self._check_ok(onempty)
476 """Verify that server action completed successfully."""
478 raise Exception('expected "ok", got %r' % rl)
479 return self._check_ok(onempty)
482 class Conn(BaseConn):
483 def __init__(self, inp, outp):
484 BaseConn.__init__(self, outp)
487 def _read(self, size):
488 return self.inp.read(size)
491 return self.inp.readline()
494 [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
496 assert(rl[0] == self.inp.fileno())
502 def checked_reader(fd, n):
504 rl, _, _ = select.select([fd], [], [])
507 if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
512 MAX_PACKET = 128 * 1024
513 def mux(p, outfd, outr, errr):
516 while p.poll() is None:
517 rl, _, _ = select.select(fds, [], [])
520 buf = os.read(outr, MAX_PACKET)
522 os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
524 buf = os.read(errr, 1024)
526 os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
528 os.write(outfd, struct.pack('!IB', 0, 3))
531 class DemuxConn(BaseConn):
532 """A helper class for bup's client-server protocol."""
533 def __init__(self, infd, outp):
534 BaseConn.__init__(self, outp)
535 # Anything that comes through before the sync string was not
536 # multiplexed and can be assumed to be debug/log before mux init.
538 while tail != 'BUPMUX':
539 b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
541 raise IOError('demux: unexpected EOF during initialization')
543 sys.stderr.write(tail[:-6]) # pre-mux log messages
550 def write(self, data):
552 BaseConn.write(self, data)
554 def _next_packet(self, timeout):
555 if self.closed: return False
556 rl, wl, xl = select.select([self.infd], [], [], timeout)
557 if not rl: return False
558 assert(rl[0] == self.infd)
559 ns = ''.join(checked_reader(self.infd, 5))
560 n, fdw = struct.unpack('!IB', ns)
561 assert(n <= MAX_PACKET)
563 self.reader = checked_reader(self.infd, n)
565 for buf in checked_reader(self.infd, n):
566 sys.stderr.write(buf)
569 debug2("DemuxConn: marked closed\n")
572 def _load_buf(self, timeout):
573 if self.buf is not None:
575 while not self.closed:
576 while not self.reader:
577 if not self._next_packet(timeout):
580 self.buf = self.reader.next()
582 except StopIteration:
586 def _read_parts(self, ix_fn):
587 while self._load_buf(None):
588 assert(self.buf is not None)
590 if i is None or i == len(self.buf):
595 self.buf = self.buf[i:]
603 return buf.index('\n')+1
606 return ''.join(self._read_parts(find_eol))
608 def _read(self, size):
610 def until_size(buf): # Closes on csize
611 if len(buf) < csize[0]:
616 return ''.join(self._read_parts(until_size))
619 return self._load_buf(0)
623 """Generate a list of input lines from 'f' without terminating newlines."""
631 def chunkyreader(f, count = None):
632 """Generate a list of chunks of data read from 'f'.
634 If count is None, read until EOF is reached.
636 If count is a positive integer, read 'count' bytes from 'f'. If EOF is
637 reached while reading, raise IOError.
641 b = f.read(min(count, 65536))
643 raise IOError('EOF with %d bytes remaining' % count)
654 def atomically_replaced_file(name, mode='w', buffering=-1):
655 """Yield a file that will be atomically renamed name when leaving the block.
657 This contextmanager yields an open file object that is backed by a
658 temporary file which will be renamed (atomically) to the target
659 name if everything succeeds.
661 The mode and buffering arguments are handled exactly as with open,
662 and the yielded file will have very restrictive permissions, as
667 with atomically_replaced_file('foo.txt', 'w') as f:
668 f.write('hello jack.')
672 (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
673 text=('b' not in mode))
676 f = os.fdopen(ffd, mode, buffering)
684 os.rename(tempname, name)
686 unlink(tempname) # nonexistant file is ignored
"""Append "/" to 's' if it doesn't already end in "/"."""
691 if s and not s.endswith('/'):
697 def _mmap_do(f, sz, flags, prot, close):
699 st = os.fstat(f.fileno())
702 # trying to open a zero-length map gives an error, but an empty
703 # string has all the same behaviour of a zero-length map, ie. it has
706 map = mmap.mmap(f.fileno(), sz, flags, prot)
708 f.close() # map will persist beyond file close
def mmap_read(f, sz = 0, close=True):
    """Create a read-only memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    If 'close' is true, 'f' is closed once the map exists (see _mmap_do).
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
719 def mmap_readwrite(f, sz = 0, close=True):
720 """Create a read-write memory mapped region on file 'f'.
721 If sz is 0, the region will cover the entire file.
723 return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
727 def mmap_readwrite_private(f, sz = 0, close=True):
728 """Create a read-write memory mapped region on file 'f'.
729 If sz is 0, the region will cover the entire file.
730 The map is private, which means the changes are never flushed back to the
733 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
737 _mincore = getattr(_helpers, 'mincore', None)
739 # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
740 MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
# Chunk size (bytes) used by fmincore() when mapping files; set lazily.
_fmincore_chunk_size = None
def _set_fmincore_chunk_size():
    """Initialize _fmincore_chunk_size to a whole number of pages.

    Targets roughly 64MB, rounded down to a multiple of sc_page_size; if
    a single page is already at least that large, one page is used.
    """
    global _fmincore_chunk_size
    pref_chunk_size = 64 * 1024 * 1024
    chunk_size = sc_page_size
    if (sc_page_size < pref_chunk_size):
        # Integer division (Python 2) floors, yielding the largest page
        # multiple not exceeding pref_chunk_size.
        chunk_size = sc_page_size * (pref_chunk_size / sc_page_size)
    _fmincore_chunk_size = chunk_size
752 """Return the mincore() data for fd as a bytearray whose values can be
753 tested via MINCORE_INCORE, or None if fd does not fully
754 support the operation."""
756 if (st.st_size == 0):
758 if not _fmincore_chunk_size:
759 _set_fmincore_chunk_size()
760 pages_per_chunk = _fmincore_chunk_size / sc_page_size;
761 page_count = (st.st_size + sc_page_size - 1) / sc_page_size;
762 chunk_count = page_count / _fmincore_chunk_size
765 result = bytearray(page_count)
766 for ci in xrange(chunk_count):
767 pos = _fmincore_chunk_size * ci;
768 msize = min(_fmincore_chunk_size, st.st_size - pos)
770 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
771 except mmap.error as ex:
772 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
773 # Perhaps the file was a pipe, i.e. "... | bup split ..."
776 _mincore(m, msize, 0, result, ci * pages_per_chunk);
780 def parse_timestamp(epoch_str):
781 """Return the number of nanoseconds since the epoch that are described
782 by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
783 throw a ValueError that may contain additional information."""
784 ns_per = {'s' : 1000000000,
788 match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
790 if re.match(r'^([-+]?[0-9]+)$', epoch_str):
791 raise ValueError('must include units, i.e. 100ns, 100ms, ...')
793 (n, units) = match.group(1, 2)
797 return n * ns_per[units]
801 """Parse data size information into a float number.
803 Here are some examples of conversions:
804 199.2k means 203981 bytes
805 1GB means 1073741824 bytes
806 2.1 tb means 2199023255552 bytes
808 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
810 raise ValueError("can't parse %r as a number" % s)
811 (val, unit) = g.groups()
814 if unit in ['t', 'tb']:
815 mult = 1024*1024*1024*1024
816 elif unit in ['g', 'gb']:
817 mult = 1024*1024*1024
818 elif unit in ['m', 'mb']:
820 elif unit in ['k', 'kb']:
822 elif unit in ['', 'b']:
825 raise ValueError("invalid unit %r in number %r" % (unit, s))
830 """Count the number of elements in an iterator. (consumes the iterator)"""
831 return reduce(lambda x,y: x+1, l)
836 """Append an error message to the list of saved errors.
838 Once processing is able to stop and output the errors, the saved errors are
839 accessible in the module variable helpers.saved_errors.
841 saved_errors.append(e)
851 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
853 The new exception handler will make sure that bup will exit without an ugly
854 stacktrace when Ctrl-C is hit.
856 oldhook = sys.excepthook
857 def newhook(exctype, value, traceback):
858 if exctype == KeyboardInterrupt:
859 log('\nInterrupted.\n')
861 return oldhook(exctype, value, traceback)
862 sys.excepthook = newhook
865 def columnate(l, prefix):
866 """Format elements of 'l' in columns with 'prefix' leading each line.
868 The number of columns is determined automatically based on the string
874 clen = max(len(s) for s in l)
875 ncols = (tty_width() - len(prefix)) / (clen + 2)
880 while len(l) % ncols:
883 for s in range(0, len(l), rows):
884 cols.append(l[s:s+rows])
886 for row in zip(*cols):
887 out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
891 def parse_date_or_fatal(str, fatal):
892 """Parses the given date or calls Option.fatal().
893 For now we expect a string that contains a float."""
896 except ValueError as e:
897 raise fatal('invalid date format (should be a float): %r' % e)
902 def parse_excludes(options, fatal):
903 """Traverse the options and extract all excludes, or call Option.fatal()."""
907 (option, parameter) = flag
908 if option == '--exclude':
909 excluded_paths.append(resolve_parent(parameter))
910 elif option == '--exclude-from':
912 f = open(resolve_parent(parameter))
914 raise fatal("couldn't read %s" % parameter)
915 for exclude_path in f.readlines():
916 # FIXME: perhaps this should be rstrip('\n')
917 exclude_path = resolve_parent(exclude_path.strip())
919 excluded_paths.append(exclude_path)
920 return sorted(frozenset(excluded_paths))
923 def parse_rx_excludes(options, fatal):
924 """Traverse the options and extract all rx excludes, or call
926 excluded_patterns = []
929 (option, parameter) = flag
930 if option == '--exclude-rx':
932 excluded_patterns.append(re.compile(parameter))
933 except re.error as ex:
934 fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
935 elif option == '--exclude-rx-from':
937 f = open(resolve_parent(parameter))
939 raise fatal("couldn't read %s" % parameter)
940 for pattern in f.readlines():
941 spattern = pattern.rstrip('\n')
945 excluded_patterns.append(re.compile(spattern))
946 except re.error as ex:
947 fatal('invalid --exclude-rx pattern (%s): %s' % (spattern, ex))
948 return excluded_patterns
951 def should_rx_exclude_path(path, exclude_rxs):
952 """Return True if path matches a regular expression in exclude_rxs."""
953 for rx in exclude_rxs:
955 debug1('Skipping %r: excluded by rx pattern %r.\n'
956 % (path, rx.pattern))
961 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
962 # that resolve against the current filesystem in the strip/graft
963 # functions for example, but elsewhere as well. I suspect bup's not
964 # always being careful about that. For some cases, the contents of
965 # the current filesystem should be irrelevant, and consulting it might
966 # produce the wrong result, perhaps via unintended symlink resolution,
969 def path_components(path):
970 """Break path into a list of pairs of the form (name,
971 full_path_to_name). Path must start with '/'.
973 '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
974 if not path.startswith('/'):
975 raise Exception, 'path must start with "/": %s' % path
976 # Since we assume path startswith('/'), we can skip the first element.
978 norm_path = os.path.abspath(path)
982 for p in norm_path.split('/')[1:]:
984 result.append((p, full_path))
988 def stripped_path_components(path, strip_prefixes):
989 """Strip any prefix in strip_prefixes from path and return a list
990 of path components where each component is (name,
991 none_or_full_fs_path_to_name). Assume path startswith('/').
992 See thelpers.py for examples."""
993 normalized_path = os.path.abspath(path)
994 sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
995 for bp in sorted_strip_prefixes:
996 normalized_bp = os.path.abspath(bp)
997 if normalized_bp == '/':
999 if normalized_path.startswith(normalized_bp):
1000 prefix = normalized_path[:len(normalized_bp)]
1002 for p in normalized_path[len(normalized_bp):].split('/'):
1006 result.append((p, prefix))
1009 return path_components(path)
1012 def grafted_path_components(graft_points, path):
1013 # Create a result that consists of some number of faked graft
1014 # directories before the graft point, followed by all of the real
1015 # directories from path that are after the graft point. Arrange
1016 # for the directory at the graft point in the result to correspond
1017 # to the "orig" directory in --graft orig=new. See t/thelpers.py
1018 # for some examples.
1020 # Note that given --graft orig=new, orig and new have *nothing* to
1021 # do with each other, even if some of their component names
1022 # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
1023 # equivalent to --graft /foo/bar/baz=/x/y/z, or even
1026 # FIXME: This can't be the best solution...
1027 clean_path = os.path.abspath(path)
1028 for graft_point in graft_points:
1029 old_prefix, new_prefix = graft_point
1030 # Expand prefixes iff not absolute paths.
1031 old_prefix = os.path.normpath(old_prefix)
1032 new_prefix = os.path.normpath(new_prefix)
1033 if clean_path.startswith(old_prefix):
1034 escaped_prefix = re.escape(old_prefix)
1035 grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path)
1036 # Handle /foo=/ (at least) -- which produces //whatever.
1037 grafted_path = '/' + grafted_path.lstrip('/')
1038 clean_path_components = path_components(clean_path)
1039 # Count the components that were stripped.
1040 strip_count = 0 if old_prefix == '/' else old_prefix.count('/')
1041 new_prefix_parts = new_prefix.split('/')
1042 result_prefix = grafted_path.split('/')[:new_prefix.count('/')]
1043 result = [(p, None) for p in result_prefix] \
1044 + clean_path_components[strip_count:]
1045 # Now set the graft point name to match the end of new_prefix.
1046 graft_point = len(result_prefix)
1047 result[graft_point] = \
1048 (new_prefix_parts[-1], clean_path_components[strip_count][1])
1049 if new_prefix == '/': # --graft ...=/ is a special case.
1052 return path_components(clean_path)
1058 _localtime = getattr(_helpers, 'localtime', None)
# Mirror of C's struct tm, extended beyond time.struct_time's first nine
# fields with tm_gmtoff (offset from UTC in seconds; see utc_offset_str)
# and tm_zone.
bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
                                   'tm_hour', 'tm_min', 'tm_sec',
                                   'tm_wday', 'tm_yday',
                                   'tm_isdst', 'tm_gmtoff', 'tm_zone'])
1066 # Define a localtime() that returns bup_time when possible. Note:
1067 # this means that any helpers.localtime() results may need to be
1068 # passed through to_py_time() before being passed to python's time
1069 # module, which doesn't appear willing to ignore the extra items.
def localtime(time):
    """Return a bup_time for the epoch value 'time'.

    Wraps _helpers.localtime(); note the parameter shadows the time
    module within this function.
    """
    return bup_time(*_helpers.localtime(time))
1073 def utc_offset_str(t):
1074 """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
1075 If the current UTC offset does not represent an integer number
1076 of minutes, the fractional component will be truncated."""
1077 off = localtime(t).tm_gmtoff
1078 # Note: // doesn't truncate like C for negative values, it rounds down.
1079 offmin = abs(off) // 60
1081 h = (offmin - m) // 60
1082 return "%+03d%02d" % (-h if off < 0 else h, m)
1084 if isinstance(x, time.struct_time):
1086 return time.struct_time(x[:9])
1088 localtime = time.localtime
def utc_offset_str(t):
    """Return the local offset from UTC for time t via strftime's '%z'."""
    return time.strftime('%z', localtime(t))
# Substrings disallowed in save names (cf. git-check-ref-format(1)): any
# of '[', space, '~', '^', ':', '?', '*', '\', plus '..', '//' and '@{'.
_some_invalid_save_parts_rx = re.compile(r'[[ ~^:?*\\]|\.\.|//|@{')
1097 def valid_save_name(name):
1098 # Enforce a superset of the restrictions in git-check-ref-format(1)
1100 or name.startswith('/') or name.endswith('/') \
1101 or name.endswith('.'):
1103 if _some_invalid_save_parts_rx.search(name):
1106 if ord(c) < 0x20 or ord(c) == 0x7f:
1108 for part in name.split('/'):
1109 if part.startswith('.') or part.endswith('.lock'):