1 """Helper functions and classes for bup."""
3 from collections import namedtuple
4 from ctypes import sizeof, c_void_p
6 from contextlib import contextmanager
7 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
8 import hashlib, heapq, math, operator, time, grp, tempfile
10 from bup import _helpers
# System constants consulted by the mmap/mincore and batchpipe helpers below.
sc_page_size = os.sysconf('SC_PAGE_SIZE')
assert(sc_page_size > 0)

# Maximum bytes of argv+environ the kernel accepts per exec.
sc_arg_max = os.sysconf('SC_ARG_MAX')
if sc_arg_max == -1:  # "no definite limit" - let's choose 2M
    sc_arg_max = 2 * 1024 * 1024
19 # This function should really be in helpers, not in bup.options. But we
20 # want options.py to be standalone so people can include it in other projects.
21 from bup.options import _tty_width
22 tty_width = _tty_width
26 """Convert the string 's' to an integer. Return 0 if s is not a number."""
34 """Convert the string 's' to a float. Return 0 if s is not a number."""
36 return float(s or '0')
# Debug verbosity level; set BUP_DEBUG in the environment to enable.
buglvl = atoi(os.environ.get('BUP_DEBUG', 0))
# Pick the best available data-sync primitive for this platform.
if sys.platform.startswith('darwin'):
    # Apparently fsync on OS X doesn't guarantee to sync all the way down
    import fcntl
    fdatasync = lambda fd : fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
else: # If the platform doesn't have fdatasync, fall back to fsync
    try:
        fdatasync = os.fdatasync
    except AttributeError:
        fdatasync = os.fsync
55 # Write (blockingly) to sockets that may or may not be in blocking mode.
56 # We need this because our stderr is sometimes eaten by subprocesses
57 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
58 # leading to race conditions. Ick. We'll do it the hard way.
59 def _hard_write(fd, buf):
61 (r,w,x) = select.select([], [fd], [], None)
63 raise IOError('select(fd) returned without being writable')
65 sz = os.write(fd, buf)
67 if e.errno != errno.EAGAIN:
75 """Print a log message to stderr."""
78 _hard_write(sys.stderr.fileno(), s)
# True when fd 1/2 is a terminal; the BUP_FORCE_TTY bitmask (1=stdout,
# 2=stderr) can force these on, e.g. for tests run under a pipe.
istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
96 """Calls log() if stderr is a TTY. Does nothing otherwise."""
104 """Calls progress() only if we haven't printed progress in a while.
106 This avoids overloading the stderr buffer with excess junk.
110 if now - _last_prog > 0.1:
116 """Calls progress() to redisplay the most recent progress message.
118 Useful after you've printed some other message that wipes out the
121 if _last_progress and _last_progress.endswith('\r'):
122 progress(_last_progress)
def mkdirp(d, mode=None):
    """Recursively create directories on path 'd'.

    Unlike os.makedirs(), it doesn't raise an exception if the last element of
    the path already exists.
    """
    try:
        if mode:
            os.makedirs(d, mode)
        else:
            os.makedirs(d)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass  # already there: that's fine
        else:
            raise
143 _unspecified_next_default = object()
145 def _fallback_next(it, default=_unspecified_next_default):
146 """Retrieve the next item from the iterator by calling its
147 next() method. If default is given, it is returned if the
148 iterator is exhausted, otherwise StopIteration is raised."""
150 if default is _unspecified_next_default:
155 except StopIteration:
158 if sys.version_info < (2, 6):
159 next = _fallback_next
def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
    """Merge the already-sorted items from each iterable in iters, yielding
    them in order and suppressing consecutive duplicates.  pfunc(count, total)
    is called every pfreq items for progress reporting, and
    pfinal(count, total) once at the end.  If key is set, two items are
    duplicates when their `key` attributes are equal; otherwise plain
    equality is used."""
    if key:
        samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
    else:
        samekey = operator.eq
    count = 0
    total = sum(len(it) for it in iters)
    iters = (iter(it) for it in iters)
    heap = ((next(it, None),it) for it in iters)
    heap = [(e,it) for e,it in heap if e]

    heapq.heapify(heap)
    pe = None
    while heap:
        if not count % pfreq:
            pfunc(count, total)
        e, it = heap[0]
        if not samekey(e, pe):
            pe = e
            yield e
        count += 1
        try:
            e = it.next() # Don't use next() function, it's too expensive
        except StopIteration:
            heapq.heappop(heap) # remove current
        else:
            heapq.heapreplace(heap, (e, it)) # shift current to new location
    pfinal(count, total)
193 """Delete a file at path 'f' if it currently exists.
195 Unlike os.unlink(), does not throw an exception if the file didn't already
201 if e.errno != errno.ENOENT:
def readpipe(argv, preexec_fn=None):
    """Run a subprocess and return its output.

    Raises Exception if the subprocess exits with a nonzero status.
    """
    p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn)
    out, err = p.communicate()
    if p.returncode != 0:
        raise Exception('subprocess %r failed with status %d'
                        % (' '.join(argv), p.returncode))
    return out
215 def _argmax_base(command):
218 base_size += len(command) + 1
219 for k, v in environ.iteritems():
220 base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
224 def _argmax_args_size(args):
225 return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
    """If args is not empty, yield the output produced by calling the
    command list with args as a sequence of strings (It may be necessary
    to return multiple strings in order to respect ARG_MAX)."""
    # The optional arg_max arg is a workaround for an issue with the
    # current wvtest behavior.
    base_size = _argmax_base(command)
    while args:
        room = arg_max - base_size
        i = 0
        # Greedily take as many args as fit in the remaining ARG_MAX room.
        while i < len(args):
            next_size = _argmax_args_size(args[i:i+1])
            if room - next_size < 0:
                break
            room -= next_size
            i += 1
        sub_args = args[:i]
        args = args[i:]
        # Must make progress; otherwise a single oversized arg would loop.
        assert(len(sub_args))
        yield readpipe(command + sub_args, preexec_fn=preexec_fn)
def resolve_parent(p):
    """Return the absolute path of a file without following any final symlink.

    Behaves like os.path.realpath, but doesn't follow a symlink for the last
    element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
    will follow symlinks in p's directory)
    """
    try:
        st = os.lstat(p)
    except OSError:
        st = None
    if st and stat.S_ISLNK(st.st_mode):
        # Resolve the directory only, keeping the final symlink itself.
        head, name = os.path.split(p)
        head = os.path.realpath(head)
        out = os.path.join(head, name)
    else:
        out = os.path.realpath(p)
    #log('realpathing:%r,%r\n' % (p, out))
    return out
def detect_fakeroot():
    """Return True if we appear to be running under fakeroot."""
    # fakeroot(1) exports FAKEROOTKEY to its children.
    return os.getenv("FAKEROOTKEY") is not None
def is_superuser():
    """Return True if this process has superuser (or, on Cygwin,
    Administrator) privileges."""
    if sys.platform.startswith('cygwin'):
        import ctypes
        return ctypes.cdll.shell32.IsUserAnAdmin()
    else:
        return os.geteuid() == 0
284 def _cache_key_value(get_value, key, cache):
285 """Return (value, was_cached). If there is a value in the cache
286 for key, use that, otherwise, call get_value(key) which should
287 throw a KeyError if there is no value -- in which case the cached
288 and returned value will be None.
290 try: # Do we already have it (or know there wasn't one)?
297 cache[key] = value = get_value(key)
_uid_to_pwd_cache = {}
_name_to_pwd_cache = {}

def pwd_from_uid(uid):
    """Return password database entry for uid (may be a cached value).
    Return None if no entry is found.
    """
    global _uid_to_pwd_cache, _name_to_pwd_cache
    entry, cached = _cache_key_value(pwd.getpwuid, uid, _uid_to_pwd_cache)
    if entry and not cached:
        # Prime the name->entry cache too, since we have the entry anyway.
        _name_to_pwd_cache[entry.pw_name] = entry
    return entry
def pwd_from_name(name):
    """Return password database entry for name (may be a cached value).
    Return None if no entry is found.
    """
    global _uid_to_pwd_cache, _name_to_pwd_cache
    entry, cached = _cache_key_value(pwd.getpwnam, name, _name_to_pwd_cache)
    if entry and not cached:
        # Prime the uid->entry cache too, since we have the entry anyway.
        _uid_to_pwd_cache[entry.pw_uid] = entry
    return entry
_gid_to_grp_cache = {}
_name_to_grp_cache = {}

def grp_from_gid(gid):
    """Return group database entry for gid (may be a cached value).
    Return None if no entry is found.
    """
    global _gid_to_grp_cache, _name_to_grp_cache
    entry, cached = _cache_key_value(grp.getgrgid, gid, _gid_to_grp_cache)
    if entry and not cached:
        # Prime the name->entry cache too, since we have the entry anyway.
        _name_to_grp_cache[entry.gr_name] = entry
    return entry
def grp_from_name(name):
    """Return group database entry for name (may be a cached value).
    Return None if no entry is found.
    """
    global _gid_to_grp_cache, _name_to_grp_cache
    entry, cached = _cache_key_value(grp.getgrnam, name, _name_to_grp_cache)
    if entry and not cached:
        # Prime the gid->entry cache too, since we have the entry anyway.
        _gid_to_grp_cache[entry.gr_gid] = entry
    return entry
355 """Get the user's login name."""
359 _username = pwd_from_uid(uid)[0] or 'user%d' % uid
365 """Get the user's full name."""
367 if not _userfullname:
369 entry = pwd_from_uid(uid)
371 _userfullname = entry[4].split(',')[0] or entry[0]
372 if not _userfullname:
373 _userfullname = 'user%d' % uid
379 """Get the FQDN of this machine."""
382 _hostname = socket.getfqdn()
_resource_path = None
def resource_path(subdir=''):
    """Return bup's resource directory joined with subdir.

    The base directory comes from BUP_RESOURCE_PATH (falling back to the
    current directory) and is cached after the first lookup.
    """
    global _resource_path
    if not _resource_path:
        env_path = os.environ.get('BUP_RESOURCE_PATH')
        _resource_path = env_path or '.'
    return os.path.join(_resource_path, subdir)
def format_filesize(size):
    """Return a human-readable string for size, using decimal (1000-based)
    unit prefixes, e.g. '999', '1.0K', '2.5M'."""
    unit = 1000.0
    size = float(size)
    if size < unit:
        return "%d" % (size)
    exponent = int(math.log(size) / math.log(unit))
    size_prefix = "KMGTPE"[exponent - 1]
    return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
class NotOk(Exception):
    """Signals that the server replied 'error ...' instead of 'ok'."""
    pass
408 def __init__(self, outp):
412 while self._read(65536): pass
414 def read(self, size):
415 """Read 'size' bytes from input stream."""
417 return self._read(size)
420 """Read from input stream until a newline is found."""
422 return self._readline()
    def write(self, data):
        """Write 'data' to output stream."""
        # (The commented-out log is handy when debugging protocol traffic.)
        #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
        self.outp.write(data)
430 """Return true if input stream is readable."""
431 raise NotImplemented("Subclasses must implement has_input")
434 """Indicate end of output from last sent command."""
438 """Indicate server error to the client."""
439 s = re.sub(r'\s+', ' ', str(s))
440 self.write('\nerror %s\n' % s)
442 def _check_ok(self, onempty):
445 for rl in linereader(self):
446 #log('%d got line: %r\n' % (os.getpid(), rl))
447 if not rl: # empty line
451 elif rl.startswith('error '):
452 #log('client: error: %s\n' % rl[6:])
456 raise Exception('server exited unexpectedly; see errors above')
458 def drain_and_check_ok(self):
459 """Remove all data for the current command from input stream."""
462 return self._check_ok(onempty)
465 """Verify that server action completed successfully."""
467 raise Exception('expected "ok", got %r' % rl)
468 return self._check_ok(onempty)
class Conn(BaseConn):
    """A BaseConn backed directly by a pair of file-like streams."""

    def __init__(self, inp, outp):
        BaseConn.__init__(self, outp)
        self.inp = inp

    def _read(self, size):
        return self.inp.read(size)

    def _readline(self):
        return self.inp.readline()

    def has_input(self):
        """Return a truthy value when input is available without blocking."""
        [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
        if rl:
            assert(rl[0] == self.inp.fileno())
            return True
        else:
            return None
def checked_reader(fd, n):
    """Yield chunks read from fd until exactly n bytes have been produced;
    raise Exception if EOF arrives early."""
    while n > 0:
        rl, _, _ = select.select([fd], [], [])
        assert(rl[0] == fd)
        buf = os.read(fd, n)
        if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
        yield buf
        n -= len(buf)
MAX_PACKET = 128 * 1024

def mux(p, outfd, outr, errr):
    """Multiplex subprocess p's stdout (outr) and stderr (errr) onto outfd
    as length-prefixed packets ('!IB': length, channel) until p exits or a
    stream hits EOF; channel 1 is stdout, 2 is stderr, and a final
    zero-length channel-3 packet marks the end of the stream."""
    fds = [outr, errr]
    while p.poll() is None:
        rl, _, _ = select.select(fds, [], [])
        if outr in rl:
            buf = os.read(outr, MAX_PACKET)
            if not buf:
                break  # EOF on stdout
            os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
        if errr in rl:
            buf = os.read(errr, 1024)
            if not buf:
                break  # EOF on stderr
            os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
    # Close marker so the demuxer knows the stream ended cleanly.
    os.write(outfd, struct.pack('!IB', 0, 3))
class DemuxConn(BaseConn):
    """A helper class for bup's client-server protocol.

    Reads the length-prefixed packets produced by mux() from infd and
    presents them as an ordinary connection: channel-1 packets become
    readable data, channel-2 packets are copied to our stderr, and a
    channel-3 packet marks end of stream.
    """
    def __init__(self, infd, outp):
        BaseConn.__init__(self, outp)
        # Anything that comes through before the sync string was not
        # multiplexed and can be assumed to be debug/log before mux init.
        tail = ''
        while tail != 'BUPMUX':
            # Fill up to a 6-byte window, then slide it one byte at a time.
            b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
            if not b:
                raise IOError('demux: unexpected EOF during initialization')
            tail += b
            sys.stderr.write(tail[:-6]) # pre-mux log messages
            tail = tail[-6:]
        self.infd = infd
        self.reader = None
        self.buf = None
        self.closed = False

    def write(self, data):
        # Drain any pending input first to avoid deadlocking the peer.
        self._load_buf(0)
        BaseConn.write(self, data)

    def _next_packet(self, timeout):
        """Read one mux packet header (and dispatch its payload); return
        False when closed or when nothing arrives within timeout."""
        if self.closed: return False
        rl, wl, xl = select.select([self.infd], [], [], timeout)
        if not rl: return False
        assert(rl[0] == self.infd)
        ns = ''.join(checked_reader(self.infd, 5))
        n, fdw = struct.unpack('!IB', ns)
        assert(n <= MAX_PACKET)
        if fdw == 1:
            # Data for us: expose a reader over the payload.
            self.reader = checked_reader(self.infd, n)
        elif fdw == 2:
            # Remote stderr: pass it straight through.
            for buf in checked_reader(self.infd, n):
                sys.stderr.write(buf)
        elif fdw == 3:
            self.closed = True
            debug2("DemuxConn: marked closed\n")
        return True

    def _load_buf(self, timeout):
        """Ensure self.buf holds unread data; return True on success."""
        if self.buf is not None:
            return True
        while not self.closed:
            while not self.reader:
                if not self._next_packet(timeout):
                    return False
            try:
                self.buf = self.reader.next()
                return True
            except StopIteration:
                self.reader = None
        return False

    def _read_parts(self, ix_fn):
        """Yield buffered chunks until ix_fn(buf) returns a split index
        (stop after yielding up to that index) instead of None."""
        while self._load_buf(None):
            assert(self.buf is not None)
            i = ix_fn(self.buf)
            if i is None or i == len(self.buf):
                yv = self.buf
                self.buf = None
            else:
                yv = self.buf[:i]
                self.buf = self.buf[i:]
            yield yv
            if i is not None:
                break

    def _readline(self):
        def find_eol(buf):
            try:
                return buf.index('\n')+1
            except ValueError:
                return None
        return ''.join(self._read_parts(find_eol))

    def _read(self, size):
        csize = [size]
        def until_size(buf): # Closes on csize
            if len(buf) < csize[0]:
                csize[0] -= len(buf)
                return None
            else:
                return csize[0]
        return ''.join(self._read_parts(until_size))

    def has_input(self):
        return self._load_buf(0)
612 """Generate a list of input lines from 'f' without terminating newlines."""
def chunkyreader(f, count = None):
    """Generate a list of chunks of data read from 'f'.

    If count is None, read until EOF is reached.

    If count is a positive integer, read 'count' bytes from 'f'. If EOF is
    reached while reading, raise IOError.
    """
    if count != None:
        while count > 0:
            b = f.read(min(count, 65536))
            if not b:
                raise IOError('EOF with %d bytes remaining' % count)
            yield b
            count -= len(b)
    else:
        while 1:
            b = f.read(65536)
            if not b:
                break
            yield b
@contextmanager
def atomically_replaced_file(name, mode='w', buffering=-1):
    """Yield a file that will be atomically renamed name when leaving the block.

    This contextmanager yields an open file object that is backed by a
    temporary file which will be renamed (atomically) to the target
    name if everything succeeds.

    The mode and buffering arguments are handled exactly as with open,
    and the yielded file will have very restrictive permissions, as
    the file is initially created via mkstemp.

    E.g.::

        with atomically_replaced_file('foo.txt', 'w') as f:
            f.write('hello jack.')

    """
    # Create the temp file in the same directory so rename() stays atomic
    # (it can't cross filesystems).
    (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
                                       text=('b' not in mode))
    try:
        try:
            f = os.fdopen(ffd, mode, buffering)
        except:
            os.close(ffd)  # fdopen failed; don't leak the descriptor
            raise
        try:
            yield f
        finally:
            f.close()
        os.rename(tempname, name)
    finally:
        unlink(tempname)  # nonexistant file is ignored
679 """Append "/" to 's' if it doesn't aleady end in "/"."""
680 if s and not s.endswith('/'):
686 def _mmap_do(f, sz, flags, prot, close):
688 st = os.fstat(f.fileno())
691 # trying to open a zero-length map gives an error, but an empty
692 # string has all the same behaviour of a zero-length map, ie. it has
695 map = mmap.mmap(f.fileno(), sz, flags, prot)
697 f.close() # map will persist beyond file close
def mmap_read(f, sz = 0, close=True):
    """Create a read-only memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    """
    flags = mmap.MAP_PRIVATE
    prot = mmap.PROT_READ
    return _mmap_do(f, sz, flags, prot, close)
def mmap_readwrite(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    The map is shared, so changes are flushed back to the file.
    """
    return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
                    close)
def mmap_readwrite_private(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    The map is private, which means the changes are never flushed back to the
    file.
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
                    close)
# mincore() support is optional; _helpers only exports it on platforms
# where ./configure detected it.
_mincore = getattr(_helpers, 'mincore', None)
# ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
_fmincore_chunk_size = None
def _set_fmincore_chunk_size():
    """Compute the fmincore() scan chunk: the largest multiple of the page
    size not exceeding ~64MB (or a single page if pages are bigger)."""
    global _fmincore_chunk_size
    pref_chunk_size = 64 * 1024 * 1024
    chunk_size = sc_page_size
    if (sc_page_size < pref_chunk_size):
        # // keeps this an exact integer (plain / becomes a float on py3).
        chunk_size = sc_page_size * (pref_chunk_size // sc_page_size)
    _fmincore_chunk_size = chunk_size
741 """Return the mincore() data for fd as a bytearray whose values can be
742 tested via MINCORE_INCORE, or None if fd does not fully
743 support the operation."""
745 if (st.st_size == 0):
747 if not _fmincore_chunk_size:
748 _set_fmincore_chunk_size()
749 pages_per_chunk = _fmincore_chunk_size / sc_page_size;
750 page_count = (st.st_size + sc_page_size - 1) / sc_page_size;
751 chunk_count = page_count / _fmincore_chunk_size
754 result = bytearray(page_count)
755 for ci in xrange(chunk_count):
756 pos = _fmincore_chunk_size * ci;
757 msize = min(_fmincore_chunk_size, st.st_size - pos)
759 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
760 except mmap.error as ex:
761 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
762 # Perhaps the file was a pipe, i.e. "... | bup split ..."
765 _mincore(m, msize, 0, result, ci * pages_per_chunk);
def parse_timestamp(epoch_str):
    """Return the number of nanoseconds since the epoch that are described
    by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
    throw a ValueError that may contain additional information."""
    ns_per = {'s' : 1000000000,
              'ms' : 1000000,
              'us' : 1000,
              'ns' : 1}
    match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
    if not match:
        if re.match(r'^([-+]?[0-9]+)$', epoch_str):
            raise ValueError('must include units, i.e. 100ns, 100ms, ...')
        raise ValueError()
    (n, units) = match.group(1, 2)
    if not n:
        n = 1  # a bare unit, e.g. 'ns', means one of that unit
    n = int(n)
    return n * ns_per[units]
790 """Parse data size information into a float number.
792 Here are some examples of conversions:
793 199.2k means 203981 bytes
794 1GB means 1073741824 bytes
795 2.1 tb means 2199023255552 bytes
797 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
799 raise ValueError("can't parse %r as a number" % s)
800 (val, unit) = g.groups()
803 if unit in ['t', 'tb']:
804 mult = 1024*1024*1024*1024
805 elif unit in ['g', 'gb']:
806 mult = 1024*1024*1024
807 elif unit in ['m', 'mb']:
809 elif unit in ['k', 'kb']:
811 elif unit in ['', 'b']:
814 raise ValueError("invalid unit %r in number %r" % (unit, s))
819 """Count the number of elements in an iterator. (consumes the iterator)"""
820 return reduce(lambda x,y: x+1, l)
825 """Append an error message to the list of saved errors.
827 Once processing is able to stop and output the errors, the saved errors are
828 accessible in the module variable helpers.saved_errors.
830 saved_errors.append(e)
840 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
842 The new exception handler will make sure that bup will exit without an ugly
843 stacktrace when Ctrl-C is hit.
845 oldhook = sys.excepthook
846 def newhook(exctype, value, traceback):
847 if exctype == KeyboardInterrupt:
848 log('\nInterrupted.\n')
850 return oldhook(exctype, value, traceback)
851 sys.excepthook = newhook
def columnate(l, prefix):
    """Format elements of 'l' in columns with 'prefix' leading each line.

    The number of columns is determined automatically based on the string
    lengths and the terminal width.
    """
    if not l:
        return ""
    l = l[:]  # we pad the list below; don't mutate the caller's copy
    clen = max(len(s) for s in l)
    ncols = (tty_width() - len(prefix)) // (clen + 2)
    if ncols <= 1:
        ncols = 1
        clen = 0
    cols = []
    # Pad so the list divides evenly into ncols columns.
    while len(l) % ncols:
        l.append('')
    rows = len(l) // ncols
    for s in range(0, len(l), rows):
        cols.append(l[s:s+rows])
    out = ''
    for row in zip(*cols):
        out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
    return out
def parse_date_or_fatal(str, fatal):
    """Parses the given date or calls Option.fatal().
    For now we expect a string that contains a float."""
    try:
        date = float(str)
    except ValueError as e:
        raise fatal('invalid date format (should be a float): %r' % e)
    else:
        return date
def parse_excludes(options, fatal):
    """Traverse the options and extract all excludes, or call Option.fatal().

    Returns the excluded paths, resolved, deduplicated, and sorted.
    """
    excluded_paths = []
    for flag in options:
        (option, parameter) = flag
        if option == '--exclude':
            excluded_paths.append(resolve_parent(parameter))
        elif option == '--exclude-from':
            try:
                f = open(resolve_parent(parameter))
            except IOError as e:
                raise fatal("couldn't read %s" % parameter)
            try:
                for exclude_path in f.readlines():
                    # FIXME: perhaps this should be rstrip('\n')
                    exclude_path = resolve_parent(exclude_path.strip())
                    if exclude_path:
                        excluded_paths.append(exclude_path)
            finally:
                f.close()  # was leaked before
    return sorted(frozenset(excluded_paths))
def parse_rx_excludes(options, fatal):
    """Traverse the options and extract all rx excludes, or call
    Option.fatal().  Returns a list of compiled regular expressions."""
    excluded_patterns = []
    for flag in options:
        (option, parameter) = flag
        if option == '--exclude-rx':
            try:
                excluded_patterns.append(re.compile(parameter))
            except re.error as ex:
                fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
        elif option == '--exclude-rx-from':
            try:
                f = open(resolve_parent(parameter))
            except IOError as e:
                raise fatal("couldn't read %s" % parameter)
            try:
                for pattern in f.readlines():
                    spattern = pattern.rstrip('\n')
                    if not spattern:
                        continue  # skip blank lines
                    try:
                        excluded_patterns.append(re.compile(spattern))
                    except re.error as ex:
                        fatal('invalid --exclude-rx pattern (%s): %s'
                              % (spattern, ex))
            finally:
                f.close()  # was leaked before
    return excluded_patterns
def should_rx_exclude_path(path, exclude_rxs):
    """Return True if path matches a regular expression in exclude_rxs."""
    for rx in exclude_rxs:
        if rx.search(path):
            debug1('Skipping %r: excluded by rx pattern %r.\n'
                   % (path, rx.pattern))
            return True
    return False
950 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
951 # that resolve against the current filesystem in the strip/graft
952 # functions for example, but elsewhere as well. I suspect bup's not
953 # always being careful about that. For some cases, the contents of
954 # the current filesystem should be irrelevant, and consulting it might
# produce the wrong result, perhaps via unintended symlink resolution, for example.
def path_components(path):
    """Break path into a list of pairs of the form (name,
    full_path_to_name).  Path must start with '/'.
    Example:
      '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
    if not path.startswith('/'):
        # (The old `raise Exception, msg` form is Python-2-only syntax.)
        raise Exception('path must start with "/": %s' % path)
    # Since we assume path startswith('/'), we can skip the first element.
    result = [('', '/')]
    norm_path = os.path.abspath(path)
    if norm_path == '/':
        return result
    full_path = ''
    for p in norm_path.split('/')[1:]:
        full_path += '/' + p
        result.append((p, full_path))
    return result
def stripped_path_components(path, strip_prefixes):
    """Strip any prefix in strip_prefixes from path and return a list
    of path components where each component is (name,
    none_or_full_fs_path_to_name).  Assume path startswith('/').
    See thelpers.py for examples."""
    normalized_path = os.path.abspath(path)
    # Try the longest prefixes first so the most specific one wins.
    sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
    for bp in sorted_strip_prefixes:
        normalized_bp = os.path.abspath(bp)
        if normalized_bp == '/':
            continue  # stripping '/' would strip nothing
        if normalized_path.startswith(normalized_bp):
            prefix = normalized_path[:len(normalized_bp)]
            result = []
            for p in normalized_path[len(normalized_bp):].split('/'):
                if p: # not root
                    prefix += '/'
                prefix += p
                result.append((p, prefix))
            return result
    return path_components(path)
def grafted_path_components(graft_points, path):
    # Create a result that consists of some number of faked graft
    # directories before the graft point, followed by all of the real
    # directories from path that are after the graft point.  Arrange
    # for the directory at the graft point in the result to correspond
    # to the "orig" directory in --graft orig=new.  See t/thelpers.py
    # for some examples.
    #
    # Note that given --graft orig=new, orig and new have *nothing* to
    # do with each other, even if some of their component names
    # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
    # equivalent to --graft /foo/bar/baz=/x/y/z, or even
    # /foo/bar/baz=/x.
    #
    # FIXME: This can't be the best solution...
    clean_path = os.path.abspath(path)
    for graft_point in graft_points:
        old_prefix, new_prefix = graft_point
        # Expand prefixes iff not absolute paths.
        old_prefix = os.path.normpath(old_prefix)
        new_prefix = os.path.normpath(new_prefix)
        if clean_path.startswith(old_prefix):
            escaped_prefix = re.escape(old_prefix)
            grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path)
            # Handle /foo=/ (at least) -- which produces //whatever.
            grafted_path = '/' + grafted_path.lstrip('/')
            clean_path_components = path_components(clean_path)
            # Count the components that were stripped.
            strip_count = 0 if old_prefix == '/' else old_prefix.count('/')
            new_prefix_parts = new_prefix.split('/')
            result_prefix = grafted_path.split('/')[:new_prefix.count('/')]
            result = [(p, None) for p in result_prefix] \
                + clean_path_components[strip_count:]
            # Now set the graft point name to match the end of new_prefix.
            graft_point = len(result_prefix)
            result[graft_point] = \
                (new_prefix_parts[-1], clean_path_components[strip_count][1])
            if new_prefix == '/': # --graft ...=/ is a special case.
                return result[1:]
            return result
    return path_components(clean_path)
# Prefer the C helper's localtime(), which also reports gmtoff/zone.
_localtime = getattr(_helpers, 'localtime', None)

if _localtime:
    # struct_time's nine fields plus the gmtoff/zone our C helper returns.
    bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
                                       'tm_hour', 'tm_min', 'tm_sec',
                                       'tm_wday', 'tm_yday',
                                       'tm_isdst', 'tm_gmtoff', 'tm_zone'])

# Define a localtime() that returns bup_time when possible.  Note:
# this means that any helpers.localtime() results may need to be
# passed through to_py_time() before being passed to python's time
# module, which doesn't appear willing to ignore the extra items.
if _localtime:
    def localtime(time):
        return bup_time(*_helpers.localtime(time))
    def utc_offset_str(t):
        """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
        If the current UTC offset does not represent an integer number
        of minutes, the fractional component will be truncated."""
        off = localtime(t).tm_gmtoff
        # Note: // doesn't truncate like C for negative values, it rounds down.
        offmin = abs(off) // 60
        m = offmin % 60
        h = (offmin - m) // 60
        return "%+03d%02d" % (-h if off < 0 else h, m)
    def to_py_time(x):
        if isinstance(x, time.struct_time):
            return x
        return time.struct_time(x[:9])
else:
    localtime = time.localtime
    def utc_offset_str(t):
        return time.strftime('%z', localtime(t))
    def to_py_time(x):
        return x
_some_invalid_save_parts_rx = re.compile(r'[[ ~^:?*\\]|\.\.|//|@{')

def valid_save_name(name):
    """Return True when name is acceptable as a bup save name."""
    # Enforce a superset of the restrictions in git-check-ref-format(1)
    if name == '@' \
       or name.startswith('/') or name.endswith('/') \
       or name.endswith('.'):
        return False
    if _some_invalid_save_parts_rx.search(name):
        return False
    for c in name:
        # No control characters or DEL.
        if ord(c) < 0x20 or ord(c) == 0x7f:
            return False
    for part in name.split('/'):
        if part.startswith('.') or part.endswith('.lock'):
            return False
    return True