1 """Helper functions and classes for bup."""
3 from collections import namedtuple
4 from ctypes import sizeof, c_void_p
6 from contextlib import contextmanager
7 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
8 import hashlib, heapq, math, operator, time, grp, tempfile
10 from bup import _helpers
# Kernel/VM page size; used for mmap/mincore arithmetic elsewhere in bup.
sc_page_size = os.sysconf('SC_PAGE_SIZE')
assert(sc_page_size > 0)

# Maximum byte length of the arguments to exec(); used when batching
# command lines (see batchpipe()).
sc_arg_max = os.sysconf('SC_ARG_MAX')
if sc_arg_max == -1: # "no definite limit" - let's choose 2M
    sc_arg_max = 2 * 1024 * 1024
19 # This function should really be in helpers, not in bup.options. But we
20 # want options.py to be standalone so people can include it in other projects.
21 from bup.options import _tty_width
22 tty_width = _tty_width
26 """Convert the string 's' to an integer. Return 0 if s is not a number."""
34 """Convert the string 's' to a float. Return 0 if s is not a number."""
36 return float(s or '0')
41 buglvl = atoi(os.environ.get('BUP_DEBUG', 0))
45 _fdatasync = os.fdatasync
46 except AttributeError:
49 if sys.platform.startswith('darwin'):
50 # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
54 return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
56 # Fallback for file systems (SMB) that do not support F_FULLFSYNC
57 if e.errno == errno.ENOTSUP:
62 fdatasync = _fdatasync
65 # Write (blockingly) to sockets that may or may not be in blocking mode.
66 # We need this because our stderr is sometimes eaten by subprocesses
67 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
68 # leading to race conditions. Ick. We'll do it the hard way.
69 def _hard_write(fd, buf):
71 (r,w,x) = select.select([], [fd], [], None)
73 raise IOError('select(fd) returned without being writable')
75 sz = os.write(fd, buf)
77 if e.errno != errno.EAGAIN:
85 """Print a log message to stderr."""
88 _hard_write(sys.stderr.fileno(), s)
# Whether stdout (istty1) / stderr (istty2) is a terminal.  Bits 1 and 2 of
# $BUP_FORCE_TTY force the respective flag on even when the fd is not a tty.
istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
106 """Calls log() if stderr is a TTY. Does nothing otherwise."""
107 global _last_progress
114 """Calls progress() only if we haven't printed progress in a while.
116 This avoids overloading the stderr buffer with excess junk.
120 if now - _last_prog > 0.1:
126 """Calls progress() to redisplay the most recent progress message.
128 Useful after you've printed some other message that wipes out the
131 if _last_progress and _last_progress.endswith('\r'):
132 progress(_last_progress)
135 def mkdirp(d, mode=None):
136 """Recursively create directories on path 'd'.
138 Unlike os.makedirs(), it doesn't raise an exception if the last element of
139 the path already exists.
147 if e.errno == errno.EEXIST:
153 _unspecified_next_default = object()
155 def _fallback_next(it, default=_unspecified_next_default):
156 """Retrieve the next item from the iterator by calling its
157 next() method. If default is given, it is returned if the
158 iterator is exhausted, otherwise StopIteration is raised."""
160 if default is _unspecified_next_default:
165 except StopIteration:
168 if sys.version_info < (2, 6):
169 next = _fallback_next
172 def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
174 samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
176 samekey = operator.eq
178 total = sum(len(it) for it in iters)
179 iters = (iter(it) for it in iters)
180 heap = ((next(it, None),it) for it in iters)
181 heap = [(e,it) for e,it in heap if e]
186 if not count % pfreq:
189 if not samekey(e, pe):
194 e = it.next() # Don't use next() function, it's too expensive
195 except StopIteration:
196 heapq.heappop(heap) # remove current
198 heapq.heapreplace(heap, (e, it)) # shift current to new location
203 """Delete a file at path 'f' if it currently exists.
205 Unlike os.unlink(), does not throw an exception if the file didn't already
211 if e.errno != errno.ENOENT:
215 def readpipe(argv, preexec_fn=None, shell=False):
216 """Run a subprocess and return its output."""
217 p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn,
219 out, err = p.communicate()
220 if p.returncode != 0:
221 raise Exception('subprocess %r failed with status %d'
222 % (' '.join(argv), p.returncode))
226 def _argmax_base(command):
229 base_size += len(command) + 1
230 for k, v in environ.iteritems():
231 base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
235 def _argmax_args_size(args):
236 return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
239 def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
240 """If args is not empty, yield the output produced by calling the
241 command list with args as a sequence of strings (It may be necessary
242 to return multiple strings in order to respect ARG_MAX)."""
243 # The optional arg_max arg is a workaround for an issue with the
244 # current wvtest behavior.
245 base_size = _argmax_base(command)
247 room = arg_max - base_size
250 next_size = _argmax_args_size(args[i:i+1])
251 if room - next_size < 0:
257 assert(len(sub_args))
258 yield readpipe(command + sub_args, preexec_fn=preexec_fn)
261 def resolve_parent(p):
262 """Return the absolute path of a file without following any final symlink.
264 Behaves like os.path.realpath, but doesn't follow a symlink for the last
265 element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
266 will follow symlinks in p's directory)
272 if st and stat.S_ISLNK(st.st_mode):
273 (dir, name) = os.path.split(p)
274 dir = os.path.realpath(dir)
275 out = os.path.join(dir, name)
277 out = os.path.realpath(p)
278 #log('realpathing:%r,%r\n' % (p, out))
def detect_fakeroot():
    """Return True if we appear to be running under fakeroot."""
    # fakeroot exports FAKEROOTKEY to its children; only its presence
    # matters here, not its value.
    # Fix: test against None with "is not None" rather than "!= None"
    # (PEP 8 -- comparisons to singletons use identity).
    return os.getenv("FAKEROOTKEY") is not None
287 _warned_about_superuser_detection = None
289 if sys.platform.startswith('cygwin'):
290 if sys.getwindowsversion()[0] > 5:
291 # Sounds like situation is much more complicated here
292 global _warned_about_superuser_detection
293 if not _warned_about_superuser_detection:
294 log("can't detect root status for OS version > 5; assuming not root")
295 _warned_about_superuser_detection = True
298 return ctypes.cdll.shell32.IsUserAnAdmin()
300 return os.geteuid() == 0
303 def _cache_key_value(get_value, key, cache):
304 """Return (value, was_cached). If there is a value in the cache
305 for key, use that, otherwise, call get_value(key) which should
306 throw a KeyError if there is no value -- in which case the cached
307 and returned value will be None.
309 try: # Do we already have it (or know there wasn't one)?
316 cache[key] = value = get_value(key)
# Caches for password-database lookups, filled by pwd_from_uid() and
# pwd_from_name(); a missing entry is cached as None (see _cache_key_value).
_uid_to_pwd_cache = {}
_name_to_pwd_cache = {}
325 def pwd_from_uid(uid):
326 """Return password database entry for uid (may be a cached value).
327 Return None if no entry is found.
329 global _uid_to_pwd_cache, _name_to_pwd_cache
330 entry, cached = _cache_key_value(pwd.getpwuid, uid, _uid_to_pwd_cache)
331 if entry and not cached:
332 _name_to_pwd_cache[entry.pw_name] = entry
336 def pwd_from_name(name):
337 """Return password database entry for name (may be a cached value).
338 Return None if no entry is found.
340 global _uid_to_pwd_cache, _name_to_pwd_cache
341 entry, cached = _cache_key_value(pwd.getpwnam, name, _name_to_pwd_cache)
342 if entry and not cached:
343 _uid_to_pwd_cache[entry.pw_uid] = entry
# Caches for group-database lookups, filled by grp_from_gid() and
# grp_from_name(); a missing entry is cached as None (see _cache_key_value).
_gid_to_grp_cache = {}
_name_to_grp_cache = {}
350 def grp_from_gid(gid):
351 """Return password database entry for gid (may be a cached value).
352 Return None if no entry is found.
354 global _gid_to_grp_cache, _name_to_grp_cache
355 entry, cached = _cache_key_value(grp.getgrgid, gid, _gid_to_grp_cache)
356 if entry and not cached:
357 _name_to_grp_cache[entry.gr_name] = entry
361 def grp_from_name(name):
362 """Return password database entry for name (may be a cached value).
363 Return None if no entry is found.
365 global _gid_to_grp_cache, _name_to_grp_cache
366 entry, cached = _cache_key_value(grp.getgrnam, name, _name_to_grp_cache)
367 if entry and not cached:
368 _gid_to_grp_cache[entry.gr_gid] = entry
374 """Get the user's login name."""
378 _username = pwd_from_uid(uid)[0] or 'user%d' % uid
384 """Get the user's full name."""
386 if not _userfullname:
388 entry = pwd_from_uid(uid)
390 _userfullname = entry[4].split(',')[0] or entry[0]
391 if not _userfullname:
392 _userfullname = 'user%d' % uid
398 """Get the FQDN of this machine."""
401 _hostname = socket.getfqdn()
405 _resource_path = None
406 def resource_path(subdir=''):
407 global _resource_path
408 if not _resource_path:
409 _resource_path = os.environ.get('BUP_RESOURCE_PATH') or '.'
410 return os.path.join(_resource_path, subdir)
412 def format_filesize(size):
417 exponent = int(math.log(size) / math.log(unit))
418 size_prefix = "KMGTPE"[exponent - 1]
419 return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
422 class NotOk(Exception):
427 def __init__(self, outp):
431 while self._read(65536): pass
433 def read(self, size):
434 """Read 'size' bytes from input stream."""
436 return self._read(size)
439 """Read from input stream until a newline is found."""
441 return self._readline()
443 def write(self, data):
444 """Write 'data' to output stream."""
445 #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
446 self.outp.write(data)
449 """Return true if input stream is readable."""
450 raise NotImplemented("Subclasses must implement has_input")
453 """Indicate end of output from last sent command."""
457 """Indicate server error to the client."""
458 s = re.sub(r'\s+', ' ', str(s))
459 self.write('\nerror %s\n' % s)
461 def _check_ok(self, onempty):
464 for rl in linereader(self):
465 #log('%d got line: %r\n' % (os.getpid(), rl))
466 if not rl: # empty line
470 elif rl.startswith('error '):
471 #log('client: error: %s\n' % rl[6:])
475 raise Exception('server exited unexpectedly; see errors above')
477 def drain_and_check_ok(self):
478 """Remove all data for the current command from input stream."""
481 return self._check_ok(onempty)
484 """Verify that server action completed successfully."""
486 raise Exception('expected "ok", got %r' % rl)
487 return self._check_ok(onempty)
490 class Conn(BaseConn):
491 def __init__(self, inp, outp):
492 BaseConn.__init__(self, outp)
495 def _read(self, size):
496 return self.inp.read(size)
499 return self.inp.readline()
502 [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
504 assert(rl[0] == self.inp.fileno())
510 def checked_reader(fd, n):
512 rl, _, _ = select.select([fd], [], [])
515 if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
520 MAX_PACKET = 128 * 1024
521 def mux(p, outfd, outr, errr):
524 while p.poll() is None:
525 rl, _, _ = select.select(fds, [], [])
528 buf = os.read(outr, MAX_PACKET)
530 os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
532 buf = os.read(errr, 1024)
534 os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
536 os.write(outfd, struct.pack('!IB', 0, 3))
539 class DemuxConn(BaseConn):
540 """A helper class for bup's client-server protocol."""
541 def __init__(self, infd, outp):
542 BaseConn.__init__(self, outp)
543 # Anything that comes through before the sync string was not
544 # multiplexed and can be assumed to be debug/log before mux init.
546 while tail != 'BUPMUX':
547 b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
549 raise IOError('demux: unexpected EOF during initialization')
551 sys.stderr.write(tail[:-6]) # pre-mux log messages
558 def write(self, data):
560 BaseConn.write(self, data)
562 def _next_packet(self, timeout):
563 if self.closed: return False
564 rl, wl, xl = select.select([self.infd], [], [], timeout)
565 if not rl: return False
566 assert(rl[0] == self.infd)
567 ns = ''.join(checked_reader(self.infd, 5))
568 n, fdw = struct.unpack('!IB', ns)
569 assert(n <= MAX_PACKET)
571 self.reader = checked_reader(self.infd, n)
573 for buf in checked_reader(self.infd, n):
574 sys.stderr.write(buf)
577 debug2("DemuxConn: marked closed\n")
580 def _load_buf(self, timeout):
581 if self.buf is not None:
583 while not self.closed:
584 while not self.reader:
585 if not self._next_packet(timeout):
588 self.buf = self.reader.next()
590 except StopIteration:
594 def _read_parts(self, ix_fn):
595 while self._load_buf(None):
596 assert(self.buf is not None)
598 if i is None or i == len(self.buf):
603 self.buf = self.buf[i:]
611 return buf.index('\n')+1
614 return ''.join(self._read_parts(find_eol))
616 def _read(self, size):
618 def until_size(buf): # Closes on csize
619 if len(buf) < csize[0]:
624 return ''.join(self._read_parts(until_size))
627 return self._load_buf(0)
631 """Generate a list of input lines from 'f' without terminating newlines."""
639 def chunkyreader(f, count = None):
640 """Generate a list of chunks of data read from 'f'.
642 If count is None, read until EOF is reached.
644 If count is a positive integer, read 'count' bytes from 'f'. If EOF is
645 reached while reading, raise IOError.
649 b = f.read(min(count, 65536))
651 raise IOError('EOF with %d bytes remaining' % count)
662 def atomically_replaced_file(name, mode='w', buffering=-1):
663 """Yield a file that will be atomically renamed name when leaving the block.
665 This contextmanager yields an open file object that is backed by a
666 temporary file which will be renamed (atomically) to the target
667 name if everything succeeds.
669 The mode and buffering arguments are handled exactly as with open,
670 and the yielded file will have very restrictive permissions, as
675 with atomically_replaced_file('foo.txt', 'w') as f:
676 f.write('hello jack.')
680 (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
681 text=('b' not in mode))
684 f = os.fdopen(ffd, mode, buffering)
692 os.rename(tempname, name)
        unlink(tempname) # nonexistent file is ignored
698 """Append "/" to 's' if it doesn't aleady end in "/"."""
699 if s and not s.endswith('/'):
705 def _mmap_do(f, sz, flags, prot, close):
707 st = os.fstat(f.fileno())
710 # trying to open a zero-length map gives an error, but an empty
711 # string has all the same behaviour of a zero-length map, ie. it has
714 map = mmap.mmap(f.fileno(), sz, flags, prot)
716 f.close() # map will persist beyond file close
def mmap_read(f, sz = 0, close=True):
    """Create a read-only memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    If close is true, the file is closed once the map exists -- the map
    remains usable afterwards (see _mmap_do).
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
727 def mmap_readwrite(f, sz = 0, close=True):
728 """Create a read-write memory mapped region on file 'f'.
729 If sz is 0, the region will cover the entire file.
731 return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
735 def mmap_readwrite_private(f, sz = 0, close=True):
736 """Create a read-write memory mapped region on file 'f'.
737 If sz is 0, the region will cover the entire file.
738 The map is private, which means the changes are never flushed back to the
741 return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
# mincore support is optional in _helpers; None when the platform (or this
# build) doesn't provide it.
_mincore = getattr(_helpers, 'mincore', None)

# ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
# Chunk size (bytes) used when scanning a file with mincore; computed lazily.
_fmincore_chunk_size = None

def _set_fmincore_chunk_size():
    """Compute the mincore scan chunk size and store it in the module
    global _fmincore_chunk_size.

    The chunk is the largest whole multiple of the system page size not
    exceeding 64MB (or a single page, if pages are larger than that)."""
    global _fmincore_chunk_size
    pref_chunk_size = 64 * 1024 * 1024
    chunk_size = sc_page_size
    if sc_page_size < pref_chunk_size:
        # Explicit floor division: the chunk must be a whole number of
        # pages, and '//' keeps the intent clear (and Python 3 safe,
        # where '/' would produce a float).
        chunk_size = sc_page_size * (pref_chunk_size // sc_page_size)
    _fmincore_chunk_size = chunk_size
760 """Return the mincore() data for fd as a bytearray whose values can be
761 tested via MINCORE_INCORE, or None if fd does not fully
762 support the operation."""
764 if (st.st_size == 0):
766 if not _fmincore_chunk_size:
767 _set_fmincore_chunk_size()
768 pages_per_chunk = _fmincore_chunk_size / sc_page_size;
769 page_count = (st.st_size + sc_page_size - 1) / sc_page_size;
770 chunk_count = page_count / _fmincore_chunk_size
773 result = bytearray(page_count)
774 for ci in xrange(chunk_count):
775 pos = _fmincore_chunk_size * ci;
776 msize = min(_fmincore_chunk_size, st.st_size - pos)
778 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
779 except mmap.error as ex:
780 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
781 # Perhaps the file was a pipe, i.e. "... | bup split ..."
784 _mincore(m, msize, 0, result, ci * pages_per_chunk);
788 def parse_timestamp(epoch_str):
789 """Return the number of nanoseconds since the epoch that are described
790 by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
791 throw a ValueError that may contain additional information."""
792 ns_per = {'s' : 1000000000,
796 match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
798 if re.match(r'^([-+]?[0-9]+)$', epoch_str):
799 raise ValueError('must include units, i.e. 100ns, 100ms, ...')
801 (n, units) = match.group(1, 2)
805 return n * ns_per[units]
809 """Parse data size information into a float number.
811 Here are some examples of conversions:
812 199.2k means 203981 bytes
813 1GB means 1073741824 bytes
814 2.1 tb means 2199023255552 bytes
816 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
818 raise ValueError("can't parse %r as a number" % s)
819 (val, unit) = g.groups()
822 if unit in ['t', 'tb']:
823 mult = 1024*1024*1024*1024
824 elif unit in ['g', 'gb']:
825 mult = 1024*1024*1024
826 elif unit in ['m', 'mb']:
828 elif unit in ['k', 'kb']:
830 elif unit in ['', 'b']:
833 raise ValueError("invalid unit %r in number %r" % (unit, s))
838 """Count the number of elements in an iterator. (consumes the iterator)"""
839 return reduce(lambda x,y: x+1, l)
844 """Append an error message to the list of saved errors.
846 Once processing is able to stop and output the errors, the saved errors are
847 accessible in the module variable helpers.saved_errors.
849 saved_errors.append(e)
858 def die_if_errors(msg=None, status=1):
862 msg = 'warning: %d errors encountered\n' % len(saved_errors)
868 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
870 The new exception handler will make sure that bup will exit without an ugly
871 stacktrace when Ctrl-C is hit.
873 oldhook = sys.excepthook
874 def newhook(exctype, value, traceback):
875 if exctype == KeyboardInterrupt:
876 log('\nInterrupted.\n')
878 return oldhook(exctype, value, traceback)
879 sys.excepthook = newhook
882 def columnate(l, prefix):
883 """Format elements of 'l' in columns with 'prefix' leading each line.
885 The number of columns is determined automatically based on the string
891 clen = max(len(s) for s in l)
892 ncols = (tty_width() - len(prefix)) / (clen + 2)
897 while len(l) % ncols:
900 for s in range(0, len(l), rows):
901 cols.append(l[s:s+rows])
903 for row in zip(*cols):
904 out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
908 def parse_date_or_fatal(str, fatal):
909 """Parses the given date or calls Option.fatal().
910 For now we expect a string that contains a float."""
913 except ValueError as e:
914 raise fatal('invalid date format (should be a float): %r' % e)
919 def parse_excludes(options, fatal):
920 """Traverse the options and extract all excludes, or call Option.fatal()."""
924 (option, parameter) = flag
925 if option == '--exclude':
926 excluded_paths.append(resolve_parent(parameter))
927 elif option == '--exclude-from':
929 f = open(resolve_parent(parameter))
931 raise fatal("couldn't read %s" % parameter)
932 for exclude_path in f.readlines():
933 # FIXME: perhaps this should be rstrip('\n')
934 exclude_path = resolve_parent(exclude_path.strip())
936 excluded_paths.append(exclude_path)
937 return sorted(frozenset(excluded_paths))
940 def parse_rx_excludes(options, fatal):
941 """Traverse the options and extract all rx excludes, or call
943 excluded_patterns = []
946 (option, parameter) = flag
947 if option == '--exclude-rx':
949 excluded_patterns.append(re.compile(parameter))
950 except re.error as ex:
951 fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
952 elif option == '--exclude-rx-from':
954 f = open(resolve_parent(parameter))
956 raise fatal("couldn't read %s" % parameter)
957 for pattern in f.readlines():
958 spattern = pattern.rstrip('\n')
962 excluded_patterns.append(re.compile(spattern))
963 except re.error as ex:
964 fatal('invalid --exclude-rx pattern (%s): %s' % (spattern, ex))
965 return excluded_patterns
968 def should_rx_exclude_path(path, exclude_rxs):
969 """Return True if path matches a regular expression in exclude_rxs."""
970 for rx in exclude_rxs:
972 debug1('Skipping %r: excluded by rx pattern %r.\n'
973 % (path, rx.pattern))
978 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
979 # that resolve against the current filesystem in the strip/graft
980 # functions for example, but elsewhere as well. I suspect bup's not
981 # always being careful about that. For some cases, the contents of
982 # the current filesystem should be irrelevant, and consulting it might
983 # produce the wrong result, perhaps via unintended symlink resolution,
986 def path_components(path):
987 """Break path into a list of pairs of the form (name,
988 full_path_to_name). Path must start with '/'.
990 '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
991 if not path.startswith('/'):
992 raise Exception, 'path must start with "/": %s' % path
993 # Since we assume path startswith('/'), we can skip the first element.
995 norm_path = os.path.abspath(path)
999 for p in norm_path.split('/')[1:]:
1000 full_path += '/' + p
1001 result.append((p, full_path))
1005 def stripped_path_components(path, strip_prefixes):
1006 """Strip any prefix in strip_prefixes from path and return a list
1007 of path components where each component is (name,
1008 none_or_full_fs_path_to_name). Assume path startswith('/').
1009 See thelpers.py for examples."""
1010 normalized_path = os.path.abspath(path)
1011 sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
1012 for bp in sorted_strip_prefixes:
1013 normalized_bp = os.path.abspath(bp)
1014 if normalized_bp == '/':
1016 if normalized_path.startswith(normalized_bp):
1017 prefix = normalized_path[:len(normalized_bp)]
1019 for p in normalized_path[len(normalized_bp):].split('/'):
1023 result.append((p, prefix))
1026 return path_components(path)
1029 def grafted_path_components(graft_points, path):
1030 # Create a result that consists of some number of faked graft
1031 # directories before the graft point, followed by all of the real
1032 # directories from path that are after the graft point. Arrange
1033 # for the directory at the graft point in the result to correspond
1034 # to the "orig" directory in --graft orig=new. See t/thelpers.py
1035 # for some examples.
1037 # Note that given --graft orig=new, orig and new have *nothing* to
1038 # do with each other, even if some of their component names
1039 # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
1040 # equivalent to --graft /foo/bar/baz=/x/y/z, or even
1043 # FIXME: This can't be the best solution...
1044 clean_path = os.path.abspath(path)
1045 for graft_point in graft_points:
1046 old_prefix, new_prefix = graft_point
1047 # Expand prefixes iff not absolute paths.
1048 old_prefix = os.path.normpath(old_prefix)
1049 new_prefix = os.path.normpath(new_prefix)
1050 if clean_path.startswith(old_prefix):
1051 escaped_prefix = re.escape(old_prefix)
1052 grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path)
1053 # Handle /foo=/ (at least) -- which produces //whatever.
1054 grafted_path = '/' + grafted_path.lstrip('/')
1055 clean_path_components = path_components(clean_path)
1056 # Count the components that were stripped.
1057 strip_count = 0 if old_prefix == '/' else old_prefix.count('/')
1058 new_prefix_parts = new_prefix.split('/')
1059 result_prefix = grafted_path.split('/')[:new_prefix.count('/')]
1060 result = [(p, None) for p in result_prefix] \
1061 + clean_path_components[strip_count:]
1062 # Now set the graft point name to match the end of new_prefix.
1063 graft_point = len(result_prefix)
1064 result[graft_point] = \
1065 (new_prefix_parts[-1], clean_path_components[strip_count][1])
1066 if new_prefix == '/': # --graft ...=/ is a special case.
1069 return path_components(clean_path)
# localtime() from _helpers carries extra timezone data; it may be absent
# on some platforms, in which case this is None.
_localtime = getattr(_helpers, 'localtime', None)

# Like time.struct_time, but with two additional fields, tm_gmtoff and
# tm_zone (see the localtime()/to_py_time() notes below).
bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
                                   'tm_hour', 'tm_min', 'tm_sec',
                                   'tm_wday', 'tm_yday',
                                   'tm_isdst', 'tm_gmtoff', 'tm_zone'])
1083 # Define a localtime() that returns bup_time when possible. Note:
1084 # this means that any helpers.localtime() results may need to be
1085 # passed through to_py_time() before being passed to python's time
1086 # module, which doesn't appear willing to ignore the extra items.
1088 def localtime(time):
1089 return bup_time(*_helpers.localtime(time))
1090 def utc_offset_str(t):
1091 """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
1092 If the current UTC offset does not represent an integer number
1093 of minutes, the fractional component will be truncated."""
1094 off = localtime(t).tm_gmtoff
1095 # Note: // doesn't truncate like C for negative values, it rounds down.
1096 offmin = abs(off) // 60
1098 h = (offmin - m) // 60
1099 return "%+03d%02d" % (-h if off < 0 else h, m)
1101 if isinstance(x, time.struct_time):
1103 return time.struct_time(x[:9])
1105 localtime = time.localtime
1106 def utc_offset_str(t):
1107 return time.strftime('%z', localtime(t))
1112 _some_invalid_save_parts_rx = re.compile(r'[[ ~^:?*\\]|\.\.|//|@{')
1114 def valid_save_name(name):
1115 # Enforce a superset of the restrictions in git-check-ref-format(1)
1117 or name.startswith('/') or name.endswith('/') \
1118 or name.endswith('.'):
1120 if _some_invalid_save_parts_rx.search(name):
1123 if ord(c) < 0x20 or ord(c) == 0x7f:
1125 for part in name.split('/'):
1126 if part.startswith('.') or part.endswith('.lock'):