1 """Helper functions and classes for bup."""
3 from __future__ import absolute_import, division
4 from collections import namedtuple
5 from contextlib import contextmanager
6 from ctypes import sizeof, c_void_p
8 from pipes import quote
9 from subprocess import PIPE, Popen
10 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
11 import hashlib, heapq, math, operator, time, grp, tempfile
13 from bup import _helpers
14 from bup import compat
15 # This function should really be in helpers, not in bup.options. But we
16 # want options.py to be standalone so people can include it in other projects.
17 from bup.options import _tty_width as tty_width
21 """Helper to deal with Python scoping issues"""
sc_page_size = os.sysconf('SC_PAGE_SIZE')
assert(sc_page_size > 0)

sc_arg_max = os.sysconf('SC_ARG_MAX')
if sc_arg_max == -1:  # "no definite limit" - let's choose 2M
    sc_arg_max = 2 * 1024 * 1024

def last(iterable):
    result = None
    for result in iterable:
        pass
    return result

40 """Convert the string 's' to an integer. Return 0 if s is not a number."""
48 """Convert the string 's' to a float. Return 0 if s is not a number."""
50 return float(s or '0')
buglvl = atoi(os.environ.get('BUP_DEBUG', 0))

try:
    _fdatasync = os.fdatasync
except AttributeError:
    _fdatasync = os.fsync

if sys.platform.startswith('darwin'):
    # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
    import fcntl
    def fdatasync(fd):
        try:
            return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
        except IOError as e:
            # Fallback for file systems (SMB) that do not support F_FULLFSYNC
            if e.errno == errno.ENOTSUP:
                return _fdatasync(fd)
            raise
else:
    fdatasync = _fdatasync

def partition(predicate, stream):
    """Returns (leading_matches_it, rest_it), where leading_matches_it
    must be completely exhausted before traversing rest_it.

    """
    stream = iter(stream)
    ns = Nonlocal()
    ns.first_nonmatch = None
    def leading_matches():
        for x in stream:
            if predicate(x):
                yield x
            else:
                ns.first_nonmatch = (x,)
                break
    def rest():
        if ns.first_nonmatch:
            yield ns.first_nonmatch[0]
        for x in stream:
            yield x
    return (leading_matches(), rest())
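
# A usage sketch (illustrative, not part of bup): the first iterator must be
# drained before the second is touched, since both pull from one stream.
#
#   evens, rest = partition(lambda x: x % 2 == 0, [2, 4, 5, 6])
#   list(evens)  # -> [2, 4]
#   list(rest)   # -> [5, 6]
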
def lines_until_sentinel(f, sentinel, ex_type):
    # sentinel must end with \n and must contain only one \n
    while True:
        line = f.readline()
        if not (line and line.endswith('\n')):
            raise ex_type('Hit EOF while reading line')
        if line == sentinel:
            return
        yield line

def stat_if_exists(path):
    try:
        return os.stat(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    return None

# Write (blockingly) to sockets that may or may not be in blocking mode.
# We need this because our stderr is sometimes eaten by subprocesses
# (probably ssh) that sometimes make it nonblocking, if only temporarily,
# leading to race conditions.  Ick.  We'll do it the hard way.
def _hard_write(fd, buf):
    while buf:
        (r, w, x) = select.select([], [fd], [], None)
        if not w:
            raise IOError('select(fd) returned without being writable')
        try:
            sz = os.write(fd, buf)
        except OSError as e:
            if e.errno != errno.EAGAIN:
                raise
            sz = 0
        buf = buf[sz:]


_last_prog = 0
def log(s):
    """Print a log message to stderr."""
    global _last_prog
    sys.stdout.flush()
    _hard_write(sys.stderr.fileno(), s)
    _last_prog = 0

istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
_last_progress = ''

170 """Calls log() if stderr is a TTY. Does nothing otherwise."""
171 global _last_progress
178 """Calls progress() only if we haven't printed progress in a while.
180 This avoids overloading the stderr buffer with excess junk.
184 if now - _last_prog > 0.1:
190 """Calls progress() to redisplay the most recent progress message.
192 Useful after you've printed some other message that wipes out the
195 if _last_progress and _last_progress.endswith('\r'):
196 progress(_last_progress)
def mkdirp(d, mode=None):
    """Recursively create directories on path 'd'.

    Unlike os.makedirs(), it doesn't raise an exception if the last element of
    the path already exists.
    """
    try:
        if mode:
            os.makedirs(d, mode)
        else:
            os.makedirs(d)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise

def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
    if key:
        samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
    else:
        samekey = operator.eq
    count = 0
    total = sum(len(it) for it in iters)
    iters = (iter(it) for it in iters)
    heap = ((next(it, None), it) for it in iters)
    heap = [(e, it) for e, it in heap if e]

    heapq.heapify(heap)
    pe = None
    while heap:
        if not count % pfreq:
            pfunc(count, total)
        e, it = heap[0]
        if not samekey(e, pe):
            pe = e
            yield e
        count += 1
        try:
            e = next(it)
        except StopIteration:
            heapq.heappop(heap)  # remove current
        else:
            heapq.heapreplace(heap, (e, it))  # shift current to new location
    pfinal(count, total)

248 """Delete a file at path 'f' if it currently exists.
250 Unlike os.unlink(), does not throw an exception if the file didn't already
256 if e.errno != errno.ENOENT:
def shstr(cmd):
    if isinstance(cmd, compat.str_type):
        return cmd
    else:
        return ' '.join(map(quote, cmd))

exc = subprocess.check_call


def exo(cmd,
        input=None,
        stdin=None,
        stderr=None,
        shell=False,
        check=True,
        preexec_fn=None):
    if input:
        assert stdin in (None, PIPE)
        stdin = PIPE
    p = Popen(cmd,
              stdin=stdin, stdout=PIPE, stderr=stderr,
              shell=shell,
              preexec_fn=preexec_fn)
    out, err = p.communicate(input)
    if check and p.returncode != 0:
        raise Exception('subprocess %r failed with status %d, stderr: %r'
                        % (' '.join(map(quote, cmd)), p.returncode, err))
    return out, err, p

def readpipe(argv, preexec_fn=None, shell=False):
    """Run a subprocess and return its output."""
    p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn,
                         shell=shell)
    out, err = p.communicate()
    if p.returncode != 0:
        raise Exception('subprocess %r failed with status %d'
                        % (' '.join(argv), p.returncode))
    return out
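
# Illustrative call (not from the bup sources): readpipe returns the entire
# stdout of the subprocess as one string, and raises an Exception naming the
# command and exit status on failure.
#
#   ver = readpipe(['git', '--version'])
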
def _argmax_base(command):
    base_size = 2048
    for c in command:
        base_size += len(c) + 1
    for k, v in compat.items(environ):
        base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
    return base_size

def _argmax_args_size(args):
    return sum(len(x) + 1 + sizeof(c_void_p) for x in args)

def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
    """If args is not empty, yield the output produced by running the command
    list with batches of args appended, as a sequence of strings (multiple
    runs may be needed in order to respect ARG_MAX)."""
    # The optional arg_max arg is a workaround for an issue with the
    # current wvtest behavior.
    base_size = _argmax_base(command)
    while args:
        room = arg_max - base_size
        i = 0
        while i < len(args):
            next_size = _argmax_args_size(args[i:i+1])
            if room - next_size < 0:
                break
            room -= next_size
            i += 1
        sub_args = args[:i]
        args = args[i:]
        assert(len(sub_args))
        yield readpipe(command + sub_args, preexec_fn=preexec_fn)
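
# Illustrative sketch (hypothetical 'paths' list, not from the bup sources):
# one logical invocation is transparently split across however many execs
# are needed to stay under arg_max.
#
#   for out in batchpipe(['ls', '-ld'], paths):
#       sys.stdout.write(out)
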
def resolve_parent(p):
    """Return the absolute path of a file without following any final symlink.

    Behaves like os.path.realpath, but doesn't follow a symlink for the last
    element. (i.e. if 'p' itself is a symlink, this one won't follow it, but
    it will follow symlinks in p's directory)
    """
    try:
        st = os.lstat(p)
    except OSError:
        st = None
    if st and stat.S_ISLNK(st.st_mode):
        (dir, name) = os.path.split(p)
        dir = os.path.realpath(dir)
        out = os.path.join(dir, name)
    else:
        out = os.path.realpath(p)
    #log('realpathing:%r,%r\n' % (p, out))
    return out
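
# For example, if /tmp/link is a symlink to /etc, then unlike
# os.path.realpath (which would return '/etc'):
#
#   resolve_parent('/tmp/link')         # -> '/tmp/link'
#   resolve_parent('/tmp/link/passwd')  # -> '/etc/passwd' (non-final
#                                       #    symlinks are still followed)
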
def detect_fakeroot():
    "Return True if we appear to be running under fakeroot."
    return os.getenv("FAKEROOTKEY") is not None

if sys.platform.startswith('cygwin'):
    def is_superuser():
        # https://cygwin.com/ml/cygwin/2015-02/msg00057.html
        groups = os.getgroups()
        return 544 in groups or 0 in groups
else:
    def is_superuser():
        return os.geteuid() == 0

def cache_key_value(get_value, key, cache):
    """Return (value, was_cached).  If there is a value in the cache
    for key, use it; otherwise call get_value(key), which should
    throw a KeyError if there is no value -- in which case the cached
    and returned value will be None.
    """
    try:  # Do we already have it (or know there wasn't one)?
        value = cache[key]
        return value, True
    except KeyError:
        pass
    value = None
    try:
        cache[key] = value = get_value(key)
    except KeyError:
        cache[key] = None
    return value, False

391 """Get the FQDN of this machine."""
394 _hostname = socket.getfqdn()
def format_filesize(size):
    unit = 1024.0
    size = float(size)
    if size < unit:
        return '%d' % size
    exponent = int(math.log(size) // math.log(unit))
    size_prefix = "KMGTPE"[exponent - 1]
    return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
class NotOk(Exception):
    pass


class BaseConn:
    def __init__(self, outp):
        self.outp = outp

    def close(self):
        while self._read(65536): pass

    def read(self, size):
        """Read 'size' bytes from input stream."""
        self.outp.flush()
        return self._read(size)

    def readline(self):
        """Read from input stream until a newline is found."""
        self.outp.flush()
        return self._readline()

    def write(self, data):
        """Write 'data' to output stream."""
        #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
        self.outp.write(data)

    def has_input(self):
        """Return true if input stream is readable."""
        raise NotImplementedError("Subclasses must implement has_input")

    def ok(self):
        """Indicate end of output from last sent command."""
        self.write('\nok\n')

    def error(self, s):
        """Indicate server error to the client."""
        s = re.sub(r'\s+', ' ', str(s))
        self.write('\nerror %s\n' % s)

    def _check_ok(self, onempty):
        self.outp.flush()
        rl = ''
        for rl in linereader(self):
            #log('%d got line: %r\n' % (os.getpid(), rl))
            if not rl:  # empty line
                continue
            elif rl == 'ok':
                return None
            elif rl.startswith('error '):
                #log('client: error: %s\n' % rl[6:])
                return NotOk(rl[6:])
            else:
                onempty(rl)
        raise Exception('server exited unexpectedly; see errors above')

    def drain_and_check_ok(self):
        """Remove all data for the current command from input stream."""
        def onempty(rl):
            pass
        return self._check_ok(onempty)

    def check_ok(self):
        """Verify that server action completed successfully."""
        def onempty(rl):
            raise Exception('expected "ok", got %r' % rl)
        return self._check_ok(onempty)

class Conn(BaseConn):
    def __init__(self, inp, outp):
        BaseConn.__init__(self, outp)
        self.inp = inp

    def _read(self, size):
        return self.inp.read(size)

    def _readline(self):
        return self.inp.readline()

    def has_input(self):
        [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
        if rl:
            assert(rl[0] == self.inp.fileno())
            return True
        else:
            return None

def checked_reader(fd, n):
    while n > 0:
        rl, _, _ = select.select([fd], [], [])
        assert(rl[0] == fd)
        buf = os.read(fd, n)
        if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
        yield buf
        n -= len(buf)

MAX_PACKET = 128 * 1024
def mux(p, outfd, outr, errr):
    try:
        fds = [outr, errr]
        while p.poll() is None:
            rl, _, _ = select.select(fds, [], [])
            if outr in rl:
                buf = os.read(outr, MAX_PACKET)
                if not buf: break
                os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
            if errr in rl:
                buf = os.read(errr, 1024)
                if not buf: break
                os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
    finally:
        os.write(outfd, struct.pack('!IB', 0, 3))
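
# Each multiplexed packet is a five-byte '!IB' header -- a big-endian payload
# length plus a one-byte stream id (1 = stdout data, 2 = stderr data,
# 3 = end of stream) -- followed by the payload itself.  An illustrative
# round trip (not from the bup sources):
#
#   pkt = struct.pack('!IB', 5, 1) + 'hello'
#   n, fdw = struct.unpack('!IB', pkt[:5])  # -> (5, 1)
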
class DemuxConn(BaseConn):
    """A helper class for bup's client-server protocol."""
    def __init__(self, infd, outp):
        BaseConn.__init__(self, outp)
        # Anything that comes through before the sync string was not
        # multiplexed and can be assumed to be debug/log before mux init.
        tail = ''
        while tail != 'BUPMUX':
            b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
            if not b:
                raise IOError('demux: unexpected EOF during initialization')
            tail += b
            sys.stderr.write(tail[:-6])  # pre-mux log messages
            tail = tail[-6:]
        self.infd = infd
        self.reader = None
        self.buf = None
        self.closed = False

    def write(self, data):
        self._load_buf(0)
        BaseConn.write(self, data)

    def _next_packet(self, timeout):
        if self.closed: return False
        rl, wl, xl = select.select([self.infd], [], [], timeout)
        if not rl: return False
        assert(rl[0] == self.infd)
        ns = ''.join(checked_reader(self.infd, 5))
        n, fdw = struct.unpack('!IB', ns)
        assert(n <= MAX_PACKET)
        if fdw == 1:
            self.reader = checked_reader(self.infd, n)
        elif fdw == 2:
            for buf in checked_reader(self.infd, n):
                sys.stderr.write(buf)
        elif fdw == 3:
            self.closed = True
            debug2("DemuxConn: marked closed\n")
        return True

    def _load_buf(self, timeout):
        if self.buf is not None:
            return True
        while not self.closed:
            while not self.reader:
                if not self._next_packet(timeout):
                    return False
            try:
                self.buf = next(self.reader)
                return True
            except StopIteration:
                self.reader = None
        return False

    def _read_parts(self, ix_fn):
        while self._load_buf(None):
            assert(self.buf is not None)
            i = ix_fn(self.buf)
            if i is None or i == len(self.buf):
                yv = self.buf
                self.buf = None
            else:
                yv = self.buf[:i]
                self.buf = self.buf[i:]
            yield yv
            if i is not None:
                break

    def _readline(self):
        def find_eol(buf):
            try:
                return buf.index('\n')+1
            except ValueError:
                return None
        return ''.join(self._read_parts(find_eol))

    def _read(self, size):
        csize = [size]
        def until_size(buf):  # Closes on csize
            if len(buf) < csize[0]:
                csize[0] -= len(buf)
                return None
            return csize[0]
        return ''.join(self._read_parts(until_size))

    def has_input(self):
        return self._load_buf(0)

617 """Generate a list of input lines from 'f' without terminating newlines."""
def chunkyreader(f, count = None):
    """Generate chunks of data read from 'f'.

    If count is None, read until EOF is reached.

    If count is a positive integer, read 'count' bytes from 'f'.  If EOF is
    reached while reading, raise IOError.
    """
    if count != None:
        while count > 0:
            b = f.read(min(count, 65536))
            if not b:
                raise IOError('EOF with %d bytes remaining' % count)
            yield b
            count -= len(b)
    else:
        while 1:
            b = f.read(65536)
            if not b: break
            yield b

@contextmanager
def atomically_replaced_file(name, mode='w', buffering=-1):
    """Yield a file that will be atomically renamed to name when leaving the
    block.

    This contextmanager yields an open file object that is backed by a
    temporary file which will be renamed (atomically) to the target
    name if everything succeeds.

    The mode and buffering arguments are handled exactly as with open,
    and the yielded file will have very restrictive permissions, as
    the backing file is created via mkstemp (mode 0600).

    E.g.:

        with atomically_replaced_file('foo.txt', 'w') as f:
            f.write('hello jack.')

    """
    (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
                                       text=('b' not in mode))
    try:
        f = os.fdopen(ffd, mode, buffering)
        try:
            yield f
        finally:
            f.close()
        os.rename(tempname, name)
    finally:
        unlink(tempname)  # nonexistent file is ignored

684 """Append "/" to 's' if it doesn't aleady end in "/"."""
685 if s and not s.endswith('/'):
def _mmap_do(f, sz, flags, prot, close):
    if not sz:
        st = os.fstat(f.fileno())
        sz = st.st_size
    if not sz:
        # trying to open a zero-length map gives an error, but an empty
        # string has all the same behaviour as a zero-length map, i.e. it has
        # no elements :)
        return ''
    map = mmap.mmap(f.fileno(), sz, flags, prot)
    if close:
        f.close()  # map will persist beyond file close
    return map

def mmap_read(f, sz = 0, close=True):
    """Create a read-only memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)


def mmap_readwrite(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    """
    return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
                    close)


def mmap_readwrite_private(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    The map is private, which means the changes are never flushed back to the
    file.
    """
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
                    close)
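
# Illustrative use (hypothetical file name, not from the bup sources):
#
#   f = open('/tmp/example.pack', 'rb')
#   m = mmap_read(f)  # with close=True (the default), f is closed here
#   header = m[:12]   # the mapping itself remains valid
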
_mincore = getattr(_helpers, 'mincore', None)
if _mincore:
    # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
    MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)

_fmincore_chunk_size = None
def _set_fmincore_chunk_size():
    global _fmincore_chunk_size
    pref_chunk_size = 64 * 1024 * 1024
    chunk_size = sc_page_size
    if (sc_page_size < pref_chunk_size):
        chunk_size = sc_page_size * (pref_chunk_size // sc_page_size)
    _fmincore_chunk_size = chunk_size

746 """Return the mincore() data for fd as a bytearray whose values can be
747 tested via MINCORE_INCORE, or None if fd does not fully
748 support the operation."""
750 if (st.st_size == 0):
752 if not _fmincore_chunk_size:
753 _set_fmincore_chunk_size()
754 pages_per_chunk = _fmincore_chunk_size // sc_page_size;
755 page_count = (st.st_size + sc_page_size - 1) // sc_page_size;
756 chunk_count = page_count // _fmincore_chunk_size
759 result = bytearray(page_count)
760 for ci in compat.range(chunk_count):
761 pos = _fmincore_chunk_size * ci;
762 msize = min(_fmincore_chunk_size, st.st_size - pos)
764 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
765 except mmap.error as ex:
766 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
767 # Perhaps the file was a pipe, i.e. "... | bup split ..."
771 _mincore(m, msize, 0, result, ci * pages_per_chunk)
772 except OSError as ex:
773 if ex.errno == errno.ENOSYS:
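
# Illustrative sketch (assumes 'fd' refers to a regular, mmap-able file; not
# from the bup sources): counting how many pages are resident in RAM.
#
#   coremap = fmincore(fd)
#   if coremap is not None:
#       resident = sum(1 for x in coremap if x & MINCORE_INCORE)
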
def parse_timestamp(epoch_str):
    """Return the number of nanoseconds since the epoch that are described
    by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
    throw a ValueError that may contain additional information."""
    ns_per = {'s' :  1000000000,
              'ms' : 1000000,
              'us' : 1000,
              'ns' : 1}
    match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
    if not match:
        if re.match(r'^([-+]?[0-9]+)$', epoch_str):
            raise ValueError('must include units, i.e. 100ns, 100ms, ...')
        raise ValueError()
    (n, units) = match.group(1, 2)
    if not n:
        n = 1
    n = int(n)
    return n * ns_per[units]
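
# For example: parse_timestamp('100ms') -> 100 * 10**6,
# parse_timestamp('-1s') -> -10**9, and parse_timestamp('ns') -> 1 (a bare
# unit counts as 1); parse_timestamp('100') raises ValueError because the
# units are missing.
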
800 """Parse data size information into a float number.
802 Here are some examples of conversions:
803 199.2k means 203981 bytes
804 1GB means 1073741824 bytes
805 2.1 tb means 2199023255552 bytes
807 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
809 raise ValueError("can't parse %r as a number" % s)
810 (val, unit) = g.groups()
813 if unit in ['t', 'tb']:
814 mult = 1024*1024*1024*1024
815 elif unit in ['g', 'gb']:
816 mult = 1024*1024*1024
817 elif unit in ['m', 'mb']:
819 elif unit in ['k', 'kb']:
821 elif unit in ['', 'b']:
824 raise ValueError("invalid unit %r in number %r" % (unit, s))
829 """Count the number of elements in an iterator. (consumes the iterator)"""
830 return reduce(lambda x,y: x+1, l)
835 """Append an error message to the list of saved errors.
837 Once processing is able to stop and output the errors, the saved errors are
838 accessible in the module variable helpers.saved_errors.
840 saved_errors.append(e)
def die_if_errors(msg=None, status=1):
    global saved_errors
    if saved_errors:
        if not msg:
            msg = 'warning: %d errors encountered\n' % len(saved_errors)
        log(msg)
        sys.exit(status)

859 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
861 The new exception handler will make sure that bup will exit without an ugly
862 stacktrace when Ctrl-C is hit.
864 oldhook = sys.excepthook
865 def newhook(exctype, value, traceback):
866 if exctype == KeyboardInterrupt:
867 log('\nInterrupted.\n')
869 return oldhook(exctype, value, traceback)
870 sys.excepthook = newhook
def columnate(l, prefix):
    """Format elements of 'l' in columns with 'prefix' leading each line.

    The number of columns is determined automatically based on the string
    lengths and the tty width.
    """
    if not l:
        return ""
    l = l[:]
    clen = max(len(s) for s in l)
    ncols = (tty_width() - len(prefix)) // (clen + 2)
    if ncols <= 1:
        ncols = 1
        clen = 0
    cols = []
    while len(l) % ncols:
        l.append('')
    rows = len(l) // ncols
    for s in compat.range(0, len(l), rows):
        cols.append(l[s:s+rows])
    out = ''
    for row in zip(*cols):
        out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
    return out
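
# For example, columnate(['add', 'drop', 'list'], '    ') yields roughly
# '    add   drop  list\n' on a wide tty: each column is as wide as the
# longest string plus two spaces.
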
def parse_date_or_fatal(str, fatal):
    """Parses the given date or calls Option.fatal().
    For now we expect a string that contains a float."""
    try:
        date = float(str)
    except ValueError as e:
        raise fatal('invalid date format (should be a float): %r' % e)
    else:
        return date

def parse_excludes(options, fatal):
    """Traverse the options and extract all excludes, or call Option.fatal()."""
    excluded_paths = []

    for flag in options:
        (option, parameter) = flag
        if option == '--exclude':
            excluded_paths.append(resolve_parent(parameter))
        elif option == '--exclude-from':
            try:
                f = open(resolve_parent(parameter))
            except IOError as e:
                raise fatal("couldn't read %s" % parameter)
            for exclude_path in f.readlines():
                # FIXME: perhaps this should be rstrip('\n')
                exclude_path = resolve_parent(exclude_path.strip())
                if exclude_path:
                    excluded_paths.append(exclude_path)

    return sorted(frozenset(excluded_paths))

def parse_rx_excludes(options, fatal):
    """Traverse the options and extract all rx excludes, or call
    Option.fatal()."""
    excluded_patterns = []

    for flag in options:
        (option, parameter) = flag
        if option == '--exclude-rx':
            try:
                excluded_patterns.append(re.compile(parameter))
            except re.error as ex:
                fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
        elif option == '--exclude-rx-from':
            try:
                f = open(resolve_parent(parameter))
            except IOError as e:
                raise fatal("couldn't read %s" % parameter)
            for pattern in f.readlines():
                spattern = pattern.rstrip('\n')
                if not spattern:
                    continue
                try:
                    excluded_patterns.append(re.compile(spattern))
                except re.error as ex:
                    fatal('invalid --exclude-rx pattern (%s): %s'
                          % (spattern, ex))

    return excluded_patterns

def should_rx_exclude_path(path, exclude_rxs):
    """Return True if path matches a regular expression in exclude_rxs."""
    for rx in exclude_rxs:
        if rx.search(path):
            debug1('Skipping %r: excluded by rx pattern %r.\n'
                   % (path, rx.pattern))
            return True
    return False

# FIXME: Carefully consider the use of functions (os.path.*, etc.)
# that resolve against the current filesystem in the strip/graft
# functions, for example, but elsewhere as well.  I suspect bup's not
# always being careful about that.  For some cases, the contents of
# the current filesystem should be irrelevant, and consulting it might
# produce the wrong result, perhaps via unintended symlink resolution,
# for example.

def path_components(path):
    """Break path into a list of pairs of the form (name,
    full_path_to_name).  Path must start with '/'.
    Example:
      '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
    if not path.startswith('/'):
        raise Exception('path must start with "/": %s' % path)
    # Since we assume path startswith('/'), we can skip the first element.
    result = [('', '/')]
    norm_path = os.path.abspath(path)
    if norm_path == '/':
        return result
    full_path = ''
    for p in norm_path.split('/')[1:]:
        full_path += '/' + p
        result.append((p, full_path))
    return result

def stripped_path_components(path, strip_prefixes):
    """Strip any prefix in strip_prefixes from path and return a list
    of path components where each component is (name,
    none_or_full_fs_path_to_name).  Assume path startswith('/').
    See thelpers.py for examples."""
    normalized_path = os.path.abspath(path)
    sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
    for bp in sorted_strip_prefixes:
        normalized_bp = os.path.abspath(bp)
        if normalized_bp == '/':
            continue
        if normalized_path.startswith(normalized_bp):
            prefix = normalized_path[:len(normalized_bp)]
            result = []
            for p in normalized_path[len(normalized_bp):].split('/'):
                if p:  # not root
                    prefix += '/' + p
                result.append((p, prefix))
            return result
    return path_components(path)
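
# For example:
#
#   stripped_path_components('/var/log/syslog', ['/var/log'])
#   # -> [('', '/var/log'), ('syslog', '/var/log/syslog')]
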
def grafted_path_components(graft_points, path):
    # Create a result that consists of some number of faked graft
    # directories before the graft point, followed by all of the real
    # directories from path that are after the graft point.  Arrange
    # for the directory at the graft point in the result to correspond
    # to the "orig" directory in --graft orig=new.  See t/thelpers.py
    # for some examples.
    #
    # Note that given --graft orig=new, orig and new have *nothing* to
    # do with each other, even if some of their component names
    # match, i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
    # equivalent to --graft /foo/bar/baz=/x/y/z, or even
    # /foo/bar/baz=/x.

    # FIXME: This can't be the best solution...
    clean_path = os.path.abspath(path)
    for graft_point in graft_points:
        old_prefix, new_prefix = graft_point
        # Expand prefixes iff not absolute paths.
        old_prefix = os.path.normpath(old_prefix)
        new_prefix = os.path.normpath(new_prefix)
        if clean_path.startswith(old_prefix):
            escaped_prefix = re.escape(old_prefix)
            grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path)
            # Handle /foo=/ (at least) -- which produces //whatever.
            grafted_path = '/' + grafted_path.lstrip('/')
            clean_path_components = path_components(clean_path)
            # Count the components that were stripped.
            strip_count = 0 if old_prefix == '/' else old_prefix.count('/')
            new_prefix_parts = new_prefix.split('/')
            result_prefix = grafted_path.split('/')[:new_prefix.count('/')]
            result = [(p, None) for p in result_prefix] \
                     + clean_path_components[strip_count:]
            # Now set the graft point name to match the end of new_prefix.
            graft_point = len(result_prefix)
            result[graft_point] = \
                (new_prefix_parts[-1], clean_path_components[strip_count][1])
            if new_prefix == '/':  # --graft ...=/ is a special case.
                return result[1:]
            return result
    return path_components(clean_path)
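
# For example, with --graft /a/b=/c/d, the first components are faked (no
# real filesystem path) and the graft point is renamed to 'd' while keeping
# its real path '/a/b':
#
#   grafted_path_components([('/a/b', '/c/d')], '/a/b/c/d')
#   # -> [('', None), ('c', None), ('d', '/a/b'),
#   #     ('c', '/a/b/c'), ('d', '/a/b/c/d')]
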
_localtime = getattr(_helpers, 'localtime', None)

if _localtime:
    bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
                                       'tm_hour', 'tm_min', 'tm_sec',
                                       'tm_wday', 'tm_yday',
                                       'tm_isdst', 'tm_gmtoff', 'tm_zone'])

# Define a localtime() that returns bup_time when possible.  Note:
# this means that any helpers.localtime() results may need to be
# passed through to_py_time() before being passed to python's time
# module, which doesn't appear willing to ignore the extra items.
if _localtime:
    def localtime(time):
        return bup_time(*_helpers.localtime(time))
    def utc_offset_str(t):
        """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
        If the current UTC offset does not represent an integer number
        of minutes, the fractional component will be truncated."""
        off = localtime(t).tm_gmtoff
        # Note: // doesn't truncate like C for negative values, it rounds down.
        offmin = abs(off) // 60
        m = offmin % 60
        h = (offmin - m) // 60
        return "%+03d%02d" % (-h if off < 0 else h, m)
    def to_py_time(x):
        if isinstance(x, time.struct_time):
            return x
        return time.struct_time(x[:9])
else:
    localtime = time.localtime
    def utc_offset_str(t):
        return time.strftime('%z', localtime(t))
    def to_py_time(x):
        return x
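
# For example, at UTC+5:30, utc_offset_str(time.time()) returns '+0530'; at
# UTC-8 it returns '-0800'.
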
_some_invalid_save_parts_rx = re.compile(r'[\[ ~^:?*\\]|\.\.|//|@{')

def valid_save_name(name):
    # Enforce a superset of the restrictions in git-check-ref-format(1)
    if name == '@' \
       or name.startswith('/') or name.endswith('/') \
       or name.endswith('.'):
        return False
    if _some_invalid_save_parts_rx.search(name):
        return False
    for c in name:
        if ord(c) < 0x20 or ord(c) == 0x7f:
            return False
    for part in name.split('/'):
        if part.startswith('.') or part.endswith('.lock'):
            return False
    return True
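
# For example: valid_save_name('my-backup-2016') is True, while
# valid_save_name('x..y'), valid_save_name('foo/'), and
# valid_save_name('a.lock') are all False.
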
_period_rx = re.compile(r'^([0-9]+)(s|min|h|d|w|m|y)$')

def period_as_secs(s):
    if s == 'forever':
        return float('inf')
    match = _period_rx.match(s)
    if not match:
        return None
    mag = int(match.group(1))
    scale = match.group(2)
    return mag * {'s': 1,
                  'min': 60,
                  'h': 60 * 60,
                  'd': 60 * 60 * 24,
                  'w': 60 * 60 * 24 * 7,
                  'm': 60 * 60 * 24 * 31,
                  'y': 60 * 60 * 24 * 366}[scale]
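
# For example: period_as_secs('90s') == 90 and period_as_secs('2w') ==
# 1209600 (2 * 7 * 24 * 60 * 60); strings that don't match _period_rx
# yield None.
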