1 """Helper functions and classes for bup."""
3 from __future__ import absolute_import, division
4 from collections import namedtuple
5 from contextlib import contextmanager
6 from ctypes import sizeof, c_void_p
8 from pipes import quote
9 from subprocess import PIPE, Popen
10 import sys, os, pwd, subprocess, errno, socket, select, mmap, stat, re, struct
11 import hashlib, heapq, math, operator, time, grp, tempfile
13 from bup import _helpers
14 from bup import compat
15 # This function should really be in helpers, not in bup.options. But we
16 # want options.py to be standalone so people can include it in other projects.
17 from bup.options import _tty_width as tty_width
21 """Helper to deal with Python scoping issues"""
# Size of a VM page; used by the mmap/fmincore helpers below.
sc_page_size = os.sysconf('SC_PAGE_SIZE')
assert(sc_page_size > 0)
# Kernel limit on exec() argument plus environment size; used by batchpipe().
sc_arg_max = os.sysconf('SC_ARG_MAX')
if sc_arg_max == -1: # "no definite limit" - let's choose 2M
    sc_arg_max = 2 * 1024 * 1024
34 for result in iterable:
40 """Convert the string 's' to an integer. Return 0 if s is not a number."""
48 """Convert the string 's' to a float. Return 0 if s is not a number."""
50 return float(s or '0')
55 buglvl = atoi(os.environ.get('BUP_DEBUG', 0))
59 _fdatasync = os.fdatasync
60 except AttributeError:
63 if sys.platform.startswith('darwin'):
64 # Apparently os.fsync on OS X doesn't guarantee to sync all the way down
68 return fcntl.fcntl(fd, fcntl.F_FULLFSYNC)
70 # Fallback for file systems (SMB) that do not support F_FULLFSYNC
71 if e.errno == errno.ENOTSUP:
76 fdatasync = _fdatasync
def partition(predicate, stream):
    """Returns (leading_matches_it, rest_it), where leading_matches_it
    must be completely exhausted before traversing rest_it.

    leading_matches_it yields the leading elements of stream for which
    predicate is true; rest_it yields everything from the first
    non-matching element onward.
    """
    # Accept any iterable; both sub-generators share this one iterator.
    stream = iter(stream)
    # One-element box holding the first non-matching element (a plain
    # list replaces the original Namespace scoping workaround).
    first_nonmatch = []
    def leading_matches():
        for x in stream:
            if predicate(x):
                yield x
            else:
                first_nonmatch.append(x)
                break
    def rest():
        if first_nonmatch:
            yield first_nonmatch[0]
        for x in stream:
            yield x
    return (leading_matches(), rest())
def lines_until_sentinel(f, sentinel, ex_type):
    """Yield lines read from f until (and not including) sentinel.

    The sentinel line itself is consumed.  Raise ex_type if EOF is
    reached before the sentinel is seen.
    """
    # sentinel must end with \n and must contain only one \n
    while True:
        line = f.readline()
        if not (line and line.endswith('\n')):
            raise ex_type('Hit EOF while reading line')
        if line == sentinel:
            return
        yield line
def stat_if_exists(path):
    """Return os.stat(path) if path exists, else None.

    Re-raise any OSError other than ENOENT.
    """
    try:
        return os.stat(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    return None
129 # Write (blockingly) to sockets that may or may not be in blocking mode.
130 # We need this because our stderr is sometimes eaten by subprocesses
131 # (probably ssh) that sometimes make it nonblocking, if only temporarily,
132 # leading to race conditions. Ick. We'll do it the hard way.
133 def _hard_write(fd, buf):
135 (r,w,x) = select.select([], [fd], [], None)
137 raise IOError('select(fd) returned without being writable')
139 sz = os.write(fd, buf)
141 if e.errno != errno.EAGAIN:
149 """Print a log message to stderr."""
152 _hard_write(sys.stderr.fileno(), s)
# Tty detection for stdout (istty1) and stderr (istty2); the
# BUP_FORCE_TTY env var can force either on (bit 1 = stdout, bit 2 = stderr).
istty1 = os.isatty(1) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 1)
istty2 = os.isatty(2) or (atoi(os.environ.get('BUP_FORCE_TTY')) & 2)
170 """Calls log() if stderr is a TTY. Does nothing otherwise."""
171 global _last_progress
178 """Calls progress() only if we haven't printed progress in a while.
180 This avoids overloading the stderr buffer with excess junk.
184 if now - _last_prog > 0.1:
190 """Calls progress() to redisplay the most recent progress message.
192 Useful after you've printed some other message that wipes out the
195 if _last_progress and _last_progress.endswith('\r'):
196 progress(_last_progress)
def mkdirp(d, mode=None):
    """Recursively create directories on path 'd'.

    Unlike os.makedirs(), it doesn't raise an exception if the last element of
    the path already exists.
    """
    try:
        if mode:
            os.makedirs(d, mode)
        else:
            os.makedirs(d)
    except OSError as e:
        # Only the "already exists" case is benign; anything else
        # (permissions, ENOTDIR, ...) is a real error.
        if e.errno == errno.EEXIST:
            pass
        else:
            raise
def merge_iter(iters, pfreq, pfunc, pfinal, key=None):
    """Merge sorted sequences, yielding each run of equal elements once.

    Equality is per the 'key' attribute when key is given, otherwise
    whole-element equality.  pfunc(count, total) is called every pfreq
    elements for progress reporting; pfinal(count, total) is called at
    the end.  NOTE(review): falsy entries (0, '', ...) are dropped by
    the initial heap construction -- confirm callers never pass them.
    """
    if key:
        samekey = lambda e, pe: getattr(e, key) == getattr(pe, key, None)
    else:
        samekey = operator.eq
    count = 0
    total = sum(len(it) for it in iters)
    iters = (iter(it) for it in iters)
    heap = ((next(it, None),it) for it in iters)
    heap = [(e,it) for e,it in heap if e]

    heapq.heapify(heap)
    pe = None
    while heap:
        if not count % pfreq:
            pfunc(count, total)
        e, it = heap[0]
        if not samekey(e, pe):
            pe = e
            yield e
        count += 1
        try:
            e = next(it)
        except StopIteration:
            heapq.heappop(heap) # remove current
        else:
            heapq.heapreplace(heap, (e, it)) # shift current to new location
    pfinal(count, total)
248 """Delete a file at path 'f' if it currently exists.
250 Unlike os.unlink(), does not throw an exception if the file didn't already
256 if e.errno != errno.ENOENT:
261 if isinstance(cmd, compat.str_type):
264 return ' '.join(map(quote, cmd))
266 exc = subprocess.check_call
276 assert stdin in (None, PIPE)
279 stdin=stdin, stdout=PIPE, stderr=stderr,
281 preexec_fn=preexec_fn)
282 out, err = p.communicate(input)
283 if check and p.returncode != 0:
284 raise Exception('subprocess %r failed with status %d, stderr: %r'
285 % (' '.join(map(quote, cmd)), p.returncode, err))
def readpipe(argv, preexec_fn=None, shell=False):
    """Run a subprocess and return its output.

    Raise Exception (with the command and exit status) if the
    subprocess exits non-zero.
    """
    p = subprocess.Popen(argv, stdout=subprocess.PIPE, preexec_fn=preexec_fn,
                         shell=shell)
    out, err = p.communicate()
    if p.returncode != 0:
        raise Exception('subprocess %r failed with status %d'
                        % (' '.join(argv), p.returncode))
    return out
def _argmax_base(command):
    """Return the number of ARG_MAX bytes already consumed by 'command'
    plus the current environment, i.e. the exec() overhead before any
    batched arguments are added (see batchpipe)."""
    base_size = 2048
    for c in command:
        # Each argument costs its own length plus a NUL terminator
        # (the visible code charged len(command) -- the list length --
        # per argument, which is wrong).
        base_size += len(c) + 1
    for k, v in compat.items(environ):
        base_size += len(k) + len(v) + 2 + sizeof(c_void_p)
    return base_size
308 def _argmax_args_size(args):
309 return sum(len(x) + 1 + sizeof(c_void_p) for x in args)
def batchpipe(command, args, preexec_fn=None, arg_max=sc_arg_max):
    """If args is not empty, yield the output produced by calling the
    command list with args as a sequence of strings (It may be necessary
    to return multiple strings in order to respect ARG_MAX)."""
    # The optional arg_max arg is a workaround for an issue with the
    # current wvtest behavior.
    base_size = _argmax_base(command)
    while args:
        room = arg_max - base_size
        i = 0
        while i < len(args):
            next_size = _argmax_args_size(args[i:i+1])
            if room - next_size < 0:
                break
            room -= next_size
            i += 1
        sub_args = args[:i]
        args = args[i:]
        # At least one argument must fit per batch, or we'd loop forever.
        assert(len(sub_args))
        yield readpipe(command + sub_args, preexec_fn=preexec_fn)
def resolve_parent(p):
    """Return the absolute path of a file without following any final symlink.

    Behaves like os.path.realpath, but doesn't follow a symlink for the last
    element. (ie. if 'p' itself is a symlink, this one won't follow it, but it
    will follow symlinks in p's directory)
    """
    try:
        st = os.lstat(p)
    except OSError:
        # Nonexistent paths are fine; fall through to plain realpath.
        st = None
    if st and stat.S_ISLNK(st.st_mode):
        # Resolve only the directory part, keeping the final name as-is.
        (dir, name) = os.path.split(p)
        dir = os.path.realpath(dir)
        out = os.path.join(dir, name)
    else:
        out = os.path.realpath(p)
    #log('realpathing:%r,%r\n' % (p, out))
    return out
def detect_fakeroot():
    """Return True if we appear to be running under fakeroot."""
    # Use "is not None": comparing to None with != is unidiomatic (E711),
    # though the result is the same for getenv()'s str-or-None return.
    return os.getenv("FAKEROOTKEY") is not None
360 if sys.platform.startswith('cygwin'):
362 # https://cygwin.com/ml/cygwin/2015-02/msg00057.html
363 groups = os.getgroups()
364 return 544 in groups or 0 in groups
367 return os.geteuid() == 0
def cache_key_value(get_value, key, cache):
    """Return (value, was_cached). If there is a value in the cache
    for key, use that, otherwise, call get_value(key) which should
    throw a KeyError if there is no value -- in which case the cached
    and returned value will be None.
    """
    try: # Do we already have it (or know there wasn't one)?
        value = cache[key]
        return value, True
    except KeyError:
        pass
    value = None
    try:
        # Cache the value (or the fact that there wasn't one).
        cache[key] = value = get_value(key)
    except KeyError:
        cache[key] = None
    return value, False
391 """Get the FQDN of this machine."""
394 _hostname = socket.getfqdn()
398 _resource_path = None
399 def resource_path(subdir=''):
400 global _resource_path
401 if not _resource_path:
402 _resource_path = os.environ.get('BUP_RESOURCE_PATH') or '.'
403 return os.path.join(_resource_path, subdir)
def format_filesize(size):
    """Return a human-readable string for 'size' bytes, e.g. '500',
    '1.5K', '2.0M' (1024-based units)."""
    unit = 1024.0
    size = float(size)
    if size < unit:
        return "%d" % (size)
    exponent = int(math.log(size) // math.log(unit))
    size_prefix = "KMGTPE"[exponent - 1]
    # True division here: floor division (//) would always yield a whole
    # number, making the '%.1f' pointless (1536 -> '1.0K' not '1.5K').
    return "%.1f%s" % (size / math.pow(unit, exponent), size_prefix)
415 class NotOk(Exception):
420 def __init__(self, outp):
424 while self._read(65536): pass
426 def read(self, size):
427 """Read 'size' bytes from input stream."""
429 return self._read(size)
432 """Read from input stream until a newline is found."""
434 return self._readline()
436 def write(self, data):
437 """Write 'data' to output stream."""
438 #log('%d writing: %d bytes\n' % (os.getpid(), len(data)))
439 self.outp.write(data)
442 """Return true if input stream is readable."""
443 raise NotImplemented("Subclasses must implement has_input")
446 """Indicate end of output from last sent command."""
450 """Indicate server error to the client."""
451 s = re.sub(r'\s+', ' ', str(s))
452 self.write('\nerror %s\n' % s)
454 def _check_ok(self, onempty):
457 for rl in linereader(self):
458 #log('%d got line: %r\n' % (os.getpid(), rl))
459 if not rl: # empty line
463 elif rl.startswith('error '):
464 #log('client: error: %s\n' % rl[6:])
468 raise Exception('server exited unexpectedly; see errors above')
470 def drain_and_check_ok(self):
471 """Remove all data for the current command from input stream."""
474 return self._check_ok(onempty)
477 """Verify that server action completed successfully."""
479 raise Exception('expected "ok", got %r' % rl)
480 return self._check_ok(onempty)
class Conn(BaseConn):
    """Client-server protocol connection over a plain (inp, outp)
    file-object pair."""
    def __init__(self, inp, outp):
        BaseConn.__init__(self, outp)
        self.inp = inp

    def _read(self, size):
        return self.inp.read(size)

    def _readline(self):
        return self.inp.readline()

    def has_input(self):
        """Return True if input is readable right now, else None."""
        # Zero timeout: poll, don't block.
        [rl, wl, xl] = select.select([self.inp.fileno()], [], [], 0)
        if rl:
            assert(rl[0] == self.inp.fileno())
            return True
        else:
            return None
def checked_reader(fd, n):
    """Yield chunks read from fd until exactly n bytes have been
    produced; raise Exception on premature EOF."""
    while n > 0:
        rl, _, _ = select.select([fd], [], [])
        assert(rl[0] == fd)
        buf = os.read(fd, n)
        if not buf: raise Exception("Unexpected EOF reading %d more bytes" % n)
        yield buf
        n -= len(buf)
MAX_PACKET = 128 * 1024
def mux(p, outfd, outr, errr):
    """Multiplex subprocess p's stdout (outr, stream id 1) and stderr
    (errr, stream id 2) onto outfd as '!IB'-length-prefixed packets
    until p exits; finish with an empty stream-3 packet."""
    fds = [outr, errr]
    while p.poll() is None:
        rl, _, _ = select.select(fds, [], [])
        if outr in rl:
            buf = os.read(outr, MAX_PACKET)
            if buf:
                os.write(outfd, struct.pack('!IB', len(buf), 1) + buf)
        if errr in rl:
            buf = os.read(errr, 1024)
            if buf:
                os.write(outfd, struct.pack('!IB', len(buf), 2) + buf)
    # Zero-length stream-3 packet marks end-of-mux.
    os.write(outfd, struct.pack('!IB', 0, 3))
532 class DemuxConn(BaseConn):
533 """A helper class for bup's client-server protocol."""
534 def __init__(self, infd, outp):
535 BaseConn.__init__(self, outp)
536 # Anything that comes through before the sync string was not
537 # multiplexed and can be assumed to be debug/log before mux init.
539 while tail != 'BUPMUX':
540 b = os.read(infd, (len(tail) < 6) and (6-len(tail)) or 1)
542 raise IOError('demux: unexpected EOF during initialization')
544 sys.stderr.write(tail[:-6]) # pre-mux log messages
551 def write(self, data):
553 BaseConn.write(self, data)
555 def _next_packet(self, timeout):
556 if self.closed: return False
557 rl, wl, xl = select.select([self.infd], [], [], timeout)
558 if not rl: return False
559 assert(rl[0] == self.infd)
560 ns = ''.join(checked_reader(self.infd, 5))
561 n, fdw = struct.unpack('!IB', ns)
562 assert(n <= MAX_PACKET)
564 self.reader = checked_reader(self.infd, n)
566 for buf in checked_reader(self.infd, n):
567 sys.stderr.write(buf)
570 debug2("DemuxConn: marked closed\n")
573 def _load_buf(self, timeout):
574 if self.buf is not None:
576 while not self.closed:
577 while not self.reader:
578 if not self._next_packet(timeout):
581 self.buf = next(self.reader)
583 except StopIteration:
587 def _read_parts(self, ix_fn):
588 while self._load_buf(None):
589 assert(self.buf is not None)
591 if i is None or i == len(self.buf):
596 self.buf = self.buf[i:]
604 return buf.index('\n')+1
607 return ''.join(self._read_parts(find_eol))
609 def _read(self, size):
611 def until_size(buf): # Closes on csize
612 if len(buf) < csize[0]:
617 return ''.join(self._read_parts(until_size))
620 return self._load_buf(0)
624 """Generate a list of input lines from 'f' without terminating newlines."""
def chunkyreader(f, count = None):
    """Generate a list of chunks of data read from 'f'.

    If count is None, read until EOF is reached.

    If count is a positive integer, read 'count' bytes from 'f'. If EOF is
    reached while reading, raise IOError.
    """
    if count != None:
        while count > 0:
            b = f.read(min(count, 65536))
            if not b:
                raise IOError('EOF with %d bytes remaining' % count)
            yield b
            count -= len(b)
    else:
        while 1:
            b = f.read(65536)
            if not b: break
            yield b
@contextmanager
def atomically_replaced_file(name, mode='w', buffering=-1):
    """Yield a file that will be atomically renamed name when leaving the block.

    This contextmanager yields an open file object that is backed by a
    temporary file which will be renamed (atomically) to the target
    name if everything succeeds.

    The mode and buffering arguments are handled exactly as with open,
    and the yielded file will have very restrictive permissions, as
    per mkstemp.

    E.g.::

        with atomically_replaced_file('foo.txt', 'w') as f:
            f.write('hello jack.')

    """
    # Create the temp file in name's directory so the final rename
    # cannot cross filesystems.
    (ffd, tempname) = tempfile.mkstemp(dir=os.path.dirname(name),
                                       text=('b' not in mode))
    try:
        try:
            f = os.fdopen(ffd, mode, buffering)
        except:
            os.close(ffd)
            raise
        try:
            yield f
        finally:
            f.close()
        os.rename(tempname, name)
    finally:
        unlink(tempname) # nonexistant file is ignored
691 """Append "/" to 's' if it doesn't aleady end in "/"."""
692 if s and not s.endswith('/'):
698 def _mmap_do(f, sz, flags, prot, close):
700 st = os.fstat(f.fileno())
703 # trying to open a zero-length map gives an error, but an empty
704 # string has all the same behaviour of a zero-length map, ie. it has
707 map = mmap.mmap(f.fileno(), sz, flags, prot)
709 f.close() # map will persist beyond file close
def mmap_read(f, sz = 0, close=True):
    """Create a read-only memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    If close is true, f is closed once the map exists."""
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ, close)
def mmap_readwrite(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    The map is shared, so changes are written back to the file.
    If close is true, f is closed once the map exists."""
    return _mmap_do(f, sz, mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE,
                    close)
def mmap_readwrite_private(f, sz = 0, close=True):
    """Create a read-write memory mapped region on file 'f'.
    If sz is 0, the region will cover the entire file.
    The map is private, which means the changes are never flushed back to the
    file.  If close is true, f is closed once the map exists."""
    return _mmap_do(f, sz, mmap.MAP_PRIVATE, mmap.PROT_READ|mmap.PROT_WRITE,
                    close)
738 _mincore = getattr(_helpers, 'mincore', None)
740 # ./configure ensures that we're on Linux if MINCORE_INCORE isn't defined.
741 MINCORE_INCORE = getattr(_helpers, 'MINCORE_INCORE', 1)
# Lazily-computed scan chunk size used by fmincore (set on first use).
_fmincore_chunk_size = None
def _set_fmincore_chunk_size():
    """Initialize _fmincore_chunk_size: the largest multiple of the
    system page size that does not exceed ~64MB (at least one page)."""
    global _fmincore_chunk_size
    preferred = 64 * 1024 * 1024
    if sc_page_size < preferred:
        # Round the preferred size down to a whole number of pages.
        _fmincore_chunk_size = sc_page_size * (preferred // sc_page_size)
    else:
        _fmincore_chunk_size = sc_page_size
753 """Return the mincore() data for fd as a bytearray whose values can be
754 tested via MINCORE_INCORE, or None if fd does not fully
755 support the operation."""
757 if (st.st_size == 0):
759 if not _fmincore_chunk_size:
760 _set_fmincore_chunk_size()
761 pages_per_chunk = _fmincore_chunk_size // sc_page_size;
762 page_count = (st.st_size + sc_page_size - 1) // sc_page_size;
763 chunk_count = page_count // _fmincore_chunk_size
766 result = bytearray(page_count)
767 for ci in compat.range(chunk_count):
768 pos = _fmincore_chunk_size * ci;
769 msize = min(_fmincore_chunk_size, st.st_size - pos)
771 m = mmap.mmap(fd, msize, mmap.MAP_PRIVATE, 0, 0, pos)
772 except mmap.error as ex:
773 if ex.errno == errno.EINVAL or ex.errno == errno.ENODEV:
774 # Perhaps the file was a pipe, i.e. "... | bup split ..."
778 _mincore(m, msize, 0, result, ci * pages_per_chunk)
779 except OSError as ex:
780 if ex.errno == errno.ENOSYS:
def parse_timestamp(epoch_str):
    """Return the number of nanoseconds since the epoch that are described
    by epoch_str (100ms, 100ns, ...); when epoch_str cannot be parsed,
    throw a ValueError that may contain additional information."""
    ns_per = {'s' : 1000000000,
              'ms' : 1000000,
              'us' : 1000,
              'ns' : 1}
    match = re.match(r'^((?:[-+]?[0-9]+)?)(s|ms|us|ns)$', epoch_str)
    if not match:
        # A bare number is the likeliest mistake; give a specific hint.
        if re.match(r'^([-+]?[0-9]+)$', epoch_str):
            raise ValueError('must include units, i.e. 100ns, 100ms, ...')
        raise ValueError()
    (n, units) = match.group(1, 2)
    if not n:
        n = 1
    n = int(n)
    return n * ns_per[units]
807 """Parse data size information into a float number.
809 Here are some examples of conversions:
810 199.2k means 203981 bytes
811 1GB means 1073741824 bytes
812 2.1 tb means 2199023255552 bytes
814 g = re.match(r'([-+\d.e]+)\s*(\w*)', str(s))
816 raise ValueError("can't parse %r as a number" % s)
817 (val, unit) = g.groups()
820 if unit in ['t', 'tb']:
821 mult = 1024*1024*1024*1024
822 elif unit in ['g', 'gb']:
823 mult = 1024*1024*1024
824 elif unit in ['m', 'mb']:
826 elif unit in ['k', 'kb']:
828 elif unit in ['', 'b']:
831 raise ValueError("invalid unit %r in number %r" % (unit, s))
836 """Count the number of elements in an iterator. (consumes the iterator)"""
837 return reduce(lambda x,y: x+1, l)
842 """Append an error message to the list of saved errors.
844 Once processing is able to stop and output the errors, the saved errors are
845 accessible in the module variable helpers.saved_errors.
847 saved_errors.append(e)
def die_if_errors(msg=None, status=1):
    """If any errors have been saved (helpers.saved_errors), log msg --
    or a default summary -- and exit with the given status."""
    global saved_errors
    if saved_errors:
        if not msg:
            msg = 'warning: %d errors encountered\n' % len(saved_errors)
        log(msg)
        sys.exit(status)
866 """Replace the default exception handler for KeyboardInterrupt (Ctrl-C).
868 The new exception handler will make sure that bup will exit without an ugly
869 stacktrace when Ctrl-C is hit.
871 oldhook = sys.excepthook
872 def newhook(exctype, value, traceback):
873 if exctype == KeyboardInterrupt:
874 log('\nInterrupted.\n')
876 return oldhook(exctype, value, traceback)
877 sys.excepthook = newhook
def columnate(l, prefix):
    """Format elements of 'l' in columns with 'prefix' leading each line.

    The number of columns is determined automatically based on the string
    lengths and the terminal width.
    """
    if not l:
        return ""
    # Work on a copy: we pad with '' below and must not mutate the caller's
    # list.
    l = l[:]
    clen = max(len(s) for s in l)
    ncols = (tty_width() - len(prefix)) // (clen + 2)
    if ncols <= 1:
        ncols = 1
        clen = 0
    cols = []
    while len(l) % ncols:
        l.append('')
    rows = len(l) // ncols
    for s in compat.range(0, len(l), rows):
        cols.append(l[s:s+rows])
    out = ''
    for row in zip(*cols):
        out += prefix + ''.join(('%-*s' % (clen+2, s)) for s in row) + '\n'
    return out
def parse_date_or_fatal(str, fatal):
    """Parses the given date or calls Option.fatal().
    For now we expect a string that contains a float.

    Note: the 'str' parameter name shadows the builtin but is kept for
    interface compatibility.
    """
    try:
        date = float(str)
    except ValueError as e:
        raise fatal('invalid date format (should be a float): %r' % e)
    else:
        return date
def parse_excludes(options, fatal):
    """Traverse the options and extract all excludes, or call Option.fatal()."""
    excluded_paths = []
    for flag in options:
        (option, parameter) = flag
        if option == '--exclude':
            excluded_paths.append(resolve_parent(parameter))
        elif option == '--exclude-from':
            try:
                f = open(resolve_parent(parameter))
            except IOError:
                raise fatal("couldn't read %s" % parameter)
            # Close the file promptly instead of leaking the handle.
            with f:
                for exclude_path in f.readlines():
                    # FIXME: perhaps this should be rstrip('\n')
                    exclude_path = resolve_parent(exclude_path.strip())
                    if exclude_path:
                        excluded_paths.append(exclude_path)
    # De-duplicate and return in a stable order.
    return sorted(frozenset(excluded_paths))
def parse_rx_excludes(options, fatal):
    """Traverse the options and extract all rx excludes, or call
    Option.fatal()."""
    excluded_patterns = []
    for flag in options:
        (option, parameter) = flag
        if option == '--exclude-rx':
            try:
                excluded_patterns.append(re.compile(parameter))
            except re.error as ex:
                fatal('invalid --exclude-rx pattern (%s): %s' % (parameter, ex))
        elif option == '--exclude-rx-from':
            try:
                f = open(resolve_parent(parameter))
            except IOError:
                raise fatal("couldn't read %s" % parameter)
            # Close the file promptly instead of leaking the handle.
            with f:
                for pattern in f.readlines():
                    spattern = pattern.rstrip('\n')
                    if not spattern:
                        continue
                    try:
                        excluded_patterns.append(re.compile(spattern))
                    except re.error as ex:
                        fatal('invalid --exclude-rx pattern (%s): %s'
                              % (spattern, ex))
    return excluded_patterns
def should_rx_exclude_path(path, exclude_rxs):
    """Return True if path matches a regular expression in exclude_rxs."""
    for rx in exclude_rxs:
        if rx.search(path):
            debug1('Skipping %r: excluded by rx pattern %r.\n'
                   % (path, rx.pattern))
            return True
    return False
976 # FIXME: Carefully consider the use of functions (os.path.*, etc.)
977 # that resolve against the current filesystem in the strip/graft
978 # functions for example, but elsewhere as well. I suspect bup's not
979 # always being careful about that. For some cases, the contents of
980 # the current filesystem should be irrelevant, and consulting it might
981 # produce the wrong result, perhaps via unintended symlink resolution,
def path_components(path):
    """Break path into a list of pairs of the form (name,
    full_path_to_name). Path must start with '/'.
    Example:
      '/home/foo' -> [('', '/'), ('home', '/home'), ('foo', '/home/foo')]"""
    if not path.startswith('/'):
        raise Exception('path must start with "/": %s' % path)
    # Since we assume path startswith('/'), we can skip the first element.
    result = [('', '/')]
    norm_path = os.path.abspath(path)
    if norm_path == '/':
        return result
    full_path = ''
    for p in norm_path.split('/')[1:]:
        full_path += '/' + p
        result.append((p, full_path))
    return result
def stripped_path_components(path, strip_prefixes):
    """Strip any prefix in strip_prefixes from path and return a list
    of path components where each component is (name,
    none_or_full_fs_path_to_name). Assume path startswith('/').
    See thelpers.py for examples."""
    normalized_path = os.path.abspath(path)
    # Longest prefixes first, so the most specific one wins.
    sorted_strip_prefixes = sorted(strip_prefixes, key=len, reverse=True)
    for bp in sorted_strip_prefixes:
        normalized_bp = os.path.abspath(bp)
        if normalized_bp == '/':
            continue
        if normalized_path.startswith(normalized_bp):
            prefix = normalized_path[:len(normalized_bp)]
            result = []
            for p in normalized_path[len(normalized_bp):].split('/'):
                if p: # not root
                    prefix += '/'
                prefix += p
                result.append((p, prefix))
            return result
    # No prefix matched.
    return path_components(path)
def grafted_path_components(graft_points, path):
    # Create a result that consists of some number of faked graft
    # directories before the graft point, followed by all of the real
    # directories from path that are after the graft point. Arrange
    # for the directory at the graft point in the result to correspond
    # to the "orig" directory in --graft orig=new. See t/thelpers.py
    # for some examples.
    #
    # Note that given --graft orig=new, orig and new have *nothing* to
    # do with each other, even if some of their component names
    # match. i.e. --graft /foo/bar/baz=/foo/bar/bax is semantically
    # equivalent to --graft /foo/bar/baz=/x/y/z, or even
    # /foo/bar/baz=/x.
    # FIXME: This can't be the best solution...
    clean_path = os.path.abspath(path)
    for graft_point in graft_points:
        old_prefix, new_prefix = graft_point
        # Expand prefixes iff not absolute paths.
        old_prefix = os.path.normpath(old_prefix)
        new_prefix = os.path.normpath(new_prefix)
        if clean_path.startswith(old_prefix):
            escaped_prefix = re.escape(old_prefix)
            grafted_path = re.sub(r'^' + escaped_prefix, new_prefix, clean_path)
            # Handle /foo=/ (at least) -- which produces //whatever.
            grafted_path = '/' + grafted_path.lstrip('/')
            clean_path_components = path_components(clean_path)
            # Count the components that were stripped.
            strip_count = 0 if old_prefix == '/' else old_prefix.count('/')
            new_prefix_parts = new_prefix.split('/')
            result_prefix = grafted_path.split('/')[:new_prefix.count('/')]
            result = [(p, None) for p in result_prefix] \
                + clean_path_components[strip_count:]
            # Now set the graft point name to match the end of new_prefix.
            graft_point = len(result_prefix)
            result[graft_point] = \
                (new_prefix_parts[-1], clean_path_components[strip_count][1])
            if new_prefix == '/': # --graft ...=/ is a special case.
                return result[1:]
            return result
    # No graft point applied; fall back to the plain components.
    return path_components(clean_path)
1073 _localtime = getattr(_helpers, 'localtime', None)
1076 bup_time = namedtuple('bup_time', ['tm_year', 'tm_mon', 'tm_mday',
1077 'tm_hour', 'tm_min', 'tm_sec',
1078 'tm_wday', 'tm_yday',
1079 'tm_isdst', 'tm_gmtoff', 'tm_zone'])
1081 # Define a localtime() that returns bup_time when possible. Note:
1082 # this means that any helpers.localtime() results may need to be
1083 # passed through to_py_time() before being passed to python's time
1084 # module, which doesn't appear willing to ignore the extra items.
1086 def localtime(time):
1087 return bup_time(*_helpers.localtime(time))
1088 def utc_offset_str(t):
1089 """Return the local offset from UTC as "+hhmm" or "-hhmm" for time t.
1090 If the current UTC offset does not represent an integer number
1091 of minutes, the fractional component will be truncated."""
1092 off = localtime(t).tm_gmtoff
1093 # Note: // doesn't truncate like C for negative values, it rounds down.
1094 offmin = abs(off) // 60
1096 h = (offmin - m) // 60
1097 return "%+03d%02d" % (-h if off < 0 else h, m)
1099 if isinstance(x, time.struct_time):
1101 return time.struct_time(x[:9])
1103 localtime = time.localtime
1104 def utc_offset_str(t):
1105 return time.strftime('%z', localtime(t))
# Characters/sequences forbidden anywhere in a save name.
_some_invalid_save_parts_rx = re.compile(r'[\[ ~^:?*\\]|\.\.|//|@{')

def valid_save_name(name):
    """Return True if name is acceptable as a bup save (branch) name."""
    # Enforce a superset of the restrictions in git-check-ref-format(1)
    if name == '@' \
       or name.startswith('/') or name.endswith('/') \
       or name.endswith('.'):
        return False
    if _some_invalid_save_parts_rx.search(name):
        return False
    for c in name:
        # Reject control characters and DEL.
        if ord(c) < 0x20 or ord(c) == 0x7f:
            return False
    for part in name.split('/'):
        if part.startswith('.') or part.endswith('.lock'):
            return False
    return True
_period_rx = re.compile(r'^([0-9]+)(s|min|h|d|w|m|y)$')

def period_as_secs(s):
    """Return the number of seconds for a period string such as '10s',
    '5min', '2h', '1d', '1w', '1m', '1y', or float('inf') for
    'forever'; return None if s cannot be parsed."""
    if s == 'forever':
        return float('inf')
    match = _period_rx.match(s)
    if not match:
        return None
    mag = int(match.group(1))
    scale = match.group(2)
    return mag * {'s': 1,
                  'min': 60,
                  'h': 60 * 60,
                  'd': 60 * 60 * 24,
                  'w': 60 * 60 * 24 * 7,
                  'm': 60 * 60 * 24 * 31,
                  'y': 60 * 60 * 24 * 366}[scale]