# SYNOPSIS
-bup random [-S seed] [-f] <numbytes>
+bup random [-S seed] [-fv] <numbytes>
# DESCRIPTION
: generate output even if stdout is a tty. (Generating
random data to a tty is generally considered
ill-advised, but you can do it if you really want.)
+
+-v, --verbose
+: print a progress message showing the number of bytes that
+  have been output so far.
# EXAMPLES
--
S,seed= optional random number seed [1]
f,force print random data to stdout even if it's a tty
+v,verbose print byte counter to stderr
"""
o = options.Options('bup random', optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])
if opt.force or (not os.isatty(1) and
not atoi(os.environ.get('BUP_FORCE_TTY')) & 1):
- _helpers.write_random(sys.stdout.fileno(), total, opt.seed)
+ _helpers.write_random(sys.stdout.fileno(), total, opt.seed,
+ opt.verbose and 1 or 0)
else:
log('error: not writing binary data to a terminal. Use -f to force.\n')
sys.exit(1)
date = time.time()
+last_prog = total_bytes = 0
+def prog(filenum, nbytes):
+ global last_prog, total_bytes
+ total_bytes += nbytes
+ now = time.time()
+ if now - last_prog < 0.2:
+ return
+ if filenum > 0:
+ progress('Splitting: file #%d, %d kbytes\r'
+ % (filenum+1, total_bytes/1024))
+ else:
+ progress('Splitting: %d kbytes\r' % (total_bytes/1024))
+ last_prog = now
+
+
is_reverse = os.environ.get('BUP_SERVER_REVERSE')
if is_reverse and opt.remote:
o.fatal("don't use -r in reverse mode; it's automatic")
if pack_writer:
shalist = hashsplit.split_to_shalist(pack_writer, files,
- keep_boundaries=opt.keep_boundaries)
+ keep_boundaries=opt.keep_boundaries,
+ progress=prog)
tree = pack_writer.new_tree(shalist)
else:
last = 0
for (blob, bits) in hashsplit.hashsplit_iter(files,
- keep_boundaries=opt.keep_boundaries):
+ keep_boundaries=opt.keep_boundaries,
+ progress=prog):
hashsplit.total_split += len(blob)
if opt.copy:
sys.stdout.write(str(blob))
static PyObject *write_random(PyObject *self, PyObject *args)
{
uint32_t buf[1024/4];
- int fd = -1, seed = 0;
+ int fd = -1, seed = 0, verbose = 0;
ssize_t ret;
long long len = 0, kbytes = 0, written = 0;
- if (!PyArg_ParseTuple(args, "iLi", &fd, &len, &seed))
+ if (!PyArg_ParseTuple(args, "iLii", &fd, &len, &seed, &verbose))
return NULL;
srandom(seed);
written += ret;
if (ret < (int)sizeof(buf))
break;
- if (kbytes/1024 > 0 && !(kbytes%1024))
+ if (verbose && kbytes/1024 > 0 && !(kbytes%1024))
fprintf(stderr, "Random: %lld Mbytes\r", kbytes/1024);
}
return (None, 0)
-def blobiter(files):
- for f in files:
+def blobiter(files, progress=None):
+ for filenum,f in enumerate(files):
ofs = 0
+ b = ''
while 1:
+ if progress:
+ progress(filenum, len(b))
fadvise_done(f, max(0, ofs - 1024*1024))
b = f.read(BLOB_HWM)
ofs += len(b)
yield (buf.get(buf.used()), 0)
-def _hashsplit_iter(files):
+def _hashsplit_iter(files, progress):
assert(BLOB_HWM > BLOB_MAX)
buf = Buf()
- fi = blobiter(files)
+ fi = blobiter(files, progress)
while 1:
for i in drainbuf(buf, finalize=False):
yield i
buf.put(bnew)
-def _hashsplit_iter_keep_boundaries(files):
- for f in files:
- for i in _hashsplit_iter([f]):
+def _hashsplit_iter_keep_boundaries(files, progress):
+ for real_filenum,f in enumerate(files):
+ if progress:
+ def prog(filenum, nbytes):
+ # the inner _hashsplit_iter doesn't know the real file count,
+ # so we'll replace it here.
+ return progress(real_filenum, nbytes)
+ else:
+ prog = None
+ for i in _hashsplit_iter([f], progress=prog):
yield i
-def hashsplit_iter(files, keep_boundaries):
+def hashsplit_iter(files, keep_boundaries, progress):
if keep_boundaries:
- return _hashsplit_iter_keep_boundaries(files)
+ return _hashsplit_iter_keep_boundaries(files, progress)
else:
- return _hashsplit_iter(files)
+ return _hashsplit_iter(files, progress)
total_split = 0
-def _split_to_blobs(w, files, keep_boundaries):
+def _split_to_blobs(w, files, keep_boundaries, progress):
global total_split
- for (blob, bits) in hashsplit_iter(files, keep_boundaries):
+ for (blob, bits) in hashsplit_iter(files, keep_boundaries, progress):
sha = w.new_blob(blob)
total_split += len(blob)
if w.outbytes >= max_pack_size or w.count >= max_pack_objects:
i += 1
-def split_to_shalist(w, files, keep_boundaries):
- sl = _split_to_blobs(w, files, keep_boundaries)
+def split_to_shalist(w, files, keep_boundaries, progress=None):
+ sl = _split_to_blobs(w, files, keep_boundaries, progress)
if not fanout:
shal = []
for (sha,size,bits) in sl: