X-Git-Url: https://arthur.barton.de/gitweb/?a=blobdiff_plain;ds=sidebyside;f=lib%2Fbup%2F_helpers.c;h=b29c54cb782e16882d5043149c88423b37c980e9;hb=1db1c19cc03ac0f41f983549ec8a70128bb1dc1c;hp=2a5dc468b7b660a849c6e67df60c8fa91da26b1a;hpb=6c14d90cecf074255ce522032b107abf961e5e3d;p=bup.git diff --git a/lib/bup/_helpers.c b/lib/bup/_helpers.c index 2a5dc46..b29c54c 100644 --- a/lib/bup/_helpers.c +++ b/lib/bup/_helpers.c @@ -11,11 +11,15 @@ #include #include #include +#include #include #include #include -#include +#include +#ifdef HAVE_SYS_MMAN_H +#include +#endif #ifdef HAVE_SYS_TYPES_H #include #endif @@ -25,6 +29,9 @@ #ifdef HAVE_UNISTD_H #include #endif +#ifdef HAVE_SYS_TIME_H +#include +#endif #ifdef HAVE_LINUX_FS_H #include @@ -33,6 +40,10 @@ #include #endif +#ifdef HAVE_TM_TM_GMTOFF +#include +#endif + #include "bupsplit.h" #if defined(FS_IOC_GETFLAGS) && defined(FS_IOC_SETFLAGS) @@ -52,7 +63,55 @@ #define FS_NOCOW_FL 0 #endif -static int istty2 = 0; + +typedef unsigned char byte; + + +typedef struct { + int istty2; +} state_t; + +// cstr_argf: for byte vectors without null characters (e.g. paths) +// rbuf_argf: for read-only byte vectors +// wbuf_argf: for mutable byte vectors + +#if PY_MAJOR_VERSION < 3 +static state_t state; +# define get_state(x) (&state) +# define cstr_argf "s" +# define rbuf_argf "s#" +# define wbuf_argf "s*" +#else +# define get_state(x) ((state_t *) PyModule_GetState(x)) +# define cstr_argf "y" +# define rbuf_argf "y#" +# define wbuf_argf "y*" +#endif // PY_MAJOR_VERSION >= 3 + + +static void *checked_malloc(size_t n, size_t size) +{ + size_t total; + if (__builtin_mul_overflow(n, size, &total)) + { + PyErr_Format(PyExc_OverflowError, + "request to allocate %lu items of size %lu is too large", + n, size); + return NULL; + } + void *result = malloc(total); + if (!result) + return PyErr_NoMemory(); + return result; +} + +static void *checked_calloc(size_t n, size_t size) +{ + void *result = calloc(n, size); + if (!result) + PyErr_NoMemory(); + return result; +} #ifndef htonll @@ -70,20 +129,37 @@ static uint64_t htonll(uint64_t value) #endif +#ifdef __clang__ #define INTEGRAL_ASSIGNMENT_FITS(dest, src) \ ({ \ *(dest) = (src); \ *(dest) == (src) && (*(dest) < 1) == ((src) < 1); \ }) +#else +// Disabling sign-compare here should be fine since we're explicitly +// checking for a sign mismatch, i.e. if the signs don't match, then +// it doesn't matter what the value comparison says. +// FIXME: ... so should we reverse the order? +#define INTEGRAL_ASSIGNMENT_FITS(dest, src) \ + ({ \ + _Pragma("GCC diagnostic push"); \ + _Pragma("GCC diagnostic ignored \"-Wsign-compare\""); \ + *(dest) = (src); \ + *(dest) == (src) && (*(dest) < 1) == ((src) < 1); \ + _Pragma("GCC diagnostic pop"); \ + }) +#endif -// At the moment any code that calls INTGER_TO_PY() will have to +// At the moment any code that calls INTEGER_TO_PY() will have to // disable -Wtautological-compare for clang. See below. #define INTEGER_TO_PY(x) \ (((x) >= 0) ? 
PyLong_FromUnsignedLongLong(x) : PyLong_FromLongLong(x)) + +#if PY_MAJOR_VERSION < 3 static int bup_ulong_from_pyint(unsigned long *x, PyObject *py, const char *name) { @@ -104,12 +180,15 @@ static int bup_ulong_from_pyint(unsigned long *x, PyObject *py, *x = tmp; return 1; } +#endif static int bup_ulong_from_py(unsigned long *x, PyObject *py, const char *name) { +#if PY_MAJOR_VERSION < 3 if (PyInt_Check(py)) return bup_ulong_from_pyint(x, py, name); +#endif if (!PyLong_Check(py)) { @@ -148,6 +227,7 @@ static int bup_uint_from_py(unsigned int *x, PyObject *py, const char *name) static int bup_ullong_from_py(unsigned PY_LONG_LONG *x, PyObject *py, const char *name) { +#if PY_MAJOR_VERSION < 3 if (PyInt_Check(py)) { unsigned long tmp; @@ -158,6 +238,7 @@ static int bup_ullong_from_py(unsigned PY_LONG_LONG *x, PyObject *py, } return 0; } +#endif if (!PyLong_Check(py)) { @@ -178,6 +259,27 @@ static int bup_ullong_from_py(unsigned PY_LONG_LONG *x, PyObject *py, } +static PyObject *bup_bytescmp(PyObject *self, PyObject *args) +{ + PyObject *py_s1, *py_s2; // This is really a PyBytes/PyString + if (!PyArg_ParseTuple(args, "SS", &py_s1, &py_s2)) + return NULL; + char *s1, *s2; + Py_ssize_t s1_len, s2_len; + if (PyBytes_AsStringAndSize(py_s1, &s1, &s1_len) == -1) + return NULL; + if (PyBytes_AsStringAndSize(py_s2, &s2, &s2_len) == -1) + return NULL; + const Py_ssize_t n = (s1_len < s2_len) ? s1_len : s2_len; + const int cmp = memcmp(s1, s2, n); + if (cmp != 0) + return PyLong_FromLong(cmp); + if (s1_len == s2_len) + return PyLong_FromLong(0);; + return PyLong_FromLong((s1_len < s2_len) ? -1 : 1); +} + + // Probably we should use autoconf or something and set HAVE_PY_GETARGCARGV... #if __WIN32__ || __CYGWIN__ @@ -229,16 +331,6 @@ static void unpythonize_argv(void) #endif // not __WIN32__ or __CYGWIN__ -static unsigned long long count_leading_zeros(const unsigned char * const buf, - unsigned long long len) -{ - const unsigned char *cur = buf; - while(len-- && *cur == 0) - cur++; - return cur - buf; -} - - static int write_all(int fd, const void *buf, const size_t count) { size_t written = 0; @@ -265,19 +357,156 @@ static int uadd(unsigned long long *dest, } +static PyObject *append_sparse_region(const int fd, unsigned long long n) +{ + while (n) + { + off_t new_off; + if (!INTEGRAL_ASSIGNMENT_FITS(&new_off, n)) + new_off = INT_MAX; + const off_t off = lseek(fd, new_off, SEEK_CUR); + if (off == (off_t) -1) + return PyErr_SetFromErrno(PyExc_IOError); + n -= new_off; + } + return NULL; +} + + +static PyObject *record_sparse_zeros(unsigned long long *new_pending, + const int fd, + unsigned long long prev_pending, + const unsigned long long count) +{ + // Add count additional sparse zeros to prev_pending and store the + // result in new_pending, or if the total won't fit in + // new_pending, write some of the zeros to fd sparsely, and store + // the remaining sum in new_pending. + if (!uadd(new_pending, prev_pending, count)) + { + PyObject *err = append_sparse_region(fd, prev_pending); + if (err != NULL) + return err; + *new_pending = count; + } + return NULL; +} + + +static byte* find_not_zero(const byte * const start, const byte * const end) +{ + // Return a pointer to first non-zero byte between start and end, + // or end if there isn't one. 
+ assert(start <= end); + const unsigned char *cur = start; + while (cur < end && *cur == 0) + cur++; + return (byte *) cur; +} + + +static byte* find_trailing_zeros(const byte * const start, + const byte * const end) +{ + // Return a pointer to the start of any trailing run of zeros, or + // end if there isn't one. + assert(start <= end); + if (start == end) + return (byte *) end; + const byte * cur = end; + while (cur > start && *--cur == 0) {} + if (*cur == 0) + return (byte *) cur; + else + return (byte *) (cur + 1); +} + + +static byte *find_non_sparse_end(const byte * const start, + const byte * const end, + const ptrdiff_t min_len) +{ + // Return the first pointer to a min_len sparse block in [start, + // end) if there is one, otherwise a pointer to the start of any + // trailing run of zeros. If there are no trailing zeros, return + // end. + if (start == end) + return (byte *) end; + assert(start < end); + assert(min_len); + // Probe in min_len jumps, searching backward from the jump + // destination for a non-zero byte. If such a byte is found, move + // just past it and try again. + const byte *candidate = start; + // End of any run of zeros, starting at candidate, that we've already seen + const byte *end_of_known_zeros = candidate; + while (end - candidate >= min_len) // Handle all min_len candidate blocks + { + const byte * const probe_end = candidate + min_len; + const byte * const trailing_zeros = + find_trailing_zeros(end_of_known_zeros, probe_end); + if (trailing_zeros == probe_end) + end_of_known_zeros = candidate = probe_end; + else if (trailing_zeros == end_of_known_zeros) + { + assert(candidate >= start); + assert(candidate <= end); + assert(*candidate == 0); + return (byte *) candidate; + } + else + { + candidate = trailing_zeros; + end_of_known_zeros = probe_end; + } + } + + if (candidate == end) + return (byte *) end; + + // No min_len sparse run found, search backward from end + const byte * const trailing_zeros = find_trailing_zeros(end_of_known_zeros, + end); + + if (trailing_zeros == end_of_known_zeros) + { + assert(candidate >= start); + assert(candidate < end); + assert(*candidate == 0); + assert(end - candidate < min_len); + return (byte *) candidate; + } + + if (trailing_zeros == end) + { + assert(*(end - 1) != 0); + return (byte *) end; + } + + assert(end - trailing_zeros < min_len); + assert(trailing_zeros >= start); + assert(trailing_zeros < end); + assert(*trailing_zeros == 0); + return (byte *) trailing_zeros; +} + + static PyObject *bup_write_sparsely(PyObject *self, PyObject *args) { int fd; unsigned char *buf = NULL; Py_ssize_t sbuf_len; PyObject *py_min_sparse_len, *py_prev_sparse_len; - if (!PyArg_ParseTuple(args, "it#OO", + if (!PyArg_ParseTuple(args, "i" rbuf_argf "OO", &fd, &buf, &sbuf_len, &py_min_sparse_len, &py_prev_sparse_len)) return NULL; - unsigned long long min_sparse_len, prev_sparse_len, buf_len; - if (!bup_ullong_from_py(&min_sparse_len, py_min_sparse_len, "min_sparse_len")) + ptrdiff_t min_sparse_len; + unsigned long long prev_sparse_len, buf_len, ul_min_sparse_len; + if (!bup_ullong_from_py(&ul_min_sparse_len, py_min_sparse_len, "min_sparse_len")) return NULL; + if (!INTEGRAL_ASSIGNMENT_FITS(&min_sparse_len, ul_min_sparse_len)) + return PyErr_Format(PyExc_OverflowError, "min_sparse_len too large"); if (!bup_ullong_from_py(&prev_sparse_len, py_prev_sparse_len, "prev_sparse_len")) return NULL; if (sbuf_len < 0) @@ -285,69 +514,50 @@ static PyObject *bup_write_sparsely(PyObject *self, PyObject *args) if 
(!INTEGRAL_ASSIGNMENT_FITS(&buf_len, sbuf_len)) return PyErr_Format(PyExc_OverflowError, "buffer length too large"); - // For now, there are some cases where we just give up if the - // values are too large, but we could try to break up the relevant - // operations into chunks. - - // Deal with preceding zeros. Just make them sparse, along with - // any leading zeros in buf, even if the region's not >= min, - // since the alternative is a potentially extra small write. - if (prev_sparse_len) + const byte * block = buf; // Start of pending block + const byte * const end = buf + buf_len; + unsigned long long zeros = prev_sparse_len; + while (1) { - const unsigned long long zeros = count_leading_zeros(buf, buf_len); - unsigned long long new_sparse_len = 0; - if (!uadd(&new_sparse_len, prev_sparse_len, zeros)) - return PyErr_Format (PyExc_OverflowError, "sparse region too large"); - if (zeros == buf_len) - return PyLong_FromUnsignedLongLong(new_sparse_len); - - off_t new_off; - if (!INTEGRAL_ASSIGNMENT_FITS(&new_off, new_sparse_len)) - return PyErr_Format(PyExc_OverflowError, - "sparse region too large for seek"); - const off_t off = lseek(fd, new_off, SEEK_CUR); - if (off == -1) - return PyErr_SetFromErrno(PyExc_IOError); - buf += zeros; - buf_len -= zeros; - } + assert(block <= end); + if (block == end) + return PyLong_FromUnsignedLongLong(zeros); - int rc; - unsigned long long unexamined = buf_len; - unsigned char *block_start = buf, *cur = buf; - while(unexamined) - { - const unsigned long long zeros = count_leading_zeros(cur, unexamined); - assert(zeros <= unexamined); - unexamined -= zeros; - if (unexamined == 0) // Runs off the end. + if (*block != 0) { - rc = write_all(fd, block_start, cur - block_start); + // Look for the end of block, i.e. the next sparse run of + // at least min_sparse_len zeros, or the end of the + // buffer. + const byte * const probe = find_non_sparse_end(block + 1, end, + min_sparse_len); + // Either at end of block, or end of non-sparse; write pending data + PyObject *err = append_sparse_region(fd, zeros); + if (err != NULL) + return err; + int rc = write_all(fd, block, probe - block); if (rc) return PyErr_SetFromErrno(PyExc_IOError); - return PyLong_FromUnsignedLongLong(zeros); - } - cur += zeros; - if (zeros >= min_sparse_len) - { - off_t new_off; - if (!INTEGRAL_ASSIGNMENT_FITS(&new_off, zeros)) - return PyErr_Format(PyExc_ValueError, - "zero count overflows off_t"); - off_t off = lseek(fd, new_off, SEEK_CUR); - if (off == -1) - return PyErr_SetFromErrno(PyExc_IOError); - block_start = cur; + + if (end - probe < min_sparse_len) + zeros = end - probe; + else + zeros = min_sparse_len; + block = probe + zeros; } - while (unexamined && *cur != 0) + else // *block == 0 { - cur++; unexamined--; + // Should be in the first loop iteration, a sparse run of + // zeros, or nearly at the end of the block (within + // min_sparse_len). 
+ const byte * const zeros_end = find_not_zero(block, end); + PyObject *err = record_sparse_zeros(&zeros, fd, + zeros, zeros_end - block); + if (err != NULL) + return err; + assert(block <= zeros_end); + block = zeros_end; } } - rc = write_all(fd, block_start, cur - block_start); - if (rc) - return PyErr_SetFromErrno(PyExc_IOError); - return PyInt_FromLong(0); } @@ -370,14 +580,29 @@ static PyObject *blobbits(PyObject *self, PyObject *args) static PyObject *splitbuf(PyObject *self, PyObject *args) { - unsigned char *buf = NULL; - Py_ssize_t len = 0; + // We stick to buffers in python 2 because they appear to be + // substantially smaller than memoryviews, and because + // zlib.compress() in python 2 can't accept a memoryview + // (cf. hashsplit.py). int out = 0, bits = -1; - - if (!PyArg_ParseTuple(args, "t#", &buf, &len)) - return NULL; - assert(len <= INT_MAX); - out = bupsplit_find_ofs(buf, len, &bits); + if (PY_MAJOR_VERSION > 2) + { + Py_buffer buf; + if (!PyArg_ParseTuple(args, "y*", &buf)) + return NULL; + assert(buf.len <= INT_MAX); + out = bupsplit_find_ofs(buf.buf, buf.len, &bits); + PyBuffer_Release(&buf); + } + else + { + unsigned char *buf = NULL; + Py_ssize_t len = 0; + if (!PyArg_ParseTuple(args, "t#", &buf, &len)) + return NULL; + assert(len <= INT_MAX); + out = bupsplit_find_ofs(buf, len, &bits); + } if (out) assert(bits >= BUP_BLOBBITS); return Py_BuildValue("ii", out, bits); } @@ -390,7 +615,7 @@ static PyObject *bitmatch(PyObject *self, PyObject *args) Py_ssize_t byte; int bit; - if (!PyArg_ParseTuple(args, "t#t#", &buf1, &len1, &buf2, &len2)) + if (!PyArg_ParseTuple(args, rbuf_argf rbuf_argf, &buf1, &len1, &buf2, &len2)) return NULL; bit = 0; @@ -417,7 +642,7 @@ static PyObject *firstword(PyObject *self, PyObject *args) Py_ssize_t len = 0; uint32_t v; - if (!PyArg_ParseTuple(args, "t#", &buf, &len)) + if (!PyArg_ParseTuple(args, rbuf_argf, &buf, &len)) return NULL; if (len < 4) @@ -486,72 +711,95 @@ BLOOM_GET_BIT(bloom_get_bit5, to_bloom_address_bitmask5, uint32_t) static PyObject *bloom_add(PyObject *self, PyObject *args) { - unsigned char *sha = NULL, *bloom = NULL; - unsigned char *end; - Py_ssize_t len = 0, blen = 0; + Py_buffer bloom, sha; int nbits = 0, k = 0; + if (!PyArg_ParseTuple(args, wbuf_argf wbuf_argf "ii", + &bloom, &sha, &nbits, &k)) + return NULL; - if (!PyArg_ParseTuple(args, "w#s#ii", &bloom, &blen, &sha, &len, &nbits, &k)) - return NULL; + PyObject *result = NULL; - if (blen < 16+(1< 29) - return NULL; - for (end = sha + len; sha < end; sha += 20/k) - bloom_set_bit5(bloom, sha, nbits); + if (nbits > 29) + goto clean_and_return; + unsigned char *cur = sha.buf; + unsigned char *end; + for (end = cur + sha.len; cur < end; cur += 20/k) + bloom_set_bit5(bloom.buf, cur, nbits); } else if (k == 4) { - if (nbits > 37) - return NULL; - for (end = sha + len; sha < end; sha += 20/k) - bloom_set_bit4(bloom, sha, nbits); + if (nbits > 37) + goto clean_and_return; + unsigned char *cur = sha.buf; + unsigned char *end = cur + sha.len; + for (; cur < end; cur += 20/k) + bloom_set_bit4(bloom.buf, cur, nbits); } else - return NULL; + goto clean_and_return; + result = Py_BuildValue("n", sha.len / 20); - return Py_BuildValue("n", len/20); + clean_and_return: + PyBuffer_Release(&bloom); + PyBuffer_Release(&sha); + return result; } static PyObject *bloom_contains(PyObject *self, PyObject *args) { - unsigned char *sha = NULL, *bloom = NULL; - Py_ssize_t len = 0, blen = 0; + Py_buffer bloom; + unsigned char *sha = NULL; + Py_ssize_t len = 0; int nbits = 0, k = 0; - unsigned 
char *end; - int steps; + if (!PyArg_ParseTuple(args, wbuf_argf rbuf_argf "ii", + &bloom, &sha, &len, &nbits, &k)) + return NULL; - if (!PyArg_ParseTuple(args, "t#s#ii", &bloom, &blen, &sha, &len, &nbits, &k)) - return NULL; + PyObject *result = NULL; if (len != 20) - return NULL; + goto clean_and_return; if (k == 5) { - if (nbits > 29) - return NULL; - for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++) - if (!bloom_get_bit5(bloom, sha, nbits)) - return Py_BuildValue("Oi", Py_None, steps); + if (nbits > 29) + goto clean_and_return; + int steps; + unsigned char *end; + for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++) + if (!bloom_get_bit5(bloom.buf, sha, nbits)) + { + result = Py_BuildValue("Oi", Py_None, steps); + goto clean_and_return; + } } else if (k == 4) { - if (nbits > 37) - return NULL; - for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++) - if (!bloom_get_bit4(bloom, sha, nbits)) - return Py_BuildValue("Oi", Py_None, steps); + if (nbits > 37) + goto clean_and_return; + int steps; + unsigned char *end; + for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++) + if (!bloom_get_bit4(bloom.buf, sha, nbits)) + { + result = Py_BuildValue("Oi", Py_None, steps); + goto clean_and_return; + } } else - return NULL; + goto clean_and_return; + + result = Py_BuildValue("ii", 1, k); - return Py_BuildValue("ii", 1, k); + clean_and_return: + PyBuffer_Release(&bloom); + return result; } @@ -572,7 +820,7 @@ static PyObject *extract_bits(PyObject *self, PyObject *args) Py_ssize_t len = 0; int nbits = 0; - if (!PyArg_ParseTuple(args, "t#i", &buf, &len, &nbits)) + if (!PyArg_ParseTuple(args, rbuf_argf "i", &buf, &len, &nbits)) return NULL; if (len < 4) @@ -586,6 +834,11 @@ struct sha { unsigned char bytes[20]; }; +static inline int _cmp_sha(const struct sha *sha1, const struct sha *sha2) +{ + return memcmp(sha1->bytes, sha2->bytes, sizeof(sha1->bytes)); +} + struct idx { unsigned char *map; @@ -596,18 +849,7 @@ struct idx { int name_base; }; - -static int _cmp_sha(const struct sha *sha1, const struct sha *sha2) -{ - int i; - for (i = 0; i < sizeof(struct sha); i++) - if (sha1->bytes[i] != sha2->bytes[i]) - return sha1->bytes[i] - sha2->bytes[i]; - return 0; -} - - -static void _fix_idx_order(struct idx **idxs, int *last_i) +static void _fix_idx_order(struct idx **idxs, Py_ssize_t *last_i) { struct idx *idx; int low, mid, high, c = 0; @@ -657,36 +899,51 @@ static uint32_t _get_idx_i(struct idx *idx) static PyObject *merge_into(PyObject *self, PyObject *args) { - PyObject *py_total, *ilist = NULL; - unsigned char *fmap = NULL; struct sha *sha_ptr, *sha_start = NULL; uint32_t *table_ptr, *name_ptr, *name_start; - struct idx **idxs = NULL; - Py_ssize_t flen = 0; - int bits = 0, i; + int i; unsigned int total; uint32_t count, prefix; - int num_i; - int last_i; - if (!PyArg_ParseTuple(args, "w#iOO", - &fmap, &flen, &bits, &py_total, &ilist)) + + Py_buffer fmap; + int bits;; + PyObject *py_total, *ilist = NULL; + if (!PyArg_ParseTuple(args, wbuf_argf "iOO", + &fmap, &bits, &py_total, &ilist)) return NULL; + PyObject *result = NULL; + struct idx **idxs = NULL; + Py_ssize_t num_i = 0; + int *idx_buf_init = NULL; + Py_buffer *idx_buf = NULL; + if (!bup_uint_from_py(&total, py_total, "total")) - return NULL; + goto clean_and_return; num_i = PyList_Size(ilist); - idxs = (struct idx **)PyMem_Malloc(num_i * sizeof(struct idx *)); + + if (!(idxs = checked_malloc(num_i, sizeof(struct idx *)))) + goto clean_and_return; + if (!(idx_buf_init = checked_calloc(num_i, 
sizeof(int)))) + goto clean_and_return; + if (!(idx_buf = checked_malloc(num_i, sizeof(Py_buffer)))) + goto clean_and_return; for (i = 0; i < num_i; i++) { long len, sha_ofs, name_map_ofs; - idxs[i] = (struct idx *)PyMem_Malloc(sizeof(struct idx)); + if (!(idxs[i] = checked_malloc(1, sizeof(struct idx)))) + goto clean_and_return; PyObject *itup = PyList_GetItem(ilist, i); - if (!PyArg_ParseTuple(itup, "t#llli", &idxs[i]->map, &idxs[i]->bytes, - &len, &sha_ofs, &name_map_ofs, &idxs[i]->name_base)) + if (!PyArg_ParseTuple(itup, wbuf_argf "llli", + &(idx_buf[i]), &len, &sha_ofs, &name_map_ofs, + &idxs[i]->name_base)) return NULL; + idx_buf_init[i] = 1; + idxs[i]->map = idx_buf[i].buf; + idxs[i]->bytes = idx_buf[i].len; idxs[i]->cur = (struct sha *)&idxs[i]->map[sha_ofs]; idxs[i]->end = &idxs[i]->cur[len]; if (name_map_ofs) @@ -694,18 +951,18 @@ static PyObject *merge_into(PyObject *self, PyObject *args) else idxs[i]->cur_name = NULL; } - table_ptr = (uint32_t *)&fmap[MIDX4_HEADERLEN]; + table_ptr = (uint32_t *) &((unsigned char *) fmap.buf)[MIDX4_HEADERLEN]; sha_start = sha_ptr = (struct sha *)&table_ptr[1<= 0) { struct idx *idx; uint32_t new_prefix; - if (count % 102424 == 0 && istty2) + if (count % 102424 == 0 && get_state(self)->istty2) fprintf(stderr, "midx: writing %.2f%% (%d/%d)\r", count*100.0/total, count, total); idx = idxs[last_i]; @@ -720,15 +977,32 @@ static PyObject *merge_into(PyObject *self, PyObject *args) _fix_idx_order(idxs, &last_i); ++count; } - while (prefix < (1< 0x7fffffff) @@ -808,11 +1086,18 @@ static PyObject *write_idx(PyObject *self, PyObject *args) } } - int rc = msync(fmap, flen, MS_ASYNC); + int rc = msync(fmap.buf, fmap.len, MS_ASYNC); if (rc != 0) - return PyErr_SetFromErrnoWithFilename(PyExc_IOError, filename); + { + result = PyErr_SetFromErrnoWithFilename(PyExc_IOError, filename); + goto clean_and_return; + } - return PyLong_FromUnsignedLong(count); + result = PyLong_FromUnsignedLong(count); + + clean_and_return: + PyBuffer_Release(&fmap); + return result; } @@ -922,7 +1207,7 @@ static PyObject *open_noatime(PyObject *self, PyObject *args) { char *filename = NULL; int fd; - if (!PyArg_ParseTuple(args, "s", &filename)) + if (!PyArg_ParseTuple(args, cstr_argf, &filename)) return NULL; fd = _open_noatime(filename, 0); if (fd < 0) @@ -934,11 +1219,18 @@ static PyObject *open_noatime(PyObject *self, PyObject *args) static PyObject *fadvise_done(PyObject *self, PyObject *args) { int fd = -1; - long long ofs = 0; - if (!PyArg_ParseTuple(args, "iL", &fd, &ofs)) + long long llofs, lllen = 0; + if (!PyArg_ParseTuple(args, "iLL", &fd, &llofs, &lllen)) return NULL; + off_t ofs, len; + if (!INTEGRAL_ASSIGNMENT_FITS(&ofs, llofs)) + return PyErr_Format(PyExc_OverflowError, + "fadvise offset overflows off_t"); + if (!INTEGRAL_ASSIGNMENT_FITS(&len, lllen)) + return PyErr_Format(PyExc_OverflowError, + "fadvise length overflows off_t"); #ifdef POSIX_FADV_DONTNEED - posix_fadvise(fd, 0, ofs, POSIX_FADV_DONTNEED); + posix_fadvise(fd, ofs, len, POSIX_FADV_DONTNEED); #endif return Py_BuildValue(""); } @@ -962,7 +1254,7 @@ static PyObject *bup_get_linux_file_attr(PyObject *self, PyObject *args) char *path; int fd; - if (!PyArg_ParseTuple(args, "s", &path)) + if (!PyArg_ParseTuple(args, cstr_argf, &path)) return NULL; fd = _open_noatime(path, O_NONBLOCK); @@ -994,7 +1286,7 @@ static PyObject *bup_set_linux_file_attr(PyObject *self, PyObject *args) PyObject *py_attr; int fd; - if (!PyArg_ParseTuple(args, "sO", &path, &py_attr)) + if (!PyArg_ParseTuple(args, cstr_argf "O", &path, 
&py_attr)) return NULL; if (!bup_uint_from_py(&attr, py_attr, "attr")) @@ -1015,12 +1307,12 @@ static PyObject *bup_set_linux_file_attr(PyObject *self, PyObject *args) // The extents flag can't be removed, so don't (see chattr(1) and chattr.c). orig_attr = 0; // Handle int/long mismatch (see above) rc = ioctl(fd, FS_IOC_GETFLAGS, &orig_attr); - assert(orig_attr <= UINT_MAX); // Kernel type is actually int if (rc == -1) { close(fd); return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path); } + assert(orig_attr <= UINT_MAX); // Kernel type is actually int attr |= ((unsigned int) orig_attr) & FS_EXTENT_FL; rc = ioctl(fd, FS_IOC_SETFLAGS, &attr); @@ -1090,7 +1382,7 @@ static PyObject *bup_utimensat(PyObject *self, PyObject *args) PyObject *access_py, *modification_py; struct timespec ts[2]; - if (!PyArg_ParseTuple(args, "is((Ol)(Ol))i", + if (!PyArg_ParseTuple(args, "i" cstr_argf "((Ol)(Ol))i", &fd, &path, &access_py, &(ts[0].tv_nsec), @@ -1132,7 +1424,7 @@ static int bup_parse_xutimes_args(char **path, PyObject *access_py, *modification_py; long long access_us, modification_us; // POSIX guarantees tv_usec is signed. - if (!PyArg_ParseTuple(args, "s((OL)(OL))", + if (!PyArg_ParseTuple(args, cstr_argf "((OL)(OL))", path, &access_py, &access_us, &modification_py, &modification_us)) @@ -1224,7 +1516,7 @@ static PyObject *stat_struct_to_py(const struct stat *st, // compile time, but not (easily) the unspecified types, so handle // those via INTEGER_TO_PY(). Assumes ns values will fit in a // long. - return Py_BuildValue("OKOOOOOL(Ol)(Ol)(Ol)", + return Py_BuildValue("NKNNNNNL(Nl)(Nl)(Nl)", INTEGER_TO_PY(st->st_mode), (unsigned PY_LONG_LONG) st->st_ino, INTEGER_TO_PY(st->st_dev), @@ -1248,7 +1540,7 @@ static PyObject *bup_stat(PyObject *self, PyObject *args) int rc; char *filename; - if (!PyArg_ParseTuple(args, "s", &filename)) + if (!PyArg_ParseTuple(args, cstr_argf, &filename)) return NULL; struct stat st; @@ -1264,7 +1556,7 @@ static PyObject *bup_lstat(PyObject *self, PyObject *args) int rc; char *filename; - if (!PyArg_ParseTuple(args, "s", &filename)) + if (!PyArg_ParseTuple(args, cstr_argf, &filename)) return NULL; struct stat st; @@ -1290,6 +1582,92 @@ static PyObject *bup_fstat(PyObject *self, PyObject *args) } +#ifdef HAVE_TM_TM_GMTOFF +static PyObject *bup_localtime(PyObject *self, PyObject *args) +{ + long long lltime; + time_t ttime; + if (!PyArg_ParseTuple(args, "L", &lltime)) + return NULL; + if (!INTEGRAL_ASSIGNMENT_FITS(&ttime, lltime)) + return PyErr_Format(PyExc_OverflowError, "time value too large"); + + struct tm tm; + tzset(); + if(localtime_r(&ttime, &tm) == NULL) + return PyErr_SetFromErrno(PyExc_OSError); + + // Match the Python struct_time values. 
+ return Py_BuildValue("[i,i,i,i,i,i,i,i,i,i,s]", + 1900 + tm.tm_year, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, + tm.tm_wday, tm.tm_yday + 1, + tm.tm_isdst, tm.tm_gmtoff, tm.tm_zone); +} +#endif /* def HAVE_TM_TM_GMTOFF */ + + +#ifdef BUP_MINCORE_BUF_TYPE +static PyObject *bup_mincore(PyObject *self, PyObject *args) +{ + Py_buffer src, dest; + PyObject *py_src_n, *py_src_off, *py_dest_off; + + if (!PyArg_ParseTuple(args, cstr_argf "*OOw*O", + &src, &py_src_n, &py_src_off, + &dest, &py_dest_off)) + return NULL; + + PyObject *result = NULL; + + unsigned long long src_n, src_off, dest_off; + if (!(bup_ullong_from_py(&src_n, py_src_n, "src_n") + && bup_ullong_from_py(&src_off, py_src_off, "src_off") + && bup_ullong_from_py(&dest_off, py_dest_off, "dest_off"))) + goto clean_and_return; + + unsigned long long src_region_end; + if (!uadd(&src_region_end, src_off, src_n)) { + result = PyErr_Format(PyExc_OverflowError, "(src_off + src_n) too large"); + goto clean_and_return; + } + assert(src.len >= 0); + if (src_region_end > (unsigned long long) src.len) { + result = PyErr_Format(PyExc_OverflowError, "region runs off end of src"); + goto clean_and_return; + } + + unsigned long long dest_size; + if (!INTEGRAL_ASSIGNMENT_FITS(&dest_size, dest.len)) { + result = PyErr_Format(PyExc_OverflowError, "invalid dest size"); + goto clean_and_return; + } + if (dest_off > dest_size) { + result = PyErr_Format(PyExc_OverflowError, "region runs off end of dest"); + goto clean_and_return; + } + + size_t length; + if (!INTEGRAL_ASSIGNMENT_FITS(&length, src_n)) { + result = PyErr_Format(PyExc_OverflowError, "src_n overflows size_t"); + goto clean_and_return; + } + int rc = mincore((void *)(src.buf + src_off), src_n, + (BUP_MINCORE_BUF_TYPE *) (dest.buf + dest_off)); + if (rc != 0) { + result = PyErr_SetFromErrno(PyExc_OSError); + goto clean_and_return; + } + result = Py_BuildValue("O", Py_None); + + clean_and_return: + PyBuffer_Release(&src); + PyBuffer_Release(&dest); + return result; +} +#endif /* def BUP_MINCORE_BUF_TYPE */ + + static PyMethodDef helper_methods[] = { { "write_sparsely", bup_write_sparsely, METH_VARARGS, "Write buf excepting zeros at the end. Return trailing zero count." }, @@ -1348,15 +1726,55 @@ static PyMethodDef helper_methods[] = { "Extended version of lstat." }, { "fstat", bup_fstat, METH_VARARGS, "Extended version of fstat." }, +#ifdef HAVE_TM_TM_GMTOFF + { "localtime", bup_localtime, METH_VARARGS, + "Return struct_time elements plus the timezone offset and name." }, +#endif + { "bytescmp", bup_bytescmp, METH_VARARGS, + "Return a negative value if x < y, zero if equal, positive otherwise."}, +#ifdef BUP_MINCORE_BUF_TYPE + { "mincore", bup_mincore, METH_VARARGS, + "For mincore(src, src_n, src_off, dest, dest_off)" + " call the system mincore(src + src_off, src_n, &dest[dest_off])." 
}, +#endif { NULL, NULL, 0, NULL }, // sentinel }; +static void test_integral_assignment_fits(void) +{ + assert(sizeof(signed short) == sizeof(unsigned short)); + assert(sizeof(signed short) < sizeof(signed long long)); + assert(sizeof(signed short) < sizeof(unsigned long long)); + assert(sizeof(unsigned short) < sizeof(signed long long)); + assert(sizeof(unsigned short) < sizeof(unsigned long long)); + assert(sizeof(Py_ssize_t) <= sizeof(size_t)); + { + signed short ss, ssmin = SHRT_MIN, ssmax = SHRT_MAX; + unsigned short us, usmax = USHRT_MAX; + signed long long sllmin = LLONG_MIN, sllmax = LLONG_MAX; + unsigned long long ullmax = ULLONG_MAX; + + assert(INTEGRAL_ASSIGNMENT_FITS(&ss, ssmax)); + assert(INTEGRAL_ASSIGNMENT_FITS(&ss, ssmin)); + assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, usmax)); + assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, sllmin)); + assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, sllmax)); + assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, ullmax)); + + assert(INTEGRAL_ASSIGNMENT_FITS(&us, usmax)); + assert(!INTEGRAL_ASSIGNMENT_FITS(&us, ssmin)); + assert(!INTEGRAL_ASSIGNMENT_FITS(&us, sllmin)); + assert(!INTEGRAL_ASSIGNMENT_FITS(&us, sllmax)); + assert(!INTEGRAL_ASSIGNMENT_FITS(&us, ullmax)); + } +} -PyMODINIT_FUNC init_helpers(void) +static int setup_module(PyObject *m) { - // FIXME: migrate these tests to configure. Check against the - // type we're going to use when passing to python. Other stat - // types are tested at runtime. + // FIXME: migrate these tests to configure, or at least don't + // possibly crash the whole application. Check against the type + // we're going to use when passing to python. Other stat types + // are tested at runtime. assert(sizeof(ino_t) <= sizeof(unsigned PY_LONG_LONG)); assert(sizeof(off_t) <= sizeof(PY_LONG_LONG)); assert(sizeof(blksize_t) <= sizeof(PY_LONG_LONG)); @@ -1365,13 +1783,30 @@ PyMODINIT_FUNC init_helpers(void) assert(sizeof(PY_LONG_LONG) <= sizeof(long long)); assert(sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned long long)); - char *e; - PyObject *m = Py_InitModule("_helpers", helper_methods); - if (m == NULL) - return; + test_integral_assignment_fits(); + // Originally required by append_sparse_region() + { + off_t probe; + if (!INTEGRAL_ASSIGNMENT_FITS(&probe, INT_MAX)) + { + fprintf(stderr, "off_t can't hold INT_MAX; please report.\n"); + exit(1); + } + } + + char *e; #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wtautological-compare" // For INTEGER_TO_PY(). + { + PyObject *value; + value = INTEGER_TO_PY(INT_MAX); + PyObject_SetAttrString(m, "INT_MAX", value); + Py_DECREF(value); + value = INTEGER_TO_PY(UINT_MAX); + PyObject_SetAttrString(m, "UINT_MAX", value); + Py_DECREF(value); + } #ifdef HAVE_UTIMENSAT { PyObject *value; @@ -1386,21 +1821,63 @@ PyMODINIT_FUNC init_helpers(void) Py_DECREF(value); } #endif +#ifdef BUP_HAVE_MINCORE_INCORE { PyObject *value; - const long arg_max = sysconf(_SC_ARG_MAX); - if (arg_max == -1) - { - fprintf(stderr, "Cannot find SC_ARG_MAX, please report a bug.\n"); - exit(1); - } - value = INTEGER_TO_PY(arg_max); - PyObject_SetAttrString(m, "SC_ARG_MAX", value); + value = INTEGER_TO_PY(MINCORE_INCORE); + PyObject_SetAttrString(m, "MINCORE_INCORE", value); Py_DECREF(value); } +#endif #pragma clang diagnostic pop // ignored "-Wtautological-compare" e = getenv("BUP_FORCE_TTY"); - istty2 = isatty(2) || (atoi(e ? e : "0") & 2); + get_state(m)->istty2 = isatty(2) || (atoi(e ? 
e : "0") & 2); unpythonize_argv(); + return 1; } + + +#if PY_MAJOR_VERSION < 3 + +PyMODINIT_FUNC init_helpers(void) +{ + PyObject *m = Py_InitModule("_helpers", helper_methods); + if (m == NULL) + return; + + if (!setup_module(m)) + { + Py_DECREF(m); + return; + } +} + +# else // PY_MAJOR_VERSION >= 3 + +static struct PyModuleDef helpers_def = { + PyModuleDef_HEAD_INIT, + "_helpers", + NULL, + sizeof(state_t), + helper_methods, + NULL, + NULL, // helpers_traverse, + NULL, // helpers_clear, + NULL +}; + +PyMODINIT_FUNC PyInit__helpers(void) +{ + PyObject *module = PyModule_Create(&helpers_def); + if (module == NULL) + return NULL; + if (!setup_module(module)) + { + Py_DECREF(module); + return NULL; + } + return module; +} + +#endif // PY_MAJOR_VERSION >= 3
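
Note: the per-module state plumbing introduced above (state_t, get_state(), setup_module(), and the split init_helpers()/PyInit__helpers() entry points) is spread across several hunks. The following is a minimal standalone sketch of the same pattern using a hypothetical module named "example" rather than bup's _helpers: on Python 2 the state lives in a static struct, while on Python 3 the interpreter allocates it (m_size = sizeof(state_t)) and hands it back through PyModule_GetState().

#include <Python.h>
#include <unistd.h>     // isatty()

typedef struct { int istty2; } state_t;

#if PY_MAJOR_VERSION < 3
static state_t state;
#  define get_state(mod) (&state)
#else
#  define get_state(mod) ((state_t *) PyModule_GetState(mod))
#endif

// A module-level function receives the module object as "self", so it can
// reach the per-module state without any file-scope globals.
static PyObject *example_istty2(PyObject *self, PyObject *args)
{
    return PyBool_FromLong(get_state(self)->istty2);
}

static PyMethodDef example_methods[] = {
    { "istty2", example_istty2, METH_NOARGS,
      "Return True if stderr was a tty when the module was initialized." },
    { NULL, NULL, 0, NULL }   // sentinel
};

// Shared by both init paths, mirroring setup_module() in the patch.
static int setup_module(PyObject *m)
{
    get_state(m)->istty2 = isatty(2);
    return 1;
}

#if PY_MAJOR_VERSION < 3

PyMODINIT_FUNC initexample(void)
{
    PyObject *m = Py_InitModule("example", example_methods);
    if (m == NULL)
        return;
    if (!setup_module(m))
        Py_DECREF(m);
}

#else // PY_MAJOR_VERSION >= 3

static struct PyModuleDef example_def = {
    PyModuleDef_HEAD_INIT,
    "example",
    NULL,                 // no module docstring
    sizeof(state_t),      // m_size: interpreter allocates the state
    example_methods,
    NULL, NULL, NULL, NULL
};

PyMODINIT_FUNC PyInit_example(void)
{
    PyObject *m = PyModule_Create(&example_def);
    if (m == NULL)
        return NULL;
    if (!setup_module(m))
    {
        Py_DECREF(m);
        return NULL;
    }
    return m;
}

#endif // PY_MAJOR_VERSION >= 3

bup's real setup_module() additionally honors the BUP_FORCE_TTY environment variable before storing the flag, as shown in the hunk above.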
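
Similarly, INTEGRAL_ASSIGNMENT_FITS() is used throughout the new code (append_sparse_region, fadvise_done, bup_localtime, bup_mincore, ...) as a portable overflow check: assign, then verify that both the value and its sign survived the round trip. The toy program below is not part of the patch; it only assumes the GCC/Clang statement-expression extension that _helpers.c already relies on, and shows how the check behaves for a few conversions.

#include <limits.h>
#include <stdio.h>

// Same idea as INTEGRAL_ASSIGNMENT_FITS(): the statement expression
// evaluates to 1 only if *dest still equals src and has the same sign.
#define FITS(dest, src)                                         \
    ({                                                          \
        *(dest) = (src);                                        \
        *(dest) == (src) && (*(dest) < 1) == ((src) < 1);       \
    })

int main(void)
{
    short s;
    unsigned long long u;

    printf("short  <- 123:       %d\n", FITS(&s, 123LL));       // 1, fits
    printf("short  <- LLONG_MAX: %d\n", FITS(&s, LLONG_MAX));   // 0, truncated
    printf("ullong <- -1:        %d\n", FITS(&u, -1LL));        // 0, sign lost
    return 0;
}

As the comment in the patch notes, GCC wants -Wsign-compare suppressed around the mixed-sign comparison; the check itself remains valid because a sign mismatch makes the second clause false.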