#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
+#include <string.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
#ifdef HAVE_LINUX_FS_H
#include <linux/fs.h>
typedef unsigned char byte;
-static int istty2 = 0;
+
+// Per-module state.  Under Python 3 each module object carries its own
+// copy (retrieved via PyModule_GetState); under Python 2 a single
+// static instance is shared -- see get_state() below.
+typedef struct {
+    int istty2; // nonzero when stderr is a tty, or BUP_FORCE_TTY has bit 2 set
+} state_t;
+
+// cstr_argf: for byte vectors without null characters (e.g. paths)
+// rbuf_argf: for read-only byte vectors
+// wbuf_argf: for mutable byte vectors
+
+#if PY_MAJOR_VERSION < 3
+static state_t state;
+# define get_state(x) (&state)
+# define cstr_argf "s"
+# define rbuf_argf "s#"
+# define wbuf_argf "s*"
+#else
+# define get_state(x) ((state_t *) PyModule_GetState(x))
+# define cstr_argf "y"
+# define rbuf_argf "y#"
+# define wbuf_argf "y*"
+#endif // PY_MAJOR_VERSION >= 3
+
+
+// Allocate n * size bytes, guarding against multiplication overflow.
+// Returns NULL with a Python exception set (OverflowError on overflow,
+// MemoryError on allocation failure); the caller owns the result and
+// must free() it.
+static void *checked_malloc(size_t n, size_t size)
+{
+    size_t total;
+    if (__builtin_mul_overflow(n, size, &total))
+    {
+        // %zu matches size_t exactly; %lu is undefined behavior where
+        // size_t != unsigned long (e.g. LLP64 / 64-bit Windows).
+        PyErr_Format(PyExc_OverflowError,
+                     "request to allocate %zu items of size %zu is too large",
+                     n, size);
+        return NULL;
+    }
+    void *result = malloc(total);
+    if (!result)
+        return PyErr_NoMemory(); // sets MemoryError, returns NULL
+    return result;
+}
+
+// Zero-initialized counterpart of checked_malloc(); calloc() itself
+// rejects an overflowing n * size.  Returns NULL with MemoryError set
+// on failure; the caller owns the result and must free() it.
+static void *checked_calloc(size_t n, size_t size)
+{
+    void *const result = calloc(n, size);
+    if (result != NULL)
+        return result;
+    PyErr_NoMemory();
+    return NULL;
+}
#ifndef htonll
#endif
+// Disabling sign-compare here should be fine since we're explicitly
+// checking for a sign mismatch, i.e. if the signs don't match, then
+// it doesn't matter what the value comparison says.
+// FIXME: ... so should we reverse the order?
#define INTEGRAL_ASSIGNMENT_FITS(dest, src) \
({ \
+ _Pragma("GCC diagnostic push"); \
+ _Pragma("GCC diagnostic ignored \"-Wsign-compare\""); \
*(dest) = (src); \
- *(dest) == (src) && (*(dest) < 1) == ((src) < 1); \
+ int result = *(dest) == (src) && (*(dest) < 1) == ((src) < 1); \
+ _Pragma("GCC diagnostic pop"); \
+ result; \
})
-// At the moment any code that calls INTGER_TO_PY() will have to
+// At the moment any code that calls INTEGER_TO_PY() will have to
// disable -Wtautological-compare for clang. See below.
#define INTEGER_TO_PY(x) \
(((x) >= 0) ? PyLong_FromUnsignedLongLong(x) : PyLong_FromLongLong(x))
+
+#if PY_MAJOR_VERSION < 3
static int bup_ulong_from_pyint(unsigned long *x, PyObject *py,
const char *name)
{
*x = tmp;
return 1;
}
+#endif
static int bup_ulong_from_py(unsigned long *x, PyObject *py, const char *name)
{
+#if PY_MAJOR_VERSION < 3
if (PyInt_Check(py))
return bup_ulong_from_pyint(x, py, name);
+#endif
if (!PyLong_Check(py))
{
static int bup_ullong_from_py(unsigned PY_LONG_LONG *x, PyObject *py,
const char *name)
{
+#if PY_MAJOR_VERSION < 3
if (PyInt_Check(py))
{
unsigned long tmp;
}
return 0;
}
+#endif
if (!PyLong_Check(py))
{
}
+// bytescmp(s1, s2): memcmp-style three-way comparison of two byte
+// strings ("SS" restricts the arguments to PyBytes/PyString).  Returns
+// a negative, zero, or positive int object, or NULL with an exception
+// set on argument errors.
+static PyObject *bup_bytescmp(PyObject *self, PyObject *args)
+{
+    PyObject *py_s1, *py_s2; // This is really a PyBytes/PyString
+    if (!PyArg_ParseTuple(args, "SS", &py_s1, &py_s2))
+        return NULL;
+    char *s1, *s2;
+    Py_ssize_t s1_len, s2_len;
+    if (PyBytes_AsStringAndSize(py_s1, &s1, &s1_len) == -1)
+        return NULL;
+    if (PyBytes_AsStringAndSize(py_s2, &s2, &s2_len) == -1)
+        return NULL;
+    // Compare the common prefix; fall back to the lengths only when the
+    // prefixes match (the shorter string sorts first).
+    const Py_ssize_t n = (s1_len < s2_len) ? s1_len : s2_len;
+    const int cmp = memcmp(s1, s2, n);
+    if (cmp != 0)
+        return PyLong_FromLong(cmp);
+    if (s1_len == s2_len)
+        return PyLong_FromLong(0);
+    return PyLong_FromLong((s1_len < s2_len) ? -1 : 1);
+}
+
+
// Probably we should use autoconf or something and set HAVE_PY_GETARGCARGV...
#if __WIN32__ || __CYGWIN__
}
-static const byte * find_not_zero(const byte * const start,
- const byte * const end)
+static byte* find_not_zero(const byte * const start, const byte * const end)
{
// Return a pointer to first non-zero byte between start and end,
// or end if there isn't one.
const unsigned char *cur = start;
while (cur < end && *cur == 0)
cur++;
- return cur;
+ return (byte *) cur;
}
-static const byte * const find_trailing_zeros(const byte * const start,
- const byte * const end)
+static byte* find_trailing_zeros(const byte * const start,
+ const byte * const end)
{
// Return a pointer to the start of any trailing run of zeros, or
// end if there isn't one.
assert(start <= end);
if (start == end)
- return end;
+ return (byte *) end;
const byte * cur = end;
while (cur > start && *--cur == 0) {}
if (*cur == 0)
- return cur;
+ return (byte *) cur;
else
- return cur + 1;
+ return (byte *) (cur + 1);
}
-static const byte *find_non_sparse_end(const byte * const start,
- const byte * const end,
- const unsigned long long min_len)
+static byte *find_non_sparse_end(const byte * const start,
+ const byte * const end,
+ const ptrdiff_t min_len)
{
// Return the first pointer to a min_len sparse block in [start,
// end) if there is one, otherwise a pointer to the start of any
// trailing run of zeros. If there are no trailing zeros, return
// end.
if (start == end)
- return end;
+ return (byte *) end;
assert(start < end);
assert(min_len);
// Probe in min_len jumps, searching backward from the jump
assert(candidate >= start);
assert(candidate <= end);
assert(*candidate == 0);
- return candidate;
+ return (byte *) candidate;
}
else
{
}
if (candidate == end)
- return end;
+ return (byte *) end;
// No min_len sparse run found, search backward from end
const byte * const trailing_zeros = find_trailing_zeros(end_of_known_zeros,
assert(candidate < end);
assert(*candidate == 0);
assert(end - candidate < min_len);
- return candidate;
+ return (byte *) candidate;
}
if (trailing_zeros == end)
{
assert(*(end - 1) != 0);
- return end;
+ return (byte *) end;
}
assert(end - trailing_zeros < min_len);
assert(trailing_zeros >= start);
assert(trailing_zeros < end);
assert(*trailing_zeros == 0);
- return trailing_zeros;
+ return (byte *) trailing_zeros;
}
unsigned char *buf = NULL;
Py_ssize_t sbuf_len;
PyObject *py_min_sparse_len, *py_prev_sparse_len;
- if (!PyArg_ParseTuple(args, "it#OO",
+ if (!PyArg_ParseTuple(args, "i" rbuf_argf "OO",
&fd, &buf, &sbuf_len,
&py_min_sparse_len, &py_prev_sparse_len))
return NULL;
- unsigned long long min_sparse_len, prev_sparse_len, buf_len;
- if (!bup_ullong_from_py(&min_sparse_len, py_min_sparse_len, "min_sparse_len"))
+ ptrdiff_t min_sparse_len;
+ unsigned long long prev_sparse_len, buf_len, ul_min_sparse_len;
+ if (!bup_ullong_from_py(&ul_min_sparse_len, py_min_sparse_len, "min_sparse_len"))
return NULL;
+ if (!INTEGRAL_ASSIGNMENT_FITS(&min_sparse_len, ul_min_sparse_len))
+ return PyErr_Format(PyExc_OverflowError, "min_sparse_len too large");
if (!bup_ullong_from_py(&prev_sparse_len, py_prev_sparse_len, "prev_sparse_len"))
return NULL;
if (sbuf_len < 0)
static PyObject *splitbuf(PyObject *self, PyObject *args)
{
- unsigned char *buf = NULL;
- Py_ssize_t len = 0;
+ // We stick to buffers in python 2 because they appear to be
+ // substantially smaller than memoryviews, and because
+ // zlib.compress() in python 2 can't accept a memoryview
+ // (cf. hashsplit.py).
int out = 0, bits = -1;
-
- if (!PyArg_ParseTuple(args, "t#", &buf, &len))
- return NULL;
- assert(len <= INT_MAX);
- out = bupsplit_find_ofs(buf, len, &bits);
+ if (PY_MAJOR_VERSION > 2)
+ {
+ Py_buffer buf;
+ if (!PyArg_ParseTuple(args, "y*", &buf))
+ return NULL;
+ assert(buf.len <= INT_MAX);
+ out = bupsplit_find_ofs(buf.buf, buf.len, &bits);
+ PyBuffer_Release(&buf);
+ }
+ else
+ {
+ unsigned char *buf = NULL;
+ Py_ssize_t len = 0;
+ if (!PyArg_ParseTuple(args, "t#", &buf, &len))
+ return NULL;
+ assert(len <= INT_MAX);
+ out = bupsplit_find_ofs(buf, len, &bits);
+ }
if (out) assert(bits >= BUP_BLOBBITS);
return Py_BuildValue("ii", out, bits);
}
Py_ssize_t byte;
int bit;
- if (!PyArg_ParseTuple(args, "t#t#", &buf1, &len1, &buf2, &len2))
+ if (!PyArg_ParseTuple(args, rbuf_argf rbuf_argf, &buf1, &len1, &buf2, &len2))
return NULL;
bit = 0;
Py_ssize_t len = 0;
uint32_t v;
- if (!PyArg_ParseTuple(args, "t#", &buf, &len))
+ if (!PyArg_ParseTuple(args, rbuf_argf, &buf, &len))
return NULL;
if (len < 4)
static PyObject *bloom_add(PyObject *self, PyObject *args)
{
- unsigned char *sha = NULL, *bloom = NULL;
- unsigned char *end;
- Py_ssize_t len = 0, blen = 0;
+ Py_buffer bloom, sha;
int nbits = 0, k = 0;
+ if (!PyArg_ParseTuple(args, wbuf_argf wbuf_argf "ii",
+ &bloom, &sha, &nbits, &k))
+ return NULL;
- if (!PyArg_ParseTuple(args, "w#s#ii", &bloom, &blen, &sha, &len, &nbits, &k))
- return NULL;
+ PyObject *result = NULL;
- if (blen < 16+(1<<nbits) || len % 20 != 0)
- return NULL;
+ if (bloom.len < 16+(1<<nbits) || sha.len % 20 != 0)
+ goto clean_and_return;
if (k == 5)
{
- if (nbits > 29)
- return NULL;
- for (end = sha + len; sha < end; sha += 20/k)
- bloom_set_bit5(bloom, sha, nbits);
+ if (nbits > 29)
+ goto clean_and_return;
+ unsigned char *cur = sha.buf;
+ unsigned char *end;
+ for (end = cur + sha.len; cur < end; cur += 20/k)
+ bloom_set_bit5(bloom.buf, cur, nbits);
}
else if (k == 4)
{
- if (nbits > 37)
- return NULL;
- for (end = sha + len; sha < end; sha += 20/k)
- bloom_set_bit4(bloom, sha, nbits);
+ if (nbits > 37)
+ goto clean_and_return;
+ unsigned char *cur = sha.buf;
+ unsigned char *end = cur + sha.len;
+ for (; cur < end; cur += 20/k)
+ bloom_set_bit4(bloom.buf, cur, nbits);
}
else
- return NULL;
+ goto clean_and_return;
+ result = Py_BuildValue("n", sha.len / 20);
- return Py_BuildValue("n", len/20);
+ clean_and_return:
+ PyBuffer_Release(&bloom);
+ PyBuffer_Release(&sha);
+ return result;
}
static PyObject *bloom_contains(PyObject *self, PyObject *args)
{
- unsigned char *sha = NULL, *bloom = NULL;
- Py_ssize_t len = 0, blen = 0;
+ Py_buffer bloom;
+ unsigned char *sha = NULL;
+ Py_ssize_t len = 0;
int nbits = 0, k = 0;
- unsigned char *end;
- int steps;
+ if (!PyArg_ParseTuple(args, wbuf_argf rbuf_argf "ii",
+ &bloom, &sha, &len, &nbits, &k))
+ return NULL;
- if (!PyArg_ParseTuple(args, "t#s#ii", &bloom, &blen, &sha, &len, &nbits, &k))
- return NULL;
+ PyObject *result = NULL;
if (len != 20)
- return NULL;
+ goto clean_and_return;
if (k == 5)
{
- if (nbits > 29)
- return NULL;
- for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++)
- if (!bloom_get_bit5(bloom, sha, nbits))
- return Py_BuildValue("Oi", Py_None, steps);
+ if (nbits > 29)
+ goto clean_and_return;
+ int steps;
+ unsigned char *end;
+ for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++)
+ if (!bloom_get_bit5(bloom.buf, sha, nbits))
+ {
+ result = Py_BuildValue("Oi", Py_None, steps);
+ goto clean_and_return;
+ }
}
else if (k == 4)
{
- if (nbits > 37)
- return NULL;
- for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++)
- if (!bloom_get_bit4(bloom, sha, nbits))
- return Py_BuildValue("Oi", Py_None, steps);
+ if (nbits > 37)
+ goto clean_and_return;
+ int steps;
+ unsigned char *end;
+ for (steps = 1, end = sha + 20; sha < end; sha += 20/k, steps++)
+ if (!bloom_get_bit4(bloom.buf, sha, nbits))
+ {
+ result = Py_BuildValue("Oi", Py_None, steps);
+ goto clean_and_return;
+ }
}
else
- return NULL;
+ goto clean_and_return;
- return Py_BuildValue("ii", 1, k);
+ result = Py_BuildValue("ii", 1, k);
+
+ clean_and_return:
+ PyBuffer_Release(&bloom);
+ return result;
}
Py_ssize_t len = 0;
int nbits = 0;
- if (!PyArg_ParseTuple(args, "t#i", &buf, &len, &nbits))
+ if (!PyArg_ParseTuple(args, rbuf_argf "i", &buf, &len, &nbits))
return NULL;
if (len < 4)
unsigned char bytes[20];
};
+// memcmp-style three-way comparison of two 20-byte SHA-1 digests.
+static inline int _cmp_sha(const struct sha *sha1, const struct sha *sha2)
+{
+    return memcmp(sha1->bytes, sha2->bytes, sizeof(sha1->bytes));
+}
+
struct idx {
unsigned char *map;
int name_base;
};
-
-static int _cmp_sha(const struct sha *sha1, const struct sha *sha2)
-{
- int i;
- for (i = 0; i < sizeof(struct sha); i++)
- if (sha1->bytes[i] != sha2->bytes[i])
- return sha1->bytes[i] - sha2->bytes[i];
- return 0;
-}
-
-
-static void _fix_idx_order(struct idx **idxs, int *last_i)
+static void _fix_idx_order(struct idx **idxs, Py_ssize_t *last_i)
{
struct idx *idx;
int low, mid, high, c = 0;
static PyObject *merge_into(PyObject *self, PyObject *args)
{
- PyObject *py_total, *ilist = NULL;
- unsigned char *fmap = NULL;
struct sha *sha_ptr, *sha_start = NULL;
uint32_t *table_ptr, *name_ptr, *name_start;
- struct idx **idxs = NULL;
- Py_ssize_t flen = 0;
- int bits = 0, i;
+ int i;
unsigned int total;
uint32_t count, prefix;
- int num_i;
- int last_i;
- if (!PyArg_ParseTuple(args, "w#iOO",
- &fmap, &flen, &bits, &py_total, &ilist))
+
+ Py_buffer fmap;
+ int bits;;
+ PyObject *py_total, *ilist = NULL;
+ if (!PyArg_ParseTuple(args, wbuf_argf "iOO",
+ &fmap, &bits, &py_total, &ilist))
return NULL;
+ PyObject *result = NULL;
+ struct idx **idxs = NULL;
+ Py_ssize_t num_i = 0;
+ int *idx_buf_init = NULL;
+ Py_buffer *idx_buf = NULL;
+
if (!bup_uint_from_py(&total, py_total, "total"))
- return NULL;
+ goto clean_and_return;
num_i = PyList_Size(ilist);
- idxs = (struct idx **)PyMem_Malloc(num_i * sizeof(struct idx *));
+
+ if (!(idxs = checked_malloc(num_i, sizeof(struct idx *))))
+ goto clean_and_return;
+ if (!(idx_buf_init = checked_calloc(num_i, sizeof(int))))
+ goto clean_and_return;
+ if (!(idx_buf = checked_malloc(num_i, sizeof(Py_buffer))))
+ goto clean_and_return;
for (i = 0; i < num_i; i++)
{
long len, sha_ofs, name_map_ofs;
- idxs[i] = (struct idx *)PyMem_Malloc(sizeof(struct idx));
+ if (!(idxs[i] = checked_malloc(1, sizeof(struct idx))))
+ goto clean_and_return;
PyObject *itup = PyList_GetItem(ilist, i);
- if (!PyArg_ParseTuple(itup, "t#llli", &idxs[i]->map, &idxs[i]->bytes,
- &len, &sha_ofs, &name_map_ofs, &idxs[i]->name_base))
+ if (!PyArg_ParseTuple(itup, wbuf_argf "llli",
+ &(idx_buf[i]), &len, &sha_ofs, &name_map_ofs,
+ &idxs[i]->name_base))
return NULL;
+ idx_buf_init[i] = 1;
+ idxs[i]->map = idx_buf[i].buf;
+ idxs[i]->bytes = idx_buf[i].len;
idxs[i]->cur = (struct sha *)&idxs[i]->map[sha_ofs];
idxs[i]->end = &idxs[i]->cur[len];
if (name_map_ofs)
else
idxs[i]->cur_name = NULL;
}
- table_ptr = (uint32_t *)&fmap[MIDX4_HEADERLEN];
+ table_ptr = (uint32_t *) &((unsigned char *) fmap.buf)[MIDX4_HEADERLEN];
sha_start = sha_ptr = (struct sha *)&table_ptr[1<<bits];
name_start = name_ptr = (uint32_t *)&sha_ptr[total];
- last_i = num_i-1;
+ Py_ssize_t last_i = num_i - 1;
count = 0;
prefix = 0;
while (last_i >= 0)
{
struct idx *idx;
uint32_t new_prefix;
- if (count % 102424 == 0 && istty2)
+ if (count % 102424 == 0 && get_state(self)->istty2)
fprintf(stderr, "midx: writing %.2f%% (%d/%d)\r",
count*100.0/total, count, total);
idx = idxs[last_i];
_fix_idx_order(idxs, &last_i);
++count;
}
- while (prefix < (1<<bits))
+ while (prefix < ((uint32_t) 1 << bits))
table_ptr[prefix++] = htonl(count);
assert(count == total);
- assert(prefix == (1<<bits));
+ assert(prefix == ((uint32_t) 1 << bits));
assert(sha_ptr == sha_start+count);
assert(name_ptr == name_start+count);
- PyMem_Free(idxs);
- return PyLong_FromUnsignedLong(count);
+ result = PyLong_FromUnsignedLong(count);
+
+ clean_and_return:
+ if (idx_buf_init)
+ {
+ for (i = 0; i < num_i; i++)
+ if (idx_buf_init[i])
+ PyBuffer_Release(&(idx_buf[i]));
+ free(idx_buf_init);
+ free(idx_buf);
+ }
+ if (idxs)
+ {
+ for (i = 0; i < num_i; i++)
+ free(idxs[i]);
+ free(idxs);
+ }
+ PyBuffer_Release(&fmap);
+ return result;
}
#define FAN_ENTRIES 256
char *filename = NULL;
PyObject *py_total, *idx = NULL;
PyObject *part;
- unsigned char *fmap = NULL;
- Py_ssize_t flen = 0;
unsigned int total = 0;
uint32_t count;
int i, j, ofs64_count;
uint64_t *ofs64_ptr;
struct sha *sha_ptr;
- if (!PyArg_ParseTuple(args, "sw#OO",
- &filename, &fmap, &flen, &idx, &py_total))
+ Py_buffer fmap;
+ if (!PyArg_ParseTuple(args, cstr_argf wbuf_argf "OO",
+ &filename, &fmap, &idx, &py_total))
return NULL;
+ PyObject *result = NULL;
+
if (!bup_uint_from_py(&total, py_total, "total"))
- return NULL;
+ goto clean_and_return;
if (PyList_Size (idx) != FAN_ENTRIES) // Check for list of the right length.
- return PyErr_Format (PyExc_TypeError, "idx must contain %d entries",
- FAN_ENTRIES);
+ {
+ result = PyErr_Format (PyExc_TypeError, "idx must contain %d entries",
+ FAN_ENTRIES);
+ goto clean_and_return;
+ }
const char idx_header[] = "\377tOc\0\0\0\002";
- memcpy (fmap, idx_header, sizeof(idx_header) - 1);
+ memcpy (fmap.buf, idx_header, sizeof(idx_header) - 1);
- fan_ptr = (uint32_t *)&fmap[sizeof(idx_header) - 1];
+ fan_ptr = (uint32_t *)&((unsigned char *)fmap.buf)[sizeof(idx_header) - 1];
sha_ptr = (struct sha *)&fan_ptr[FAN_ENTRIES];
crc_ptr = (uint32_t *)&sha_ptr[total];
ofs_ptr = (uint32_t *)&crc_ptr[total];
unsigned int crc;
unsigned PY_LONG_LONG ofs_ull;
uint64_t ofs;
- if (!PyArg_ParseTuple(PyList_GET_ITEM(part, j), "t#OO",
+ if (!PyArg_ParseTuple(PyList_GET_ITEM(part, j), rbuf_argf "OO",
&sha, &sha_len, &crc_py, &ofs_py))
- return NULL;
+ goto clean_and_return;
if(!bup_uint_from_py(&crc, crc_py, "crc"))
- return NULL;
+ goto clean_and_return;
if(!bup_ullong_from_py(&ofs_ull, ofs_py, "ofs"))
- return NULL;
+ goto clean_and_return;
assert(crc <= UINT32_MAX);
assert(ofs_ull <= UINT64_MAX);
ofs = ofs_ull;
if (sha_len != sizeof(struct sha))
- return NULL;
+ goto clean_and_return;
memcpy(sha_ptr++, sha, sizeof(struct sha));
*crc_ptr++ = htonl(crc);
if (ofs > 0x7fffffff)
}
}
- int rc = msync(fmap, flen, MS_ASYNC);
+ int rc = msync(fmap.buf, fmap.len, MS_ASYNC);
if (rc != 0)
- return PyErr_SetFromErrnoWithFilename(PyExc_IOError, filename);
+ {
+ result = PyErr_SetFromErrnoWithFilename(PyExc_IOError, filename);
+ goto clean_and_return;
+ }
- return PyLong_FromUnsignedLong(count);
+ result = PyLong_FromUnsignedLong(count);
+
+ clean_and_return:
+ PyBuffer_Release(&fmap);
+ return result;
}
memset(shabuf, 0, sizeof(shabuf));
for (i=0; i < 20/4; i++)
shabuf[i] = random();
- return Py_BuildValue("s#", shabuf, 20);
+ return Py_BuildValue(rbuf_argf, shabuf, 20);
}
{
char *filename = NULL;
int fd;
- if (!PyArg_ParseTuple(args, "s", &filename))
+ if (!PyArg_ParseTuple(args, cstr_argf, &filename))
return NULL;
fd = _open_noatime(filename, 0);
if (fd < 0)
char *path;
int fd;
- if (!PyArg_ParseTuple(args, "s", &path))
+ if (!PyArg_ParseTuple(args, cstr_argf, &path))
return NULL;
fd = _open_noatime(path, O_NONBLOCK);
PyObject *py_attr;
int fd;
- if (!PyArg_ParseTuple(args, "sO", &path, &py_attr))
+ if (!PyArg_ParseTuple(args, cstr_argf "O", &path, &py_attr))
return NULL;
if (!bup_uint_from_py(&attr, py_attr, "attr"))
// The extents flag can't be removed, so don't (see chattr(1) and chattr.c).
orig_attr = 0; // Handle int/long mismatch (see above)
rc = ioctl(fd, FS_IOC_GETFLAGS, &orig_attr);
- assert(orig_attr <= UINT_MAX); // Kernel type is actually int
if (rc == -1)
{
close(fd);
return PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
}
+ assert(orig_attr <= UINT_MAX); // Kernel type is actually int
attr |= ((unsigned int) orig_attr) & FS_EXTENT_FL;
rc = ioctl(fd, FS_IOC_SETFLAGS, &attr);
PyObject *access_py, *modification_py;
struct timespec ts[2];
- if (!PyArg_ParseTuple(args, "is((Ol)(Ol))i",
+ if (!PyArg_ParseTuple(args, "i" cstr_argf "((Ol)(Ol))i",
&fd,
&path,
&access_py, &(ts[0].tv_nsec),
PyObject *access_py, *modification_py;
long long access_us, modification_us; // POSIX guarantees tv_usec is signed.
- if (!PyArg_ParseTuple(args, "s((OL)(OL))",
+ if (!PyArg_ParseTuple(args, cstr_argf "((OL)(OL))",
path,
&access_py, &access_us,
&modification_py, &modification_us))
// compile time, but not (easily) the unspecified types, so handle
// those via INTEGER_TO_PY(). Assumes ns values will fit in a
// long.
- return Py_BuildValue("OKOOOOOL(Ol)(Ol)(Ol)",
+ return Py_BuildValue("NKNNNNNL(Nl)(Nl)(Nl)",
INTEGER_TO_PY(st->st_mode),
(unsigned PY_LONG_LONG) st->st_ino,
INTEGER_TO_PY(st->st_dev),
int rc;
char *filename;
- if (!PyArg_ParseTuple(args, "s", &filename))
+ if (!PyArg_ParseTuple(args, cstr_argf, &filename))
return NULL;
struct stat st;
int rc;
char *filename;
- if (!PyArg_ParseTuple(args, "s", &filename))
+ if (!PyArg_ParseTuple(args, cstr_argf, &filename))
return NULL;
struct stat st;
#ifdef BUP_MINCORE_BUF_TYPE
static PyObject *bup_mincore(PyObject *self, PyObject *args)
{
- const char *src;
- Py_ssize_t src_ssize;
- Py_buffer dest;
+ Py_buffer src, dest;
PyObject *py_src_n, *py_src_off, *py_dest_off;
- if (!PyArg_ParseTuple(args, "s#OOw*O",
- &src, &src_ssize, &py_src_n, &py_src_off,
+
+ if (!PyArg_ParseTuple(args, cstr_argf "*OOw*O",
+ &src, &py_src_n, &py_src_off,
&dest, &py_dest_off))
return NULL;
- unsigned long long src_size, src_n, src_off, dest_size, dest_off;
+ PyObject *result = NULL;
+
+ unsigned long long src_n, src_off, dest_off;
if (!(bup_ullong_from_py(&src_n, py_src_n, "src_n")
&& bup_ullong_from_py(&src_off, py_src_off, "src_off")
&& bup_ullong_from_py(&dest_off, py_dest_off, "dest_off")))
- return NULL;
+ goto clean_and_return;
- if (!INTEGRAL_ASSIGNMENT_FITS(&src_size, src_ssize))
- return PyErr_Format(PyExc_OverflowError, "invalid src size");
unsigned long long src_region_end;
+ if (!uadd(&src_region_end, src_off, src_n)) {
+ result = PyErr_Format(PyExc_OverflowError, "(src_off + src_n) too large");
+ goto clean_and_return;
+ }
+ assert(src.len >= 0);
+ if (src_region_end > (unsigned long long) src.len) {
+ result = PyErr_Format(PyExc_OverflowError, "region runs off end of src");
+ goto clean_and_return;
+ }
- if (!uadd(&src_region_end, src_off, src_n))
- return PyErr_Format(PyExc_OverflowError, "(src_off + src_n) too large");
- if (src_region_end > src_size)
- return PyErr_Format(PyExc_OverflowError, "region runs off end of src");
-
- if (!INTEGRAL_ASSIGNMENT_FITS(&dest_size, dest.len))
- return PyErr_Format(PyExc_OverflowError, "invalid dest size");
- if (dest_off > dest_size)
- return PyErr_Format(PyExc_OverflowError, "region runs off end of dest");
+ unsigned long long dest_size;
+ if (!INTEGRAL_ASSIGNMENT_FITS(&dest_size, dest.len)) {
+ result = PyErr_Format(PyExc_OverflowError, "invalid dest size");
+ goto clean_and_return;
+ }
+ if (dest_off > dest_size) {
+ result = PyErr_Format(PyExc_OverflowError, "region runs off end of dest");
+ goto clean_and_return;
+ }
size_t length;
- if (!INTEGRAL_ASSIGNMENT_FITS(&length, src_n))
- return PyErr_Format(PyExc_OverflowError, "src_n overflows size_t");
- int rc = mincore((void *)(src + src_off), src_n,
+ if (!INTEGRAL_ASSIGNMENT_FITS(&length, src_n)) {
+ result = PyErr_Format(PyExc_OverflowError, "src_n overflows size_t");
+ goto clean_and_return;
+ }
+ int rc = mincore((void *)(src.buf + src_off), src_n,
(BUP_MINCORE_BUF_TYPE *) (dest.buf + dest_off));
- if (rc != 0)
- return PyErr_SetFromErrno(PyExc_OSError);
- return Py_BuildValue("O", Py_None);
+ if (rc != 0) {
+ result = PyErr_SetFromErrno(PyExc_OSError);
+ goto clean_and_return;
+ }
+ result = Py_BuildValue("O", Py_None);
+
+ clean_and_return:
+ PyBuffer_Release(&src);
+ PyBuffer_Release(&dest);
+ return result;
}
#endif /* def BUP_MINCORE_BUF_TYPE */
{ "localtime", bup_localtime, METH_VARARGS,
"Return struct_time elements plus the timezone offset and name." },
#endif
+ { "bytescmp", bup_bytescmp, METH_VARARGS,
+ "Return a negative value if x < y, zero if equal, positive otherwise."},
#ifdef BUP_MINCORE_BUF_TYPE
{ "mincore", bup_mincore, METH_VARARGS,
"For mincore(src, src_n, src_off, dest, dest_off)"
{ NULL, NULL, 0, NULL }, // sentinel
};
+// Sanity checks for INTEGRAL_ASSIGNMENT_FITS, run once during module
+// setup: assignments that must fit are accepted, and assignments that
+// would lose range or sign are rejected.  Failures abort the process
+// via assert().
+static void test_integral_assignment_fits(void)
+{
+    // Type-size prerequisites the representative cases below rely on.
+    assert(sizeof(signed short) == sizeof(unsigned short));
+    assert(sizeof(signed short) < sizeof(signed long long));
+    assert(sizeof(signed short) < sizeof(unsigned long long));
+    assert(sizeof(unsigned short) < sizeof(signed long long));
+    assert(sizeof(unsigned short) < sizeof(unsigned long long));
+    assert(sizeof(Py_ssize_t) <= sizeof(size_t));
+    {
+        signed short ss, ssmin = SHRT_MIN, ssmax = SHRT_MAX;
+        unsigned short us, usmax = USHRT_MAX;
+        signed long long sllmin = LLONG_MIN, sllmax = LLONG_MAX;
+        unsigned long long ullmax = ULLONG_MAX;
+
+        // Same-type extremes fit; wider or sign-mismatched values must not.
+        assert(INTEGRAL_ASSIGNMENT_FITS(&ss, ssmax));
+        assert(INTEGRAL_ASSIGNMENT_FITS(&ss, ssmin));
+        assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, usmax));
+        assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, sllmin));
+        assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, sllmax));
+        assert(!INTEGRAL_ASSIGNMENT_FITS(&ss, ullmax));
+
+        assert(INTEGRAL_ASSIGNMENT_FITS(&us, usmax));
+        // Negative values never fit an unsigned destination.
+        assert(!INTEGRAL_ASSIGNMENT_FITS(&us, ssmin));
+        assert(!INTEGRAL_ASSIGNMENT_FITS(&us, sllmin));
+        assert(!INTEGRAL_ASSIGNMENT_FITS(&us, sllmax));
+        assert(!INTEGRAL_ASSIGNMENT_FITS(&us, ullmax));
+    }
+}
-PyMODINIT_FUNC init_helpers(void)
+static int setup_module(PyObject *m)
{
- // FIXME: migrate these tests to configure. Check against the
- // type we're going to use when passing to python. Other stat
- // types are tested at runtime.
+ // FIXME: migrate these tests to configure, or at least don't
+ // possibly crash the whole application. Check against the type
+ // we're going to use when passing to python. Other stat types
+ // are tested at runtime.
assert(sizeof(ino_t) <= sizeof(unsigned PY_LONG_LONG));
assert(sizeof(off_t) <= sizeof(PY_LONG_LONG));
assert(sizeof(blksize_t) <= sizeof(PY_LONG_LONG));
assert(sizeof(PY_LONG_LONG) <= sizeof(long long));
assert(sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned long long));
+ test_integral_assignment_fits();
+
// Originally required by append_sparse_region()
{
off_t probe;
}
char *e;
- PyObject *m = Py_InitModule("_helpers", helper_methods);
- if (m == NULL)
- return;
-
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wtautological-compare" // For INTEGER_TO_PY().
{
#pragma clang diagnostic pop // ignored "-Wtautological-compare"
e = getenv("BUP_FORCE_TTY");
- istty2 = isatty(2) || (atoi(e ? e : "0") & 2);
+ get_state(m)->istty2 = isatty(2) || (atoi(e ? e : "0") & 2);
unpythonize_argv();
+ return 1;
}
+
+
+#if PY_MAJOR_VERSION < 3
+
+// Python 2 module entry point; on failure, returns with a Python
+// exception set.
+PyMODINIT_FUNC init_helpers(void)
+{
+    PyObject *m = Py_InitModule("_helpers", helper_methods);
+    if (m == NULL)
+        return;
+
+    if (!setup_module(m))
+    {
+        Py_DECREF(m);
+        return;
+    }
+}
+
+# else // PY_MAJOR_VERSION >= 3
+
+// Module definition for Python 3; a nonzero m_size makes the
+// interpreter allocate a state_t per module instance, retrieved via
+// PyModule_GetState() (see get_state()).
+static struct PyModuleDef helpers_def = {
+    PyModuleDef_HEAD_INIT,
+    "_helpers",
+    NULL, // m_doc
+    sizeof(state_t), // m_size
+    helper_methods,
+    NULL, // m_slots (m_reload before 3.5) -- unused here
+    NULL, // helpers_traverse,
+    NULL, // helpers_clear,
+    NULL // m_free
+};
+
+// Python 3 module entry point: create the module from helpers_def and
+// run the shared setup; on failure, return NULL with an exception set.
+PyMODINIT_FUNC PyInit__helpers(void)
+{
+    PyObject *module = PyModule_Create(&helpers_def);
+    if (module == NULL)
+        return NULL;
+    if (!setup_module(module))
+    {
+        Py_DECREF(module);
+        return NULL;
+    }
+    return module;
+}
+
+#endif // PY_MAJOR_VERSION >= 3