/*
- * $Id: ad_lock.c,v 1.13 2006-09-29 09:39:16 didg Exp $
- *
* Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
* All Rights Reserved. See COPYRIGHT for more information.
*
#include <string.h>
-#include "ad_private.h"
+#include "ad_lock.h"
/* translate between ADLOCK styles and specific locking mechanisms */
#define XLATE_FLOCK(type) ((type) == ADLOCK_RD ? LOCK_SH : \
((type) == ADLOCK_WR ? LOCK_EX : \
((type) == ADLOCK_CLR ? LOCK_UN : -1)))
-#ifdef DISABLE_LOCKING
-#define fcntl(a, b, c ) (0)
-#endif
+/* ----------------------- */
+/* fcntl() wrapper for byte-range locks.
+ * fd == -2 is the in-tree sentinel for a symlink with no open descriptor:
+ * pretend success, and for F_GETLK report the range as unlocked. */
+static int set_lock(int fd, int cmd, struct flock *lock)
+{
+ if (fd == -2) {
+ /* We assign fd = -2 for symlinks -> do nothing */
+ if (cmd == F_GETLK)
+ lock->l_type = F_UNLCK;
+ return 0;
+ }
+ return fcntl(fd, cmd, lock);
+}
/* ----------------------- */
static int XLATE_FCNTL_LOCK(int type)
/* ----------------------- */
static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
{
- return (!alen && a <= b) ||
- (!blen && b <= a) ||
- ( (a + alen > b) && (b + blen > a) );
+ return (!alen && a <= b) ||
+ (!blen && b <= a) ||
+ ( (a + alen > b) && (b + blen > a) );
}
/* allocation for lock regions. we allocate aggressively and shrink
#define ARRAY_FREE_DELTA 100
/* remove a lock and compact space if necessary */
-static __inline__ void adf_freelock(struct ad_fd *ad, const int i)
+static void adf_freelock(struct ad_fd *ad, const int i)
{
+#if 0
adf_lock_t *lock = ad->adf_lock + i;
if (--(*lock->refcount) < 1) {
free(lock->refcount);
if (!ad->adf_excl) {
lock->lock.l_type = F_UNLCK;
- fcntl(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
+ set_lock(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
}
}
ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
}
}
+#endif
}
/* this needs to deal with the following cases:
- * 1) user is the only user of the lock
- * 2) user shares a read lock with another user
+ * 1) fork is the only user of the lock
+ * 2) fork shares a read lock with another open fork
*
* i converted to using arrays of locks. everytime a lock
* gets removed, we shift all of the locks down.
*/
-static __inline__ void adf_unlock(struct ad_fd *ad, const int user)
+static void adf_unlock(struct ad_fd *ad, const int fork)
{
+#if 0
adf_lock_t *lock = ad->adf_lock;
int i;
for (i = 0; i < ad->adf_lockcount; i++) {
- if (lock[i].user == user) {
+
+ if (lock[i].user == fork) {
/* we're really going to delete this lock. note: read locks
are the only ones that allow refcounts > 1 */
adf_freelock(ad, i);
i--; /* we shifted things down, so we need to backtrack */
- }
+ /* unlikely, but realloc may have changed adf_lock */
+ lock = ad->adf_lock;
+ }
}
+#endif
}
/* relock any byte lock that overlaps off/len. unlock everything
* else. */
-static __inline__ void adf_relockrange(struct ad_fd *ad, int fd,
- const off_t off, const off_t len)
+static void adf_relockrange(struct ad_fd *ad, int fd, off_t off, off_t len)
{
+#if 0
adf_lock_t *lock = ad->adf_lock;
int i;
if (!ad->adf_excl) for (i = 0; i < ad->adf_lockcount; i++) {
if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
- fcntl(fd, F_SETLK, &lock[i].lock);
+ set_lock(fd, F_SETLK, &lock[i].lock);
}
+#endif
}
-/* find a byte lock that overlaps off/len for a particular user */
-static __inline__ int adf_findlock(struct ad_fd *ad,
- const int user, const int type,
- const off_t off,
- const off_t len)
+/* find a byte lock that overlaps off/len for a particular open fork */
+static int adf_findlock(struct ad_fd *ad,
+ const int fork, const int type,
+ const off_t off,
+ const off_t len)
{
+#if 0
adf_lock_t *lock = ad->adf_lock;
int i;
for (i = 0; i < ad->adf_lockcount; i++) {
if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
- (lock[i].user == user) &&
+ (lock[i].user == fork) &&
OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
return i;
}
}
-
+#endif
return -1;
}
-/* search other user lock lists */
-static __inline__ int adf_findxlock(struct ad_fd *ad,
- const int user, const int type,
- const off_t off,
- const off_t len)
+/* search other fork lock lists */
+static int adf_findxlock(struct ad_fd *ad,
+ const int fork, const int type,
+ const off_t off,
+ const off_t len)
{
+#if 0
adf_lock_t *lock = ad->adf_lock;
int i;
for (i = 0; i < ad->adf_lockcount; i++) {
if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
- (lock[i].user != user) &&
+ (lock[i].user != fork) &&
OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
return i;
}
+#endif
return -1;
}
* 2) if the header file doesn't exist, we stick the locks
* in the locations specified by AD_FILELOCK_RD/WR.
*/
-#define LOCK_RSRC_RD (0)
-#define LOCK_RSRC_WR (1)
-#define LOCK_DATA_RD (2)
-#define LOCK_DATA_WR (3)
+#define LOCK_DATA_WR (0)
+#define LOCK_DATA_RD (1)
+#define LOCK_RSRC_WR (2)
+#define LOCK_RSRC_RD (3)
#define LOCK_RSRC_DRD (4)
#define LOCK_RSRC_DWR (5)
}
/* ------------------ */
-int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int locktype,
- const off_t off, const off_t len, const int user)
+static int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int locktype,
+ const off_t off, const off_t len, const int fork)
{
+#if 0
struct flock lock;
struct ad_fd *adf;
adf_lock_t *adflock;
lock.l_len -= lock.l_start; /* otherwise EOVERFLOW error */
}
- /* see if it's locked by another user.
+ /* see if it's locked by another fork.
* NOTE: this guarantees that any existing locks must be at most
* read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
* guaranteed to be ORable. */
- if (adf_findxlock(adf, user, ADLOCK_WR |
+ if (adf_findxlock(adf, fork, ADLOCK_WR |
((type & ADLOCK_WR) ? ADLOCK_RD : 0),
lock.l_start, lock.l_len) > -1) {
errno = EACCES;
}
/* look for any existing lock that we may have */
- i = adf_findlock(adf, user, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
+ i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
adflock = (i < 0) ? NULL : adf->adf_lock + i;
/* here's what we check for:
1) we're trying to re-lock a lock, but we didn't specify an update.
2) we're trying to free only part of a lock.
3) we're trying to free a non-existent lock. */
- if ((!adflock && (lock.l_type == F_UNLCK)) ||
- (adflock && !(type & ADLOCK_UPGRADE) &&
- ((lock.l_type != F_UNLCK) || (adflock->lock.l_start != lock.l_start) ||
- (adflock->lock.l_len != lock.l_len)))) {
- errno = EINVAL;
- return -1;
+ if ( (!adflock && (lock.l_type == F_UNLCK))
+ ||
+ (adflock
+ && !(type & ADLOCK_UPGRADE)
+ && ((lock.l_type != F_UNLCK)
+ || (adflock->lock.l_start != lock.l_start)
+ || (adflock->lock.l_len != lock.l_len) ))
+ ) {
+ errno = EINVAL;
+ return -1;
}
}
/* attempt to lock the file. */
- if (!adf->adf_excl && fcntl(adf->adf_fd, F_SETLK, &lock) < 0)
+ if (!adf->adf_excl && set_lock(adf->adf_fd, F_SETLK, &lock) < 0)
return -1;
/* we upgraded this lock. */
/* it wasn't an upgrade */
oldlock = -1;
if (lock.l_type == F_RDLCK) {
- oldlock = adf_findxlock(adf, user, ADLOCK_RD, lock.l_start, lock.l_len);
+ oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);
}
/* no more space. this will also happen if lockmax == lockcount == 0 */
/* fill in fields */
memcpy(&adflock->lock, &lock, sizeof(lock));
- adflock->user = user;
+ adflock->user = fork;
if (oldlock > -1) {
adflock->refcount = (adf->adf_lock + oldlock)->refcount;
} else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
fcntl_lock_err:
lock.l_type = F_UNLCK;
- if (!adf->adf_excl) fcntl(adf->adf_fd, F_SETLK, &lock);
+ if (!adf->adf_excl) set_lock(adf->adf_fd, F_SETLK, &lock);
return -1;
+#endif
+ return 0;
}
/* -------------------------
error ==> -1
*/
-int ad_testlock(struct adouble *ad, int eid, const off_t off)
+static int testlock(struct ad_fd *adf, off_t off, off_t len)
{
+#if 0
struct flock lock;
- struct ad_fd *adf;
adf_lock_t *plock;
int i;
lock.l_start = off;
- if (eid == ADEID_DFORK) {
- adf = &ad->ad_data_fork;
- if ((ad_meta_fileno(ad) != -1)) {
- adf = ad->ad_md;
- lock.l_start = df2off(off);
- }
- }
- else { /* rfork */
- if ((ad_meta_fileno(ad) == -1)) {
- /* there's no resource fork. return no lock */
- return 0;
- }
- adf = ad->ad_md;
- lock.l_start = hf2off(off);
- }
plock = adf->adf_lock;
- /* Does we have a lock? */
lock.l_whence = SEEK_SET;
- lock.l_len = 1;
+ lock.l_len = len;
+
+ /* Do we have a lock? */
for (i = 0; i < adf->adf_lockcount; i++) {
if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
return 1; /* */
}
/* Does another process have a lock?
- FIXME F_GETLK ?
*/
- lock.l_type = (adf->adf_flags & O_RDWR) ?F_WRLCK : F_RDLCK;
+ lock.l_type = (adf->adf_flags & O_RDWR) ?F_WRLCK : F_RDLCK;
- if (fcntl(adf->adf_fd, F_SETLK, &lock) < 0) {
- return (errno == EACCES || errno == EAGAIN)?1:-1;
+ if (set_lock(adf->adf_fd, F_GETLK, &lock) < 0) {
+ /* is that kind of error possible? */
+ return (errno == EACCES || errno == EAGAIN)?1:-1;
}
- lock.l_type = F_UNLCK;
- return fcntl(adf->adf_fd, F_SETLK, &lock);
+ if (lock.l_type == F_UNLCK) {
+ return 0;
+ }
+ return 1;
+#endif
+ return 0;
}
+
/* -------------------------
*/
-int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int locktype,
- const off_t off, const off_t len, const int user)
+static int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int locktype,
+ const off_t off, const off_t len, const int fork)
{
struct flock lock;
struct ad_fd *adf;
lock.l_whence = SEEK_SET;
lock.l_len = len;
- /* see if it's locked by another user. */
- if (user && adf_findxlock(adf, user, ADLOCK_WR |
+ /* see if it's locked by another fork. */
+ if (fork && adf_findxlock(adf, fork, ADLOCK_WR |
((type & ADLOCK_WR) ? ADLOCK_RD : 0),
lock.l_start, lock.l_len) > -1) {
errno = EACCES;
* we just want to upgrade all the locks and then downgrade them
* here. */
if (!adf->adf_excl) {
- err = fcntl(adf->adf_fd, F_SETLK, &lock);
+ err = set_lock(adf->adf_fd, F_SETLK, &lock);
}
else {
err = 0;
return err;
}
+/* --------------------- */
+/* Release every lock held by this open fork on the data and resource
+ * forks, plus the metadata fork when the adouble is AD_VERSION_EA.
+ * NOTE(review): the adf_unlock() bodies are disabled with #if 0 in this
+ * patch, so this is currently a no-op — confirm that is intentional
+ * during the EA locking transition. */
+static void ad_fcntl_unlock(struct adouble *ad, const int fork)
+{
+ if (ad_data_fileno(ad) != -1) {
+ adf_unlock(&ad->ad_data_fork, fork);
+ }
+ if (ad_reso_fileno(ad) != -1) {
+ adf_unlock(&ad->ad_resource_fork, fork);
+ }
+
+ /* only the EA variant keeps a separate metadata fork to unlock */
+ if (ad->ad_flags != AD_VERSION_EA) {
+ return;
+ }
+ if (ad_meta_fileno(ad) != -1) {
+ adf_unlock(&ad->ad_metadata_fork, fork);
+ }
+}
+
+/******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+/* --------------- */
+/* Public: test whether byte 'off' of entry 'eid' is locked.
+ * Currently stubbed to always report "no lock" (return 0); the previous
+ * implementation, which maps the offset into the metadata fork and calls
+ * testlock(), is kept under #if 0 for reference. */
+int ad_testlock(struct adouble *ad, int eid, const off_t off)
+{
+ return 0;
+#if 0
+ struct ad_fd *adf;
+ off_t lock_offset;
+
+ lock_offset = off;
+ if (eid == ADEID_DFORK) {
+ adf = &ad->ad_data_fork;
+ if (ad_meta_fileno(ad) != -1) {
+ /* a metadata fork exists: locks live there at a translated offset */
+ adf = ad->ad_md;
+ lock_offset = df2off(off);
+ }
+ }
+ else { /* rfork */
+ if (ad_meta_fileno(ad) == -1) {
+ /* there's no resource fork. return no lock */
+ return 0;
+ }
+ adf = ad->ad_md;
+ lock_offset = hf2off(off);
+ }
+ return testlock(adf, lock_offset, 1);
+#endif
+}
+
+/* -------------------------
+   Report whether a fork of the file is open by another process, as
+   ATTRBIT_DOPEN / ATTRBIT_ROPEN bits.
+   Optimized for the common cases:
+   - no locks are held by another process (client), or
+   - we already know the answer (attrbits) and don't need to test.
+   Currently stubbed to always return 0; the byte-lock probing
+   implementation is kept under #if 0 for reference.
+*/
+uint16_t ad_openforks(struct adouble *ad, u_int16_t attrbits)
+{
+ return 0;
+#if 0
+ u_int16_t ret = 0;
+ struct ad_fd *adf;
+ off_t off;
+
+ if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
+ off_t len;
+ /* XXX know the locks layout:
+ AD_FILELOCK_OPEN_WR is first
+ and use it for merging requests
+ */
+ if (ad_meta_fileno(ad) != -1) {
+ /* there's a resource fork; test the four bytes covering
+ * data RW/RD and resource-fork RW/RD locks in one request
+ */
+ adf = ad->ad_md;
+ off = LOCK_DATA_WR;
+ len = 4;
+ }
+ else {
+ /* no resource fork, only data RD/RW may exist */
+ adf = &ad->ad_data_fork;
+ off = AD_FILELOCK_OPEN_WR;
+ len = 2;
+ }
+ if (!testlock(adf, off, len))
+ return ret;
+ }
+ /* either there's a lock or we already know one
+ fork is open
+ */
+ if (!(attrbits & ATTRBIT_DOPEN)) {
+ if (ad_meta_fileno(ad) != -1) {
+ adf = ad->ad_md;
+ off = LOCK_DATA_WR;
+ }
+ else {
+ adf = &ad->ad_data_fork;
+ off = AD_FILELOCK_OPEN_WR;
+ }
+ ret = testlock(adf, off, 2) > 0? ATTRBIT_DOPEN : 0;
+ }
+
+ if (!(attrbits & ATTRBIT_ROPEN)) {
+ if (ad_meta_fileno(ad) != -1) {
+ adf = ad->ad_md;
+ off = LOCK_RSRC_WR;
+ ret |= testlock(adf, off, 2) > 0? ATTRBIT_ROPEN : 0;
+ }
+ }
+
+ return ret;
+#endif
+}
+
/* -------------------------
the fork is opened in Read Write, Deny Read, Deny Write mode
lock the whole file once
*/
int ad_excl_lock(struct adouble *ad, const u_int32_t eid)
{
+ return 0;
+#if 0
struct ad_fd *adf;
struct flock lock;
int err;
lock.l_start = ad_getentryoff(ad, eid);
}
- err = fcntl(adf->adf_fd, F_SETLK, &lock);
+ err = set_lock(adf->adf_fd, F_SETLK, &lock);
if (!err)
adf->adf_excl = 1;
return err;
+#endif
}
-/* --------------------- */
-void ad_fcntl_unlock(struct adouble *ad, const int user)
+int ad_lock(struct adouble *ad, uint32_t eid, int type, off_t off, off_t len, int user)
{
- if (ad_data_fileno(ad) != -1) {
- adf_unlock(&ad->ad_data_fork, user);
- }
- if (ad_reso_fileno(ad) != -1) {
- adf_unlock(&ad->ad_resource_fork, user);
- }
+ return 0;
+}
- if (ad->ad_flags != AD_VERSION1_SFM) {
+void ad_unlock(struct adouble *ad, int user)
+{
return;
- }
- if (ad_meta_fileno(ad) != -1) {
- adf_unlock(&ad->ad_metadata_fork, user);
- }
+}
+int ad_tmplock(struct adouble *ad, uint32_t eid, int type, off_t off, off_t len, int user)
+{
+ return 0;
}