/*
- * $Id: ad_lock.c,v 1.4 2002-05-13 07:21:57 jmarcus Exp $
+ * $Id: ad_lock.c,v 1.11.6.4.2.2 2008-11-25 15:16:33 didg Exp $
*
* Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
* All Rights Reserved. See COPYRIGHT for more information.
#include "config.h"
#endif /* HAVE_CONFIG_H */
+#include <atalk/adouble.h>
+
#include <stdio.h>
#include <stdlib.h>
-#include <string.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif /* HAVE_UNISTD_H */
-#ifdef HAVE_FCNTL_H
-#include <fcntl.h>
-#endif /* HAVE_FCNTL_H */
#include <errno.h>
-#include <atalk/adouble.h>
+#include <string.h>
#include "ad_private.h"
((type) == ADLOCK_WR ? LOCK_EX : \
((type) == ADLOCK_CLR ? LOCK_UN : -1)))
-#define XLATE_FCNTL_LOCK(type) ((type) == ADLOCK_RD ? F_RDLCK : \
-((type) == ADLOCK_WR ? F_WRLCK : \
- ((type) == ADLOCK_CLR ? F_UNLCK : -1)))
-
-#define OVERLAP(a,alen,b,blen) ((!(alen) && (a) <= (b)) || \
- (!(blen) && (b) <= (a)) || \
- ((((a) + (alen)) > (b)) && \
- (((b) + (blen)) > (a))))
+#ifdef DISABLE_LOCKING
+#define fcntl(a, b, c ) (0)
+#endif
+/* ----------------------- */
+/* Map an ADLOCK_* request (masked with ADLOCK_MASK by callers) to the
+ * corresponding fcntl(2) lock type; returns -1 for anything that is not
+ * RD, WR or CLR. Replaces the old nested-ternary macro of the same name. */
+static int XLATE_FCNTL_LOCK(int type)
+{
+ switch(type) {
+ case ADLOCK_RD:
+ return F_RDLCK;
+ case ADLOCK_WR:
+ return F_WRLCK;
+ case ADLOCK_CLR:
+ return F_UNLCK;
+ }
+ return -1;
+}
+
+/* ----------------------- */
+/* Do byte ranges [a, a+alen) and [b, b+blen) intersect?
+ * A zero length means the range extends to end-of-file (same convention
+ * as fcntl's l_len == 0), handled by the first two clauses. */
+static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
+{
+ return (!alen && a <= b) ||
+ (!blen && b <= a) ||
+ ( (a + alen > b) && (b + blen > a) );
+}
/* allocation for lock regions. we allocate aggressively and shrink
* only in large chunks. */
#define ARRAY_FREE_DELTA 100
/* remove a lock and compact space if necessary */
-static __inline__ void adf_freelock(struct ad_fd *ad, const int i)
+static void adf_freelock(struct ad_fd *ad, const int i)
{
adf_lock_t *lock = ad->adf_lock + i;
if (--(*lock->refcount) < 1) {
free(lock->refcount);
- lock->lock.l_type = F_UNLCK;
- fcntl(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
+ if (!ad->adf_excl) {
+ lock->lock.l_type = F_UNLCK;
+ fcntl(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
+ }
}
ad->adf_lockcount--;
* i converted to using arrays of locks. everytime a lock
* gets removed, we shift all of the locks down.
*/
-static __inline__ void adf_unlock(struct ad_fd *ad, int fd, const int user)
+static void adf_unlock(struct ad_fd *ad, const int user)
{
adf_lock_t *lock = ad->adf_lock;
int i;
/* relock any byte lock that overlaps off/len. unlock everything
* else. */
-static __inline__ void adf_relockrange(struct ad_fd *ad, int fd,
- const off_t off, const size_t len)
+static void adf_relockrange(struct ad_fd *ad, int fd,
+ const off_t off, const off_t len)
{
adf_lock_t *lock = ad->adf_lock;
int i;
- for (i = 0; i < ad->adf_lockcount; i++) {
+ if (!ad->adf_excl) for (i = 0; i < ad->adf_lockcount; i++) {
if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
fcntl(fd, F_SETLK, &lock[i].lock);
}
/* find a byte lock that overlaps off/len for a particular user */
-static __inline__ int adf_findlock(struct ad_fd *ad,
+static int adf_findlock(struct ad_fd *ad,
const int user, const int type,
const off_t off,
- const size_t len)
+ const off_t len)
{
adf_lock_t *lock = ad->adf_lock;
int i;
/* search other user lock lists */
-static __inline__ int adf_findxlock(struct ad_fd *ad,
+static int adf_findxlock(struct ad_fd *ad,
const int user, const int type,
const off_t off,
- const size_t len)
+ const off_t len)
{
adf_lock_t *lock = ad->adf_lock;
int i;
translate a data fork lock to an offset
*/
-static int df2off(int off)
+static off_t df2off(int off)
{
int start = off;
if (off == AD_FILELOCK_OPEN_WR)
translate a resource fork lock to an offset
*/
-static int hf2off(int off)
+static off_t hf2off(int off)
{
int start = off;
if (off == AD_FILELOCK_OPEN_WR)
return start;
}
-int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int type,
- const off_t off, const size_t len, const int user)
+/* ------------------ */
+int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int locktype,
+ const off_t off, const off_t len, const int user)
{
struct flock lock;
struct ad_fd *adf;
- adf_lock_t *adflock, *oldlock;
+ adf_lock_t *adflock;
+ int oldlock;
int i;
-
+ int type;
+
lock.l_start = off;
+ type = locktype;
if (eid == ADEID_DFORK) {
adf = &ad->ad_df;
if ((type & ADLOCK_FILELOCK)) {
if (ad_hfileno(ad) != -1) {
- lock.l_start = df2off(off);
+ lock.l_start = df2off(off);
adf = &ad->ad_hf;
}
}
} else { /* rfork */
adf = &ad->ad_hf;
+ if (adf->adf_fd == -1) {
+ /* there's no resource fork. return a lock error
+ * otherwise if a second process is able to create it
+ * locks are a mess.
+ */
+ errno = EACCES;
+ return -1;
+ }
if (type & ADLOCK_FILELOCK)
lock.l_start = hf2off(off);
else
lock.l_start += ad_getentryoff(ad, eid);
}
-
+ /* NOTE: we can't write lock a read-only file. on those, we just
+ * make sure that we have a read lock set. that way, we at least prevent
+ * someone else from really setting a deny read/write on the file.
+ */
+ if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
+ type = (type & ~ADLOCK_WR) | ADLOCK_RD;
+ }
+
lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
+ lock.l_whence = SEEK_SET;
+ lock.l_len = len;
+
+ /* byte_lock(len=-1) lock whole file */
+ if (len == BYTELOCK_MAX) {
+ lock.l_len -= lock.l_start; /* otherwise EOVERFLOW error */
+ }
/* see if it's locked by another user.
* NOTE: this guarantees that any existing locks must be at most
* guaranteed to be ORable. */
if (adf_findxlock(adf, user, ADLOCK_WR |
((type & ADLOCK_WR) ? ADLOCK_RD : 0),
- lock.l_start, len) > -1) {
+ lock.l_start, lock.l_len) > -1) {
errno = EACCES;
return -1;
}
/* look for any existing lock that we may have */
- i = adf_findlock(adf, user, ADLOCK_RD | ADLOCK_WR, lock.l_start, len);
+ i = adf_findlock(adf, user, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
adflock = (i < 0) ? NULL : adf->adf_lock + i;
/* here's what we check for:
if ((!adflock && (lock.l_type == F_UNLCK)) ||
(adflock && !(type & ADLOCK_UPGRADE) &&
((lock.l_type != F_UNLCK) || (adflock->lock.l_start != lock.l_start) ||
- (adflock->lock.l_len != len)))) {
+ (adflock->lock.l_len != lock.l_len)))) {
errno = EINVAL;
return -1;
}
- lock.l_whence = SEEK_SET;
- lock.l_len = len;
/* now, update our list of locks */
/* clear the lock */
}
/* attempt to lock the file. */
- if (fcntl(adf->adf_fd, F_SETLK, &lock) < 0)
+ if (!adf->adf_excl && fcntl(adf->adf_fd, F_SETLK, &lock) < 0)
return -1;
/* we upgraded this lock. */
}
/* it wasn't an upgrade */
- oldlock = NULL;
- if ((lock.l_type = F_RDLCK) &&
- ((i = adf_findxlock(adf, user, ADLOCK_RD, lock.l_start, len)) > -1)) {
- oldlock = adf->adf_lock + i;
+ oldlock = -1;
+ if (lock.l_type == F_RDLCK) {
+ oldlock = adf_findxlock(adf, user, ADLOCK_RD, lock.l_start, lock.l_len);
}
/* no more space. this will also happen if lockmax == lockcount == 0 */
/* fill in fields */
memcpy(&adflock->lock, &lock, sizeof(lock));
adflock->user = user;
- if (oldlock)
- adflock->refcount = oldlock->refcount;
- else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
+ if (oldlock > -1) {
+ adflock->refcount = (adf->adf_lock + oldlock)->refcount;
+ } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
goto fcntl_lock_err;
}
fcntl_lock_err:
lock.l_type = F_UNLCK;
- fcntl(adf->adf_fd, F_SETLK, &lock);
+ if (!adf->adf_excl) fcntl(adf->adf_fd, F_SETLK, &lock);
return -1;
}
struct ad_fd *adf;
adf_lock_t *plock;
int i;
- int lockmode;
-
+
lock.l_start = off;
if (eid == ADEID_DFORK) {
adf = &ad->ad_df;
if ((ad_hfileno(ad) != -1)) {
adf = &ad->ad_hf;
lock.l_start = df2off(off);
- }
- } else { /* rfork */
- adf = &ad->ad_hf;
+ }
+ }
+ else { /* rfork */
+ if ((ad_hfileno(ad) == -1)) {
+ /* there's no resource fork. return no lock */
+ return 0;
+ }
+ adf = &ad->ad_hf;
lock.l_start = hf2off(off);
}
/* Does another process have a lock?
FIXME F_GETLK ?
*/
- lock.l_type = (ad_getoflags(ad, eid) & O_RDWR) ?F_WRLCK : F_RDLCK;
+ lock.l_type = (adf->adf_flags & O_RDWR) ?F_WRLCK : F_RDLCK;
if (fcntl(adf->adf_fd, F_SETLK, &lock) < 0) {
- return (errno == EACCES)?1:-1;
+ return (errno == EACCES || errno == EAGAIN)?1:-1;
}
lock.l_type = F_UNLCK;
/* -------------------------
*/
-/* with temp locks, we don't need to distinguish within the same
- * process as everything is single-threaded. in addition, if
- * multi-threading gets added, it will only be in a few areas. */
-int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int type,
- const off_t off, const size_t len)
+int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int locktype,
+ const off_t off, const off_t len, const int user)
{
struct flock lock;
struct ad_fd *adf;
int err;
+ int type;
lock.l_start = off;
+ type = locktype;
if (eid == ADEID_DFORK) {
adf = &ad->ad_df;
} else {
adf = &ad->ad_hf;
+ if (adf->adf_fd == -1) {
+ /* there's no resource fork. return success */
+ return 0;
+ }
/* if ADLOCK_FILELOCK we want a lock from offset 0
* it's used when deleting a file:
* in open we put read locks on meta datas
if (!(type & ADLOCK_FILELOCK))
lock.l_start += ad_getentryoff(ad, eid);
}
+
+ if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
+ type = (type & ~ADLOCK_WR) | ADLOCK_RD;
+ }
+
lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
lock.l_whence = SEEK_SET;
lock.l_len = len;
+ /* see if it's locked by another user. */
+ if (user && adf_findxlock(adf, user, ADLOCK_WR |
+ ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
+ lock.l_start, lock.l_len) > -1) {
+ errno = EACCES;
+ return -1;
+ }
+
/* okay, we might have ranges byte-locked. we need to make sure that
* we restore the appropriate ranges once we're done. so, we check
* for overlap on an unlock and relock.
* XXX: in the future, all the byte locks will be sorted and contiguous.
* we just want to upgrade all the locks and then downgrade them
* here. */
- err = fcntl(adf->adf_fd, F_SETLK, &lock);
+ if (!adf->adf_excl) {
+ err = fcntl(adf->adf_fd, F_SETLK, &lock);
+ }
+ else {
+ err = 0;
+ }
if (!err && (lock.l_type == F_UNLCK))
adf_relockrange(adf, adf->adf_fd, lock.l_start, len);
return err;
}
-
-void ad_fcntl_unlock(struct adouble *ad, const int user)
-{
- if (ad->ad_df.adf_fd != -1) {
- adf_unlock(&ad->ad_df, ad->ad_df.adf_fd, user);
- }
- if (ad->ad_hf.adf_fd != -1) {
- adf_unlock(&ad->ad_hf, ad->ad_hf.adf_fd, user);
- }
-}
-
-/* byte-range locks. ad_lock is used by afp_bytelock and afp_openfork
- * to establish locks. both ad_lock and ad_tmplock take 0, 0, 0 to
- * signify locking of the entire file. in the absence of working
- * byte-range locks, this will default to file-wide flock-style locks.
- */
-int ad_flock_lock(struct adouble *ad, const u_int32_t eid, const int type,
- const off_t off, const size_t len, const int user)
+/* -------------------------
+ the fork is opened in Read Write, Deny Read, Deny Write mode
+ lock the whole file once
+*/
+int ad_excl_lock(struct adouble *ad, const u_int32_t eid)
{
- int err, lock_type;
+ struct ad_fd *adf;
+ struct flock lock;
+ int err;
- lock_type = XLATE_FLOCK(type & ADLOCK_MASK);
- if (eid == ADEID_DFORK) {
- if ((err = flock(ad_dfileno(ad), lock_type | LOCK_NB)) == 0)
- ad->ad_df.adf_lockcount = lock_type;
- } else if ((err = flock(ad_hfileno(ad), lock_type | LOCK_NB)) == 0)
- ad->ad_hf.adf_lockcount = lock_type;
-
- if (err) {
- if ((EWOULDBLOCK != EAGAIN) && (errno == EWOULDBLOCK))
- errno = EAGAIN;
- return -1;
- }
-
- return 0;
-}
+ lock.l_start = 0;
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_len = 0;
+ /* l_len == 0 makes the write lock run to end-of-file, i.e. it
+  * covers the whole fork with a single fcntl lock */
-/* ad_tmplock is used by afpd to lock actual read/write operations.
- * it saves the current lock state before attempting to lock to prevent
- * mixups. if byte-locks don't exist, it will lock the entire file with
- * an flock. we can be a little smart here by just upgrading/downgrading
- * locks. */
-int ad_flock_tmplock(struct adouble *ad, const u_int32_t eid, const int type,
- const off_t off, const size_t len)
-{
- int fd, oldlock, lock_type;
-
if (eid == ADEID_DFORK) {
- oldlock = ad->ad_df.adf_lockcount;
- fd = ad_dfileno(ad);
+ adf = &ad->ad_df;
} else {
- oldlock = ad->ad_hf.adf_lockcount;
- fd = ad_hfileno(ad);
+ adf = &ad->ad_hf;
+ lock.l_start = ad_getentryoff(ad, eid);
+ /* NOTE(review): presumably the entry's offset inside the
+  * AppleDouble header file, so only the entry's data is covered
+  * for the resource side — confirm against ad_getentryoff */
}
+
+ /* F_SETLK is non-blocking: fails with -1 instead of waiting.
+  * On success record adf_excl so the byte-lock routines in this
+  * file can skip their per-range fcntl calls on this fork. */
+ err = fcntl(adf->adf_fd, F_SETLK, &lock);
+ if (!err)
+ adf->adf_excl = 1;
+ return err;
+}
- /* if we already have a write lock, we don't need to do anything */
- if (oldlock == LOCK_EX) {
- return 0;
+/* --------------------- */
+/* Release every lock recorded for `user` on each fork that is open
+ * (adf_fd == -1 means the fork was never opened, so nothing to drop). */
+void ad_fcntl_unlock(struct adouble *ad, const int user)
+{
+ if (ad->ad_df.adf_fd != -1) {
+ adf_unlock(&ad->ad_df, user);
}
-
- /* if we have a read lock, upgrade it if necessary */
- lock_type = XLATE_FLOCK(type & ADLOCK_MASK);
- if (oldlock == LOCK_SH) {
- if (lock_type == LOCK_EX)
- return flock(fd, LOCK_EX | LOCK_NB);
- else if (lock_type == LOCK_UN) /* reset it */
- return flock(fd, LOCK_SH | LOCK_NB);
- else /* do nothing */
- return 0;
+ if (ad->ad_hf.adf_fd != -1) {
+ adf_unlock(&ad->ad_hf, user);
}
-
- /* if we don't already have a lock, just do it. */
- return flock(fd, lock_type | LOCK_NB);
}
-
-