/*
 * $Id: ad_lock.c,v 1.9 2003-01-30 17:32:45 didg Exp $
 *
 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
 * All Rights Reserved. See COPYRIGHT for more information.
 *
 * Byte-range locks. This uses either whole-file flocks to fake byte
 * locks or fcntl-based actual byte locks. Because fcntl locks are
 * process-oriented, we need to keep around a list of file descriptors
 * that refer to the same file. Currently, this doesn't serialize access
 * to the locks. As a result, there's the potential for race conditions.
 *
 * TODO: fix the race when reading/writing.
 *       keep a pool of both locks and reference counters around so that
 *       we can save on mallocs. we should also use a tree to keep things
 */
21 #endif /* HAVE_CONFIG_H */
28 #endif /* HAVE_UNISTD_H */
31 #endif /* HAVE_FCNTL_H */
34 #include <atalk/adouble.h>
36 #include "ad_private.h"
38 /* translate between ADLOCK styles and specific locking mechanisms */
/* Map an ADLOCK_* request to the matching flock(2) operation:
 * RD -> LOCK_SH, WR -> LOCK_EX, CLR -> LOCK_UN; anything else -> -1. */
39 #define XLATE_FLOCK(type) ((type) == ADLOCK_RD ? LOCK_SH : \
40 ((type) == ADLOCK_WR ? LOCK_EX : \
41 ((type) == ADLOCK_CLR ? LOCK_UN : -1)))
43 /* ----------------------- */
/* NOTE(review): the body of this function (original lines 45-56) is missing
 * from this extraction. Presumably it maps ADLOCK_* request types to the
 * fcntl(2) lock types F_RDLCK/F_WRLCK/F_UNLCK, analogous to XLATE_FLOCK
 * above -- confirm against the upstream netatalk ad_lock.c. */
44 static int XLATE_FCNTL_LOCK(int type)
57 /* ----------------------- */
/* Test whether ranges [a, a+alen) and [b, b+blen) intersect. A zero length
 * appears to mean "to end of file" (the fcntl l_len convention), so a
 * zero-length range starting at or before the other range overlaps it.
 * NOTE(review): lines are missing here (original 60, 62, 64-65) -- the
 * symmetric "(!blen && b <= a)" clause and the braces are likely among
 * them; confirm against upstream before relying on this logic. */
59 static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
61 return (!alen && a <= b) ||
63 ( (a + alen > b) && (b + blen > a) );
67 /* allocation for lock regions. we allocate aggressively and shrink
68 * only in large chunks. */
/* ARRAY_BLOCK_SIZE: growth increment used when the lock array is full
 * (see the realloc in ad_fcntl_lock). ARRAY_FREE_DELTA: shrink hysteresis
 * used by adf_freelock -- the array is only trimmed once the slack exceeds
 * this many entries. */
69 #define ARRAY_BLOCK_SIZE 10
70 #define ARRAY_FREE_DELTA 100
72 /* remove a lock and compact space if necessary */
/* Drop lock slot i of ad->adf_lock: decrement its shared refcount, and once
 * it reaches zero, release the kernel lock (F_SETLK with F_UNLCK), fill the
 * hole by copying the last lock into slot i, and opportunistically shrink
 * the array when there is more than ARRAY_FREE_DELTA of slack.
 * NOTE(review): several original lines are missing from this extraction
 * (74, 76, 78, 81-84, 88-89, 96, 100-101, 103-107), including braces, the
 * refcount free, and the adf_lockcount decrement -- do not treat this
 * fragment as the complete function. */
73 static __inline__ void adf_freelock(struct ad_fd *ad, const int i)
75 adf_lock_t *lock = ad->adf_lock + i;
77 if (--(*lock->refcount) < 1) {
79 lock->lock.l_type = F_UNLCK;
80 fcntl(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
85 /* move another lock into the empty space */
86 if (i < ad->adf_lockcount) {
/* NOTE(review): "lock + ad->adf_lockcount - i" only lands on the last
 * element if adf_lockcount was already decremented on a missing line
 * above -- verify against upstream. */
87 memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
90 /* free extra cruft if we go past a boundary. we always want to
91 * keep at least some stuff around for allocations. this wastes
92 * a bit of space to save time on reallocations. */
93 if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
94 (ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
95 struct adf_lock_t *tmp;
/* shrink-realloc; the missing line 100-101 presumably checks tmp for
 * NULL and stores it back into ad->adf_lock -- confirm. */
97 tmp = (struct adf_lock_t *)
98 realloc(ad->adf_lock, sizeof(adf_lock_t)*
99 (ad->adf_lockcount + ARRAY_FREE_DELTA));
102 ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
108 /* this needs to deal with the following cases:
109 * 1) user is the only user of the lock
110 * 2) user shares a read lock with another user
112 * i converted to using arrays of locks. everytime a lock
113 * gets removed, we shift all of the locks down.
/* Remove every lock owned by `user` from ad's lock array, via adf_freelock.
 * NOTE(review): lines missing from this extraction (111, 114, 116, 118-119,
 * 124, 126-129) include the declaration of i, braces, and the
 * adf_freelock(ad, i) call itself -- fragment only. */
115 static __inline__ void adf_unlock(struct ad_fd *ad, int fd, const int user)
117 adf_lock_t *lock = ad->adf_lock;
120 for (i = 0; i < ad->adf_lockcount; i++) {
121 if (lock[i].user == user) {
122 /* we're really going to delete this lock. note: read locks
123 are the only ones that allow refcounts > 1 */
/* adf_freelock compacts the array by moving the last entry into this
 * slot, so re-examine index i on the next iteration. */
125 i--; /* we shifted things down, so we need to backtrack */
130 /* relock any byte lock that overlaps off/len. unlock everything
/* Re-assert (F_SETLK) every stored lock that overlaps [off, off+len) on fd.
 * Used after a temporary lock/unlock pass (see ad_fcntl_tmplock) to restore
 * the byte-range locks the kernel dropped.
 * NOTE(review): missing lines (131, 134, 136-137, 141-144) include the rest
 * of the header comment, the declaration of i, and the braces. */
132 static __inline__ void adf_relockrange(struct ad_fd *ad, int fd,
133 const off_t off, const off_t len)
135 adf_lock_t *lock = ad->adf_lock;
138 for (i = 0; i < ad->adf_lockcount; i++) {
139 if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
140 fcntl(fd, F_SETLK, &lock[i].lock);
145 /* find a byte lock that overlaps off/len for a particular user */
/* Scan ad's lock array for a lock owned by `user` whose fcntl type matches
 * the requested ADLOCK_RD/ADLOCK_WR bits and whose range overlaps off/len.
 * Presumably returns the matching index, or -1 if none -- the return
 * statements (original lines 159-163) are missing from this extraction.
 * Missing lines also include the off/len parameters (148-150), the
 * declaration of i, and braces. */
146 static __inline__ int adf_findlock(struct ad_fd *ad,
147 const int user, const int type,
151 adf_lock_t *lock = ad->adf_lock;
154 for (i = 0; i < ad->adf_lockcount; i++) {
155 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
156 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
157 (lock[i].user == user) &&
158 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
167 /* search other user lock lists */
/* Like adf_findlock, but matches locks held by any user OTHER than `user`
 * (note the != on the user field) -- i.e. detects conflicts with other
 * users on the same descriptor. Presumably returns the matching index or
 * -1; the return statements and the off/len parameter lines (170-172,
 * 181-185) are missing from this extraction. */
168 static __inline__ int adf_findxlock(struct ad_fd *ad,
169 const int user, const int type,
173 adf_lock_t *lock = ad->adf_lock;
176 for (i = 0; i < ad->adf_lockcount; i++) {
177 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
178 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
179 (lock[i].user != user) &&
180 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
186 /* okay, this needs to do the following:
187 * 1) check current list of locks. error on conflict.
188 * 2) apply the lock. error on conflict with another process.
189 * 3) update the list of locks this file has.
191 * NOTE: this treats synchronization locks a little differently. we
192 * do the following things for those:
193 * 1) if the header file exists, all the locks go in the beginning
195 * 2) if the header file doesn't exist, we stick the locks
196 * in the locations specified by AD_FILELOCK_RD/WR.
/* Byte offsets (slot indices) used for the synchronization "file locks":
 * open-read/open-write, deny-read/deny-write, and open-none slots for each
 * of the resource and data forks. df2off/hf2off below map the AD_FILELOCK_*
 * constants onto these slots. */
198 #define LOCK_RSRC_RD (0)
199 #define LOCK_RSRC_WR (1)
200 #define LOCK_DATA_RD (2)
201 #define LOCK_DATA_WR (3)
203 #define LOCK_RSRC_DRD (4)
204 #define LOCK_RSRC_DWR (5)
205 #define LOCK_DATA_DRD (6)
206 #define LOCK_DATA_DWR (7)
208 #define LOCK_RSRC_NONE (8)
209 #define LOCK_DATA_NONE (9)
212 translate a data fork lock to an offset
/* Map an AD_FILELOCK_* constant to the corresponding LOCK_DATA_* slot.
 * NOTE(review): missing lines (213-214, 216-217, 228-230) include the
 * declaration/initialization of `start` (and its value when `off` matches
 * nothing), the braces, and the return statement -- fragment only. */
215 static int df2off(int off)
218 if (off == AD_FILELOCK_OPEN_WR)
219 start = LOCK_DATA_WR;
220 else if (off == AD_FILELOCK_OPEN_RD)
221 start = LOCK_DATA_RD;
222 else if (off == AD_FILELOCK_DENY_RD)
223 start = LOCK_DATA_DRD;
224 else if (off == AD_FILELOCK_DENY_WR)
225 start = LOCK_DATA_DWR;
226 else if (off == AD_FILELOCK_OPEN_NONE)
227 start = LOCK_DATA_NONE;
232 translate a resource fork lock to an offset
/* Map an AD_FILELOCK_* constant to the corresponding LOCK_RSRC_* slot;
 * mirror image of df2off above. NOTE(review): as with df2off, the
 * declaration of `start`, the braces, and the return statement are on
 * lines missing from this extraction (233-234, 236-237, 248-250). */
235 static int hf2off(int off)
238 if (off == AD_FILELOCK_OPEN_WR)
239 start = LOCK_RSRC_WR;
240 else if (off == AD_FILELOCK_OPEN_RD)
241 start = LOCK_RSRC_RD;
242 else if (off == AD_FILELOCK_DENY_RD)
243 start = LOCK_RSRC_DRD;
244 else if (off == AD_FILELOCK_DENY_WR)
245 start = LOCK_RSRC_DWR;
246 else if (off == AD_FILELOCK_OPEN_NONE)
247 start = LOCK_RSRC_NONE;
251 /* ------------------ */
/* Acquire/upgrade/release a byte-range (or synchronization) lock for `user`
 * on the fork identified by eid, per the header comment above: check our own
 * lock list for conflicts, apply the kernel lock with fcntl(F_SETLK), then
 * record it in adf->adf_lock.
 * NOTE(review): this function is heavily fragmented in this extraction --
 * the declarations of adf/lock/i/type (254-262), many braces, error
 * returns, and cleanup paths are on missing lines. The visible lines are
 * annotated below, but the complete control flow must be taken from
 * upstream ad_lock.c. */
252 int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int locktype,
253 const off_t off, const off_t len, const int user)
257 adf_lock_t *adflock, *oldlock;
263 if (eid == ADEID_DFORK) {
/* synchronization ("file") locks live in the header file when one
 * exists; otherwise they presumably fall through to the data fork. */
265 if ((type & ADLOCK_FILELOCK)) {
266 if (ad_hfileno(ad) != -1) {
267 lock.l_start = df2off(off);
273 if (type & ADLOCK_FILELOCK)
274 lock.l_start = hf2off(off);
/* regular byte locks are offset past the AppleDouble entry data */
276 lock.l_start += ad_getentryoff(ad, eid);
278 /* NOTE: we can't write lock a read-only file. on those, we just
279 * make sure that we have a read lock set. that way, we at least prevent
280 * someone else from really setting a deny read/write on the file.
282 if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
283 type = (type & ~ADLOCK_WR) | ADLOCK_RD;
286 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
287 lock.l_whence = SEEK_SET;
290 /* byte_lock(len=-1) lock whole file */
291 if (len == BYTELOCK_MAX) {
292 lock.l_len -= lock.l_start; /* otherwise EOVERFLOW error */
295 /* see if it's locked by another user.
296 * NOTE: this guarantees that any existing locks must be at most
297 * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
298 * guaranteed to be ORable. */
299 if (adf_findxlock(adf, user, ADLOCK_WR |
300 ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
301 lock.l_start, lock.l_len) > -1) {
306 /* look for any existing lock that we may have */
307 i = adf_findlock(adf, user, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
308 adflock = (i < 0) ? NULL : adf->adf_lock + i;
310 /* here's what we check for:
311 1) we're trying to re-lock a lock, but we didn't specify an update.
312 2) we're trying to free only part of a lock.
313 3) we're trying to free a non-existent lock. */
314 if ((!adflock && (lock.l_type == F_UNLCK)) ||
315 (adflock && !(type & ADLOCK_UPGRADE) &&
316 ((lock.l_type != F_UNLCK) || (adflock->lock.l_start != lock.l_start) ||
317 (adflock->lock.l_len != lock.l_len)))) {
323 /* now, update our list of locks */
325 if (lock.l_type == F_UNLCK) {
326 adf_freelock(adf, i);
330 /* attempt to lock the file. */
331 if (fcntl(adf->adf_fd, F_SETLK, &lock) < 0)
334 /* we upgraded this lock. */
335 if (adflock && (type & ADLOCK_UPGRADE)) {
336 memcpy(&adflock->lock, &lock, sizeof(lock));
340 /* it wasn't an upgrade */
/* a new read lock may share a refcount with another user's identical
 * read lock (read locks are the only shareable kind) */
342 if ((lock.l_type == F_RDLCK) &&
343 ((i = adf_findxlock(adf, user, ADLOCK_RD, lock.l_start, lock.l_len)) > -1)) {
344 oldlock = adf->adf_lock + i;
347 /* no more space. this will also happen if lockmax == lockcount == 0 */
348 if (adf->adf_lockmax == adf->adf_lockcount) {
349 adf_lock_t *tmp = (adf_lock_t *)
350 realloc(adf->adf_lock, sizeof(adf_lock_t)*
351 (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
355 adf->adf_lockmax += ARRAY_BLOCK_SIZE;
357 adflock = adf->adf_lock + adf->adf_lockcount;
360 memcpy(&adflock->lock, &lock, sizeof(lock));
361 adflock->user = user;
/* share the existing refcount if we found a matching read lock above;
 * otherwise allocate a fresh zeroed counter */
363 adflock->refcount = oldlock->refcount;
364 else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
368 (*adflock->refcount)++;
369 adf->adf_lockcount++;
/* error path (label presumably on a missing line): undo the kernel
 * lock we just took -- confirm against upstream */
373 lock.l_type = F_UNLCK;
374 fcntl(adf->adf_fd, F_SETLK, &lock);
378 /* -------------------------
379 we are using lock as tristate variable
/* Test whether the synchronization-lock slot for `off` on fork eid is held:
 * first check our own in-process lock list, then probe the kernel.
 * Visible return values: 1 if another process holds it (EACCES/EAGAIN),
 * -1 on other fcntl errors, and the result of the trailing unlock
 * otherwise. NOTE(review): missing lines (380-385, 387-393, 395, 397,
 * 399-401, 403-404, 408, 411-412, 414-415, 417, 420-421) include the
 * declarations of adf/lock/plock/i and several braces/returns. The probe
 * uses F_SETLK (actually taking then releasing the lock) rather than
 * F_GETLK -- verify this is intentional against upstream. */
386 int ad_testlock(struct adouble *ad, int eid, const off_t off)
394 if (eid == ADEID_DFORK) {
396 if ((ad_hfileno(ad) != -1)) {
398 lock.l_start = df2off(off);
402 lock.l_start = hf2off(off);
405 plock = adf->adf_lock;
406 /* Do we have a lock? */
407 lock.l_whence = SEEK_SET;
409 for (i = 0; i < adf->adf_lockcount; i++) {
410 if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
413 /* Does another process have a lock?
/* use a write lock to probe when we can, a read lock on read-only fds */
416 lock.l_type = (adf->adf_flags & O_RDWR) ?F_WRLCK : F_RDLCK;
418 if (fcntl(adf->adf_fd, F_SETLK, &lock) < 0) {
419 return (errno == EACCES || errno == EAGAIN)?1:-1;
/* we took the lock during the probe; release it again */
422 lock.l_type = F_UNLCK;
423 return fcntl(adf->adf_fd, F_SETLK, &lock);
426 /* -------------------------
/* Take (or release) a temporary kernel lock for the duration of an
 * operation, without recording it in the lock array; on unlock, re-assert
 * any stored byte locks the kernel dropped (adf_relockrange).
 * NOTE(review): missing lines (427, 430-435, 437-439, 445, 448-449,
 * 452-453, 456-457, 462-465, 471, 475-479) include the declarations of
 * adf/lock/err, braces, and the final return -- fragment only. */
428 int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int type,
429 const off_t off, const off_t len, const int user)
436 if (eid == ADEID_DFORK) {
440 /* if ADLOCK_FILELOCK we want a lock from offset 0
441 * it's used when deleting a file:
442 * in open we put read locks on meta datas
443 * in delete a write locks on the whole file
444 * so if the file is open by somebody else it fails
446 if (!(type & ADLOCK_FILELOCK))
447 lock.l_start += ad_getentryoff(ad, eid);
/* same read-only-file downgrade as ad_fcntl_lock: can't write-lock a
 * fd opened without O_RDWR, so settle for a read lock */
450 if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
451 type = (type & ~ADLOCK_WR) | ADLOCK_RD;
454 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
455 lock.l_whence = SEEK_SET;
458 /* see if it's locked by another user. */
459 if (user && adf_findxlock(adf, user, ADLOCK_WR |
460 ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
461 lock.l_start, lock.l_len) > -1) {
466 /* okay, we might have ranges byte-locked. we need to make sure that
467 * we restore the appropriate ranges once we're done. so, we check
468 * for overlap on an unlock and relock.
469 * XXX: in the future, all the byte locks will be sorted and contiguous.
470 * we just want to upgrade all the locks and then downgrade them
472 err = fcntl(adf->adf_fd, F_SETLK, &lock);
473 if (!err && (lock.l_type == F_UNLCK))
474 adf_relockrange(adf, adf->adf_fd, lock.l_start, len);
480 void ad_fcntl_unlock(struct adouble *ad, const int user)
482 if (ad->ad_df.adf_fd != -1) {
483 adf_unlock(&ad->ad_df, ad->ad_df.adf_fd, user);
485 if (ad->ad_hf.adf_fd != -1) {
486 adf_unlock(&ad->ad_hf, ad->ad_hf.adf_fd, user);