2 * $Id: ad_lock.c,v 1.18 2009-11-12 06:28:40 didg Exp $
4 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
5 * All Rights Reserved. See COPYRIGHT for more information.
7 * Byte-range locks. This uses either whole-file flocks to fake byte
8 * locks or fcntl-based actual byte locks. Because fcntl locks are
9 * process-oriented, we need to keep around a list of file descriptors
10 * that refer to the same file. Currently, this doesn't serialize access
11 * to the locks. as a result, there's the potential for race conditions.
13 * TODO: fix the race when reading/writing.
14 * keep a pool of both locks and reference counters around so that
15 * we can save on mallocs. we should also use a tree to keep things
21 #endif /* HAVE_CONFIG_H */
23 #include <atalk/adouble.h>
31 #include "ad_private.h"
/* Map an ADLOCK_* style to a flock(2) operation: shared for read,
 * exclusive for write, unlock for clear; -1 for anything else. */
33 /* translate between ADLOCK styles and specific locking mechanisms */
34 #define XLATE_FLOCK(type) ((type) == ADLOCK_RD ? LOCK_SH : \
35 ((type) == ADLOCK_WR ? LOCK_EX : \
36 ((type) == ADLOCK_CLR ? LOCK_UN : -1)))
/* With DISABLE_LOCKING defined, stub out fcntl() so every locking call
 * below becomes a successful no-op.
 * NOTE(review): the matching #endif is not visible in this extract. */
38 #ifdef DISABLE_LOCKING
39 #define fcntl(a, b, c ) (0)
42 /* ----------------------- */
/* Map an ADLOCK_* style to the corresponding fcntl(2) l_type
 * (presumably F_RDLCK/F_WRLCK/F_UNLCK, mirroring XLATE_FLOCK above).
 * NOTE(review): the function body is missing from this extract. */
43 static int XLATE_FCNTL_LOCK(int type)
56 /* ----------------------- */
/* Return non-zero when byte range [a, a+alen) overlaps [b, b+blen).
 * A length of 0 means "to end of file", so (!alen && a <= b) matches any
 * range starting at or beyond a.
 * NOTE(review): the symmetric (!blen && b <= a) disjunct appears to be
 * missing from this extract. */
57 static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
59 return (!alen && a <= b) ||
61 ( (a + alen > b) && (b + blen > a) );
64 /* allocation for lock regions. we allocate aggressively and shrink
65 * only in large chunks. */
66 #define ARRAY_BLOCK_SIZE 10
67 #define ARRAY_FREE_DELTA 100
69 /* remove a lock and compact space if necessary */
/* Release lock slot i of ad->adf_lock.  Read locks may be shared between
 * forks, so the kernel lock is dropped only when the shared refcount
 * falls below 1.  The hole is then filled by moving another entry into
 * it, and the array is shrunk in ARRAY_FREE_DELTA-sized chunks once it
 * is that much larger than needed.
 * NOTE(review): several lines are missing from this extract (freeing the
 * refcount, decrementing adf_lockcount, the realloc failure check). */
70 static void adf_freelock(struct ad_fd *ad, const int i)
72 adf_lock_t *lock = ad->adf_lock + i;
/* last reference on this region: release the kernel-level lock */
74 if (--(*lock->refcount) < 1) {
77 lock->lock.l_type = F_UNLCK;
78 fcntl(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
84 /* move another lock into the empty space */
85 if (i < ad->adf_lockcount) {
/* NOTE(review): adf_lockcount has presumably been decremented on a
 * missing line, so this copies the former tail entry into slot i */
86 memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
89 /* free extra cruft if we go past a boundary. we always want to
90 * keep at least some stuff around for allocations. this wastes
91 * a bit of space to save time on reallocations. */
92 if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
93 (ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
94 struct adf_lock_t *tmp;
/* shrink the array; the new capacity is recorded below */
96 tmp = (struct adf_lock_t *)
97 realloc(ad->adf_lock, sizeof(adf_lock_t)*
98 (ad->adf_lockcount + ARRAY_FREE_DELTA));
101 ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
107 /* this needs to deal with the following cases:
108 * 1) fork is the only user of the lock
109 * 2) fork shares a read lock with another open fork
111 * i converted to using arrays of locks. everytime a lock
112 * gets removed, we shift all of the locks down.
/* Remove every lock owned by the given open fork from ad's lock array.
 * Deleting an entry compacts the array (adf_freelock moves the tail
 * entry down into the hole), so the same index is revisited via i--.
 * NOTE(review): the loop-body lines that actually call adf_freelock()
 * and re-read adf_lock after a possible realloc are missing here. */
114 static void adf_unlock(struct ad_fd *ad, const int fork)
116 adf_lock_t *lock = ad->adf_lock;
119 for (i = 0; i < ad->adf_lockcount; i++) {
121 if (lock[i].user == fork) {
122 /* we're really going to delete this lock. note: read locks
123 are the only ones that allow refcounts > 1 */
125 i--; /* we shifted things down, so we need to backtrack */
126 /* unlikely but realloc may have change adf_lock */
132 /* relock any byte lock that overlaps off/len. unlock everything
/* Re-apply every stored byte lock that overlaps [off, off+len) on fd,
 * restoring fcntl lock state after a temporary unlock.  Skipped when the
 * descriptor holds an exclusive whole-file lock (adf_excl), since then
 * no per-range kernel locks are in play.  F_SETLK failures are ignored
 * here -- best effort. */
134 static void adf_relockrange(struct ad_fd *ad, int fd,
135 const off_t off, const off_t len)
137 adf_lock_t *lock = ad->adf_lock;
140 if (!ad->adf_excl) for (i = 0; i < ad->adf_lockcount; i++) {
141 if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
142 fcntl(fd, F_SETLK, &lock[i].lock);
147 /* find a byte lock that overlaps off/len for a particular open fork */
/* Find a stored lock belonging to THIS fork whose fcntl type matches the
 * ADLOCK_RD/ADLOCK_WR bits in `type` and whose range overlaps off/len.
 * Judging by the visible callers, it returns the array index on a match
 * and a negative value otherwise.
 * NOTE(review): the return statements are missing from this extract. */
148 static int adf_findlock(struct ad_fd *ad,
149 const int fork, const int type,
153 adf_lock_t *lock = ad->adf_lock;
156 for (i = 0; i < ad->adf_lockcount; i++) {
157 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
158 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
159 (lock[i].user == fork) &&
160 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
169 /* search other fork lock lists */
/* Counterpart of adf_findlock(): search locks held by OTHER open forks
 * (lock[i].user != fork) that overlap off/len and match the type mask.
 * Used by the callers below to detect cross-fork conflicts before
 * taking or clearing a lock.
 * NOTE(review): the return statements are missing from this extract. */
170 static int adf_findxlock(struct ad_fd *ad,
171 const int fork, const int type,
175 adf_lock_t *lock = ad->adf_lock;
178 for (i = 0; i < ad->adf_lockcount; i++) {
179 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
180 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
181 (lock[i].user != fork) &&
182 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
188 /* okay, this needs to do the following:
189 * 1) check current list of locks. error on conflict.
190 * 2) apply the lock. error on conflict with another process.
191 * 3) update the list of locks this file has.
193 * NOTE: this treats synchronization locks a little differently. we
194 * do the following things for those:
195 * 1) if the header file exists, all the locks go in the beginning
197 * 2) if the header file doesn't exist, we stick the locks
198 * in the locations specified by AD_FILELOCK_RD/WR.
200 #define LOCK_DATA_WR (0)
201 #define LOCK_DATA_RD (1)
202 #define LOCK_RSRC_WR (2)
203 #define LOCK_RSRC_RD (3)
205 #define LOCK_RSRC_DRD (4)
206 #define LOCK_RSRC_DWR (5)
207 #define LOCK_DATA_DRD (6)
208 #define LOCK_DATA_DWR (7)
210 #define LOCK_RSRC_NONE (8)
211 #define LOCK_DATA_NONE (9)
214 translate a data fork lock to an offset
/* Translate a data-fork synchronization-lock constant (AD_FILELOCK_*)
 * into its byte-offset slot (LOCK_DATA_*) within the lock area.
 * NOTE(review): the declaration of `start` and the return statement are
 * missing from this extract; unmatched values presumably fall through
 * with `start` left at some default. */
217 static off_t df2off(int off)
220 if (off == AD_FILELOCK_OPEN_WR)
221 start = LOCK_DATA_WR;
222 else if (off == AD_FILELOCK_OPEN_RD)
223 start = LOCK_DATA_RD;
224 else if (off == AD_FILELOCK_DENY_RD)
225 start = LOCK_DATA_DRD;
226 else if (off == AD_FILELOCK_DENY_WR)
227 start = LOCK_DATA_DWR;
228 else if (off == AD_FILELOCK_OPEN_NONE)
229 start = LOCK_DATA_NONE;
234 translate a resource fork lock to an offset
/* Translate a resource-fork synchronization-lock constant
 * (AD_FILELOCK_*) into its byte-offset slot (LOCK_RSRC_*); the
 * resource-fork twin of df2off() above.
 * NOTE(review): the declaration of `start` and the return statement are
 * missing from this extract. */
237 static off_t hf2off(int off)
240 if (off == AD_FILELOCK_OPEN_WR)
241 start = LOCK_RSRC_WR;
242 else if (off == AD_FILELOCK_OPEN_RD)
243 start = LOCK_RSRC_RD;
244 else if (off == AD_FILELOCK_DENY_RD)
245 start = LOCK_RSRC_DRD;
246 else if (off == AD_FILELOCK_DENY_WR)
247 start = LOCK_RSRC_DWR;
248 else if (off == AD_FILELOCK_OPEN_NONE)
249 start = LOCK_RSRC_NONE;
253 /* ------------------ */
/* Take, upgrade, or clear a byte-range lock on an AppleDouble fork.
 * Steps (per the comment block above): check our own lock list, check
 * other forks' locks for conflicts, apply the kernel fcntl lock, then
 * record it in adf->adf_lock.  ADLOCK_FILELOCK requests are mapped to
 * the fixed synchronization-lock offsets via df2off()/hf2off().
 * NOTE(review): many lines are missing from this extract (local
 * declarations, error returns, closing braces); comments below describe
 * only what the visible lines establish. */
254 int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int locktype,
255 const off_t off, const off_t len, const int fork)
/* pick the descriptor and base offset for the requested fork */
266 if (eid == ADEID_DFORK) {
267 adf = &ad->ad_data_fork;
268 if ((type & ADLOCK_FILELOCK)) {
269 if (ad_meta_fileno(ad) != -1) { /* META */
271 lock.l_start = df2off(off);
275 if (ad_meta_fileno(ad) == -1 || ad_reso_fileno(ad) == -1) {
276 /* there's no meta data. return a lock error
277 * otherwise if a second process is able to create it
283 if (type & ADLOCK_FILELOCK) {
284 adf = ad->ad_md; /* either resource or meta data (set in ad_open) */
285 lock.l_start = hf2off(off);
288 /* we really want the resource fork it's a byte lock */
289 adf = &ad->ad_resource_fork;
290 lock.l_start += ad_getentryoff(ad, eid);
293 /* NOTE: we can't write lock a read-only file. on those, we just
294 * make sure that we have a read lock set. that way, we at least prevent
295 * someone else from really setting a deny read/write on the file.
297 if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
298 type = (type & ~ADLOCK_WR) | ADLOCK_RD;
301 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
302 lock.l_whence = SEEK_SET;
305 /* byte_lock(len=-1) lock whole file */
306 if (len == BYTELOCK_MAX) {
307 lock.l_len -= lock.l_start; /* otherwise EOVERFLOW error */
310 /* see if it's locked by another fork.
311 * NOTE: this guarantees that any existing locks must be at most
312 * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
313 * guaranteed to be ORable. */
314 if (adf_findxlock(adf, fork, ADLOCK_WR |
315 ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
316 lock.l_start, lock.l_len) > -1) {
321 /* look for any existing lock that we may have */
322 i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
323 adflock = (i < 0) ? NULL : adf->adf_lock + i;
325 /* here's what we check for:
326 1) we're trying to re-lock a lock, but we didn't specify an update.
327 2) we're trying to free only part of a lock.
328 3) we're trying to free a non-existent lock. */
329 if ((!adflock && (lock.l_type == F_UNLCK)) ||
330 (adflock && !(type & ADLOCK_UPGRADE) &&
331 ((lock.l_type != F_UNLCK) || (adflock->lock.l_start != lock.l_start) ||
332 (adflock->lock.l_len != lock.l_len)))) {
338 /* now, update our list of locks */
/* an unlock request just drops our recorded entry */
340 if (lock.l_type == F_UNLCK) {
341 adf_freelock(adf, i);
345 /* attempt to lock the file. */
346 if (!adf->adf_excl && fcntl(adf->adf_fd, F_SETLK, &lock) < 0)
349 /* we upgraded this lock. */
350 if (adflock && (type & ADLOCK_UPGRADE)) {
351 memcpy(&adflock->lock, &lock, sizeof(lock));
355 /* it wasn't an upgrade */
/* read locks may share a refcount with another fork's identical lock */
357 if (lock.l_type == F_RDLCK) {
358 oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);
361 /* no more space. this will also happen if lockmax == lockcount == 0 */
362 if (adf->adf_lockmax == adf->adf_lockcount) {
363 adf_lock_t *tmp = (adf_lock_t *)
364 realloc(adf->adf_lock, sizeof(adf_lock_t)*
365 (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
369 adf->adf_lockmax += ARRAY_BLOCK_SIZE;
/* append the new entry to our lock list */
371 adflock = adf->adf_lock + adf->adf_lockcount;
374 memcpy(&adflock->lock, &lock, sizeof(lock));
375 adflock->user = fork;
/* share the existing refcount, or allocate a fresh zeroed one */
377 adflock->refcount = (adf->adf_lock + oldlock)->refcount;
378 } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
382 (*adflock->refcount)++;
383 adf->adf_lockcount++;
/* error path (visible tail): back out the kernel lock we just took */
387 lock.l_type = F_UNLCK;
388 if (!adf->adf_excl) fcntl(adf->adf_fd, F_SETLK, &lock);
392 /* -------------------------
393 we are using lock as tristate variable
/* Tristate probe of a lock at `off`: per the comment above, the result
 * distinguishes "we hold it", "another process holds it", and "nobody
 * does".  Visible returns: 1 when F_GETLK reports a conflict via
 * EACCES/EAGAIN, -1 on other fcntl errors.
 * NOTE(review): local declarations, the in-list return, and the final
 * returns are missing from this extract; `len` is not used on any
 * visible line -- confirm against the full source. */
400 static int testlock(struct ad_fd *adf, off_t off, off_t len)
408 plock = adf->adf_lock;
409 lock.l_whence = SEEK_SET;
412 /* Do we have a lock? */
413 for (i = 0; i < adf->adf_lockcount; i++) {
414 if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
417 /* Does another process have a lock?
/* probe with the strongest type our open mode allows */
419 lock.l_type = (adf->adf_flags & O_RDWR) ?F_WRLCK : F_RDLCK;
421 if (fcntl(adf->adf_fd, F_GETLK, &lock) < 0) {
422 /* is that kind of error possible ?*/
423 return (errno == EACCES || errno == EAGAIN)?1:-1;
426 if (lock.l_type == F_UNLCK) {
432 /* --------------- */
/* Test whether the synchronization lock at `off` is held on the given
 * fork (data or resource), translating the AD_FILELOCK_* constant to a
 * byte offset via df2off()/hf2off() and delegating to testlock().
 * Returns testlock()'s tristate result; "no metadata fork" means no
 * lock can exist.
 * NOTE(review): the else-branches and early returns are partially
 * missing from this extract. */
433 int ad_testlock(struct adouble *ad, int eid, const off_t off)
439 if (eid == ADEID_DFORK) {
440 adf = &ad->ad_data_fork;
441 if (ad_meta_fileno(ad) != -1) {
443 lock_offset = df2off(off);
447 if (ad_meta_fileno(ad) == -1) {
448 /* there's no resource fork. return no lock */
452 lock_offset = hf2off(off);
454 return testlock(adf, lock_offset, 1);
457 /* -------------------------
458 return if a file is open by another process.
459 Optimized for the common case:
460 - there's no locks held by another process (clients)
461 - or we already know the answer and don't need to test.
/* Report whether another process has the data and/or resource fork open,
 * as ATTRBIT_DOPEN/ATTRBIT_ROPEN bits.  Bits already set in `attrbits`
 * are trusted and not re-tested (see the header comment above).  The
 * fast path probes one merged range starting at AD_FILELOCK_OPEN_WR to
 * cover several lock slots in a single testlock() call.
 * NOTE(review): local declarations, `len` setup, and several branch
 * bodies are missing from this extract. */
463 u_int16_t ad_openforks(struct adouble *ad, u_int16_t attrbits)
/* nothing known yet: do the cheap merged probe first */
469 if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
471 /* XXX know the locks layout:
472 AD_FILELOCK_OPEN_WR is first
473 and use it for merging requests
475 if (ad_meta_fileno(ad) != -1) {
476 /* there's a resource fork test the four bytes for
477 * data RW/RD and fork RW/RD locks in one request
484 /* no resource fork, only data RD/RW may exist */
485 adf = &ad->ad_data_fork;
486 off = AD_FILELOCK_OPEN_WR;
/* no lock anywhere in the merged range: both forks are closed */
489 if (!testlock(adf, off, len))
492 /* either there's a lock or we already know one
/* data fork: test its two open-lock slots (WR then RD) */
495 if (!(attrbits & ATTRBIT_DOPEN)) {
496 if (ad_meta_fileno(ad) != -1) {
501 adf = &ad->ad_data_fork;
502 off = AD_FILELOCK_OPEN_WR;
504 ret = testlock(adf, off, 2) > 0? ATTRBIT_DOPEN : 0;
/* resource fork: same two-slot test */
507 if (!(attrbits & ATTRBIT_ROPEN)) {
508 if (ad_meta_fileno(ad) != -1) {
511 ret |= testlock(adf, off, 2) > 0? ATTRBIT_ROPEN : 0;
518 /* -------------------------
/* Take a TEMPORARY kernel lock (or unlock) on a fork without recording
 * it in the adf_lock array; after a temporary unlock, any stored byte
 * locks overlapping the range are re-applied via adf_relockrange().
 * Used e.g. around read/write and delete (see the FILELOCK comment
 * below).
 * NOTE(review): local declarations, the early returns, and the final
 * return are missing from this extract. */
520 int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int locktype,
521 const off_t off, const off_t len, const int fork)
530 if (eid == ADEID_DFORK) {
531 adf = &ad->ad_data_fork;
534 adf = &ad->ad_resource_fork;
535 if (adf->adf_fd == -1) {
536 /* there's no resource fork. return success */
539 /* if ADLOCK_FILELOCK we want a lock from offset 0
540 * it's used when deleting a file:
541 * in open we put read locks on meta datas
542 * in delete a write locks on the whole file
543 * so if the file is open by somebody else it fails
545 if (!(type & ADLOCK_FILELOCK))
546 lock.l_start += ad_getentryoff(ad, eid);
/* read-only descriptor: downgrade a write request to read (as in
 * ad_fcntl_lock above) */
549 if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
550 type = (type & ~ADLOCK_WR) | ADLOCK_RD;
553 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
554 lock.l_whence = SEEK_SET;
557 /* see if it's locked by another fork. */
558 if (fork && adf_findxlock(adf, fork, ADLOCK_WR |
559 ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
560 lock.l_start, lock.l_len) > -1) {
565 /* okay, we might have ranges byte-locked. we need to make sure that
566 * we restore the appropriate ranges once we're done. so, we check
567 * for overlap on an unlock and relock.
568 * XXX: in the future, all the byte locks will be sorted and contiguous.
569 * we just want to upgrade all the locks and then downgrade them
571 if (!adf->adf_excl) {
572 err = fcntl(adf->adf_fd, F_SETLK, &lock);
/* temporary unlock succeeded: restore stored locks in the range */
577 if (!err && (lock.l_type == F_UNLCK))
578 adf_relockrange(adf, adf->adf_fd, lock.l_start, len);
583 /* -------------------------
584 the fork is opened in Read Write, Deny Read, Deny Write mode
585 lock the whole file once
/* Write-lock the whole fork once, for forks opened RW/DenyRd/DenyWr
 * (see the comment above).  The resource fork's lock starts past the
 * AppleDouble entry header (ad_getentryoff).
 * NOTE(review): lock.l_start/l_len initialization for the data fork,
 * the adf_excl bookkeeping, and the return are missing from this
 * extract. */
587 int ad_excl_lock(struct adouble *ad, const u_int32_t eid)
594 lock.l_type = F_WRLCK;
595 lock.l_whence = SEEK_SET;
598 if (eid == ADEID_DFORK) {
599 adf = &ad->ad_data_fork;
601 adf = &ad->ad_resource_fork;
602 lock.l_start = ad_getentryoff(ad, eid);
605 err = fcntl(adf->adf_fd, F_SETLK, &lock);
611 /* --------------------- */
/* Release every lock the given open fork holds, across the data,
 * resource, and (for non-SFM v1 files) metadata descriptors, by
 * delegating to adf_unlock() on each open descriptor.
 * NOTE(review): the function continues past the end of this extract. */
612 void ad_fcntl_unlock(struct adouble *ad, const int fork)
614 if (ad_data_fileno(ad) != -1) {
615 adf_unlock(&ad->ad_data_fork, fork);
617 if (ad_reso_fileno(ad) != -1) {
618 adf_unlock(&ad->ad_resource_fork, fork);
/* SFM AppleDouble v1 keeps metadata elsewhere; skip it in that case */
621 if (ad->ad_flags != AD_VERSION1_SFM) {
624 if (ad_meta_fileno(ad) != -1) {
625 adf_unlock(&ad->ad_metadata_fork, fork);