/*
 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
 * All Rights Reserved. See COPYRIGHT for more information.
 *
 * Because fcntl locks are
 * process-oriented, we need to keep around a list of file descriptors
 * that refer to the same file.
 *
 * TODO: fix the race when reading/writing.
 *       keep a pool of both locks and reference counters around so that
 *       we can save on mallocs. we should also use a tree to keep things
 *       sorted.
 */
17 #endif /* HAVE_CONFIG_H */
19 #include <atalk/adouble.h>
20 #include <atalk/logger.h>
21 #include <atalk/compat.h>
22 #include <atalk/errchk.h>
23 #include <atalk/util.h>
34 static const char *shmdstrfromoff(off_t off)
37 case AD_FILELOCK_OPEN_WR:
38 return "OPEN_WR_DATA";
39 case AD_FILELOCK_OPEN_RD:
40 return "OPEN_RD_DATA";
41 case AD_FILELOCK_RSRC_OPEN_WR:
42 return "OPEN_WR_RSRC";
43 case AD_FILELOCK_RSRC_OPEN_RD:
44 return "OPEN_RD_RSRC";
45 case AD_FILELOCK_DENY_WR:
46 return "DENY_WR_DATA";
47 case AD_FILELOCK_DENY_RD:
48 return "DENY_RD_DATA";
49 case AD_FILELOCK_RSRC_DENY_WR:
50 return "DENY_WR_RSRC";
51 case AD_FILELOCK_RSRC_DENY_RD:
52 return "DENY_RD_RSRC";
53 case AD_FILELOCK_OPEN_NONE:
54 return "OPEN_NONE_DATA";
55 case AD_FILELOCK_RSRC_OPEN_NONE:
56 return "OPEN_NONE_RSRC";
/* ----------------------- */
/*!
 * Apply or test an fcntl() byte-range lock and log it.
 *
 * NOTE(review): interior lines (function braces, EC_* cleanup, return)
 * appear to be missing from this view; code left byte-identical.
 *
 * @param fd   (r)  open file descriptor, or AD_SYMLINK for a symlink
 * @param cmd  (r)  F_SETLK to apply, F_GETLK to test
 * @param lock (rw) flock structure passed straight through to fcntl()
 */
static int set_lock(int fd, int cmd, struct flock *lock)
    LOG(log_debug, logtype_ad, "set_lock(fd: %d, %s, %s, off: %jd (%s), len: %jd): BEGIN",
        fd, cmd == F_SETLK ? "F_SETLK" : "F_GETLK",
        lock->l_type == F_RDLCK ? "F_RDLCK" : lock->l_type == F_WRLCK ? "F_WRLCK" : "F_UNLCK",
        (intmax_t)lock->l_start,
        shmdstrfromoff(lock->l_start),
        (intmax_t)lock->l_len);

    /* symlinks carry no lockable fd: report the range as unlocked */
    if (fd == AD_SYMLINK) {
        lock->l_type = F_UNLCK;

    /* EC_NEG1: jump to cleanup with an error if fcntl() returns -1 */
    EC_NEG1( fcntl(fd, cmd, lock) );
/* ----------------------- */
/* Translate an ADLOCK_* lock type (ADLOCK_RD/WR/CLR) to the matching
 * fcntl() F_RDLCK/F_WRLCK/F_UNLCK value.
 * NOTE(review): the function body is not visible in this view — confirm
 * the mapping against the full source before relying on it. */
static int XLATE_FCNTL_LOCK(int type)
100 /* ----------------------- */
/*!
 * Do the ranges [a, a+alen) and [b, b+blen) overlap?
 *
 * A length of 0 means "to EOF": the range extends from its start with
 * no upper bound. The visible code only handled alen == 0; the
 * symmetric (!blen && b <= a) case is restored so a to-EOF lock that
 * starts at or before `a` is also reported as overlapping.
 *
 * @returns nonzero if the ranges overlap, 0 otherwise
 */
static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
{
    return (!alen && a <= b) ||
        (!blen && b <= a) ||
        ( (a + alen > b) && (b + blen > a) );
}
108 /* allocation for lock regions. we allocate aggressively and shrink
109 * only in large chunks. */
110 #define ARRAY_BLOCK_SIZE 10
111 #define ARRAY_FREE_DELTA 100
/* remove a lock and compact space if necessary */
/* NOTE(review): interior lines (braces, adf_lockcount decrement) appear
 * to be missing from this view; code left byte-identical. */
static void adf_freelock(struct ad_fd *ad, const int i)
    adf_lock_t *lock = ad->adf_lock + i;

    /* last reference to this range: release the kernel lock as well.
     * Read locks are the only ones that can be shared (refcount > 1). */
    if (--(*lock->refcount) < 1) {
        free(lock->refcount);
        lock->lock.l_type = F_UNLCK;
        set_lock(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */

    /* move another lock into the empty space */
    if (i < ad->adf_lockcount) {
        memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));

    /* free extra cruft if we go past a boundary. we always want to
     * keep at least some stuff around for allocations. this wastes
     * a bit of space to save time on reallocations. */
    if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
        (ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
        struct adf_lock_t *tmp;

        /* shrink the array; on realloc failure the old (larger) array
         * presumably stays in use — TODO confirm the missing NULL check */
        tmp = (struct adf_lock_t *)
            realloc(ad->adf_lock, sizeof(adf_lock_t)*
                    (ad->adf_lockcount + ARRAY_FREE_DELTA));

        ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
/* this needs to deal with the following cases:
 * 1) free all UNIX byterange lock from any fork
 * 2) free all locks of the requested fork
 *
 * i converted to using arrays of locks. everytime a lock
 * gets removed, we shift all of the locks down.
 */
static void adf_unlock(struct adouble *ad, struct ad_fd *adf, const int fork, int unlckbrl)
    adf_lock_t *lock = adf->adf_lock;

    for (i = 0; i < adf->adf_lockcount; i++) {
        /* drop plain byte-range locks (offsets below AD_FILELOCK_BASE)
         * when unlckbrl is set, plus every lock owned by this fork */
        if ((unlckbrl && lock[i].lock.l_start < AD_FILELOCK_BASE)
            || lock[i].user == fork) {
            /* we're really going to delete this lock. note: read locks
               are the only ones that allow refcounts > 1 */
            adf_freelock(adf, i);
            /* we shifted things down, so we need to backtrack */
            /* unlikely but realloc may have change adf_lock */
            lock = adf->adf_lock;
/* relock any byte lock that overlaps off/len. unlock everything
 * else (used from ad_tmplock after a temporary whole-range unlock). */
static void adf_relockrange(struct ad_fd *ad, int fd, off_t off, off_t len)
    adf_lock_t *lock = ad->adf_lock;

    for (i = 0; i < ad->adf_lockcount; i++) {
        /* re-apply the fcntl lock for every stored range touching
         * [off, off+len) */
        if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
            set_lock(fd, F_SETLK, &lock[i].lock);
/* find a byte lock that overlaps off/len for a particular open fork */
/* NOTE(review): trailing parameters and the return statements are not
 * visible in this view; presumably returns the index or -1 — confirm. */
static int adf_findlock(struct ad_fd *ad,
                        const int fork, const int type,

    adf_lock_t *lock = ad->adf_lock;

    for (i = 0; i < ad->adf_lockcount; i++) {
        /* match on lock mode (RD/WR), owning fork, and range overlap */
        if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
             ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
            (lock[i].user == fork) &&
            OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
/* search other fork lock lists */
/* Same matching as adf_findlock() except it looks for locks owned by
 * OTHER forks (lock[i].user != fork).
 * NOTE(review): logical operators between the clauses and the return
 * statements are not visible in this view; code left byte-identical. */
static int adf_findxlock(struct ad_fd *ad,
                         const int fork, const int type,

    adf_lock_t *lock = ad->adf_lock;

    for (i = 0; i < ad->adf_lockcount; i++) {
        if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK))
             ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK)))
            (lock[i].user != fork)
            OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
/* okay, this needs to do the following:
 * 1) check current list of locks. error on conflict.
 * 2) apply the lock. error on conflict with another process.
 * 3) update the list of locks this file has.
 *
 * NOTE: this treats synchronization locks a little differently. we
 * do the following things for those:
 * 1) if the header file exists, all the locks go in the beginning
 *    of the file.
 * 2) if the header file doesn't exist, we stick the locks
 *    in the locations specified by AD_FILELOCK_RD/WR.
 */
/*
 * translate a resource fork lock to an offset
 */
static off_t rf2off(off_t off)
    /* map each data-fork AD_FILELOCK_* slot to its RSRC counterpart;
     * NOTE(review): the declaration of `start` and the return statement
     * are not visible in this view */
    if (off == AD_FILELOCK_OPEN_WR)
        start = AD_FILELOCK_RSRC_OPEN_WR;
    else if (off == AD_FILELOCK_OPEN_RD)
        start = AD_FILELOCK_RSRC_OPEN_RD;
    else if (off == AD_FILELOCK_DENY_RD)
        start = AD_FILELOCK_RSRC_DENY_RD;
    else if (off == AD_FILELOCK_DENY_WR)
        start = AD_FILELOCK_RSRC_DENY_WR;
    else if (off == AD_FILELOCK_OPEN_NONE)
        start = AD_FILELOCK_RSRC_OPEN_NONE;
/*
 * (1) Test against our own locks array
 * (2) Test fcntl lock, locks from other processes
 *
 * @param adf (r) handle
 * @param off (r) offset
 * @param len (r) length
 *
 * @returns 1 if there's an existing lock, 0 if there's no lock,
 *          -1 in case any error occurred
 */
static int testlock(const struct ad_fd *adf, off_t off, off_t len)
    plock = adf->adf_lock;
    lock.l_whence = SEEK_SET;

    /* (1) Do we have a lock ? */
    for (i = 0; i < adf->adf_lockcount; i++) {
        if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))

    /* (2) Does another process have a lock? */
    /* probe with the strongest lock type we could take ourselves */
    lock.l_type = (adf->adf_flags & O_RDWR) ? F_WRLCK : F_RDLCK;

    if (set_lock(adf->adf_fd, F_GETLK, &lock) < 0) {
        /* is that kind of error possible ?*/
        return (errno == EACCES || errno == EAGAIN) ? 1 : -1;

    /* F_GETLK reports F_UNLCK when nobody holds a conflicting lock */
    if (lock.l_type == F_UNLCK) {
#define LTYPE2STRBUFSIZ 128
/*!
 * Render an ADLOCK_* bitmask as a human-readable string for logging,
 * e.g. "RD|FILELOCK". Returns a pointer to a static buffer, so the
 * result is only valid until the next call (not thread-safe).
 * NOTE(review): the buffer reset and some conditional lines are not
 * visible in this view; code left byte-identical.
 */
static const char *locktypetostr(int type)
    static char buf[LTYPE2STRBUFSIZ];

    strlcat(buf, "CLR", LTYPE2STRBUFSIZ);

    if (type & ADLOCK_RD) {
        strlcat(buf, "|", LTYPE2STRBUFSIZ);
        strlcat(buf, "RD", LTYPE2STRBUFSIZ);

    if (type & ADLOCK_WR) {
        strlcat(buf, "|", LTYPE2STRBUFSIZ);
        strlcat(buf, "WR", LTYPE2STRBUFSIZ);

    if (type & ADLOCK_UPGRADE) {
        strlcat(buf, "|", LTYPE2STRBUFSIZ);
        strlcat(buf, "UPG", LTYPE2STRBUFSIZ);

    if (type & ADLOCK_FILELOCK) {
        strlcat(buf, "|", LTYPE2STRBUFSIZ);
        strlcat(buf, "FILELOCK", LTYPE2STRBUFSIZ);
/******************************************************************************
 * Public functions
 ******************************************************************************/

/*!
 * Set (or clear) a byte-range lock or a share-mode "filelock".
 *
 * NOTE(review): many interior lines (braces, else branches, gotos,
 * declarations) are missing from this view; code left byte-identical.
 *
 * @param ad       (rw) adouble handle
 * @param eid      (r)  ADEID_DFORK or resource fork
 * @param locktype (r)  ADLOCK_RD/WR/CLR, possibly ORed with
 *                      ADLOCK_UPGRADE and/or ADLOCK_FILELOCK
 * @param off      (r)  lock offset
 * @param len      (r)  lock length (must be 1 for ADLOCK_FILELOCK)
 * @param fork     (r)  identifier of the open fork owning the lock
 */
int ad_lock(struct adouble *ad, uint32_t eid, int locktype, off_t off, off_t len, int fork)
    int ret = 0, fcntl_lock_err = 0;

    LOG(log_debug, logtype_ad, "ad_lock(%s, %s, off: %jd (%s), len: %jd): BEGIN",
        eid == ADEID_DFORK ? "data" : "reso",
        locktypetostr(locktype),

    /* share-mode locks always cover exactly one byte */
    if ((locktype & ADLOCK_FILELOCK) && (len != 1))
        AFP_PANIC("lock API error");

    if (eid == ADEID_DFORK) {
        adf = &ad->ad_data_fork;

    if (type & ADLOCK_FILELOCK) {
        /* filelocks always go on the data fork fd, at translated offsets */
        adf = &ad->ad_data_fork;
        lock.l_start = rf2off(off);

        lock.l_start = off + ad_getentryoff(ad, ADEID_RFORK);

    /* NOTE: we can't write lock a read-only file. on those, we just
     * make sure that we have a read lock set. that way, we at least prevent
     * someone else from really setting a deny read/write on the file.
     */
    if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
        type = (type & ~ADLOCK_WR) | ADLOCK_RD;

    lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
    lock.l_whence = SEEK_SET;

    /* byte_lock(len=-1) lock whole file */
    if (len == BYTELOCK_MAX) {
        lock.l_len -= lock.l_start; /* otherwise EOVERFLOW error */

    /* see if it's locked by another fork.
     * NOTE: this guarantees that any existing locks must be at most
     * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
     * guaranteed to be ORable. */
    if (adf_findxlock(adf, fork, ADLOCK_WR |
                      ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
                      lock.l_start, lock.l_len) > -1) {

    /* look for any existing lock that we may have */
    i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
    adflock = (i < 0) ? NULL : adf->adf_lock + i;

    /* here's what we check for:
       1) we're trying to re-lock a lock, but we didn't specify an update.
       2) we're trying to free only part of a lock.
       3) we're trying to free a non-existent lock. */
    if ( (!adflock && (lock.l_type == F_UNLCK))

         && !(type & ADLOCK_UPGRADE)
         && ((lock.l_type != F_UNLCK)
             || (adflock->lock.l_start != lock.l_start)
             || (adflock->lock.l_len != lock.l_len) ))

    /* now, update our list of locks */
    if (lock.l_type == F_UNLCK) {
        adf_freelock(adf, i);

    /* attempt to lock the file. */
    if (set_lock(adf->adf_fd, F_SETLK, &lock) < 0) {

    /* we upgraded this lock. */
    if (adflock && (type & ADLOCK_UPGRADE)) {
        memcpy(&adflock->lock, &lock, sizeof(lock));

    /* it wasn't an upgrade */
    /* shared read locks reuse the refcount of an existing overlapping
     * read lock from another fork */
    if (lock.l_type == F_RDLCK) {
        oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);

    /* no more space. this will also happen if lockmax == lockcount == 0 */
    if (adf->adf_lockmax == adf->adf_lockcount) {
        adf_lock_t *tmp = (adf_lock_t *)
            realloc(adf->adf_lock, sizeof(adf_lock_t)*
                    (adf->adf_lockmax + ARRAY_BLOCK_SIZE));

        ret = fcntl_lock_err = -1;

        adf->adf_lockmax += ARRAY_BLOCK_SIZE;

    adflock = adf->adf_lock + adf->adf_lockcount;

    memcpy(&adflock->lock, &lock, sizeof(lock));
    adflock->user = fork;

    adflock->refcount = (adf->adf_lock + oldlock)->refcount;
    } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
        ret = fcntl_lock_err = 1;

    (*adflock->refcount)++;
    adf->adf_lockcount++;

    /* on any bookkeeping failure, undo the fcntl lock we just took */
    if (fcntl_lock_err != 0) {
        lock.l_type = F_UNLCK;
        set_lock(adf->adf_fd, F_SETLK, &lock);

    LOG(log_debug, logtype_ad, "ad_lock: END: %d", ret);
/*!
 * Take (or release) a temporary lock without registering it in the
 * per-fork lock array; on unlock, re-apply any stored byte-range locks
 * that the temporary lock may have clobbered.
 *
 * NOTE(review): interior lines (braces, declarations, returns) are
 * missing from this view; code left byte-identical.
 */
int ad_tmplock(struct adouble *ad, uint32_t eid, int locktype, off_t off, off_t len, int fork)
    LOG(log_debug, logtype_ad, "ad_tmplock(%s, %s, off: %jd (%s), len: %jd): BEGIN",
        eid == ADEID_DFORK ? "data" : "reso",
        locktypetostr(locktype),

    if (eid == ADEID_DFORK) {
        adf = &ad->ad_data_fork;

        adf = &ad->ad_resource_fork;
        if (adf->adf_fd == -1) {
            /* there's no resource fork. return success */

    /* if ADLOCK_FILELOCK we want a lock from offset 0
     * it's used when deleting a file:
     * in open we put read locks on meta datas
     * in delete a write locks on the whole file
     * so if the file is open by somebody else it fails
     */
    if (!(type & ADLOCK_FILELOCK))
        lock.l_start += ad_getentryoff(ad, eid);

    /* can't write-lock a read-only file: downgrade to a read lock */
    if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
        type = (type & ~ADLOCK_WR) | ADLOCK_RD;

    lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
    lock.l_whence = SEEK_SET;

    /* see if it's locked by another fork. */
    if (fork && adf_findxlock(adf, fork,
                              ADLOCK_WR | ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
                              lock.l_start, lock.l_len) > -1) {

    /* okay, we might have ranges byte-locked. we need to make sure that
     * we restore the appropriate ranges once we're done. so, we check
     * for overlap on an unlock and relock.
     * XXX: in the future, all the byte locks will be sorted and contiguous.
     * we just want to upgrade all the locks and then downgrade them
     */
    err = set_lock(adf->adf_fd, F_SETLK, &lock);
    if (!err && (lock.l_type == F_UNLCK))
        adf_relockrange(adf, adf->adf_fd, lock.l_start, len);

    LOG(log_debug, logtype_ad, "ad_tmplock: END: %d", err);
/* --------------------- */
/*!
 * Release the locks held by a fork on both the data fork and the
 * resource fork (byte-range locks too when unlckbrl is set — see
 * adf_unlock()).
 *
 * @param ad       (rw) adouble handle
 * @param fork     (r)  fork whose locks are removed
 * @param unlckbrl (r)  nonzero: also drop plain byte-range locks
 */
void ad_unlock(struct adouble *ad, const int fork, int unlckbrl)
    LOG(log_debug, logtype_ad, "ad_unlock(unlckbrl: %d): BEGIN", unlckbrl);

    if (ad_data_fileno(ad) != -1) {
        adf_unlock(ad, &ad->ad_data_fork, fork, unlckbrl);

    if (ad_reso_fileno(ad) != -1) {
        adf_unlock(ad, &ad->ad_resource_fork, fork, unlckbrl);

    LOG(log_debug, logtype_ad, "ad_unlock: END");
/*!
 * Test for a share mode lock
 *
 * @param ad  (rw) handle
 * @param eid (r)  datafork or resource fork
 * @param off (r)  sharemode lock to test
 *
 * @returns 1 if there's an existing lock, 0 if there's no lock,
 *          -1 in case any error occurred
 */
int ad_testlock(struct adouble *ad, int eid, const off_t off)
    LOG(log_debug, logtype_ad, "ad_testlock(%s, off: %jd (%s): BEGIN",
        eid == ADEID_DFORK ? "data" : "reso",
        shmdstrfromoff(off));

    if (eid == ADEID_DFORK) {

        /* resource-fork share locks are translated to their RSRC slots */
        lock_offset = rf2off(off);

    /* all share-mode locks live on the data fork fd; test one byte */
    ret = testlock(&ad->ad_data_fork, lock_offset, 1);

    LOG(log_debug, logtype_ad, "ad_testlock: END: %d", ret);
619 * Return if a file is open by another process.
621 * Optimized for the common case:
622 * - there's no locks held by another process (clients)
623 * - or we already know the answer and don't need to test (attrbits)
625 * @param ad (rw) handle
626 * @param attrbits (r) forks opened by us
627 * @returns bitflags ATTRBIT_DOPEN | ATTRBIT_ROPEN if
628 * other process has fork of file opened
630 uint16_t ad_openforks(struct adouble *ad, uint16_t attrbits)
636 if (ad_data_fileno(ad) == -1)
639 if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
640 /* Test all 4 locks at once */
641 off = AD_FILELOCK_OPEN_WR;
643 if (testlock(&ad->ad_data_fork, off, len) == 0)
647 /* either there's a lock or we already know one fork is open */
649 if (!(attrbits & ATTRBIT_DOPEN)) {
650 off = AD_FILELOCK_OPEN_WR;
651 ret = testlock(&ad->ad_data_fork, off, 2) > 0 ? ATTRBIT_DOPEN : 0;
654 if (!(attrbits & ATTRBIT_ROPEN)) {
655 off = AD_FILELOCK_RSRC_OPEN_WR;
656 ret |= testlock(&ad->ad_data_fork, off, 2) > 0? ATTRBIT_ROPEN : 0;