2 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
3 * All Rights Reserved. See COPYRIGHT for more information.
5 * Because fcntl locks are
6 * process-oriented, we need to keep around a list of file descriptors
7 * that refer to the same file.
9 * TODO: fix the race when reading/writing.
10 * keep a pool of both locks and reference counters around so that
11 * we can save on mallocs. we should also use a tree to keep things
17 #endif /* HAVE_CONFIG_H */
19 #include <atalk/adouble.h>
20 #include <atalk/logger.h>
21 #include <atalk/compat.h>
22 #include <atalk/errchk.h>
33 /* ----------------------- */
/*
 * set_lock: thin wrapper around fcntl(fd, F_SETLK/F_GETLK, lock) with
 * debug logging of the request before issuing it.
 * NOTE(review): this is a sampled fragment; interior lines are missing.
 * fd == -2 is apparently a sentinel assigned for symlinks, for which the
 * lock is reported as F_UNLCK and nothing is done -- confirm in full source.
 */
34 static int set_lock(int fd, int cmd, struct flock *lock)
38     LOG(log_debug, logtype_default, "set_lock(%s, %s, off: %jd, len: %jd): BEGIN",
39         cmd == F_SETLK ? "F_SETLK" : "F_GETLK",
40         lock->l_type == F_RDLCK ? "F_RDLCK" : lock->l_type == F_WRLCK ? "F_WRLCK" : "F_UNLCK",
41         (intmax_t)lock->l_start, (intmax_t)lock->l_len);
44     /* We assign fd = -2 for symlinks -> do nothing */
46         lock->l_type = F_UNLCK;
/* EC_NEG1_LOGSTR: project error-check macro -- logs strerror(errno) and
 * presumably jumps to an error label when fcntl() returns -1. */
50     EC_NEG1_LOGSTR( fcntl(fd, cmd, lock),
51                     "set_lock: %s", strerror(errno));
57 /* ----------------------- */
/*
 * XLATE_FCNTL_LOCK: translate an ADLOCK_* type (ADLOCK_RD/WR/CLR) into the
 * corresponding fcntl lock type (F_RDLCK/F_WRLCK/F_UNLCK).
 * NOTE(review): the body is missing from this fragment -- the mapping above
 * is inferred from the callers (`XLATE_FCNTL_LOCK(type & ADLOCK_MASK)`
 * assigned to lock.l_type); verify against the full source.
 */
58 static int XLATE_FCNTL_LOCK(int type)
71 /* ----------------------- */
/*
 * OVERLAP: predicate -- does range (a, alen) intersect range (b, blen)?
 * A length of 0 appears to mean "to end of file", hence the special
 * !alen case (a zero-length lock starting at or before b overlaps it).
 * NOTE(review): fragment -- a symmetric !blen case is presumably on a
 * missing line between these two; confirm in full source.
 */
72 static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
74     return (!alen && a <= b) ||
76         ( (a + alen > b) && (b + blen > a) );
79 /* allocation for lock regions. we allocate aggressively and shrink
80  * only in large chunks. */
/* Grow the lock array by this many slots at a time (see ad_lock). */
81 #define ARRAY_BLOCK_SIZE 10
/* Only shrink once the array has this many unused slots (see adf_freelock). */
82 #define ARRAY_FREE_DELTA 100
84 /* remove a lock and compact space if necessary */
/*
 * adf_freelock: drop lock slot i from ad->adf_lock. When the shared
 * refcount falls below 1 the kernel fcntl lock is released; the last
 * array element is then moved into the freed slot (order not preserved),
 * and the array is shrunk only when ARRAY_FREE_DELTA slots are spare.
 * NOTE(review): sampled fragment -- decrement of adf_lockcount and the
 * realloc-failure path are on missing lines; confirm in full source.
 */
85 static void adf_freelock(struct ad_fd *ad, const int i)
87     adf_lock_t *lock = ad->adf_lock + i;
/* refcount is shared between slots holding the same read lock range. */
89     if (--(*lock->refcount) < 1) {
91         lock->lock.l_type = F_UNLCK;
92         set_lock(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
97     /* move another lock into the empty space */
98     if (i < ad->adf_lockcount) {
99         memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
102     /* free extra cruft if we go past a boundary. we always want to
103      * keep at least some stuff around for allocations. this wastes
104      * a bit of space to save time on reallocations. */
105     if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
106         (ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
107         struct adf_lock_t *tmp;
/* shrink-only realloc; original pointer kept in tmp first. */
109         tmp = (struct adf_lock_t *)
110             realloc(ad->adf_lock, sizeof(adf_lock_t)*
111                     (ad->adf_lockcount + ARRAY_FREE_DELTA));
114         ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
120 /* this needs to deal with the following cases:
121  * 1) free all UNIX byterange lock from any fork
122  * 2) free all locks of the requested fork
124  * i converted to using arrays of locks. every time a lock
125  * gets removed, we shift all of the locks down.
/*
 * adf_unlock: release all byte-range locks (l_start < AD_FILELOCK_BASE,
 * i.e. real data locks from any fork) plus every lock owned by `fork`.
 * NOTE(review): sampled fragment -- the adf_freelock() call and the
 * index backtrack (i--) sit on missing lines; confirm in full source.
 */
127 static void adf_unlock(struct ad_fd *ad, const int fork)
129     adf_lock_t *lock = ad->adf_lock;
132     for (i = 0; i < ad->adf_lockcount; i++) {
133         if (lock[i].lock.l_start < AD_FILELOCK_BASE
134             || lock[i].user == fork) {
135             /* we're really going to delete this lock. note: read locks
136                are the only ones that allow refcounts > 1 */
139             /* we shifted things down, so we need to backtrack */
140             /* unlikely but realloc may have change adf_lock */
146 /* relock any byte lock that overlaps off/len. unlock everything
/*
 * adf_relockrange: after a temporary unlock (see ad_tmplock), re-apply
 * every recorded lock whose range overlaps [off, off+len) so the saved
 * lock state is restored on fd.
 */
148 static void adf_relockrange(struct ad_fd *ad, int fd, off_t off, off_t len)
150     adf_lock_t *lock = ad->adf_lock;
153     for (i = 0; i < ad->adf_lockcount; i++) {
154         if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
155             set_lock(fd, F_SETLK, &lock[i].lock);
160 /* find a byte lock that overlaps off/len for a particular open fork */
/*
 * adf_findlock: return the index of the first recorded lock owned by
 * `fork` whose type matches the ADLOCK_RD/WR mask in `type` and whose
 * range overlaps [off, off+len).
 * NOTE(review): fragment -- the "return i" / "return -1" lines are
 * missing here; callers treat a negative result as "not found".
 */
161 static int adf_findlock(struct ad_fd *ad,
162                         const int fork, const int type,
166     adf_lock_t *lock = ad->adf_lock;
169     for (i = 0; i < ad->adf_lockcount; i++) {
170         if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
171              ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
172             (lock[i].user == fork) &&
173             OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
181 /* search other fork lock lists */
/*
 * adf_findxlock: like adf_findlock, but matches locks held by any fork
 * OTHER than `fork` (user != fork) -- used to detect conflicts with
 * sibling open forks in the same process, which fcntl cannot see.
 * NOTE(review): fragment -- boolean connectors (||, &&) and the return
 * statements fall on missing lines; semantics inferred from adf_findlock.
 */
182 static int adf_findxlock(struct ad_fd *ad,
183                          const int fork, const int type,
187     adf_lock_t *lock = ad->adf_lock;
190     for (i = 0; i < ad->adf_lockcount; i++) {
191         if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK))
193              ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK)))
195             (lock[i].user != fork)
197             OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
203 /* okay, this needs to do the following:
204 * 1) check current list of locks. error on conflict.
205 * 2) apply the lock. error on conflict with another process.
206 * 3) update the list of locks this file has.
208 * NOTE: this treats synchronization locks a little differently. we
209 * do the following things for those:
210 * 1) if the header file exists, all the locks go in the beginning
212 * 2) if the header file doesn't exist, we stick the locks
213 * in the locations specified by AD_FILELOCK_RD/WR.
/* Slot indices for share-mode ("deny") locks stored at the start of the
 * header file: data/resource fork open-for-write/read, deny-read/write,
 * and open-with-no-access markers.  df2off()/hf2off() map the public
 * AD_FILELOCK_* offsets onto these slots. */
215 #define LOCK_DATA_WR (0)
216 #define LOCK_DATA_RD (1)
217 #define LOCK_RSRC_WR (2)
218 #define LOCK_RSRC_RD (3)
220 #define LOCK_RSRC_DRD (4)
221 #define LOCK_RSRC_DWR (5)
222 #define LOCK_DATA_DRD (6)
223 #define LOCK_DATA_DWR (7)
225 #define LOCK_RSRC_NONE (8)
226 #define LOCK_DATA_NONE (9)
229 translate a data fork lock to an offset
/*
 * df2off: map a public AD_FILELOCK_* share-mode offset to the
 * LOCK_DATA_* slot index used inside the AD_VERSION2 header file.
 * NOTE(review): fragment -- declaration of `start`, its default for
 * unmatched offsets, and the return are on missing lines.
 */
232 static off_t df2off(off_t off)
235     if (off == AD_FILELOCK_OPEN_WR)
236         start = LOCK_DATA_WR;
237     else if (off == AD_FILELOCK_OPEN_RD)
238         start = LOCK_DATA_RD;
239     else if (off == AD_FILELOCK_DENY_RD)
240         start = LOCK_DATA_DRD;
241     else if (off == AD_FILELOCK_DENY_WR)
242         start = LOCK_DATA_DWR;
243     else if (off == AD_FILELOCK_OPEN_NONE)
244         start = LOCK_DATA_NONE;
249 translate a resource fork lock to an offset
/*
 * hf2off: same mapping as df2off but for the resource fork -- yields the
 * LOCK_RSRC_* slot index inside the AD_VERSION2 header file.
 * NOTE(review): fragment -- declaration/default/return lines missing.
 */
252 static off_t hf2off(off_t off)
255     if (off == AD_FILELOCK_OPEN_WR)
256         start = LOCK_RSRC_WR;
257     else if (off == AD_FILELOCK_OPEN_RD)
258         start = LOCK_RSRC_RD;
259     else if (off == AD_FILELOCK_DENY_RD)
260         start = LOCK_RSRC_DRD;
261     else if (off == AD_FILELOCK_DENY_WR)
262         start = LOCK_RSRC_DWR;
263     else if (off == AD_FILELOCK_OPEN_NONE)
264         start = LOCK_RSRC_NONE;
269 translate a resource fork lock to an offset
/*
 * rf2off: map a resource-fork share-mode request to the absolute
 * AD_FILELOCK_RSRC_* offsets used by the adouble:ea scheme, where all
 * share-mode locks live on the data fork fd.
 * NOTE(review): fragment -- declaration/default/return lines missing.
 */
271 static off_t rf2off(off_t off)
274     if (off == AD_FILELOCK_OPEN_WR)
275         start = AD_FILELOCK_RSRC_OPEN_WR;
276     else if (off == AD_FILELOCK_OPEN_RD)
277         start = AD_FILELOCK_RSRC_OPEN_RD;
278     else if (off == AD_FILELOCK_DENY_RD)
279         start = AD_FILELOCK_RSRC_DENY_RD;
280     else if (off == AD_FILELOCK_DENY_WR)
281         start = AD_FILELOCK_RSRC_DENY_WR;
282     else if (off == AD_FILELOCK_OPEN_NONE)
283         start = AD_FILELOCK_RSRC_OPEN_NONE;
290  * (1) Test against our own locks array
291  * (2) Test fcntl lock, locks from other processes
293  * @param adf    (r) handle
294  * @param off    (r) offset
295  * @param len    (r) length
297  * @returns 1 if there's an existing lock, 0 if there's no lock,
298  * -1 in case any error occurred
/*
 * NOTE(review): fragment -- declarations of i/lock/plock, assignment of
 * lock.l_start/l_len, and several return statements are on missing lines.
 */
300 static int testlock(const struct ad_fd *adf, off_t off, off_t len)
308     plock = adf->adf_lock;
309     lock.l_whence = SEEK_SET;
312     /* (1) Do we have a lock ? */
313     for (i = 0; i < adf->adf_lockcount; i++) {
314         if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
318     /* (2) Does another process have a lock? */
/* Probe with the strongest lock type our open mode permits. */
319     lock.l_type = (adf->adf_flags & O_RDWR) ? F_WRLCK : F_RDLCK;
321     if (set_lock(adf->adf_fd, F_GETLK, &lock) < 0) {
322         /* is that kind of error possible ?*/
323         return (errno == EACCES || errno == EAGAIN) ? 1 : -1;
/* F_GETLK reported no conflicting lock. */
326     if (lock.l_type == F_UNLCK) {
/*
 * ad_openforks_v2: AD_VERSION2 variant of ad_openforks -- report which
 * forks are open by another process as ATTRBIT_DOPEN/ATTRBIT_ROPEN bits,
 * skipping tests the caller's attrbits already answer.
 * NOTE(review): sampled fragment -- variable declarations, len setup,
 * early returns and the final return are on missing lines.
 */
332 static uint16_t ad_openforks_v2(struct adouble *ad, uint16_t attrbits)
338     if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
340         /* XXX know the locks layout:
341            AD_FILELOCK_OPEN_WR is first
342            and use it for merging requests
344         if (ad_meta_fileno(ad) != -1) {
345             /* there's a resource fork test the four bytes for
346              * data RW/RD and fork RW/RD locks in one request
353             /* no resource fork, only data RD/RW may exist */
354             adf = &ad->ad_data_fork;
355             off = AD_FILELOCK_OPEN_WR;
/* Fast path: one combined probe covering all relevant slots. */
358         if (!testlock(adf, off, len))
361     /* either there's a lock or we already know one
364     if (!(attrbits & ATTRBIT_DOPEN)) {
365         if (ad_meta_fileno(ad) != -1) {
370             adf = &ad->ad_data_fork;
371             off = AD_FILELOCK_OPEN_WR;
/* 2 bytes cover both the OPEN_WR and OPEN_RD slots. */
373         ret = testlock(adf, off, 2) > 0? ATTRBIT_DOPEN : 0;
376     if (!(attrbits & ATTRBIT_ROPEN)) {
377         if (ad_meta_fileno(ad) != -1) {
380             ret |= testlock(adf, off, 2) > 0? ATTRBIT_ROPEN : 0;
387 /* test for sharemode locks, adouble:ea stores them on the datafork */
/*
 * ad_openforks_ea: adouble:ea variant -- all share-mode locks live on
 * the data fork fd, so every probe goes through ad->ad_data_fork.
 * NOTE(review): fragment -- ret/off/len declarations, len computation
 * and return statements are on missing lines.
 */
388 static uint16_t ad_openforks_ea(struct adouble *ad, uint16_t attrbits)
395     if (ad_data_fileno(ad) == -1)
398     if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
399         /* Test all 4 locks at once */
400         off = AD_FILELOCK_OPEN_WR;
402         if (testlock(&ad->ad_data_fork, off, len) == 0)
406     /* either there's a lock or we already know one fork is open */
408     if (!(attrbits & ATTRBIT_DOPEN)) {
409         off = AD_FILELOCK_OPEN_WR;
410         ret = testlock(&ad->ad_data_fork, off, 2) > 0 ? ATTRBIT_DOPEN : 0;
413     if (!(attrbits & ATTRBIT_ROPEN)) {
414         off = AD_FILELOCK_RSRC_OPEN_WR;
415         ret |= testlock(&ad->ad_data_fork, off, 2) > 0? ATTRBIT_ROPEN : 0;
/*
 * ad_testlock_v2: AD_VERSION2 share-mode probe -- pick the fd and slot
 * offset (df2off for data fork, hf2off for resource fork) and delegate
 * to testlock with len 1.  Returns 1/0/-1 like testlock.
 * NOTE(review): fragment -- adf/lock_offset declarations and the
 * no-resource-fork early return are on missing lines.
 */
421 static int ad_testlock_v2(struct adouble *ad, int eid, const off_t off)
427     if (eid == ADEID_DFORK) {
428         adf = &ad->ad_data_fork;
429         if (ad_meta_fileno(ad) != -1) {
431             lock_offset = df2off(off);
434         if (ad_meta_fileno(ad) == -1) {
435             /* there's no resource fork. return no lock */
439         lock_offset = hf2off(off);
441     return testlock(adf, lock_offset, 1);
/*
 * ad_testlock_ea: adouble:ea share-mode probe -- all locks are on the
 * data fork fd; resource-fork requests are remapped via rf2off.
 * NOTE(review): fragment -- the data-fork branch body is missing.
 */
444 static int ad_testlock_ea(struct adouble *ad, int eid, const off_t off)
448     if (eid == ADEID_DFORK) {
451         lock_offset = rf2off(off);
453     return testlock(&ad->ad_data_fork, lock_offset, 1);
456 #define LTYPE2STRBUFSIZ 128
/*
 * locktypetostr: render an ADLOCK_* bitmask as a human-readable string
 * ("CLR", "RD|WR|UPG", ...) for debug logging.  Returns a pointer to a
 * static buffer -- not thread-safe, not reentrant.
 * NOTE(review): fragment -- buffer reset and the separator-only-when-
 * nonempty logic appear to be on missing lines.
 */
457 static const char *locktypetostr(int type)
460     static char buf[LTYPE2STRBUFSIZ];
465         strlcat(buf, "CLR", LTYPE2STRBUFSIZ);
469     if (type & ADLOCK_RD) {
471             strlcat(buf, "|", LTYPE2STRBUFSIZ);
472         strlcat(buf, "RD", LTYPE2STRBUFSIZ);
475     if (type & ADLOCK_WR) {
477             strlcat(buf, "|", LTYPE2STRBUFSIZ);
478         strlcat(buf, "WR", LTYPE2STRBUFSIZ);
481     if (type & ADLOCK_UPGRADE) {
483             strlcat(buf, "|", LTYPE2STRBUFSIZ);
484         strlcat(buf, "UPG", LTYPE2STRBUFSIZ);
487     if (type & ADLOCK_FILELOCK) {
489             strlcat(buf, "|", LTYPE2STRBUFSIZ);
490         strlcat(buf, "FILELOCK", LTYPE2STRBUFSIZ);
/*
 * shmdstrfromoff: map a share-mode lock offset to its symbolic name for
 * debug logging.  NOTE(review): fragment -- the switch keyword line and
 * the default case are missing here.
 */
497 static const char *shmdstrfromoff(off_t off)
500     case AD_FILELOCK_OPEN_WR:
501         return "OPEN_WR_DATA";
502     case AD_FILELOCK_OPEN_RD:
503         return "OPEN_RD_DATA";
504     case AD_FILELOCK_RSRC_OPEN_WR:
505         return "OPEN_WR_RSRC";
506     case AD_FILELOCK_RSRC_OPEN_RD:
507         return "OPEN_RD_RSRC";
508     case AD_FILELOCK_DENY_WR:
509         return "DENY_WR_DATA";
510     case AD_FILELOCK_DENY_RD:
511         return "DENY_RD_DATA";
512     case AD_FILELOCK_RSRC_DENY_WR:
513         return "DENY_WR_RSRC";
514     case AD_FILELOCK_RSRC_DENY_RD:
515         return "DENY_RD_RSRC";
516     case AD_FILELOCK_OPEN_NONE:
517         return "OPEN_NONE_DATA";
518     case AD_FILELOCK_RSRC_OPEN_NONE:
519         return "OPEN_NONE_RSRC";
525 /******************************************************************************
527  ******************************************************************************/
/*
 * ad_lock: public entry -- acquire/clear a byte-range or share-mode
 * (ADLOCK_FILELOCK) lock for one open fork.  Steps: (1) translate the
 * request to an fcntl range on the right fd, (2) refuse conflicts with
 * locks held by sibling forks of this process (which fcntl cannot see),
 * (3) apply the fcntl lock, (4) record it in adf->adf_lock.
 * NOTE(review): sampled fragment -- many lines (declarations, braces,
 * returns, error labels) are missing; comments below only describe what
 * the visible lines show.
 */
529 int ad_lock(struct adouble *ad, uint32_t eid, int locktype, off_t off, off_t len, int fork)
538     LOG(log_debug, logtype_default, "ad_lock(\"%s\", %s, %s, off: %jd (%s), len: %jd): BEGIN",
539         ad->ad_m_name ? ad->ad_m_name : "???",
540         eid == ADEID_DFORK ? "data" : "reso",
541         locktypetostr(locktype),
/* share-mode locks are always single-byte slots */
546     if ((locktype & ADLOCK_FILELOCK) && (len != 1))
553     if (eid == ADEID_DFORK) {
554         adf = &ad->ad_data_fork;
555         if ((ad->ad_vers == AD_VERSION2) && (type & ADLOCK_FILELOCK)) {
556             if (ad_meta_fileno(ad) != -1) { /* META */
558                 lock.l_start = df2off(off);
/* resource-fork request: placement depends on the adouble version */
562         switch (ad->ad_vers) {
564             if (ad_meta_fileno(ad) == -1 || ad_reso_fileno(ad) == -1) {
565                 /* there's no meta data. return a lock error
566                  * otherwise if a second process is able to create it
567                  * locks are a mess. */
571             if (type & ADLOCK_FILELOCK) {
572                 adf = ad->ad_mdp; /* either resource or meta data (set in ad_open) */
573                 lock.l_start = hf2off(off);
575                 /* we really want the resource fork it's a byte lock */
576                 adf = &ad->ad_resource_fork;
577                 lock.l_start += ad_getentryoff(ad, eid);
582             if (type & ADLOCK_FILELOCK) {
583                 adf = &ad->ad_data_fork;
584                 lock.l_start = rf2off(off);
590                 lock.l_start= ADEDOFF_RFORK_OSX + off;
600     /* NOTE: we can't write lock a read-only file. on those, we just
601      * make sure that we have a read lock set. that way, we at least prevent
602      * someone else from really setting a deny read/write on the file.
604     if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
605         type = (type & ~ADLOCK_WR) | ADLOCK_RD;
608     lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
609     lock.l_whence = SEEK_SET;
612     /* byte_lock(len=-1) lock whole file */
613     if (len == BYTELOCK_MAX) {
614         lock.l_len -= lock.l_start; /* otherwise EOVERFLOW error */
617     /* see if it's locked by another fork.
618      * NOTE: this guarantees that any existing locks must be at most
619      * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
620      * guaranteed to be ORable. */
621     if (adf_findxlock(adf, fork, ADLOCK_WR |
622                       ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
623                       lock.l_start, lock.l_len) > -1) {
628     /* look for any existing lock that we may have */
629     i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
630     adflock = (i < 0) ? NULL : adf->adf_lock + i;
632     /* here's what we check for:
633        1) we're trying to re-lock a lock, but we didn't specify an update.
634        2) we're trying to free only part of a lock.
635        3) we're trying to free a non-existent lock. */
636     if ( (!adflock && (lock.l_type == F_UNLCK))
639           && !(type & ADLOCK_UPGRADE)
640           && ((lock.l_type != F_UNLCK)
641               || (adflock->lock.l_start != lock.l_start)
642               || (adflock->lock.l_len != lock.l_len) ))
649     /* now, update our list of locks */
651     if (lock.l_type == F_UNLCK) {
652         adf_freelock(adf, i);
656     /* attempt to lock the file. */
657     if (set_lock(adf->adf_fd, F_SETLK, &lock) < 0)
660     /* we upgraded this lock. */
661     if (adflock && (type & ADLOCK_UPGRADE)) {
662         memcpy(&adflock->lock, &lock, sizeof(lock));
666     /* it wasn't an upgrade */
/* read locks may share a refcount with an identical lock from a sibling fork */
668     if (lock.l_type == F_RDLCK) {
669         oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);
672     /* no more space. this will also happen if lockmax == lockcount == 0 */
673     if (adf->adf_lockmax == adf->adf_lockcount) {
674         adf_lock_t *tmp = (adf_lock_t *)
675             realloc(adf->adf_lock, sizeof(adf_lock_t)*
676                     (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
680         adf->adf_lockmax += ARRAY_BLOCK_SIZE;
682     adflock = adf->adf_lock + adf->adf_lockcount;
685     memcpy(&adflock->lock, &lock, sizeof(lock));
686     adflock->user = fork;
688         adflock->refcount = (adf->adf_lock + oldlock)->refcount;
689     } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
693     (*adflock->refcount)++;
694     adf->adf_lockcount++;
/* error path: roll back the fcntl lock we just took */
698     lock.l_type = F_UNLCK;
699     set_lock(adf->adf_fd, F_SETLK, &lock);
703 /* -------------------------
/*
 * ad_tmplock: take (or release) a temporary lock without recording it in
 * the per-fork lock list; on unlock, adf_relockrange() restores any
 * recorded locks the temporary unlock may have clobbered.
 * NOTE(review): sampled fragment -- declarations, lock.l_start/l_len
 * setup, early returns, and the final return are on missing lines.
 */
705 int ad_tmplock(struct adouble *ad, const uint32_t eid, const int locktype,
706                const off_t off, const off_t len, const int fork)
715     if (eid == ADEID_DFORK) {
716         adf = &ad->ad_data_fork;
719         adf = &ad->ad_resource_fork;
720         if (adf->adf_fd == -1) {
721             /* there's no resource fork. return success */
724         /* if ADLOCK_FILELOCK we want a lock from offset 0
725          * it's used when deleting a file:
726          * in open we put read locks on meta datas
727          * in delete a write locks on the whole file
728          * so if the file is open by somebody else it fails
730         if (!(type & ADLOCK_FILELOCK))
731             lock.l_start += ad_getentryoff(ad, eid);
/* same read-only downgrade as in ad_lock */
734     if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
735         type = (type & ~ADLOCK_WR) | ADLOCK_RD;
738     lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
739     lock.l_whence = SEEK_SET;
742     /* see if it's locked by another fork. */
743     if (fork && adf_findxlock(adf, fork,
744                               ADLOCK_WR | ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
745                               lock.l_start, lock.l_len) > -1) {
750     /* okay, we might have ranges byte-locked. we need to make sure that
751      * we restore the appropriate ranges once we're done. so, we check
752      * for overlap on an unlock and relock.
753      * XXX: in the future, all the byte locks will be sorted and contiguous.
754      * we just want to upgrade all the locks and then downgrade them
756     err = set_lock(adf->adf_fd, F_SETLK, &lock);
757     if (!err && (lock.l_type == F_UNLCK))
758         adf_relockrange(adf, adf->adf_fd, lock.l_start, len);
763 /* --------------------- */
/*
 * ad_unlock: release all locks held by `fork` on both the data and
 * resource fork fds (skipping forks that are not open).
 */
764 void ad_unlock(struct adouble *ad, const int fork)
766     if (ad_data_fileno(ad) != -1) {
767         adf_unlock(&ad->ad_data_fork, fork);
769     if (ad_reso_fileno(ad) != -1) {
770         adf_unlock(&ad->ad_resource_fork, fork);
775  * Test for a share mode lock
777  * @param ad    (rw) handle
778  * @param eid   (r)  datafork or resource fork
779  * @param off   (r)  sharemode lock to test
781  * @returns 1 if there's an existing lock, 0 if there's no lock,
782  * -1 in case any error occurred
/* Dispatch on adouble version; NOTE(review): the case labels
 * (AD_VERSION2 / AD_VERSION_EA, presumably) are on missing lines. */
784 int ad_testlock(struct adouble *ad, int eid, const off_t off)
786     switch (ad->ad_vers) {
788         return ad_testlock_v2(ad, eid, off);
790         return ad_testlock_ea(ad, eid, off);
797  * Return if a file is open by another process.
799  * Optimized for the common case:
800  * - there's no locks held by another process (clients)
801  * - or we already know the answer and don't need to test (attrbits)
803  * @param ad          (rw) handle
804  * @param attrbits    (r)  forks opened by us
805  * @returns bitflags ATTRBIT_DOPEN | ATTRBIT_ROPEN if
806  * other process has fork of file opened
/* Dispatch on adouble version; NOTE(review): the case labels and the
 * default return are on missing lines in this fragment. */
808 uint16_t ad_openforks(struct adouble *ad, uint16_t attrbits)
810     switch (ad->ad_vers) {
812         return ad_openforks_v2(ad, attrbits);
814         return ad_openforks_ea(ad, attrbits);