2 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
3 * All Rights Reserved. See COPYRIGHT for more information.
5 * Because fcntl locks are
6 * process-oriented, we need to keep around a list of file descriptors
7 * that refer to the same file.
9 * TODO: fix the race when reading/writing.
10 * keep a pool of both locks and reference counters around so that
11 * we can save on mallocs. we should also use a tree to keep things
17 #endif /* HAVE_CONFIG_H */
19 #include <atalk/adouble.h>
20 #include <atalk/logger.h>
31 /* ----------------------- */
/* Wrapper around fcntl(2) byte-range locking.
 *
 * fd == -2 is the sentinel used for symlinks (no real descriptor):
 * report "unlocked" on F_GETLK and succeed silently otherwise.
 * Returns fcntl's result (0 on success, -1 with errno set on failure). */
static int set_lock(int fd, int cmd, struct flock *lock)
{
    if (fd == -2) {
        /* We assign fd = -2 for symlinks -> do nothing */
        if (cmd == F_GETLK)
            lock->l_type = F_UNLCK;
        return 0;
    }
    return fcntl(fd, cmd, lock);
}
43 /* ----------------------- */
44 static int XLATE_FCNTL_LOCK(int type)
57 /* ----------------------- */
/* Do the byte ranges [a, a+alen) and [b, b+blen) overlap?
 * A length of 0 means "from the start offset to EOF", so a zero-length
 * range overlaps anything that starts at or after its start offset.
 * (The middle disjunct mirrors the first with the roles of a/b swapped;
 * it was dropped by the extraction and is restored here.) */
static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
{
    return (!alen && a <= b) ||
        (!blen && b <= a) ||
        ((a + alen > b) && (b + blen > a));
}
65 /* allocation for lock regions. we allocate aggressively and shrink
66 * only in large chunks. */
67 #define ARRAY_BLOCK_SIZE 10
68 #define ARRAY_FREE_DELTA 100
70 /* remove a lock and compact space if necessary */
71 static void adf_freelock(struct ad_fd *ad, const int i)
73 adf_lock_t *lock = ad->adf_lock + i;
75 if (--(*lock->refcount) < 1) {
77 lock->lock.l_type = F_UNLCK;
78 set_lock(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
83 /* move another lock into the empty space */
84 if (i < ad->adf_lockcount) {
85 memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
88 /* free extra cruft if we go past a boundary. we always want to
89 * keep at least some stuff around for allocations. this wastes
90 * a bit of space to save time on reallocations. */
91 if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
92 (ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
93 struct adf_lock_t *tmp;
95 tmp = (struct adf_lock_t *)
96 realloc(ad->adf_lock, sizeof(adf_lock_t)*
97 (ad->adf_lockcount + ARRAY_FREE_DELTA));
100 ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
106 /* this needs to deal with the following cases:
107 * 1) fork is the only user of the lock
108 * 2) fork shares a read lock with another open fork
110 * i converted to using arrays of locks. everytime a lock
111 * gets removed, we shift all of the locks down.
113 static void adf_unlock(struct ad_fd *ad, const int fork)
115 adf_lock_t *lock = ad->adf_lock;
118 for (i = 0; i < ad->adf_lockcount; i++) {
120 if (lock[i].user == fork) {
121 /* we're really going to delete this lock. note: read locks
122 are the only ones that allow refcounts > 1 */
124 i--; /* we shifted things down, so we need to backtrack */
125 /* unlikely but realloc may have change adf_lock */
131 /* relock any byte lock that overlaps off/len. unlock everything
133 static void adf_relockrange(struct ad_fd *ad, int fd, off_t off, off_t len)
135 adf_lock_t *lock = ad->adf_lock;
138 for (i = 0; i < ad->adf_lockcount; i++) {
139 if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
140 set_lock(fd, F_SETLK, &lock[i].lock);
145 /* find a byte lock that overlaps off/len for a particular open fork */
146 static int adf_findlock(struct ad_fd *ad,
147 const int fork, const int type,
151 adf_lock_t *lock = ad->adf_lock;
154 for (i = 0; i < ad->adf_lockcount; i++) {
155 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
156 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
157 (lock[i].user == fork) &&
158 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
166 /* search other fork lock lists */
167 static int adf_findxlock(struct ad_fd *ad,
168 const int fork, const int type,
172 adf_lock_t *lock = ad->adf_lock;
175 for (i = 0; i < ad->adf_lockcount; i++) {
176 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK))
178 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK)))
180 (lock[i].user != fork)
182 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
188 /* okay, this needs to do the following:
189 * 1) check current list of locks. error on conflict.
190 * 2) apply the lock. error on conflict with another process.
191 * 3) update the list of locks this file has.
193 * NOTE: this treats synchronization locks a little differently. we
194 * do the following things for those:
195 * 1) if the header file exists, all the locks go in the beginning
197 * 2) if the header file doesn't exist, we stick the locks
198 * in the locations specified by AD_FILELOCK_RD/WR.
200 #define LOCK_DATA_WR (0)
201 #define LOCK_DATA_RD (1)
202 #define LOCK_RSRC_WR (2)
203 #define LOCK_RSRC_RD (3)
205 #define LOCK_RSRC_DRD (4)
206 #define LOCK_RSRC_DWR (5)
207 #define LOCK_DATA_DRD (6)
208 #define LOCK_DATA_DWR (7)
210 #define LOCK_RSRC_NONE (8)
211 #define LOCK_DATA_NONE (9)
214 translate a data fork lock to an offset
217 static off_t df2off(off_t off)
220 if (off == AD_FILELOCK_OPEN_WR)
221 start = LOCK_DATA_WR;
222 else if (off == AD_FILELOCK_OPEN_RD)
223 start = LOCK_DATA_RD;
224 else if (off == AD_FILELOCK_DENY_RD)
225 start = LOCK_DATA_DRD;
226 else if (off == AD_FILELOCK_DENY_WR)
227 start = LOCK_DATA_DWR;
228 else if (off == AD_FILELOCK_OPEN_NONE)
229 start = LOCK_DATA_NONE;
234 translate a resource fork lock to an offset
237 static off_t hf2off(off_t off)
240 if (off == AD_FILELOCK_OPEN_WR)
241 start = LOCK_RSRC_WR;
242 else if (off == AD_FILELOCK_OPEN_RD)
243 start = LOCK_RSRC_RD;
244 else if (off == AD_FILELOCK_DENY_RD)
245 start = LOCK_RSRC_DRD;
246 else if (off == AD_FILELOCK_DENY_WR)
247 start = LOCK_RSRC_DWR;
248 else if (off == AD_FILELOCK_OPEN_NONE)
249 start = LOCK_RSRC_NONE;
254 translate a resource fork lock to an offset
256 static off_t rf2off(off_t off)
259 if (off == AD_FILELOCK_OPEN_WR)
260 start = AD_FILELOCK_RSRC_OPEN_WR;
261 else if (off == AD_FILELOCK_OPEN_RD)
262 start = AD_FILELOCK_RSRC_OPEN_RD;
263 else if (off == AD_FILELOCK_DENY_RD)
264 start = AD_FILELOCK_RSRC_DENY_RD;
265 else if (off == AD_FILELOCK_DENY_WR)
266 start = AD_FILELOCK_RSRC_DENY_WR;
267 else if (off == AD_FILELOCK_OPEN_NONE)
268 start = AD_FILELOCK_RSRC_OPEN_NONE;
275 * (1) Test against our own locks array
276 * (2) Test fcntl lock, locks from other processes
278 * @param adf (r) handle
279 * @param off (r) offset
 * @param len (r) length
 * @returns 1 if there's an existing lock, 0 if there's no lock,
 * -1 in case any error occurred
285 static int testlock(const struct ad_fd *adf, off_t off, off_t len)
293 plock = adf->adf_lock;
294 lock.l_whence = SEEK_SET;
297 /* (1) Do we have a lock ? */
298 for (i = 0; i < adf->adf_lockcount; i++) {
299 if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
303 /* (2) Does another process have a lock? */
304 lock.l_type = (adf->adf_flags & O_RDWR) ? F_WRLCK : F_RDLCK;
306 if (set_lock(adf->adf_fd, F_GETLK, &lock) < 0) {
307 /* is that kind of error possible ?*/
308 return (errno == EACCES || errno == EAGAIN) ? 1 : -1;
311 if (lock.l_type == F_UNLCK) {
317 static uint16_t ad_openforks_v2(struct adouble *ad, uint16_t attrbits)
323 if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
325 /* XXX know the locks layout:
326 AD_FILELOCK_OPEN_WR is first
327 and use it for merging requests
329 if (ad_meta_fileno(ad) != -1) {
330 /* there's a resource fork test the four bytes for
331 * data RW/RD and fork RW/RD locks in one request
338 /* no resource fork, only data RD/RW may exist */
339 adf = &ad->ad_data_fork;
340 off = AD_FILELOCK_OPEN_WR;
343 if (!testlock(adf, off, len))
346 /* either there's a lock or we already know one
349 if (!(attrbits & ATTRBIT_DOPEN)) {
350 if (ad_meta_fileno(ad) != -1) {
355 adf = &ad->ad_data_fork;
356 off = AD_FILELOCK_OPEN_WR;
358 ret = testlock(adf, off, 2) > 0? ATTRBIT_DOPEN : 0;
361 if (!(attrbits & ATTRBIT_ROPEN)) {
362 if (ad_meta_fileno(ad) != -1) {
365 ret |= testlock(adf, off, 2) > 0? ATTRBIT_ROPEN : 0;
372 /* test for sharemode locks, adouble:ea stores them on the datafork */
373 static uint16_t ad_openforks_ea(struct adouble *ad, uint16_t attrbits)
380 if (ad_data_fileno(ad) == -1)
383 if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
384 /* Test all 4 locks at once */
385 off = AD_FILELOCK_OPEN_WR;
387 if (testlock(&ad->ad_data_fork, off, len) == 0)
391 /* either there's a lock or we already know one fork is open */
393 if (!(attrbits & ATTRBIT_DOPEN)) {
394 off = AD_FILELOCK_OPEN_WR;
395 ret = testlock(&ad->ad_data_fork, off, 2) > 0 ? ATTRBIT_DOPEN : 0;
398 if (!(attrbits & ATTRBIT_ROPEN)) {
399 off = AD_FILELOCK_RSRC_OPEN_WR;
400 ret |= testlock(&ad->ad_data_fork, off, 2) > 0? ATTRBIT_ROPEN : 0;
406 static int ad_testlock_v2(struct adouble *ad, int eid, const off_t off)
412 if (eid == ADEID_DFORK) {
413 adf = &ad->ad_data_fork;
414 if (ad_meta_fileno(ad) != -1) {
416 lock_offset = df2off(off);
419 if (ad_meta_fileno(ad) == -1) {
420 /* there's no resource fork. return no lock */
424 lock_offset = hf2off(off);
426 return testlock(adf, lock_offset, 1);
429 static int ad_testlock_ea(struct adouble *ad, int eid, const off_t off)
433 if (eid == ADEID_DFORK) {
436 lock_offset = rf2off(off);
438 return testlock(&ad->ad_data_fork, lock_offset, 1);
441 /******************************************************************************
443 ******************************************************************************/
445 int ad_lock(struct adouble *ad, uint32_t eid, int locktype, off_t off, off_t len, int fork)
454 if ((type & ADLOCK_FILELOCK) && (len != 1))
460 if (eid == ADEID_DFORK) {
461 adf = &ad->ad_data_fork;
462 if ((ad->ad_vers == AD_VERSION2) && (type & ADLOCK_FILELOCK)) {
463 if (ad_meta_fileno(ad) != -1) { /* META */
465 lock.l_start = df2off(off);
469 switch (ad->ad_vers) {
471 if (ad_meta_fileno(ad) == -1 || ad_reso_fileno(ad) == -1) {
472 /* there's no meta data. return a lock error
473 * otherwise if a second process is able to create it
474 * locks are a mess. */
478 if (type & ADLOCK_FILELOCK) {
479 adf = ad->ad_mdp; /* either resource or meta data (set in ad_open) */
480 lock.l_start = hf2off(off);
482 /* we really want the resource fork it's a byte lock */
483 adf = &ad->ad_resource_fork;
484 lock.l_start += ad_getentryoff(ad, eid);
489 if (type & ADLOCK_FILELOCK) {
490 lock.l_start = rf2off(off);
492 /* it's a byterange lock on the rsrcfork -> discard it */
495 adf = &ad->ad_data_fork;
503 /* NOTE: we can't write lock a read-only file. on those, we just
504 * make sure that we have a read lock set. that way, we at least prevent
505 * someone else from really setting a deny read/write on the file.
507 if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
508 type = (type & ~ADLOCK_WR) | ADLOCK_RD;
511 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
512 lock.l_whence = SEEK_SET;
515 /* byte_lock(len=-1) lock whole file */
516 if (len == BYTELOCK_MAX) {
517 lock.l_len -= lock.l_start; /* otherwise EOVERFLOW error */
520 /* see if it's locked by another fork.
521 * NOTE: this guarantees that any existing locks must be at most
522 * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
523 * guaranteed to be ORable. */
524 if (adf_findxlock(adf, fork, ADLOCK_WR |
525 ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
526 lock.l_start, lock.l_len) > -1) {
531 /* look for any existing lock that we may have */
532 i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
533 adflock = (i < 0) ? NULL : adf->adf_lock + i;
535 /* here's what we check for:
536 1) we're trying to re-lock a lock, but we didn't specify an update.
537 2) we're trying to free only part of a lock.
538 3) we're trying to free a non-existent lock. */
539 if ( (!adflock && (lock.l_type == F_UNLCK))
542 && !(type & ADLOCK_UPGRADE)
543 && ((lock.l_type != F_UNLCK)
544 || (adflock->lock.l_start != lock.l_start)
545 || (adflock->lock.l_len != lock.l_len) ))
552 /* now, update our list of locks */
554 if (lock.l_type == F_UNLCK) {
555 adf_freelock(adf, i);
559 /* attempt to lock the file. */
560 if (set_lock(adf->adf_fd, F_SETLK, &lock) < 0)
563 /* we upgraded this lock. */
564 if (adflock && (type & ADLOCK_UPGRADE)) {
565 memcpy(&adflock->lock, &lock, sizeof(lock));
569 /* it wasn't an upgrade */
571 if (lock.l_type == F_RDLCK) {
572 oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);
575 /* no more space. this will also happen if lockmax == lockcount == 0 */
576 if (adf->adf_lockmax == adf->adf_lockcount) {
577 adf_lock_t *tmp = (adf_lock_t *)
578 realloc(adf->adf_lock, sizeof(adf_lock_t)*
579 (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
583 adf->adf_lockmax += ARRAY_BLOCK_SIZE;
585 adflock = adf->adf_lock + adf->adf_lockcount;
588 memcpy(&adflock->lock, &lock, sizeof(lock));
589 adflock->user = fork;
591 adflock->refcount = (adf->adf_lock + oldlock)->refcount;
592 } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
596 (*adflock->refcount)++;
597 adf->adf_lockcount++;
601 lock.l_type = F_UNLCK;
602 set_lock(adf->adf_fd, F_SETLK, &lock);
606 /* -------------------------
608 int ad_tmplock(struct adouble *ad, const uint32_t eid, const int locktype,
609 const off_t off, const off_t len, const int fork)
616 if (ad->ad_vers == AD_VERSION_EA)
621 if (eid == ADEID_DFORK) {
622 adf = &ad->ad_data_fork;
625 adf = &ad->ad_resource_fork;
626 if (adf->adf_fd == -1) {
627 /* there's no resource fork. return success */
630 /* if ADLOCK_FILELOCK we want a lock from offset 0
631 * it's used when deleting a file:
632 * in open we put read locks on meta datas
633 * in delete a write locks on the whole file
634 * so if the file is open by somebody else it fails
636 if (!(type & ADLOCK_FILELOCK))
637 lock.l_start += ad_getentryoff(ad, eid);
640 if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
641 type = (type & ~ADLOCK_WR) | ADLOCK_RD;
644 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
645 lock.l_whence = SEEK_SET;
648 /* see if it's locked by another fork. */
649 if (fork && adf_findxlock(adf, fork,
650 ADLOCK_WR | ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
651 lock.l_start, lock.l_len) > -1) {
656 /* okay, we might have ranges byte-locked. we need to make sure that
657 * we restore the appropriate ranges once we're done. so, we check
658 * for overlap on an unlock and relock.
659 * XXX: in the future, all the byte locks will be sorted and contiguous.
660 * we just want to upgrade all the locks and then downgrade them
662 err = set_lock(adf->adf_fd, F_SETLK, &lock);
663 if (!err && (lock.l_type == F_UNLCK))
664 adf_relockrange(adf, adf->adf_fd, lock.l_start, len);
669 /* --------------------- */
670 void ad_unlock(struct adouble *ad, const int fork)
672 if (ad_data_fileno(ad) != -1) {
673 adf_unlock(&ad->ad_data_fork, fork);
675 if (ad_reso_fileno(ad) != -1) {
676 adf_unlock(&ad->ad_resource_fork, fork);
681 * Test for a share mode lock
683 * @param ad (rw) handle
684 * @param eid (r) datafork or ressource fork
685 * @param off (r) sharemode lock to test
687 * @returns 1 if there's an existing lock, 0 if there's no lock,
 * -1 in case any error occurred
690 int ad_testlock(struct adouble *ad, int eid, const off_t off)
692 switch (ad->ad_vers) {
694 return ad_testlock_v2(ad, eid, off);
696 return ad_testlock_ea(ad, eid, off);
703 * Return if a file is open by another process.
705 * Optimized for the common case:
706 * - there's no locks held by another process (clients)
707 * - or we already know the answer and don't need to test (attrbits)
709 * @param ad (rw) handle
710 * @param attrbits (r) forks opened by us
711 * @returns bitflags ATTRBIT_DOPEN | ATTRBIT_ROPEN if
712 * other process has fork of file opened
714 uint16_t ad_openforks(struct adouble *ad, uint16_t attrbits)
716 switch (ad->ad_vers) {
718 return ad_openforks_v2(ad, attrbits);
720 return ad_openforks_ea(ad, attrbits);