2 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
3 * All Rights Reserved. See COPYRIGHT for more information.
5 * Byte-range locks. This uses either whole-file flocks to fake byte
6 * locks or fcntl-based actual byte locks. Because fcntl locks are
7 * process-oriented, we need to keep around a list of file descriptors
8 * that refer to the same file. Currently, this doesn't serialize access
9 * to the locks. As a result, there's the potential for race conditions.
11 * TODO: fix the race when reading/writing.
12 * keep a pool of both locks and reference counters around so that
13 * we can save on mallocs. we should also use a tree to keep things
19 #endif /* HAVE_CONFIG_H */
21 #include <atalk/adouble.h>
22 #include <atalk/logger.h>
33 /* translate between ADLOCK styles and specific locking mechanisms */
/* Map an ADLOCK_* style to a flock(2) operation:
 * ADLOCK_RD -> LOCK_SH (shared), ADLOCK_WR -> LOCK_EX (exclusive),
 * ADLOCK_CLR -> LOCK_UN (unlock); any other style yields -1. */
34 #define XLATE_FLOCK(type) ((type) == ADLOCK_RD ? LOCK_SH : \
35 ((type) == ADLOCK_WR ? LOCK_EX : \
36 ((type) == ADLOCK_CLR ? LOCK_UN : -1)))
38 /* ----------------------- */
/* Thin wrapper around fcntl(2) record locking: applies `lock` to `fd`
 * with command `cmd` (F_SETLK / F_SETLKW / F_GETLK) and returns
 * fcntl()'s result.
 * NOTE(review): interior lines are missing from this excerpt (numbering
 * jumps 39 -> 42 -> 44 -> 47); the missing condition presumably tests
 * for the fd == -2 symlink sentinel and short-circuits, reporting the
 * range as unlocked -- confirm against the full source. */
39 static int set_lock(int fd, int cmd, struct flock *lock)
42 /* We assign fd = -2 for symlinks -> do nothing */
/* symlink sentinel path: pretend no lock is held */
44 lock->l_type = F_UNLCK;
47 return fcntl(fd, cmd, lock);
50 /* ----------------------- */
/* Map an ADLOCK_* style to an fcntl l_type (presumably F_RDLCK /
 * F_WRLCK / F_UNLCK, mirroring XLATE_FLOCK above).
 * NOTE(review): the function body (original lines 52-62) is not
 * visible in this excerpt -- confirm against the full source. */
51 static int XLATE_FCNTL_LOCK(int type)
64 /* ----------------------- */
/* Nonzero when range [a, a+alen) overlaps [b, b+blen).
 * A zero length means "from start to end of file", so (!alen && a <= b)
 * treats an unbounded range starting at or before b as overlapping.
 * NOTE(review): original line 68 is missing from this excerpt --
 * likely the symmetric (!blen && b <= a) test; confirm. */
65 static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
67 return (!alen && a <= b) ||
69 ( (a + alen > b) && (b + blen > a) );
72 /* allocation for lock regions. we allocate aggressively and shrink
73 * only in large chunks. */
/* Grow the per-fd lock array ARRAY_BLOCK_SIZE entries at a time;
 * only shrink once the slack exceeds ARRAY_FREE_DELTA entries, to
 * avoid realloc churn (see adf_freelock below). */
74 #define ARRAY_BLOCK_SIZE 10
75 #define ARRAY_FREE_DELTA 100
77 /* remove a lock and compact space if necessary */
/* Drop lock slot `i` from ad's lock array. When the shared refcount
 * drops below 1, the byte range is actually released via fcntl. The
 * array is kept dense by copying the last entry into the freed slot,
 * and is shrunk in ARRAY_FREE_DELTA-sized steps.
 * NOTE(review): several interior lines are missing from this excerpt
 * (freeing the refcount allocation, decrementing adf_lockcount, and
 * the realloc failure check) -- confirm against the full source. */
78 static void adf_freelock(struct ad_fd *ad, const int i)
81 adf_lock_t *lock = ad->adf_lock + i;
/* read locks are the only locks that can share a refcount > 1 */
83 if (--(*lock->refcount) < 1) {
86 lock->lock.l_type = F_UNLCK;
87 set_lock(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
93 /* move another lock into the empty space */
/* lock + adf_lockcount - i addresses the last live entry -- assumes
 * adf_lockcount was already decremented on a missing line; verify. */
94 if (i < ad->adf_lockcount) {
95 memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
98 /* free extra cruft if we go past a boundary. we always want to
99 * keep at least some stuff around for allocations. this wastes
100 * a bit of space to save time on reallocations. */
101 if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
102 (ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
103 struct adf_lock_t *tmp;
105 tmp = (struct adf_lock_t *)
106 realloc(ad->adf_lock, sizeof(adf_lock_t)*
107 (ad->adf_lockcount + ARRAY_FREE_DELTA));
110 ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
117 /* this needs to deal with the following cases:
118 * 1) fork is the only user of the lock
119 * 2) fork shares a read lock with another open fork
121 * I converted to using arrays of locks. Every time a lock
122 * gets removed, we shift all of the locks down.
/* Remove every lock owned by open fork `fork` from this descriptor's
 * lock array (called when the fork is closed). */
124 static void adf_unlock(struct ad_fd *ad, const int fork)
127 adf_lock_t *lock = ad->adf_lock;
130 for (i = 0; i < ad->adf_lockcount; i++) {
132 if (lock[i].user == fork) {
133 /* we're really going to delete this lock. note: read locks
134 are the only ones that allow refcounts > 1 */
/* adf_freelock() compacts the array by moving the last entry
 * into slot i, so slot i must be re-examined */
136 i--; /* we shifted things down, so we need to backtrack */
137 /* unlikely but realloc may have changed adf_lock */
144 /* relock any byte lock that overlaps off/len. unlock everything
/* Re-apply every stored byte lock that overlaps [off, off+len) --
 * used to restore state after a temporary lock/unlock cycle (see
 * ad_fcntl_tmplock). Skipped when the descriptor is exclusively
 * locked as a whole (adf_excl), since no per-range locks are set. */
146 static void adf_relockrange(struct ad_fd *ad, int fd, off_t off, off_t len)
149 adf_lock_t *lock = ad->adf_lock;
152 if (!ad->adf_excl) for (i = 0; i < ad->adf_lockcount; i++) {
153 if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
154 set_lock(fd, F_SETLK, &lock[i].lock);
160 /* find a byte lock that overlaps off/len for a particular open fork */
/* `type` is a bitmask of ADLOCK_RD / ADLOCK_WR selecting which lock
 * kinds to match. Returns the array index of the first lock owned by
 * `fork` that matches type and overlaps the range; presumably -1 when
 * none matches (the return lines are missing from this excerpt). */
161 static int adf_findlock(struct ad_fd *ad,
162 const int fork, const int type,
167 adf_lock_t *lock = ad->adf_lock;
170 for (i = 0; i < ad->adf_lockcount; i++) {
171 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
172 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
173 (lock[i].user == fork) &&
174 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
183 /* search other fork lock lists */
/* Same as adf_findlock() but matches locks held by any open fork
 * OTHER than `fork` (lock[i].user != fork) -- used to detect
 * conflicts with sibling forks on the same descriptor. Presumably
 * returns the index or -1 (return lines missing from this excerpt). */
184 static int adf_findxlock(struct ad_fd *ad,
185 const int fork, const int type,
190 adf_lock_t *lock = ad->adf_lock;
193 for (i = 0; i < ad->adf_lockcount; i++) {
194 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
195 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
196 (lock[i].user != fork) &&
197 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
204 /* okay, this needs to do the following:
205 * 1) check current list of locks. error on conflict.
206 * 2) apply the lock. error on conflict with another process.
207 * 3) update the list of locks this file has.
209 * NOTE: this treats synchronization locks a little differently. we
210 * do the following things for those:
211 * 1) if the header file exists, all the locks go in the beginning
213 * 2) if the header file doesn't exist, we stick the locks
214 * in the locations specified by AD_FILELOCK_RD/WR.
/* Slot indices for the reserved "file lock" region: each open/deny
 * mode is signalled by locking one well-known byte, so other
 * processes can detect it with F_GETLK (see df2off/hf2off below). */
216 #define LOCK_DATA_WR (0)
217 #define LOCK_DATA_RD (1)
218 #define LOCK_RSRC_WR (2)
219 #define LOCK_RSRC_RD (3)
221 #define LOCK_RSRC_DRD (4)
222 #define LOCK_RSRC_DWR (5)
223 #define LOCK_DATA_DRD (6)
224 #define LOCK_DATA_DWR (7)
226 #define LOCK_RSRC_NONE (8)
227 #define LOCK_DATA_NONE (9)
230 translate a data fork lock to an offset
/* Map an AD_FILELOCK_* pseudo-offset to the data-fork lock slot
 * (LOCK_DATA_*). NOTE(review): the declaration of `start` (likely
 * `off_t start = off;`) and the final `return start;` are missing
 * from this excerpt -- unmatched offsets presumably pass through. */
233 static off_t df2off(off_t off)
236 if (off == AD_FILELOCK_OPEN_WR)
237 start = LOCK_DATA_WR;
238 else if (off == AD_FILELOCK_OPEN_RD)
239 start = LOCK_DATA_RD;
240 else if (off == AD_FILELOCK_DENY_RD)
241 start = LOCK_DATA_DRD;
242 else if (off == AD_FILELOCK_DENY_WR)
243 start = LOCK_DATA_DWR;
244 else if (off == AD_FILELOCK_OPEN_NONE)
245 start = LOCK_DATA_NONE;
250 translate a resource fork lock to an offset
/* Map an AD_FILELOCK_* pseudo-offset to the resource-fork lock slot
 * (LOCK_RSRC_*); mirror of df2off() above. NOTE(review): `start`
 * declaration and return statement are missing from this excerpt. */
253 static off_t hf2off(off_t off)
256 if (off == AD_FILELOCK_OPEN_WR)
257 start = LOCK_RSRC_WR;
258 else if (off == AD_FILELOCK_OPEN_RD)
259 start = LOCK_RSRC_RD;
260 else if (off == AD_FILELOCK_DENY_RD)
261 start = LOCK_RSRC_DRD;
262 else if (off == AD_FILELOCK_DENY_WR)
263 start = LOCK_RSRC_DWR;
264 else if (off == AD_FILELOCK_OPEN_NONE)
265 start = LOCK_RSRC_NONE;
269 /* ------------------ */
/* Apply (or clear, when the style translates to F_UNLCK) one lock for
 * open fork `fork` on the fork selected by `eid`. Sequence: pick the
 * descriptor and translate pseudo-offsets for ADLOCK_FILELOCK styles,
 * downgrade write locks on read-only descriptors, reject conflicts
 * with locks held by sibling forks, apply the fcntl lock, then record
 * it in the per-descriptor lock array (read locks on the same range
 * may share another fork's refcount).
 * NOTE(review): many interior lines -- error returns, variable
 * declarations, the `type` initialization from `locktype` -- are
 * missing from this excerpt; confirm against the full source. */
270 static int ad_fcntl_lock(struct adouble *ad, const uint32_t eid, const int locktype,
271 const off_t off, const off_t len, const int fork)
283 if (eid == ADEID_DFORK) {
284 adf = &ad->ad_data_fork;
285 if ((type & ADLOCK_FILELOCK)) {
286 if (ad_meta_fileno(ad) != -1) { /* META */
288 lock.l_start = df2off(off);
292 if (ad_meta_fileno(ad) == -1 || ad_reso_fileno(ad) == -1) {
293 /* there's no meta data. return a lock error
294 * otherwise if a second process is able to create it
300 if (type & ADLOCK_FILELOCK) {
301 adf = ad->ad_md; /* either resource or meta data (set in ad_open) */
302 lock.l_start = hf2off(off);
305 /* we really want the resource fork it's a byte lock */
306 adf = &ad->ad_resource_fork;
307 lock.l_start += ad_getentryoff(ad, eid);
310 /* NOTE: we can't write lock a read-only file. on those, we just
311 * make sure that we have a read lock set. that way, we at least prevent
312 * someone else from really setting a deny read/write on the file.
314 if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
315 type = (type & ~ADLOCK_WR) | ADLOCK_RD;
318 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
319 lock.l_whence = SEEK_SET;
322 /* byte_lock(len=-1) lock whole file */
323 if (len == BYTELOCK_MAX) {
324 lock.l_len -= lock.l_start; /* otherwise EOVERFLOW error */
327 /* see if it's locked by another fork.
328 * NOTE: this guarantees that any existing locks must be at most
329 * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
330 * guaranteed to be ORable. */
331 if (adf_findxlock(adf, fork, ADLOCK_WR |
332 ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
333 lock.l_start, lock.l_len) > -1) {
338 /* look for any existing lock that we may have */
339 i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
340 adflock = (i < 0) ? NULL : adf->adf_lock + i;
342 /* here's what we check for:
343 1) we're trying to re-lock a lock, but we didn't specify an update.
344 2) we're trying to free only part of a lock.
345 3) we're trying to free a non-existent lock. */
346 if ( (!adflock && (lock.l_type == F_UNLCK))
349 && !(type & ADLOCK_UPGRADE)
350 && ((lock.l_type != F_UNLCK)
351 || (adflock->lock.l_start != lock.l_start)
352 || (adflock->lock.l_len != lock.l_len) ))
359 /* now, update our list of locks */
361 if (lock.l_type == F_UNLCK) {
362 adf_freelock(adf, i);
366 /* attempt to lock the file. */
367 if (!adf->adf_excl && set_lock(adf->adf_fd, F_SETLK, &lock) < 0)
370 /* we upgraded this lock. */
371 if (adflock && (type & ADLOCK_UPGRADE)) {
372 memcpy(&adflock->lock, &lock, sizeof(lock));
376 /* it wasn't an upgrade */
/* a new read lock may share the refcount of another fork's read
 * lock on the same range */
378 if (lock.l_type == F_RDLCK) {
379 oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);
382 /* no more space. this will also happen if lockmax == lockcount == 0 */
383 if (adf->adf_lockmax == adf->adf_lockcount) {
384 adf_lock_t *tmp = (adf_lock_t *)
385 realloc(adf->adf_lock, sizeof(adf_lock_t)*
386 (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
390 adf->adf_lockmax += ARRAY_BLOCK_SIZE;
392 adflock = adf->adf_lock + adf->adf_lockcount;
395 memcpy(&adflock->lock, &lock, sizeof(lock));
396 adflock->user = fork;
398 adflock->refcount = (adf->adf_lock + oldlock)->refcount;
399 } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
403 (*adflock->refcount)++;
404 adf->adf_lockcount++;
/* error path: back out the fcntl lock we just acquired */
408 lock.l_type = F_UNLCK;
409 if (!adf->adf_excl) set_lock(adf->adf_fd, F_SETLK, &lock);
415 /* -------------------------
416 we are using lock as tristate variable
/* Tristate probe of one lock-slot byte at `off`:
 * returns 1 when some process holds a lock there, 0 when nobody
 * does, -1 on unexpected fcntl error.
 * NOTE(review): interior lines (lock.l_start/l_len setup, the early
 * return when our own lock array already covers the byte, and the
 * final returns) are missing from this excerpt. */
423 static int testlock(struct ad_fd *adf, off_t off, off_t len)
432 plock = adf->adf_lock;
433 lock.l_whence = SEEK_SET;
436 /* Do we have a lock? */
437 for (i = 0; i < adf->adf_lockcount; i++) {
438 if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
441 /* Does another process have a lock?
/* probe with the strongest type our open mode allows */
443 lock.l_type = (adf->adf_flags & O_RDWR) ?F_WRLCK : F_RDLCK;
445 if (set_lock(adf->adf_fd, F_GETLK, &lock) < 0) {
446 /* is that kind of error possible ?*/
447 return (errno == EACCES || errno == EAGAIN)?1:-1;
450 if (lock.l_type == F_UNLCK) {
459 /* -------------------------
/* Take (or release) a temporary lock for the duration of one
 * operation, without recording it in the lock array. On unlock,
 * adf_relockrange() restores any stored byte locks the temporary
 * range clobbered.
 * NOTE(review): interior lines (declarations, `type` initialization,
 * lock.l_start/l_len setup, return statements) are missing from this
 * excerpt; confirm against the full source. */
461 static int ad_fcntl_tmplock(struct adouble *ad, const uint32_t eid, const int locktype,
462 const off_t off, const off_t len, const int fork)
471 if (eid == ADEID_DFORK) {
472 adf = &ad->ad_data_fork;
475 adf = &ad->ad_resource_fork;
476 if (adf->adf_fd == -1) {
477 /* there's no resource fork. return success */
480 /* if ADLOCK_FILELOCK we want a lock from offset 0
481 * it's used when deleting a file:
482 * in open we put read locks on meta datas
483 * in delete a write locks on the whole file
484 * so if the file is open by somebody else it fails
486 if (!(type & ADLOCK_FILELOCK))
487 lock.l_start += ad_getentryoff(ad, eid);
/* can't write-lock a read-only descriptor; downgrade to read */
490 if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
491 type = (type & ~ADLOCK_WR) | ADLOCK_RD;
494 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
495 lock.l_whence = SEEK_SET;
498 /* see if it's locked by another fork. */
499 if (fork && adf_findxlock(adf, fork, ADLOCK_WR |
500 ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
501 lock.l_start, lock.l_len) > -1) {
506 /* okay, we might have ranges byte-locked. we need to make sure that
507 * we restore the appropriate ranges once we're done. so, we check
508 * for overlap on an unlock and relock.
509 * XXX: in the future, all the byte locks will be sorted and contiguous.
510 * we just want to upgrade all the locks and then downgrade them
512 if (!adf->adf_excl) {
513 err = set_lock(adf->adf_fd, F_SETLK, &lock);
/* after a successful temporary unlock, restore stored locks
 * that overlapped the range */
518 if (!err && (lock.l_type == F_UNLCK))
519 adf_relockrange(adf, adf->adf_fd, lock.l_start, len);
524 /* --------------------- */
/* Release every lock held by open fork `fork` across all descriptors
 * of this adouble: data fork, resource fork, and -- for non-EA
 * AppleDouble versions -- the separate metadata fork. */
525 static void ad_fcntl_unlock(struct adouble *ad, const int fork)
527 if (ad_data_fileno(ad) != -1) {
528 adf_unlock(&ad->ad_data_fork, fork);
530 if (ad_reso_fileno(ad) != -1) {
531 adf_unlock(&ad->ad_resource_fork, fork);
/* EA-based adoubles store metadata in extended attributes and
 * have no separate metadata fork descriptor to unlock */
534 if (ad->ad_flags != AD_VERSION_EA) {
537 if (ad_meta_fileno(ad) != -1) {
538 adf_unlock(&ad->ad_metadata_fork, fork);
542 /******************************************************************************
544 ******************************************************************************/
546 /* --------------- */
/* Public probe: is the pseudo-offset `off` (an AD_FILELOCK_* value)
 * locked by any process on the fork selected by `eid`? Delegates to
 * testlock() after translating the pseudo-offset to its slot byte.
 * NOTE(review): interior lines (declarations and the no-metadata
 * early return) are missing from this excerpt. */
547 int ad_testlock(struct adouble *ad, int eid, const off_t off)
555 if (eid == ADEID_DFORK) {
556 adf = &ad->ad_data_fork;
557 if (ad_meta_fileno(ad) != -1) {
559 lock_offset = df2off(off);
563 if (ad_meta_fileno(ad) == -1) {
564 /* there's no resource fork. return no lock */
568 lock_offset = hf2off(off);
570 return testlock(adf, lock_offset, 1);
574 /* -------------------------
575 return if a file is open by another process.
576 Optimized for the common case:
577 - there's no locks held by another process (clients)
578 - or we already know the answer and don't need to test.
/* Returns a bitmask of ATTRBIT_DOPEN / ATTRBIT_ROPEN indicating which
 * forks some other process has open, merged with hints already set in
 * `attrbits`. Probes the well-known lock-slot bytes with testlock().
 * NOTE(review): interior lines (declarations of adf/off/len/ret and
 * several control-flow lines) are missing from this excerpt. */
580 uint16_t ad_openforks(struct adouble *ad, uint16_t attrbits)
588 if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
590 /* XXX know the locks layout:
591 AD_FILELOCK_OPEN_WR is first
592 and use it for merging requests
594 if (ad_meta_fileno(ad) != -1) {
595 /* there's a resource fork test the four bytes for
596 * data RW/RD and fork RW/RD locks in one request
603 /* no resource fork, only data RD/RW may exist */
604 adf = &ad->ad_data_fork;
605 off = AD_FILELOCK_OPEN_WR;
/* fast path: one probe covering all slots found nothing */
608 if (!testlock(adf, off, len))
611 /* either there's a lock or we already know one
/* probe the two data-fork open slots (WR is first, len 2 covers RD) */
614 if (!(attrbits & ATTRBIT_DOPEN)) {
615 if (ad_meta_fileno(ad) != -1) {
620 adf = &ad->ad_data_fork;
621 off = AD_FILELOCK_OPEN_WR;
623 ret = testlock(adf, off, 2) > 0? ATTRBIT_DOPEN : 0;
626 if (!(attrbits & ATTRBIT_ROPEN)) {
627 if (ad_meta_fileno(ad) != -1) {
630 ret |= testlock(adf, off, 2) > 0? ATTRBIT_ROPEN : 0;
638 /* -------------------------
639 the fork is opened in Read Write, Deny Read, Deny Write mode
640 lock the whole file once
/* Take a single exclusive (F_WRLCK) whole-fork lock instead of
 * per-range locks; used for RW/DenyRd/DenyWr opens. Presumably sets
 * adf->adf_excl on success so later code skips per-range fcntl calls
 * -- the lines after set_lock() are missing from this excerpt. */
642 int ad_excl_lock(struct adouble *ad, const uint32_t eid)
651 lock.l_type = F_WRLCK;
652 lock.l_whence = SEEK_SET;
655 if (eid == ADEID_DFORK) {
656 adf = &ad->ad_data_fork;
658 adf = &ad->ad_resource_fork;
/* resource fork data starts past the AppleDouble header */
659 lock.l_start = ad_getentryoff(ad, eid);
662 err = set_lock(adf->adf_fd, F_SETLK, &lock);
/* Public entry point for taking a lock on behalf of open fork `user`.
 * NOTE(review): the body (original lines 670-672) is missing from
 * this excerpt; presumably delegates to ad_fcntl_lock() above. */
669 int ad_lock(struct adouble *ad, uint32_t eid, int type, off_t off, off_t len, int user)
/* Public entry point releasing all locks held by open fork `user`.
 * NOTE(review): the body is missing from this excerpt; presumably
 * delegates to ad_fcntl_unlock() above. */
674 void ad_unlock(struct adouble *ad, int user)
679 int ad_tmplock(struct adouble *ad, uint32_t eid, int type, off_t off, off_t len, int user)