/*
 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
 * All Rights Reserved. See COPYRIGHT for more information.
 *
 * Byte-range locks. This uses either whole-file flocks to fake byte
 * locks, or fcntl-based actual byte locks. Because fcntl locks are
 * process-oriented, we need to keep around a list of file descriptors
 * that refer to the same file. Currently, this doesn't serialize access
 * to the locks; as a result, there's the potential for race conditions.
 *
 * TODO: fix the race when reading/writing.
 *       keep a pool of both locks and reference counters around so that
 *       we can save on mallocs. we should also use a tree to keep things
 *       sorted.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif /* HAVE_CONFIG_H */

#include <atalk/adouble.h>
#include <atalk/logger.h>

#include "ad_private.h"
/* translate between ADLOCK styles and specific locking mechanisms */
#define XLATE_FLOCK(type) ((type) == ADLOCK_RD ? LOCK_SH : \
                           ((type) == ADLOCK_WR ? LOCK_EX : \
                            ((type) == ADLOCK_CLR ? LOCK_UN : -1)))
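
/* Illustrative sketch (not part of the original source): mapping an
 * ADLOCK request onto a whole-file flock(2) operation via XLATE_FLOCK.
 * The function name and the use of LOCK_NB are assumptions for
 * illustration only. */
#if 0
static int example_whole_file_lock(int fd, int adlock_type)
{
    int op = XLATE_FLOCK(adlock_type);

    if (op == -1)
        return -1;                  /* unknown ADLOCK style */
    return flock(fd, op | LOCK_NB); /* non-blocking whole-file lock */
}
#endif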
#ifdef DISABLE_LOCKING
#define fcntl(a, b, c) (0)
#endif
/* ----------------------- */
static int set_lock(int fd, int cmd, struct flock *lock)
{
    if (fd == -2) {
        /* we assign fd = -2 for symlinks -> do nothing */
        if (cmd == F_GETLK)
            lock->l_type = F_UNLCK;
        return 0;
    }
    return fcntl(fd, cmd, lock);
}
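
/* Illustrative sketch (not part of the original source): building a
 * byte-range read-lock request and applying it through set_lock().
 * The function name and the chosen offsets are examples only. */
#if 0
static int example_byte_range_lock(int fd)
{
    struct flock lock;

    lock.l_type = F_RDLCK;    /* shared (read) lock */
    lock.l_whence = SEEK_SET; /* offsets are absolute */
    lock.l_start = 0;         /* first byte to lock */
    lock.l_len = 16;          /* lock 16 bytes */
    return set_lock(fd, F_SETLK, &lock);
}
#endif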
/* ----------------------- */
static int XLATE_FCNTL_LOCK(int type)
/* ----------------------- */
static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
{
    return (!alen && a <= b) ||
           (!blen && b <= a) ||
           ((a + alen > b) && (b + blen > a));
}
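
/* Illustrative examples (not part of the original source): a length of 0
 * means "from the start offset to end-of-file", matching fcntl's
 * l_len == 0 convention.
 *
 *   OVERLAP(0, 10, 5, 10) -> true  ([0,10) and [5,15) intersect)
 *   OVERLAP(0, 10, 10, 5) -> false (the ranges merely touch)
 *   OVERLAP(5, 0, 100, 1) -> true  ([5,EOF) covers offset 100)
 */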
/* allocation for lock regions. we allocate aggressively and shrink
 * only in large chunks. */
#define ARRAY_BLOCK_SIZE 10
#define ARRAY_FREE_DELTA 100
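
/* Illustrative example (not part of the original source): with the values
 * above, the lock array grows 10 slots at a time as locks are added, but
 * is only shrunk once more than 100 slots sit unused, e.g. an array of
 * 150 slots holding 40 locks is reallocated down to 140 slots. */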
/* remove a lock and compact space if necessary */
static void adf_freelock(struct ad_fd *ad, const int i)
{
    adf_lock_t *lock = ad->adf_lock + i;

    if (--(*lock->refcount) < 1) {
        free(lock->refcount);
        lock->lock.l_type = F_UNLCK;
        set_lock(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
    }

    ad->adf_lockcount--;

    /* move another lock into the empty space */
    if (i < ad->adf_lockcount) {
        memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
    }
    /* free extra cruft if we go past a boundary. we always want to
     * keep at least some stuff around for allocations. this wastes
     * a bit of space to save time on reallocations. */
    if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
        (ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
        adf_lock_t *tmp;

        tmp = (adf_lock_t *)
            realloc(ad->adf_lock, sizeof(adf_lock_t) *
                    (ad->adf_lockcount + ARRAY_FREE_DELTA));
        if (tmp) {
            ad->adf_lock = tmp;
            ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
        }
    }
}
/* this needs to deal with the following cases:
 * 1) fork is the only user of the lock
 * 2) fork shares a read lock with another open fork
 *
 * i converted this to use arrays of locks. every time a lock
 * gets removed, we shift all of the locks down.
 */
static void adf_unlock(struct ad_fd *ad, const int fork)
{
    adf_lock_t *lock = ad->adf_lock;
    int i;

    for (i = 0; i < ad->adf_lockcount; i++) {
        if (lock[i].user == fork) {
            /* we're really going to delete this lock. note: read locks
               are the only ones that allow refcounts > 1 */
            adf_freelock(ad, i);
            i--; /* we shifted things down, so we need to backtrack */
            /* unlikely, but realloc may have changed adf_lock */
            lock = ad->adf_lock;
        }
    }
}
/* relock any byte lock that overlaps off/len. unlock everything
 * else. */
static void adf_relockrange(struct ad_fd *ad, int fd,
                            const off_t off, const off_t len)
{
    adf_lock_t *lock = ad->adf_lock;
    int i;

    if (!ad->adf_excl) for (i = 0; i < ad->adf_lockcount; i++) {
        if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
            set_lock(fd, F_SETLK, &lock[i].lock);
    }
}
/* find a byte lock that overlaps off/len for a particular open fork */
static int adf_findlock(struct ad_fd *ad,
                        const int fork, const int type,
                        const off_t off,
                        const off_t len)
{
    adf_lock_t *lock = ad->adf_lock;
    int i;

    for (i = 0; i < ad->adf_lockcount; i++) {
        if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
             ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
            (lock[i].user == fork) &&
            OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
            return i;
        }
    }
    return -1;
}
/* search other forks' lock lists */
static int adf_findxlock(struct ad_fd *ad,
                         const int fork, const int type,
                         const off_t off,
                         const off_t len)
{
    adf_lock_t *lock = ad->adf_lock;
    int i;

    for (i = 0; i < ad->adf_lockcount; i++) {
        if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
             ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
            (lock[i].user != fork) &&
            OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
            return i;
    }
    return -1;
}
/* okay, this needs to do the following:
 * 1) check the current list of locks. error on conflict.
 * 2) apply the lock. error on conflict with another process.
 * 3) update the list of locks this file has.
 *
 * NOTE: this treats synchronization locks a little differently. we
 * do the following things for those:
 * 1) if the header file exists, all the locks go in the beginning
 *    of the file.
 * 2) if the header file doesn't exist, we stick the locks
 *    in the locations specified by AD_FILELOCK_RD/WR.
 */
#define LOCK_DATA_WR (0)
#define LOCK_DATA_RD (1)
#define LOCK_RSRC_WR (2)
#define LOCK_RSRC_RD (3)

#define LOCK_RSRC_DRD (4)
#define LOCK_RSRC_DWR (5)
#define LOCK_DATA_DRD (6)
#define LOCK_DATA_DWR (7)

#define LOCK_RSRC_NONE (8)
#define LOCK_DATA_NONE (9)
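
/* Illustrative summary (not part of the original source): the constants
 * above are the byte offsets used for synchronization locks, so the first
 * ten bytes of the locking region encode open/deny state:
 *
 *   bytes 0-1: data fork open for write / read
 *   bytes 2-3: resource fork open for write / read
 *   bytes 4-7: deny-read / deny-write bytes for both forks
 *   bytes 8-9: fork opened with no access
 */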
/* --------------
   translate a data fork lock to an offset
*/
static off_t df2off(off_t off)
{
    off_t start = off;

    if (off == AD_FILELOCK_OPEN_WR)
        start = LOCK_DATA_WR;
    else if (off == AD_FILELOCK_OPEN_RD)
        start = LOCK_DATA_RD;
    else if (off == AD_FILELOCK_DENY_RD)
        start = LOCK_DATA_DRD;
    else if (off == AD_FILELOCK_DENY_WR)
        start = LOCK_DATA_DWR;
    else if (off == AD_FILELOCK_OPEN_NONE)
        start = LOCK_DATA_NONE;
    return start;
}
/* --------------
   translate a resource fork lock to an offset
*/
static off_t hf2off(off_t off)
{
    off_t start = off;

    if (off == AD_FILELOCK_OPEN_WR)
        start = LOCK_RSRC_WR;
    else if (off == AD_FILELOCK_OPEN_RD)
        start = LOCK_RSRC_RD;
    else if (off == AD_FILELOCK_DENY_RD)
        start = LOCK_RSRC_DRD;
    else if (off == AD_FILELOCK_DENY_WR)
        start = LOCK_RSRC_DWR;
    else if (off == AD_FILELOCK_OPEN_NONE)
        start = LOCK_RSRC_NONE;
    return start;
}
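
/* Illustrative examples (not part of the original source) of the
 * translations performed by df2off() and hf2off():
 *
 *   df2off(AD_FILELOCK_OPEN_WR) == LOCK_DATA_WR  (byte 0)
 *   hf2off(AD_FILELOCK_OPEN_WR) == LOCK_RSRC_WR  (byte 2)
 *   hf2off(AD_FILELOCK_DENY_RD) == LOCK_RSRC_DRD (byte 4)
 */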
/* ------------------ */
int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int locktype,
                  const off_t off, const off_t len, const int fork)
    if (eid == ADEID_DFORK) {
        adf = &ad->ad_data_fork;
        if ((type & ADLOCK_FILELOCK)) {
            if (ad_meta_fileno(ad) != -1) { /* META */
                adf = ad->ad_md;
                lock.l_start = df2off(off);
            }
        }
    } else { /* resource fork */
        if (ad_meta_fileno(ad) == -1 || ad_reso_fileno(ad) == -1) {
            /* there's no metadata. return a lock error;
             * otherwise, if a second process is able to create it,
             * locks are a mess. */
            errno = EACCES;
            return -1;
        }
        if (type & ADLOCK_FILELOCK) {
            adf = ad->ad_md; /* either resource or metadata (set in ad_open) */
            lock.l_start = hf2off(off);
        } else {
            /* we really want the resource fork; it's a byte lock */
            adf = &ad->ad_resource_fork;
            lock.l_start += ad_getentryoff(ad, eid);
        }
    }
    /* NOTE: we can't write-lock a read-only file. on those, we just
     * make sure that we have a read lock set. that way, we at least prevent
     * someone else from really setting a deny read/write on the file.
     */
    if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
        type = (type & ~ADLOCK_WR) | ADLOCK_RD;
    }

    lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
    lock.l_whence = SEEK_SET;
    lock.l_len = len;

    /* byte_lock(len=-1) locks the whole file */
    if (len == BYTELOCK_MAX) {
        lock.l_len -= lock.l_start; /* otherwise we get an EOVERFLOW error */
    }
    /* see if it's locked by another fork.
     * NOTE: this guarantees that any existing locks must be at most
     * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
     * guaranteed to be ORable. */
    if (adf_findxlock(adf, fork, ADLOCK_WR |
                      ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
                      lock.l_start, lock.l_len) > -1) {
        errno = EACCES;
        return -1;
    }

    /* look for any existing lock that we may have */
    i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
    adflock = (i < 0) ? NULL : adf->adf_lock + i;
    /* here's what we check for:
       1) we're trying to re-lock a lock, but we didn't specify an update.
       2) we're trying to free only part of a lock.
       3) we're trying to free a non-existent lock. */
    if ((!adflock && (lock.l_type == F_UNLCK))
        || (adflock
            && !(type & ADLOCK_UPGRADE)
            && ((lock.l_type != F_UNLCK)
                || (adflock->lock.l_start != lock.l_start)
                || (adflock->lock.l_len != lock.l_len)))) {
        errno = EINVAL;
        return -1;
    }
    /* now, update our list of locks */
    if (lock.l_type == F_UNLCK) {
        adf_freelock(adf, i);
        return 0;
    }
    /* attempt to lock the file. */
    if (!adf->adf_excl && set_lock(adf->adf_fd, F_SETLK, &lock) < 0)
        return -1;

    /* we upgraded this lock. */
    if (adflock && (type & ADLOCK_UPGRADE)) {
        memcpy(&adflock->lock, &lock, sizeof(lock));
        return 0;
    }
    /* it wasn't an upgrade */
    oldlock = -1;
    if (lock.l_type == F_RDLCK) {
        oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);
    }

    /* no more space. this will also happen if lockmax == lockcount == 0 */
    if (adf->adf_lockmax == adf->adf_lockcount) {
        adf_lock_t *tmp = (adf_lock_t *)
            realloc(adf->adf_lock, sizeof(adf_lock_t) *
                    (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
        if (!tmp)
            goto fcntl_lock_err;
        adf->adf_lock = tmp;
        adf->adf_lockmax += ARRAY_BLOCK_SIZE;
    }
    adflock = adf->adf_lock + adf->adf_lockcount;
    memcpy(&adflock->lock, &lock, sizeof(lock));
    adflock->user = fork;
    if (oldlock > -1) {
        adflock->refcount = (adf->adf_lock + oldlock)->refcount;
    } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
        goto fcntl_lock_err;
    }

    (*adflock->refcount)++;
    adf->adf_lockcount++;
    return 0;
fcntl_lock_err:
    lock.l_type = F_UNLCK;
    if (!adf->adf_excl) set_lock(adf->adf_fd, F_SETLK, &lock);
    return -1;
}
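
/* Illustrative sketch (not part of the original source): how a caller
 * might take and later clear a one-byte write lock; the function name
 * is hypothetical. Clearing requires the exact same start/len that was
 * locked, per the checks above. */
#if 0
static int example_write_lock_byte(struct adouble *ad, off_t offset, int fork)
{
    /* 0 on success; -1 with errno == EACCES on conflict */
    if (ad_fcntl_lock(ad, ADEID_DFORK, ADLOCK_WR, offset, 1, fork) < 0)
        return -1;
    /* ... do work ..., then clear the lock */
    return ad_fcntl_lock(ad, ADEID_DFORK, ADLOCK_CLR, offset, 1, fork);
}
#endif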
/* -------------------------
   we use lock as a tristate variable:
   1 ==> locked, 0 ==> no lock, -1 ==> cannot test
*/
static int testlock(struct ad_fd *adf, off_t off, off_t len)
{
    struct flock lock;
    adf_lock_t *plock;
    int i;

    plock = adf->adf_lock;
    lock.l_whence = SEEK_SET;
    lock.l_start = off;
    lock.l_len = len;

    /* Do we have a lock? */
    for (i = 0; i < adf->adf_lockcount; i++) {
        if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
            return 1;
    }

    /* Does another process have a lock? */
    lock.l_type = (adf->adf_flags & O_RDWR) ? F_WRLCK : F_RDLCK;
    if (set_lock(adf->adf_fd, F_GETLK, &lock) < 0) {
        /* is that kind of error even possible? */
        return (errno == EACCES || errno == EAGAIN) ? 1 : -1;
    }

    if (lock.l_type == F_UNLCK)
        return 0;
    return 1;
}
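
/* Illustrative sketch (not part of the original source): interpreting
 * testlock()'s tristate result; the function name is hypothetical. */
#if 0
static int example_is_byte_locked(struct ad_fd *adf, off_t off)
{
    int r = testlock(adf, off, 1);

    if (r < 0)
        return -1; /* could not test (unexpected fcntl error) */
    return r;      /* 1: byte is locked, 0: byte is free */
}
#endif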
/* --------------- */
int ad_testlock(struct adouble *ad, int eid, const off_t off)
{
    struct ad_fd *adf;
    off_t lock_offset = off;

    if (eid == ADEID_DFORK) {
        adf = &ad->ad_data_fork;
        if (ad_meta_fileno(ad) != -1) {
            adf = ad->ad_md;
            lock_offset = df2off(off);
        }
    } else { /* resource fork */
        if (ad_meta_fileno(ad) == -1) {
            /* there's no resource fork. return no lock */
            return 0;
        }
        adf = ad->ad_md;
        lock_offset = hf2off(off);
    }
    return testlock(adf, lock_offset, 1);
}
/* -------------------------
   return whether a file is open by another process.
   Optimized for the common case:
   - there are no locks held by another process (clients)
   - or we already know the answer and don't need to test.
*/
u_int16_t ad_openforks(struct adouble *ad, u_int16_t attrbits)
{
    u_int16_t ret = 0;
    struct ad_fd *adf;
    off_t off;
    if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
        off_t len;
        /* XXX we know the locks layout:
           AD_FILELOCK_OPEN_WR is first,
           and we use it for merging requests
        */
        if (ad_meta_fileno(ad) != -1) {
            /* there's a resource fork; test the four bytes for
             * data RW/RD and fork RW/RD locks in one request
             */
            adf = ad->ad_md;
            off = LOCK_DATA_WR;
            len = 4;
        } else {
            /* no resource fork, only data RD/RW may exist */
            adf = &ad->ad_data_fork;
            off = AD_FILELOCK_OPEN_WR;
            len = 2;
        }
        if (!testlock(adf, off, len))
            return ret;
    }
    /* either there's a lock or we already know one fork is open */
    if (!(attrbits & ATTRBIT_DOPEN)) {
        if (ad_meta_fileno(ad) != -1) {
            adf = ad->ad_md;
            off = LOCK_DATA_WR;
        } else {
            adf = &ad->ad_data_fork;
            off = AD_FILELOCK_OPEN_WR;
        }
        ret = testlock(adf, off, 2) > 0 ? ATTRBIT_DOPEN : 0;
    }

    if (!(attrbits & ATTRBIT_ROPEN)) {
        if (ad_meta_fileno(ad) != -1) {
            adf = ad->ad_md;
            off = LOCK_RSRC_WR;
            ret |= testlock(adf, off, 2) > 0 ? ATTRBIT_ROPEN : 0;
        }
    }
    return ret;
}
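
/* Illustrative sketch (not part of the original source): merging
 * ad_openforks() into an AFP attribute word; the function name is
 * hypothetical. attrbits carries what is already known, letting
 * ad_openforks() skip tests whose answer is known. */
#if 0
static u_int16_t example_merge_open_bits(struct adouble *ad, u_int16_t attrbits)
{
    /* adds ATTRBIT_DOPEN and/or ATTRBIT_ROPEN if another process
     * holds the corresponding open locks */
    return attrbits | ad_openforks(ad, attrbits);
}
#endif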
/* ------------------------- */
int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int locktype,
                     const off_t off, const off_t len, const int fork)
    if (eid == ADEID_DFORK) {
        adf = &ad->ad_data_fork;
    } else {
        adf = &ad->ad_resource_fork;
        if (adf->adf_fd == -1) {
            /* there's no resource fork. return success */
            return 0;
        }
        /* if ADLOCK_FILELOCK, we want a lock from offset 0.
         * it's used when deleting a file:
         * in open we put read locks on the metadata;
         * in delete, a write lock on the whole file.
         * so if the file is open by somebody else, it fails.
         */
        if (!(type & ADLOCK_FILELOCK))
            lock.l_start += ad_getentryoff(ad, eid);
    }
    if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
        type = (type & ~ADLOCK_WR) | ADLOCK_RD;
    }

    lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
    lock.l_whence = SEEK_SET;
    lock.l_len = len;

    /* see if it's locked by another fork. */
    if (fork && adf_findxlock(adf, fork, ADLOCK_WR |
                              ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
                              lock.l_start, lock.l_len) > -1) {
        errno = EACCES;
        return -1;
    }
    /* okay, we might have ranges byte-locked. we need to make sure that
     * we restore the appropriate ranges once we're done. so, we check
     * for overlap on an unlock and relock.
     * XXX: in the future, all the byte locks will be sorted and contiguous.
     * we just want to upgrade all the locks and then downgrade them
     * again. */
    if (!adf->adf_excl) {
        err = set_lock(adf->adf_fd, F_SETLK, &lock);
    }
    if (!err && (lock.l_type == F_UNLCK))
        adf_relockrange(adf, adf->adf_fd, lock.l_start, len);

    return err;
}
/* -------------------------
   the fork is opened in Read Write, Deny Read, Deny Write mode;
   lock the whole file once.
*/
int ad_excl_lock(struct adouble *ad, const u_int32_t eid)
{
    struct ad_fd *adf;
    struct flock lock;
    int err;

    lock.l_start = 0;
    lock.l_type = F_WRLCK;
    lock.l_whence = SEEK_SET;
    lock.l_len = 0;

    if (eid == ADEID_DFORK) {
        adf = &ad->ad_data_fork;
    } else {
        adf = &ad->ad_resource_fork;
        lock.l_start = ad_getentryoff(ad, eid);
    }

    err = set_lock(adf->adf_fd, F_SETLK, &lock);
    if (!err)
        adf->adf_excl = 1;
    return err;
}
/* --------------------- */
void ad_fcntl_unlock(struct adouble *ad, const int fork)
{
    if (ad_data_fileno(ad) != -1) {
        adf_unlock(&ad->ad_data_fork, fork);
    }
    if (ad_reso_fileno(ad) != -1) {
        adf_unlock(&ad->ad_resource_fork, fork);
    }

    if (ad->ad_flags != AD_VERSION1_SFM) {
        return;
    }
    if (ad_meta_fileno(ad) != -1) {
        adf_unlock(&ad->ad_metadata_fork, fork);
    }
}