 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
 * All Rights Reserved. See COPYRIGHT for more information.
 * Byte-range locks. This uses either whole-file flocks to fake byte
 * locks or fcntl-based actual byte locks. Because fcntl locks are
 * process-oriented, we need to keep around a list of file descriptors
 * that refer to the same file. Currently, this doesn't serialize access
 * to the locks. As a result, there's the potential for race conditions.
 *
 * TODO: fix the race when reading/writing.
 *       keep a pool of both locks and reference counters around so that
 *       we can save on mallocs. we should also use a tree to keep things
 *       sorted.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif /* HAVE_CONFIG_H */

#include <atalk/adouble.h>
#include <atalk/logger.h>

#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
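/* Illustrative sketch (not code from this file) of why fcntl locks are
 * "process-oriented": POSIX releases all of a process's locks on a file
 * as soon as any descriptor referring to that file is closed, which is
 * why all the descriptors for one file have to be tracked together:
 *
 *     int fd1 = open("example", O_RDWR);    // "example" is hypothetical
 *     int fd2 = open("example", O_RDONLY);
 *     struct flock lk = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *                         .l_start = 0, .l_len = 0 };
 *     fcntl(fd1, F_SETLK, &lk);             // whole-file write lock on fd1
 *     close(fd2);                           // drops fd1's lock as well!
 */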
/* translate between ADLOCK styles and specific locking mechanisms */
#define XLATE_FLOCK(type) ((type) == ADLOCK_RD ? LOCK_SH : \
                           ((type) == ADLOCK_WR ? LOCK_EX : \
                            ((type) == ADLOCK_CLR ? LOCK_UN : -1)))
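/* e.g. the whole-file flock() fallback mentioned in the header comment
 * would translate a lock request as (a sketch, not code from this file):
 *
 *     flock(fd, XLATE_FLOCK(ADLOCK_WR));    // LOCK_EX: exclusive flock
 */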
/* ----------------------- */
static int set_lock(int fd, int cmd, struct flock *lock)
{
    if (fd == -2) {
        /* We assign fd = -2 for symlinks -> do nothing */
        if (cmd == F_GETLK)
            lock->l_type = F_UNLCK;
        return 0;
    }
    return fcntl(fd, cmd, lock);
}
/* ----------------------- */
static int XLATE_FCNTL_LOCK(int type)
{
    switch (type) {
    case ADLOCK_RD:
        return F_RDLCK;
    case ADLOCK_WR:
        return F_WRLCK;
    case ADLOCK_CLR:
        return F_UNLCK;
    }
    return -1;
}
/* ----------------------- */
static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
{
    return (!alen && a <= b) ||
           (!blen && b <= a) ||
           ( (a + alen > b) && (b + blen > a) );
}
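/* Worked examples (a zero length means "from the start offset to EOF",
 * matching fcntl's l_len == 0 convention):
 *
 *     OVERLAP(0, 0, 100, 5)   -> true   (to-EOF range covers [100,105))
 *     OVERLAP(0, 10, 5, 10)   -> true   ([0,10) and [5,15) intersect)
 *     OVERLAP(0, 10, 10, 5)   -> false  ([0,10) and [10,15) only touch)
 */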
/* allocation for lock regions. we allocate aggressively and shrink
 * only in large chunks. */
#define ARRAY_BLOCK_SIZE 10
#define ARRAY_FREE_DELTA 100
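/* e.g. with the values above, the lock array grows 10 slots at a time
 * (see ad_lock() below) but is only shrunk once more than 100 slots sit
 * unused, so a fork that repeatedly locks and unlocks doesn't bounce
 * through realloc() on every call. */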
/* remove a lock and compact space if necessary */
static void adf_freelock(struct ad_fd *ad, const int i)
{
    adf_lock_t *lock = ad->adf_lock + i;

    if (--(*lock->refcount) < 1) {
        free(lock->refcount);
        if (!ad->adf_excl) {
            lock->lock.l_type = F_UNLCK;
            set_lock(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
        }
    }

    ad->adf_lockcount--;

    /* move another lock into the empty space */
    if (i < ad->adf_lockcount) {
        memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
    }
    /* free extra cruft if we go past a boundary. we always want to
     * keep at least some stuff around for allocations. this wastes
     * a bit of space to save time on reallocations. */
    if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
        (ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
        adf_lock_t *tmp;

        tmp = (adf_lock_t *)
              realloc(ad->adf_lock, sizeof(adf_lock_t) *
                      (ad->adf_lockcount + ARRAY_FREE_DELTA));
        if (tmp) {
            ad->adf_lock = tmp;
            ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
        }
    }
}
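/* Note on the compaction above: freeing slot i copies the *last* lock in
 * the array into slot i, so lock order is not preserved. Callers that
 * free while iterating (e.g. adf_unlock() below) must re-examine slot i,
 * hence the i-- backtrack there. */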
/* this needs to deal with the following cases:
 * 1) fork is the only user of the lock
 * 2) fork shares a read lock with another open fork
 *
 * I converted to using arrays of locks. every time a lock
 * gets removed, we shift all of the locks down.
 */
static void adf_unlock(struct ad_fd *ad, const int fork)
{
    adf_lock_t *lock = ad->adf_lock;
    int i;

    for (i = 0; i < ad->adf_lockcount; i++) {
        if (lock[i].user == fork) {
            /* we're really going to delete this lock. note: read locks
               are the only ones that allow refcounts > 1 */
            adf_freelock(ad, i);
            i--; /* we shifted things down, so we need to backtrack */
            /* unlikely, but realloc may have changed adf_lock */
            lock = ad->adf_lock;
        }
    }
}
/* relock any byte lock that overlaps off/len. unlock everything
 * else. */
static void adf_relockrange(struct ad_fd *ad, int fd, off_t off, off_t len)
{
    adf_lock_t *lock = ad->adf_lock;
    int i;

    if (!ad->adf_excl) for (i = 0; i < ad->adf_lockcount; i++) {
        if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
            set_lock(fd, F_SETLK, &lock[i].lock);
    }
}
/* find a byte lock that overlaps off/len for a particular open fork */
static int adf_findlock(struct ad_fd *ad,
                        const int fork, const int type,
                        const off_t off, const off_t len)
{
    adf_lock_t *lock = ad->adf_lock;
    int i;

    for (i = 0; i < ad->adf_lockcount; i++) {
        if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
             ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
            (lock[i].user == fork) &&
            OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
            return i;
        }
    }
    return -1;
}
/* search other fork lock lists */
static int adf_findxlock(struct ad_fd *ad,
                         const int fork, const int type,
                         const off_t off, const off_t len)
{
    adf_lock_t *lock = ad->adf_lock;
    int i;

    for (i = 0; i < ad->adf_lockcount; i++) {
        if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
             ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
            (lock[i].user != fork) &&
            OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
            return i;
    }
    return -1;
}
/* okay, this needs to do the following:
 * 1) check current list of locks. error on conflict.
 * 2) apply the lock. error on conflict with another process.
 * 3) update the list of locks this file has.
 *
 * NOTE: this treats synchronization locks a little differently. we
 *       do the following things for those:
 *       1) if the header file exists, all the locks go in the beginning
 *          of the file.
 *       2) if the header file doesn't exist, we stick the locks
 *          in the locations specified by AD_FILELOCK_RD/WR.
 */
#define LOCK_DATA_WR (0)
#define LOCK_DATA_RD (1)
#define LOCK_RSRC_WR (2)
#define LOCK_RSRC_RD (3)

#define LOCK_RSRC_DRD (4)
#define LOCK_RSRC_DWR (5)
#define LOCK_DATA_DRD (6)
#define LOCK_DATA_DWR (7)

#define LOCK_RSRC_NONE (8)
#define LOCK_DATA_NONE (9)
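/* Resulting layout (a summary inferred from the constants above): the
 * synchronization locks occupy ten consecutive one-byte slots at the
 * start of the header file:
 *
 *     bytes 0-1: data fork open WR/RD    bytes 2-3: rsrc fork open WR/RD
 *     bytes 4-5: rsrc fork deny RD/WR    bytes 6-7: data fork deny RD/WR
 *     bytes 8-9: rsrc/data fork "open none"
 */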
/* --------------
   translate a data fork lock to an offset
*/
static off_t df2off(off_t off)
{
    off_t start = off;

    if (off == AD_FILELOCK_OPEN_WR)
        start = LOCK_DATA_WR;
    else if (off == AD_FILELOCK_OPEN_RD)
        start = LOCK_DATA_RD;
    else if (off == AD_FILELOCK_DENY_RD)
        start = LOCK_DATA_DRD;
    else if (off == AD_FILELOCK_DENY_WR)
        start = LOCK_DATA_DWR;
    else if (off == AD_FILELOCK_OPEN_NONE)
        start = LOCK_DATA_NONE;
    return start;
}
/* --------------
   translate a resource fork lock to an offset
*/
static off_t hf2off(off_t off)
{
    off_t start = off;

    if (off == AD_FILELOCK_OPEN_WR)
        start = LOCK_RSRC_WR;
    else if (off == AD_FILELOCK_OPEN_RD)
        start = LOCK_RSRC_RD;
    else if (off == AD_FILELOCK_DENY_RD)
        start = LOCK_RSRC_DRD;
    else if (off == AD_FILELOCK_DENY_WR)
        start = LOCK_RSRC_DWR;
    else if (off == AD_FILELOCK_OPEN_NONE)
        start = LOCK_RSRC_NONE;
    return start;
}
/* ------------------ */
int ad_lock(struct adouble *ad, const uint32_t eid, const int locktype,
            const off_t off, const off_t len, const int fork)
{
    struct flock lock;
    struct ad_fd *adf;
    adf_lock_t *adflock;
    int oldlock;
    int i;
    int type;

    lock.l_start = off;
    type = locktype;

    if (eid == ADEID_DFORK) {
        adf = &ad->ad_data_fork;
        if ((type & ADLOCK_FILELOCK)) {
            if (ad_meta_fileno(ad) != -1) { /* META */
                adf = ad->ad_mdp;
                lock.l_start = df2off(off);
            }
        }
    } else { /* resource fork */
        if (ad_meta_fileno(ad) == -1 || ad_reso_fileno(ad) == -1) {
            /* there's no meta data. return a lock error;
             * otherwise, if a second process is able to create it,
             * the locks are a mess. */
            errno = EACCES;
            return -1;
        }
        if (type & ADLOCK_FILELOCK) {
            adf = ad->ad_mdp; /* either resource or meta data (set in ad_open) */
            lock.l_start = hf2off(off);
        } else {
            /* we really want the resource fork; it's a byte lock */
            adf = &ad->ad_resource_fork;
            lock.l_start += ad_getentryoff(ad, eid);
        }
    }
    /* NOTE: we can't write lock a read-only file. on those, we just
     * make sure that we have a read lock set. that way, we at least prevent
     * someone else from really setting a deny read/write on the file.
     */
    if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
        type = (type & ~ADLOCK_WR) | ADLOCK_RD;
    }

    lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
    lock.l_whence = SEEK_SET;
    lock.l_len = len;

    /* byte_lock(len = -1) locks the whole file */
    if (len == BYTELOCK_MAX) {
        lock.l_len -= lock.l_start; /* otherwise EOVERFLOW error */
    }
    /* see if it's locked by another fork.
     * NOTE: this guarantees that any existing locks must be at most
     * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
     * guaranteed to be ORable. */
    if (adf_findxlock(adf, fork, ADLOCK_WR |
                      ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
                      lock.l_start, lock.l_len) > -1) {
        errno = EACCES;
        return -1;
    }

    /* look for any existing lock that we may have */
    i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
    adflock = (i < 0) ? NULL : adf->adf_lock + i;
    /* here's what we check for:
       1) we're trying to re-lock a lock, but we didn't specify an update.
       2) we're trying to free only part of a lock.
       3) we're trying to free a non-existent lock. */
    if ( (!adflock && (lock.l_type == F_UNLCK))
         ||
         (adflock
          && !(type & ADLOCK_UPGRADE)
          && ((lock.l_type != F_UNLCK)
              || (adflock->lock.l_start != lock.l_start)
              || (adflock->lock.l_len != lock.l_len) )) ) {
        errno = EINVAL;
        return -1;
    }
    /* now, update our list of locks */
    /* clear the lock */
    if (lock.l_type == F_UNLCK) {
        adf_freelock(adf, i);
        return 0;
    }
    /* attempt to lock the file. */
    if (!adf->adf_excl && set_lock(adf->adf_fd, F_SETLK, &lock) < 0)
        return -1;

    /* we upgraded this lock. */
    if (adflock && (type & ADLOCK_UPGRADE)) {
        memcpy(&adflock->lock, &lock, sizeof(lock));
        return 0;
    }
    /* it wasn't an upgrade */
    oldlock = -1;
    if (lock.l_type == F_RDLCK) {
        oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);
    }

    /* no more space. this will also happen if lockmax == lockcount == 0 */
    if (adf->adf_lockmax == adf->adf_lockcount) {
        adf_lock_t *tmp = (adf_lock_t *)
            realloc(adf->adf_lock, sizeof(adf_lock_t) *
                    (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
        if (!tmp)
            goto fcntl_lock_err;
        adf->adf_lock = tmp;
        adf->adf_lockmax += ARRAY_BLOCK_SIZE;
    }
    adflock = adf->adf_lock + adf->adf_lockcount;

    /* fill in fields */
    memcpy(&adflock->lock, &lock, sizeof(lock));
    adflock->user = fork;
    if (oldlock > -1) {
        /* share the refcount with the existing overlapping read lock */
        adflock->refcount = (adf->adf_lock + oldlock)->refcount;
    } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
        goto fcntl_lock_err;
    }

    (*adflock->refcount)++;
    adf->adf_lockcount++;
    return 0;

fcntl_lock_err:
    lock.l_type = F_UNLCK;
    if (!adf->adf_excl) set_lock(adf->adf_fd, F_SETLK, &lock);
    return -1;
}
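/* Usage sketch (hypothetical caller, mirroring how a fork open works):
 * park a deny-write lock, then byte-lock a range for reading:
 *
 *     if (ad_lock(ad, ADEID_DFORK, ADLOCK_RD | ADLOCK_FILELOCK,
 *                 AD_FILELOCK_DENY_WR, 1, fork) < 0)
 *         return -1;                      // somebody else denies us
 *     ad_lock(ad, ADEID_DFORK, ADLOCK_RD, 0, 512, fork);
 */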
/* -------------------------
   we are using lock as a tristate variable:

   we have a lock  ==> 1
   no lock         ==> 0
   error           ==> -1
*/
static int testlock(struct ad_fd *adf, off_t off, off_t len)
{
    struct flock lock;
    adf_lock_t *plock;
    int i;

    lock.l_start = off;

    plock = adf->adf_lock;
    lock.l_whence = SEEK_SET;
    lock.l_len = len;

    /* Do we have a lock? */
    for (i = 0; i < adf->adf_lockcount; i++) {
        if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
            return 1;
    }

    /* Does another process have a lock? */
    lock.l_type = (adf->adf_flags & O_RDWR) ? F_WRLCK : F_RDLCK;

    if (set_lock(adf->adf_fd, F_GETLK, &lock) < 0) {
        /* is that kind of error even possible? */
        return (errno == EACCES || errno == EAGAIN) ? 1 : -1;
    }

    /* F_GETLK leaves l_type as F_UNLCK when nothing conflicts */
    if (lock.l_type == F_UNLCK) {
        return 0;
    }
    return 1;
}
/* ------------------------- */
int ad_tmplock(struct adouble *ad, const uint32_t eid, const int locktype,
               const off_t off, const off_t len, const int fork)
{
    struct flock lock;
    struct ad_fd *adf;
    int err;
    int type;

    lock.l_start = off;
    type = locktype;

    if (eid == ADEID_DFORK) {
        adf = &ad->ad_data_fork;
    } else {
        adf = &ad->ad_resource_fork;
        if (adf->adf_fd == -1) {
            /* there's no resource fork. return success */
            return 0;
        }
        /* if ADLOCK_FILELOCK, we want a lock from offset 0.
         * it's used when deleting a file:
         * in open, we put read locks on the meta data;
         * in delete, a write lock on the whole file;
         * so if the file is open by somebody else, it fails.
         */
        if (!(type & ADLOCK_FILELOCK))
            lock.l_start += ad_getentryoff(ad, eid);
    }
    if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
        type = (type & ~ADLOCK_WR) | ADLOCK_RD;
    }

    lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
    lock.l_whence = SEEK_SET;
    lock.l_len = len;

    /* see if it's locked by another fork. */
    if (fork && adf_findxlock(adf, fork, ADLOCK_WR |
                              ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
                              lock.l_start, lock.l_len) > -1) {
        errno = EACCES;
        return -1;
    }
    /* okay, we might have ranges byte-locked. we need to make sure that
     * we restore the appropriate ranges once we're done. so, we check
     * for overlap on an unlock and relock.
     * XXX: in the future, all the byte locks will be sorted and contiguous.
     *      we just want to upgrade all the locks and then downgrade them
     *      here. */
    if (!adf->adf_excl) {
        err = set_lock(adf->adf_fd, F_SETLK, &lock);
    } else {
        err = 0;
    }
    if (!err && (lock.l_type == F_UNLCK))
        adf_relockrange(adf, adf->adf_fd, lock.l_start, len);

    return err;
}
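/* Usage sketch (hypothetical caller): ad_tmplock() brackets a single I/O,
 * holding the lock just for the duration of the operation:
 *
 *     if (ad_tmplock(ad, ADEID_DFORK, ADLOCK_WR, offset, nbytes, fork) < 0)
 *         return -1;                      // range is busy
 *     ...                                 // do the actual write
 *     ad_tmplock(ad, ADEID_DFORK, ADLOCK_CLR, offset, nbytes, fork);
 */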
/* --------------------- */
void ad_unlock(struct adouble *ad, const int fork)
{
    if (ad_data_fileno(ad) != -1) {
        adf_unlock(&ad->ad_data_fork, fork);
    }
    if (ad_reso_fileno(ad) != -1) {
        adf_unlock(&ad->ad_resource_fork, fork);
    }
}
/******************************************************************************
 ******************************************************************************/
/* --------------- */
int ad_testlock(struct adouble *ad, int eid, const off_t off)
{
    struct ad_fd *adf;
    off_t lock_offset;

    lock_offset = off;
    if (eid == ADEID_DFORK) {
        adf = &ad->ad_data_fork;
        if (ad_meta_fileno(ad) != -1) {
            adf = ad->ad_mdp;
            lock_offset = df2off(off);
        }
    } else { /* resource fork */
        if (ad_meta_fileno(ad) == -1) {
            /* there's no resource fork. return no lock */
            return 0;
        }
        adf = ad->ad_mdp;
        lock_offset = hf2off(off);
    }
    return testlock(adf, lock_offset, 1);
}
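/* e.g. (hypothetical caller) before granting an open-for-write, a caller
 * can check whether another client parked a deny-write lock:
 *
 *     if (ad_testlock(ad, ADEID_DFORK, AD_FILELOCK_DENY_WR) > 0)
 *         return -1;                      // deny conflict
 */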
/* -------------------------
   return whether a file is open by another process.
   Optimized for the common case:
   - there are no locks held by another process (clients)
   - or we already know the answer and don't need to test.
*/
uint16_t ad_openforks(struct adouble *ad, uint16_t attrbits)
{
    uint16_t ret = 0;
    struct ad_fd *adf;
    off_t off;

    if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
        off_t len;
        /* XXX know the locks layout:
           AD_FILELOCK_OPEN_WR is first,
           and use it for merging requests
        */
        if (ad_meta_fileno(ad) != -1) {
            /* there's a resource fork; test the four bytes for
             * data RW/RD and fork RW/RD locks in one request
             */
            adf = ad->ad_mdp;
            off = LOCK_DATA_WR;
            len = 4;
        } else {
            /* no resource fork, only data RD/RW may exist */
            adf = &ad->ad_data_fork;
            off = AD_FILELOCK_OPEN_WR;
            len = 2;
        }
        if (!testlock(adf, off, len))
            return ret;
    }

    /* either there's a lock or we already know one
       fork is open
    */
    if (!(attrbits & ATTRBIT_DOPEN)) {
        if (ad_meta_fileno(ad) != -1) {
            adf = ad->ad_mdp;
            off = LOCK_DATA_WR;
        } else {
            adf = &ad->ad_data_fork;
            off = AD_FILELOCK_OPEN_WR;
        }
        ret = testlock(adf, off, 2) > 0 ? ATTRBIT_DOPEN : 0;
    }

    if (!(attrbits & ATTRBIT_ROPEN)) {
        if (ad_meta_fileno(ad) != -1) {
            adf = ad->ad_mdp;
            off = LOCK_RSRC_WR;
            ret |= testlock(adf, off, 2) > 0 ? ATTRBIT_ROPEN : 0;
        }
    }

    return ret | attrbits;
}
/* -------------------------
   the fork is opened in Read/Write, Deny Read, Deny Write mode;
   lock the whole file once
*/
int ad_excl_lock(struct adouble *ad, const uint32_t eid)
{
    struct ad_fd *adf;
    struct flock lock;
    int err;

    lock.l_start = 0;
    lock.l_type = F_WRLCK;
    lock.l_whence = SEEK_SET;
    lock.l_len = 0;

    if (eid == ADEID_DFORK) {
        adf = &ad->ad_data_fork;
    } else {
        adf = &ad->ad_resource_fork;
        lock.l_start = ad_getentryoff(ad, eid);
    }

    err = set_lock(adf->adf_fd, F_SETLK, &lock);
    if (!err)
        adf->adf_excl = 1;
    return err;
}
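/* Design note: once adf_excl is set, the per-range set_lock() calls are
 * skipped (see the !adf->adf_excl guards in ad_lock(), adf_relockrange()
 * and ad_tmplock() above), since the whole file is already held under a
 * single exclusive write lock. */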