2 * $Id: ad_lock.c,v 1.6 2002-11-14 17:15:22 srittau Exp $
4 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
5 * All Rights Reserved. See COPYRIGHT for more information.
7 * Byte-range locks. This uses either whole-file flocks to fake byte
8 * locks or fcntl-based actual byte locks. Because fcntl locks are
9 * process-oriented, we need to keep around a list of file descriptors
10 * that refer to the same file. Currently, this doesn't serialize access
11 * to the locks. As a result, there's the potential for race conditions.
13 * TODO: fix the race when reading/writing.
14 * keep a pool of both locks and reference counters around so that
15 * we can save on mallocs. we should also use a tree to keep things
21 #endif /* HAVE_CONFIG_H */
28 #endif /* HAVE_UNISTD_H */
31 #endif /* HAVE_FCNTL_H */
34 #include <atalk/adouble.h>
36 #include "ad_private.h"
38 /* translate between ADLOCK styles and specific locking mechanisms */
/* Map an ADLOCK_* request onto the flock(2) operation codes
 * (LOCK_SH/LOCK_EX/LOCK_UN).  Any other lock style yields -1 so the
 * caller can detect an unsupported request. */
39 #define XLATE_FLOCK(type) ((type) == ADLOCK_RD ? LOCK_SH : \
40 ((type) == ADLOCK_WR ? LOCK_EX : \
41 ((type) == ADLOCK_CLR ? LOCK_UN : -1)))
/* Map an ADLOCK_* request onto the fcntl(2) byte-lock types
 * (F_RDLCK/F_WRLCK/F_UNLCK); unknown styles map to -1. */
43 #define XLATE_FCNTL_LOCK(type) ((type) == ADLOCK_RD ? F_RDLCK : \
44 ((type) == ADLOCK_WR ? F_WRLCK : \
45 ((type) == ADLOCK_CLR ? F_UNLCK : -1)))
/* Do the byte ranges [a, a+alen) and [b, b+blen) intersect?
 * A length of 0 follows fcntl struct flock semantics and means
 * "from the start offset to end-of-file", so a zero-length range
 * overlaps anything that begins at or after its start offset
 * (first two clauses); otherwise both finite ranges must each
 * extend past the other's start (last clause). */
47 #define OVERLAP(a,alen,b,blen) ((!(alen) && (a) <= (b)) || \
48 (!(blen) && (b) <= (a)) || \
49 ((((a) + (alen)) > (b)) && \
50 (((b) + (blen)) > (a))))
53 /* allocation for lock regions. we allocate aggressively and shrink
54 * only in large chunks. */
/* grow the lock array this many entries at a time */
55 #define ARRAY_BLOCK_SIZE 10
/* only shrink once the array has at least this much unused headroom */
56 #define ARRAY_FREE_DELTA 100
58 /* remove a lock and compact space if necessary */
/* Drop lock slot i from ad's lock array.  The refcount is shared
 * between read locks on the same range; the kernel byte lock is only
 * released when the last reference goes away.  The hole left in the
 * array is filled by moving another entry into it, and the array is
 * shrunk once it has grown well past what is in use.
 * NOTE(review): this excerpt is missing lines (opening/closing braces,
 * the refcount free, and the adf_lockcount decrement) -- documented
 * from the visible code only. */
59 static __inline__ void adf_freelock(struct ad_fd *ad, const int i)
61 adf_lock_t *lock = ad->adf_lock + i;
63 if (--(*lock->refcount) < 1) {
65 lock->lock.l_type = F_UNLCK;
/* last reference on this range: release the kernel byte lock */
66 fcntl(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
71 /* move another lock into the empty space */
72 if (i < ad->adf_lockcount) {
/* NOTE(review): 'lock + ad->adf_lockcount - i' presumably addresses
 * the last live entry after adf_lockcount was decremented on a line
 * not visible in this excerpt -- verify against the full source. */
73 memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
76 /* free extra cruft if we go past a boundary. we always want to
77 * keep at least some stuff around for allocations. this wastes
78 * a bit of space to save time on reallocations. */
79 if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
80 (ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
81 struct adf_lock_t *tmp;
83 tmp = (struct adf_lock_t *)
84 realloc(ad->adf_lock, sizeof(adf_lock_t)*
85 (ad->adf_lockcount + ARRAY_FREE_DELTA));
/* NOTE(review): the realloc NULL check and the assignment of 'tmp'
 * back to ad->adf_lock are not visible in this excerpt. */
88 ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
94 /* this needs to deal with the following cases:
95 * 1) user is the only user of the lock
96 * 2) user shares a read lock with another user
98 * i converted to using arrays of locks. every time a lock
99 * gets removed, we shift all of the locks down.
/* Remove every lock owned by 'user' from ad's lock list.
 * NOTE(review): the declaration of loop index 'i', braces, and the
 * call into adf_freelock() are missing from this excerpt. */
101 static __inline__ void adf_unlock(struct ad_fd *ad, int fd, const int user)
103 adf_lock_t *lock = ad->adf_lock;
106 for (i = 0; i < ad->adf_lockcount; i++) {
107 if (lock[i].user == user) {
108 /* we're really going to delete this lock. note: read locks
109 are the only ones that allow refcounts > 1 */
/* deletion compacts the array by moving another entry into slot i,
 * so step back one to re-examine that moved entry */
111 i--; /* we shifted things down, so we need to backtrack */
116 /* relock any byte lock that overlaps off/len. unlock everything
/* Re-assert (via F_SETLK on 'fd') every recorded lock whose range
 * overlaps off/len.  Used after a temporary lock is dropped so that
 * byte-range locks clobbered by the temporary lock are restored.
 * NOTE(review): braces and the declaration of 'i' are missing from
 * this excerpt. */
118 static __inline__ void adf_relockrange(struct ad_fd *ad, int fd,
119 const off_t off, const size_t len)
121 adf_lock_t *lock = ad->adf_lock;
124 for (i = 0; i < ad->adf_lockcount; i++) {
125 if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
126 fcntl(fd, F_SETLK, &lock[i].lock);
131 /* find a byte lock that overlaps off/len for a particular user */
/* Return the array index of the first lock owned by 'user' whose
 * fcntl lock type matches the ADLOCK_RD/ADLOCK_WR bits in 'type' and
 * whose range overlaps off/len; presumably returns -1 when none is
 * found (the return statements are not visible in this excerpt).
 * NOTE(review): the off/len parameter declarations and the loop
 * variable are also missing from this excerpt. */
132 static __inline__ int adf_findlock(struct ad_fd *ad,
133 const int user, const int type,
137 adf_lock_t *lock = ad->adf_lock;
140 for (i = 0; i < ad->adf_lockcount; i++) {
141 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
142 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
143 (lock[i].user == user) &&
144 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
153 /* search other user lock lists */
/* Like adf_findlock(), but matches locks held by any user OTHER than
 * 'user' -- used to detect conflicts with other openers of the same
 * descriptor.  NOTE(review): the off/len parameters, loop variable,
 * and return statements are missing from this excerpt. */
154 static __inline__ int adf_findxlock(struct ad_fd *ad,
155 const int user, const int type,
159 adf_lock_t *lock = ad->adf_lock;
162 for (i = 0; i < ad->adf_lockcount; i++) {
163 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
164 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
165 (lock[i].user != user) &&
166 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
172 /* okay, this needs to do the following:
173 * 1) check current list of locks. error on conflict.
174 * 2) apply the lock. error on conflict with another process.
175 * 3) update the list of locks this file has.
177 * NOTE: this treats synchronization locks a little differently. we
178 * do the following things for those:
179 * 1) if the header file exists, all the locks go in the beginning
181 * 2) if the header file doesn't exist, we stick the locks
182 * in the locations specified by AD_FILELOCK_RD/WR.
/* byte-offset slot numbers used for synchronization ("file") locks */
/* open-mode slots */
184 #define LOCK_RSRC_RD (0)
185 #define LOCK_RSRC_WR (1)
186 #define LOCK_DATA_RD (2)
187 #define LOCK_DATA_WR (3)
/* deny-mode slots */
189 #define LOCK_RSRC_DRD (4)
190 #define LOCK_RSRC_DWR (5)
191 #define LOCK_DATA_DRD (6)
192 #define LOCK_DATA_DWR (7)
/* opened-for-neither-read-nor-write slots */
194 #define LOCK_RSRC_NONE (8)
195 #define LOCK_DATA_NONE (9)
198 translate a data fork lock to an offset
/* Map an AD_FILELOCK_* data-fork sync-lock constant to its
 * LOCK_DATA_* slot number.
 * NOTE(review): the surrounding comment markers, the declaration of
 * 'start' (presumably initialized to 'off'), braces, and the return
 * statement are missing from this excerpt. */
201 static int df2off(int off)
204 if (off == AD_FILELOCK_OPEN_WR)
205 start = LOCK_DATA_WR;
206 else if (off == AD_FILELOCK_OPEN_RD)
207 start = LOCK_DATA_RD;
208 else if (off == AD_FILELOCK_DENY_RD)
209 start = LOCK_DATA_DRD;
210 else if (off == AD_FILELOCK_DENY_WR)
211 start = LOCK_DATA_DWR;
212 else if (off == AD_FILELOCK_OPEN_NONE)
213 start = LOCK_DATA_NONE;
218 translate a resource fork lock to an offset
/* Map an AD_FILELOCK_* resource-fork sync-lock constant to its
 * LOCK_RSRC_* slot number (mirror of df2off()).
 * NOTE(review): the surrounding comment markers, the declaration of
 * 'start', braces, and the return statement are missing from this
 * excerpt. */
221 static int hf2off(int off)
224 if (off == AD_FILELOCK_OPEN_WR)
225 start = LOCK_RSRC_WR;
226 else if (off == AD_FILELOCK_OPEN_RD)
227 start = LOCK_RSRC_RD;
228 else if (off == AD_FILELOCK_DENY_RD)
229 start = LOCK_RSRC_DRD;
230 else if (off == AD_FILELOCK_DENY_WR)
231 start = LOCK_RSRC_DWR;
232 else if (off == AD_FILELOCK_OPEN_NONE)
233 start = LOCK_RSRC_NONE;
/* Acquire, upgrade, or clear a byte-range lock on fork 'eid' for the
 * given in-process 'user'.  Conflicts with locks already recorded for
 * other users of this descriptor are rejected before fcntl() is even
 * attempted.  Read locks overlapping another user's read lock share a
 * refcount so the kernel lock survives until the last holder unlocks.
 * NOTE(review): many lines (declarations of lock/adf/i, error returns,
 * braces) are missing from this excerpt; documented from visible code
 * only. */
237 int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int type,
238 const off_t off, const size_t len, const int user)
242 adf_lock_t *adflock, *oldlock;
/* pick the fork the lock applies to and compute the absolute byte
 * offset within the underlying file */
246 if (eid == ADEID_DFORK) {
248 if ((type & ADLOCK_FILELOCK)) {
249 if (ad_hfileno(ad) != -1) {
/* header file exists: data-fork sync locks live in the slot area
 * mapped by df2off() */
250 lock.l_start = df2off(off);
256 if (type & ADLOCK_FILELOCK)
257 lock.l_start = hf2off(off);
/* ordinary byte locks are offset past the entry's start */
259 lock.l_start += ad_getentryoff(ad, eid);
262 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
264 /* see if it's locked by another user.
265 * NOTE: this guarantees that any existing locks must be at most
266 * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
267 * guaranteed to be ORable. */
268 if (adf_findxlock(adf, user, ADLOCK_WR |
269 ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
270 lock.l_start, len) > -1) {
275 /* look for any existing lock that we may have */
276 i = adf_findlock(adf, user, ADLOCK_RD | ADLOCK_WR, lock.l_start, len);
277 adflock = (i < 0) ? NULL : adf->adf_lock + i;
279 /* here's what we check for:
280 1) we're trying to re-lock a lock, but we didn't specify an update.
281 2) we're trying to free only part of a lock.
282 3) we're trying to free a non-existent lock. */
283 if ((!adflock && (lock.l_type == F_UNLCK)) ||
284 (adflock && !(type & ADLOCK_UPGRADE) &&
285 ((lock.l_type != F_UNLCK) || (adflock->lock.l_start != lock.l_start) ||
286 (adflock->lock.l_len != len)))) {
291 lock.l_whence = SEEK_SET;
294 /* now, update our list of locks */
296 if (lock.l_type == F_UNLCK) {
297 adf_freelock(adf, i);
301 /* attempt to lock the file. */
302 if (fcntl(adf->adf_fd, F_SETLK, &lock) < 0)
305 /* we upgraded this lock. */
306 if (adflock && (type & ADLOCK_UPGRADE)) {
307 memcpy(&adflock->lock, &lock, sizeof(lock));
311 /* it wasn't an upgrade */
/* BUG(review): the '=' below is almost certainly meant to be '=='.
 * As written it ASSIGNS F_RDLCK to lock.l_type; on platforms where
 * F_RDLCK == 0 (e.g. Linux) the condition is then always false and
 * overlapping read locks never share a refcount.  Confirm against
 * the upstream fix before changing. */
313 if ((lock.l_type = F_RDLCK) &&
314 ((i = adf_findxlock(adf, user, ADLOCK_RD, lock.l_start, len)) > -1)) {
315 oldlock = adf->adf_lock + i;
318 /* no more space. this will also happen if lockmax == lockcount == 0 */
319 if (adf->adf_lockmax == adf->adf_lockcount) {
320 adf_lock_t *tmp = (adf_lock_t *)
321 realloc(adf->adf_lock, sizeof(adf_lock_t)*
322 (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
326 adf->adf_lockmax += ARRAY_BLOCK_SIZE;
328 adflock = adf->adf_lock + adf->adf_lockcount;
/* record the new lock entry */
331 memcpy(&adflock->lock, &lock, sizeof(lock));
332 adflock->user = user;
/* share the refcount with an overlapping read lock when one exists;
 * otherwise allocate a fresh zeroed counter */
334 adflock->refcount = oldlock->refcount;
335 else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
339 (*adflock->refcount)++;
340 adf->adf_lockcount++;
/* error path: undo the kernel lock taken above */
344 lock.l_type = F_UNLCK;
345 fcntl(adf->adf_fd, F_SETLK, &lock);
349 /* -------------------------
350 we are using lock as tristate variable
/* Test whether a synchronization ("file") lock at slot 'off' on fork
 * 'eid' is held.  Per the visible returns: 1 when another process
 * holds a conflicting lock (EACCES/EAGAIN from the probe), -1 on an
 * unexpected fcntl() failure, otherwise the result of releasing the
 * probe lock (0 on success).
 * NOTE(review): declarations of lock/adf/plock/i and several braces
 * are missing from this excerpt. */
357 int ad_testlock(struct adouble *ad, int eid, const off_t off)
365 if (eid == ADEID_DFORK) {
367 if ((ad_hfileno(ad) != -1)) {
369 lock.l_start = df2off(off);
373 lock.l_start = hf2off(off);
376 plock = adf->adf_lock;
377 /* Do we hold the lock ourselves? */
378 lock.l_whence = SEEK_SET;
380 for (i = 0; i < adf->adf_lockcount; i++) {
381 if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
384 /* Does another process have a lock?
/* probe with the strongest type our open mode allows */
387 lock.l_type = (ad_getoflags(ad, eid) & O_RDWR) ?F_WRLCK : F_RDLCK;
/* probe by actually taking the lock; EACCES/EAGAIN mean somebody
 * else holds a conflicting lock */
389 if (fcntl(adf->adf_fd, F_SETLK, &lock) < 0) {
390 return (errno == EACCES || errno == EAGAIN)?1:-1;
/* we got the probe lock: release it again */
393 lock.l_type = F_UNLCK;
394 return fcntl(adf->adf_fd, F_SETLK, &lock);
397 /* -------------------------
399 /* with temp locks, we don't need to distinguish within the same
400 * process as everything is single-threaded. in addition, if
401 * multi-threading gets added, it will only be in a few areas. */
/* Take (or release) a temporary byte lock used around read/write
 * operations, then on release re-assert any recorded byte-range
 * locks the unlock may have clobbered.  Returns the fcntl() result
 * ('err' is presumably returned; the return statement is not visible
 * in this excerpt).
 * NOTE(review): declarations of lock/adf/err, the ADEID_DFORK/else
 * branch bodies, and several braces are missing from this excerpt. */
402 int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int type,
403 const off_t off, const size_t len)
410 if (eid == ADEID_DFORK) {
414 /* if ADLOCK_FILELOCK we want a lock from offset 0
415 * it's used when deleting a file:
416 * in open we put read locks on meta datas
417 * in delete a write locks on the whole file
418 * so if the file is open by somebody else it fails
420 if (!(type & ADLOCK_FILELOCK))
421 lock.l_start += ad_getentryoff(ad, eid);
423 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
424 lock.l_whence = SEEK_SET;
427 /* okay, we might have ranges byte-locked. we need to make sure that
428 * we restore the appropriate ranges once we're done. so, we check
429 * for overlap on an unlock and relock.
430 * XXX: in the future, all the byte locks will be sorted and contiguous.
431 * we just want to upgrade all the locks and then downgrade them
433 err = fcntl(adf->adf_fd, F_SETLK, &lock);
/* a successful unlock may have released parts of recorded locks --
 * re-assert anything overlapping the range */
434 if (!err && (lock.l_type == F_UNLCK))
435 adf_relockrange(adf, adf->adf_fd, lock.l_start, len);
/* Release every lock 'user' holds on both the data fork and the
 * header (resource) fork of this AppleDouble file.  A fork whose
 * descriptor is -1 is not open and is skipped. */
441 void ad_fcntl_unlock(struct adouble *ad, const int user)
443 if (ad->ad_df.adf_fd != -1) {
444 adf_unlock(&ad->ad_df, ad->ad_df.adf_fd, user);
446 if (ad->ad_hf.adf_fd != -1) {
447 adf_unlock(&ad->ad_hf, ad->ad_hf.adf_fd, user);