/*
 * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
 * All Rights Reserved. See COPYRIGHT for more information.
 *
 * Byte-range locks. This uses either whole-file flocks to fake byte
 * locks or fcntl-based actual byte locks. Because fcntl locks are
 * process-oriented, we need to keep around a list of file descriptors
 * that refer to the same file. Currently, this doesn't serialize access
 * to the locks. As a result, there's the potential for race conditions.
 *
 * TODO: fix the race when reading/writing.
 *       keep a pool of both locks and reference counters around so that
 *       we can save on mallocs. we should also use a tree to keep things
 *       sorted.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/file.h>

#include <atalk/adouble.h>

#include "ad_private.h"
/* translate between ADLOCK styles and specific locking mechanisms */
#define XLATE_FLOCK(type) ((type) == ADLOCK_RD ? LOCK_SH : \
	((type) == ADLOCK_WR ? LOCK_EX : \
	 ((type) == ADLOCK_CLR ? LOCK_UN : -1)))

#define XLATE_FCNTL_LOCK(type) ((type) == ADLOCK_RD ? F_RDLCK : \
	((type) == ADLOCK_WR ? F_WRLCK : \
	 ((type) == ADLOCK_CLR ? F_UNLCK : -1)))

/* true when range (a, alen) overlaps range (b, blen). a length of 0
 * means "to end of file" (fcntl semantics), so a zero-length range
 * overlaps anything that starts at or after it. */
#define OVERLAP(a,alen,b,blen) ((!(alen) && (a) <= (b)) || \
				(!(blen) && (b) <= (a)) || \
				((((a) + (alen)) > (b)) && \
				 (((b) + (blen)) > (a))))

/* allocation for lock regions. we allocate aggressively and shrink
 * only in large chunks. */
#define ARRAY_BLOCK_SIZE 10
#define ARRAY_FREE_DELTA 100
48 /* remove a lock and compact space if necessary */
49 static __inline__ void adf_freelock(struct ad_fd *ad, const int i)
51 adf_lock_t *lock = ad->adf_lock + i;
53 if (--(*lock->refcount) < 1) {
55 lock->lock.l_type = F_UNLCK;
56 fcntl(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
61 /* move another lock into the empty space */
62 if (i < ad->adf_lockcount) {
63 memcpy(lock, lock + ad->adf_lockcount - i, sizeof(adf_lock_t));
66 /* free extra cruft if we go past a boundary. we always want to
67 * keep at least some stuff around for allocations. this wastes
68 * a bit of space to save time on reallocations. */
69 if ((ad->adf_lockmax > ARRAY_FREE_DELTA) &&
70 (ad->adf_lockcount + ARRAY_FREE_DELTA < ad->adf_lockmax)) {
71 struct adf_lock_t *tmp;
73 tmp = (struct adf_lock_t *)
74 realloc(ad->adf_lock, sizeof(adf_lock_t)*
75 (ad->adf_lockcount + ARRAY_FREE_DELTA));
78 ad->adf_lockmax = ad->adf_lockcount + ARRAY_FREE_DELTA;
84 /* this needs to deal with the following cases:
85 * 1) user is the only user of the lock
86 * 2) user shares a read lock with another user
88 * i converted to using arrays of locks. everytime a lock
89 * gets removed, we shift all of the locks down.
91 static __inline__ void adf_unlock(struct ad_fd *ad, int fd, const int user)
93 adf_lock_t *lock = ad->adf_lock;
96 for (i = 0; i < ad->adf_lockcount; i++) {
97 if (lock[i].user == user) {
98 /* we're really going to delete this lock. note: read locks
99 are the only ones that allow refcounts > 1 */
101 i--; /* we shifted things down, so we need to backtrack */
106 /* relock any byte lock that overlaps off/len. unlock everything
108 static __inline__ void adf_relockrange(struct ad_fd *ad, int fd,
109 const off_t off, const size_t len)
111 adf_lock_t *lock = ad->adf_lock;
114 for (i = 0; i < ad->adf_lockcount; i++) {
115 if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
116 fcntl(fd, F_SETLK, &lock[i].lock);
121 /* find a byte lock that overlaps off/len for a particular user */
122 static __inline__ int adf_findlock(struct ad_fd *ad,
123 const int user, const int type,
127 adf_lock_t *lock = ad->adf_lock;
130 for (i = 0; i < ad->adf_lockcount; i++) {
131 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
132 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
133 (lock[i].user == user) &&
134 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
143 /* search other user lock lists */
144 static __inline__ int adf_findxlock(struct ad_fd *ad,
145 const int user, const int type,
149 adf_lock_t *lock = ad->adf_lock;
152 for (i = 0; i < ad->adf_lockcount; i++) {
153 if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
154 ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
155 (lock[i].user != user) &&
156 OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
162 /* okay, this needs to do the following:
163 * 1) check current list of locks. error on conflict.
164 * 2) apply the lock. error on conflict with another process.
165 * 3) update the list of locks this file has.
167 * NOTE: this treats synchronization locks a little differently. we
168 * do the following things for those:
169 * 1) if the header file exists, all the locks go in the beginning
171 * 2) if the header file doesn't exist, we stick the locks
172 * in the locations specified by AD_FILELOCK_RD/WR.
174 #define LOCK_RSRC_RD (0)
175 #define LOCK_RSRC_WR (1)
176 #define LOCK_DATA_RD (2)
177 #define LOCK_DATA_WR (3)
178 int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int type,
179 const off_t off, const size_t len, const int user)
183 adf_lock_t *adflock, *oldlock;
187 if (eid == ADEID_DFORK) {
188 if ((type & ADLOCK_FILELOCK) && (ad_hfileno(ad) != -1)) {
190 if (off == AD_FILELOCK_WR)
191 lock.l_start = LOCK_DATA_WR;
192 else if (off == AD_FILELOCK_RD)
193 lock.l_start = LOCK_DATA_RD;
199 if (type & ADLOCK_FILELOCK) {
200 if (off == AD_FILELOCK_WR)
201 lock.l_start = LOCK_RSRC_WR;
202 else if (off == AD_FILELOCK_RD)
203 lock.l_start = LOCK_RSRC_RD;
205 lock.l_start += ad_getentryoff(ad, eid);
208 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
210 /* see if it's locked by another user.
211 * NOTE: this guarantees that any existing locks must be at most
212 * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
213 * guaranteed to be ORable. */
214 if (adf_findxlock(adf, user, ADLOCK_WR |
215 ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
216 lock.l_start, len) > -1) {
221 /* look for any existing lock that we may have */
222 i = adf_findlock(adf, user, ADLOCK_RD | ADLOCK_WR, lock.l_start, len);
223 adflock = (i < 0) ? NULL : adf->adf_lock + i;
225 /* here's what we check for:
226 1) we're trying to re-lock a lock, but we didn't specify an update.
227 2) we're trying to free only part of a lock.
228 3) we're trying to free a non-existent lock. */
229 if ((!adflock && (lock.l_type == F_UNLCK)) ||
230 (adflock && !(type & ADLOCK_UPGRADE) &&
231 ((lock.l_type != F_UNLCK) || (adflock->lock.l_start != lock.l_start) ||
232 (adflock->lock.l_len != len)))) {
237 lock.l_whence = SEEK_SET;
240 /* now, update our list of locks */
242 if (lock.l_type == F_UNLCK) {
243 adf_freelock(adf, i);
247 /* attempt to lock the file. */
248 if (fcntl(adf->adf_fd, F_SETLK, &lock) < 0)
251 /* we upgraded this lock. */
252 if (adflock && (type & ADLOCK_UPGRADE)) {
253 memcpy(&adflock->lock, &lock, sizeof(lock));
257 /* it wasn't an upgrade */
259 if ((lock.l_type = F_RDLCK) &&
260 ((i = adf_findxlock(adf, user, ADLOCK_RD, lock.l_start, len)) > -1)) {
261 oldlock = adf->adf_lock + i;
264 /* no more space. this will also happen if lockmax == lockcount == 0 */
265 if (adf->adf_lockmax == adf->adf_lockcount) {
266 adf_lock_t *tmp = (adf_lock_t *)
267 realloc(adf->adf_lock, sizeof(adf_lock_t)*
268 (adf->adf_lockmax + ARRAY_BLOCK_SIZE));
272 adf->adf_lockmax += ARRAY_BLOCK_SIZE;
274 adflock = adf->adf_lock + adf->adf_lockcount;
277 memcpy(&adflock->lock, &lock, sizeof(lock));
278 adflock->user = user;
280 adflock->refcount = oldlock->refcount;
281 else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
285 (*adflock->refcount)++;
286 adf->adf_lockcount++;
290 lock.l_type = F_UNLCK;
291 fcntl(adf->adf_fd, F_SETLK, &lock);
296 /* with temp locks, we don't need to distinguish within the same
297 * process as everything is single-threaded. in addition, if
298 * multi-threading gets added, it will only be in a few areas. */
299 int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int type,
300 const off_t off, const size_t len)
307 if (eid == ADEID_DFORK) {
311 lock.l_start += ad_getentryoff(ad, eid);
313 lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
314 lock.l_whence = SEEK_SET;
317 /* okay, we might have ranges byte-locked. we need to make sure that
318 * we restore the appropriate ranges once we're done. so, we check
319 * for overlap on an unlock and relock.
320 * XXX: in the future, all the byte locks will be sorted and contiguous.
321 * we just want to upgrade all the locks and then downgrade them
323 err = fcntl(adf->adf_fd, F_SETLK, &lock);
324 if (!err && (lock.l_type == F_UNLCK))
325 adf_relockrange(adf, adf->adf_fd, lock.l_start, len);
331 void ad_fcntl_unlock(struct adouble *ad, const int user)
333 if (ad->ad_df.adf_fd != -1) {
334 adf_unlock(&ad->ad_df, ad->ad_df.adf_fd, user);
336 if (ad->ad_hf.adf_fd != -1) {
337 adf_unlock(&ad->ad_hf, ad->ad_hf.adf_fd, user);
341 /* byte-range locks. ad_lock is used by afp_bytelock and afp_openfork
342 * to establish locks. both ad_lock and ad_tmplock take 0, 0, 0 to
343 * signify locking of the entire file. in the absence of working
344 * byte-range locks, this will default to file-wide flock-style locks.
346 int ad_flock_lock(struct adouble *ad, const u_int32_t eid, const int type,
347 const off_t off, const size_t len, const int user)
351 lock_type = XLATE_FLOCK(type & ADLOCK_MASK);
352 if (eid == ADEID_DFORK) {
353 if ((err = flock(ad_dfileno(ad), lock_type | LOCK_NB)) == 0)
354 ad->ad_df.adf_lockcount = lock_type;
355 } else if ((err = flock(ad_hfileno(ad), lock_type | LOCK_NB)) == 0)
356 ad->ad_hf.adf_lockcount = lock_type;
359 if ((EWOULDBLOCK != EAGAIN) && (errno == EWOULDBLOCK))
367 /* ad_tmplock is used by afpd to lock actual read/write operations.
368 * it saves the current lock state before attempting to lock to prevent
369 * mixups. if byte-locks don't exist, it will lock the entire file with
370 * an flock. we can be a little smart here by just upgrading/downgrading
372 int ad_flock_tmplock(struct adouble *ad, const u_int32_t eid, const int type,
373 const off_t off, const size_t len)
375 int fd, oldlock, lock_type;
377 if (eid == ADEID_DFORK) {
378 oldlock = ad->ad_df.adf_lockcount;
381 oldlock = ad->ad_hf.adf_lockcount;
385 /* if we already have a write lock, we don't need to do anything */
386 if (oldlock == LOCK_EX) {
390 /* if we have a read lock, upgrade it if necessary */
391 lock_type = XLATE_FLOCK(type & ADLOCK_MASK);
392 if (oldlock == LOCK_SH) {
393 if (lock_type == LOCK_EX)
394 return flock(fd, LOCK_EX | LOCK_NB);
395 else if (lock_type == LOCK_UN) /* reset it */
396 return flock(fd, LOCK_SH | LOCK_NB);
397 else /* do nothing */
401 /* if we don't already have a lock, just do it. */
402 return flock(fd, lock_type | LOCK_NB);