X-Git-Url: https://arthur.barton.de/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=libatalk%2Fadouble%2Fad_lock.c;h=fce2c2d02b77ee2e4ae6c5f07abe84c241417255;hb=e87de1e72892cc7c0f5a0cec353937404b2fe3ac;hp=29cf22f0e4bcb72441c0000ab0461a32880b7700;hpb=458fcac4e6aee7eb54ba744f57169f70cce12505;p=netatalk.git

diff --git a/libatalk/adouble/ad_lock.c b/libatalk/adouble/ad_lock.c
index 29cf22f0..fce2c2d0 100644
--- a/libatalk/adouble/ad_lock.c
+++ b/libatalk/adouble/ad_lock.c
@@ -1,4 +1,6 @@
 /*
+ * $Id: ad_lock.c,v 1.19 2010-02-26 12:57:50 didg Exp $
+ *
  * Copyright (c) 1998,1999 Adrian Sun (asun@zoology.washington.edu)
  * All Rights Reserved. See COPYRIGHT for more information.
  *
@@ -16,16 +18,15 @@
 #ifdef HAVE_CONFIG_H
 #include "config.h"
-#endif
+#endif /* HAVE_CONFIG_H */
+
+#include 

 #include 
 #include 
-#include 
-#include 
-#include 

 #include 
-#include 
+#include 

 #include "ad_private.h"

@@ -34,15 +35,43 @@
 ((type) == ADLOCK_WR ? LOCK_EX : \
  ((type) == ADLOCK_CLR ? LOCK_UN : -1)))

-#define XLATE_FCNTL_LOCK(type) ((type) == ADLOCK_RD ? F_RDLCK : \
-((type) == ADLOCK_WR ? F_WRLCK : \
- ((type) == ADLOCK_CLR ? F_UNLCK : -1)))
-
-#define OVERLAP(a,alen,b,blen) ((!(alen) && (a) <= (b)) || \
-                                (!(blen) && (b) <= (a)) || \
-                                ((((a) + (alen)) > (b)) && \
-                                 (((b) + (blen)) > (a))))
+#ifdef DISABLE_LOCKING
+#define fcntl(a, b, c ) (0)
+#endif
+/* ----------------------- */
+static int set_lock(int fd, int cmd, struct flock *lock)
+{
+    if (!fd) {
+        /* We assign fd = 0 for symlinks -> do nothing */
+        if (cmd == F_GETLK)
+            lock->l_type = F_UNLCK;
+        return 0;
+    }
+    return fcntl(fd, cmd, lock);
+}
+
+/* ----------------------- */
+static int XLATE_FCNTL_LOCK(int type)
+{
+    switch(type) {
+    case ADLOCK_RD:
+        return F_RDLCK;
+    case ADLOCK_WR:
+        return F_WRLCK;
+    case ADLOCK_CLR:
+        return F_UNLCK;
+    }
+    return -1;
+}
+
+/* ----------------------- */
+static int OVERLAP(off_t a, off_t alen, off_t b, off_t blen)
+{
+    return (!alen && a <= b) ||
+           (!blen && b <= a) ||
+           ( (a + alen > b) && (b + blen > a) );
+}

 /* allocation for lock regions. we allocate aggressively and shrink
  * only in large chunks. */
@@ -50,14 +79,16 @@
 #define ARRAY_FREE_DELTA 100

 /* remove a lock and compact space if necessary */
-static __inline__ void adf_freelock(struct ad_fd *ad, const int i)
+static void adf_freelock(struct ad_fd *ad, const int i)
 {
     adf_lock_t *lock = ad->adf_lock + i;

     if (--(*lock->refcount) < 1) {
         free(lock->refcount);
-        lock->lock.l_type = F_UNLCK;
-        fcntl(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
+        if (!ad->adf_excl) {
+            lock->lock.l_type = F_UNLCK;
+            set_lock(ad->adf_fd, F_SETLK, &lock->lock); /* unlock */
+        }
     }

     ad->adf_lockcount--;
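
Since OVERLAP() is now a function, its semantics are easy to check in isolation. A minimal standalone sketch (the lowercase name and the test values are mine, not netatalk's); the zero-length cases mirror fcntl()'s convention that l_len == 0 means "from l_start to end of file":

    #include <assert.h>
    #include <sys/types.h>

    static int overlap(off_t a, off_t alen, off_t b, off_t blen)
    {
        return (!alen && a <= b) ||                /* a runs to EOF, starts at/before b */
               (!blen && b <= a) ||                /* b runs to EOF, starts at/before a */
               ((a + alen > b) && (b + blen > a)); /* two finite ranges intersect */
    }

    int main(void)
    {
        assert(overlap(0, 10, 5, 10));    /* [0,10) and [5,15) intersect   */
        assert(!overlap(0, 10, 10, 5));   /* [0,10) and [10,15) only touch */
        assert(overlap(100, 0, 50, 100)); /* "100 to EOF" reaches [50,150) */
        assert(!overlap(100, 0, 50, 10)); /* ...but not [50,60)            */
        return 0;
    }
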
@@ -86,47 +117,50 @@ static __inline__ void adf_freelock(struct ad_fd *ad, const int i)

 /* this needs to deal with the following cases:
- * 1) user is the only user of the lock
- * 2) user shares a read lock with another user
+ * 1) fork is the only user of the lock
+ * 2) fork shares a read lock with another open fork
  *
  * i converted to using arrays of locks. every time a lock
  * gets removed, we shift all of the locks down.
  */
-static __inline__ void adf_unlock(struct ad_fd *ad, int fd, const int user)
+static void adf_unlock(struct ad_fd *ad, const int fork)
 {
     adf_lock_t *lock = ad->adf_lock;
     int i;

     for (i = 0; i < ad->adf_lockcount; i++) {
-        if (lock[i].user == user) {
+
+        if (lock[i].user == fork) {
             /* we're really going to delete this lock.
                note: read locks are the only ones that allow refcounts > 1 */
             adf_freelock(ad, i);
             i--; /* we shifted things down, so we need to backtrack */
-        }
+            /* unlikely, but realloc may have changed adf_lock */
+            lock = ad->adf_lock;
+        }
     }
 }

 /* relock any byte lock that overlaps off/len. unlock everything
  * else. */
-static __inline__ void adf_relockrange(struct ad_fd *ad, int fd,
-                                       const off_t off, const size_t len)
+static void adf_relockrange(struct ad_fd *ad, int fd,
+                            const off_t off, const off_t len)
 {
     adf_lock_t *lock = ad->adf_lock;
     int i;

-    for (i = 0; i < ad->adf_lockcount; i++) {
+    if (!ad->adf_excl) for (i = 0; i < ad->adf_lockcount; i++) {
         if (OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
-            fcntl(fd, F_SETLK, &lock[i].lock);
+            set_lock(fd, F_SETLK, &lock[i].lock);
     }
 }

-/* find a byte lock that overlaps off/len for a particular user */
-static __inline__ int adf_findlock(struct ad_fd *ad,
-                                   const int user, const int type,
+/* find a byte lock that overlaps off/len for a particular open fork */
+static int adf_findlock(struct ad_fd *ad,
+                        const int fork, const int type,
                                    const off_t off,
-                                   const size_t len)
+                                   const off_t len)
 {
     adf_lock_t *lock = ad->adf_lock;
     int i;
@@ -134,7 +168,7 @@ static __inline__ int adf_findlock(struct ad_fd *ad,
     for (i = 0; i < ad->adf_lockcount; i++) {
         if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
              ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
-            (lock[i].user == user) &&
+            (lock[i].user == fork) &&
             OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len)) {
             return i;
         }
@@ -144,11 +178,11 @@ static __inline__ int adf_findlock(struct ad_fd *ad,
 }

-/* search other user lock lists */
-static __inline__ int adf_findxlock(struct ad_fd *ad,
-                                    const int user, const int type,
+/* search other fork lock lists */
+static int adf_findxlock(struct ad_fd *ad,
+                         const int fork, const int type,
                                     const off_t off,
-                                    const size_t len)
+                                    const off_t len)
 {
     adf_lock_t *lock = ad->adf_lock;
     int i;
@@ -156,7 +190,7 @@ static __inline__ int adf_findxlock(struct ad_fd *ad,
     for (i = 0; i < ad->adf_lockcount; i++) {
         if ((((type & ADLOCK_RD) && (lock[i].lock.l_type == F_RDLCK)) ||
              ((type & ADLOCK_WR) && (lock[i].lock.l_type == F_WRLCK))) &&
-            (lock[i].user != user) &&
+            (lock[i].user != fork) &&
             OVERLAP(off, len, lock[i].lock.l_start, lock[i].lock.l_len))
             return i;
     }
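
For context on why the code keeps this refcounted shadow list at all: POSIX fcntl() locks are owned by the process, not by the descriptor or the calling fork, so overlapping locks taken twice merge into one and a single unlock drops the range for everybody. A standalone sketch (the file name and helper are illustrative):

    #include <fcntl.h>
    #include <sys/types.h>
    #include <unistd.h>

    static int setlk(int fd, short type, off_t start, off_t len)
    {
        struct flock fl = { .l_type = type, .l_whence = SEEK_SET,
                            .l_start = start, .l_len = len };
        return fcntl(fd, F_SETLK, &fl);
    }

    int main(void)
    {
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);

        setlk(fd, F_RDLCK, 0, 100); /* open fork 1 "takes" a read lock  */
        setlk(fd, F_RDLCK, 0, 100); /* open fork 2 takes the same range */
        setlk(fd, F_UNLCK, 0, 100); /* one unlock ...                   */
        /* ... and the kernel lock is gone for both forks, which is why
         * adf_freelock() only issues F_UNLCK once the shared refcount
         * drops below 1. */
        close(fd);
        return 0;
    }
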
@@ -175,55 +209,129 @@ static __inline__ int adf_findxlock(struct ad_fd *ad,
  * 2) if the header file doesn't exist, we stick the locks
  *    in the locations specified by AD_FILELOCK_RD/WR.
  */
-#define LOCK_RSRC_RD (0)
-#define LOCK_RSRC_WR (1)
-#define LOCK_DATA_RD (2)
-#define LOCK_DATA_WR (3)
-int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int type,
-                  const off_t off, const size_t len, const int user)
+#define LOCK_DATA_WR (0)
+#define LOCK_DATA_RD (1)
+#define LOCK_RSRC_WR (2)
+#define LOCK_RSRC_RD (3)
+
+#define LOCK_RSRC_DRD (4)
+#define LOCK_RSRC_DWR (5)
+#define LOCK_DATA_DRD (6)
+#define LOCK_DATA_DWR (7)
+
+#define LOCK_RSRC_NONE (8)
+#define LOCK_DATA_NONE (9)
+
+/* --------------
+   translate a data fork lock to an offset
+*/
+
+static off_t df2off(int off)
+{
+int start = off;
+    if (off == AD_FILELOCK_OPEN_WR)
+        start = LOCK_DATA_WR;
+    else if (off == AD_FILELOCK_OPEN_RD)
+        start = LOCK_DATA_RD;
+    else if (off == AD_FILELOCK_DENY_RD)
+        start = LOCK_DATA_DRD;
+    else if (off == AD_FILELOCK_DENY_WR)
+        start = LOCK_DATA_DWR;
+    else if (off == AD_FILELOCK_OPEN_NONE)
+        start = LOCK_DATA_NONE;
+    return start;
+}
+
+/* --------------
+   translate a resource fork lock to an offset
+*/
+
+static off_t hf2off(int off)
+{
+int start = off;
+    if (off == AD_FILELOCK_OPEN_WR)
+        start = LOCK_RSRC_WR;
+    else if (off == AD_FILELOCK_OPEN_RD)
+        start = LOCK_RSRC_RD;
+    else if (off == AD_FILELOCK_DENY_RD)
+        start = LOCK_RSRC_DRD;
+    else if (off == AD_FILELOCK_DENY_WR)
+        start = LOCK_RSRC_DWR;
+    else if (off == AD_FILELOCK_OPEN_NONE)
+        start = LOCK_RSRC_NONE;
+    return start;
+}
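
To make the offset mapping concrete: a deny-write open of the data fork ends up as a read lock on the single LOCK_DATA_DWR byte of the metadata fork. A hedged usage sketch of the function defined just below (the fork id 42 is invented for illustration; exactly how a deny-write gets registered depends on the caller in afpd):

    #include <atalk/adouble.h>

    /* Register "deny write" on the data fork: subsequent opens that
     * ask for write access will find this byte via adf_findxlock()/
     * testlock() and be refused. The fork id 42 is a placeholder. */
    static int deny_write_data_fork(struct adouble *ad)
    {
        return ad_fcntl_lock(ad, ADEID_DFORK, ADLOCK_RD | ADLOCK_FILELOCK,
                             AD_FILELOCK_DENY_WR, 1, 42);
    }
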
+
+/* ------------------ */
+int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int locktype,
+                  const off_t off, const off_t len, const int fork)
 {
     struct flock lock;
     struct ad_fd *adf;
-    adf_lock_t *adflock, *oldlock;
+    adf_lock_t *adflock;
+    int oldlock;
     int i;
-
+    int type;
+
     lock.l_start = off;
+    type = locktype;
     if (eid == ADEID_DFORK) {
-        if ((type & ADLOCK_FILELOCK) && (ad_hfileno(ad) != -1)) {
-            adf = &ad->ad_hf;
-            if (off == AD_FILELOCK_WR)
-                lock.l_start = LOCK_DATA_WR;
-            else if (off == AD_FILELOCK_RD)
-                lock.l_start = LOCK_DATA_RD;
-        } else
-            adf = &ad->ad_df;
-
+        adf = &ad->ad_data_fork;
+        if ((type & ADLOCK_FILELOCK)) {
+            if (ad_meta_fileno(ad) != -1) { /* META */
+                adf = ad->ad_md;
+                lock.l_start = df2off(off);
+            }
+        }
     } else { /* rfork */
-        adf = &ad->ad_hf;
+        if (ad_meta_fileno(ad) == -1 || ad_reso_fileno(ad) == -1) {
+            /* there's no metadata; return a lock error.
+             * otherwise, if a second process is able to create it,
+             * locks are a mess.
+             */
+            errno = EACCES;
+            return -1;
+        }
         if (type & ADLOCK_FILELOCK) {
-            if (off == AD_FILELOCK_WR)
-                lock.l_start = LOCK_RSRC_WR;
-            else if (off == AD_FILELOCK_RD)
-                lock.l_start = LOCK_RSRC_RD;
-        } else
+            adf = ad->ad_md; /* either resource or metadata (set in ad_open) */
+            lock.l_start = hf2off(off);
+        }
+        else {
+            /* it's a byte lock, so we really want the resource fork */
+            adf = &ad->ad_resource_fork;
             lock.l_start += ad_getentryoff(ad, eid);
+        }
     }
-
+
+    /* NOTE: we can't write lock a read-only file. on those, we just
+     * make sure that we have a read lock set. that way, we at least prevent
+     * someone else from really setting a deny read/write on the file.
+     */
+    if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
+        type = (type & ~ADLOCK_WR) | ADLOCK_RD;
+    }
+
+    lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
+    lock.l_whence = SEEK_SET;
+    lock.l_len = len;
+
+    /* byte_lock(len=-1) locks the whole file */
+    if (len == BYTELOCK_MAX) {
+        lock.l_len -= lock.l_start; /* otherwise EOVERFLOW error */
+    }

-    /* see if it's locked by another user.
+    /* see if it's locked by another fork.
      * NOTE: this guarantees that any existing locks must be at most
      * read locks. we use ADLOCK_WR/RD because F_RD/WRLCK aren't
      * guaranteed to be ORable. */
-    if (adf_findxlock(adf, user, ADLOCK_WR |
+    if (adf_findxlock(adf, fork, ADLOCK_WR |
                       ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
-                      lock.l_start, len) > -1) {
+                      lock.l_start, lock.l_len) > -1) {
         errno = EACCES;
         return -1;
     }

     /* look for any existing lock that we may have */
-    i = adf_findlock(adf, user, ADLOCK_RD | ADLOCK_WR, lock.l_start, len);
+    i = adf_findlock(adf, fork, ADLOCK_RD | ADLOCK_WR, lock.l_start, lock.l_len);
     adflock = (i < 0) ? NULL : adf->adf_lock + i;

     /* here's what we check for:
@@ -233,13 +341,11 @@ int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int type,
     if ((!adflock && (lock.l_type == F_UNLCK)) ||
         (adflock && !(type & ADLOCK_UPGRADE) &&
          ((lock.l_type != F_UNLCK) ||
           (adflock->lock.l_start != lock.l_start) ||
-          (adflock->lock.l_len != len)))) {
+          (adflock->lock.l_len != lock.l_len)))) {
         errno = EINVAL;
         return -1;
     }

-    lock.l_whence = SEEK_SET;
-    lock.l_len = len;

     /* now, update our list of locks */
     /* clear the lock */
@@ -249,7 +355,7 @@ int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int type,
     }

     /* attempt to lock the file. */
-    if (fcntl(adf->adf_fd, F_SETLK, &lock) < 0)
+    if (!adf->adf_excl && set_lock(adf->adf_fd, F_SETLK, &lock) < 0)
         return -1;

     /* we upgraded this lock. */
@@ -259,10 +365,9 @@ int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int type,
     }

     /* it wasn't an upgrade */
-    oldlock = NULL;
-    if ((lock.l_type = F_RDLCK) &&
-        ((i = adf_findxlock(adf, user, ADLOCK_RD, lock.l_start, len)) > -1)) {
-        oldlock = adf->adf_lock + i;
+    oldlock = -1;
+    if (lock.l_type == F_RDLCK) {
+        oldlock = adf_findxlock(adf, fork, ADLOCK_RD, lock.l_start, lock.l_len);
     }

     /* no more space. this will also happen if lockmax == lockcount == 0 */
@@ -279,10 +384,10 @@ int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int type,

     /* fill in fields */
     memcpy(&adflock->lock, &lock, sizeof(lock));
-    adflock->user = user;
-    if (oldlock)
-        adflock->refcount = oldlock->refcount;
-    else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
+    adflock->user = fork;
+    if (oldlock > -1) {
+        adflock->refcount = (adf->adf_lock + oldlock)->refcount;
+    } else if ((adflock->refcount = calloc(1, sizeof(int))) == NULL) {
         goto fcntl_lock_err;
     }

@@ -292,118 +397,244 @@ int ad_fcntl_lock(struct adouble *ad, const u_int32_t eid, const int type,

 fcntl_lock_err:
     lock.l_type = F_UNLCK;
-    fcntl(adf->adf_fd, F_SETLK, &lock);
+    if (!adf->adf_excl) set_lock(adf->adf_fd, F_SETLK, &lock);
     return -1;
 }
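
The BYTELOCK_MAX clipping above exists because fcntl() rejects ranges whose last byte is not representable as an off_t. A standalone illustration (BYTELOCK_MAX's value is assumed here; the real definition lives in adouble.h):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define BYTELOCK_MAX 0x7fffffffU  /* assumed "whole fork" AFP length */

    int main(void)
    {
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);

        fl.l_start = 16;          /* say, a resource fork entry offset */
        fl.l_len = BYTELOCK_MAX;  /* client asked for "everything"     */

        /* With a 32-bit off_t, l_start + l_len passes the largest
         * representable offset and F_SETLK fails with EOVERFLOW;
         * shrinking the length by l_start keeps the end in range. */
        fl.l_len -= fl.l_start;

        if (fcntl(fd, F_SETLK, &fl) < 0)
            perror("F_SETLK");
        close(fd);
        return 0;
    }
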
 
+/* -------------------------
+   we are using lock as a tristate variable:
+
+   we have a lock ==> 1
+   no             ==> 0
+   error          ==> -1
+
+*/
+static int testlock(struct ad_fd *adf, off_t off, off_t len)
+{
+    struct flock lock;
+    adf_lock_t *plock;
+    int i;
+
+    lock.l_start = off;
 
-/* with temp locks, we don't need to distinguish within the same
- * process as everything is single-threaded. in addition, if
- * multi-threading gets added, it will only be in a few areas. */
-int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int type,
-                     const off_t off, const size_t len)
+    plock = adf->adf_lock;
+    lock.l_whence = SEEK_SET;
+    lock.l_len = len;
+
+    /* Do we have a lock? */
+    for (i = 0; i < adf->adf_lockcount; i++) {
+        if (OVERLAP(lock.l_start, 1, plock[i].lock.l_start, plock[i].lock.l_len))
+            return 1; /* */
+    }
+    /* Does another process have a lock?
+     */
+    lock.l_type = (adf->adf_flags & O_RDWR) ? F_WRLCK : F_RDLCK;
+
+    if (set_lock(adf->adf_fd, F_GETLK, &lock) < 0) {
+        /* is that kind of error possible? */
+        return (errno == EACCES || errno == EAGAIN) ? 1 : -1;
+    }
+
+    if (lock.l_type == F_UNLCK) {
+        return 0;
+    }
+    return 1;
+}
+
+/* --------------- */
+int ad_testlock(struct adouble *ad, int eid, const off_t off)
+{
+    struct ad_fd *adf;
+    off_t lock_offset;
+
+    lock_offset = off;
+    if (eid == ADEID_DFORK) {
+        adf = &ad->ad_data_fork;
+        if (ad_meta_fileno(ad) != -1) {
+            adf = ad->ad_md;
+            lock_offset = df2off(off);
+        }
+    }
+    else { /* rfork */
+        if (ad_meta_fileno(ad) == -1) {
+            /* there's no resource fork. return no lock */
+            return 0;
+        }
+        adf = ad->ad_md;
+        lock_offset = hf2off(off);
+    }
+    return testlock(adf, lock_offset, 1);
+}
+
+/* -------------------------
+   return whether a file is open by another process.
+   Optimized for the common case:
+   - there are no locks held by another process (clients)
+   - or we already know the answer and don't need to test.
+*/
+u_int16_t ad_openforks(struct adouble *ad, u_int16_t attrbits)
+{
+    u_int16_t ret = 0;
+    struct ad_fd *adf;
+    off_t off;
+
+    if (!(attrbits & (ATTRBIT_DOPEN | ATTRBIT_ROPEN))) {
+        off_t len;
+        /* XXX: this knows the lock layout: AD_FILELOCK_OPEN_WR comes
+           first, and we use that to merge requests
+        */
+        if (ad_meta_fileno(ad) != -1) {
+            /* there's a resource fork; test the four bytes for
+             * data RW/RD and fork RW/RD locks in one request
+             */
+            adf = ad->ad_md;
+            off = LOCK_DATA_WR;
+            len = 4;
+        }
+        else {
+            /* no resource fork, only data RD/RW may exist */
+            adf = &ad->ad_data_fork;
+            off = AD_FILELOCK_OPEN_WR;
+            len = 2;
+        }
+        if (!testlock(adf, off, len))
+            return ret;
+    }
+    /* either there's a lock or we already know one
+       fork is open
+    */
+    if (!(attrbits & ATTRBIT_DOPEN)) {
+        if (ad_meta_fileno(ad) != -1) {
+            adf = ad->ad_md;
+            off = LOCK_DATA_WR;
+        }
+        else {
+            adf = &ad->ad_data_fork;
+            off = AD_FILELOCK_OPEN_WR;
+        }
+        ret = testlock(adf, off, 2) > 0 ? ATTRBIT_DOPEN : 0;
+    }
+
+    if (!(attrbits & ATTRBIT_ROPEN)) {
+        if (ad_meta_fileno(ad) != -1) {
+            adf = ad->ad_md;
+            off = LOCK_RSRC_WR;
+            ret |= testlock(adf, off, 2) > 0 ? ATTRBIT_ROPEN : 0;
+        }
+    }
+
+    return ret;
+}
+
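
testlock()'s two-step check is needed because F_GETLK only reports locks held by other processes; a process's own locks are invisible to it, hence the scan of the in-memory list first. A minimal sketch of the F_GETLK half (probe_byte and the file name are invented):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* 1: another process holds a conflicting lock on the byte at `off`,
     * 0: nobody does, -1: error. Same tristate contract as testlock(). */
    static int probe_byte(int fd, off_t off)
    {
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
                            .l_start = off, .l_len = 1 };

        if (fcntl(fd, F_GETLK, &fl) < 0)
            return -1;
        /* F_GETLK rewrites l_type to F_UNLCK when the lock could be
         * placed, i.e. no *other* process is in the way; our own locks
         * are never reported. */
        return fl.l_type != F_UNLCK;
    }

    int main(void)
    {
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);
        printf("byte 0: %d\n", probe_byte(fd, 0));
        close(fd);
        return 0;
    }
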
+/* -------------------------
+*/
+int ad_fcntl_tmplock(struct adouble *ad, const u_int32_t eid, const int locktype,
+                     const off_t off, const off_t len, const int fork)
 {
     struct flock lock;
     struct ad_fd *adf;
     int err;
+    int type;

     lock.l_start = off;
+    type = locktype;
     if (eid == ADEID_DFORK) {
-        adf = &ad->ad_df;
+        adf = &ad->ad_data_fork;
     } else {
-        adf = &ad->ad_hf;
-        lock.l_start += ad_getentryoff(ad, eid);
+        /* FIXME META */
+        adf = &ad->ad_resource_fork;
+        if (adf->adf_fd == -1) {
+            /* there's no resource fork. return success */
+            return 0;
+        }
+        /* if ADLOCK_FILELOCK, we want a lock from offset 0.
+         * it's used when deleting a file:
+         * in open we put read locks on the metadata;
+         * in delete, a write lock on the whole file.
+         * so if the file is open by somebody else, it fails.
+         */
+        if (!(type & ADLOCK_FILELOCK))
+            lock.l_start += ad_getentryoff(ad, eid);
     }
+
+    if (!(adf->adf_flags & O_RDWR) && (type & ADLOCK_WR)) {
+        type = (type & ~ADLOCK_WR) | ADLOCK_RD;
+    }
+
     lock.l_type = XLATE_FCNTL_LOCK(type & ADLOCK_MASK);
     lock.l_whence = SEEK_SET;
     lock.l_len = len;

+    /* see if it's locked by another fork. */
+    if (fork && adf_findxlock(adf, fork, ADLOCK_WR |
+                  ((type & ADLOCK_WR) ? ADLOCK_RD : 0),
+                  lock.l_start, lock.l_len) > -1) {
+        errno = EACCES;
+        return -1;
+    }
+
     /* okay, we might have ranges byte-locked. we need to make sure that
      * we restore the appropriate ranges once we're done. so, we check
      * for overlap on an unlock and relock.
      * XXX: in the future, all the byte locks will be sorted and contiguous.
      * we just want to upgrade all the locks and then downgrade them
      * here. */
-    err = fcntl(adf->adf_fd, F_SETLK, &lock);
+    if (!adf->adf_excl) {
+        err = set_lock(adf->adf_fd, F_SETLK, &lock);
+    }
+    else {
+        err = 0;
+    }
     if (!err && (lock.l_type == F_UNLCK))
         adf_relockrange(adf, adf->adf_fd, lock.l_start, len);

     return err;
 }

-void ad_fcntl_unlock(struct adouble *ad, const int user)
+/* -------------------------
+   the fork is opened in Read/Write, Deny Read, Deny Write mode;
+   lock the whole file once
+*/
+int ad_excl_lock(struct adouble *ad, const u_int32_t eid)
 {
-    if (ad->ad_df.adf_fd != -1) {
-        adf_unlock(&ad->ad_df, ad->ad_df.adf_fd, user);
-    }
-    if (ad->ad_hf.adf_fd != -1) {
-        adf_unlock(&ad->ad_hf, ad->ad_hf.adf_fd, user);
-    }
-}
-
-/* byte-range locks. ad_lock is used by afp_bytelock and afp_openfork
- * to establish locks. both ad_lock and ad_tmplock take 0, 0, 0 to
- * signify locking of the entire file. in the absence of working
- * byte-range locks, this will default to file-wide flock-style locks.
- */
-int ad_flock_lock(struct adouble *ad, const u_int32_t eid, const int type,
-                  const off_t off, const size_t len, const int user)
-{
-    int err, lock_type;
+    struct ad_fd *adf;
+    struct flock lock;
+    int err;

-    lock_type = XLATE_FLOCK(type & ADLOCK_MASK);
-    if (eid == ADEID_DFORK) {
-        if ((err = flock(ad_dfileno(ad), lock_type | LOCK_NB)) == 0)
-            ad->ad_df.adf_lockcount = lock_type;
-    } else if ((err = flock(ad_hfileno(ad), lock_type | LOCK_NB)) == 0)
-        ad->ad_hf.adf_lockcount = lock_type;
-
-    if (err) {
-        if ((EWOULDBLOCK != EAGAIN) && (errno == EWOULDBLOCK))
-            errno = EAGAIN;
-        return -1;
-    }
-
-    return 0;
-}
+    lock.l_start = 0;
+    lock.l_type = F_WRLCK;
+    lock.l_whence = SEEK_SET;
+    lock.l_len = 0;

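
The l_len = 0 just set is the fcntl() idiom for "from l_start to end of file, including bytes appended later", so the exclusive lock keeps covering the fork as it grows. A standalone sketch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
                            .l_start = 0, .l_len = 0 /* 0 = to EOF, open ended */ };
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);

        if (fcntl(fd, F_SETLK, &fl) < 0)
            perror("F_SETLK");
        /* Appending to the file does not escape the lock: the range
         * has no upper bound, so no re-lock is needed as it grows. */
        close(fd);
        return 0;
    }
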
-/* ad_tmplock is used by afpd to lock actual read/write operations.
- * it saves the current lock state before attempting to lock to prevent
- * mixups. if byte-locks don't exist, it will lock the entire file with
- * a flock. we can be a little smart here by just upgrading/downgrading
- * locks.
- */
-int ad_flock_tmplock(struct adouble *ad, const u_int32_t eid, const int type,
-                     const off_t off, const size_t len)
-{
-    int fd, oldlock, lock_type;
-
     if (eid == ADEID_DFORK) {
-        oldlock = ad->ad_df.adf_lockcount;
-        fd = ad_dfileno(ad);
+        adf = &ad->ad_data_fork;
     } else {
-        oldlock = ad->ad_hf.adf_lockcount;
-        fd = ad_hfileno(ad);
+        adf = &ad->ad_resource_fork;
+        lock.l_start = ad_getentryoff(ad, eid);
     }
+
+    err = set_lock(adf->adf_fd, F_SETLK, &lock);
+    if (!err)
+        adf->adf_excl = 1;
+    return err;
+}

-    /* if we already have a write lock, we don't need to do anything */
-    if (oldlock == LOCK_EX) {
-        return 0;
+/* --------------------- */
+void ad_fcntl_unlock(struct adouble *ad, const int fork)
+{
+    if (ad_data_fileno(ad) != -1) {
+        adf_unlock(&ad->ad_data_fork, fork);
+    }
+    if (ad_reso_fileno(ad) != -1) {
+        adf_unlock(&ad->ad_resource_fork, fork);
     }

-    /* if we have a read lock, upgrade it if necessary */
-    lock_type = XLATE_FLOCK(type & ADLOCK_MASK);
-    if (oldlock == LOCK_SH) {
-        if (lock_type == LOCK_EX)
-            return flock(fd, LOCK_EX | LOCK_NB);
-        else if (lock_type == LOCK_UN) /* reset it */
-            return flock(fd, LOCK_SH | LOCK_NB);
-        else /* do nothing */
-            return 0;
+    if (ad->ad_flags != AD_VERSION1_SFM) {
+        return;
+    }
+    if (ad_meta_fileno(ad) != -1) {
+        adf_unlock(&ad->ad_metadata_fork, fork);
     }
-
-    /* if we don't already have a lock, just do it. */
-    return flock(fd, lock_type | LOCK_NB);
 }
-
-
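
For contrast with the flock()-based fallbacks deleted above: flock() locks are whole-file and can only be upgraded or downgraded, so they cannot express the per-byte deny words or per-fork byte ranges the fcntl() implementation relies on. A standalone sketch of the difference (file name arbitrary):

    #include <fcntl.h>
    #include <sys/file.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);

        /* flock(): one lock for the entire file, upgrade/downgrade only. */
        flock(fd, LOCK_SH);            /* shared, whole file    */
        flock(fd, LOCK_EX | LOCK_NB);  /* upgrade, non-blocking */

        /* fcntl(): independent ranges, e.g. one per fork or lock word. */
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
                            .l_start = 0, .l_len = 4 /* just the lock bytes */ };
        fcntl(fd, F_SETLK, &fl);

        close(fd);
        return 0;
    }
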