/*
   Unix SMB/CIFS implementation.
   Samba database functions
   Copyright (C) Anton Blanchard                   2001

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define STANDALONE 1

#if HAVE_CONFIG_H
#include <config.h>
#endif

#if STANDALONE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include <sched.h>      /* for sched_yield() when USE_SCHED_YIELD is defined */
#include <signal.h>
#include "spinlock.h"

#define DEBUG
#else
#include "includes.h"
#endif

#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

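/*
 * Each port below supplies the same four primitives:
 *
 *   __spin_trylock()   - try to take the lock; returns 0 on success,
 *                        EBUSY if another holder has it
 *   __spin_unlock()    - release the lock, with a compiler/memory
 *                        barrier so prior stores are visible first
 *   __spin_lock_init() - put the lock into its unlocked state
 *   __spin_is_locked() - non-atomic peek, used only while spinning
 */
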
#if defined(SPARC_SPINLOCKS)

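/*
 * SPARC: "ldstub" atomically loads the byte at [lock] and stores 0xff
 * into it.  A result of 0 means the lock was free and is now ours;
 * anything else means it was already held.
 */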
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        asm volatile("ldstub    [%1], %0"
                : "=r" (result)
                : "r" (lock)
                : "memory");

        return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)

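/*
 * PowerPC: lwarx/stwcx. form a load-reserved/store-conditional pair.
 * The loop retries until the store-conditional succeeds; "isync"
 * keeps later loads from being speculated past the lock acquisition.
 */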
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        li              %0,0\n\
        bne-            2f\n\
        li              %0,1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r"(result)
        : "r"(lock)
        : "cr0", "memory");

        return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("eieio":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)

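/*
 * Intel x86: "xchgl" is implicitly locked and atomically exchanges 0
 * with the lock word.  Note the inverted sense relative to the other
 * ports: here 1 means unlocked and 0 means held, so trylock succeeds
 * when the old value was positive.
 */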
static inline int __spin_trylock(spinlock_t *lock)
{
        int oldval;

        asm volatile("xchgl %0,%1"
                : "=r" (oldval), "=m" (*lock)
                : "0" (0)
                : "memory");

        return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS)

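/*
 * MIPS: ll/sc (load-linked/store-conditional).  store_conditional()
 * returns non-zero only if the lock word was untouched since the
 * matching load_linked(), so __spin_trylock() retries the pair until
 * the store sticks.
 */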
static inline unsigned int load_linked(unsigned long addr)
{
        unsigned int res;

        __asm__ __volatile__("ll\t%0,(%1)"
                : "=r" (res)
                : "r" (addr));

        return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
        unsigned int res;

        __asm__ __volatile__("sc\t%0,(%2)"
                : "=r" (res)
                : "0" (value), "r" (addr));
        return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int mw;

        do {
                mw = load_linked((unsigned long)lock);
                if (mw)
                        return EBUSY;
        } while (!store_conditional((unsigned long)lock, 1));

        asm volatile("":::"memory");

        return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */

static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
        sched_yield();
#else
        /* Linux will busy loop for delays < 2ms on real time tasks */
        struct timespec tm;

        tm.tv_sec = 0;
        tm.tv_nsec = 2000000L + 1;
        nanosleep(&tm, NULL);
#endif
}

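/*
 * Placeholder: always reports a uniprocessor, so the busy-wait branch
 * in the loops below is never taken.  A real implementation would
 * probe the OS for the CPU count.
 */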
static int this_is_smp(void)
{
        return 0;
}

/*
 * GENERIC
 */

static int smp_machine = 0;

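/*
 * Take the spinlock: on an SMP machine spin up to MAX_BUSY_LOOPS times
 * on the cheap non-atomic read before retrying the atomic trylock;
 * on a uniprocessor yield immediately, since spinning cannot help.
 */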
static inline void __spin_lock(spinlock_t *lock)
{
        int ntries = 0;

        while(__spin_trylock(lock)) {
                while(__spin_is_locked(lock)) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

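/*
 * Read/write locks are built from a spinlock plus a counter: readers
 * increment "count", and a writer marks exclusivity by setting the
 * RWLOCK_BIAS bit.  A reader may enter whenever the bias is clear.
 */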
static void __read_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (!(rwlock->count & RWLOCK_BIAS)) {
                        rwlock->count++;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count & RWLOCK_BIAS) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

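/*
 * A writer must wait for the count to drain to zero (no readers, no
 * writer) before claiming the whole lock via RWLOCK_BIAS.
 */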
static void __write_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (rwlock->count == 0) {
                        rwlock->count |= RWLOCK_BIAS;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count != 0) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!(rwlock->count & RWLOCK_BIAS))
                fprintf(stderr, "bug: write_unlock\n");
#endif

        rwlock->count &= ~RWLOCK_BIAS;
        __spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!rwlock->count)
                fprintf(stderr, "bug: read_unlock with no lock held\n");

        if (rwlock->count & RWLOCK_BIAS)
                fprintf(stderr, "bug: read_unlock while write locked\n");
#endif

        rwlock->count--;
        __spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

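/*
 * A minimal usage sketch (hypothetical bucket number, error handling
 * elided); each unlock must match the list and rw_type of its lock:
 *
 *      if (tdb_spinlock(tdb, hash_bucket, F_RDLCK) == 0) {
 *              ... read the chain for hash_bucket ...
 *              tdb_spinunlock(tdb, hash_bucket, F_RDLCK);
 *      }
 */
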
/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_lock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_lock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }
        return 0;
}

/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_unlock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_unlock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }

        return 0;
}

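/*
 * Create and append the rwlock array (one per hash chain plus one for
 * the alloc list, which tdb_spinlock() addresses as list -1) to a
 * freshly created database file.
 */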
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
        unsigned size, i;
        tdb_rwlock_t *rwlocks;

        size = (hash_size + 1) * sizeof(tdb_rwlock_t);
        rwlocks = malloc(size);
        if (!rwlocks)
                return -1;

        for(i = 0; i < hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }

        /* Write it out (appending to end) */
        if (write(fd, rwlocks, size) != (ssize_t)size) {
                free(rwlocks);
                return -1;
        }
        smp_machine = this_is_smp();
        free(rwlocks);
        return 0;
}

int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_rwlock_t *rwlocks;
        unsigned i;

        if (tdb->header.rwlocks == 0) return 0;
        if (!tdb->map_ptr) return -1;

        /* We're mmapped here */
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
        for(i = 0; i < tdb->header.hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }
        return 0;
}
#else
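/*
 * Stub versions for builds without spinlock support: creating the lock
 * table trivially succeeds, and lock/unlock simply fail, since these
 * paths are not expected to be taken in such builds.
 */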
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
                                - (char *)&tdb->header);

        tdb->header.rwlocks = 0;
        if (lseek(tdb->fd, off, SEEK_SET) != off
            || write(tdb->fd, (void *)&tdb->header.rwlocks,
                     sizeof(tdb->header.rwlocks))
            != sizeof(tdb->header.rwlocks))
                return -1;
        return 0;
}
#endif